From 9993d97443b074dd9e2e9416382fe96e05a581f7 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Jul 02 2021 07:46:47 +0000 Subject: Import ovn-2021-21.03.0-40 From Fast DataPath --- diff --git a/SOURCES/ovn-21.03.0.patch b/SOURCES/ovn-21.03.0.patch index 99c4e61..57474ee 100644 --- a/SOURCES/ovn-21.03.0.patch +++ b/SOURCES/ovn-21.03.0.patch @@ -117,11 +117,68 @@ index 37b476d53..f3de6fef2 100644 AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_HEADERS([config.h]) +diff --git a/controller/automake.mk b/controller/automake.mk +index e664f1980..2f6c50890 100644 +--- a/controller/automake.mk ++++ b/controller/automake.mk +@@ -10,6 +10,8 @@ controller_ovn_controller_SOURCES = \ + controller/encaps.h \ + controller/ha-chassis.c \ + controller/ha-chassis.h \ ++ controller/if-status.c \ ++ controller/if-status.h \ + controller/ip-mcast.c \ + controller/ip-mcast.h \ + controller/lflow.c \ diff --git a/controller/binding.c b/controller/binding.c -index 4e6c75696..514f5f33f 100644 +index 4e6c75696..31f3a210f 100644 --- a/controller/binding.c +++ b/controller/binding.c -@@ -597,6 +597,23 @@ remove_local_lport_ids(const struct sbrec_port_binding *pb, +@@ -16,9 +16,9 @@ + #include + #include "binding.h" + #include "ha-chassis.h" ++#include "if-status.h" + #include "lflow.h" + #include "lport.h" +-#include "ofctrl-seqno.h" + #include "patch.h" + + #include "lib/bitmap.h" +@@ -41,32 +41,6 @@ VLOG_DEFINE_THIS_MODULE(binding); + */ + #define OVN_INSTALLED_EXT_ID "ovn-installed" + +-/* Set of OVS interface IDs that have been released in the most recent +- * processing iterations. This gets updated in release_lport() and is +- * periodically emptied in binding_seqno_run(). +- */ +-static struct sset binding_iface_released_set = +- SSET_INITIALIZER(&binding_iface_released_set); +- +-/* Set of OVS interface IDs that have been bound in the most recent +- * processing iterations. This gets updated in release_lport() and is +- * periodically emptied in binding_seqno_run(). +- */ +-static struct sset binding_iface_bound_set = +- SSET_INITIALIZER(&binding_iface_bound_set); +- +-static void +-binding_iface_released_add(const char *iface_id) +-{ +- sset_add(&binding_iface_released_set, iface_id); +-} +- +-static void +-binding_iface_bound_add(const char *iface_id) +-{ +- sset_add(&binding_iface_bound_set, iface_id); +-} +- + #define OVN_QOS_TYPE "linux-htb" + + struct qos_queue { +@@ -597,6 +571,23 @@ remove_local_lport_ids(const struct sbrec_port_binding *pb, } } @@ -145,7 +202,7 @@ index 4e6c75696..514f5f33f 100644 /* Local bindings. binding.c module binds the logical port (represented by * Port_Binding rows) and sets the 'chassis' column when it sees the * OVS interface row (of type "" or "internal") with the -@@ -608,134 +625,180 @@ remove_local_lport_ids(const struct sbrec_port_binding *pb, +@@ -608,134 +599,270 @@ remove_local_lport_ids(const struct sbrec_port_binding *pb, * 'struct local_binding' is used. A shash of these local bindings is * maintained with the 'external_ids:iface-id' as the key to the shash. * @@ -196,69 +253,46 @@ index 4e6c75696..514f5f33f 100644 * * An object of 'struct local_binding' is created: - * - For each interface that has iface-id configured with the type - BT_VIF. -- * -- * - For each container Port Binding (of type BT_CONTAINER) and its -- * parent Port_Binding (of type BT_VIF), no matter if -- * they are bound to this chassis i.e even if OVS interface row for the -- * parent is not present. + * - For each interface that has external_ids:iface-id configured. 
- * -- * - For each 'virtual' Port Binding (of type BT_VIRTUAL) provided its parent -- * is bound to this chassis. ++ * + * - For each port binding (also referred as lport) of type 'LP_VIF' + * if it is a parent lport of container lports even if there is no + * corresponding OVS interface. - */ ++ */ +struct local_binding { + char *name; + const struct ovsrec_interface *iface; + struct ovs_list binding_lports; +}; - --static struct local_binding * --local_binding_create(const char *name, const struct ovsrec_interface *iface, -- const struct sbrec_port_binding *pb, -- enum local_binding_type type) --{ -- struct local_binding *lbinding = xzalloc(sizeof *lbinding); -- lbinding->name = xstrdup(name); -- lbinding->type = type; -- lbinding->pb = pb; -- lbinding->iface = iface; -- shash_init(&lbinding->children); -- return lbinding; --} -- --static void --local_binding_add(struct shash *local_bindings, struct local_binding *lbinding) --{ -- shash_add(local_bindings, lbinding->name, lbinding); --} ++ +/* This structure represents a logical port (or port binding) + * which is associated with 'struct local_binding'. -+ * + * +- * - For each container Port Binding (of type BT_CONTAINER) and its +- * parent Port_Binding (of type BT_VIF), no matter if +- * they are bound to this chassis i.e even if OVS interface row for the +- * parent is not present. + * An instance of 'struct binding_lport' is created for a logical port + * - If the OVS interface's iface-id corresponds to the logical port. + * - If it is a container or virtual logical port and its parent + * has a 'local binding'. -+ * -+ */ + * +- * - For each 'virtual' Port Binding (of type BT_VIRTUAL) provided its parent +- * is bound to this chassis. + */ +struct binding_lport { + struct ovs_list list_node; /* Node in local_binding.binding_lports. 
*/ --static void --local_binding_destroy(struct local_binding *lbinding) --{ -- local_bindings_destroy(&lbinding->children); +-static struct local_binding * +-local_binding_create(const char *name, const struct ovsrec_interface *iface, +- const struct sbrec_port_binding *pb, +- enum local_binding_type type) + char *name; + const struct sbrec_port_binding *pb; + struct local_binding *lbinding; + enum en_lport_type type; +}; - -- free(lbinding->name); -- free(lbinding); --} ++ +static struct local_binding *local_binding_create( + const char *name, const struct ovsrec_interface *); +static void local_binding_add(struct shash *local_bindings, @@ -269,7 +303,8 @@ index 4e6c75696..514f5f33f 100644 + struct shash *binding_lports); +static void local_binding_delete(struct local_binding *, + struct shash *local_bindings, -+ struct shash *binding_lports); ++ struct shash *binding_lports, ++ struct if_status_mgr *if_mgr); +static struct binding_lport *local_binding_add_lport( + struct shash *binding_lports, + struct local_binding *, @@ -289,6 +324,8 @@ index 4e6c75696..514f5f33f 100644 + struct binding_lport *); +static void binding_lport_add(struct shash *binding_lports, + struct binding_lport *); ++static void binding_lport_set_up(struct binding_lport *, bool sb_readonly); ++static void binding_lport_set_down(struct binding_lport *, bool sb_readonly); +static struct binding_lport *binding_lport_find( + struct shash *binding_lports, const char *lport_name); +static const struct sbrec_port_binding *binding_lport_get_parent_pb( @@ -297,22 +334,28 @@ index 4e6c75696..514f5f33f 100644 + struct binding_lport *, struct shash *b_lports); + +static char *get_lport_type_str(enum en_lport_type lport_type); - - void --local_bindings_init(struct shash *local_bindings) ++ ++void +local_binding_data_init(struct local_binding_data *lbinding_data) { -- shash_init(local_bindings); +- struct local_binding *lbinding = xzalloc(sizeof *lbinding); +- lbinding->name = xstrdup(name); +- lbinding->type = type; +- lbinding->pb = pb; +- lbinding->iface = iface; +- shash_init(&lbinding->children); +- return lbinding; + shash_init(&lbinding_data->bindings); + shash_init(&lbinding_data->lports); } - void --local_bindings_destroy(struct shash *local_bindings) +-static void +-local_binding_add(struct shash *local_bindings, struct local_binding *lbinding) ++void +local_binding_data_destroy(struct local_binding_data *lbinding_data) { - struct shash_node *node, *next; -- SHASH_FOR_EACH_SAFE (node, next, local_bindings) { +- shash_add(local_bindings, lbinding->name, lbinding); ++ struct shash_node *node, *next; + + SHASH_FOR_EACH_SAFE (node, next, &lbinding_data->lports) { + struct binding_lport *b_lport = node->data; @@ -321,56 +364,154 @@ index 4e6c75696..514f5f33f 100644 + } + + SHASH_FOR_EACH_SAFE (node, next, &lbinding_data->bindings) { - struct local_binding *lbinding = node->data; -- local_binding_destroy(lbinding); -- shash_delete(local_bindings, node); ++ struct local_binding *lbinding = node->data; + local_binding_destroy(lbinding, &lbinding_data->lports); + shash_delete(&lbinding_data->bindings, node); - } - -- shash_destroy(local_bindings); ++ } ++ + shash_destroy(&lbinding_data->lports); + shash_destroy(&lbinding_data->bindings); } --static --void local_binding_delete(struct shash *local_bindings, -- struct local_binding *lbinding) +-static void +-local_binding_destroy(struct local_binding *lbinding) +const struct sbrec_port_binding * +local_binding_get_primary_pb(struct shash *local_bindings, const char *pb_name) { -- 
shash_find_and_delete(local_bindings, lbinding->name); -- local_binding_destroy(lbinding); --} +- local_bindings_destroy(&lbinding->children); ++ struct local_binding *lbinding = ++ local_binding_find(local_bindings, pb_name); ++ struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding); + +- free(lbinding->name); +- free(lbinding); ++ return b_lport ? b_lport->pb : NULL; + } + +-void +-local_bindings_init(struct shash *local_bindings) ++bool ++local_binding_is_up(struct shash *local_bindings, const char *pb_name) + { +- shash_init(local_bindings); ++ struct local_binding *lbinding = ++ local_binding_find(local_bindings, pb_name); ++ struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding); ++ if (lbinding && b_lport && lbinding->iface) { ++ if (b_lport->pb->n_up && !b_lport->pb->up[0]) { ++ return false; ++ } ++ return smap_get_bool(&lbinding->iface->external_ids, ++ OVN_INSTALLED_EXT_ID, false); ++ } ++ return false; + } + +-void +-local_bindings_destroy(struct shash *local_bindings) ++bool ++local_binding_is_down(struct shash *local_bindings, const char *pb_name) + { +- struct shash_node *node, *next; +- SHASH_FOR_EACH_SAFE (node, next, local_bindings) { +- struct local_binding *lbinding = node->data; +- local_binding_destroy(lbinding); +- shash_delete(local_bindings, node); + struct local_binding *lbinding = + local_binding_find(local_bindings, pb_name); ++ + struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding); ++ ++ if (!lbinding) { ++ return true; + } + +- shash_destroy(local_bindings); +-} ++ if (lbinding->iface && smap_get_bool(&lbinding->iface->external_ids, ++ OVN_INSTALLED_EXT_ID, false)) { ++ return false; ++ } + +-static +-void local_binding_delete(struct shash *local_bindings, +- struct local_binding *lbinding) +-{ +- shash_find_and_delete(local_bindings, lbinding->name); +- local_binding_destroy(lbinding); ++ if (b_lport && b_lport->pb->n_up && b_lport->pb->up[0]) { ++ return false; ++ } ++ ++ return true; + } -static void -local_binding_add_child(struct local_binding *lbinding, - struct local_binding *child) --{ ++void ++local_binding_set_up(struct shash *local_bindings, const char *pb_name, ++ bool sb_readonly, bool ovs_readonly) + { - local_binding_add(&lbinding->children, child); - child->parent = lbinding; -+ return b_lport ? 
b_lport->pb : NULL; ++ struct local_binding *lbinding = ++ local_binding_find(local_bindings, pb_name); ++ struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding); ++ ++ if (!ovs_readonly && lbinding && lbinding->iface ++ && !smap_get_bool(&lbinding->iface->external_ids, ++ OVN_INSTALLED_EXT_ID, false)) { ++ ovsrec_interface_update_external_ids_setkey(lbinding->iface, ++ OVN_INSTALLED_EXT_ID, ++ "true"); ++ } ++ ++ if (!sb_readonly && lbinding && b_lport && b_lport->pb->n_up) { ++ binding_lport_set_up(b_lport, sb_readonly); ++ LIST_FOR_EACH (b_lport, list_node, &lbinding->binding_lports) { ++ binding_lport_set_up(b_lport, sb_readonly); ++ } ++ } } -static struct local_binding * -local_binding_find_child(struct local_binding *lbinding, - const char *child_name) +void -+binding_dump_local_bindings(struct local_binding_data *lbinding_data, -+ struct ds *out_data) ++local_binding_set_down(struct shash *local_bindings, const char *pb_name, ++ bool sb_readonly, bool ovs_readonly) { - return local_binding_find(&lbinding->children, child_name); --} -+ const struct shash_node **nodes; ++ struct local_binding *lbinding = ++ local_binding_find(local_bindings, pb_name); ++ struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding); ++ ++ if (!ovs_readonly && lbinding && lbinding->iface ++ && smap_get_bool(&lbinding->iface->external_ids, ++ OVN_INSTALLED_EXT_ID, false)) { ++ ovsrec_interface_update_external_ids_delkey(lbinding->iface, ++ OVN_INSTALLED_EXT_ID); ++ } ++ ++ if (!sb_readonly && b_lport && b_lport->pb->n_up) { ++ binding_lport_set_down(b_lport, sb_readonly); ++ LIST_FOR_EACH (b_lport, list_node, &lbinding->binding_lports) { ++ binding_lport_set_down(b_lport, sb_readonly); ++ } ++ } + } -static void -local_binding_delete_child(struct local_binding *lbinding, - struct local_binding *child) --{ ++void ++binding_dump_local_bindings(struct local_binding_data *lbinding_data, ++ struct ds *out_data) + { - shash_find_and_delete(&lbinding->children, child->name); ++ const struct shash_node **nodes; ++ + nodes = shash_sort(&lbinding_data->bindings); + size_t n = shash_count(&lbinding_data->bindings); + @@ -428,7 +569,7 @@ index 4e6c75696..514f5f33f 100644 } static bool -@@ -744,12 +807,6 @@ is_lport_vif(const struct sbrec_port_binding *pb) +@@ -744,12 +871,6 @@ is_lport_vif(const struct sbrec_port_binding *pb) return !pb->type[0]; } @@ -441,7 +582,7 @@ index 4e6c75696..514f5f33f 100644 static struct tracked_binding_datapath * tracked_binding_datapath_create(const struct sbrec_datapath_binding *dp, bool is_new, -@@ -818,26 +875,13 @@ binding_tracked_dp_destroy(struct hmap *tracked_datapaths) +@@ -818,26 +939,13 @@ binding_tracked_dp_destroy(struct hmap *tracked_datapaths) hmap_destroy(tracked_datapaths); } @@ -471,7 +612,7 @@ index 4e6c75696..514f5f33f 100644 return LP_VIF; } else if (!strcmp(pb->type, "patch")) { return LP_PATCH; -@@ -864,6 +908,41 @@ get_lport_type(const struct sbrec_port_binding *pb) +@@ -864,6 +972,41 @@ get_lport_type(const struct sbrec_port_binding *pb) return LP_UNKNOWN; } @@ -513,7 +654,62 @@ index 4e6c75696..514f5f33f 100644 /* For newly claimed ports, if 'notify_up' is 'false': * - set the 'pb.up' field to true if 'pb' has no 'parent_pb'. 
* - set the 'pb.up' field to true if 'parent_pb.up' is 'true' (e.g., for -@@ -991,14 +1070,15 @@ release_lport(const struct sbrec_port_binding *pb, bool sb_readonly, +@@ -880,7 +1023,7 @@ static void + claimed_lport_set_up(const struct sbrec_port_binding *pb, + const struct sbrec_port_binding *parent_pb, + const struct sbrec_chassis *chassis_rec, +- bool notify_up) ++ bool notify_up, struct if_status_mgr *if_mgr) + { + if (!notify_up) { + bool up = true; +@@ -891,7 +1034,7 @@ claimed_lport_set_up(const struct sbrec_port_binding *pb, + } + + if (pb->chassis != chassis_rec || (pb->n_up && !pb->up[0])) { +- binding_iface_bound_add(pb->logical_port); ++ if_status_mgr_claim_iface(if_mgr, pb->logical_port); + } + } + +@@ -904,10 +1047,11 @@ claim_lport(const struct sbrec_port_binding *pb, + const struct sbrec_chassis *chassis_rec, + const struct ovsrec_interface *iface_rec, + bool sb_readonly, bool notify_up, +- struct hmap *tracked_datapaths) ++ struct hmap *tracked_datapaths, ++ struct if_status_mgr *if_mgr) + { + if (!sb_readonly) { +- claimed_lport_set_up(pb, parent_pb, chassis_rec, notify_up); ++ claimed_lport_set_up(pb, parent_pb, chassis_rec, notify_up, if_mgr); + } + + if (pb->chassis != chassis_rec) { +@@ -955,7 +1099,7 @@ claim_lport(const struct sbrec_port_binding *pb, + */ + static bool + release_lport(const struct sbrec_port_binding *pb, bool sb_readonly, +- struct hmap *tracked_datapaths) ++ struct hmap *tracked_datapaths, struct if_status_mgr *if_mgr) + { + if (pb->encap) { + if (sb_readonly) { +@@ -978,12 +1122,8 @@ release_lport(const struct sbrec_port_binding *pb, bool sb_readonly, + sbrec_port_binding_set_virtual_parent(pb, NULL); + } + +- if (pb->n_up) { +- bool up = false; +- sbrec_port_binding_set_up(pb, &up, 1); +- } + update_lport_tracking(pb, tracked_datapaths); +- binding_iface_released_add(pb->logical_port); ++ if_status_mgr_release_iface(if_mgr, pb->logical_port); + VLOG_INFO("Releasing lport %s from this chassis.", pb->logical_port); + return true; + } +@@ -991,14 +1131,15 @@ release_lport(const struct sbrec_port_binding *pb, bool sb_readonly, static bool is_lbinding_set(struct local_binding *lbinding) { @@ -533,7 +729,7 @@ index 4e6c75696..514f5f33f 100644 } static bool -@@ -1010,15 +1090,14 @@ can_bind_on_this_chassis(const struct sbrec_chassis *chassis_rec, +@@ -1010,15 +1151,14 @@ can_bind_on_this_chassis(const struct sbrec_chassis *chassis_rec, || !strcmp(requested_chassis, chassis_rec->hostname); } @@ -553,7 +749,7 @@ index 4e6c75696..514f5f33f 100644 return true; } } -@@ -1027,66 +1106,41 @@ is_lbinding_container_parent(struct local_binding *lbinding) +@@ -1027,66 +1167,44 @@ is_lbinding_container_parent(struct local_binding *lbinding) } static bool @@ -576,12 +772,14 @@ index 4e6c75696..514f5f33f 100644 + if (is_binding_lport_this_chassis(b_lport, chassis_rec)) { + remove_local_lport_ids(b_lport->pb, b_ctx_out); + if (!release_lport(b_lport->pb, sb_readonly, -+ b_ctx_out->tracked_dp_bindings)) { ++ b_ctx_out->tracked_dp_bindings, ++ b_ctx_out->if_mgr)) { + return false; } - - /* Clear the local bindings' 'iface'. 
*/ - l->iface = NULL; ++ binding_lport_set_down(b_lport, sb_readonly); } return true; @@ -632,11 +830,12 @@ index 4e6c75696..514f5f33f 100644 - b_ctx_out->tracked_dp_bindings)){ + b_lport->lbinding->iface, + !b_ctx_in->ovnsb_idl_txn, -+ !parent_pb, b_ctx_out->tracked_dp_bindings)){ ++ !parent_pb, b_ctx_out->tracked_dp_bindings, ++ b_ctx_out->if_mgr)){ return false; } -@@ -1098,7 +1152,7 @@ consider_vif_lport_(const struct sbrec_port_binding *pb, +@@ -1098,7 +1216,7 @@ consider_vif_lport_(const struct sbrec_port_binding *pb, b_ctx_out->tracked_dp_bindings); update_local_lport_ids(pb, b_ctx_out); update_local_lports(pb->logical_port, b_ctx_out); @@ -645,7 +844,17 @@ index 4e6c75696..514f5f33f 100644 get_qos_params(pb, qos_map); } } else { -@@ -1136,16 +1190,19 @@ consider_vif_lport(const struct sbrec_port_binding *pb, +@@ -1117,7 +1235,8 @@ consider_vif_lport_(const struct sbrec_port_binding *pb, + /* Release the lport if there is no lbinding. */ + if (!lbinding_set || !can_bind) { + return release_lport(pb, !b_ctx_in->ovnsb_idl_txn, +- b_ctx_out->tracked_dp_bindings); ++ b_ctx_out->tracked_dp_bindings, ++ b_ctx_out->if_mgr); + } + } + +@@ -1136,16 +1255,19 @@ consider_vif_lport(const struct sbrec_port_binding *pb, vif_chassis); if (!lbinding) { @@ -668,7 +877,7 @@ index 4e6c75696..514f5f33f 100644 } static bool -@@ -1154,9 +1211,9 @@ consider_container_lport(const struct sbrec_port_binding *pb, +@@ -1154,9 +1276,9 @@ consider_container_lport(const struct sbrec_port_binding *pb, struct binding_ctx_out *b_ctx_out, struct hmap *qos_map) { @@ -680,7 +889,7 @@ index 4e6c75696..514f5f33f 100644 if (!parent_lbinding) { /* There is no local_binding for parent port. Create it -@@ -1171,54 +1228,61 @@ consider_container_lport(const struct sbrec_port_binding *pb, +@@ -1171,54 +1293,62 @@ consider_container_lport(const struct sbrec_port_binding *pb, * we want the these container ports also be claimed by the * chassis. 
* */ @@ -752,7 +961,8 @@ index 4e6c75696..514f5f33f 100644 + if (is_binding_lport_this_chassis(container_b_lport, + b_ctx_in->chassis_rec)) { + return release_lport(pb, !b_ctx_in->ovnsb_idl_txn, -+ b_ctx_out->tracked_dp_bindings); ++ b_ctx_out->tracked_dp_bindings, ++ b_ctx_out->if_mgr); } + + return true; @@ -771,7 +981,7 @@ index 4e6c75696..514f5f33f 100644 } static bool -@@ -1227,46 +1291,58 @@ consider_virtual_lport(const struct sbrec_port_binding *pb, +@@ -1227,46 +1357,58 @@ consider_virtual_lport(const struct sbrec_port_binding *pb, struct binding_ctx_out *b_ctx_out, struct hmap *qos_map) { @@ -859,7 +1069,22 @@ index 4e6c75696..514f5f33f 100644 } /* Considers either claiming the lport or releasing the lport -@@ -1407,6 +1483,8 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in, +@@ -1291,10 +1433,12 @@ consider_nonvif_lport_(const struct sbrec_port_binding *pb, + update_local_lport_ids(pb, b_ctx_out); + return claim_lport(pb, NULL, b_ctx_in->chassis_rec, NULL, + !b_ctx_in->ovnsb_idl_txn, false, +- b_ctx_out->tracked_dp_bindings); ++ b_ctx_out->tracked_dp_bindings, ++ b_ctx_out->if_mgr); + } else if (pb->chassis == b_ctx_in->chassis_rec) { + return release_lport(pb, !b_ctx_in->ovnsb_idl_txn, +- b_ctx_out->tracked_dp_bindings); ++ b_ctx_out->tracked_dp_bindings, ++ b_ctx_out->if_mgr); + } + + return true; +@@ -1407,6 +1551,8 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in, continue; } @@ -868,7 +1093,7 @@ index 4e6c75696..514f5f33f 100644 for (j = 0; j < port_rec->n_interfaces; j++) { const struct ovsrec_interface *iface_rec; -@@ -1416,11 +1494,10 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in, +@@ -1416,11 +1562,10 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in, if (iface_id && ofport > 0) { struct local_binding *lbinding = @@ -883,7 +1108,7 @@ index 4e6c75696..514f5f33f 100644 } else { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); -@@ -1431,7 +1508,6 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in, +@@ -1431,7 +1576,6 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in, "configuration on interface [%s]", lbinding->iface->name, iface_rec->name, iface_rec->name); @@ -891,7 +1116,7 @@ index 4e6c75696..514f5f33f 100644 } update_local_lports(iface_id, b_ctx_out); -@@ -1494,11 +1570,11 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out) +@@ -1494,11 +1638,11 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out) break; case LP_VIF: @@ -908,7 +1133,7 @@ index 4e6c75696..514f5f33f 100644 break; case LP_VIRTUAL: -@@ -1799,39 +1875,44 @@ consider_iface_claim(const struct ovsrec_interface *iface_rec, +@@ -1799,39 +1943,44 @@ consider_iface_claim(const struct ovsrec_interface *iface_rec, update_local_lports(iface_id, b_ctx_out); smap_replace(b_ctx_out->local_iface_ids, iface_rec->name, iface_id); @@ -973,7 +1198,7 @@ index 4e6c75696..514f5f33f 100644 qos_map)) { return false; } -@@ -1862,32 +1943,42 @@ consider_iface_release(const struct ovsrec_interface *iface_rec, +@@ -1862,32 +2011,43 @@ consider_iface_release(const struct ovsrec_interface *iface_rec, struct binding_ctx_out *b_ctx_out) { struct local_binding *lbinding; @@ -1027,11 +1252,12 @@ index 4e6c75696..514f5f33f 100644 * If so, don't delete the local_binding. 
*/ if (lbinding && !is_lbinding_container_parent(lbinding)) { - local_binding_delete(b_ctx_out->local_bindings, lbinding); -+ local_binding_delete(lbinding, local_bindings, binding_lports); ++ local_binding_delete(lbinding, local_bindings, binding_lports, ++ b_ctx_out->if_mgr); } remove_local_lports(iface_id, b_ctx_out); -@@ -2088,56 +2179,35 @@ handle_deleted_lport(const struct sbrec_port_binding *pb, +@@ -2088,56 +2248,35 @@ handle_deleted_lport(const struct sbrec_port_binding *pb, } } @@ -1110,7 +1336,7 @@ index 4e6c75696..514f5f33f 100644 } } -@@ -2147,18 +2217,8 @@ handle_deleted_vif_lport(const struct sbrec_port_binding *pb, +@@ -2147,18 +2286,8 @@ handle_deleted_vif_lport(const struct sbrec_port_binding *pb, * it from local_lports if there is a VIF entry. * consider_iface_release() takes care of removing from the local_lports * when the interface change happens. */ @@ -1130,7 +1356,7 @@ index 4e6c75696..514f5f33f 100644 } handle_deleted_lport(pb, b_ctx_in, b_ctx_out); -@@ -2177,7 +2237,7 @@ handle_updated_vif_lport(const struct sbrec_port_binding *pb, +@@ -2177,7 +2306,7 @@ handle_updated_vif_lport(const struct sbrec_port_binding *pb, if (lport_type == LP_VIRTUAL) { handled = consider_virtual_lport(pb, b_ctx_in, b_ctx_out, qos_map); @@ -1139,7 +1365,7 @@ index 4e6c75696..514f5f33f 100644 handled = consider_container_lport(pb, b_ctx_in, b_ctx_out, qos_map); } else { handled = consider_vif_lport(pb, b_ctx_in, b_ctx_out, NULL, qos_map); -@@ -2189,14 +2249,14 @@ handle_updated_vif_lport(const struct sbrec_port_binding *pb, +@@ -2189,14 +2318,14 @@ handle_updated_vif_lport(const struct sbrec_port_binding *pb, bool now_claimed = (pb->chassis == b_ctx_in->chassis_rec); @@ -1158,7 +1384,7 @@ index 4e6c75696..514f5f33f 100644 /* If the ovs port backing this binding previously was removed in the * meantime, we won't have a local_binding for it. -@@ -2206,12 +2266,11 @@ handle_updated_vif_lport(const struct sbrec_port_binding *pb, +@@ -2206,12 +2335,11 @@ handle_updated_vif_lport(const struct sbrec_port_binding *pb, return true; } @@ -1176,7 +1402,7 @@ index 4e6c75696..514f5f33f 100644 if (!handled) { return false; } -@@ -2256,12 +2315,25 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in, +@@ -2256,12 +2384,25 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in, enum en_lport_type lport_type = get_lport_type(pb); @@ -1207,7 +1433,7 @@ index 4e6c75696..514f5f33f 100644 } else if (lport_type == LP_VIRTUAL) { shash_add(&deleted_virtual_pbs, pb->logical_port, pb); } else { -@@ -2272,7 +2344,7 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in, +@@ -2272,7 +2413,7 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in, struct shash_node *node; struct shash_node *node_next; SHASH_FOR_EACH_SAFE (node, node_next, &deleted_container_pbs) { @@ -1216,7 +1442,7 @@ index 4e6c75696..514f5f33f 100644 b_ctx_out); shash_delete(&deleted_container_pbs, node); if (!handled) { -@@ -2326,12 +2398,33 @@ delete_done: +@@ -2326,12 +2467,33 @@ delete_done: enum en_lport_type lport_type = get_lport_type(pb); @@ -1250,90 +1476,12 @@ index 4e6c75696..514f5f33f 100644 case LP_VIRTUAL: handled = handle_updated_vif_lport(pb, lport_type, b_ctx_in, b_ctx_out, qos_map_ptr); -@@ -2468,11 +2561,11 @@ binding_init(void) - * available. 
- */ - void --binding_seqno_run(struct shash *local_bindings) -+binding_seqno_run(struct local_binding_data *lbinding_data) - { - const char *iface_id; - const char *iface_id_next; -- -+ struct shash *local_bindings = &lbinding_data->bindings; - SSET_FOR_EACH_SAFE (iface_id, iface_id_next, &binding_iface_released_set) { - struct shash_node *lb_node = shash_find(local_bindings, iface_id); - -@@ -2508,16 +2601,17 @@ binding_seqno_run(struct shash *local_bindings) - * If so, then this is a newly bound interface, make sure we reset the - * Port_Binding 'up' field and the OVS Interface 'external-id'. - */ -- if (lb && lb->pb && lb->iface) { -+ struct binding_lport *b_lport = local_binding_get_primary_lport(lb); -+ if (lb && b_lport && lb->iface) { - new_ifaces = true; - - if (smap_get(&lb->iface->external_ids, OVN_INSTALLED_EXT_ID)) { - ovsrec_interface_update_external_ids_delkey( - lb->iface, OVN_INSTALLED_EXT_ID); - } -- if (lb->pb->n_up) { -+ if (b_lport->pb->n_up) { - bool up = false; -- sbrec_port_binding_set_up(lb->pb, &up, 1); -+ sbrec_port_binding_set_up(b_lport->pb, &up, 1); - } - simap_put(&binding_iface_seqno_map, lb->name, new_seqno); - } -@@ -2542,12 +2636,13 @@ binding_seqno_run(struct shash *local_bindings) - * available. - */ - void --binding_seqno_install(struct shash *local_bindings) -+binding_seqno_install(struct local_binding_data *lbinding_data) - { - struct ofctrl_acked_seqnos *acked_seqnos = - ofctrl_acked_seqnos_get(binding_seq_type_pb_cfg); - struct simap_node *node; - struct simap_node *node_next; -+ struct shash *local_bindings = &lbinding_data->bindings; - - SIMAP_FOR_EACH_SAFE (node, node_next, &binding_iface_seqno_map) { - struct shash_node *lb_node = shash_find(local_bindings, node->name); -@@ -2557,7 +2652,8 @@ binding_seqno_install(struct shash *local_bindings) - } - - struct local_binding *lb = lb_node->data; -- if (!lb->pb || !lb->iface) { -+ struct binding_lport *b_lport = local_binding_get_primary_lport(lb); -+ if (!b_lport || !lb->iface) { - goto del_seqno; - } - -@@ -2568,14 +2664,12 @@ binding_seqno_install(struct shash *local_bindings) - ovsrec_interface_update_external_ids_setkey(lb->iface, - OVN_INSTALLED_EXT_ID, - "true"); -- if (lb->pb->n_up) { -+ if (b_lport->pb->n_up) { - bool up = true; - -- sbrec_port_binding_set_up(lb->pb, &up, 1); -- struct shash_node *child_node; -- SHASH_FOR_EACH (child_node, &lb->children) { -- struct local_binding *lb_child = child_node->data; -- sbrec_port_binding_set_up(lb_child->pb, &up, 1); -+ sbrec_port_binding_set_up(b_lport->pb, &up, 1); -+ LIST_FOR_EACH (b_lport, list_node, &lb->binding_lports) { -+ sbrec_port_binding_set_up(b_lport->pb, &up, 1); - } - } - -@@ -2591,3 +2685,305 @@ binding_seqno_flush(void) - { - simap_clear(&binding_iface_seqno_map); +@@ -2440,154 +2602,327 @@ delete_done: + return handled; } -+ + +-/* Registered ofctrl seqno type for port_binding flow installation. */ +-static size_t binding_seq_type_pb_cfg; +/* Static functions for local_lbindind and binding_lport. */ +static struct local_binding * +local_binding_create(const char *name, const struct ovsrec_interface *iface) @@ -1345,23 +1493,35 @@ index 4e6c75696..514f5f33f 100644 + + return lbinding; +} -+ + +-/* Binding specific seqno to be acked by ofctrl when flows for new interfaces +- * have been installed. 
+- */ +-static uint32_t binding_iface_seqno = 0; +static struct local_binding * +local_binding_find(struct shash *local_bindings, const char *name) +{ + return shash_find_data(local_bindings, name); +} -+ + +-/* Map indexed by iface-id containing the sequence numbers that when acked +- * indicate that the OVS flows for the iface-id have been installed. +- */ +-static struct simap binding_iface_seqno_map = +- SIMAP_INITIALIZER(&binding_iface_seqno_map); +static void +local_binding_add(struct shash *local_bindings, struct local_binding *lbinding) +{ + shash_add(local_bindings, lbinding->name, lbinding); +} -+ + +-void +-binding_init(void) +static void +local_binding_destroy(struct local_binding *lbinding, + struct shash *binding_lports) -+{ + { +- binding_seq_type_pb_cfg = ofctrl_seqno_add_type(); + struct binding_lport *b_lport; + LIST_FOR_EACH_POP (b_lport, list_node, &lbinding->binding_lports) { + b_lport->lbinding = NULL; @@ -1370,14 +1530,22 @@ index 4e6c75696..514f5f33f 100644 + + free(lbinding->name); + free(lbinding); -+} -+ -+static void -+local_binding_delete(struct local_binding *lbinding, + } + +-/* Processes new release/bind operations OVN ports. For newly bound ports +- * it creates ofctrl seqno update requests that will be acked when +- * corresponding OVS flows have been installed. +- * +- * NOTE: Should be called only when valid SB and OVS transactions are +- * available. ++static void ++local_binding_delete(struct local_binding *lbinding, + struct shash *local_bindings, -+ struct shash *binding_lports) ++ struct shash *binding_lports, ++ struct if_status_mgr *if_mgr) +{ + shash_find_and_delete(local_bindings, lbinding->name); ++ if_status_mgr_delete_iface(if_mgr, lbinding->name); + local_binding_destroy(lbinding, binding_lports); +} + @@ -1385,25 +1553,42 @@ index 4e6c75696..514f5f33f 100644 + * binding lports list. A binding lport is considered primary + * if binding lport's type is LP_VIF and the name matches + * with the 'lbinding'. -+ */ + */ +-void +-binding_seqno_run(struct shash *local_bindings) +static struct binding_lport * +local_binding_get_primary_lport(struct local_binding *lbinding) -+{ + { +- const char *iface_id; +- const char *iface_id_next; + if (!lbinding) { + return NULL; + } -+ + +- SSET_FOR_EACH_SAFE (iface_id, iface_id_next, &binding_iface_released_set) { +- struct shash_node *lb_node = shash_find(local_bindings, iface_id); + if (!ovs_list_is_empty(&lbinding->binding_lports)) { + struct binding_lport *b_lport = NULL; + b_lport = CONTAINER_OF(ovs_list_front(&lbinding->binding_lports), + struct binding_lport, list_node); -+ + +- /* If the local binding still exists (i.e., the OVS interface is +- * still configured locally) then remove the external id and remove +- * it from the in-flight seqno map. 
+- */ +- if (lb_node) { +- struct local_binding *lb = lb_node->data; + if (b_lport->type == LP_VIF && + !strcmp(lbinding->name, b_lport->name)) { + return b_lport; + } + } -+ + +- if (lb->iface && smap_get(&lb->iface->external_ids, +- OVN_INSTALLED_EXT_ID)) { +- ovsrec_interface_update_external_ids_delkey( +- lb->iface, OVN_INSTALLED_EXT_ID); +- } + return NULL; +} + @@ -1424,11 +1609,16 @@ index 4e6c75696..514f5f33f 100644 + add_to_lport_list = true; + if (!ovs_list_is_empty(&b_lport->list_node)) { + ovs_list_remove(&b_lport->list_node); -+ } + } +- simap_find_and_delete(&binding_iface_seqno_map, iface_id); +- sset_delete(&binding_iface_released_set, +- SSET_NODE_FROM_NAME(iface_id)); + b_lport->lbinding = lbinding; + b_lport->type = b_type; -+ } -+ + } + +- bool new_ifaces = false; +- uint32_t new_seqno = binding_iface_seqno + 1; + if (add_to_lport_list) { + if (b_type == LP_VIF) { + ovs_list_push_front(&lbinding->binding_lports, &b_lport->list_node); @@ -1436,10 +1626,13 @@ index 4e6c75696..514f5f33f 100644 + ovs_list_push_back(&lbinding->binding_lports, &b_lport->list_node); + } + } -+ + +- SSET_FOR_EACH_SAFE (iface_id, iface_id_next, &binding_iface_bound_set) { +- struct shash_node *lb_node = shash_find(local_bindings, iface_id); + return b_lport; +} -+ + +- struct local_binding *lb = lb_node ? lb_node->data : NULL; +/* This function handles the stale binding lports of 'lbinding' if 'lbinding' + * doesn't have a primary binding lport. + */ @@ -1455,7 +1648,25 @@ index 4e6c75696..514f5f33f 100644 + /* Nothing to be done. */ + return true; + } -+ + +- /* Make sure the binding is still complete, i.e., both SB port_binding +- * and OVS interface still exist. +- * +- * If so, then this is a newly bound interface, make sure we reset the +- * Port_Binding 'up' field and the OVS Interface 'external-id'. +- */ +- if (lb && lb->pb && lb->iface) { +- new_ifaces = true; +- +- if (smap_get(&lb->iface->external_ids, OVN_INSTALLED_EXT_ID)) { +- ovsrec_interface_update_external_ids_delkey( +- lb->iface, OVN_INSTALLED_EXT_ID); +- } +- if (lb->pb->n_up) { +- bool up = false; +- sbrec_port_binding_set_up(lb->pb, &up, 1); +- } +- simap_put(&binding_iface_seqno_map, lb->name, new_seqno); + bool handled = true; + struct binding_lport *b_lport, *next; + const struct sbrec_port_binding *pb; @@ -1488,9 +1699,16 @@ index 4e6c75696..514f5f33f 100644 + + if (!handled) { + return false; -+ } -+ } -+ + } +- sset_delete(&binding_iface_bound_set, SSET_NODE_FROM_NAME(iface_id)); + } + +- /* Request a seqno update when the flows for new interfaces have been +- * installed in OVS. +- */ +- if (new_ifaces) { +- binding_iface_seqno = new_seqno; +- ofctrl_seqno_update_create(binding_seq_type_pb_cfg, new_seqno); + return handled; +} + @@ -1520,11 +1738,15 @@ index 4e6c75696..514f5f33f 100644 +{ + if (!lport_name) { + return NULL; -+ } + } + + return shash_find_data(binding_lports, lport_name); -+} -+ + } + +-/* Processes ofctrl seqno ACKs for new bindings. Sets the +- * 'OVN_INSTALLED_EXT_ID' external-id in the OVS interface and the +- * Port_Binding.up field for all ports for which OVS flows have been +- * installed. 
+static void +binding_lport_destroy(struct binding_lport *b_lport) +{ @@ -1544,6 +1766,27 @@ index 4e6c75696..514f5f33f 100644 + binding_lport_destroy(b_lport); +} + ++static void ++binding_lport_set_up(struct binding_lport *b_lport, bool sb_readonly) ++{ ++ if (sb_readonly || !b_lport || !b_lport->pb->n_up || b_lport->pb->up[0]) { ++ return; ++ } ++ ++ bool up = true; ++ sbrec_port_binding_set_up(b_lport->pb, &up, 1); ++} ++ ++static void ++binding_lport_set_down(struct binding_lport *b_lport, bool sb_readonly) ++{ ++ if (sb_readonly || !b_lport || !b_lport->pb->n_up || !b_lport->pb->up[0]) { ++ return; ++ } ++ ++ bool up = false; ++ sbrec_port_binding_set_up(b_lport->pb, &up, 1); ++} + +static const struct sbrec_port_binding * +binding_lport_get_parent_pb(struct binding_lport *b_lport) @@ -1570,7 +1813,9 @@ index 4e6c75696..514f5f33f 100644 + * + * If the 'b_lport' type is LP_VIF, then its name and its lbinding->name + * should match. Otherwise this should be cleaned up. -+ * + * +- * NOTE: Should be called only when valid SB and OVS transactions are +- * available. + * If the 'b_lport' type is LP_CONTAINER, then its parent_port name should + * be the same as its lbinding's name. Otherwise this should be + * cleaned up. @@ -1582,38 +1827,69 @@ index 4e6c75696..514f5f33f 100644 + * If the 'b_lport' type is not LP_VIF, LP_CONTAINER or LP_VIRTUAL, it + * should be cleaned up. This can happen if the CMS changes + * the port binding type. -+ */ + */ +-void +-binding_seqno_install(struct shash *local_bindings) +static struct binding_lport * +binding_lport_check_and_cleanup(struct binding_lport *b_lport, + struct shash *binding_lports) -+{ + { +- struct ofctrl_acked_seqnos *acked_seqnos = +- ofctrl_acked_seqnos_get(binding_seq_type_pb_cfg); +- struct simap_node *node; +- struct simap_node *node_next; + bool cleanup_blport = false; -+ + +- SIMAP_FOR_EACH_SAFE (node, node_next, &binding_iface_seqno_map) { +- struct shash_node *lb_node = shash_find(local_bindings, node->name); +- +- if (!lb_node) { +- goto del_seqno; +- } + if (!b_lport->lbinding) { + cleanup_blport = true; + goto cleanup; + } -+ + +- struct local_binding *lb = lb_node->data; +- if (!lb->pb || !lb->iface) { +- goto del_seqno; + switch (b_lport->type) { + case LP_VIF: + if (strcmp(b_lport->name, b_lport->lbinding->name)) { + cleanup_blport = true; -+ } + } + break; -+ + +- if (!ofctrl_acked_seqnos_contains(acked_seqnos, node->data)) { +- continue; + case LP_CONTAINER: + if (strcmp(b_lport->pb->parent_port, b_lport->lbinding->name)) { + cleanup_blport = true; -+ } + } + break; -+ + +- ovsrec_interface_update_external_ids_setkey(lb->iface, +- OVN_INSTALLED_EXT_ID, +- "true"); +- if (lb->pb->n_up) { +- bool up = true; +- +- sbrec_port_binding_set_up(lb->pb, &up, 1); +- struct shash_node *child_node; +- SHASH_FOR_EACH (child_node, &lb->children) { +- struct local_binding *lb_child = child_node->data; +- sbrec_port_binding_set_up(lb_child->pb, &up, 1); +- } + case LP_VIRTUAL: + if (!b_lport->pb->virtual_parent || + strcmp(b_lport->pb->virtual_parent, b_lport->lbinding->name)) { + cleanup_blport = true; -+ } + } + break; -+ + +-del_seqno: +- simap_delete(&binding_iface_seqno_map, node); + case LP_PATCH: + case LP_LOCALPORT: + case LP_VTEP: @@ -1625,29 +1901,36 @@ index 4e6c75696..514f5f33f 100644 + case LP_REMOTE: + case LP_UNKNOWN: + cleanup_blport = true; -+ } -+ + } + +- ofctrl_acked_seqnos_destroy(acked_seqnos); +-} +cleanup: + if (cleanup_blport) { + binding_lport_delete(binding_lports, b_lport); + return NULL; + } -+ + +-void 
+-binding_seqno_flush(void) +-{ +- simap_clear(&binding_iface_seqno_map); + return b_lport; -+} + } diff --git a/controller/binding.h b/controller/binding.h -index c9ebef4b1..4fc9ef207 100644 +index c9ebef4b1..7a6495320 100644 --- a/controller/binding.h +++ b/controller/binding.h -@@ -36,6 +36,7 @@ struct sbrec_chassis; +@@ -36,6 +36,8 @@ struct sbrec_chassis; struct sbrec_port_binding_table; struct sset; struct sbrec_port_binding; +struct ds; ++struct if_status_mgr; struct binding_ctx_in { struct ovsdb_idl_txn *ovnsb_idl_txn; -@@ -56,7 +57,7 @@ struct binding_ctx_in { +@@ -56,7 +58,7 @@ struct binding_ctx_in { struct binding_ctx_out { struct hmap *local_datapaths; @@ -1656,17 +1939,17 @@ index c9ebef4b1..4fc9ef207 100644 /* sset of (potential) local lports. */ struct sset *local_lports; -@@ -86,28 +87,16 @@ struct binding_ctx_out { +@@ -84,30 +86,26 @@ struct binding_ctx_out { + * binding_handle_port_binding_changes) fills in for + * the changed datapaths and port bindings. */ struct hmap *tracked_dp_bindings; - }; +-}; -enum local_binding_type { - BT_VIF, - BT_CONTAINER, - BT_VIRTUAL -+struct local_binding_data { -+ struct shash bindings; -+ struct shash lports; ++ struct if_status_mgr *if_mgr; }; -struct local_binding { @@ -1678,21 +1961,31 @@ index c9ebef4b1..4fc9ef207 100644 - /* shash of 'struct local_binding' representing children. */ - struct shash children; - struct local_binding *parent; --}; -+void local_binding_data_init(struct local_binding_data *); -+void local_binding_data_destroy(struct local_binding_data *); ++struct local_binding_data { ++ struct shash bindings; ++ struct shash lports; + }; -static inline struct local_binding * -local_binding_find(struct shash *local_bindings, const char *name) -{ - return shash_find_data(local_bindings, name); -} ++void local_binding_data_init(struct local_binding_data *); ++void local_binding_data_destroy(struct local_binding_data *); ++ +const struct sbrec_port_binding *local_binding_get_primary_pb( + struct shash *local_bindings, const char *pb_name); ++bool local_binding_is_up(struct shash *local_bindings, const char *pb_name); ++bool local_binding_is_down(struct shash *local_bindings, const char *pb_name); ++void local_binding_set_up(struct shash *local_bindings, const char *pb_name, ++ bool sb_readonly, bool ovs_readonly); ++void local_binding_set_down(struct shash *local_bindings, const char *pb_name, ++ bool sb_readonly, bool ovs_readonly); /* Represents a tracked binding logical port. 
*/ struct tracked_binding_lport { -@@ -128,8 +117,6 @@ bool binding_cleanup(struct ovsdb_idl_txn *ovnsb_idl_txn, +@@ -128,16 +126,11 @@ bool binding_cleanup(struct ovsdb_idl_txn *ovnsb_idl_txn, const struct sbrec_port_binding_table *, const struct sbrec_chassis *); @@ -1701,17 +1994,479 @@ index c9ebef4b1..4fc9ef207 100644 bool binding_handle_ovs_interface_changes(struct binding_ctx_in *, struct binding_ctx_out *); bool binding_handle_port_binding_changes(struct binding_ctx_in *, -@@ -137,7 +124,8 @@ bool binding_handle_port_binding_changes(struct binding_ctx_in *, + struct binding_ctx_out *); void binding_tracked_dp_destroy(struct hmap *tracked_datapaths); - void binding_init(void); +-void binding_init(void); -void binding_seqno_run(struct shash *local_bindings); -void binding_seqno_install(struct shash *local_bindings); -+void binding_seqno_run(struct local_binding_data *lbinding_data); -+void binding_seqno_install(struct local_binding_data *lbinding_data); - void binding_seqno_flush(void); +-void binding_seqno_flush(void); +void binding_dump_local_bindings(struct local_binding_data *, struct ds *); #endif /* controller/binding.h */ +diff --git a/controller/if-status.c b/controller/if-status.c +new file mode 100644 +index 000000000..8d8c8d436 +--- /dev/null ++++ b/controller/if-status.c +@@ -0,0 +1,415 @@ ++/* Copyright (c) 2021, Red Hat, Inc. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at: ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ */ ++ ++#include ++ ++#include "binding.h" ++#include "if-status.h" ++#include "ofctrl-seqno.h" ++ ++#include "lib/hmapx.h" ++#include "lib/util.h" ++#include "openvswitch/vlog.h" ++ ++VLOG_DEFINE_THIS_MODULE(if_status); ++ ++/* This module implements an interface manager that maintains the state of ++ * the interfaces wrt. their flows being completely installed in OVS and ++ * their corresponding bindings being marked up/down. ++ * ++ * A state machine is maintained for each interface. ++ * ++ * Transitions are triggered between states by three types of events: ++ * A. Events received from the binding module: ++ * - interface is claimed: if_status_mgr_claim_iface() ++ * - interface is released: if_status_mgr_release_iface() ++ * - interface is deleted: if_status_mgr_delete_iface() ++ * ++ * B. At every iteration, based on SB/OVS updates, handled in ++ * if_status_mgr_update(): ++ * - an interface binding has been marked "up" both in the Southbound and OVS ++ * databases. ++ * - an interface binding has been marked "down" both in the Southbound and OVS ++ * databases. ++ * - new interface has been claimed. ++ * ++ * C. At every iteration, based on ofctrl_seqno updates, handled in ++ * if_status_mgr_run(): ++ * - the flows for a previously claimed interface have been installed in OVS. ++ */ ++ ++enum if_state { ++ OIF_CLAIMED, /* Newly claimed interface. */ ++ OIF_INSTALL_FLOWS, /* Already claimed interface for which flows are still ++ * being installed. 
++ */ ++ OIF_MARK_UP, /* Interface with flows successfully installed in OVS ++ * but not yet marked "up" in the binding module (in ++ * SB and OVS databases). ++ */ ++ OIF_MARK_DOWN, /* Released interface but not yet marked "down" in the ++ * binding module (in SB and/or OVS databases). ++ */ ++ OIF_INSTALLED, /* Interface flows programmed in OVS and binding marked ++ * "up" in the binding module. ++ */ ++ OIF_MAX, ++}; ++ ++static const char *if_state_names[] = { ++ [OIF_CLAIMED] = "CLAIMED", ++ [OIF_INSTALL_FLOWS] = "INSTALL_FLOWS", ++ [OIF_MARK_UP] = "MARK_UP", ++ [OIF_MARK_DOWN] = "MARK_DOWN", ++ [OIF_INSTALLED] = "INSTALLED", ++}; ++ ++struct ovs_iface { ++ char *id; /* Extracted from OVS external_ids.iface_id. */ ++ enum if_state state; /* State of the interface in the state machine. */ ++ uint32_t install_seqno; /* Seqno at which this interface is expected to ++ * be fully programmed in OVS. Only used in state ++ * OIF_INSTALL_FLOWS. ++ */ ++}; ++ ++/* State machine manager for all local OVS interfaces. */ ++struct if_status_mgr { ++ /* All local interfaces, mapping from 'iface-id' to 'struct ovs_iface'. */ ++ struct shash ifaces; ++ ++ /* All local interfaces, stored per state. */ ++ struct hmapx ifaces_per_state[OIF_MAX]; ++ ++ /* Registered ofctrl seqno type for port_binding flow installation. */ ++ size_t iface_seq_type_pb_cfg; ++ ++ /* Interface specific seqno to be acked by ofctrl when flows for new ++ * interfaces have been installed. ++ */ ++ uint32_t iface_seqno; ++}; ++ ++static struct ovs_iface *ovs_iface_create(struct if_status_mgr *, ++ const char *iface_id, ++ enum if_state ); ++static void ovs_iface_destroy(struct if_status_mgr *, struct ovs_iface *); ++static void ovs_iface_set_state(struct if_status_mgr *, struct ovs_iface *, ++ enum if_state); ++ ++static void if_status_mgr_update_bindings( ++ struct if_status_mgr *mgr, struct local_binding_data *binding_data, ++ bool sb_readonly, bool ovs_readonly); ++ ++struct if_status_mgr * ++if_status_mgr_create(void) ++{ ++ struct if_status_mgr *mgr = xzalloc(sizeof *mgr); ++ ++ mgr->iface_seq_type_pb_cfg = ofctrl_seqno_add_type(); ++ for (size_t i = 0; i < ARRAY_SIZE(mgr->ifaces_per_state); i++) { ++ hmapx_init(&mgr->ifaces_per_state[i]); ++ } ++ shash_init(&mgr->ifaces); ++ return mgr; ++} ++ ++void ++if_status_mgr_clear(struct if_status_mgr *mgr) ++{ ++ struct shash_node *node_next; ++ struct shash_node *node; ++ ++ SHASH_FOR_EACH_SAFE (node, node_next, &mgr->ifaces) { ++ ovs_iface_destroy(mgr, node->data); ++ } ++ ovs_assert(shash_is_empty(&mgr->ifaces)); ++ ++ for (size_t i = 0; i < ARRAY_SIZE(mgr->ifaces_per_state); i++) { ++ ovs_assert(hmapx_is_empty(&mgr->ifaces_per_state[i])); ++ } ++} ++ ++void ++if_status_mgr_destroy(struct if_status_mgr *mgr) ++{ ++ if_status_mgr_clear(mgr); ++ shash_destroy(&mgr->ifaces); ++ for (size_t i = 0; i < ARRAY_SIZE(mgr->ifaces_per_state); i++) { ++ hmapx_destroy(&mgr->ifaces_per_state[i]); ++ } ++ free(mgr); ++} ++ ++void ++if_status_mgr_claim_iface(struct if_status_mgr *mgr, const char *iface_id) ++{ ++ struct ovs_iface *iface = shash_find_data(&mgr->ifaces, iface_id); ++ ++ if (!iface) { ++ iface = ovs_iface_create(mgr, iface_id, OIF_CLAIMED); ++ } ++ ++ switch (iface->state) { ++ case OIF_CLAIMED: ++ case OIF_INSTALL_FLOWS: ++ case OIF_MARK_UP: ++ /* Nothing to do here. 
*/ ++ break; ++ case OIF_INSTALLED: ++ case OIF_MARK_DOWN: ++ ovs_iface_set_state(mgr, iface, OIF_CLAIMED); ++ break; ++ case OIF_MAX: ++ OVS_NOT_REACHED(); ++ break; ++ } ++} ++ ++void ++if_status_mgr_release_iface(struct if_status_mgr *mgr, const char *iface_id) ++{ ++ struct ovs_iface *iface = shash_find_data(&mgr->ifaces, iface_id); ++ ++ if (!iface) { ++ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1); ++ VLOG_WARN_RL(&rl, "Trying to release unknown interface %s", iface_id); ++ return; ++ } ++ ++ switch (iface->state) { ++ case OIF_CLAIMED: ++ case OIF_INSTALL_FLOWS: ++ /* Not yet fully installed interfaces can be safely deleted. */ ++ ovs_iface_destroy(mgr, iface); ++ break; ++ case OIF_MARK_UP: ++ case OIF_INSTALLED: ++ /* Properly mark interfaces "down" if their flows were already ++ * programmed in OVS. ++ */ ++ ovs_iface_set_state(mgr, iface, OIF_MARK_DOWN); ++ break; ++ case OIF_MARK_DOWN: ++ /* Nothing to do here. */ ++ break; ++ case OIF_MAX: ++ OVS_NOT_REACHED(); ++ break; ++ } ++} ++ ++void ++if_status_mgr_delete_iface(struct if_status_mgr *mgr, const char *iface_id) ++{ ++ struct ovs_iface *iface = shash_find_data(&mgr->ifaces, iface_id); ++ ++ if (!iface) { ++ return; ++ } ++ ++ switch (iface->state) { ++ case OIF_CLAIMED: ++ case OIF_INSTALL_FLOWS: ++ /* Not yet fully installed interfaces can be safely deleted. */ ++ ovs_iface_destroy(mgr, iface); ++ break; ++ case OIF_MARK_UP: ++ case OIF_INSTALLED: ++ /* Properly mark interfaces "down" if their flows were already ++ * programmed in OVS. ++ */ ++ ovs_iface_set_state(mgr, iface, OIF_MARK_DOWN); ++ break; ++ case OIF_MARK_DOWN: ++ /* Nothing to do here. */ ++ break; ++ case OIF_MAX: ++ OVS_NOT_REACHED(); ++ break; ++ } ++} ++ ++void ++if_status_mgr_update(struct if_status_mgr *mgr, ++ struct local_binding_data *binding_data) ++{ ++ if (!binding_data) { ++ return; ++ } ++ ++ struct shash *bindings = &binding_data->bindings; ++ struct hmapx_node *node_next; ++ struct hmapx_node *node; ++ ++ /* Move all interfaces that have been confirmed "up" by the binding module, ++ * from OIF_MARK_UP to OIF_INSTALLED. ++ */ ++ HMAPX_FOR_EACH_SAFE (node, node_next, ++ &mgr->ifaces_per_state[OIF_MARK_UP]) { ++ struct ovs_iface *iface = node->data; ++ ++ if (local_binding_is_up(bindings, iface->id)) { ++ ovs_iface_set_state(mgr, iface, OIF_INSTALLED); ++ } ++ } ++ ++ /* Cleanup all interfaces that have been confirmed "down" by the binding ++ * module. ++ */ ++ HMAPX_FOR_EACH_SAFE (node, node_next, ++ &mgr->ifaces_per_state[OIF_MARK_DOWN]) { ++ struct ovs_iface *iface = node->data; ++ ++ if (local_binding_is_down(bindings, iface->id)) { ++ ovs_iface_destroy(mgr, iface); ++ } ++ } ++ ++ /* Register for a notification about flows being installed in OVS for all ++ * newly claimed interfaces. ++ * ++ * Move them from OIF_CLAIMED to OIF_INSTALL_FLOWS. ++ */ ++ bool new_ifaces = false; ++ HMAPX_FOR_EACH_SAFE (node, node_next, ++ &mgr->ifaces_per_state[OIF_CLAIMED]) { ++ struct ovs_iface *iface = node->data; ++ ++ ovs_iface_set_state(mgr, iface, OIF_INSTALL_FLOWS); ++ iface->install_seqno = mgr->iface_seqno + 1; ++ new_ifaces = true; ++ } ++ ++ /* Request a seqno update when the flows for new interfaces have been ++ * installed in OVS. 
++ */ ++ if (new_ifaces) { ++ mgr->iface_seqno++; ++ ofctrl_seqno_update_create(mgr->iface_seq_type_pb_cfg, ++ mgr->iface_seqno); ++ VLOG_DBG("Seqno requested: %"PRIu32, mgr->iface_seqno); ++ } ++} ++ ++void ++if_status_mgr_run(struct if_status_mgr *mgr, ++ struct local_binding_data *binding_data, ++ bool sb_readonly, bool ovs_readonly) ++{ ++ struct ofctrl_acked_seqnos *acked_seqnos = ++ ofctrl_acked_seqnos_get(mgr->iface_seq_type_pb_cfg); ++ struct hmapx_node *node_next; ++ struct hmapx_node *node; ++ ++ /* Move interfaces from state OIF_INSTALL_FLOWS to OIF_MARK_UP if a ++ * notification has been received aabout their flows being installed ++ * in OVS. ++ */ ++ HMAPX_FOR_EACH_SAFE (node, node_next, ++ &mgr->ifaces_per_state[OIF_INSTALL_FLOWS]) { ++ struct ovs_iface *iface = node->data; ++ ++ if (!ofctrl_acked_seqnos_contains(acked_seqnos, ++ iface->install_seqno)) { ++ continue; ++ } ++ ovs_iface_set_state(mgr, iface, OIF_MARK_UP); ++ } ++ ofctrl_acked_seqnos_destroy(acked_seqnos); ++ ++ /* Update binding states. */ ++ if_status_mgr_update_bindings(mgr, binding_data, sb_readonly, ++ ovs_readonly); ++} ++ ++static struct ovs_iface * ++ovs_iface_create(struct if_status_mgr *mgr, const char *iface_id, ++ enum if_state state) ++{ ++ struct ovs_iface *iface = xzalloc(sizeof *iface); ++ ++ VLOG_DBG("Interface %s create.", iface->id); ++ iface->id = xstrdup(iface_id); ++ shash_add(&mgr->ifaces, iface_id, iface); ++ ovs_iface_set_state(mgr, iface, state); ++ return iface; ++} ++ ++static void ++ovs_iface_destroy(struct if_status_mgr *mgr, struct ovs_iface *iface) ++{ ++ VLOG_DBG("Interface %s destroy: state %s", iface->id, ++ if_state_names[iface->state]); ++ hmapx_find_and_delete(&mgr->ifaces_per_state[iface->state], iface); ++ shash_find_and_delete(&mgr->ifaces, iface->id); ++ free(iface->id); ++ free(iface); ++} ++ ++static void ++ovs_iface_set_state(struct if_status_mgr *mgr, struct ovs_iface *iface, ++ enum if_state state) ++{ ++ VLOG_DBG("Interface %s set state: old %s, new %s", iface->id, ++ if_state_names[iface->state], ++ if_state_names[state]); ++ ++ hmapx_find_and_delete(&mgr->ifaces_per_state[iface->state], iface); ++ iface->state = state; ++ hmapx_add(&mgr->ifaces_per_state[iface->state], iface); ++ iface->install_seqno = 0; ++} ++ ++static void ++if_status_mgr_update_bindings(struct if_status_mgr *mgr, ++ struct local_binding_data *binding_data, ++ bool sb_readonly, bool ovs_readonly) ++{ ++ if (!binding_data) { ++ return; ++ } ++ ++ struct shash *bindings = &binding_data->bindings; ++ struct hmapx_node *node; ++ ++ /* Notify the binding module to set "down" all bindings that are still ++ * in the process of being installed in OVS, i.e., are not yet instsalled. ++ */ ++ HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_INSTALL_FLOWS]) { ++ struct ovs_iface *iface = node->data; ++ ++ local_binding_set_down(bindings, iface->id, sb_readonly, ovs_readonly); ++ } ++ ++ /* Notifiy the binding module to set "up" all bindings that have had ++ * their flows installed but are not yet marked "up" in the binding ++ * module. ++ */ ++ HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_MARK_UP]) { ++ struct ovs_iface *iface = node->data; ++ ++ local_binding_set_up(bindings, iface->id, sb_readonly, ovs_readonly); ++ } ++ ++ /* Notify the binding module to set "down" all bindings that have been ++ * released but are not yet marked as "down" in the binding module. 
++ */ ++ HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_MARK_DOWN]) { ++ struct ovs_iface *iface = node->data; ++ ++ local_binding_set_down(bindings, iface->id, sb_readonly, ovs_readonly); ++ } ++} +diff --git a/controller/if-status.h b/controller/if-status.h +new file mode 100644 +index 000000000..51fe7c684 +--- /dev/null ++++ b/controller/if-status.h +@@ -0,0 +1,37 @@ ++/* Copyright (c) 2021, Red Hat, Inc. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at: ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ */ ++ ++#ifndef IF_STATUS_H ++#define IF_STATUS_H 1 ++ ++#include "openvswitch/shash.h" ++ ++#include "binding.h" ++ ++struct if_status_mgr; ++ ++struct if_status_mgr *if_status_mgr_create(void); ++void if_status_mgr_clear(struct if_status_mgr *); ++void if_status_mgr_destroy(struct if_status_mgr *); ++ ++void if_status_mgr_claim_iface(struct if_status_mgr *, const char *iface_id); ++void if_status_mgr_release_iface(struct if_status_mgr *, const char *iface_id); ++void if_status_mgr_delete_iface(struct if_status_mgr *, const char *iface_id); ++ ++void if_status_mgr_update(struct if_status_mgr *, struct local_binding_data *); ++void if_status_mgr_run(struct if_status_mgr *mgr, struct local_binding_data *, ++ bool sb_readonly, bool ovs_readonly); ++ ++# endif /* controller/if-status.h */ diff --git a/controller/ovn-controller.8.xml b/controller/ovn-controller.8.xml index 51c0c372c..8886df568 100644 --- a/controller/ovn-controller.8.xml @@ -1746,10 +2501,18 @@ index 51c0c372c..8886df568 100644

diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c -index 5dd643f52..9102b9903 100644 +index 5dd643f52..b4eee4848 100644 --- a/controller/ovn-controller.c +++ b/controller/ovn-controller.c -@@ -81,6 +81,7 @@ static unixctl_cb_func cluster_state_reset_cmd; +@@ -33,6 +33,7 @@ + #include "openvswitch/dynamic-string.h" + #include "encaps.h" + #include "fatal-signal.h" ++#include "if-status.h" + #include "ip-mcast.h" + #include "openvswitch/hmap.h" + #include "lflow.h" +@@ -81,6 +82,7 @@ static unixctl_cb_func cluster_state_reset_cmd; static unixctl_cb_func debug_pause_execution; static unixctl_cb_func debug_resume_execution; static unixctl_cb_func debug_status_execution; @@ -1757,7 +2520,76 @@ index 5dd643f52..9102b9903 100644 static unixctl_cb_func lflow_cache_flush_cmd; static unixctl_cb_func lflow_cache_show_stats_cmd; static unixctl_cb_func debug_delay_nb_cfg_report; -@@ -1182,8 +1183,7 @@ struct ed_type_runtime_data { +@@ -102,6 +104,7 @@ OVS_NO_RETURN static void usage(void); + + struct controller_engine_ctx { + struct lflow_cache *lflow_cache; ++ struct if_status_mgr *if_mgr; + }; + + /* Pending packet to be injected into connected OVS. */ +@@ -258,23 +261,15 @@ update_sb_monitors(struct ovsdb_idl *ovnsb_idl, + uuid); + } + +- /* Updating conditions to receive logical flows that references +- * datapath groups containing local datapaths. */ +- const struct sbrec_logical_dp_group *group; +- SBREC_LOGICAL_DP_GROUP_FOR_EACH (group, ovnsb_idl) { +- struct uuid *uuid = CONST_CAST(struct uuid *, +- &group->header_.uuid); +- size_t i; +- +- for (i = 0; i < group->n_datapaths; i++) { +- if (get_local_datapath(local_datapaths, +- group->datapaths[i]->tunnel_key)) { +- sbrec_logical_flow_add_clause_logical_dp_group( +- &lf, OVSDB_F_EQ, uuid); +- break; +- } +- } +- } ++ /* Datapath groups are immutable, which means a new group record is ++ * created when a datapath is added to a group. The logical flows ++ * referencing a datapath group are also updated in such cases but the ++ * new group UUID is not known by ovn-controller until the SB update ++ * is received. To avoid unnecessarily removing and adding lflows ++ * that reference datapath groups, set the monitor condition to always ++ * request all of them. ++ */ ++ sbrec_logical_flow_add_clause_logical_dp_group(&lf, OVSDB_F_NE, NULL); + } + + out:; +@@ -420,6 +415,10 @@ process_br_int(struct ovsdb_idl_txn *ovs_idl_txn, + if (datapath_type && strcmp(br_int->datapath_type, datapath_type)) { + ovsrec_bridge_set_datapath_type(br_int, datapath_type); + } ++ if (!br_int->fail_mode || strcmp(br_int->fail_mode, "secure")) { ++ ovsrec_bridge_set_fail_mode(br_int, "secure"); ++ VLOG_WARN("Integration bridge fail-mode changed to 'secure'."); ++ } + } + return br_int; + } +@@ -1003,6 +1002,7 @@ en_ofctrl_is_connected_cleanup(void *data OVS_UNUSED) + static void + en_ofctrl_is_connected_run(struct engine_node *node, void *data) + { ++ struct controller_engine_ctx *ctrl_ctx = engine_get_context()->client_ctx; + struct ed_type_ofctrl_is_connected *of_data = data; + if (of_data->connected != ofctrl_is_connected()) { + of_data->connected = !of_data->connected; +@@ -1010,7 +1010,7 @@ en_ofctrl_is_connected_run(struct engine_node *node, void *data) + /* Flush ofctrl seqno requests when the ofctrl connection goes down. 
*/ + if (!of_data->connected) { + ofctrl_seqno_flush(); +- binding_seqno_flush(); ++ if_status_mgr_clear(ctrl_ctx->if_mgr); + } + engine_set_node_state(node, EN_UPDATED); + return; +@@ -1182,8 +1182,7 @@ struct ed_type_runtime_data { /* Contains "struct local_datapath" nodes. */ struct hmap local_datapaths; @@ -1767,7 +2599,7 @@ index 5dd643f52..9102b9903 100644 /* Contains the name of each logical port resident on the local * hypervisor. These logical ports include the VIFs (and their child -@@ -1222,9 +1222,9 @@ struct ed_type_runtime_data { +@@ -1222,9 +1221,9 @@ struct ed_type_runtime_data { * | | Interface and Port Binding changes store the | * | @tracked_dp_bindings | changed datapaths (datapaths added/removed from | * | | local_datapaths) and changed port bindings | @@ -1779,7 +2611,7 @@ index 5dd643f52..9102b9903 100644 * | | here. | * ------------------------------------------------------------------------ * | | This is a bool which represents if the runtime | -@@ -1251,7 +1251,7 @@ struct ed_type_runtime_data { +@@ -1251,7 +1250,7 @@ struct ed_type_runtime_data { * * --------------------------------------------------------------------- * | local_datapaths | The changes to these runtime data is captured in | @@ -1788,7 +2620,7 @@ index 5dd643f52..9102b9903 100644 * | local_lport_ids | is not tracked explicitly. | * --------------------------------------------------------------------- * | local_iface_ids | This is used internally within the runtime data | -@@ -1294,7 +1294,7 @@ en_runtime_data_init(struct engine_node *node OVS_UNUSED, +@@ -1294,7 +1293,7 @@ en_runtime_data_init(struct engine_node *node OVS_UNUSED, sset_init(&data->active_tunnels); sset_init(&data->egress_ifaces); smap_init(&data->local_iface_ids); @@ -1797,7 +2629,7 @@ index 5dd643f52..9102b9903 100644 /* Init the tracked data. 
*/ hmap_init(&data->tracked_dp_bindings); -@@ -1322,7 +1322,7 @@ en_runtime_data_cleanup(void *data) +@@ -1322,7 +1321,7 @@ en_runtime_data_cleanup(void *data) free(cur_node); } hmap_destroy(&rt_data->local_datapaths); @@ -1806,7 +2638,16 @@ index 5dd643f52..9102b9903 100644 hmapx_destroy(&rt_data->ct_updated_datapaths); } -@@ -1405,7 +1405,7 @@ init_binding_ctx(struct engine_node *node, +@@ -1383,6 +1382,8 @@ init_binding_ctx(struct engine_node *node, + engine_get_input("SB_port_binding", node), + "datapath"); + ++ struct controller_engine_ctx *ctrl_ctx = engine_get_context()->client_ctx; ++ + b_ctx_in->ovnsb_idl_txn = engine_get_context()->ovnsb_idl_txn; + b_ctx_in->ovs_idl_txn = engine_get_context()->ovs_idl_txn; + b_ctx_in->sbrec_datapath_binding_by_key = sbrec_datapath_binding_by_key; +@@ -1405,10 +1406,10 @@ init_binding_ctx(struct engine_node *node, b_ctx_out->local_lport_ids_changed = false; b_ctx_out->non_vif_ports_changed = false; b_ctx_out->egress_ifaces = &rt_data->egress_ifaces; @@ -1814,8 +2655,12 @@ index 5dd643f52..9102b9903 100644 + b_ctx_out->lbinding_data = &rt_data->lbinding_data; b_ctx_out->local_iface_ids = &rt_data->local_iface_ids; b_ctx_out->tracked_dp_bindings = NULL; - b_ctx_out->local_lports_changed = NULL; -@@ -1449,7 +1449,7 @@ en_runtime_data_run(struct engine_node *node, void *data) +- b_ctx_out->local_lports_changed = NULL; ++ b_ctx_out->if_mgr = ctrl_ctx->if_mgr; + } + + static void +@@ -1449,7 +1450,7 @@ en_runtime_data_run(struct engine_node *node, void *data) free(cur_node); } hmap_clear(local_datapaths); @@ -1824,7 +2669,7 @@ index 5dd643f52..9102b9903 100644 sset_destroy(local_lports); sset_destroy(local_lport_ids); sset_destroy(active_tunnels); -@@ -1460,7 +1460,7 @@ en_runtime_data_run(struct engine_node *node, void *data) +@@ -1460,7 +1461,7 @@ en_runtime_data_run(struct engine_node *node, void *data) sset_init(active_tunnels); sset_init(&rt_data->egress_ifaces); smap_init(&rt_data->local_iface_ids); @@ -1833,7 +2678,15 @@ index 5dd643f52..9102b9903 100644 hmapx_clear(&rt_data->ct_updated_datapaths); } -@@ -1822,7 +1822,7 @@ static void init_physical_ctx(struct engine_node *node, +@@ -1715,6 +1716,7 @@ en_physical_flow_changes_run(struct engine_node *node, void *data) + { + struct ed_type_pfc_data *pfc_tdata = data; + pfc_tdata->recompute_physical_flows = true; ++ pfc_tdata->ovs_ifaces_changed = true; + engine_set_node_state(node, EN_UPDATED); + } + +@@ -1822,7 +1824,7 @@ static void init_physical_ctx(struct engine_node *node, p_ctx->local_lports = &rt_data->local_lports; p_ctx->ct_zones = ct_zones; p_ctx->mff_ovn_geneve = ed_mff_ovn_geneve->mff_ovn_geneve; @@ -1842,7 +2695,15 @@ index 5dd643f52..9102b9903 100644 p_ctx->ct_updated_datapaths = &rt_data->ct_updated_datapaths; } -@@ -2685,7 +2685,8 @@ main(int argc, char *argv[]) +@@ -2448,7 +2450,6 @@ main(int argc, char *argv[]) + /* Register ofctrl seqno types. 
*/ + ofctrl_seq_type_nb_cfg = ofctrl_seqno_add_type(); + +- binding_init(); + patch_init(); + pinctrl_init(); + lflow_init(); +@@ -2685,7 +2686,8 @@ main(int argc, char *argv[]) engine_get_internal_data(&en_flow_output); struct ed_type_ct_zones *ct_zones_data = engine_get_internal_data(&en_ct_zones); @@ -1852,7 +2713,7 @@ index 5dd643f52..9102b9903 100644 ofctrl_init(&flow_output_data->group_table, &flow_output_data->meter_table, -@@ -2738,6 +2739,10 @@ main(int argc, char *argv[]) +@@ -2738,13 +2740,19 @@ main(int argc, char *argv[]) unixctl_command_register("debug/delay-nb-cfg-report", "SECONDS", 1, 1, debug_delay_nb_cfg_report, &delay_nb_cfg_report); @@ -1863,25 +2724,50 @@ index 5dd643f52..9102b9903 100644 unsigned int ovs_cond_seqno = UINT_MAX; unsigned int ovnsb_cond_seqno = UINT_MAX; unsigned int ovnsb_expected_cond_seqno = UINT_MAX; -@@ -2955,7 +2960,7 @@ main(int argc, char *argv[]) + + struct controller_engine_ctx ctrl_engine_ctx = { + .lflow_cache = lflow_cache_create(), ++ .if_mgr = if_status_mgr_create(), + }; ++ struct if_status_mgr *if_mgr = ctrl_engine_ctx.if_mgr; + + char *ovn_version = ovn_get_internal_version(); + VLOG_INFO("OVN internal version is : [%s]", ovn_version); +@@ -2954,9 +2962,10 @@ main(int argc, char *argv[]) + ovnsb_idl_loop.idl), ovnsb_cond_seqno, ovnsb_expected_cond_seqno)); - if (runtime_data && ovs_idl_txn && ovnsb_idl_txn) { +- if (runtime_data && ovs_idl_txn && ovnsb_idl_txn) { - binding_seqno_run(&runtime_data->local_bindings); -+ binding_seqno_run(&runtime_data->lbinding_data); - } +- } ++ ++ struct local_binding_data *binding_data = ++ runtime_data ? &runtime_data->lbinding_data : NULL; ++ if_status_mgr_update(if_mgr, binding_data); flow_output_data = engine_get_data(&en_flow_output); -@@ -2968,7 +2973,7 @@ main(int argc, char *argv[]) + if (flow_output_data && ct_zones_data) { +@@ -2967,9 +2976,8 @@ main(int argc, char *argv[]) + engine_node_changed(&en_flow_output)); } ofctrl_seqno_run(ofctrl_get_cur_cfg()); - if (runtime_data && ovs_idl_txn && ovnsb_idl_txn) { +- if (runtime_data && ovs_idl_txn && ovnsb_idl_txn) { - binding_seqno_install(&runtime_data->local_bindings); -+ binding_seqno_install(&runtime_data->lbinding_data); - } +- } ++ if_status_mgr_run(if_mgr, binding_data, !ovnsb_idl_txn, ++ !ovs_idl_txn); } -@@ -3408,3 +3413,13 @@ debug_delay_nb_cfg_report(struct unixctl_conn *conn, int argc OVS_UNUSED, + } +@@ -3135,6 +3143,7 @@ loop_done: + ofctrl_destroy(); + pinctrl_destroy(); + patch_destroy(); ++ if_status_mgr_destroy(if_mgr); + + ovsdb_idl_loop_destroy(&ovs_idl_loop); + ovsdb_idl_loop_destroy(&ovnsb_idl_loop); +@@ -3408,3 +3417,13 @@ debug_delay_nb_cfg_report(struct unixctl_conn *conn, int argc OVS_UNUSED, unixctl_command_reply(conn, "no delay for nb_cfg report."); } } @@ -1896,10 +2782,47 @@ index 5dd643f52..9102b9903 100644 + ds_destroy(&binding_data); +} diff --git a/controller/physical.c b/controller/physical.c -index fa5d0d692..874d1ee27 100644 +index fa5d0d692..c7090b351 100644 --- a/controller/physical.c +++ b/controller/physical.c -@@ -1839,20 +1839,19 @@ physical_handle_ovs_iface_changes(struct physical_ctx *p_ctx, +@@ -1160,6 +1160,11 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name, + + load_logical_ingress_metadata(binding, &zone_ids, ofpacts_p); + ++ if (!strcmp(binding->type, "localport")) { ++ /* mark the packet as incoming from a localport */ ++ put_load(1, MFF_LOG_FLAGS, MLF_LOCALPORT_BIT, 1, ofpacts_p); ++ } ++ + /* Resubmit to first logical ingress pipeline table. 
*/ + put_resubmit(OFTABLE_LOG_INGRESS_PIPELINE, ofpacts_p); + ofctrl_add_flow(flow_table, OFTABLE_PHY_TO_LOG, +@@ -1219,6 +1224,24 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name, + ofport, flow_table); + } + ++ /* Table 39, priority 160. ++ * ======================= ++ * ++ * Do not forward local traffic from a localport to a localnet port. ++ */ ++ if (!strcmp(binding->type, "localnet")) { ++ /* do not forward traffic from localport to localnet port */ ++ match_init_catchall(&match); ++ ofpbuf_clear(ofpacts_p); ++ match_set_metadata(&match, htonll(dp_key)); ++ match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key); ++ match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0, ++ MLF_LOCALPORT, MLF_LOCALPORT); ++ ofctrl_add_flow(flow_table, OFTABLE_CHECK_LOOPBACK, 160, ++ binding->header_.uuid.parts[0], &match, ++ ofpacts_p, &binding->header_.uuid); ++ } ++ + } else if (!tun && !is_ha_remote) { + /* Remote port connected by localnet port */ + /* Table 33, priority 100. +@@ -1839,20 +1862,29 @@ physical_handle_ovs_iface_changes(struct physical_ctx *p_ctx, continue; } @@ -1907,10 +2830,21 @@ index fa5d0d692..874d1ee27 100644 - local_binding_find(p_ctx->local_bindings, iface_id); - - if (!lb || !lb->pb) { +- continue; + const struct sbrec_port_binding *lb_pb = + local_binding_get_primary_pb(p_ctx->local_bindings, iface_id); + if (!lb_pb) { - continue; ++ /* For regular VIFs (e.g. lsp) the upcoming port-binding update ++ * will remove lfows related to the unclaimed ovs port. ++ * Localport is a special case and it needs to be managed here ++ * since the port is not binded and otherwise the related lfows ++ * will not be cleared removing the ovs port. ++ */ ++ lb_pb = lport_lookup_by_name(p_ctx->sbrec_port_binding_by_name, ++ iface_id); ++ if (!lb_pb || strcmp(lb_pb->type, "localport")) { ++ continue; ++ } } int64_t ofport = iface_rec->n_ofport ? *iface_rec->ofport : 0; @@ -1925,7 +2859,7 @@ index fa5d0d692..874d1ee27 100644 } simap_put(&localvif_to_ofport, iface_id, ofport); -@@ -1860,7 +1859,7 @@ physical_handle_ovs_iface_changes(struct physical_ctx *p_ctx, +@@ -1860,7 +1892,7 @@ physical_handle_ovs_iface_changes(struct physical_ctx *p_ctx, p_ctx->mff_ovn_geneve, p_ctx->ct_zones, p_ctx->active_tunnels, p_ctx->local_datapaths, @@ -1966,18 +2900,19 @@ index 51f9bcc91..25a04f8ae 100644 * New upstream version diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h -index 017176f98..d44b30b30 100644 +index 017176f98..ef97117b9 100644 --- a/include/ovn/logical-fields.h +++ b/include/ovn/logical-fields.h -@@ -66,6 +66,7 @@ enum mff_log_flags_bits { +@@ -66,6 +66,8 @@ enum mff_log_flags_bits { MLF_LOOKUP_MAC_BIT = 6, MLF_LOOKUP_LB_HAIRPIN_BIT = 7, MLF_LOOKUP_FDB_BIT = 8, + MLF_SKIP_SNAT_FOR_LB_BIT = 9, ++ MLF_LOCALPORT_BIT = 10, }; /* MFF_LOG_FLAGS_REG flag assignments */ -@@ -102,6 +103,10 @@ enum mff_log_flags { +@@ -102,6 +104,13 @@ enum mff_log_flags { /* Indicate that the lookup in the fdb table was successful. */ MLF_LOOKUP_FDB = (1 << MLF_LOOKUP_FDB_BIT), @@ -1985,9 +2920,25 @@ index 017176f98..d44b30b30 100644 + /* Indicate that a packet must not SNAT in the gateway router when + * load-balancing has taken place. 
*/ + MLF_SKIP_SNAT_FOR_LB = (1 << MLF_SKIP_SNAT_FOR_LB_BIT), ++ ++ /* Indicate the packet has been received from a localport */ ++ MLF_LOCALPORT = (1 << MLF_LOCALPORT_BIT), }; /* OVN logical fields +diff --git a/lib/expr.c b/lib/expr.c +index f061a8fbe..7b3d3ddb3 100644 +--- a/lib/expr.c ++++ b/lib/expr.c +@@ -2452,7 +2452,7 @@ crush_and_numeric(struct expr *expr, const struct expr_symbol *symbol) + free(or); + return cmp; + } else { +- return or; ++ return crush_cmps(or, symbol); + } + } else { + /* Transform "x && (a0 || a1) && (b0 || b1) && ..." into diff --git a/lib/inc-proc-eng.c b/lib/inc-proc-eng.c index 916dbbe39..a6337a1d9 100644 --- a/lib/inc-proc-eng.c @@ -2046,101 +2997,511 @@ index 916dbbe39..a6337a1d9 100644 } } + -+ unixctl_command_register("inc-engine/show-stats", "", 0, 0, -+ engine_dump_stats, NULL); -+ unixctl_command_register("inc-engine/clear-stats", "", 0, 0, -+ engine_clear_stats, NULL); - } ++ unixctl_command_register("inc-engine/show-stats", "", 0, 0, ++ engine_dump_stats, NULL); ++ unixctl_command_register("inc-engine/clear-stats", "", 0, 0, ++ engine_clear_stats, NULL); + } + + void +@@ -288,6 +328,7 @@ engine_recompute(struct engine_node *node, bool forced, bool allowed) + + /* Run the node handler which might change state. */ + node->run(node, node->data); ++ node->stats.recompute++; + } + + /* Return true if the node could be computed, false otherwise. */ +@@ -312,6 +353,8 @@ engine_compute(struct engine_node *node, bool recompute_allowed) + } + } + } ++ node->stats.compute++; ++ + return true; + } + +@@ -321,6 +364,7 @@ engine_run_node(struct engine_node *node, bool recompute_allowed) + if (!node->n_inputs) { + /* Run the node handler which might change state. */ + node->run(node, node->data); ++ node->stats.recompute++; + return; + } + +@@ -377,6 +421,7 @@ engine_run(bool recompute_allowed) + engine_run_node(engine_nodes[i], recompute_allowed); + + if (engine_nodes[i]->state == EN_ABORTED) { ++ engine_nodes[i]->stats.abort++; + engine_run_aborted = true; + return; + } +@@ -393,6 +438,7 @@ engine_need_run(void) + } + + engine_nodes[i]->run(engine_nodes[i], engine_nodes[i]->data); ++ engine_nodes[i]->stats.recompute++; + VLOG_DBG("input node: %s, state: %s", engine_nodes[i]->name, + engine_node_state_name[engine_nodes[i]->state]); + if (engine_nodes[i]->state == EN_UPDATED) { +diff --git a/lib/inc-proc-eng.h b/lib/inc-proc-eng.h +index 857234677..7e9f5bb70 100644 +--- a/lib/inc-proc-eng.h ++++ b/lib/inc-proc-eng.h +@@ -107,6 +107,12 @@ enum engine_node_state { + EN_STATE_MAX, + }; + ++struct engine_stats { ++ uint64_t recompute; ++ uint64_t compute; ++ uint64_t abort; ++}; ++ + struct engine_node { + /* A unique name for each node. */ + char *name; +@@ -154,6 +160,9 @@ struct engine_node { + /* Method to clear up tracked data maintained by the engine node in the + * engine 'data'. It may be NULL. */ + void (*clear_tracked_data)(void *tracked_data); ++ ++ /* Engine stats. */ ++ struct engine_stats stats; + }; + + /* Initialize the data for the engine nodes. 
It calls each node's +diff --git a/lib/logical-fields.c b/lib/logical-fields.c +index 9d08b44c2..72853013e 100644 +--- a/lib/logical-fields.c ++++ b/lib/logical-fields.c +@@ -121,6 +121,10 @@ ovn_init_symtab(struct shash *symtab) + MLF_FORCE_SNAT_FOR_LB_BIT); + expr_symtab_add_subfield(symtab, "flags.force_snat_for_lb", NULL, + flags_str); ++ snprintf(flags_str, sizeof flags_str, "flags[%d]", ++ MLF_SKIP_SNAT_FOR_LB_BIT); ++ expr_symtab_add_subfield(symtab, "flags.skip_snat_for_lb", NULL, ++ flags_str); + + /* Connection tracking state. */ + expr_symtab_add_field_scoped(symtab, "ct_mark", MFF_CT_MARK, NULL, false, +diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml +index c272cc922..37d1728b8 100644 +--- a/northd/ovn-northd.8.xml ++++ b/northd/ovn-northd.8.xml +@@ -407,12 +407,13 @@ + it contains a priority-110 flow to move IPv6 Neighbor Discovery and MLD + traffic to the next table. If load balancing rules with virtual IP + addresses (and ports) are configured in OVN_Northbound +- database for alogical switch datapath, a priority-100 flow is added ++ database for a logical switch datapath, a priority-100 flow is added + with the match ip to match on IP packets and sets the action +- reg0[0] = 1; next; to act as a hint for table ++ reg0[2] = 1; next; to act as a hint for table + Pre-stateful to send IP packets to the connection tracker +- for packet de-fragmentation before eventually advancing to ingress +- table LB. ++ for packet de-fragmentation (and to possibly do DNAT for already ++ established load balanced traffic) before eventually advancing to ingress ++ table Stateful. + If controller_event has been enabled and load balancing rules with + empty backends have been added in OVN_Northbound, a 130 flow + is added to trigger ovn-controller events whenever the chassis receives a +@@ -470,11 +471,38 @@ +

+ This table prepares flows for all possible stateful processing + in next tables. It contains a priority-0 flow that simply moves +- traffic to the next table. A priority-100 flow sends the packets to +- connection tracker based on a hint provided by the previous tables +- (with a match for reg0[0] == 1) by using the +- ct_next; action. ++ traffic to the next table. +

++
    ++
  • ++ Priority-120 flows that send the packets to connection tracker using ++ ct_lb; as the action so that the already established ++ traffic destined to the load balancer VIP gets DNATted based on a hint ++ provided by the previous tables (with a match ++ for reg0[2] == 1 and on supported load balancer protocols ++ and address families). For IPv4 traffic the flows also load the ++ original destination IP and transport port in registers ++ reg1 and reg2. For IPv6 traffic the flows ++ also load the original destination IP and transport port in ++ registers xxreg1 and reg2. ++
  • ++ ++
  • ++ A priority-110 flow sends the packets to connection tracker based ++ on a hint provided by the previous tables ++ (with a match for reg0[2] == 1) by using the ++ ct_lb; action. This flow is added to handle ++ the traffic for load balancer VIPs whose protocol is not defined ++ (mainly for ICMP traffic). ++
  • ++ ++
  • ++ A priority-100 flow sends the packets to connection tracker based ++ on a hint provided by the previous tables ++ (with a match for reg0[0] == 1) by using the ++ ct_next; action. ++
  • ++
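The priority-120 flows described in the list above are emitted by the build_pre_stateful() changes in the northd/ovn-northd.c portion of this patch. A condensed sketch of just the IPv4/TCP case, reusing the stage, register and helper names from that hunk (the real code loops over tcp, udp and sctp for both IPv4 and IPv6; the wrapper function here is purely illustrative):

    /* Illustrative sketch: emit the ls_in_pre_stateful priority-120 flow that
     * saves the original destination IP/port and then sends the packet to the
     * connection tracker with ct_lb. */
    static void
    add_pre_stateful_ipv4_tcp_flow(struct hmap *lflows, struct ovn_datapath *od)
    {
        struct ds match = DS_EMPTY_INITIALIZER;
        struct ds actions = DS_EMPTY_INITIALIZER;

        ds_put_cstr(&match, REGBIT_CONNTRACK_NAT" == 1 && ip4 && tcp");
        ds_put_cstr(&actions, REG_ORIG_DIP_IPV4 " = ip4.dst; "
                    REG_ORIG_TP_DPORT " = tcp.dst; ct_lb;");
        ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 120,
                      ds_cstr(&match), ds_cstr(&actions));

        ds_destroy(&match);
        ds_destroy(&actions);
    }

Saving the original destination in reg1/reg2 (xxreg1 for IPv6) is what lets the later Pre-Hairpin stage detect hairpin packets, i.e. load-balanced traffic whose selected backend is the sender itself.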
+ +

Ingress Table 8: from-lport ACL hints

+ +@@ -511,6 +539,14 @@ +

+ The table contains the following flows: +

++
    ++
  • ++ A priority-65535 flow to advance to the next table if the logical ++ switch has no ACLs configured, otherwise a ++ priority-0 flow to advance to the next table. ++
  • ++
++ +
    +
  • + A priority-7 flow that matches on packets that initiate a new session. +@@ -551,9 +587,6 @@ + This flow sets reg0[10] and then advances to the next + table. +
  • +-
  • +- A priority-0 flow to advance to the next table. +-
  • +
+ +

Ingress table 9: from-lport ACLs

+@@ -599,9 +632,14 @@ + + +

+- This table also contains a priority 0 flow with action +- next;, so that ACLs allow packets by default. If the +- logical datapath has a stateful ACL or a load balancer with VIP ++ This table contains a priority-65535 flow to advance to the next table ++ if the logical switch has no ACLs configured, otherwise a ++ priority-0 flow to advance to the next table so that ACLs allow ++ packets by default. ++

++ ++

++ If the logical datapath has a stateful ACL or a load balancer with VIP + configured, the following flows will also be added: +

+ +@@ -615,7 +653,7 @@ + + +
  • +- A priority-65535 flow that allows any traffic in the reply ++ A priority-65532 flow that allows any traffic in the reply + direction for a connection that has been committed to the + connection tracker (i.e., established flows), as long as + the committed flow does not have ct_label.blocked set. +@@ -628,19 +666,19 @@ +
  • + +
  • +- A priority-65535 flow that allows any traffic that is considered ++ A priority-65532 flow that allows any traffic that is considered + related to a committed flow in the connection tracker (e.g., an + ICMP Port Unreachable from a non-listening UDP port), as long + as the committed flow does not have ct_label.blocked set. +
  • + +
  • +- A priority-65535 flow that drops all traffic marked by the ++ A priority-65532 flow that drops all traffic marked by the + connection tracker as invalid. +
  • + +
  • +- A priority-65535 flow that drops all traffic in the reply direction ++ A priority-65532 flow that drops all traffic in the reply direction + with ct_label.blocked set meaning that the connection + should no longer be allowed due to a policy change. Packets + in the request direction are skipped here to let a newly created +@@ -648,11 +686,18 @@ +
  • + +
  • +- A priority-65535 flow that allows IPv6 Neighbor solicitation, ++ A priority-65532 flow that allows IPv6 Neighbor solicitation, + Neighbor discover, Router solicitation, Router advertisement and MLD + packets. +
  • ++ + ++

    ++ If the logical datapath has any ACL or a load balancer with VIP ++ configured, the following flow will also be added: ++

    ++ ++
      +
    • + A priority 34000 logical flow is added for each logical switch datapath + with the match eth.dst = E to allow the service +@@ -709,33 +754,7 @@ +
    • +
    + +-

    Ingress Table 12: LB

    +- +-

    +- It contains a priority-0 flow that simply moves traffic to the next +- table. +-

    +- +-

    +- A priority-65535 flow with the match +- inport == I for all logical switch +- datapaths to move traffic to the next table. Where I +- is the peer of a logical router port. This flow is added to +- skip the connection tracking of packets which enter from +- logical router datapath to logical switch datapath. +-

    +- +-

    +- For established connections a priority 65534 flow matches on +- ct.est && !ct.rel && !ct.new && +- !ct.inv and sets an action reg0[2] = 1; next; to act +- as a hint for table Stateful to send packets through +- connection tracker to NAT the packets. (The packet will automatically +- get DNATed to the same IP address as the first packet in that +- connection.) +-

    +- +-

    Ingress Table 13: Stateful

    ++

    Ingress Table 12: Stateful

    + +
      +
    • +@@ -792,23 +811,12 @@ + ct_commit; next; action based on a hint provided by + the previous tables (with a match for reg0[1] == 1). +
    • +-
    • +- Priority-100 flows that send the packets to connection tracker using +- ct_lb; as the action based on a hint provided by the +- previous tables (with a match for reg0[2] == 1 and +- on supported load balancer protocols and address families). +- For IPv4 traffic the flows also load the original destination +- IP and transport port in registers reg1 and +- reg2. For IPv6 traffic the flows also load the original +- destination IP and transport port in registers xxreg1 and +- reg2. +-
    • +
    • + A priority-0 flow that simply moves traffic to the next table. +
    • +
    + +-

    Ingress Table 14: Pre-Hairpin

    ++

    Ingress Table 13: Pre-Hairpin

    +
      +
    • + If the logical switch has load balancer(s) configured, then a +@@ -826,7 +834,7 @@ +
    • +
    + +-

    Ingress Table 15: Nat-Hairpin

    ++

    Ingress Table 14: Nat-Hairpin

    +
      +
    • + If the logical switch has load balancer(s) configured, then a +@@ -861,7 +869,7 @@ +
    • +
    + +-

    Ingress Table 16: Hairpin

    ++

    Ingress Table 15: Hairpin

    +
      +
    • + A priority-1 flow that hairpins traffic matched by non-default +@@ -874,7 +882,7 @@ +
    • +
    + +-

    Ingress Table 17: ARP/ND responder

    ++

    Ingress Table 16: ARP/ND responder

    + +

    + This table implements ARP/ND responder in a logical switch for known +@@ -1164,7 +1172,7 @@ output; + + + +-

    Ingress Table 18: DHCP option processing

    ++

    Ingress Table 17: DHCP option processing

    + +

    + This table adds the DHCPv4 options to a DHCPv4 packet from the +@@ -1225,7 +1233,7 @@ next; + + + +-

    Ingress Table 19: DHCP responses

    ++

    Ingress Table 18: DHCP responses

    + +

    + This table implements DHCP responder for the DHCP replies generated by +@@ -1306,7 +1314,7 @@ output; + + + +-

    Ingress Table 20 DNS Lookup

    ++

    Ingress Table 19 DNS Lookup

    + +

    + This table looks up and resolves the DNS names to the corresponding +@@ -1335,7 +1343,7 @@ reg0[4] = dns_lookup(); next; + + + +-

    Ingress Table 21 DNS Responses

    ++

    Ingress Table 20 DNS Responses

    + +

    + This table implements DNS responder for the DNS replies generated by +@@ -1370,7 +1378,7 @@ output; + + + +-

    Ingress table 22 External ports

    ++

    Ingress table 21 External ports

    + +

    + Traffic from the external logical ports enter the ingress +@@ -1413,7 +1421,7 @@ output; + + + +-

    Ingress Table 23 Destination Lookup

    ++

    Ingress Table 22 Destination Lookup

    + +

    + This table implements switching behavior. It contains these logical +@@ -1639,9 +1647,11 @@ output; + Moreover it contains a priority-110 flow to move IPv6 Neighbor Discovery + traffic to the next table. If any load balancing rules exist for the + datapath, a priority-100 flow is added with a match of ip +- and action of reg0[0] = 1; next; to act as a hint for ++ and action of reg0[2] = 1; next; to act as a hint for + table Pre-stateful to send IP packets to the connection +- tracker for packet de-fragmentation. ++ tracker for packet de-fragmentation and possibly DNAT the destination ++ VIP to one of the selected backend for already commited load balanced ++ traffic. +

    + +

    +@@ -1683,20 +1693,39 @@ output; +

    Egress Table 2: Pre-stateful

    + +

+-      This is similar to ingress table Pre-stateful.
++      This is similar to ingress table Pre-stateful. This table
++      adds the three logical flows described below.

    + +-

    Egress Table 3: LB

    +-

    +- This is similar to ingress table LB. +-

    ++
      ++
  • ++        A priority-120 flow that sends the packets to connection tracker using
++        ct_lb; as the action so that the already established
++        traffic gets unDNATted from the backend IP to the load balancer VIP
++        based on a hint provided by the previous tables with a match
++        for reg0[2] == 1. If the packet was not DNATted earlier,
++        then ct_lb functions like ct_next.
++
    • + +-

      Egress Table 4: from-lport ACL hints

      ++
    • ++ A priority-100 flow sends the packets to connection tracker based ++ on a hint provided by the previous tables ++ (with a match for reg0[0] == 1) by using the ++ ct_next; action. ++
    • ++ ++
    • ++ A priority-0 flow that matches all packets to advance to the next ++ table. ++
    • ++
    ++ ++

    Egress Table 3: from-lport ACL hints

    +

    + This is similar to ingress table ACL hints. +

    - void -@@ -288,6 +328,7 @@ engine_recompute(struct engine_node *node, bool forced, bool allowed) +-

    Egress Table 5: to-lport ACLs

    ++

    Egress Table 4: to-lport ACLs

    - /* Run the node handler which might change state. */ - node->run(node, node->data); -+ node->stats.recompute++; - } +

    + This is similar to ingress table ACLs except for +@@ -1733,28 +1762,28 @@ output; + + - /* Return true if the node could be computed, false otherwise. */ -@@ -312,6 +353,8 @@ engine_compute(struct engine_node *node, bool recompute_allowed) - } - } - } -+ node->stats.compute++; -+ - return true; - } +-

    Egress Table 6: to-lport QoS Marking

    ++

    Egress Table 5: to-lport QoS Marking

    -@@ -321,6 +364,7 @@ engine_run_node(struct engine_node *node, bool recompute_allowed) - if (!node->n_inputs) { - /* Run the node handler which might change state. */ - node->run(node, node->data); -+ node->stats.recompute++; - return; - } +

    + This is similar to ingress table QoS marking except + they apply to to-lport QoS rules. +

    -@@ -377,6 +421,7 @@ engine_run(bool recompute_allowed) - engine_run_node(engine_nodes[i], recompute_allowed); +-

    Egress Table 7: to-lport QoS Meter

    ++

    Egress Table 6: to-lport QoS Meter

    - if (engine_nodes[i]->state == EN_ABORTED) { -+ engine_nodes[i]->stats.abort++; - engine_run_aborted = true; - return; - } -@@ -393,6 +438,7 @@ engine_need_run(void) - } +

    + This is similar to ingress table QoS meter except + they apply to to-lport QoS rules. +

    - engine_nodes[i]->run(engine_nodes[i], engine_nodes[i]->data); -+ engine_nodes[i]->stats.recompute++; - VLOG_DBG("input node: %s, state: %s", engine_nodes[i]->name, - engine_node_state_name[engine_nodes[i]->state]); - if (engine_nodes[i]->state == EN_UPDATED) { -diff --git a/lib/inc-proc-eng.h b/lib/inc-proc-eng.h -index 857234677..7e9f5bb70 100644 ---- a/lib/inc-proc-eng.h -+++ b/lib/inc-proc-eng.h -@@ -107,6 +107,12 @@ enum engine_node_state { - EN_STATE_MAX, - }; +-

    Egress Table 8: Stateful

    ++

    Egress Table 7: Stateful

    -+struct engine_stats { -+ uint64_t recompute; -+ uint64_t compute; -+ uint64_t abort; -+}; -+ - struct engine_node { - /* A unique name for each node. */ - char *name; -@@ -154,6 +160,9 @@ struct engine_node { - /* Method to clear up tracked data maintained by the engine node in the - * engine 'data'. It may be NULL. */ - void (*clear_tracked_data)(void *tracked_data); -+ -+ /* Engine stats. */ -+ struct engine_stats stats; - }; +

    + This is similar to ingress table Stateful except that + there are no rules added for load balancing new connections. +

    - /* Initialize the data for the engine nodes. It calls each node's -diff --git a/lib/logical-fields.c b/lib/logical-fields.c -index 9d08b44c2..72853013e 100644 ---- a/lib/logical-fields.c -+++ b/lib/logical-fields.c -@@ -121,6 +121,10 @@ ovn_init_symtab(struct shash *symtab) - MLF_FORCE_SNAT_FOR_LB_BIT); - expr_symtab_add_subfield(symtab, "flags.force_snat_for_lb", NULL, - flags_str); -+ snprintf(flags_str, sizeof flags_str, "flags[%d]", -+ MLF_SKIP_SNAT_FOR_LB_BIT); -+ expr_symtab_add_subfield(symtab, "flags.skip_snat_for_lb", NULL, -+ flags_str); +-

    Egress Table 9: Egress Port Security - IP

    ++

    Egress Table 8: Egress Port Security - IP

    - /* Connection tracking state. */ - expr_symtab_add_field_scoped(symtab, "ct_mark", MFF_CT_MARK, NULL, false, -diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml -index c272cc922..3300f7180 100644 ---- a/northd/ovn-northd.8.xml -+++ b/northd/ovn-northd.8.xml -@@ -2720,7 +2720,11 @@ icmp6 { +

    + This is similar to the port security logic in table +@@ -1764,7 +1793,7 @@ output; + ip4.src and ip6.src +

    + +-

    Egress Table 10: Egress Port Security - L2

    ++

    Egress Table 9: Egress Port Security - L2

    + +

    + This is similar to the ingress port security logic in ingress table +@@ -2283,8 +2312,7 @@ eth.src = xreg0[0..47]; + arp.op = 2; /* ARP reply. */ + arp.tha = arp.sha; + arp.sha = xreg0[0..47]; +-arp.tpa = arp.spa; +-arp.spa = A; ++arp.tpa <-> arp.spa; + outport = inport; + flags.loopback = 1; + output; +@@ -2720,7 +2748,11 @@ icmp6 { (and optional port numbers) to load balance to. If the router is configured to force SNAT any load-balanced packets, the above action will be replaced by flags.force_snat_for_lb = 1; @@ -2153,7 +3514,7 @@ index c272cc922..3300f7180 100644 args will only contain those endpoints whose service monitor status entry in OVN_Southbound db is either online or empty. -@@ -2737,6 +2741,9 @@ icmp6 { +@@ -2737,6 +2769,9 @@ icmp6 { with an action of ct_dnat;. If the router is configured to force SNAT any load-balanced packets, the above action will be replaced by flags.force_snat_for_lb = 1; ct_dnat;. @@ -2163,7 +3524,7 @@ index c272cc922..3300f7180 100644

  • -@@ -2751,6 +2758,9 @@ icmp6 { +@@ -2751,6 +2786,9 @@ icmp6 { to force SNAT any load-balanced packets, the above action will be replaced by flags.force_snat_for_lb = 1; ct_lb(args);. @@ -2173,7 +3534,7 @@ index c272cc922..3300f7180 100644
  • -@@ -2763,6 +2773,9 @@ icmp6 { +@@ -2763,6 +2801,9 @@ icmp6 { If the router is configured to force SNAT any load-balanced packets, the above action will be replaced by flags.force_snat_for_lb = 1; ct_dnat;. @@ -2183,7 +3544,7 @@ index c272cc922..3300f7180 100644
  • -@@ -3795,6 +3808,15 @@ nd_ns { +@@ -3795,6 +3836,15 @@ nd_ns {

  • @@ -2200,10 +3561,478 @@ index c272cc922..3300f7180 100644

    If the Gateway router in the OVN Northbound database has been diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c -index 5a2018c2e..4e406c594 100644 +index 5a2018c2e..a478d3324 100644 --- a/northd/ovn-northd.c +++ b/northd/ovn-northd.c -@@ -8573,10 +8573,16 @@ get_force_snat_ip(struct ovn_datapath *od, const char *key_type, +@@ -97,6 +97,10 @@ static bool check_lsp_is_up; + static char svc_monitor_mac[ETH_ADDR_STRLEN + 1]; + static struct eth_addr svc_monitor_mac_ea; + ++/* If this option is 'true' northd will make use of ct.inv match fields. ++ * Otherwise, it will avoid using it. The default is true. */ ++static bool use_ct_inv_match = true; ++ + /* Default probe interval for NB and SB DB connections. */ + #define DEFAULT_PROBE_INTERVAL_MSEC 5000 + static int northd_probe_interval_nb = 0; +@@ -147,32 +151,30 @@ enum ovn_stage { + PIPELINE_STAGE(SWITCH, IN, ACL, 9, "ls_in_acl") \ + PIPELINE_STAGE(SWITCH, IN, QOS_MARK, 10, "ls_in_qos_mark") \ + PIPELINE_STAGE(SWITCH, IN, QOS_METER, 11, "ls_in_qos_meter") \ +- PIPELINE_STAGE(SWITCH, IN, LB, 12, "ls_in_lb") \ +- PIPELINE_STAGE(SWITCH, IN, STATEFUL, 13, "ls_in_stateful") \ +- PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 14, "ls_in_pre_hairpin") \ +- PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 15, "ls_in_nat_hairpin") \ +- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 16, "ls_in_hairpin") \ +- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 17, "ls_in_arp_rsp") \ +- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 18, "ls_in_dhcp_options") \ +- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 19, "ls_in_dhcp_response") \ +- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 20, "ls_in_dns_lookup") \ +- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 21, "ls_in_dns_response") \ +- PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 22, "ls_in_external_port") \ +- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 23, "ls_in_l2_lkup") \ +- PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 24, "ls_in_l2_unknown") \ ++ PIPELINE_STAGE(SWITCH, IN, STATEFUL, 12, "ls_in_stateful") \ ++ PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 13, "ls_in_pre_hairpin") \ ++ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 14, "ls_in_nat_hairpin") \ ++ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 15, "ls_in_hairpin") \ ++ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 16, "ls_in_arp_rsp") \ ++ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 17, "ls_in_dhcp_options") \ ++ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 18, "ls_in_dhcp_response") \ ++ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 19, "ls_in_dns_lookup") \ ++ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 20, "ls_in_dns_response") \ ++ PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 21, "ls_in_external_port") \ ++ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 22, "ls_in_l2_lkup") \ ++ PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 23, "ls_in_l2_unknown") \ + \ + /* Logical switch egress stages. 
*/ \ + PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 0, "ls_out_pre_lb") \ + PIPELINE_STAGE(SWITCH, OUT, PRE_ACL, 1, "ls_out_pre_acl") \ + PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful") \ +- PIPELINE_STAGE(SWITCH, OUT, LB, 3, "ls_out_lb") \ +- PIPELINE_STAGE(SWITCH, OUT, ACL_HINT, 4, "ls_out_acl_hint") \ +- PIPELINE_STAGE(SWITCH, OUT, ACL, 5, "ls_out_acl") \ +- PIPELINE_STAGE(SWITCH, OUT, QOS_MARK, 6, "ls_out_qos_mark") \ +- PIPELINE_STAGE(SWITCH, OUT, QOS_METER, 7, "ls_out_qos_meter") \ +- PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 8, "ls_out_stateful") \ +- PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP, 9, "ls_out_port_sec_ip") \ +- PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2, 10, "ls_out_port_sec_l2") \ ++ PIPELINE_STAGE(SWITCH, OUT, ACL_HINT, 3, "ls_out_acl_hint") \ ++ PIPELINE_STAGE(SWITCH, OUT, ACL, 4, "ls_out_acl") \ ++ PIPELINE_STAGE(SWITCH, OUT, QOS_MARK, 5, "ls_out_qos_mark") \ ++ PIPELINE_STAGE(SWITCH, OUT, QOS_METER, 6, "ls_out_qos_meter") \ ++ PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 7, "ls_out_stateful") \ ++ PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP, 8, "ls_out_port_sec_ip") \ ++ PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2, 9, "ls_out_port_sec_l2") \ + \ + /* Logical router ingress stages. */ \ + PIPELINE_STAGE(ROUTER, IN, ADMISSION, 0, "lr_in_admission") \ +@@ -626,6 +628,7 @@ struct ovn_datapath { + bool has_stateful_acl; + bool has_lb_vip; + bool has_unknown; ++ bool has_acls; + + /* IPAM data. */ + struct ipam_info ipam_info; +@@ -664,9 +667,6 @@ struct ovn_datapath { + struct hmap nb_pgs; + }; + +-static bool ls_has_stateful_acl(struct ovn_datapath *od); +-static bool ls_has_lb_vip(struct ovn_datapath *od); +- + /* Contains a NAT entry with the external addresses pre-parsed. */ + struct ovn_nat { + const struct nbrec_nat *nb; +@@ -4729,27 +4729,38 @@ ovn_ls_port_group_destroy(struct hmap *nb_pgs) + hmap_destroy(nb_pgs); + } + +-static bool +-ls_has_stateful_acl(struct ovn_datapath *od) ++static void ++ls_get_acl_flags(struct ovn_datapath *od) + { +- for (size_t i = 0; i < od->nbs->n_acls; i++) { +- struct nbrec_acl *acl = od->nbs->acls[i]; +- if (!strcmp(acl->action, "allow-related")) { +- return true; ++ od->has_acls = false; ++ od->has_stateful_acl = false; ++ ++ if (od->nbs->n_acls) { ++ od->has_acls = true; ++ ++ for (size_t i = 0; i < od->nbs->n_acls; i++) { ++ struct nbrec_acl *acl = od->nbs->acls[i]; ++ if (!strcmp(acl->action, "allow-related")) { ++ od->has_stateful_acl = true; ++ return; ++ } + } + } + + struct ovn_ls_port_group *ls_pg; + HMAP_FOR_EACH (ls_pg, key_node, &od->nb_pgs) { +- for (size_t i = 0; i < ls_pg->nb_pg->n_acls; i++) { +- struct nbrec_acl *acl = ls_pg->nb_pg->acls[i]; +- if (!strcmp(acl->action, "allow-related")) { +- return true; ++ if (ls_pg->nb_pg->n_acls) { ++ od->has_acls = true; ++ ++ for (size_t i = 0; i < ls_pg->nb_pg->n_acls; i++) { ++ struct nbrec_acl *acl = ls_pg->nb_pg->acls[i]; ++ if (!strcmp(acl->action, "allow-related")) { ++ od->has_stateful_acl = true; ++ return; ++ } + } + } + } +- +- return false; + } + + /* Logical switch ingress table 0: Ingress port security - L2 +@@ -5128,8 +5139,8 @@ build_pre_lb(struct ovn_datapath *od, struct hmap *lflows, + vip_configured = (vip_configured || lb->n_vips); + } + +- /* 'REGBIT_CONNTRACK_DEFRAG' is set to let the pre-stateful table send +- * packet to conntrack for defragmentation. ++ /* 'REGBIT_CONNTRACK_NAT' is set to let the pre-stateful table send ++ * packet to conntrack for defragmentation and possibly for unNATting. 
+ * + * Send all the packets to conntrack in the ingress pipeline if the + * logical switch has a load balancer with VIP configured. Earlier +@@ -5159,9 +5170,9 @@ build_pre_lb(struct ovn_datapath *od, struct hmap *lflows, + */ + if (vip_configured) { + ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, +- 100, "ip", REGBIT_CONNTRACK_DEFRAG" = 1; next;"); ++ 100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;"); + ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB, +- 100, "ip", REGBIT_CONNTRACK_DEFRAG" = 1; next;"); ++ 100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;"); + } + } + +@@ -5173,10 +5184,46 @@ build_pre_stateful(struct ovn_datapath *od, struct hmap *lflows) + ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 0, "1", "next;"); + ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 0, "1", "next;"); + ++ const char *lb_protocols[] = {"tcp", "udp", "sctp"}; ++ struct ds actions = DS_EMPTY_INITIALIZER; ++ struct ds match = DS_EMPTY_INITIALIZER; ++ ++ for (size_t i = 0; i < ARRAY_SIZE(lb_protocols); i++) { ++ ds_clear(&match); ++ ds_clear(&actions); ++ ds_put_format(&match, REGBIT_CONNTRACK_NAT" == 1 && ip4 && %s", ++ lb_protocols[i]); ++ ds_put_format(&actions, REG_ORIG_DIP_IPV4 " = ip4.dst; " ++ REG_ORIG_TP_DPORT " = %s.dst; ct_lb;", ++ lb_protocols[i]); ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 120, ++ ds_cstr(&match), ds_cstr(&actions)); ++ ++ ds_clear(&match); ++ ds_clear(&actions); ++ ds_put_format(&match, REGBIT_CONNTRACK_NAT" == 1 && ip6 && %s", ++ lb_protocols[i]); ++ ds_put_format(&actions, REG_ORIG_DIP_IPV6 " = ip6.dst; " ++ REG_ORIG_TP_DPORT " = %s.dst; ct_lb;", ++ lb_protocols[i]); ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 120, ++ ds_cstr(&match), ds_cstr(&actions)); ++ } ++ ++ ds_destroy(&actions); ++ ds_destroy(&match); ++ ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 110, ++ REGBIT_CONNTRACK_NAT" == 1", "ct_lb;"); ++ ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 110, ++ REGBIT_CONNTRACK_NAT" == 1", "ct_lb;"); ++ + /* If REGBIT_CONNTRACK_DEFRAG is set as 1, then the packets should be + * sent to conntrack for tracking and defragmentation. */ + ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 100, + REGBIT_CONNTRACK_DEFRAG" == 1", "ct_next;"); ++ + ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 100, + REGBIT_CONNTRACK_DEFRAG" == 1", "ct_next;"); + } +@@ -5206,7 +5253,11 @@ build_acl_hints(struct ovn_datapath *od, struct hmap *lflows) + enum ovn_stage stage = stages[i]; + + /* In any case, advance to the next stage. */ +- ovn_lflow_add(lflows, od, stage, 0, "1", "next;"); ++ if (!od->has_acls && !od->has_lb_vip) { ++ ovn_lflow_add(lflows, od, stage, UINT16_MAX, "1", "next;"); ++ } else { ++ ovn_lflow_add(lflows, od, stage, 0, "1", "next;"); ++ } + + if (!od->has_stateful_acl && !od->has_lb_vip) { + continue; +@@ -5606,10 +5657,19 @@ build_acls(struct ovn_datapath *od, struct hmap *lflows, + bool has_stateful = od->has_stateful_acl || od->has_lb_vip; + + /* Ingress and Egress ACL Table (Priority 0): Packets are allowed by +- * default. A related rule at priority 1 is added below if there ++ * default. If the logical switch has no ACLs or no load balancers, ++ * then add 65535-priority flow to advance the packet to next ++ * stage. ++ * ++ * A related rule at priority 1 is added below if there + * are any stateful ACLs in this datapath. 
*/ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, 0, "1", "next;"); +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, 0, "1", "next;"); ++ if (!od->has_acls && !od->has_lb_vip) { ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX, "1", "next;"); ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX, "1", "next;"); ++ } else { ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, 0, "1", "next;"); ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, 0, "1", "next;"); ++ } + + if (has_stateful) { + /* Ingress and Egress ACL Table (Priority 1). +@@ -5640,21 +5700,23 @@ build_acls(struct ovn_datapath *od, struct hmap *lflows, + "ip && (!ct.est || (ct.est && ct_label.blocked == 1))", + REGBIT_CONNTRACK_COMMIT" = 1; next;"); + +- /* Ingress and Egress ACL Table (Priority 65535). ++ /* Ingress and Egress ACL Table (Priority 65532). + * + * Always drop traffic that's in an invalid state. Also drop + * reply direction packets for connections that have been marked + * for deletion (bit 0 of ct_label is set). + * + * This is enforced at a higher priority than ACLs can be defined. */ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX, +- "ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)", +- "drop;"); +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX, +- "ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)", +- "drop;"); ++ char *match = ++ xasprintf("%s(ct.est && ct.rpl && ct_label.blocked == 1)", ++ use_ct_inv_match ? "ct.inv || " : ""); ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3, ++ match, "drop;"); ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3, ++ match, "drop;"); ++ free(match); + +- /* Ingress and Egress ACL Table (Priority 65535). ++ /* Ingress and Egress ACL Table (Priority 65535 - 3). + * + * Allow reply traffic that is part of an established + * conntrack entry that has not been marked for deletion +@@ -5663,14 +5725,15 @@ build_acls(struct ovn_datapath *od, struct hmap *lflows, + * direction to hit the currently defined policy from ACLs. + * + * This is enforced at a higher priority than ACLs can be defined. */ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX, +- "ct.est && !ct.rel && !ct.new && !ct.inv " +- "&& ct.rpl && ct_label.blocked == 0", +- "next;"); +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX, +- "ct.est && !ct.rel && !ct.new && !ct.inv " +- "&& ct.rpl && ct_label.blocked == 0", +- "next;"); ++ match = xasprintf("ct.est && !ct.rel && !ct.new%s && " ++ "ct.rpl && ct_label.blocked == 0", ++ use_ct_inv_match ? " && !ct.inv" : ""); ++ ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3, ++ match, "next;"); ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3, ++ match, "next;"); ++ free(match); + + /* Ingress and Egress ACL Table (Priority 65535). + * +@@ -5683,21 +5746,21 @@ build_acls(struct ovn_datapath *od, struct hmap *lflows, + * a dynamically negotiated FTP data channel), but will allow + * related traffic such as an ICMP Port Unreachable through + * that's generated from a non-listening UDP port. */ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX, +- "!ct.est && ct.rel && !ct.new && !ct.inv " +- "&& ct_label.blocked == 0", +- "next;"); +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX, +- "!ct.est && ct.rel && !ct.new && !ct.inv " +- "&& ct_label.blocked == 0", +- "next;"); ++ match = xasprintf("!ct.est && ct.rel && !ct.new%s && " ++ "ct_label.blocked == 0", ++ use_ct_inv_match ? 
" && !ct.inv" : ""); ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3, ++ match, "next;"); ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3, ++ match, "next;"); ++ free(match); + +- /* Ingress and Egress ACL Table (Priority 65535). ++ /* Ingress and Egress ACL Table (Priority 65532). + * + * Not to do conntrack on ND packets. */ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX, ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3, + "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;"); +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX, ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3, + "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;"); + } + +@@ -5784,15 +5847,18 @@ build_acls(struct ovn_datapath *od, struct hmap *lflows, + actions); + } + +- /* Add a 34000 priority flow to advance the service monitor reply +- * packets to skip applying ingress ACLs. */ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, 34000, +- "eth.dst == $svc_monitor_mac", "next;"); + +- /* Add a 34000 priority flow to advance the service monitor packets +- * generated by ovn-controller to skip applying egress ACLs. */ +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, 34000, +- "eth.src == $svc_monitor_mac", "next;"); ++ if (od->has_acls || od->has_lb_vip) { ++ /* Add a 34000 priority flow to advance the service monitor reply ++ * packets to skip applying ingress ACLs. */ ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, 34000, ++ "eth.dst == $svc_monitor_mac", "next;"); ++ ++ /* Add a 34000 priority flow to advance the service monitor packets ++ * generated by ovn-controller to skip applying egress ACLs. */ ++ ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, 34000, ++ "eth.src == $svc_monitor_mac", "next;"); ++ } + } + + static void +@@ -5856,37 +5922,6 @@ build_qos(struct ovn_datapath *od, struct hmap *lflows) { + } + } + +-static void +-build_lb(struct ovn_datapath *od, struct hmap *lflows) +-{ +- /* Ingress and Egress LB Table (Priority 0): Packets are allowed by +- * default. */ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_LB, 0, "1", "next;"); +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_LB, 0, "1", "next;"); +- +- if (od->nbs->n_load_balancer) { +- for (size_t i = 0; i < od->n_router_ports; i++) { +- skip_port_from_conntrack(od, od->router_ports[i], +- S_SWITCH_IN_LB, S_SWITCH_OUT_LB, +- UINT16_MAX, lflows); +- } +- } +- +- if (od->has_lb_vip) { +- /* Ingress and Egress LB Table (Priority 65534). +- * +- * Send established traffic through conntrack for just NAT. */ +- ovn_lflow_add(lflows, od, S_SWITCH_IN_LB, UINT16_MAX - 1, +- "ct.est && !ct.rel && !ct.new && !ct.inv && " +- "ct_label.natted == 1", +- REGBIT_CONNTRACK_NAT" = 1; next;"); +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_LB, UINT16_MAX - 1, +- "ct.est && !ct.rel && !ct.new && !ct.inv && " +- "ct_label.natted == 1", +- REGBIT_CONNTRACK_NAT" = 1; next;"); +- } +-} +- + static void + build_lb_rules(struct ovn_datapath *od, struct hmap *lflows, + struct ovn_northd_lb *lb) +@@ -5971,48 +6006,6 @@ build_stateful(struct ovn_datapath *od, struct hmap *lflows, struct hmap *lbs) + REGBIT_CONNTRACK_COMMIT" == 1", + "ct_commit { ct_label.blocked = 0; }; next;"); + +- /* If REGBIT_CONNTRACK_NAT is set as 1, then packets should just be sent +- * through nat (without committing). +- * +- * REGBIT_CONNTRACK_COMMIT is set for new connections and +- * REGBIT_CONNTRACK_NAT is set for established connections. So they +- * don't overlap. 
+- * +- * In the ingress pipeline, also store the original destination IP and +- * transport port to be used when detecting hairpin packets. +- */ +- const char *lb_protocols[] = {"tcp", "udp", "sctp"}; +- struct ds actions = DS_EMPTY_INITIALIZER; +- struct ds match = DS_EMPTY_INITIALIZER; +- +- for (size_t i = 0; i < ARRAY_SIZE(lb_protocols); i++) { +- ds_clear(&match); +- ds_clear(&actions); +- ds_put_format(&match, REGBIT_CONNTRACK_NAT" == 1 && ip4 && %s", +- lb_protocols[i]); +- ds_put_format(&actions, REG_ORIG_DIP_IPV4 " = ip4.dst; " +- REG_ORIG_TP_DPORT " = %s.dst; ct_lb;", +- lb_protocols[i]); +- ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 100, +- ds_cstr(&match), ds_cstr(&actions)); +- +- ds_clear(&match); +- ds_clear(&actions); +- ds_put_format(&match, REGBIT_CONNTRACK_NAT" == 1 && ip6 && %s", +- lb_protocols[i]); +- ds_put_format(&actions, REG_ORIG_DIP_IPV6 " = ip6.dst; " +- REG_ORIG_TP_DPORT " = %s.dst; ct_lb;", +- lb_protocols[i]); +- ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 100, +- ds_cstr(&match), ds_cstr(&actions)); +- } +- +- ds_destroy(&actions); +- ds_destroy(&match); +- +- ovn_lflow_add(lflows, od, S_SWITCH_OUT_STATEFUL, 100, +- REGBIT_CONNTRACK_NAT" == 1", "ct_lb;"); +- + /* Load balancing rules for new connections get committed to conntrack + * table. So even if REGBIT_CONNTRACK_COMMIT is set in a previous table + * a higher priority rule for load balancing below also commits the +@@ -6759,7 +6752,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *lflows) + struct ds actions = DS_EMPTY_INITIALIZER; + struct ovn_datapath *od; + +- /* Ingress table 24: Destination lookup for unknown MACs (priority 0). */ ++ /* Ingress table 23: Destination lookup for unknown MACs (priority 0). */ + HMAP_FOR_EACH (od, key_node, datapaths) { + if (!od->nbs) { + continue; +@@ -6794,8 +6787,8 @@ build_lswitch_lflows_pre_acl_and_acl(struct ovn_datapath *od, + struct hmap *lbs) + { + if (od->nbs) { +- od->has_stateful_acl = ls_has_stateful_acl(od); + od->has_lb_vip = ls_has_lb_vip(od); ++ ls_get_acl_flags(od); + + build_pre_acls(od, lflows); + build_pre_lb(od, lflows, meter_groups, lbs); +@@ -6803,7 +6796,6 @@ build_lswitch_lflows_pre_acl_and_acl(struct ovn_datapath *od, + build_acl_hints(od, lflows); + build_acls(od, lflows, port_groups, meter_groups); + build_qos(od, lflows); +- build_lb(od, lflows); + build_stateful(od, lflows, lbs); + build_lb_hairpin(od, lflows); + } +@@ -8573,10 +8565,16 @@ get_force_snat_ip(struct ovn_datapath *od, const char *key_type, return true; } @@ -2221,7 +4050,7 @@ index 5a2018c2e..4e406c594 100644 const char *proto, struct nbrec_load_balancer *lb, struct shash *meter_groups, struct sset *nat_entries) { -@@ -8585,9 +8591,10 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, +@@ -8585,9 +8583,10 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, /* A match and actions for new connections. */ char *new_match = xasprintf("ct.new && %s", ds_cstr(match)); @@ -2235,7 +4064,7 @@ index 5a2018c2e..4e406c594 100644 ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, priority, new_match, new_actions, &lb->header_); free(new_actions); -@@ -8598,11 +8605,12 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, +@@ -8598,11 +8597,12 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, /* A match and actions for established connections. 
*/ char *est_match = xasprintf("ct.est && %s", ds_cstr(match)); @@ -2252,7 +4081,7 @@ index 5a2018c2e..4e406c594 100644 } else { ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, priority, est_match, "ct_dnat;", &lb->header_); -@@ -8675,11 +8683,13 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, +@@ -8675,11 +8675,13 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, ds_put_format(&undnat_match, ") && outport == %s && " "is_chassis_resident(%s)", od->l3dgw_port->json_key, od->l3redirect_port->json_key); @@ -2269,7 +4098,7 @@ index 5a2018c2e..4e406c594 100644 } else { ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_UNDNAT, 120, ds_cstr(&undnat_match), "ct_dnat;", -@@ -8689,6 +8699,105 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, +@@ -8689,6 +8691,105 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od, ds_destroy(&undnat_match); } @@ -2375,7 +4204,52 @@ index 5a2018c2e..4e406c594 100644 #define ND_RA_MAX_INTERVAL_MAX 1800 #define ND_RA_MAX_INTERVAL_MIN 4 -@@ -11002,668 +11111,643 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, +@@ -8893,14 +8994,12 @@ build_lrouter_arp_flow(struct ovn_datapath *od, struct ovn_port *op, + "arp.op = 2; /* ARP reply */ " + "arp.tha = arp.sha; " + "arp.sha = %s; " +- "arp.tpa = arp.spa; " +- "arp.spa = %s; " ++ "arp.tpa <-> arp.spa; " + "outport = inport; " + "flags.loopback = 1; " + "output;", + eth_addr, +- eth_addr, +- ip_address); ++ eth_addr); + } + + ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_INPUT, priority, +@@ -10855,16 +10954,24 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, + get_router_load_balancer_ips(op->od, &all_ips_v4, &all_ips_v6); + + const char *ip_address; +- SSET_FOR_EACH (ip_address, &all_ips_v4) { ++ if (sset_count(&all_ips_v4)) { + ds_clear(match); + if (op == op->od->l3dgw_port) { + ds_put_format(match, "is_chassis_resident(%s)", + op->od->l3redirect_port->json_key); + } + +- build_lrouter_arp_flow(op->od, op, +- ip_address, REG_INPORT_ETH_ADDR, ++ struct ds load_balancer_ips_v4 = DS_EMPTY_INITIALIZER; ++ ++ /* For IPv4 we can just create one rule with all required IPs. */ ++ ds_put_cstr(&load_balancer_ips_v4, "{ "); ++ ds_put_and_free_cstr(&load_balancer_ips_v4, ++ sset_join(&all_ips_v4, ", ", " }")); ++ ++ build_lrouter_arp_flow(op->od, op, ds_cstr(&load_balancer_ips_v4), ++ REG_INPORT_ETH_ADDR, + match, false, 90, NULL, lflows); ++ ds_destroy(&load_balancer_ips_v4); + } + + SSET_FOR_EACH (ip_address, &all_ips_v6) { +@@ -11002,668 +11109,643 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, } } @@ -3615,11 +5489,43 @@ index 5a2018c2e..4e406c594 100644 } +@@ -12909,6 +12991,9 @@ ovnnb_db_run(struct northd_context *ctx, + + use_logical_dp_groups = smap_get_bool(&nb->options, + "use_logical_dp_groups", false); ++ use_ct_inv_match = smap_get_bool(&nb->options, ++ "use_ct_inv_match", true); ++ + /* deprecated, use --event instead */ + controller_event_en = smap_get_bool(&nb->options, + "controller_event", false); diff --git a/ovn-nb.xml b/ovn-nb.xml -index b0a4adffe..408c98090 100644 +index b0a4adffe..046d053e9 100644 --- a/ovn-nb.xml +++ b/ovn-nb.xml -@@ -1653,6 +1653,12 @@ +@@ -227,6 +227,21 @@ +
    + + ++ ++
    ++ If set to false, ovn-northd will not use the ++ ct.inv field in any of the logical flow matches. ++ The default value is true. If the NIC supports offloading ++ OVS datapath flows but doesn't support offloading ct_state ++ inv flag, then the datapath flows matching on this flag ++ (either +inv or -inv) will not be ++ offloaded. CMS should consider setting use_ct_inv_match ++ to false in such cases. This results in a side effect ++ of the invalid packets getting delivered to the destination VIF, ++ which otherwise would have been dropped by OVN. ++
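To make the effect of this option concrete, the following standalone C sketch (plain libc only; print_est_match is an illustrative helper, not an ovn-northd function) prints the "allow established" ACL match with and without the ct.inv term, mirroring the ls_in_acl/ls_out_acl flows that the ct.inv usage test later in this patch checks:

    #include <stdio.h>

    /* Print the "allow established" ACL match as it looks when
     * options:use_ct_inv_match is true (the default) and when it is false. */
    static void print_est_match(int use_ct_inv_match)
    {
        printf("ct.est && !ct.rel && !ct.new && %sct.rpl && ct_label.blocked == 0\n",
               use_ct_inv_match ? "!ct.inv && " : "");
    }

    int main(void)
    {
        print_est_match(1);  /* ct.inv kept: invalid conntrack state stays excluded. */
        print_est_match(0);  /* ct.inv omitted: the flow remains offloadable on NICs
                              * that cannot offload the ct_state inv flag. */
        return 0;
    }

With the option set to false the !ct.inv term simply disappears from the generated matches, which is exactly what the "ovn -- ct.inv usage" test added further down verifies.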
    ++
    ++ + +
    + These options control how routes are advertised between OVN +@@ -1653,6 +1668,12 @@ exactly one IPv4 and/or one IPv6 address on it, separated by a space character. @@ -3715,16 +5621,569 @@ index 2cd3e261f..5c64fff12 100644 +primary lport : [[lsp1]] +---------------------------------------- +]) -+ done -+done ++ done ++done ++ ++OVN_CLEANUP([hv1]) ++AT_CLEANUP +diff --git a/tests/ovn-macros.at b/tests/ovn-macros.at +index 2ba29a960..4cf14b1f2 100644 +--- a/tests/ovn-macros.at ++++ b/tests/ovn-macros.at +@@ -433,6 +433,24 @@ wait_for_ports_up() { + done + fi + } ++ ++# reset_pcap_file iface pcap_file ++# Resets the pcap file associates with OVS interface. should be used ++# with dummy datapath. ++reset_iface_pcap_file() { ++ local iface=$1 ++ local pcap_file=$2 ++ check rm -f dummy-*.pcap ++ check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \ ++options:rxq_pcap=dummy-rx.pcap ++ OVS_WAIT_WHILE([test 24 = $(wc -c dummy-tx.pcap | cut -d " " -f1)]) ++ check rm -f ${pcap_file}*.pcap ++ check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \ ++options:rxq_pcap=${pcap_file}-rx.pcap ++ ++ OVS_WAIT_WHILE([test 24 = $(wc -c ${pcap_file}-tx.pcap | cut -d " " -f1)]) ++} ++ + OVS_END_SHELL_HELPERS + + m4_define([OVN_POPULATE_ARP], [AT_CHECK(ovn_populate_arp__, [0], [ignore])]) +diff --git a/tests/ovn-nbctl.at b/tests/ovn-nbctl.at +index 6d91aa4c5..8af55161f 100644 +--- a/tests/ovn-nbctl.at ++++ b/tests/ovn-nbctl.at +@@ -1551,6 +1551,7 @@ IPv4 Routes + AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.2], [1], [], + [ovn-nbctl: duplicate nexthop for the same ECMP route + ]) ++AT_CHECK([ovn-nbctl --may-exist --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.2]) + + dnl Delete ecmp routes + AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.0.0/24 11.0.0.1]) +@@ -1614,6 +1615,7 @@ AT_CHECK([ovn-nbctl --ecmp-symmetric-reply lr-route-add lr0 2003:0db8:1::/64 200 + AT_CHECK([ovn-nbctl --ecmp-symmetric-reply lr-route-add lr0 2003:0db8:1::/64 2001:0db8:0:f103::6], [1], [], + [ovn-nbctl: duplicate nexthop for the same ECMP route + ]) ++AT_CHECK([ovn-nbctl --may-exist --ecmp-symmetric-reply lr-route-add lr0 2003:0db8:1::/64 2001:0db8:0:f103::6]) + + AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl + IPv4 Routes +diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at +index b78baa708..8ca915302 100644 +--- a/tests/ovn-northd.at ++++ b/tests/ovn-northd.at +@@ -1077,7 +1077,7 @@ check ovn-nbctl --wait=sb ls-lb-add sw0 lb1 + + AT_CAPTURE_FILE([sbflows]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows | grep 'priority=120.*ct_lb' | sed 's/table=..//'], 0, [dnl ++ [ovn-sbctl dump-flows sw0 | tee sbflows | grep 'priority=120.*backends' | sed 's/table=..//'], 0, [dnl + (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);) + ]) + +@@ -1087,7 +1087,7 @@ wait_row_count Service_Monitor 0 + + AT_CAPTURE_FILE([sbflows2]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows2 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0], ++ [ovn-sbctl dump-flows sw0 | tee sbflows2 | grep 'priority=120.*backends' | sed 's/table=..//'], [0], + [ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);) + ]) + +@@ -1098,7 +1098,7 @@ health_check @hc + wait_row_count Service_Monitor 2 + check ovn-nbctl --wait=sb sync + 
+-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt ++ovn-sbctl dump-flows sw0 | grep backends | grep priority=120 > lflows.txt + AT_CHECK([cat lflows.txt | sed 's/table=..//'], [0], [dnl + (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);) + ]) +@@ -1109,7 +1109,7 @@ sm_sw1_p1=$(fetch_column Service_Monitor _uuid logical_port=sw1-p1) + + AT_CAPTURE_FILE([sbflows3]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows 3 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0], ++ [ovn-sbctl dump-flows sw0 | tee sbflows 3 | grep 'priority=120.*backends' | sed 's/table=..//'], [0], + [ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);) + ]) + +@@ -1120,7 +1120,7 @@ check ovn-nbctl --wait=sb sync + + AT_CAPTURE_FILE([sbflows4]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows4 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0], ++ [ovn-sbctl dump-flows sw0 | tee sbflows4 | grep 'priority=120.*backends' | sed 's/table=..//'], [0], + [ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);) + ]) + +@@ -1132,7 +1132,7 @@ check ovn-nbctl --wait=sb sync + + AT_CAPTURE_FILE([sbflows5]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows5 | grep 'priority=120.*ct_lb'], 1) ++ [ovn-sbctl dump-flows sw0 | tee sbflows5 | grep 'priority=120.*backends'], 1) + + AT_CAPTURE_FILE([sbflows6]) + OVS_WAIT_FOR_OUTPUT( +@@ -1149,7 +1149,7 @@ check ovn-nbctl --wait=sb sync + + AT_CAPTURE_FILE([sbflows7]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows7 | grep ct_lb | grep priority=120 | sed 's/table=..//'], 0, ++ [ovn-sbctl dump-flows sw0 | tee sbflows7 | grep backends | grep priority=120 | sed 's/table=..//'], 0, + [ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);) + ]) + +@@ -1185,7 +1185,7 @@ wait_row_count Service_Monitor 1 port=1000 + + AT_CAPTURE_FILE([sbflows9]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows9 | grep ct_lb | grep priority=120 | sed 's/table=..//' | sort], ++ [ovn-sbctl dump-flows sw0 | tee sbflows9 | grep backends | grep priority=120 | sed 's/table=..//' | sort], + 0, + [ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);) + (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000);) +@@ -1199,7 +1199,7 @@ check ovn-nbctl --wait=sb sync + + AT_CAPTURE_FILE([sbflows10]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw0 | tee sbflows10 | grep ct_lb | grep priority=120 | sed 's/table=..//' | sort], ++ [ovn-sbctl dump-flows sw0 | tee sbflows10 | grep backends | grep priority=120 | sed 's/table=..//' | sort], + 0, + [ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);) + (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 
1000), action=(reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);) +@@ -1209,7 +1209,7 @@ AS_BOX([Associate lb1 to sw1]) + check ovn-nbctl --wait=sb ls-lb-add sw1 lb1 + AT_CAPTURE_FILE([sbflows11]) + OVS_WAIT_FOR_OUTPUT( +- [ovn-sbctl dump-flows sw1 | tee sbflows11 | grep ct_lb | grep priority=120 | sed 's/table=..//' | sort], ++ [ovn-sbctl dump-flows sw1 | tee sbflows11 | grep backends | grep priority=120 | sed 's/table=..//' | sort], + 0, [dnl + (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);) + (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);) +@@ -1269,7 +1269,7 @@ ovn-sbctl set service_monitor $sm_sw1_p1 status=offline + AT_CAPTURE_FILE([sbflows12]) + OVS_WAIT_FOR_OUTPUT( + [ovn-sbctl dump-flows sw0 | tee sbflows12 | grep "ip4.dst == 10.0.0.10 && tcp.dst == 80" | grep priority=120 | sed 's/table=..//'], [0], [dnl +- (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=6);};) ++ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=5);};) + ]) + + AT_CLEANUP +@@ -1504,6 +1504,19 @@ ovn-nbctl lr-nat-add lr dnat_and_snat 43.43.43.4 42.42.42.4 ls-vm 00:00:00:00:00 + ovn-nbctl lr-nat-add lr snat 43.43.43.150 43.43.43.50 + ovn-nbctl lr-nat-add lr snat 43.43.43.150 43.43.43.51 + ++ovn-nbctl lb-add lb1 "192.168.2.1:8080" "10.0.0.4:8080" ++ovn-nbctl lb-add lb2 "192.168.2.4:8080" "10.0.0.5:8080" udp ++ovn-nbctl lb-add lb3 "192.168.2.5:8080" "10.0.0.6:8080" ++ovn-nbctl lb-add lb4 "192.168.2.6:8080" "10.0.0.7:8080" ++ovn-nbctl lb-add lb5 "fe80::200:ff:fe00:101:8080" "fe02::200:ff:fe00:101:8080" ++ovn-nbctl lb-add lb5 "fe80::200:ff:fe00:102:8080" "fe02::200:ff:fe00:102:8080" ++ ++ovn-nbctl lr-lb-add lr lb1 ++ovn-nbctl lr-lb-add lr lb2 ++ovn-nbctl lr-lb-add lr lb3 ++ovn-nbctl lr-lb-add lr lb4 ++ovn-nbctl lr-lb-add lr lb5 ++ + ovn-nbctl --wait=sb sync + + # Ingress router port ETH address is stored in lr_in_admission. 
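The flow checks that follow (lr_in_admission storing the router port MAC in xreg0, then the lr_in_ip_input ARP/ND responders) exercise the reworked ARP reply action, which now exchanges arp.tpa and arp.spa instead of writing a per-VIP source address into arp.spa. A minimal C sketch of that rewrite; struct arp_fields and its members are illustrative stand-ins, not OVN's field definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the ARP fields the logical flow action touches. */
    struct arp_fields {
        uint16_t op;   /* 1 = request, 2 = reply */
        uint32_t spa;  /* sender protocol address */
        uint32_t tpa;  /* target protocol address */
    };

    /* Build the reply the way the reworked action does: exchange spa and
     * tpa rather than writing the responder's own IP into arp.spa. */
    static void arp_reply(struct arp_fields *arp)
    {
        uint32_t tmp = arp->tpa;

        arp->tpa = arp->spa;
        arp->spa = tmp;
        arp->op = 2;
    }

    int main(void)
    {
        /* 10.0.0.3 asks who-has 192.168.2.1. */
        struct arp_fields pkt = { .op = 1, .spa = 0x0a000003, .tpa = 0xc0a80201 };

        arp_reply(&pkt);
        printf("op=%u spa=%#x tpa=%#x\n",
               (unsigned) pkt.op, (unsigned) pkt.spa, (unsigned) pkt.tpa);
        return 0;
    }

Because the reply no longer embeds the responder's own address, one flow can answer for any number of VIPs, which is what makes the braced address sets in the matches below possible.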
+@@ -1526,28 +1539,46 @@ action=(xreg0[[0..47]] = 00:00:00:00:01:00; next;) + AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_ip_input.*priority=90" | grep "arp\|nd" | sort], [0], [dnl + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.150), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.150; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.2), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.2; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.3), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.3; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.4), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.4; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp" && arp.op == 1 && arp.tpa == 42.42.42.1 && arp.spa == 42.42.42.0/24), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 42.42.42.1; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp" && arp.op == 1 && arp.tpa == { 192.168.2.1, 192.168.2.4, 192.168.2.5, 192.168.2.6 }), dnl ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp" && ip6.dst == {fe80::200:ff:fe00:1, ff02::1:ff00:1} && nd_ns && nd.target == fe80::200:ff:fe00:1), dnl + action=(nd_na_router { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:1; nd.target = fe80::200:ff:fe00:1; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) + table=3 (lr_in_ip_input ), 
priority=90 , dnl ++match=(inport == "lrp" && nd_ns && nd.target == fe80::200:ff:fe00:101:8080), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:101:8080; nd.target = fe80::200:ff:fe00:101:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp" && nd_ns && nd.target == fe80::200:ff:fe00:102:8080), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:102:8080; nd.target = fe80::200:ff:fe00:102:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == 43.43.43.1 && arp.spa == 43.43.43.0/24), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.1; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == { 192.168.2.1, 192.168.2.4, 192.168.2.5, 192.168.2.6 }), dnl ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp-public" && ip6.dst == {fe80::200:ff:fe00:100, ff02::1:ff00:100} && nd_ns && nd.target == fe80::200:ff:fe00:100), dnl + action=(nd_na_router { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:100; nd.target = fe80::200:ff:fe00:100; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp-public" && nd_ns && nd.target == fe80::200:ff:fe00:101:8080), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:101:8080; nd.target = fe80::200:ff:fe00:101:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp-public" && nd_ns && nd.target == fe80::200:ff:fe00:102:8080), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:102:8080; nd.target = fe80::200:ff:fe00:102:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) + ]) + + # xreg0[0..47] isn't used anywhere else. 
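The priority-90 flows above also show the other half of that rework: all IPv4 load-balancer VIPs are folded into one braced address set instead of one ARP-responder flow per VIP. The patch builds that set with ds_put_cstr() and sset_join(); the sketch below uses plain libc string handling purely for illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* VIPs taken from the lb1-lb4 definitions above. */
        const char *vips[] = { "192.168.2.1", "192.168.2.4",
                               "192.168.2.5", "192.168.2.6" };
        size_t n = sizeof vips / sizeof vips[0];
        char match[128];

        strcpy(match, "arp.tpa == { ");
        for (size_t i = 0; i < n; i++) {
            strcat(match, vips[i]);
            strcat(match, i + 1 < n ? ", " : " }");
        }
        /* -> arp.tpa == { 192.168.2.1, 192.168.2.4, 192.168.2.5, 192.168.2.6 } */
        puts(match);
        return 0;
    }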
+@@ -1583,28 +1614,46 @@ action=(xreg0[[0..47]] = 00:00:00:00:01:00; next;) + AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_ip_input.*priority=90" | grep "arp\|nd" | sort], [0], [dnl + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.150), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.150; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.2), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.2; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.3), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.3; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(arp.op == 1 && arp.tpa == 43.43.43.4), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.4; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp" && arp.op == 1 && arp.tpa == 42.42.42.1 && arp.spa == 42.42.42.0/24), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 42.42.42.1; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp" && arp.op == 1 && arp.tpa == { 192.168.2.1, 192.168.2.4, 192.168.2.5, 192.168.2.6 }), dnl ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp" && ip6.dst == {fe80::200:ff:fe00:1, ff02::1:ff00:1} && nd_ns && nd.target == fe80::200:ff:fe00:1), dnl + action=(nd_na_router { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:1; nd.target = fe80::200:ff:fe00:1; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) + table=3 (lr_in_ip_input ), 
priority=90 , dnl ++match=(inport == "lrp" && nd_ns && nd.target == fe80::200:ff:fe00:101:8080), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:101:8080; nd.target = fe80::200:ff:fe00:101:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp" && nd_ns && nd.target == fe80::200:ff:fe00:102:8080), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:102:8080; nd.target = fe80::200:ff:fe00:102:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == 43.43.43.1 && arp.spa == 43.43.43.0/24), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.1; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == { 192.168.2.1, 192.168.2.4, 192.168.2.5, 192.168.2.6 } && is_chassis_resident("cr-lrp-public")), dnl ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=90 , dnl + match=(inport == "lrp-public" && ip6.dst == {fe80::200:ff:fe00:100, ff02::1:ff00:100} && nd_ns && nd.target == fe80::200:ff:fe00:100 && is_chassis_resident("cr-lrp-public")), dnl + action=(nd_na_router { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:100; nd.target = fe80::200:ff:fe00:100; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp-public" && nd_ns && nd.target == fe80::200:ff:fe00:101:8080 && is_chassis_resident("cr-lrp-public")), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:101:8080; nd.target = fe80::200:ff:fe00:101:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) ++ table=3 (lr_in_ip_input ), priority=90 , dnl ++match=(inport == "lrp-public" && nd_ns && nd.target == fe80::200:ff:fe00:102:8080 && is_chassis_resident("cr-lrp-public")), dnl ++action=(nd_na { eth.src = xreg0[[0..47]]; ip6.src = fe80::200:ff:fe00:102:8080; nd.target = fe80::200:ff:fe00:102:8080; nd.tll = xreg0[[0..47]]; outport = inport; flags.loopback = 1; output; };) + ]) + + # Priority 91 drop flows (per distributed gw port), if port is not resident. 
+@@ -1626,16 +1675,16 @@ action=(drop;) + AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_ip_input.*priority=92" | grep "arp\|nd" | sort], [0], [dnl + table=3 (lr_in_ip_input ), priority=92 , dnl + match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == 43.43.43.150 && is_chassis_resident("cr-lrp-public")), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.150; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=92 , dnl + match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == 43.43.43.2 && is_chassis_resident("cr-lrp-public")), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.2; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=92 , dnl + match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == 43.43.43.3 && is_chassis_resident("cr-lrp-public")), dnl +-action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa = arp.spa; arp.spa = 43.43.43.3; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = xreg0[[0..47]]; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = xreg0[[0..47]]; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + table=3 (lr_in_ip_input ), priority=92 , dnl + match=(inport == "lrp-public" && arp.op == 1 && arp.tpa == 43.43.43.4 && is_chassis_resident("ls-vm")), dnl +-action=(eth.dst = eth.src; eth.src = 00:00:00:00:00:02; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = 00:00:00:00:00:02; arp.tpa = arp.spa; arp.spa = 43.43.43.4; outport = inport; flags.loopback = 1; output;) ++action=(eth.dst = eth.src; eth.src = 00:00:00:00:00:02; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = 00:00:00:00:00:02; arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) + ]) + + # xreg0[0..47] isn't used anywhere else. 
+@@ -1671,13 +1720,13 @@ AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 + ovn-nbctl ls-lb-add sw0 lb1 + ovn-nbctl --wait=sb sync + AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl +- table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[0]] = 1; next;) ++ table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[2]] = 1; next;) + ]) + + ovn-nbctl ls-lb-add sw0 lb2 + ovn-nbctl --wait=sb sync + AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl +- table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[0]] = 1; next;) ++ table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[2]] = 1; next;) + ]) + + lb1_uuid=$(ovn-nbctl --bare --columns _uuid find load_balancer name=lb1) +@@ -1686,7 +1735,7 @@ lb2_uuid=$(ovn-nbctl --bare --columns _uuid find load_balancer name=lb2) + ovn-nbctl clear load_balancer $lb1_uuid vips + ovn-nbctl --wait=sb sync + AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl +- table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[0]] = 1; next;) ++ table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[2]] = 1; next;) + ]) + + ovn-nbctl clear load_balancer $lb2_uuid vips +@@ -1699,14 +1748,14 @@ ovn-nbctl set load_balancer $lb2_uuid vips:"10.0.0.11"="10.0.0.4" + + ovn-nbctl --wait=sb sync + AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl +- table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[0]] = 1; next;) ++ table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[2]] = 1; next;) + ]) + + # Now reverse the order of clearing the vip. + ovn-nbctl clear load_balancer $lb2_uuid vips + ovn-nbctl --wait=sb sync + AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl +- table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[0]] = 1; next;) ++ table=0 (ls_out_pre_lb ), priority=100 , match=(ip), action=(reg0[[2]] = 1; next;) + ]) + + ovn-nbctl clear load_balancer $lb1_uuid vips +@@ -1754,10 +1803,10 @@ AT_CAPTURE_FILE([sw1flows]) + + AT_CHECK( + [grep -E 'ls_(in|out)_acl' sw0flows sw1flows | grep pg0 | sort], [0], [dnl +-sw0flows: table=5 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw0flows: table=9 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=6); };) +-sw1flows: table=5 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw1flows: table=9 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=6); };) ++sw0flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. 
*/ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw0flows: table=9 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=5); };) ++sw1flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw1flows: table=9 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=5); };) + ]) + + AS_BOX([2]) +@@ -1770,10 +1819,10 @@ ovn-sbctl dump-flows sw1 > sw1flows2 + AT_CAPTURE_FILE([sw1flows2]) + + AT_CHECK([grep "ls_out_acl" sw0flows2 sw1flows2 | grep pg0 | sort], [0], [dnl +-sw0flows2: table=5 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw0flows2: table=5 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw1flows2: table=5 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw1flows2: table=5 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) ++sw0flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw0flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw1flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw1flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) + ]) + + AS_BOX([3]) +@@ -1786,18 +1835,18 @@ ovn-sbctl dump-flows sw1 > sw1flows3 + AT_CAPTURE_FILE([sw1flows3]) + + AT_CHECK([grep "ls_out_acl" sw0flows3 sw1flows3 | grep pg0 | sort], [0], [dnl +-sw0flows3: table=5 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;) +-sw0flows3: table=5 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;) +-sw0flows3: table=5 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. 
*/ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw0flows3: table=5 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw0flows3: table=5 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw0flows3: table=5 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw1flows3: table=5 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;) +-sw1flows3: table=5 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;) +-sw1flows3: table=5 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw1flows3: table=5 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw1flows3: table=5 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) +-sw1flows3: table=5 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) ++sw0flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;) ++sw0flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;) ++sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. 
*/ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw1flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;) ++sw1flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;) ++sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) ++sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=22); };) + ]) + + AT_CLEANUP +@@ -1932,17 +1981,17 @@ check ovn-nbctl --wait=sb \ + -- acl-add ls from-lport 2 "udp" allow-related \ + -- acl-add ls to-lport 2 "udp" allow-related + AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | grep 'ct\.' | sort], [0], [dnl +- table=4 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=3 , match=(!ct.est), action=(reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=5 , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) +- table=5 (ls_out_acl ), priority=1 , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;) +- table=5 (ls_out_acl ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) +- table=5 (ls_out_acl ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) +- table=5 (ls_out_acl ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=3 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=3 , match=(!ct.est), action=(reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; 
next;) ++ table=3 (ls_out_acl_hint ), priority=5 , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) ++ table=4 (ls_out_acl ), priority=1 , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;) ++ table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) + table=8 (ls_in_acl_hint ), priority=1 , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) + table=8 (ls_in_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) + table=8 (ls_in_acl_hint ), priority=3 , match=(!ct.est), action=(reg0[[9]] = 1; next;) +@@ -1951,9 +2000,9 @@ AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e + table=8 (ls_in_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) + table=8 (ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) + table=9 (ls_in_acl ), priority=1 , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;) +- table=9 (ls_in_acl ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) +- table=9 (ls_in_acl ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) +- table=9 (ls_in_acl ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=9 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) + ]) + + AS_BOX([Check match ct_state with load balancer]) +@@ -1963,18 +2012,25 @@ check ovn-nbctl --wait=sb \ + -- lb-add lb "10.0.0.1" "10.0.0.2" \ + -- ls-lb-add ls lb + +-AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | grep 'ct\.' 
| sort], [0], [dnl +- table=4 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=3 , match=(!ct.est), action=(reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=5 , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) +- table=4 (ls_out_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) +- table=5 (ls_out_acl ), priority=1 , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;) +- table=5 (ls_out_acl ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) +- table=5 (ls_out_acl ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) +- table=5 (ls_out_acl ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl ++ table=3 (ls_out_acl_hint ), priority=0 , match=(1), action=(next;) ++ table=3 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=3 , match=(!ct.est), action=(reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=5 , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) ++ table=3 (ls_out_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) ++ table=4 (ls_out_acl ), priority=0 , match=(1), action=(next;) ++ table=4 (ls_out_acl ), priority=1 , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;) ++ table=4 (ls_out_acl ), priority=1001 , match=(reg0[[7]] == 1 && (ip)), action=(reg0[[1]] = 1; next;) ++ table=4 (ls_out_acl ), priority=1001 , match=(reg0[[8]] == 1 && (ip)), action=(next;) ++ table=4 (ls_out_acl ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=4 (ls_out_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++ table=8 (ls_in_acl_hint ), priority=0 , match=(1), action=(next;) + table=8 (ls_in_acl_hint ), priority=1 , match=(ct.est && 
ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) + table=8 (ls_in_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) + table=8 (ls_in_acl_hint ), priority=3 , match=(!ct.est), action=(reg0[[9]] = 1; next;) +@@ -1982,12 +2038,28 @@ AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e + table=8 (ls_in_acl_hint ), priority=5 , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;) + table=8 (ls_in_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) + table=8 (ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) ++ table=9 (ls_in_acl ), priority=0 , match=(1), action=(next;) + table=9 (ls_in_acl ), priority=1 , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;) +- table=9 (ls_in_acl ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) +- table=9 (ls_in_acl ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) +- table=9 (ls_in_acl ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=9 (ls_in_acl ), priority=1001 , match=(reg0[[7]] == 1 && (ip)), action=(reg0[[1]] = 1; next;) ++ table=9 (ls_in_acl ), priority=1001 , match=(reg0[[8]] == 1 && (ip)), action=(next;) ++ table=9 (ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=9 (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++]) ++ ++ovn-nbctl --wait=sb clear logical_switch ls acls ++ovn-nbctl --wait=sb clear logical_switch ls load_balancer ++ ++AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl ++ table=3 (ls_out_acl_hint ), priority=65535, match=(1), action=(next;) ++ table=4 (ls_out_acl ), priority=65535, match=(1), action=(next;) ++ table=8 (ls_in_acl_hint ), priority=65535, match=(1), action=(next;) ++ table=9 (ls_in_acl ), priority=65535, match=(1), action=(next;) + ]) + ++ + AT_CLEANUP + + AT_SETUP([datapath requested-tnl-key]) +@@ -2197,20 +2269,20 @@ check ovn-nbctl \ + check ovn-nbctl --wait=sb sync + + AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_pre_hairpin | sort], [0], [dnl +- table=14(ls_in_pre_hairpin ), priority=0 , match=(1), action=(next;) +- table=14(ls_in_pre_hairpin ), priority=100 , match=(ip && ct.trk), action=(reg0[[6]] = chk_lb_hairpin(); reg0[[12]] = chk_lb_hairpin_reply(); next;) ++ table=13(ls_in_pre_hairpin ), priority=0 , match=(1), action=(next;) ++ table=13(ls_in_pre_hairpin ), priority=100 , match=(ip && ct.trk), action=(reg0[[6]] = chk_lb_hairpin(); reg0[[12]] = chk_lb_hairpin_reply(); next;) + ]) + + AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_nat_hairpin | sort], [0], [dnl +- table=15(ls_in_nat_hairpin ), priority=0 , match=(1), action=(next;) +- table=15(ls_in_nat_hairpin ), priority=100 , match=(ip && ct.est && ct.trk && reg0[[6]] == 1), 
action=(ct_snat;) +- table=15(ls_in_nat_hairpin ), priority=100 , match=(ip && ct.new && ct.trk && reg0[[6]] == 1), action=(ct_snat_to_vip; next;) +- table=15(ls_in_nat_hairpin ), priority=90 , match=(ip && reg0[[12]] == 1), action=(ct_snat;) ++ table=14(ls_in_nat_hairpin ), priority=0 , match=(1), action=(next;) ++ table=14(ls_in_nat_hairpin ), priority=100 , match=(ip && ct.est && ct.trk && reg0[[6]] == 1), action=(ct_snat;) ++ table=14(ls_in_nat_hairpin ), priority=100 , match=(ip && ct.new && ct.trk && reg0[[6]] == 1), action=(ct_snat_to_vip; next;) ++ table=14(ls_in_nat_hairpin ), priority=90 , match=(ip && reg0[[12]] == 1), action=(ct_snat;) + ]) + + AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort], [0], [dnl +- table=16(ls_in_hairpin ), priority=0 , match=(1), action=(next;) +- table=16(ls_in_hairpin ), priority=1 , match=((reg0[[6]] == 1 || reg0[[12]] == 1)), action=(eth.dst <-> eth.src; outport = inport; flags.loopback = 1; output;) ++ table=15(ls_in_hairpin ), priority=0 , match=(1), action=(next;) ++ table=15(ls_in_hairpin ), priority=1 , match=((reg0[[6]] == 1 || reg0[[12]] == 1)), action=(eth.dst <-> eth.src; outport = inport; flags.loopback = 1; output;) + ]) + + AT_CLEANUP +@@ -2324,6 +2396,13 @@ check ovn-nbctl lsp-set-options public-lr0 router-port=lr0-public + + check ovn-nbctl --wait=sb lr-policy-add lr0 10 "ip4.src == 10.0.0.3" reroute 172.168.0.101,172.168.0.102 + ++ovn-nbctl lr-policy-list lr0 > policy-list ++AT_CAPTURE_FILE([policy-list]) ++AT_CHECK([cat policy-list], [0], [dnl ++Routing Policies ++ 10 ip4.src == 10.0.0.3 reroute 172.168.0.101, 172.168.0.102 ++]) + -+OVN_CLEANUP([hv1]) -+AT_CLEANUP -diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at -index b78baa708..6d5dce668 100644 ---- a/tests/ovn-northd.at -+++ b/tests/ovn-northd.at -@@ -2551,7 +2551,7 @@ wait_row_count nb:Logical_Switch_Port 1 up=false name=lsp1 + ovn-sbctl dump-flows lr0 > lr0flows3 + AT_CAPTURE_FILE([lr0flows3]) + +@@ -2551,7 +2630,7 @@ wait_row_count nb:Logical_Switch_Port 1 up=false name=lsp1 AT_CLEANUP @@ -3733,7 +6192,7 @@ index b78baa708..6d5dce668 100644 ovn_start check ovn-nbctl ls-add sw0 -@@ -2589,11 +2589,11 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl +@@ -2589,11 +2668,11 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl table=5 (lr_in_unsnat ), priority=0 , match=(1), action=(next;) ]) @@ -3750,7 +6209,7 @@ index b78baa708..6d5dce668 100644 ]) check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="20.0.0.4 aef0::4" -@@ -2608,14 +2608,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl +@@ -2608,14 +2687,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl table=5 (lr_in_unsnat ), priority=110 , match=(ip6 && ip6.dst == aef0::4), action=(ct_snat;) ]) @@ -3771,7 +6230,7 @@ index b78baa708..6d5dce668 100644 ]) check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="router_ip" -@@ -2633,15 +2637,19 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl +@@ -2633,15 +2716,19 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl table=5 (lr_in_unsnat ), priority=110 , match=(inport == "lr0-sw1" && ip4.dst == 20.0.0.1), action=(ct_snat;) ]) @@ -3793,7 +6252,7 @@ index b78baa708..6d5dce668 100644 ]) check ovn-nbctl --wait=sb remove logical_router lr0 options chassis -@@ -2653,7 +2661,9 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl +@@ -2653,7 +2740,9 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl table=5 (lr_in_unsnat ), priority=0 , 
match=(1), action=(next;) ]) @@ -3804,7 +6263,7 @@ index b78baa708..6d5dce668 100644 ]) check ovn-nbctl set logical_router lr0 options:chassis=ch1 -@@ -2670,16 +2680,43 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl +@@ -2670,16 +2759,43 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl table=5 (lr_in_unsnat ), priority=110 , match=(inport == "lr0-sw1" && ip6.dst == bef0::1), action=(ct_snat;) ]) @@ -3850,21 +6309,334 @@ index b78baa708..6d5dce668 100644 ]) AT_CLEANUP +@@ -2783,3 +2899,206 @@ wait_row_count FDB 0 + ovn-sbctl list FDB + + AT_CLEANUP ++ ++AT_SETUP([ovn -- LS load balancer logical flows]) ++ovn_start ++ ++check ovn-nbctl \ ++ -- ls-add sw0 \ ++ -- lb-add lb0 10.0.0.10:80 10.0.0.4:8080 \ ++ -- ls-lb-add sw0 lb0 ++ ++check ovn-nbctl lr-add lr0 ++check ovn-nbctl lrp-add lr0 lr0-sw0 00:00:00:00:ff:01 10.0.0.1/24 ++check ovn-nbctl lsp-add sw0 sw0-lr0 ++check ovn-nbctl lsp-set-type sw0-lr0 router ++check ovn-nbctl lsp-set-addresses sw0-lr0 00:00:00:00:ff:01 ++check ovn-nbctl lsp-set-options sw0-lr0 router-port=lr0-sw0 ++ ++check ovn-nbctl --wait=sb sync ++ ++check_stateful_flows() { ++ ovn-sbctl dump-flows sw0 > sw0flows ++ AT_CAPTURE_FILE([sw0flows]) ++ ++ AT_CHECK([grep "ls_in_pre_lb" sw0flows | sort], [0], [dnl ++ table=6 (ls_in_pre_lb ), priority=0 , match=(1), action=(next;) ++ table=6 (ls_in_pre_lb ), priority=100 , match=(ip), action=(reg0[[2]] = 1; next;) ++ table=6 (ls_in_pre_lb ), priority=110 , match=(eth.dst == $svc_monitor_mac), action=(next;) ++ table=6 (ls_in_pre_lb ), priority=110 , match=(ip && inport == "sw0-lr0"), action=(next;) ++ table=6 (ls_in_pre_lb ), priority=110 , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;) ++]) ++ ++ AT_CHECK([grep "ls_in_pre_stateful" sw0flows | sort], [0], [dnl ++ table=7 (ls_in_pre_stateful ), priority=0 , match=(1), action=(next;) ++ table=7 (ls_in_pre_stateful ), priority=100 , match=(reg0[[0]] == 1), action=(ct_next;) ++ table=7 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && sctp), action=(reg1 = ip4.dst; reg2[[0..15]] = sctp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && tcp), action=(reg1 = ip4.dst; reg2[[0..15]] = tcp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && udp), action=(reg1 = ip4.dst; reg2[[0..15]] = udp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && sctp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = sctp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && tcp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = tcp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && udp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = udp.dst; ct_lb;) ++]) ++ ++ AT_CHECK([grep "ls_in_stateful" sw0flows | sort], [0], [dnl ++ table=12(ls_in_stateful ), priority=0 , match=(1), action=(next;) ++ table=12(ls_in_stateful ), priority=100 , match=(reg0[[1]] == 1), action=(ct_commit { ct_label.blocked = 0; }; next;) ++ table=12(ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.4:8080);) ++]) ++ ++ AT_CHECK([grep "ls_out_pre_lb" sw0flows | sort], [0], [dnl ++ table=0 (ls_out_pre_lb ), priority=0 , match=(1), action=(next;) ++ table=0 (ls_out_pre_lb ), priority=100 , match=(ip), 
action=(reg0[[2]] = 1; next;) ++ table=0 (ls_out_pre_lb ), priority=110 , match=(eth.src == $svc_monitor_mac), action=(next;) ++ table=0 (ls_out_pre_lb ), priority=110 , match=(ip && outport == "sw0-lr0"), action=(next;) ++ table=0 (ls_out_pre_lb ), priority=110 , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;) ++]) ++ ++ AT_CHECK([grep "ls_out_pre_stateful" sw0flows | sort], [0], [dnl ++ table=2 (ls_out_pre_stateful), priority=0 , match=(1), action=(next;) ++ table=2 (ls_out_pre_stateful), priority=100 , match=(reg0[[0]] == 1), action=(ct_next;) ++ table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;) ++]) ++ ++ AT_CHECK([grep "ls_out_lb" sw0flows | sort], [0], []) ++ ++ AT_CHECK([grep "ls_out_stateful" sw0flows | sort], [0], [dnl ++ table=7 (ls_out_stateful ), priority=0 , match=(1), action=(next;) ++ table=7 (ls_out_stateful ), priority=100 , match=(reg0[[1]] == 1), action=(ct_commit { ct_label.blocked = 0; }; next;) ++]) ++} ++ ++check_stateful_flows ++ ++# Add few ACLs ++check ovn-nbctl --wait=sb acl-add sw0 from-lport 1002 "ip4 && tcp && tcp.dst == 80" allow-related ++check ovn-nbctl --wait=sb acl-add sw0 to-lport 1002 "ip4 && tcp && tcp.src == 80" drop ++ ++check_stateful_flows ++ ++# Remove load balancer from sw0 ++check ovn-nbctl --wait=sb ls-lb-del sw0 lb0 ++ ++ovn-sbctl dump-flows sw0 > sw0flows ++AT_CAPTURE_FILE([sw0flows]) ++ ++AT_CHECK([grep "ls_in_pre_lb" sw0flows | sort], [0], [dnl ++ table=6 (ls_in_pre_lb ), priority=0 , match=(1), action=(next;) ++ table=6 (ls_in_pre_lb ), priority=110 , match=(eth.dst == $svc_monitor_mac), action=(next;) ++ table=6 (ls_in_pre_lb ), priority=110 , match=(ip && inport == "sw0-lr0"), action=(next;) ++ table=6 (ls_in_pre_lb ), priority=110 , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;) ++]) ++ ++AT_CHECK([grep "ls_in_pre_stateful" sw0flows | sort], [0], [dnl ++ table=7 (ls_in_pre_stateful ), priority=0 , match=(1), action=(next;) ++ table=7 (ls_in_pre_stateful ), priority=100 , match=(reg0[[0]] == 1), action=(ct_next;) ++ table=7 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && sctp), action=(reg1 = ip4.dst; reg2[[0..15]] = sctp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && tcp), action=(reg1 = ip4.dst; reg2[[0..15]] = tcp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && udp), action=(reg1 = ip4.dst; reg2[[0..15]] = udp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && sctp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = sctp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && tcp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = tcp.dst; ct_lb;) ++ table=7 (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && udp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = udp.dst; ct_lb;) ++]) ++ ++AT_CHECK([grep "ls_in_stateful" sw0flows | sort], [0], [dnl ++ table=12(ls_in_stateful ), priority=0 , match=(1), action=(next;) ++ table=12(ls_in_stateful ), priority=100 , match=(reg0[[1]] == 1), action=(ct_commit { ct_label.blocked = 0; }; next;) ++]) ++ ++AT_CHECK([grep "ls_out_pre_lb" sw0flows | sort], [0], [dnl ++ table=0 (ls_out_pre_lb ), priority=0 , match=(1), action=(next;) ++ table=0 (ls_out_pre_lb ), priority=110 , match=(eth.src == $svc_monitor_mac), action=(next;) ++ table=0 (ls_out_pre_lb ), 
priority=110 , match=(ip && outport == "sw0-lr0"), action=(next;) ++ table=0 (ls_out_pre_lb ), priority=110 , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;) ++]) ++ ++AT_CHECK([grep "ls_out_pre_stateful" sw0flows | sort], [0], [dnl ++ table=2 (ls_out_pre_stateful), priority=0 , match=(1), action=(next;) ++ table=2 (ls_out_pre_stateful), priority=100 , match=(reg0[[0]] == 1), action=(ct_next;) ++ table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;) ++]) ++ ++AT_CHECK([grep "ls_out_stateful" sw0flows | sort], [0], [dnl ++ table=7 (ls_out_stateful ), priority=0 , match=(1), action=(next;) ++ table=7 (ls_out_stateful ), priority=100 , match=(reg0[[1]] == 1), action=(ct_commit { ct_label.blocked = 0; }; next;) ++]) ++ ++AT_CLEANUP ++]) ++ ++AT_SETUP([ovn -- ct.inv usage]) ++ovn_start ++ ++check ovn-nbctl ls-add sw0 ++check ovn-nbctl lsp-add sw0 sw0p1 ++ ++check ovn-nbctl --wait=sb acl-add sw0 to-lport 1002 ip allow-related ++ ++ovn-sbctl dump-flows sw0 > sw0flows ++AT_CAPTURE_FILE([sw0flows]) ++ ++AT_CHECK([grep -w "ls_in_acl" sw0flows | grep 6553 | sort], [0], [dnl ++ table=9 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=9 (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++]) ++ ++AT_CHECK([grep -w "ls_out_acl" sw0flows | grep 6553 | sort], [0], [dnl ++ table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=4 (ls_out_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++]) ++ ++# Disable ct.inv usage. ++check ovn-nbctl --wait=sb set NB_Global . options:use_ct_inv_match=false ++ ++ovn-sbctl dump-flows sw0 > sw0flows ++AT_CAPTURE_FILE([sw0flows]) ++ ++AT_CHECK([grep -w "ls_in_acl" sw0flows | grep 6553 | sort], [0], [dnl ++ table=9 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=((ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++]) ++ ++AT_CHECK([grep -w "ls_out_acl" sw0flows | grep 6553 | sort], [0], [dnl ++ table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=((ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++]) ++ ++AT_CHECK([grep -c "ct.inv" sw0flows], [1], [dnl ++0 ++]) ++ ++# Enable ct.inv usage. 
++check ovn-nbctl --wait=sb set NB_Global . options:use_ct_inv_match=true ++ ++ovn-sbctl dump-flows sw0 > sw0flows ++AT_CAPTURE_FILE([sw0flows]) ++ ++AT_CHECK([grep -w "ls_in_acl" sw0flows | grep 6553 | sort], [0], [dnl ++ table=9 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=9 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=9 (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++]) ++ ++AT_CHECK([grep -w "ls_out_acl" sw0flows | grep 6553 | sort], [0], [dnl ++ table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=4 (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=4 (ls_out_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++]) ++ ++AT_CHECK([grep -c "ct.inv" sw0flows], [0], [dnl ++6 ++]) ++ ++AT_CLEANUP diff --git a/tests/ovn.at b/tests/ovn.at -index b465784cd..dbc6e549b 100644 +index b465784cd..0377b75c3 100644 --- a/tests/ovn.at +++ b/tests/ovn.at -@@ -11494,6 +11494,59 @@ OVN_CLEANUP([hv1],[hv2]) +@@ -693,6 +693,11 @@ ip,nw_src=4.0.0.0/4.0.0.0 + ip,nw_src=64.0.0.0/64.0.0.0 + ip,nw_src=8.0.0.0/8.0.0.0 + ]) ++AT_CHECK([expr_to_flow 'ip4.dst == 172.27.0.65 && ip4.src == $set1 && ip4.dst != 10.128.0.0/14'], [0], [dnl ++ip,nw_src=10.0.0.1,nw_dst=172.27.0.65 ++ip,nw_src=10.0.0.2,nw_dst=172.27.0.65 ++ip,nw_src=10.0.0.3,nw_dst=172.27.0.65 ++]) + AT_CLEANUP + + AT_SETUP([ovn -- converting expressions to flows -- port groups]) +@@ -9878,15 +9883,12 @@ AT_CHECK([ovn-nbctl --wait=sb sync], [0], [ignore]) + ovn-sbctl dump-flows > sbflows + AT_CAPTURE_FILE([sbflows]) + +-reset_pcap_file() { +- local iface=$1 +- local pcap_file=$2 +- check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \ +-options:rxq_pcap=dummy-rx.pcap +- rm -f ${pcap_file}*.pcap +- check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \ +-options:rxq_pcap=${pcap_file}-rx.pcap +-} ++hv1_gw1_ofport=$(as hv1 ovs-vsctl --bare --columns ofport find Interface name=ovn-gw1-0) ++hv1_gw2_ofport=$(as hv1 ovs-vsctl --bare --columns ofport find Interface name=ovn-gw2-0) ++ ++OVS_WAIT_UNTIL([ ++ test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=37 | grep -c "active_backup,ofport,members:$hv1_gw1_ofport,$hv1_gw2_ofport") ++]) + + test_ip_packet() + { +@@ -9932,13 +9934,13 @@ test_ip_packet() + echo $expected > ext1-vif1.expected + exp_gw_ip_garp=ffffffffffff00000201020308060001080006040001000002010203ac100101000000000000ac100101 + echo $exp_gw_ip_garp >> ext1-vif1.expected +- as $active_gw reset_pcap_file br-phys_n1 $active_gw/br-phys_n1 ++ as $active_gw reset_iface_pcap_file br-phys_n1 $active_gw/br-phys_n1 + + if test $backup_vswitchd_dead != 1; then + # Reset the file only if vswitchd in backup gw is alive +- as $backup_gw reset_pcap_file br-phys_n1 $backup_gw/br-phys_n1 ++ as $backup_gw reset_iface_pcap_file br-phys_n1 $backup_gw/br-phys_n1 + fi +- as ext1 reset_pcap_file ext1-vif1 ext1/vif1 ++ as ext1 reset_iface_pcap_file ext1-vif1 ext1/vif1 + + # 
Resend packet from foo1 to outside1 + check as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet +@@ -9990,6 +9992,10 @@ AT_CHECK( + <1> + ]) + ++OVS_WAIT_UNTIL([ ++ test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=37 | grep -c "active_backup,ofport,members:$hv1_gw2_ofport,$hv1_gw1_ofport") ++]) ++ + test_ip_packet gw2 gw1 0 + + # Get the claim count of both gw1 and gw2. +@@ -10010,6 +10016,12 @@ OVS_WAIT_UNTIL([test $gw1_claim_ct = `cat gw1/ovn-controller.log \ + AT_CHECK([test $gw2_claim_ct = `cat gw2/ovn-controller.log | \ + grep -c "cr-alice: Claiming"`]) + ++OVS_WAIT_UNTIL([ ++ bfd_status=$(as hv1 ovs-vsctl get interface ovn-gw2-0 bfd_status:state) ++ echo "bfd status = $bfd_status" ++ test "$bfd_status" = "down" ++]) ++ + test_ip_packet gw1 gw2 1 + + as gw2 +@@ -11490,10 +11502,100 @@ for i in 1 2; do + done + done + ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int |awk '/table=65/{print substr($8, 16, length($8))}' |sort -n], [0], [dnl ++10 ++11 ++]) ++ ++# remove the localport from br-int and re-create it ++as hv1 ++check ovs-vsctl del-port vif01 ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int |awk '/table=65/{print substr($8, 16, length($8))}' |sort -n], [0], [dnl ++11 ++]) ++ ++as hv1 ++check ovs-vsctl add-port br-int vif01 \ ++ -- set Interface vif01 external-ids:iface-id=lp01 ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int |awk '/table=65/{print substr($8, 16, length($8))}' |sort -n], [0], [dnl ++2 ++11 ++]) ++ + OVN_CLEANUP([hv1],[hv2]) AT_CLEANUP +AT_SETUP([ovn -- localport suppress gARP]) +ovn_start + ++send_garp() { ++ local inport=$1 eth_src=$2 eth_dst=$3 spa=$4 tpa=$5 ++ local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa} ++ as hv1 ovs-appctl netdev-dummy/receive vif$inport $request ++} ++ +net_add n1 +sim_add hv1 +as hv1 +check ovs-vsctl add-br br-phys ++ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys +ovn_attach n1 br-phys 192.168.0.1 + +check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys @@ -3875,6 +6647,7 @@ index b465784cd..dbc6e549b 100644 + -- lsp-set-addresses lp "00:00:00:00:00:01 10.0.0.1" \ + -- lsp-add ls ln \ + -- lsp-set-type ln localnet \ ++ -- lsp-set-addresses ln unknown \ + -- lsp-set-options ln network_name=phys \ + -- lsp-add ls lsp \ + -- lsp-set-addresses lsp "00:00:00:00:00:02 10.0.0.2" @@ -3908,13 +6681,162 @@ index b465784cd..dbc6e549b 100644 + test 0 -eq $pkts +]) + ++spa=$(ip_to_hex 10 0 0 1) ++tpa=$(ip_to_hex 10 0 0 100) ++send_garp 1 000000000001 ffffffffffff $spa $tpa ++ ++dnl traffic from localport should not be sent to localnet ++AT_CHECK([tcpdump -r hv1/br-phys_n1-tx.pcap arp[[24:4]]=0x0a000064 | wc -l],[0],[dnl ++0 ++],[ignore]) ++ +OVN_CLEANUP([hv1]) +AT_CLEANUP + AT_SETUP([ovn -- 1 LR with HA distributed router gateway port]) ovn_start -@@ -16647,56 +16700,67 @@ ovs-vsctl -- add-port br-int hv2-vif2 -- \ +@@ -13901,16 +14003,16 @@ check ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow + check ovn-nbctl --wait=hv sync + + # Check OVS flows, the less restrictive flows should have been installed. 
+-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \ ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \ + grep "priority=1003" | \ + sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl +- table=45, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() ++ table=44, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() + ]) + + # Traffic 10.0.0.1, 10.0.0.2 -> 10.0.0.3, 10.0.0.4 should be allowed. +@@ -13945,16 +14047,16 @@ check ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1 || ip4.src==10.0.0.1' + check ovn-nbctl --wait=hv sync + + # Check OVS flows, the second less restrictive allow ACL should have been installed. +-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \ ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \ + grep "priority=1003" | \ + sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl +- table=45, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() ++ table=44, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() + ]) + + # Remove the less restrictive allow ACL. +@@ -13962,16 +14064,16 @@ check ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1' + check ovn-nbctl --wait=hv sync + + # Check OVS flows, the 10.0.0.1 conjunction should have been reinstalled. 
+-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \ ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \ + grep "priority=1003" | \ + sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl +- table=45, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() ++ table=44, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() + ]) + + # Traffic 10.0.0.1, 10.0.0.2 -> 10.0.0.3, 10.0.0.4 should be allowed. +@@ -14001,16 +14103,16 @@ check ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow + check ovn-nbctl --wait=hv sync + + # Check OVS flows, the less restrictive flows should have been installed. +-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \ ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \ + grep "priority=1003" | \ + sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl +- table=45, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() ++ table=44, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() + ]) + + # Add another ACL that overlaps with the existing less restrictive ones. +@@ -14021,19 +14123,19 @@ check ovn-nbctl --wait=hv sync + # with an additional conjunction action. + # + # New non-conjunctive flows should be added to match on 'udp'. 
+-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \ ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \ + grep "priority=1003" | \ + sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl +- table=45, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,conj_id=4,ip,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,46) +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction(),conjunction() +- table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() +- table=45, priority=1003,udp,metadata=0x1 actions=resubmit(,46) +- table=45, priority=1003,udp6,metadata=0x1 actions=resubmit(,46) ++ table=44, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,conj_id=4,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45) ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction(),conjunction() ++ table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction() ++ table=44, priority=1003,udp,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=1003,udp6,metadata=0x1 actions=resubmit(,45) + ]) + + OVN_CLEANUP([hv1]) +@@ -15375,7 +15477,7 @@ wait_for_ports_up ls1-lp_ext1 + # There should be a flow in hv2 to drop traffic from ls1-lp_ext1 destined + # to router mac. 
+ AT_CHECK([as hv2 ovs-ofctl dump-flows br-int \ +-table=30,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \ ++table=29,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \ + grep -c "actions=drop"], [0], [1 + ]) + +@@ -16647,56 +16749,67 @@ ovs-vsctl -- add-port br-int hv2-vif2 -- \ ovn-nbctl ls-add sw0 @@ -4021,21 +6943,21 @@ index b465784cd..dbc6e549b 100644 wait_for_ports_up ovn-nbctl --wait=hv sync -@@ -16746,6 +16810,30 @@ ovs-vsctl del-port hv1-vif3 +@@ -16746,6 +16859,30 @@ ovs-vsctl del-port hv1-vif3 AT_CHECK([test x$(ovn-sbctl --bare --columns chassis find port_binding \ logical_port=sw0-vir) = x], [0], []) +check_virtual_offlows_present() { + hv=$1 + -+ AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | grep "priority=2000"], [0], [dnl -+ table=45, priority=2000,ip,metadata=0x1 actions=resubmit(,46) -+ table=45, priority=2000,ipv6,metadata=0x1 actions=resubmit(,46) ++ AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | grep "priority=2000"], [0], [dnl ++ table=44, priority=2000,ip,metadata=0x1 actions=resubmit(,45) ++ table=44, priority=2000,ipv6,metadata=0x1 actions=resubmit(,45) +]) + + AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=11 | ofctl_strip_all | \ + grep "priority=92" | grep 172.168.0.50], [0], [dnl -+ table=11, priority=92,arp,reg14=0x3,metadata=0x3,arp_tpa=172.168.0.50,arp_op=1 actions=move:NXM_OF_ETH_SRC[[]]->NXM_OF_ETH_DST[[]],mod_dl_src:10:54:00:00:00:10,load:0x2->NXM_OF_ARP_OP[[]],move:NXM_NX_ARP_SHA[[]]->NXM_NX_ARP_THA[[]],load:0x105400000010->NXM_NX_ARP_SHA[[]],move:NXM_OF_ARP_SPA[[]]->NXM_OF_ARP_TPA[[]],load:0xaca80032->NXM_OF_ARP_SPA[[]],move:NXM_NX_REG14[[]]->NXM_NX_REG15[[]],load:0x1->NXM_NX_REG10[[0]],resubmit(,37) ++ table=11, priority=92,arp,reg14=0x3,metadata=0x3,arp_tpa=172.168.0.50,arp_op=1 actions=move:NXM_OF_ETH_SRC[[]]->NXM_OF_ETH_DST[[]],mod_dl_src:10:54:00:00:00:10,load:0x2->NXM_OF_ARP_OP[[]],move:NXM_NX_ARP_SHA[[]]->NXM_NX_ARP_THA[[]],load:0x105400000010->NXM_NX_ARP_SHA[[]],push:NXM_OF_ARP_SPA[[]],push:NXM_OF_ARP_TPA[[]],pop:NXM_OF_ARP_SPA[[]],pop:NXM_OF_ARP_TPA[[]],move:NXM_NX_REG14[[]]->NXM_NX_REG15[[]],load:0x1->NXM_NX_REG10[[0]],resubmit(,37) +]) +} + @@ -4052,7 +6974,7 @@ index b465784cd..dbc6e549b 100644 # From sw0-p0 send GARP for 10.0.0.10. hv1 should claim sw0-vir # and sw0-p1 should be its virtual_parent. eth_src=505400000003 -@@ -16767,6 +16855,13 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows2 | grep "reg0 == 10.0.0.10" | sed 's/ +@@ -16767,6 +16904,13 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows2 | grep "reg0 == 10.0.0.10" | sed 's/ table=??(lr_in_arp_resolve ), priority=100 , match=(outport == "lr0-sw0" && reg0 == 10.0.0.10), action=(eth.dst = 50:54:00:00:00:03; next;) ]) @@ -4066,7 +6988,7 @@ index b465784cd..dbc6e549b 100644 # Forcibly clear virtual_parent. ovn-controller should release the binding # gracefully. pb_uuid=$(ovn-sbctl --bare --columns _uuid find port_binding logical_port=sw0-vir) -@@ -16777,6 +16872,13 @@ logical_port=sw0-vir) = x]) +@@ -16777,6 +16921,13 @@ logical_port=sw0-vir) = x]) wait_row_count nb:Logical_Switch_Port 1 up=false name=sw0-vir @@ -4080,7 +7002,7 @@ index b465784cd..dbc6e549b 100644 # From sw0-p0 resend GARP for 10.0.0.10. hv1 should reclaim sw0-vir # and sw0-p1 should be its virtual_parent. 
send_garp 1 1 $eth_src $eth_dst $spa $tpa -@@ -16789,6 +16891,58 @@ logical_port=sw0-vir) = xsw0-p1]) +@@ -16789,6 +16940,58 @@ logical_port=sw0-vir) = xsw0-p1]) wait_for_ports_up sw0-vir @@ -4139,7 +7061,7 @@ index b465784cd..dbc6e549b 100644 # From sw0-p3 send GARP for 10.0.0.10. hv1 should claim sw0-vir # and sw0-p3 should be its virtual_parent. eth_src=505400000005 -@@ -16806,8 +16960,8 @@ logical_port=sw0-vir) = xsw0-p3]) +@@ -16806,8 +17009,8 @@ logical_port=sw0-vir) = xsw0-p3]) wait_for_ports_up sw0-vir # There should be an arp resolve flow to resolve the virtual_ip with the @@ -4150,7 +7072,7 @@ index b465784cd..dbc6e549b 100644 ovn-sbctl dump-flows lr0 > lr0-flows3 AT_CAPTURE_FILE([lr0-flows3]) cp ovn-sb/ovn-sb.db lr0-flows3.db -@@ -16815,6 +16969,13 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows3 | grep "reg0 == 10.0.0.10" | sed 's +@@ -16815,6 +17018,13 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows3 | grep "reg0 == 10.0.0.10" | sed 's table=??(lr_in_arp_resolve ), priority=100 , match=(outport == "lr0-sw0" && reg0 == 10.0.0.10), action=(eth.dst = 50:54:00:00:00:05; next;) ]) @@ -4164,7 +7086,7 @@ index b465784cd..dbc6e549b 100644 # send the garp from sw0-p2 (in hv2). hv2 should claim sw0-vir # and sw0-p2 shpuld be its virtual_parent. eth_src=505400000004 -@@ -16832,14 +16993,21 @@ logical_port=sw0-vir) = xsw0-p2]) +@@ -16832,14 +17042,21 @@ logical_port=sw0-vir) = xsw0-p2]) wait_for_ports_up sw0-vir # There should be an arp resolve flow to resolve the virtual_ip with the @@ -4188,7 +7110,7 @@ index b465784cd..dbc6e549b 100644 # Now send arp reply from sw0-p1. hv1 should claim sw0-vir # and sw0-p1 shpuld be its virtual_parent. eth_src=505400000003 -@@ -16863,6 +17031,14 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows5 | grep "reg0 == 10.0.0.10" | sed 's/ +@@ -16863,6 +17080,14 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows5 | grep "reg0 == 10.0.0.10" | sed 's/ table=??(lr_in_arp_resolve ), priority=100 , match=(outport == "lr0-sw0" && reg0 == 10.0.0.10), action=(eth.dst = 50:54:00:00:00:03; next;) ]) @@ -4203,7 +7125,7 @@ index b465784cd..dbc6e549b 100644 # Delete hv1-vif1 port. hv1 should release sw0-vir as hv1 ovs-vsctl del-port hv1-vif1 -@@ -16883,6 +17059,15 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows6 | grep "reg0 == 10.0.0.10" | sed 's/ +@@ -16883,6 +17108,15 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows6 | grep "reg0 == 10.0.0.10" | sed 's/ table=??(lr_in_arp_resolve ), priority=100 , match=(outport == "lr0-sw0" && reg0 == 10.0.0.10), action=(eth.dst = 00:00:00:00:00:00; next;) ]) @@ -4219,7 +7141,7 @@ index b465784cd..dbc6e549b 100644 # Now send arp reply from sw0-p2. hv2 should claim sw0-vir # and sw0-p2 should be its virtual_parent. 
eth_src=505400000004 -@@ -16906,6 +17091,14 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows7 | grep "reg0 == 10.0.0.10" | sed 's/ +@@ -16906,6 +17140,14 @@ AT_CHECK([grep lr_in_arp_resolve lr0-flows7 | grep "reg0 == 10.0.0.10" | sed 's/ table=??(lr_in_arp_resolve ), priority=100 , match=(outport == "lr0-sw0" && reg0 == 10.0.0.10), action=(eth.dst = 50:54:00:00:00:04; next;) ]) @@ -4234,7 +7156,7 @@ index b465784cd..dbc6e549b 100644 # Delete sw0-p2 logical port ovn-nbctl lsp-del sw0-p2 -@@ -16933,6 +17126,14 @@ AT_CHECK([grep ls_in_arp_rsp sw0-flows3 | grep bind_vport | sed 's/table=../tabl +@@ -16933,6 +17175,14 @@ AT_CHECK([grep ls_in_arp_rsp sw0-flows3 | grep bind_vport | sed 's/table=../tabl table=??(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p3" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;) ]) @@ -4249,7 +7171,7 @@ index b465784cd..dbc6e549b 100644 ovn-nbctl --wait=hv remove logical_switch_port sw0-vir options virtual-parents ovn-sbctl dump-flows sw0 > sw0-flows4 AT_CAPTURE_FILE([sw0-flows4]) -@@ -16942,6 +17143,38 @@ ovn-sbctl dump-flows lr0 > lr0-flows8 +@@ -16942,6 +17192,38 @@ ovn-sbctl dump-flows lr0 > lr0-flows8 AT_CAPTURE_FILE([lr0-flows8]) AT_CHECK([grep lr_in_arp_resolve lr0-flows8 | grep "reg0 == 10.0.0.10"], [1]) @@ -4288,7 +7210,274 @@ index b465784cd..dbc6e549b 100644 OVN_CLEANUP([hv1], [hv2]) AT_CLEANUP -@@ -24918,3 +25151,633 @@ AT_CHECK([cat hv2_offlows_table72.txt | grep -v NXST], [1], [dnl +@@ -17321,6 +17603,27 @@ check ovs-vsctl -- add-port br-int hv2-vif4 -- \ + ofport-request=1 + ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys + ++AT_CAPTURE_FILE([exp]) ++AT_CAPTURE_FILE([rcv]) ++check_packets() { ++ > exp ++ > rcv ++ if test "$1" = --uniq; then ++ sort="sort -u"; shift ++ else ++ sort=sort ++ fi ++ for tuple in "$@"; do ++ set $tuple; pcap=$1 type=$2 ++ echo "--- $pcap" | tee -a exp >> rcv ++ $sort "$type" >> exp ++ $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" $pcap | $sort >> rcv ++ echo | tee -a exp >> rcv ++ done ++ ++ $at_diff exp rcv >/dev/null ++} ++ + OVN_POPULATE_ARP + + # Enable IGMP snooping on sw1. +@@ -17337,21 +17640,16 @@ ovn-sbctl dump-flows > sbflows + AT_CAPTURE_FILE([expected]) + AT_CAPTURE_FILE([received]) + > expected +-> received +-for i in 1 2; do +- for j in 1 2; do +- pcap=hv$i/vif$j-tx.pcap +- echo "--- $pcap" | tee -a expected >> received +- $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" $pcap | sort >> received +- echo | tee -a expected >> received +- done +-done +-check $at_diff -F'^---' expected received ++OVS_WAIT_UNTIL( ++ [check_packets 'hv1/vif1-tx.pcap expected' \ ++ 'hv1/vif2-tx.pcap expected' \ ++ 'hv2/vif1-tx.pcap expected' \ ++ 'hv2/vif2-tx.pcap expected'], ++ [$at_diff -F'^---' exp rcv]) + + check ovn-nbctl --wait=hv sync + + AT_CAPTURE_FILE([sbflows2]) +-cp ovn-sb/ovn-sb.db ovn-sb2.db + ovn-sbctl dump-flows > sbflows2 + + # Inject IGMP Join for 239.0.1.68 on sw1-p11. 
+@@ -17369,7 +17667,6 @@ wait_row_count IGMP_Group 2 address=239.0.1.68 + check ovn-nbctl --wait=hv sync + + AT_CAPTURE_FILE([sbflows3]) +-cp ovn-sb/ovn-sb.db ovn-sb3.db + ovn-sbctl dump-flows > sbflows3 + + AS_BOX([IGMP traffic test 1]) +@@ -17386,22 +17683,6 @@ store_ip_multicast_pkt \ + $(ip_to_hex 10 0 0 42) $(ip_to_hex 239 0 1 68) 1e 20 ca70 11 \ + e518e518000a3b3a0000 expected + +-AT_CAPTURE_FILE([exp]) +-AT_CAPTURE_FILE([rcv]) +-check_packets() { +- > exp +- > rcv +- for tuple in "$@"; do +- set $tuple; pcap=$1 type=$2 +- echo "--- $pcap" | tee -a exp >> rcv +- sort "$type" >> exp +- $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" $pcap | sort >> rcv +- echo | tee -a exp >> rcv +- done +- +- $at_diff exp rcv >/dev/null +-} +- + OVS_WAIT_UNTIL( + [check_packets 'hv1/vif1-tx.pcap expected' \ + 'hv2/vif1-tx.pcap expected' \ +@@ -17492,15 +17773,26 @@ check ovn-nbctl set Logical_Switch sw2 \ + other_config:mcast_ip4_src="20.0.0.254" + + AS_BOX([IGMP traffic test 4]) +-# Wait for 1 query interval (1 sec) and check that two queries are generated. ++# Check that multiple queries are generated over time. + > expected + store_igmp_v3_query 0000000002fe $(ip_to_hex 20 0 0 254) 84dd expected + store_igmp_v3_query 0000000002fe $(ip_to_hex 20 0 0 254) 84dd expected + +-OVS_WAIT_UNTIL( +- [check_packets 'hv1/vif3-tx.pcap expected' \ +- 'hv2/vif3-tx.pcap expected'], +- [$at_diff -F'^---' exp rcv]) ++for count in 1 2 3; do ++ as hv1 reset_pcap_file hv1-vif1 hv1/vif1 ++ as hv1 reset_pcap_file hv1-vif2 hv1/vif2 ++ as hv1 reset_pcap_file hv1-vif3 hv1/vif3 ++ as hv1 reset_pcap_file hv1-vif4 hv1/vif4 ++ as hv2 reset_pcap_file hv2-vif1 hv2/vif1 ++ as hv2 reset_pcap_file hv2-vif2 hv2/vif2 ++ as hv2 reset_pcap_file hv2-vif3 hv2/vif3 ++ as hv2 reset_pcap_file hv2-vif4 hv2/vif4 ++ OVS_WAIT_UNTIL( ++ [check_packets --uniq \ ++ 'hv1/vif3-tx.pcap expected' \ ++ 'hv2/vif3-tx.pcap expected'], ++ [$at_diff -F'^---' exp rcv]) ++done + + # Disable IGMP querier on sw2. 
+ check ovn-nbctl set Logical_Switch sw2 \ +@@ -19776,7 +20068,14 @@ AT_CAPTURE_FILE([sbflows]) + OVS_WAIT_FOR_OUTPUT( + [ovn-sbctl dump-flows > sbflows + ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 | sed 's/table=..//'], 0, +- [ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");) ++ [dnl ++ (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && sctp), action=(reg1 = ip4.dst; reg2[[0..15]] = sctp.dst; ct_lb;) ++ (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && tcp), action=(reg1 = ip4.dst; reg2[[0..15]] = tcp.dst; ct_lb;) ++ (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip4 && udp), action=(reg1 = ip4.dst; reg2[[0..15]] = udp.dst; ct_lb;) ++ (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && sctp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = sctp.dst; ct_lb;) ++ (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && tcp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = tcp.dst; ct_lb;) ++ (ls_in_pre_stateful ), priority=120 , match=(reg0[[2]] == 1 && ip6 && udp), action=(xxreg1 = ip6.dst; reg2[[0..15]] = udp.dst; ct_lb;) ++ (ls_in_stateful ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");) + ]) + + AT_CAPTURE_FILE([sbflows2]) +@@ -22463,7 +22762,7 @@ check ovn-nbctl --wait=hv sync + # wait_conj_id_count COUNT ["ID COUNT [MATCH]"]... + # + # Waits until COUNT flows matching against conj_id appear in the +-# table 45 on hv1's br-int bridge. Makes the flows available in ++# table 44 on hv1's br-int bridge. Makes the flows available in + # "hv1flows", which will be logged on error. + # + # In addition, for each quoted "ID COUNT" or "ID COUNT MATCH", +@@ -22480,7 +22779,7 @@ wait_conj_id_count() { + echo "waiting for $1 conj_id flows..." + OVS_WAIT_FOR_OUTPUT_UNQUOTED( + [ovs-ofctl dump-flows br-int > hv1flows +- grep table=45 hv1flows | grep -c conj_id], ++ grep table=44 hv1flows | grep -c conj_id], + [$retval], [$1 + ]) + +@@ -22489,7 +22788,7 @@ wait_conj_id_count() { + set -- $arg; id=$1 count=$2 match=$3 + echo "checking that there are $count ${match:+$match }flows with conj_id=$id..." 
+ AT_CHECK_UNQUOTED( +- [grep table=45 hv1flows | grep "$match" | grep -c conj_id=$id], ++ [grep table=44 hv1flows | grep "$match" | grep -c conj_id=$id], + [0], [$count + ]) + done +@@ -22514,8 +22813,8 @@ wait_conj_id_count 1 "3 1 udp" + AS_BOX([Add back the tcp ACL.]) + check ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82" allow + wait_conj_id_count 2 "3 1 udp" "4 1 tcp" +-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep udp | grep -c "conj_id=3")]) +-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep tcp | grep -c "conj_id=4")]) ++AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=44 | grep udp | grep -c "conj_id=3")]) ++AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=44 | grep tcp | grep -c "conj_id=4")]) + + AS_BOX([Add another tcp ACL.]) + check ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && inport == @pg0 && ip4 && tcp.dst >= 84 && tcp.dst <= 86" allow +@@ -24317,6 +24616,14 @@ as hv1 ovn-appctl -t ovn-controller debug/resume + wait_column "true" Port_Binding up logical_port=lsp1 + wait_column "true" nb:Logical_Switch_Port up name=lsp1 + ++AS_BOX([ovn-controller should set Port_Binding.up - to false when OVS port is released]) ++check ovs-vsctl remove Interface lsp1 external_ids iface-id ++check ovs-vsctl remove Interface lsp2 external_ids iface-id ++wait_column "false" Port_Binding up logical_port=lsp1 ++wait_column "false" Port_Binding up logical_port=lsp2 ++wait_column "false" Port_Binding up logical_port=lsp1 ++wait_column "false" nb:Logical_Switch_Port up name=lsp1 ++ + OVN_CLEANUP([hv1]) + AT_CLEANUP + +@@ -24454,43 +24761,43 @@ AT_CHECK([kill -0 $(cat hv1/ovn-controller.pid)]) + check ovn-nbctl --wait=hv sync + + # Check OVS flows are installed properly. 
+-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \ ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \ + grep "priority=2002" | grep conjunction | \ + sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x10/0xfff0 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x100/0xff00 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x1000/0xf000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2/0xfffe actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x20/0xffe0 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x200/0xfe00 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2000/0xe000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4/0xfffc actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x40/0xffc0 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x400/0xfc00 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4000/0xc000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8/0xfff8 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x80/0xff80 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x800/0xf800 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8000/0x8000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=1 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x100/0x100,reg15=0x3,metadata=0x1,nw_src=192.168.47.3 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x10/0xfff0 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x100/0xff00 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x1000/0xf000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2/0xfffe actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x20/0xffe0 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x200/0xfe00 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2000/0xe000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4/0xfffc actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x40/0xffc0 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x400/0xfc00 actions=conjunction() +- table=45, 
priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4000/0xc000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8/0xfff8 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x80/0xff80 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x800/0xf800 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8000/0x8000 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=1 actions=conjunction() +- table=45, priority=2002,udp,reg0=0x80/0x80,reg15=0x3,metadata=0x1,nw_src=192.168.47.3 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x10/0xfff0 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x100/0xff00 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x1000/0xf000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2/0xfffe actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x20/0xffe0 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x200/0xfe00 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2000/0xe000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4/0xfffc actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x40/0xffc0 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x400/0xfc00 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4000/0xc000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8/0xfff8 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x80/0xff80 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x800/0xf800 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8000/0x8000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=1 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x100/0x100,reg15=0x3,metadata=0x1,nw_src=192.168.47.3 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x10/0xfff0 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x100/0xff00 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x1000/0xf000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2/0xfffe actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x20/0xffe0 actions=conjunction() ++ table=44, 
priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x200/0xfe00 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2000/0xe000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4/0xfffc actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x40/0xffc0 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x400/0xfc00 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4000/0xc000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8/0xfff8 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x80/0xff80 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x800/0xf800 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8000/0x8000 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=1 actions=conjunction() ++ table=44, priority=2002,udp,reg0=0x80/0x80,reg15=0x3,metadata=0x1,nw_src=192.168.47.3 actions=conjunction() + ]) + + OVN_CLEANUP([hv1]) +@@ -24918,3 +25225,633 @@ AT_CHECK([cat hv2_offlows_table72.txt | grep -v NXST], [1], [dnl OVN_CLEANUP([hv1], [hv2]) AT_CLEANUP @@ -4922,6 +8111,313 @@ index b465784cd..dbc6e549b 100644 + +OVN_CLEANUP([hv1], [hv2]) +AT_CLEANUP +diff --git a/tests/system-ovn.at b/tests/system-ovn.at +index 9819573bb..bd27b01a0 100644 +--- a/tests/system-ovn.at ++++ b/tests/system-ovn.at +@@ -4722,7 +4722,7 @@ OVS_WAIT_UNTIL([ + ]) + + OVS_WAIT_UNTIL([ +- n_pkt=$(ovs-ofctl dump-flows br-int table=45 | grep -v n_packets=0 | \ ++ n_pkt=$(ovs-ofctl dump-flows br-int table=44 | grep -v n_packets=0 | \ + grep controller | grep tp_dst=84 -c) + test $n_pkt -eq 1 + ]) +@@ -5831,3 +5831,131 @@ as + OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d + /.*terminating with signal 15.*/d"]) + AT_CLEANUP ++ ++AT_SETUP([ovn -- No ct_state matches in dp flows when no ACLs in an LS]) ++AT_KEYWORDS([no ct_state match]) ++ovn_start ++ ++OVS_TRAFFIC_VSWITCHD_START() ++ADD_BR([br-int]) ++ ++# Set external-ids in br-int needed for ovn-controller ++ovs-vsctl \ ++ -- set Open_vSwitch . external-ids:system-id=hv1 \ ++ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ ++ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ ++ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ ++ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true ++ ++# Start ovn-controller ++start_daemon ovn-controller ++ ++check ovn-nbctl ls-add sw0 ++ ++check ovn-nbctl lsp-add sw0 sw0-p1 ++check ovn-nbctl lsp-set-addresses sw0-p1 "50:54:00:00:00:03" ++check ovn-nbctl lsp-set-port-security sw0-p1 "50:54:00:00:00:03" ++ ++check ovn-nbctl lsp-add sw0 sw0-p2 ++check ovn-nbctl lsp-set-addresses sw0-p2 "50:54:00:00:00:04 10.0.0.4" ++check ovn-nbctl lsp-set-port-security sw0-p2 "50:54:00:00:00:04 10.0.0.4" ++ ++ ++# Create the second logical switch with one port and configure some ACLs. ++check ovn-nbctl ls-add sw1 ++check ovn-nbctl lsp-add sw1 sw1-p1 ++ ++# Create port group and ACLs for sw1 ports. 
++check ovn-nbctl pg-add pg1 sw1-p1 ++check ovn-nbctl acl-add pg1 from-lport 1002 "ip" allow-related ++check ovn-nbctl acl-add pg1 to-lport 1002 "ip" allow-related ++ ++ ++OVN_POPULATE_ARP ++ovn-nbctl --wait=hv sync ++ ++ADD_NAMESPACES(sw0-p1) ++ADD_VETH(sw0-p1, sw0-p1, br-int, "10.0.0.3/24", "50:54:00:00:00:03", \ ++ "10.0.0.1") ++ ++ ++ADD_NAMESPACES(sw0-p2) ++ADD_VETH(sw0-p2, sw0-p2, br-int, "10.0.0.4/24", "50:54:00:00:00:04", \ ++ "10.0.0.1") ++ ++ADD_NAMESPACES(sw1-p1) ++ADD_VETH(sw1-p1, sw1-p1, br-int, "20.0.0.4/24", "30:54:00:00:00:04", \ ++ "20.0.0.1") ++ ++wait_for_ports_up ++ ++NS_CHECK_EXEC([sw0-p1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.4 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++ovs-appctl dpctl/dump-flows ++ ++# sw1-p1 may send IPv6 traffic. So filter this out. Since sw1-p1 has ++# ACLs configured, the datapath flows for the packets from sw1-p1 will have ++# matches on ct_state and ct_label fields. ++# Since sw0 doesn't have any ACLs, there should be no match on ct fields. ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep ct_state | grep -v ipv6 -c], [1], [dnl ++0 ++]) ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep ct_label | grep -v ipv6 -c], [1], [dnl ++0 ++]) ++ ++# Add an ACL to sw0. ++check ovn-nbctl --wait=hv acl-add sw0 to-lport 1002 ip allow-related ++ ++NS_CHECK_EXEC([sw0-p1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.4 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++ovs-appctl dpctl/dump-flows ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep ct_state | grep -v ipv6 -c], [0], [ignore]) ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep ct_label | grep -v ipv6 -c], [0], [ignore]) ++ ++# Clear ACL for sw0 ++check ovn-nbctl --wait=hv clear logical_switch sw0 acls ++ ++check ovs-appctl dpctl/del-flows ++ ++check ovn-nbctl --wait=hv sync ++ ++NS_CHECK_EXEC([sw0-p1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.4 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++ovs-appctl dpctl/dump-flows ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep ct_state | grep -v ipv6 -c], [1], [dnl ++0 ++]) ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep ct_label | grep -v ipv6 -c], [1], [dnl ++0 ++]) ++ ++OVS_APP_EXIT_AND_WAIT([ovn-controller]) ++ ++as ovn-sb ++OVS_APP_EXIT_AND_WAIT([ovsdb-server]) ++ ++as ovn-nb ++OVS_APP_EXIT_AND_WAIT([ovsdb-server]) ++ ++as northd ++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) ++ ++as ++OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d ++/connection dropped.*/d"]) ++AT_CLEANUP +diff --git a/utilities/ovn-ctl b/utilities/ovn-ctl +index 967db6d6c..c52c17ee0 100755 +--- a/utilities/ovn-ctl ++++ b/utilities/ovn-ctl +@@ -45,18 +45,12 @@ pidfile_is_running () { + test -e "$pidfile" && [ -s "$pidfile" ] && pid=`cat "$pidfile"` && pid_exists "$pid" + } >/dev/null 2>&1 + +-stop_xx_ovsdb() { +- if pidfile_is_running $1; then +- ovn-appctl -t $OVN_RUNDIR/$2 exit +- fi +-} +- + stop_nb_ovsdb() { +- stop_xx_ovsdb $DB_NB_PID ovnnb_db.ctl ++ OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovnnb_db $DB_NB_PID $OVN_RUNDIR/ovnnb_db.ctl + } + + stop_sb_ovsdb() { +- stop_xx_ovsdb $DB_SB_PID ovnsb_db.ctl ++ OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovnsb_db $DB_SB_PID $OVN_RUNDIR/ovnsb_db.ctl + } + + stop_ovsdb () { +@@ -65,11 +59,11 @@ stop_ovsdb () { + } + + stop_ic_nb_ovsdb() { +- stop_xx_ovsdb $DB_IC_NB_PID ovn_ic_nb_db.ctl ++ OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovn_ic_nb_db $DB_IC_NB_PID $OVN_RUNDIR/ovn_ic_nb_db.ctl + } + + 
stop_ic_sb_ovsdb() { +- stop_xx_ovsdb $DB_IC_SB_PID ovn_ic_sb_db.ctl ++ OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovn_ic_sb_db $DB_IC_SB_PID $OVN_RUNDIR/ovn_ic_sb_db.ctl + } + + stop_ic_ovsdb () { +@@ -590,7 +584,7 @@ stop_ic () { + } + + stop_controller () { +- OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovn-controller "$@" ++ OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovn-controller "" "" "$@" + } + + stop_controller_vtep () { +diff --git a/utilities/ovn-lib.in b/utilities/ovn-lib.in +index 016815626..301cc5712 100644 +--- a/utilities/ovn-lib.in ++++ b/utilities/ovn-lib.in +@@ -137,10 +137,22 @@ start_ovn_daemon () { + } + + stop_ovn_daemon () { +- if test -e "$ovn_rundir/$1.pid"; then +- if pid=`cat "$ovn_rundir/$1.pid"`; then ++ local pid_file=$2 ++ local ctl_file=$3 ++ local other_args=$4 ++ ++ if [ -z "$pid_file" ]; then ++ pid_file="$ovn_rundir/$1.pid" ++ fi ++ ++ if test -e "$pid_file"; then ++ if pid=`cat "$pid_file"`; then ++ if [ -z "$ctl_file" ]; then ++ ctl_file="$ovn_rundir/$1.$pid.ctl" ++ fi ++ + if pid_exists "$pid" >/dev/null 2>&1; then :; else +- rm -f $ovn_rundir/$1.$pid.ctl $ovn_rundir/$1.$pid ++ rm -f $ctl_file $pid_file + return 0 + fi + +@@ -148,7 +160,7 @@ stop_ovn_daemon () { + actions="TERM .1 .25 .65 1 1 1 1 \ + KILL 1 1 1 2 10 15 30 \ + FAIL" +- version=`ovs-appctl -T 1 -t $ovn_rundir/$1.$pid.ctl version \ ++ version=`ovs-appctl -T 1 -t $ctl_file version \ + | awk 'NR==1{print $NF}'` + + # Use `ovs-appctl exit` only if the running daemon version +@@ -159,20 +171,36 @@ stop_ovn_daemon () { + if version_geq "$version" "2.5.90"; then + actions="$graceful $actions" + fi ++ actiontype="" + for action in $actions; do + if pid_exists "$pid" >/dev/null 2>&1; then :; else +- return 0 ++ # pid does not exist. ++ if [ -n "$actiontype" ]; then ++ return 0 ++ fi ++ # But, does the file exist? We may have had a daemon ++ # segfault with `ovs-appctl exit`. Check one more time ++ # before deciding that the daemon is dead. ++ [ -e "$pid_file" ] && sleep 2 && pid=`cat "$pid_file"` 2>/dev/null ++ if pid_exists "$pid" >/dev/null 2>&1; then :; else ++ return 0 ++ fi + fi + case $action in + EXIT) + action "Exiting $1 ($pid)" \ +- ${bindir}/ovs-appctl -T 1 -t $ovn_rundir/$1.$pid.ctl exit $2 ++ ${bindir}/ovs-appctl -T 1 -t $ctl_file exit $other_args ++ # The above command could have resulted in delayed ++ # daemon segfault. And if a monitor is running, it ++ # would restart the daemon giving it a new pid. + ;; + TERM) + action "Killing $1 ($pid)" kill $pid ++ actiontype="force" + ;; + KILL) + action "Killing $1 ($pid) with SIGKILL" kill -9 $pid ++ actiontype="force" + ;; + FAIL) + log_failure_msg "Killing $1 ($pid) failed" +diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c +index 2c77f4ba7..51af138c6 100644 +--- a/utilities/ovn-nbctl.c ++++ b/utilities/ovn-nbctl.c +@@ -3866,11 +3866,15 @@ static void + print_routing_policy(const struct nbrec_logical_router_policy *policy, + struct ds *s) + { +- if (policy->nexthop != NULL) { +- char *next_hop = normalize_prefix_str(policy->nexthop); +- ds_put_format(s, "%10"PRId64" %50s %15s %25s", policy->priority, +- policy->match, policy->action, next_hop); +- free(next_hop); ++ if (policy->n_nexthops) { ++ ds_put_format(s, "%10"PRId64" %50s %15s", policy->priority, ++ policy->match, policy->action); ++ for (int i = 0; i < policy->n_nexthops; i++) { ++ char *next_hop = normalize_prefix_str(policy->nexthops[i]); ++ char *fmt = i ? 
", %s" : " %25s"; ++ ds_put_format(s, fmt, next_hop); ++ free(next_hop); ++ } + } else { + ds_put_format(s, "%10"PRId64" %50s %15s", policy->priority, + policy->match, policy->action); +@@ -4068,7 +4072,9 @@ nbctl_lr_route_add(struct ctl_context *ctx) + goto cleanup; + } + } else if (route) { +- ctl_error(ctx, "duplicate nexthop for the same ECMP route"); ++ if (!may_exist) { ++ ctl_error(ctx, "duplicate nexthop for the same ECMP route"); ++ } + goto cleanup; + } + diff --git a/utilities/ovndb-servers.ocf b/utilities/ovndb-servers.ocf index 7351c7d64..eba9c97a1 100755 --- a/utilities/ovndb-servers.ocf diff --git a/SPECS/ovn-2021.spec b/SPECS/ovn-2021.spec index b85781f..4d13e3a 100644 --- a/SPECS/ovn-2021.spec +++ b/SPECS/ovn-2021.spec @@ -51,7 +51,7 @@ Summary: Open Virtual Network support Group: System Environment/Daemons URL: http://www.ovn.org/ Version: 21.03.0 -Release: 21%{?commit0:.%{date}git%{shortcommit0}}%{?dist} +Release: 40%{?commit0:.%{date}git%{shortcommit0}}%{?dist} Provides: openvswitch%{pkgver}-ovn-common = %{?epoch:%{epoch}:}%{version}-%{release} Obsoletes: openvswitch%{pkgver}-ovn-common < 2.11.0-1 @@ -526,6 +526,101 @@ fi %{_unitdir}/ovn-controller-vtep.service %changelog +* Thu May 27 2021 Dumitru Ceara - 21.03.0-40 +- if-status: Add OVS interface status management module. (#1952846) + [Gerrit: 7272e3cb2866d65dfffda5fa0b6f062a086bcbcf] + [Upstream: 5c3371922994c2d8a3610c9353902156db27d108] + +* Thu May 27 2021 Han Zhou - 21.03.0-39 +- ovn-controller.c: Remove extra local_lports_changed setting. + [Gerrit: 8da45224b868d72afce47778a577ecb602fc8652] + [Upstream: fa28ba6963650d5f8ed90865df3b81699a0a9b60] + +* Fri May 21 2021 Lorenzo Bianconi - 21.03.0-38 +- physical: do not forward traffic from localport to a localnet one + [Gerrit: 89e27e959ab66592c3b716bec3e2e757161b2586] + [Upstream: 96959e56d634c8d888af9e3ee340602593c7e4fa] + +* Thu May 20 2021 Lorenzo Bianconi - 21.03.0-37 +- ovn-nbctl: do not report an error for duplicated ecmp routes with --may-exist + [Gerrit: 6d9a0af88ea8db8cc048fb913a4316e1fcbe32ad] + [Upstream: f63b609a0610a8c9fcd13c38f3acd3526b8a8b0c] + +* Wed May 19 2021 Lorenzo Bianconi - 21.03.0-36 +- controller: fix physical flow update for localport + [Gerrit: d040f88b0b0b5c5e42004996f128350e8fd420ca] + [Upstream: 925ed83a6c8064fcb93250acd7493b59c034fa7b] + +* Tue May 18 2021 Mark Michelson - 21.03.0-35 +- expr: crush the result of a sorted OR expression. + [Gerrit: 9cb6c3e6a3e7a21c07169cf631ebdcd94398025e] + [Upstream: 3dab95aa5c8c6ea97395127dd2acf27487fd1cd5] + +* Fri May 14 2021 Numan Siddique - 21.03.0-34 +- Fix compilation error introduced in the previous commit. + [Gerrit: 0cc5455f8ba4a57c153811b7e93bc4c4d0a4e97d] + [Upstream: 0675bb01221b9b2d5b0b7b55715979204454cada] + +* Fri May 14 2021 Ilya Maximets - 21.03.0-33 +- northd: Combine router arp flows. (#1945415) + [Gerrit: 0e60182997ecc3a6606772d78609040203dbe67e] + [Upstream: ea6ee901ff9107a084bc830a8a38c4e0bd9f75f7] + +* Wed May 12 2021 Flavio Fernandes - 21.03.0-32 +- ovn-controller: Ensure br-int is using secure fail-mode (#1957025) + [Gerrit: f56d885a7b4b9776a677a98ce758a177238e043f] + [Upstream: 9cc334bc1a036a93cc1a541513d48f4df6933e9b] + +* Tue May 11 2021 Numan Siddique - 21.03.0-31 +- northd: Support flow offloading for logical switches with no ACLs. (#1955191) + [Gerrit: 80f98b4a82f3a7cece6a33a5190b748b86cf868c] + [Upstream: 127bf166ccf4a2509f670c48a00b0340039f20d2] + +* Tue May 11 2021 Numan Siddique - 21.03.0-30 +- northd: Provide the option to not use ct.inv in lflows. 
+ [Gerrit: 65cf2afebcdaa70941ba953b117da82e3f97f6fe] + [Upstream: 3bb91366a6b0d60df5ce8f9c7f6427f7d37dfdd4] + +* Tue May 11 2021 Numan Siddique - 21.03.0-29 +- northd: Optimize ct nat for load balancer traffic. + [Gerrit: 6e1a063b8e7f90ff7bfc95ec65347088d6ff8225] + [Upstream: 0038579d192802fff03c3594e4f85dab4f7af2bd] + +* Fri Apr 30 2021 Dumitru Ceara - 21.03.0-28 +- binding: Don't reset expected seqno for interfaces already being installed. (#1946420) + [Gerrit: 7126b44ee9a5d74d77d5b8326b2cf87630f92cec] + [Upstream: 9c9b6b1d98e38d3d7a1dcf01741b095a6b9e8f0c] + +* Mon Apr 26 2021 Dumitru Ceara - 21.03.0-27 +- tests: Improve test "IGMP snoop/querier/relay". (#1941067) + [Gerrit: 6100b80c194a9988f0967a2a232065eca5940fcd] + [Upstream: f5a27f67d825eb306d3d39815293cb2191c89716] + +* Thu Apr 22 2021 Lorenzo Bianconi - 21.03.0-26 +- ovn-nbctl: dump next-hop for router policies + [Gerrit: 6ebe05a0b9765f66a7f1350882c122cccd8f7304] + [Upstream: d8b282b2852e2b0d4e44963b3b9ade8d28a0b899] + +* Wed Apr 21 2021 Dan Williams - 21.03.0-25 +- ovn-ctl: stop databases with stop_ovn_daemon() (#1944239) + [Gerrit: dac053806bb3e061669fa449f2c704fbef9aff1d] + [Upstream: N/A] + +* Wed Apr 21 2021 Dan Williams - 21.03.0-24 +- ovn-lib: harmonize stop_ovn_daemon() with ovs-lib + [Gerrit: 92d983366b8cbd513b1d69f729a920204a2c2973] + [Upstream: N/A] + +* Wed Apr 21 2021 Numan Siddique - 21.03.0-23 +- tests: Fix frequent failure of "4 HV, 1 LS, 1 LR, packet test with HA distributed router gateway port:". + [Gerrit: 56233e8004c1651add1211d8217cc23d9a74eea7] + [Upstream: N/A] + +* Wed Apr 21 2021 Dumitru Ceara - 21.03.0-22 +- controller: Monitor all logical flows that refer to datapath groups. (#1947056) + [Gerrit: ca662b1557adad61fba46ec249d5c5511738bcac] + [Upstream: N/A] + * Wed Apr 14 2021 Numan Siddique - 21.03.0-21 - controller: Fix virtual lport I-P handling. (#1947823) [Gerrit: 0938c49138dac280bbc59148fe87dc0debed6f62]
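Several of the fixes listed in the changelog above are driven entirely from the northbound database or the ovn-nbctl CLI. The sketch below is only illustrative: it assumes the standard ovn-nbctl route and policy commands, and the router name lr0 and the addresses are placeholders rather than part of the patch; the option and error names come from the hunks and changelog entries above.

    # 21.03.0-30: drop ct.inv matches from the generated ACL logical flows;
    # setting the option back to true restores the original behaviour.
    ovn-nbctl --wait=sb set NB_Global . options:use_ct_inv_match=false

    # 21.03.0-37: with --may-exist, re-adding a nexthop that already belongs
    # to an ECMP route succeeds silently instead of failing with
    # "duplicate nexthop for the same ECMP route".
    ovn-nbctl --may-exist --ecmp lr-route-add lr0 192.168.1.0/24 10.0.0.2

    # 21.03.0-26: listing router policies now prints every nexthop of a
    # reroute policy, comma separated.
    ovn-nbctl lr-policy-list lr0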
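The ovn-ctl and ovn-lib.in hunks above (21.03.0-24/-25) change the stop_ovn_daemon calling convention: the daemon name remains the first argument, but a pid file, a control socket and an extra argument for "ovs-appctl exit" may now be passed explicitly, with fallbacks to $ovn_rundir/NAME.pid and $ovn_rundir/NAME.$pid.ctl when omitted. The following summary is taken directly from those hunks:

    # stop_ovn_daemon NAME [PID_FILE] [CTL_FILE] [EXIT_ARG]
    # The OVSDB wrappers pass pid and ctl files explicitly because the
    # database sockets do not follow the NAME.$pid.ctl pattern.
    stop_nb_ovsdb() {
        OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovnnb_db $DB_NB_PID $OVN_RUNDIR/ovnnb_db.ctl
    }

    # ovn-controller keeps the defaults; the two empty strings hold the
    # pid-file and ctl-file slots so that any extra argument still reaches
    # "ovs-appctl exit".
    stop_controller() {
        OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovn-controller "" "" "$@"
    }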