diff --git a/.ovn.metadata b/.ovn.metadata
index 2dec392..20291e2 100644
--- a/.ovn.metadata
+++ b/.ovn.metadata
@@ -1,5 +1,5 @@
002450621b33c5690060345b0aac25bc2426d675 SOURCES/docutils-0.12.tar.gz
-b7cb5bddcefce929e60e4533da84d13dc8ce4fd0 SOURCES/openvswitch-ac85cdb.tar.gz
-35a22f67bf3675fce0ca8a39ee4aed7e0b716560 SOURCES/ovn-21.03.0.tar.gz
+90c634ea30bd1f8c09eb1e65dea10b5ce4dbb3fb SOURCES/openvswitch-e6ad4d8.tar.gz
+20793f6d7758400adb7c51c1df631083fd5fee4b SOURCES/ovn-21.06.0.tar.gz
d34f96421a86004aa5d26ecf975edefd09f948b1 SOURCES/Pygments-1.4.tar.gz
6beb30f18ffac3de7689b7fd63e9a8a7d9c8df3a SOURCES/Sphinx-1.1.3.tar.gz
diff --git a/SOURCES/ovn-21.06.0.patch b/SOURCES/ovn-21.06.0.patch
new file mode 100644
index 0000000..365364d
--- /dev/null
+++ b/SOURCES/ovn-21.06.0.patch
@@ -0,0 +1,3527 @@
+diff --git a/TODO.rst b/TODO.rst
+index c89fe203e..618ea4844 100644
+--- a/TODO.rst
++++ b/TODO.rst
+@@ -164,3 +164,9 @@ OVN To-do List
+ to find a way of determining if routing has already been executed (on a
+ different hypervisor) for the IP multicast packet being processed locally
+ in the router pipeline.
++
++* ovn-controller Incremental processing
++
++ * physical.c has a global simap - localvif_to_ofport - which stores the
++ local OVS interfaces and their ofport numbers. Move this to the engine data
++ of the engine node - ed_type_pflow_output.
+diff --git a/controller/binding.c b/controller/binding.c
+index 7fde0fdbb..ba558efdb 100644
+--- a/controller/binding.c
++++ b/controller/binding.c
+@@ -22,6 +22,7 @@
+ #include "patch.h"
+
+ #include "lib/bitmap.h"
++#include "lib/hmapx.h"
+ #include "openvswitch/poll-loop.h"
+ #include "lib/sset.h"
+ #include "lib/util.h"
+@@ -108,6 +109,7 @@ add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
+ hmap_insert(local_datapaths, &ld->hmap_node, dp_key);
+ ld->datapath = datapath;
+ ld->localnet_port = NULL;
++ shash_init(&ld->external_ports);
+ ld->has_local_l3gateway = has_local_l3gateway;
+
+ if (tracked_datapaths) {
+@@ -474,6 +476,18 @@ is_network_plugged(const struct sbrec_port_binding *binding_rec,
+ return network ? !!shash_find_data(bridge_mappings, network) : false;
+ }
+
++static void
++update_ld_external_ports(const struct sbrec_port_binding *binding_rec,
++ struct hmap *local_datapaths)
++{
++ struct local_datapath *ld = get_local_datapath(
++ local_datapaths, binding_rec->datapath->tunnel_key);
++ if (ld) {
++ shash_replace(&ld->external_ports, binding_rec->logical_port,
++ binding_rec);
++ }
++}
++
+ static void
+ update_ld_localnet_port(const struct sbrec_port_binding *binding_rec,
+ struct shash *bridge_mappings,
+@@ -531,38 +545,41 @@ remove_local_lports(const char *iface_id, struct binding_ctx_out *b_ctx)
+ }
+ }
+
+-/* Add a port binding ID (of the form "dp-key"_"port-key") to the set of local
+- * lport IDs. Also track if the set has changed.
++/* Add a port binding to the set of locally relevant lports.
++ * Also track if the set has changed.
+ */
+ static void
+-update_local_lport_ids(const struct sbrec_port_binding *pb,
+- struct binding_ctx_out *b_ctx)
++update_related_lport(const struct sbrec_port_binding *pb,
++ struct binding_ctx_out *b_ctx)
+ {
+ char buf[16];
+ get_unique_lport_key(pb->datapath->tunnel_key, pb->tunnel_key,
+ buf, sizeof(buf));
+- if (sset_add(b_ctx->local_lport_ids, buf) != NULL) {
+- b_ctx->local_lport_ids_changed = true;
++ if (sset_add(&b_ctx->related_lports->lport_ids, buf) != NULL) {
++ b_ctx->related_lports_changed = true;
+
+ if (b_ctx->tracked_dp_bindings) {
+ /* Add the 'pb' to the tracked_datapaths. */
+ tracked_binding_datapath_lport_add(pb, b_ctx->tracked_dp_bindings);
+ }
+ }
++ sset_add(&b_ctx->related_lports->lport_names, pb->logical_port);
+ }
+
+-/* Remove a port binding id from the set of local lport IDs. Also track if
+- * the set has changed.
++/* Remove a port binding id from the set of locally relevant lports.
++ * Also track if the set has changed.
+ */
+ static void
+-remove_local_lport_ids(const struct sbrec_port_binding *pb,
+- struct binding_ctx_out *b_ctx)
++remove_related_lport(const struct sbrec_port_binding *pb,
++ struct binding_ctx_out *b_ctx)
+ {
+ char buf[16];
+ get_unique_lport_key(pb->datapath->tunnel_key, pb->tunnel_key,
+ buf, sizeof(buf));
+- if (sset_find_and_delete(b_ctx->local_lport_ids, buf)) {
+- b_ctx->local_lport_ids_changed = true;
++ sset_find_and_delete(&b_ctx->related_lports->lport_names,
++ pb->logical_port);
++ if (sset_find_and_delete(&b_ctx->related_lports->lport_ids, buf)) {
++ b_ctx->related_lports_changed = true;
+
+ if (b_ctx->tracked_dp_bindings) {
+ /* Add the 'pb' to the tracked_datapaths. */
+@@ -678,6 +695,20 @@ static struct binding_lport *binding_lport_check_and_cleanup(
+
+ static char *get_lport_type_str(enum en_lport_type lport_type);
+
++void
++related_lports_init(struct related_lports *rp)
++{
++ sset_init(&rp->lport_names);
++ sset_init(&rp->lport_ids);
++}
++
++void
++related_lports_destroy(struct related_lports *rp)
++{
++ sset_destroy(&rp->lport_names);
++ sset_destroy(&rp->lport_ids);
++}
++
+ void
+ local_binding_data_init(struct local_binding_data *lbinding_data)
+ {
+@@ -1172,7 +1203,7 @@ release_binding_lport(const struct sbrec_chassis *chassis_rec,
+ struct binding_ctx_out *b_ctx_out)
+ {
+ if (is_binding_lport_this_chassis(b_lport, chassis_rec)) {
+- remove_local_lport_ids(b_lport->pb, b_ctx_out);
++ remove_related_lport(b_lport->pb, b_ctx_out);
+ if (!release_lport(b_lport->pb, sb_readonly,
+ b_ctx_out->tracked_dp_bindings,
+ b_ctx_out->if_mgr)) {
+@@ -1214,7 +1245,7 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
+ pb->datapath, false,
+ b_ctx_out->local_datapaths,
+ b_ctx_out->tracked_dp_bindings);
+- update_local_lport_ids(pb, b_ctx_out);
++ update_related_lport(pb, b_ctx_out);
+ update_local_lports(pb->logical_port, b_ctx_out);
+ if (b_lport->lbinding->iface && qos_map && b_ctx_in->ovs_idl_txn) {
+ get_qos_params(pb, qos_map);
+@@ -1405,7 +1436,7 @@ consider_virtual_lport(const struct sbrec_port_binding *pb,
+ * its entry from the local_lport_ids if present. This is required
+ * when a virtual port moves from one chassis to other.*/
+ if (!virtual_b_lport) {
+- remove_local_lport_ids(pb, b_ctx_out);
++ remove_related_lport(pb, b_ctx_out);
+ }
+
+ return true;
+@@ -1430,7 +1461,7 @@ consider_nonvif_lport_(const struct sbrec_port_binding *pb,
+ b_ctx_out->local_datapaths,
+ b_ctx_out->tracked_dp_bindings);
+
+- update_local_lport_ids(pb, b_ctx_out);
++ update_related_lport(pb, b_ctx_out);
+ return claim_lport(pb, NULL, b_ctx_in->chassis_rec, NULL,
+ !b_ctx_in->ovnsb_idl_txn, false,
+ b_ctx_out->tracked_dp_bindings,
+@@ -1482,7 +1513,7 @@ consider_localnet_lport(const struct sbrec_port_binding *pb,
+ get_qos_params(pb, qos_map);
+ }
+
+- update_local_lport_ids(pb, b_ctx_out);
++ update_related_lport(pb, b_ctx_out);
+ }
+
+ static bool
+@@ -1512,7 +1543,7 @@ consider_ha_lport(const struct sbrec_port_binding *pb,
+ pb->datapath, false,
+ b_ctx_out->local_datapaths,
+ b_ctx_out->tracked_dp_bindings);
+- update_local_lport_ids(pb, b_ctx_out);
++ update_related_lport(pb, b_ctx_out);
+ }
+
+ return consider_nonvif_lport_(pb, our_chassis, false, b_ctx_in, b_ctx_out);
+@@ -1614,8 +1645,9 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
+ !sset_is_empty(b_ctx_out->egress_ifaces) ? &qos_map : NULL;
+
+ struct ovs_list localnet_lports = OVS_LIST_INITIALIZER(&localnet_lports);
++ struct ovs_list external_lports = OVS_LIST_INITIALIZER(&external_lports);
+
+- struct localnet_lport {
++ struct lport {
+ struct ovs_list list_node;
+ const struct sbrec_port_binding *pb;
+ };
+@@ -1634,7 +1666,7 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
+ case LP_PATCH:
+ case LP_LOCALPORT:
+ case LP_VTEP:
+- update_local_lport_ids(pb, b_ctx_out);
++ update_related_lport(pb, b_ctx_out);
+ break;
+
+ case LP_VIF:
+@@ -1663,11 +1695,14 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
+
+ case LP_EXTERNAL:
+ consider_external_lport(pb, b_ctx_in, b_ctx_out);
++ struct lport *ext_lport = xmalloc(sizeof *ext_lport);
++ ext_lport->pb = pb;
++ ovs_list_push_back(&external_lports, &ext_lport->list_node);
+ break;
+
+ case LP_LOCALNET: {
+ consider_localnet_lport(pb, b_ctx_in, b_ctx_out, &qos_map);
+- struct localnet_lport *lnet_lport = xmalloc(sizeof *lnet_lport);
++ struct lport *lnet_lport = xmalloc(sizeof *lnet_lport);
+ lnet_lport->pb = pb;
+ ovs_list_push_back(&localnet_lports, &lnet_lport->list_node);
+ break;
+@@ -1694,7 +1729,7 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
+ /* Run through each localnet lport list to see if it is a localnet port
+ * on local datapaths discovered from above loop, and update the
+ * corresponding local datapath accordingly. */
+- struct localnet_lport *lnet_lport;
++ struct lport *lnet_lport;
+ LIST_FOR_EACH_POP (lnet_lport, list_node, &localnet_lports) {
+ update_ld_localnet_port(lnet_lport->pb, &bridge_mappings,
+ b_ctx_out->egress_ifaces,
+@@ -1702,6 +1737,15 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
+ free(lnet_lport);
+ }
+
++ /* Run through external lport list to see if these are external ports
++ * on local datapaths discovered from above loop, and update the
++ * corresponding local datapath accordingly. */
++ struct lport *ext_lport;
++ LIST_FOR_EACH_POP (ext_lport, list_node, &external_lports) {
++ update_ld_external_ports(ext_lport->pb, b_ctx_out->local_datapaths);
++ free(ext_lport);
++ }
++
+ shash_destroy(&bridge_mappings);
+
+ if (!sset_is_empty(b_ctx_out->egress_ifaces)
+@@ -1895,7 +1939,7 @@ remove_pb_from_local_datapath(const struct sbrec_port_binding *pb,
+ struct binding_ctx_out *b_ctx_out,
+ struct local_datapath *ld)
+ {
+- remove_local_lport_ids(pb, b_ctx_out);
++ remove_related_lport(pb, b_ctx_out);
+ if (!strcmp(pb->type, "patch") ||
+ !strcmp(pb->type, "l3gateway")) {
+ remove_local_datapath_peer_port(pb, ld, b_ctx_out->local_datapaths);
+@@ -1904,6 +1948,8 @@ remove_pb_from_local_datapath(const struct sbrec_port_binding *pb,
+ pb->logical_port)) {
+ ld->localnet_port = NULL;
+ }
++ } else if (!strcmp(pb->type, "external")) {
++ shash_find_and_delete(&ld->external_ports, pb->logical_port);
+ }
+
+ if (!strcmp(pb->type, "l3gateway")) {
+@@ -2407,6 +2453,9 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
+ shash_add(&deleted_virtual_pbs, pb->logical_port, pb);
+ } else {
+ shash_add(&deleted_other_pbs, pb->logical_port, pb);
++ if (lport_type == LP_EXTERNAL) {
++ hmapx_add(b_ctx_out->extport_updated_datapaths, pb->datapath);
++ }
+ }
+ }
+
+@@ -2502,7 +2551,7 @@ delete_done:
+ case LP_PATCH:
+ case LP_LOCALPORT:
+ case LP_VTEP:
+- update_local_lport_ids(pb, b_ctx_out);
++ update_related_lport(pb, b_ctx_out);
+ if (lport_type == LP_PATCH) {
+ if (!ld) {
+ /* If 'ld' for this lport is not present, then check if
+@@ -2561,6 +2610,8 @@ delete_done:
+
+ case LP_EXTERNAL:
+ handled = consider_external_lport(pb, b_ctx_in, b_ctx_out);
++ update_ld_external_ports(pb, b_ctx_out->local_datapaths);
++ hmapx_add(b_ctx_out->extport_updated_datapaths, pb->datapath);
+ break;
+
+ case LP_LOCALNET: {
+@@ -2926,23 +2977,3 @@ cleanup:
+
+ return b_lport;
+ }
+-
+-struct sset *
+-binding_collect_local_binding_lports(struct local_binding_data *lbinding_data)
+-{
+- struct sset *lports = xzalloc(sizeof *lports);
+- sset_init(lports);
+- struct shash_node *shash_node;
+- SHASH_FOR_EACH (shash_node, &lbinding_data->lports) {
+- struct binding_lport *b_lport = shash_node->data;
+- sset_add(lports, b_lport->name);
+- }
+- return lports;
+-}
+-
+-void
+-binding_destroy_local_binding_lports(struct sset *lports)
+-{
+- sset_destroy(lports);
+- free(lports);
+-}
+diff --git a/controller/binding.h b/controller/binding.h
+index 8f3289476..8fd54092e 100644
+--- a/controller/binding.h
++++ b/controller/binding.h
+@@ -22,6 +22,7 @@
+ #include "openvswitch/hmap.h"
+ #include "openvswitch/uuid.h"
+ #include "openvswitch/list.h"
++#include "sset.h"
+
+ struct hmap;
+ struct ovsdb_idl;
+@@ -56,6 +57,19 @@ struct binding_ctx_in {
+ const struct ovsrec_interface_table *iface_table;
+ };
+
++/* Locally relevant port bindings, e.g., VIFs that might be bound locally,
++ * patch ports.
++ */
++struct related_lports {
++ struct sset lport_names; /* Set of port names. */
++ struct sset lport_ids; /* Set of <datapath-tunnel-key>_<port-tunnel-key>
++ * IDs for fast lookup.
++ */
++};
++
++void related_lports_init(struct related_lports *);
++void related_lports_destroy(struct related_lports *);
++
+ struct binding_ctx_out {
+ struct hmap *local_datapaths;
+ struct local_binding_data *lbinding_data;
+@@ -65,11 +79,9 @@ struct binding_ctx_out {
+ /* Track if local_lports have been updated. */
+ bool local_lports_changed;
+
+- /* sset of local lport ids in the format
+- * <datapath-tunnel-key>_<port-tunnel-key>. */
+- struct sset *local_lport_ids;
+- /* Track if local_lport_ids has been updated. */
+- bool local_lport_ids_changed;
++ /* Port bindings that are relevant to the local chassis. */
++ struct related_lports *related_lports;
++ bool related_lports_changed;
+
+ /* Track if non-vif port bindings (e.g., patch, external) have been
+ * added/deleted.
+@@ -88,6 +100,8 @@ struct binding_ctx_out {
+ struct hmap *tracked_dp_bindings;
+
+ struct if_status_mgr *if_mgr;
++
++ struct hmapx *extport_updated_datapaths;
+ };
+
+ struct local_binding_data {
+@@ -133,13 +147,4 @@ bool binding_handle_port_binding_changes(struct binding_ctx_in *,
+ void binding_tracked_dp_destroy(struct hmap *tracked_datapaths);
+
+ void binding_dump_local_bindings(struct local_binding_data *, struct ds *);
+-
+-/* Generates a sset of lport names from local_binding_data.
+- * Note: the caller is responsible for destroying and freeing the returned
+- * sset, by calling binding_detroy_local_binding_lports(). */
+-struct sset *binding_collect_local_binding_lports(struct local_binding_data *);
+-
+-/* Destroy and free the lports sset returned by
+- * binding_collect_local_binding_lports(). */
+-void binding_destroy_local_binding_lports(struct sset *lports);
+ #endif /* controller/binding.h */
+diff --git a/controller/lflow.c b/controller/lflow.c
+index 680b8cca1..4270d0a33 100644
+--- a/controller/lflow.c
++++ b/controller/lflow.c
+@@ -611,7 +611,7 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
+ get_unique_lport_key(dp_id, port_id, buf, sizeof(buf));
+ lflow_resource_add(l_ctx_out->lfrr, REF_TYPE_PORTBINDING, buf,
+ &lflow->header_.uuid);
+- if (!sset_contains(l_ctx_in->local_lport_ids, buf)) {
++ if (!sset_contains(l_ctx_in->related_lport_ids, buf)) {
+ VLOG_DBG("lflow "UUID_FMT
+ " port %s in match is not local, skip",
+ UUID_ARGS(&lflow->header_.uuid),
+diff --git a/controller/lflow.h b/controller/lflow.h
+index 3c929d8a6..076b05beb 100644
+--- a/controller/lflow.h
++++ b/controller/lflow.h
+@@ -143,7 +143,7 @@ struct lflow_ctx_in {
+ const struct shash *addr_sets;
+ const struct shash *port_groups;
+ const struct sset *active_tunnels;
+- const struct sset *local_lport_ids;
++ const struct sset *related_lport_ids;
+ };
+
+ struct lflow_ctx_out {
+diff --git a/controller/ofctrl.c b/controller/ofctrl.c
+index c29c3d180..053631590 100644
+--- a/controller/ofctrl.c
++++ b/controller/ofctrl.c
+@@ -173,7 +173,7 @@ struct sb_flow_ref {
+ struct uuid sb_uuid;
+ };
+
+-/* A installed flow, in static variable installed_flows.
++/* An installed flow, in static variable installed_lflows/installed_pflows.
+ *
+ * Installed flows are updated in ofctrl_put for maintaining the flow
+ * installation to OVS. They are updated according to desired flows: either by
+@@ -234,7 +234,7 @@ static struct desired_flow *desired_flow_lookup_conjunctive(
+ static void desired_flow_destroy(struct desired_flow *);
+
+ static struct installed_flow *installed_flow_lookup(
+- const struct ovn_flow *target);
++ const struct ovn_flow *target, struct hmap *installed_flows);
+ static void installed_flow_destroy(struct installed_flow *);
+ static struct installed_flow *installed_flow_dup(struct desired_flow *);
+ static struct desired_flow *installed_flow_get_active(struct installed_flow *);
+@@ -302,9 +302,12 @@ static ovs_be32 xid, xid2;
+ * zero, to avoid unbounded buffering. */
+ static struct rconn_packet_counter *tx_counter;
+
+-/* Flow table of "struct ovn_flow"s, that holds the flow table currently
+- * installed in the switch. */
+-static struct hmap installed_flows;
++/* Flow table of "struct ovn_flow"s, that holds the logical flow table
++ * currently installed in the switch. */
++static struct hmap installed_lflows;
++/* Flow table of "struct ovn_flow"s, that holds the physical flow table
++ * currently installed in the switch. */
++static struct hmap installed_pflows;
+
+ /* A reference to the group_table. */
+ static struct ovn_extend_table *groups;
+@@ -343,7 +346,8 @@ ofctrl_init(struct ovn_extend_table *group_table,
+ swconn = rconn_create(inactivity_probe_interval, 0,
+ DSCP_DEFAULT, 1 << OFP15_VERSION);
+ tx_counter = rconn_packet_counter_create();
+- hmap_init(&installed_flows);
++ hmap_init(&installed_lflows);
++ hmap_init(&installed_pflows);
+ ovs_list_init(&flow_updates);
+ ovn_init_symtab(&symtab);
+ groups = group_table;
+@@ -1426,11 +1430,12 @@ desired_flow_lookup_conjunctive(struct ovn_desired_flow_table *flow_table,
+ /* Finds and returns an installed_flow in installed_flows whose key is
+ * identical to 'target''s key, or NULL if there is none. */
+ static struct installed_flow *
+-installed_flow_lookup(const struct ovn_flow *target)
++installed_flow_lookup(const struct ovn_flow *target,
++ struct hmap *installed_flows)
+ {
+ struct installed_flow *i;
+ HMAP_FOR_EACH_WITH_HASH (i, match_hmap_node, target->hash,
+- &installed_flows) {
++ installed_flows) {
+ struct ovn_flow *f = &i->flow;
+ if (f->table_id == target->table_id
+ && f->priority == target->priority
+@@ -1542,8 +1547,14 @@ static void
+ ovn_installed_flow_table_clear(void)
+ {
+ struct installed_flow *f, *next;
+- HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_flows) {
+- hmap_remove(&installed_flows, &f->match_hmap_node);
++ HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_lflows) {
++ hmap_remove(&installed_lflows, &f->match_hmap_node);
++ unlink_all_refs_for_installed_flow(f);
++ installed_flow_destroy(f);
++ }
++
++ HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_pflows) {
++ hmap_remove(&installed_pflows, &f->match_hmap_node);
+ unlink_all_refs_for_installed_flow(f);
+ installed_flow_destroy(f);
+ }
+@@ -1553,7 +1564,8 @@ static void
+ ovn_installed_flow_table_destroy(void)
+ {
+ ovn_installed_flow_table_clear();
+- hmap_destroy(&installed_flows);
++ hmap_destroy(&installed_lflows);
++ hmap_destroy(&installed_pflows);
+ }
+
+ /* Flow table update. */
+@@ -1829,6 +1841,7 @@ installed_flow_del(struct ovn_flow *i,
+ static void
+ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
+ struct ofputil_bundle_ctrl_msg *bc,
++ struct hmap *installed_flows,
+ struct ovs_list *msgs)
+ {
+ ovs_assert(ovs_list_is_empty(&flow_table->tracked_flows));
+@@ -1836,7 +1849,7 @@ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
+ * longer desired, delete them; if any of them should have different
+ * actions, update them. */
+ struct installed_flow *i, *next;
+- HMAP_FOR_EACH_SAFE (i, next, match_hmap_node, &installed_flows) {
++ HMAP_FOR_EACH_SAFE (i, next, match_hmap_node, installed_flows) {
+ unlink_all_refs_for_installed_flow(i);
+ struct desired_flow *d = desired_flow_lookup(flow_table, &i->flow);
+ if (!d) {
+@@ -1845,7 +1858,7 @@ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
+ installed_flow_del(&i->flow, bc, msgs);
+ ovn_flow_log(&i->flow, "removing installed");
+
+- hmap_remove(&installed_flows, &i->match_hmap_node);
++ hmap_remove(installed_flows, &i->match_hmap_node);
+ installed_flow_destroy(i);
+ } else {
+ if (!ofpacts_equal(i->flow.ofpacts, i->flow.ofpacts_len,
+@@ -1863,14 +1876,14 @@ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
+ * in the installed flow table. */
+ struct desired_flow *d;
+ HMAP_FOR_EACH (d, match_hmap_node, &flow_table->match_flow_table) {
+- i = installed_flow_lookup(&d->flow);
++ i = installed_flow_lookup(&d->flow, installed_flows);
+ if (!i) {
+ ovn_flow_log(&d->flow, "adding installed");
+ installed_flow_add(&d->flow, bc, msgs);
+
+ /* Copy 'd' from 'flow_table' to installed_flows. */
+ i = installed_flow_dup(d);
+- hmap_insert(&installed_flows, &i->match_hmap_node, i->flow.hash);
++ hmap_insert(installed_flows, &i->match_hmap_node, i->flow.hash);
+ link_installed_to_desired(i, d);
+ } else if (!d->installed_flow) {
+ /* This is a desired_flow that conflicts with one installed
+@@ -1961,6 +1974,7 @@ merge_tracked_flows(struct ovn_desired_flow_table *flow_table)
+ static void
+ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
+ struct ofputil_bundle_ctrl_msg *bc,
++ struct hmap *installed_flows,
+ struct ovs_list *msgs)
+ {
+ merge_tracked_flows(flow_table);
+@@ -1979,7 +1993,7 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
+ installed_flow_del(&i->flow, bc, msgs);
+ ovn_flow_log(&i->flow, "removing installed (tracked)");
+
+- hmap_remove(&installed_flows, &i->match_hmap_node);
++ hmap_remove(installed_flows, &i->match_hmap_node);
+ installed_flow_destroy(i);
+ } else if (was_active) {
+ /* There are other desired flow(s) referencing this
+@@ -1993,7 +2007,8 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
+ desired_flow_destroy(f);
+ } else {
+ /* The desired flow was added or modified. */
+- struct installed_flow *i = installed_flow_lookup(&f->flow);
++ struct installed_flow *i = installed_flow_lookup(&f->flow,
++ installed_flows);
+ if (!i) {
+ /* Adding a new flow. */
+ installed_flow_add(&f->flow, bc, msgs);
+@@ -2001,7 +2016,7 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
+
+ /* Copy 'f' from 'flow_table' to installed_flows. */
+ struct installed_flow *new_node = installed_flow_dup(f);
+- hmap_insert(&installed_flows, &new_node->match_hmap_node,
++ hmap_insert(installed_flows, &new_node->match_hmap_node,
+ new_node->flow.hash);
+ link_installed_to_desired(new_node, f);
+ } else if (installed_flow_get_active(i) == f) {
+@@ -2055,16 +2070,19 @@ ofctrl_can_put(void)
+ *
+ * This should be called after ofctrl_run() within the main loop. */
+ void
+-ofctrl_put(struct ovn_desired_flow_table *flow_table,
++ofctrl_put(struct ovn_desired_flow_table *lflow_table,
++ struct ovn_desired_flow_table *pflow_table,
+ struct shash *pending_ct_zones,
+ const struct sbrec_meter_table *meter_table,
+ uint64_t req_cfg,
+- bool flow_changed)
++ bool lflows_changed,
++ bool pflows_changed)
+ {
+ static bool skipped_last_time = false;
+ static uint64_t old_req_cfg = 0;
+ bool need_put = false;
+- if (flow_changed || skipped_last_time || need_reinstall_flows) {
++ if (lflows_changed || pflows_changed || skipped_last_time ||
++ need_reinstall_flows) {
+ need_put = true;
+ old_req_cfg = req_cfg;
+ } else if (req_cfg != old_req_cfg) {
+@@ -2093,7 +2111,6 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
+ return;
+ }
+
+- skipped_last_time = false;
+ need_reinstall_flows = false;
+
+ /* OpenFlow messages to send to the switch to bring it up-to-date. */
+@@ -2159,12 +2176,35 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
+ bundle_open = ofputil_encode_bundle_ctrl_request(OFP15_VERSION, &bc);
+ ovs_list_push_back(&msgs, &bundle_open->list_node);
+
+- if (flow_table->change_tracked) {
+- update_installed_flows_by_track(flow_table, &bc, &msgs);
+- } else {
+- update_installed_flows_by_compare(flow_table, &bc, &msgs);
++ /* If we skipped the last run, process the logical flow table's
++ * (tracked) flows even if lflows_changed is not set.
++ * Same for pflows_changed. */
++ if (lflows_changed || skipped_last_time) {
++ if (lflow_table->change_tracked) {
++ update_installed_flows_by_track(lflow_table, &bc,
++ &installed_lflows,
++ &msgs);
++ } else {
++ update_installed_flows_by_compare(lflow_table, &bc,
++ &installed_lflows,
++ &msgs);
++ }
++ }
++
++ if (pflows_changed || skipped_last_time) {
++ if (pflow_table->change_tracked) {
++ update_installed_flows_by_track(pflow_table, &bc,
++ &installed_pflows,
++ &msgs);
++ } else {
++ update_installed_flows_by_compare(pflow_table, &bc,
++ &installed_pflows,
++ &msgs);
++ }
+ }
+
++ skipped_last_time = false;
++
+ if (ovs_list_back(&msgs) == &bundle_open->list_node) {
+ /* No flow updates. Removing the bundle open request. */
+ ovs_list_pop_back(&msgs);
+@@ -2287,8 +2327,11 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
+ cur_cfg = req_cfg;
+ }
+
+- flow_table->change_tracked = true;
+- ovs_assert(ovs_list_is_empty(&flow_table->tracked_flows));
++ lflow_table->change_tracked = true;
++ ovs_assert(ovs_list_is_empty(&lflow_table->tracked_flows));
++
++ pflow_table->change_tracked = true;
++ ovs_assert(ovs_list_is_empty(&pflow_table->tracked_flows));
+ }
+
+ /* Looks up the logical port with the name 'port_name' in 'br_int_'. If
+diff --git a/controller/ofctrl.h b/controller/ofctrl.h
+index 88769566a..ead8088c5 100644
+--- a/controller/ofctrl.h
++++ b/controller/ofctrl.h
+@@ -52,11 +52,13 @@ void ofctrl_init(struct ovn_extend_table *group_table,
+ void ofctrl_run(const struct ovsrec_bridge *br_int,
+ struct shash *pending_ct_zones);
+ enum mf_field_id ofctrl_get_mf_field_id(void);
+-void ofctrl_put(struct ovn_desired_flow_table *,
++void ofctrl_put(struct ovn_desired_flow_table *lflow_table,
++ struct ovn_desired_flow_table *pflow_table,
+ struct shash *pending_ct_zones,
+ const struct sbrec_meter_table *,
+ uint64_t nb_cfg,
+- bool flow_changed);
++ bool lflow_changed,
++ bool pflow_changed);
+ bool ofctrl_can_put(void);
+ void ofctrl_wait(void);
+ void ofctrl_destroy(void);
+diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
+index 07c6fcfd1..ea03638a9 100644
+--- a/controller/ovn-controller.c
++++ b/controller/ovn-controller.c
+@@ -46,6 +46,7 @@
+ #include "openvswitch/vconn.h"
+ #include "openvswitch/vlog.h"
+ #include "ovn/actions.h"
++#include "ovn/features.h"
+ #include "lib/chassis-index.h"
+ #include "lib/extend-table.h"
+ #include "lib/ip-mcast-index.h"
+@@ -88,6 +89,7 @@ static unixctl_cb_func lflow_cache_show_stats_cmd;
+ static unixctl_cb_func debug_delay_nb_cfg_report;
+
+ #define DEFAULT_BRIDGE_NAME "br-int"
++#define DEFAULT_DATAPATH "system"
+ #define DEFAULT_PROBE_INTERVAL_MSEC 5000
+ #define OFCTRL_DEFAULT_PROBE_INTERVAL_SEC 0
+
+@@ -319,10 +321,6 @@ static const struct ovsrec_bridge *
+ create_br_int(struct ovsdb_idl_txn *ovs_idl_txn,
+ const struct ovsrec_open_vswitch_table *ovs_table)
+ {
+- if (!ovs_idl_txn) {
+- return NULL;
+- }
+-
+ const struct ovsrec_open_vswitch *cfg;
+ cfg = ovsrec_open_vswitch_table_first(ovs_table);
+ if (!cfg) {
+@@ -386,6 +384,21 @@ create_br_int(struct ovsdb_idl_txn *ovs_idl_txn,
+ return bridge;
+ }
+
++static const struct ovsrec_datapath *
++create_br_datapath(struct ovsdb_idl_txn *ovs_idl_txn,
++ const struct ovsrec_open_vswitch *cfg,
++ const char *datapath_type)
++{
++ ovsdb_idl_txn_add_comment(ovs_idl_txn,
++ "ovn-controller: creating bridge datapath '%s'",
++ datapath_type);
++
++ struct ovsrec_datapath *dp = ovsrec_datapath_insert(ovs_idl_txn);
++ ovsrec_open_vswitch_verify_datapaths(cfg);
++ ovsrec_open_vswitch_update_datapaths_setkey(cfg, datapath_type, dp);
++ return dp;
++}
++
+ static const struct ovsrec_bridge *
+ get_br_int(const struct ovsrec_bridge_table *bridge_table,
+ const struct ovsrec_open_vswitch_table *ovs_table)
+@@ -399,33 +412,69 @@ get_br_int(const struct ovsrec_bridge_table *bridge_table,
+ return get_bridge(bridge_table, br_int_name(cfg));
+ }
+
+-static const struct ovsrec_bridge *
++static const struct ovsrec_datapath *
++get_br_datapath(const struct ovsrec_open_vswitch *cfg,
++ const char *datapath_type)
++{
++ for (size_t i = 0; i < cfg->n_datapaths; i++) {
++ if (!strcmp(cfg->key_datapaths[i], datapath_type)) {
++ return cfg->value_datapaths[i];
++ }
++ }
++ return NULL;
++}
++
++static void
+ process_br_int(struct ovsdb_idl_txn *ovs_idl_txn,
+ const struct ovsrec_bridge_table *bridge_table,
+- const struct ovsrec_open_vswitch_table *ovs_table)
++ const struct ovsrec_open_vswitch_table *ovs_table,
++ const struct ovsrec_bridge **br_int_,
++ const struct ovsrec_datapath **br_int_dp_)
+ {
+- const struct ovsrec_bridge *br_int = get_br_int(bridge_table,
+- ovs_table);
+- if (!br_int) {
+- br_int = create_br_int(ovs_idl_txn, ovs_table);
+- }
+- if (br_int && ovs_idl_txn) {
+- const struct ovsrec_open_vswitch *cfg;
+- cfg = ovsrec_open_vswitch_table_first(ovs_table);
+- ovs_assert(cfg);
+- const char *datapath_type = smap_get(&cfg->external_ids,
+- "ovn-bridge-datapath-type");
+- /* Check for the datapath_type and set it only if it is defined in
+- * cfg. */
+- if (datapath_type && strcmp(br_int->datapath_type, datapath_type)) {
+- ovsrec_bridge_set_datapath_type(br_int, datapath_type);
++ const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
++ const struct ovsrec_datapath *br_int_dp = NULL;
++
++ ovs_assert(br_int_ && br_int_dp_);
++ if (ovs_idl_txn) {
++ if (!br_int) {
++ br_int = create_br_int(ovs_idl_txn, ovs_table);
+ }
+- if (!br_int->fail_mode || strcmp(br_int->fail_mode, "secure")) {
+- ovsrec_bridge_set_fail_mode(br_int, "secure");
+- VLOG_WARN("Integration bridge fail-mode changed to 'secure'.");
++
++ if (br_int) {
++ const struct ovsrec_open_vswitch *cfg =
++ ovsrec_open_vswitch_table_first(ovs_table);
++ ovs_assert(cfg);
++
++ /* Propagate "ovn-bridge-datapath-type" from OVS table, if any.
++ * Otherwise use the datapath-type set in br-int, if any.
++ * Finally, assume "system" datapath if none configured.
++ */
++ const char *datapath_type =
++ smap_get(&cfg->external_ids, "ovn-bridge-datapath-type");
++
++ if (!datapath_type) {
++ if (br_int->datapath_type[0]) {
++ datapath_type = br_int->datapath_type;
++ } else {
++ datapath_type = DEFAULT_DATAPATH;
++ }
++ }
++ if (strcmp(br_int->datapath_type, datapath_type)) {
++ ovsrec_bridge_set_datapath_type(br_int, datapath_type);
++ }
++ if (!br_int->fail_mode || strcmp(br_int->fail_mode, "secure")) {
++ ovsrec_bridge_set_fail_mode(br_int, "secure");
++ VLOG_WARN("Integration bridge fail-mode changed to 'secure'.");
++ }
++ br_int_dp = get_br_datapath(cfg, datapath_type);
++ if (!br_int_dp) {
++ br_int_dp = create_br_datapath(ovs_idl_txn, cfg,
++ datapath_type);
++ }
+ }
+ }
+- return br_int;
++ *br_int_ = br_int;
++ *br_int_dp_ = br_int_dp;
+ }
+
+ static const char *
+@@ -563,7 +612,7 @@ add_pending_ct_zone_entry(struct shash *pending_ct_zones,
+ static void
+ update_ct_zones(const struct sset *lports, const struct hmap *local_datapaths,
+ struct simap *ct_zones, unsigned long *ct_zone_bitmap,
+- struct shash *pending_ct_zones, struct hmapx *updated_dps)
++ struct shash *pending_ct_zones)
+ {
+ struct simap_node *ct_zone, *ct_zone_next;
+ int scan_start = 1;
+@@ -653,11 +702,6 @@ update_ct_zones(const struct sset *lports, const struct hmap *local_datapaths,
+
+ bitmap_set1(ct_zone_bitmap, snat_req_node->data);
+ simap_put(ct_zones, snat_req_node->name, snat_req_node->data);
+- struct shash_node *ld_node = shash_find(&all_lds, snat_req_node->name);
+- if (ld_node) {
+- struct local_datapath *dp = ld_node->data;
+- hmapx_add(updated_dps, (void *) dp->datapath);
+- }
+ }
+
+ /* xxx This is wasteful to assign a zone to each port--even if no
+@@ -686,12 +730,6 @@ update_ct_zones(const struct sset *lports, const struct hmap *local_datapaths,
+
+ bitmap_set1(ct_zone_bitmap, zone);
+ simap_put(ct_zones, user, zone);
+-
+- struct shash_node *ld_node = shash_find(&all_lds, user);
+- if (ld_node) {
+- struct local_datapath *dp = ld_node->data;
+- hmapx_add(updated_dps, (void *) dp->datapath);
+- }
+ }
+
+ simap_destroy(&req_snat_zones);
+@@ -848,6 +886,7 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
+ ovsdb_idl_add_table(ovs_idl, &ovsrec_table_open_vswitch);
+ ovsdb_idl_add_column(ovs_idl, &ovsrec_open_vswitch_col_external_ids);
+ ovsdb_idl_add_column(ovs_idl, &ovsrec_open_vswitch_col_bridges);
++ ovsdb_idl_add_column(ovs_idl, &ovsrec_open_vswitch_col_datapaths);
+ ovsdb_idl_add_table(ovs_idl, &ovsrec_table_interface);
+ ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_name);
+ ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_bfd);
+@@ -870,6 +909,8 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
+ ovsdb_idl_add_column(ovs_idl, &ovsrec_ssl_col_ca_cert);
+ ovsdb_idl_add_column(ovs_idl, &ovsrec_ssl_col_certificate);
+ ovsdb_idl_add_column(ovs_idl, &ovsrec_ssl_col_private_key);
++ ovsdb_idl_add_table(ovs_idl, &ovsrec_table_datapath);
++ ovsdb_idl_add_column(ovs_idl, &ovsrec_datapath_col_capabilities);
+ chassis_register_ovs_idl(ovs_idl);
+ encaps_register_ovs_idl(ovs_idl);
+ binding_register_ovs_idl(ovs_idl);
+@@ -970,9 +1011,10 @@ struct ed_type_runtime_data {
+ * local hypervisor, and localnet ports. */
+ struct sset local_lports;
+
+- /* Contains the same ports as local_lports, but in the format:
+- * <datapath-tunnel-key>_<port-tunnel-key> */
+- struct sset local_lport_ids;
++ /* Port bindings that are relevant to the local chassis (VIFs bound
++ * locally, patch ports).
++ */
++ struct related_lports related_lports;
+ struct sset active_tunnels;
+
+ /* runtime data engine private data. */
+@@ -986,6 +1028,9 @@ struct ed_type_runtime_data {
+
+ /* CT zone data. Contains datapaths that had updated CT zones */
+ struct hmapx ct_updated_datapaths;
++
++ /* Contains datapaths that had updated external ports. */
++ struct hmapx extport_updated_datapaths;
+ };
+
+ /* struct ed_type_runtime_data has the below members for tracking the
+@@ -1068,7 +1113,7 @@ en_runtime_data_init(struct engine_node *node OVS_UNUSED,
+
+ hmap_init(&data->local_datapaths);
+ sset_init(&data->local_lports);
+- sset_init(&data->local_lport_ids);
++ related_lports_init(&data->related_lports);
+ sset_init(&data->active_tunnels);
+ sset_init(&data->egress_ifaces);
+ smap_init(&data->local_iface_ids);
+@@ -1078,6 +1123,7 @@ en_runtime_data_init(struct engine_node *node OVS_UNUSED,
+ hmap_init(&data->tracked_dp_bindings);
+
+ hmapx_init(&data->ct_updated_datapaths);
++ hmapx_init(&data->extport_updated_datapaths);
+
+ return data;
+ }
+@@ -1088,7 +1134,7 @@ en_runtime_data_cleanup(void *data)
+ struct ed_type_runtime_data *rt_data = data;
+
+ sset_destroy(&rt_data->local_lports);
+- sset_destroy(&rt_data->local_lport_ids);
++ related_lports_destroy(&rt_data->related_lports);
+ sset_destroy(&rt_data->active_tunnels);
+ sset_destroy(&rt_data->egress_ifaces);
+ smap_destroy(&rt_data->local_iface_ids);
+@@ -1096,12 +1142,14 @@ en_runtime_data_cleanup(void *data)
+ HMAP_FOR_EACH_SAFE (cur_node, next_node, hmap_node,
+ &rt_data->local_datapaths) {
+ free(cur_node->peer_ports);
++ shash_destroy(&cur_node->external_ports);
+ hmap_remove(&rt_data->local_datapaths, &cur_node->hmap_node);
+ free(cur_node);
+ }
+ hmap_destroy(&rt_data->local_datapaths);
+ local_binding_data_destroy(&rt_data->lbinding_data);
+ hmapx_destroy(&rt_data->ct_updated_datapaths);
++ hmapx_destroy(&rt_data->extport_updated_datapaths);
+ }
+
+ static void
+@@ -1181,14 +1229,15 @@ init_binding_ctx(struct engine_node *node,
+ b_ctx_out->local_datapaths = &rt_data->local_datapaths;
+ b_ctx_out->local_lports = &rt_data->local_lports;
+ b_ctx_out->local_lports_changed = false;
+- b_ctx_out->local_lport_ids = &rt_data->local_lport_ids;
+- b_ctx_out->local_lport_ids_changed = false;
++ b_ctx_out->related_lports = &rt_data->related_lports;
++ b_ctx_out->related_lports_changed = false;
+ b_ctx_out->non_vif_ports_changed = false;
+ b_ctx_out->egress_ifaces = &rt_data->egress_ifaces;
+ b_ctx_out->lbinding_data = &rt_data->lbinding_data;
+ b_ctx_out->local_iface_ids = &rt_data->local_iface_ids;
+ b_ctx_out->tracked_dp_bindings = NULL;
+ b_ctx_out->if_mgr = ctrl_ctx->if_mgr;
++ b_ctx_out->extport_updated_datapaths = &rt_data->extport_updated_datapaths;
+ }
+
+ static void
+@@ -1197,7 +1246,6 @@ en_runtime_data_run(struct engine_node *node, void *data)
+ struct ed_type_runtime_data *rt_data = data;
+ struct hmap *local_datapaths = &rt_data->local_datapaths;
+ struct sset *local_lports = &rt_data->local_lports;
+- struct sset *local_lport_ids = &rt_data->local_lport_ids;
+ struct sset *active_tunnels = &rt_data->active_tunnels;
+
+ static bool first_run = true;
+@@ -1208,23 +1256,25 @@ en_runtime_data_run(struct engine_node *node, void *data)
+ struct local_datapath *cur_node, *next_node;
+ HMAP_FOR_EACH_SAFE (cur_node, next_node, hmap_node, local_datapaths) {
+ free(cur_node->peer_ports);
++ shash_destroy(&cur_node->external_ports);
+ hmap_remove(local_datapaths, &cur_node->hmap_node);
+ free(cur_node);
+ }
+ hmap_clear(local_datapaths);
+ local_binding_data_destroy(&rt_data->lbinding_data);
+ sset_destroy(local_lports);
+- sset_destroy(local_lport_ids);
++ related_lports_destroy(&rt_data->related_lports);
+ sset_destroy(active_tunnels);
+ sset_destroy(&rt_data->egress_ifaces);
+ smap_destroy(&rt_data->local_iface_ids);
+ sset_init(local_lports);
+- sset_init(local_lport_ids);
++ related_lports_init(&rt_data->related_lports);
+ sset_init(active_tunnels);
+ sset_init(&rt_data->egress_ifaces);
+ smap_init(&rt_data->local_iface_ids);
+ local_binding_data_init(&rt_data->lbinding_data);
+ hmapx_clear(&rt_data->ct_updated_datapaths);
++ hmapx_clear(&rt_data->extport_updated_datapaths);
+ }
+
+ struct binding_ctx_in b_ctx_in;
+@@ -1289,7 +1339,7 @@ runtime_data_sb_port_binding_handler(struct engine_node *node, void *data)
+ return false;
+ }
+
+- if (b_ctx_out.local_lport_ids_changed ||
++ if (b_ctx_out.related_lports_changed ||
+ b_ctx_out.non_vif_ports_changed ||
+ !hmap_is_empty(b_ctx_out.tracked_dp_bindings)) {
+ engine_set_node_state(node, EN_UPDATED);
+@@ -1599,11 +1649,8 @@ en_port_groups_run(struct engine_node *node, void *data)
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+- struct sset *local_b_lports = binding_collect_local_binding_lports(
+- &rt_data->lbinding_data);
+- port_groups_init(pg_table, local_b_lports, &pg->port_group_ssets,
+- &pg->port_groups_cs_local);
+- binding_destroy_local_binding_lports(local_b_lports);
++ port_groups_init(pg_table, &rt_data->related_lports.lport_names,
++ &pg->port_group_ssets, &pg->port_groups_cs_local);
+
+ engine_set_node_state(node, EN_UPDATED);
+ }
+@@ -1620,12 +1667,9 @@ port_groups_sb_port_group_handler(struct engine_node *node, void *data)
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+- struct sset *local_b_lports = binding_collect_local_binding_lports(
+- &rt_data->lbinding_data);
+- port_groups_update(pg_table, local_b_lports, &pg->port_group_ssets,
+- &pg->port_groups_cs_local, &pg->new, &pg->deleted,
+- &pg->updated);
+- binding_destroy_local_binding_lports(local_b_lports);
++ port_groups_update(pg_table, &rt_data->related_lports.lport_names,
++ &pg->port_group_ssets, &pg->port_groups_cs_local,
++ &pg->new, &pg->deleted, &pg->updated);
+
+ if (!sset_is_empty(&pg->new) || !sset_is_empty(&pg->deleted) ||
+ !sset_is_empty(&pg->updated)) {
+@@ -1658,9 +1702,6 @@ port_groups_runtime_data_handler(struct engine_node *node, void *data)
+ goto out;
+ }
+
+- struct sset *local_b_lports = binding_collect_local_binding_lports(
+- &rt_data->lbinding_data);
+-
+ const struct sbrec_port_group *pg_sb;
+ SBREC_PORT_GROUP_TABLE_FOR_EACH (pg_sb, pg_table) {
+ struct sset *pg_lports = shash_find_data(&pg->port_group_ssets,
+@@ -1687,13 +1728,12 @@ port_groups_runtime_data_handler(struct engine_node *node, void *data)
+ if (need_update) {
+ expr_const_sets_add_strings(&pg->port_groups_cs_local, pg_sb->name,
+ (const char *const *) pg_sb->ports,
+- pg_sb->n_ports, local_b_lports);
++ pg_sb->n_ports,
++ &rt_data->related_lports.lport_names);
+ sset_add(&pg->updated, pg_sb->name);
+ }
+ }
+
+- binding_destroy_local_binding_lports(local_b_lports);
+-
+ out:
+ if (!sset_is_empty(&pg->new) || !sset_is_empty(&pg->deleted) ||
+ !sset_is_empty(&pg->updated)) {
+@@ -1748,10 +1788,9 @@ en_ct_zones_run(struct engine_node *node, void *data)
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+- hmapx_clear(&rt_data->ct_updated_datapaths);
+ update_ct_zones(&rt_data->local_lports, &rt_data->local_datapaths,
+ &ct_zones_data->current, ct_zones_data->bitmap,
+- &ct_zones_data->pending, &rt_data->ct_updated_datapaths);
++ &ct_zones_data->pending);
+
+
+ engine_set_node_state(node, EN_UPDATED);
+@@ -1794,107 +1833,13 @@ en_mff_ovn_geneve_run(struct engine_node *node, void *data)
+ engine_set_node_state(node, EN_UNCHANGED);
+ }
+
+-/* Engine node en_physical_flow_changes indicates whether
+- * there is a need to
+- * - recompute only physical flows or
+- * - we can incrementally process the physical flows.
+- *
+- * en_physical_flow_changes is an input to flow_output engine node.
+- * If the engine node 'en_physical_flow_changes' gets updated during
+- * engine run, it means the handler for this -
+- * flow_output_physical_flow_changes_handler() will either
+- * - recompute the physical flows by calling 'physical_run() or
+- * - incrementlly process some of the changes for physical flow
+- * calculation. Right now we handle OVS interfaces changes
+- * for physical flow computation.
+- *
+- * When ever a port binding happens, the follow up
+- * activity is the zone id allocation for that port binding.
+- * With this intermediate engine node, we avoid full recomputation.
+- * Instead we do physical flow computation (either full recomputation
+- * by calling physical_run() or handling the changes incrementally.
+- *
+- * Hence this is an intermediate engine node to indicate the
+- * flow_output engine to recomputes/compute the physical flows.
+- *
+- * TODO 1. Ideally this engine node should recompute/compute the physical
+- * flows instead of relegating it to the flow_output node.
+- * But this requires splitting the flow_output node to
+- * logical_flow_output and physical_flow_output.
+- *
+- * TODO 2. We can further optimise the en_ct_zone changes to
+- * compute the phsyical flows for changed zone ids.
+- *
+- * TODO 3: physical.c has a global simap -localvif_to_ofport which stores the
+- * local OVS interfaces and the ofport numbers. Ideally this should be
+- * part of the engine data.
+- */
+-struct ed_type_pfc_data {
+- /* Both these variables are tracked and set in each engine run. */
+- bool recompute_physical_flows;
+- bool ovs_ifaces_changed;
+-};
+-
+-static void
+-en_physical_flow_changes_clear_tracked_data(void *data_)
+-{
+- struct ed_type_pfc_data *data = data_;
+- data->recompute_physical_flows = false;
+- data->ovs_ifaces_changed = false;
+-}
+-
+-static void *
+-en_physical_flow_changes_init(struct engine_node *node OVS_UNUSED,
+- struct engine_arg *arg OVS_UNUSED)
+-{
+- struct ed_type_pfc_data *data = xzalloc(sizeof *data);
+- return data;
+-}
+-
+-static void
+-en_physical_flow_changes_cleanup(void *data OVS_UNUSED)
+-{
+-}
+-
+-/* Indicate to the flow_output engine that we need to recompute physical
+- * flows. */
+-static void
+-en_physical_flow_changes_run(struct engine_node *node, void *data)
+-{
+- struct ed_type_pfc_data *pfc_tdata = data;
+- pfc_tdata->recompute_physical_flows = true;
+- pfc_tdata->ovs_ifaces_changed = true;
+- engine_set_node_state(node, EN_UPDATED);
+-}
+-
+-/* ct_zone changes are not handled incrementally but a handler is required
+- * to avoid skipping the ovs_iface incremental change handler.
+- */
+-static bool
+-physical_flow_changes_ct_zones_handler(struct engine_node *node OVS_UNUSED,
+- void *data OVS_UNUSED)
+-{
+- return false;
+-}
+-
+-/* There are OVS interface changes. Indicate to the flow_output engine
+- * to handle these OVS interface changes for physical flow computations. */
+-static bool
+-physical_flow_changes_ovs_iface_handler(struct engine_node *node, void *data)
+-{
+- struct ed_type_pfc_data *pfc_tdata = data;
+- pfc_tdata->ovs_ifaces_changed = true;
+- engine_set_node_state(node, EN_UPDATED);
+- return true;
+-}
+-
+-struct flow_output_persistent_data {
++struct lflow_output_persistent_data {
+ uint32_t conj_id_ofs;
+ struct lflow_cache *lflow_cache;
+ };
+
+-struct ed_type_flow_output {
+- /* desired flows */
++struct ed_type_lflow_output {
++ /* Logical flow table */
+ struct ovn_desired_flow_table flow_table;
+ /* group ids for load balancing */
+ struct ovn_extend_table group_table;
+@@ -1905,81 +1850,15 @@ struct ed_type_flow_output {
+
+ /* Data which is persistent and not cleared during
+ * full recompute. */
+- struct flow_output_persistent_data pd;
++ struct lflow_output_persistent_data pd;
+ };
+
+-static void init_physical_ctx(struct engine_node *node,
+- struct ed_type_runtime_data *rt_data,
+- struct physical_ctx *p_ctx)
+-{
+- struct ovsdb_idl_index *sbrec_port_binding_by_name =
+- engine_ovsdb_node_get_index(
+- engine_get_input("SB_port_binding", node),
+- "name");
+-
+- struct sbrec_multicast_group_table *multicast_group_table =
+- (struct sbrec_multicast_group_table *)EN_OVSDB_GET(
+- engine_get_input("SB_multicast_group", node));
+-
+- struct sbrec_port_binding_table *port_binding_table =
+- (struct sbrec_port_binding_table *)EN_OVSDB_GET(
+- engine_get_input("SB_port_binding", node));
+-
+- struct sbrec_chassis_table *chassis_table =
+- (struct sbrec_chassis_table *)EN_OVSDB_GET(
+- engine_get_input("SB_chassis", node));
+-
+- struct ed_type_mff_ovn_geneve *ed_mff_ovn_geneve =
+- engine_get_input_data("mff_ovn_geneve", node);
+-
+- struct ovsrec_open_vswitch_table *ovs_table =
+- (struct ovsrec_open_vswitch_table *)EN_OVSDB_GET(
+- engine_get_input("OVS_open_vswitch", node));
+- struct ovsrec_bridge_table *bridge_table =
+- (struct ovsrec_bridge_table *)EN_OVSDB_GET(
+- engine_get_input("OVS_bridge", node));
+- const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
+- const char *chassis_id = get_ovs_chassis_id(ovs_table);
+- const struct sbrec_chassis *chassis = NULL;
+- struct ovsdb_idl_index *sbrec_chassis_by_name =
+- engine_ovsdb_node_get_index(
+- engine_get_input("SB_chassis", node),
+- "name");
+- if (chassis_id) {
+- chassis = chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
+- }
+-
+- ovs_assert(br_int && chassis);
+-
+- struct ovsrec_interface_table *iface_table =
+- (struct ovsrec_interface_table *)EN_OVSDB_GET(
+- engine_get_input("OVS_interface", node));
+-
+- struct ed_type_ct_zones *ct_zones_data =
+- engine_get_input_data("ct_zones", node);
+- struct simap *ct_zones = &ct_zones_data->current;
+-
+- p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
+- p_ctx->port_binding_table = port_binding_table;
+- p_ctx->mc_group_table = multicast_group_table;
+- p_ctx->br_int = br_int;
+- p_ctx->chassis_table = chassis_table;
+- p_ctx->iface_table = iface_table;
+- p_ctx->chassis = chassis;
+- p_ctx->active_tunnels = &rt_data->active_tunnels;
+- p_ctx->local_datapaths = &rt_data->local_datapaths;
+- p_ctx->local_lports = &rt_data->local_lports;
+- p_ctx->ct_zones = ct_zones;
+- p_ctx->mff_ovn_geneve = ed_mff_ovn_geneve->mff_ovn_geneve;
+- p_ctx->local_bindings = &rt_data->lbinding_data.bindings;
+- p_ctx->ct_updated_datapaths = &rt_data->ct_updated_datapaths;
+-}
+-
+-static void init_lflow_ctx(struct engine_node *node,
+- struct ed_type_runtime_data *rt_data,
+- struct ed_type_flow_output *fo,
+- struct lflow_ctx_in *l_ctx_in,
+- struct lflow_ctx_out *l_ctx_out)
++static void
++init_lflow_ctx(struct engine_node *node,
++ struct ed_type_runtime_data *rt_data,
++ struct ed_type_lflow_output *fo,
++ struct lflow_ctx_in *l_ctx_in,
++ struct lflow_ctx_out *l_ctx_out)
+ {
+ struct ovsdb_idl_index *sbrec_port_binding_by_name =
+ engine_ovsdb_node_get_index(
+@@ -2077,7 +1956,7 @@ static void init_lflow_ctx(struct engine_node *node,
+ l_ctx_in->addr_sets = addr_sets;
+ l_ctx_in->port_groups = port_groups;
+ l_ctx_in->active_tunnels = &rt_data->active_tunnels;
+- l_ctx_in->local_lport_ids = &rt_data->local_lport_ids;
++ l_ctx_in->related_lport_ids = &rt_data->related_lports.lport_ids;
+
+ l_ctx_out->flow_table = &fo->flow_table;
+ l_ctx_out->group_table = &fo->group_table;
+@@ -2089,11 +1968,10 @@ static void init_lflow_ctx(struct engine_node *node,
+ }
+
+ static void *
+-en_flow_output_init(struct engine_node *node OVS_UNUSED,
+- struct engine_arg *arg OVS_UNUSED)
++en_lflow_output_init(struct engine_node *node OVS_UNUSED,
++ struct engine_arg *arg OVS_UNUSED)
+ {
+- struct ed_type_flow_output *data = xzalloc(sizeof *data);
+-
++ struct ed_type_lflow_output *data = xzalloc(sizeof *data);
+ ovn_desired_flow_table_init(&data->flow_table);
+ ovn_extend_table_init(&data->group_table);
+ ovn_extend_table_init(&data->meter_table);
+@@ -2103,9 +1981,9 @@ en_flow_output_init(struct engine_node *node OVS_UNUSED,
+ }
+
+ static void
+-en_flow_output_cleanup(void *data)
++en_lflow_output_cleanup(void *data)
+ {
+- struct ed_type_flow_output *flow_output_data = data;
++ struct ed_type_lflow_output *flow_output_data = data;
+ ovn_desired_flow_table_destroy(&flow_output_data->flow_table);
+ ovn_extend_table_destroy(&flow_output_data->group_table);
+ ovn_extend_table_destroy(&flow_output_data->meter_table);
+@@ -2114,7 +1992,7 @@ en_flow_output_cleanup(void *data)
+ }
+
+ static void
+-en_flow_output_run(struct engine_node *node, void *data)
++en_lflow_output_run(struct engine_node *node, void *data)
+ {
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+@@ -2140,8 +2018,8 @@ en_flow_output_run(struct engine_node *node, void *data)
+
+ ovs_assert(br_int && chassis);
+
+- struct ed_type_flow_output *fo = data;
+- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
++ struct ed_type_lflow_output *fo = data;
++ struct ovn_desired_flow_table *lflow_table = &fo->flow_table;
+ struct ovn_extend_table *group_table = &fo->group_table;
+ struct ovn_extend_table *meter_table = &fo->meter_table;
+ struct lflow_resource_ref *lfrr = &fo->lflow_resource_ref;
+@@ -2150,7 +2028,7 @@ en_flow_output_run(struct engine_node *node, void *data)
+ if (first_run) {
+ first_run = false;
+ } else {
+- ovn_desired_flow_table_clear(flow_table);
++ ovn_desired_flow_table_clear(lflow_table);
+ ovn_extend_table_clear(group_table, false /* desired */);
+ ovn_extend_table_clear(meter_table, false /* desired */);
+ lflow_resource_clear(lfrr);
+@@ -2172,7 +2050,7 @@ en_flow_output_run(struct engine_node *node, void *data)
+ if (l_ctx_out.conj_id_overflow) {
+ /* Conjunction ids overflow. There can be many holes in between.
+ * Destroy lflow cache and call lflow_run() again. */
+- ovn_desired_flow_table_clear(flow_table);
++ ovn_desired_flow_table_clear(lflow_table);
+ ovn_extend_table_clear(group_table, false /* desired */);
+ ovn_extend_table_clear(meter_table, false /* desired */);
+ lflow_resource_clear(lfrr);
+@@ -2185,16 +2063,11 @@ en_flow_output_run(struct engine_node *node, void *data)
+ }
+ }
+
+- struct physical_ctx p_ctx;
+- init_physical_ctx(node, rt_data, &p_ctx);
+-
+- physical_run(&p_ctx, &fo->flow_table);
+-
+ engine_set_node_state(node, EN_UPDATED);
+ }
+
+ static bool
+-flow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
++lflow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
+ {
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+@@ -2207,7 +2080,7 @@ flow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
+ const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
+ ovs_assert(br_int);
+
+- struct ed_type_flow_output *fo = data;
++ struct ed_type_lflow_output *fo = data;
+ struct lflow_ctx_in l_ctx_in;
+ struct lflow_ctx_out l_ctx_out;
+ init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
+@@ -2219,7 +2092,7 @@ flow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
+ }
+
+ static bool
+-flow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
++lflow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
+ {
+ struct ovsdb_idl_index *sbrec_port_binding_by_name =
+ engine_ovsdb_node_get_index(
+@@ -2234,60 +2107,17 @@ flow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
+ engine_get_input_data("runtime_data", node);
+ const struct hmap *local_datapaths = &rt_data->local_datapaths;
+
+- struct ed_type_flow_output *fo = data;
+- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
++ struct ed_type_lflow_output *lfo = data;
+
+ lflow_handle_changed_neighbors(sbrec_port_binding_by_name,
+- mac_binding_table, local_datapaths, flow_table);
++ mac_binding_table, local_datapaths, &lfo->flow_table);
+
+ engine_set_node_state(node, EN_UPDATED);
+ return true;
+ }
+
+ static bool
+-flow_output_sb_port_binding_handler(struct engine_node *node,
+- void *data)
+-{
+- struct ed_type_runtime_data *rt_data =
+- engine_get_input_data("runtime_data", node);
+-
+- struct ed_type_flow_output *fo = data;
+- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
+-
+- struct physical_ctx p_ctx;
+- init_physical_ctx(node, rt_data, &p_ctx);
+-
+- /* We handle port-binding changes for physical flow processing
+- * only. flow_output runtime data handler takes care of processing
+- * logical flows for any port binding changes.
+- */
+- physical_handle_port_binding_changes(&p_ctx, flow_table);
+-
+- engine_set_node_state(node, EN_UPDATED);
+- return true;
+-}
+-
+-static bool
+-flow_output_sb_multicast_group_handler(struct engine_node *node, void *data)
+-{
+- struct ed_type_runtime_data *rt_data =
+- engine_get_input_data("runtime_data", node);
+-
+- struct ed_type_flow_output *fo = data;
+- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
+-
+- struct physical_ctx p_ctx;
+- init_physical_ctx(node, rt_data, &p_ctx);
+-
+- physical_handle_mc_group_changes(&p_ctx, flow_table);
+-
+- engine_set_node_state(node, EN_UPDATED);
+- return true;
+-
+-}
+-
+-static bool
+-_flow_output_resource_ref_handler(struct engine_node *node, void *data,
++_lflow_output_resource_ref_handler(struct engine_node *node, void *data,
+ enum ref_type ref_type)
+ {
+ struct ed_type_runtime_data *rt_data =
+@@ -2319,7 +2149,7 @@ _flow_output_resource_ref_handler(struct engine_node *node, void *data,
+
+ ovs_assert(br_int && chassis);
+
+- struct ed_type_flow_output *fo = data;
++ struct ed_type_lflow_output *fo = data;
+
+ struct lflow_ctx_in l_ctx_in;
+ struct lflow_ctx_out l_ctx_out;
+@@ -2388,53 +2218,20 @@ _flow_output_resource_ref_handler(struct engine_node *node, void *data,
+ }
+
+ static bool
+-flow_output_addr_sets_handler(struct engine_node *node, void *data)
++lflow_output_addr_sets_handler(struct engine_node *node, void *data)
+ {
+- return _flow_output_resource_ref_handler(node, data, REF_TYPE_ADDRSET);
++ return _lflow_output_resource_ref_handler(node, data, REF_TYPE_ADDRSET);
+ }
+
+ static bool
+-flow_output_port_groups_handler(struct engine_node *node, void *data)
++lflow_output_port_groups_handler(struct engine_node *node, void *data)
+ {
+- return _flow_output_resource_ref_handler(node, data, REF_TYPE_PORTGROUP);
++ return _lflow_output_resource_ref_handler(node, data, REF_TYPE_PORTGROUP);
+ }
+
+ static bool
+-flow_output_physical_flow_changes_handler(struct engine_node *node, void *data)
+-{
+- struct ed_type_runtime_data *rt_data =
+- engine_get_input_data("runtime_data", node);
+-
+- struct ed_type_flow_output *fo = data;
+- struct physical_ctx p_ctx;
+- init_physical_ctx(node, rt_data, &p_ctx);
+-
+- engine_set_node_state(node, EN_UPDATED);
+- struct ed_type_pfc_data *pfc_data =
+- engine_get_input_data("physical_flow_changes", node);
+-
+- /* If there are OVS interface changes. Try to handle them incrementally. */
+- if (pfc_data->ovs_ifaces_changed) {
+- if (!physical_handle_ovs_iface_changes(&p_ctx, &fo->flow_table)) {
+- return false;
+- }
+- }
+-
+- if (pfc_data->recompute_physical_flows) {
+- /* This indicates that we need to recompute the physical flows. */
+- physical_clear_unassoc_flows_with_db(&fo->flow_table);
+- physical_clear_dp_flows(&p_ctx, &rt_data->ct_updated_datapaths,
+- &fo->flow_table);
+- physical_run(&p_ctx, &fo->flow_table);
+- return true;
+- }
+-
+- return true;
+-}
+-
+-static bool
+-flow_output_runtime_data_handler(struct engine_node *node,
+- void *data OVS_UNUSED)
++lflow_output_runtime_data_handler(struct engine_node *node,
++ void *data OVS_UNUSED)
+ {
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+@@ -2455,12 +2252,9 @@ flow_output_runtime_data_handler(struct engine_node *node,
+
+ struct lflow_ctx_in l_ctx_in;
+ struct lflow_ctx_out l_ctx_out;
+- struct ed_type_flow_output *fo = data;
++ struct ed_type_lflow_output *fo = data;
+ init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
+
+- struct physical_ctx p_ctx;
+- init_physical_ctx(node, rt_data, &p_ctx);
+-
+ struct tracked_binding_datapath *tdp;
+ HMAP_FOR_EACH (tdp, node, tracked_dp_bindings) {
+ if (tdp->is_new) {
+@@ -2485,12 +2279,12 @@ flow_output_runtime_data_handler(struct engine_node *node,
+ }
+
+ static bool
+-flow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
++lflow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
+ {
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+- struct ed_type_flow_output *fo = data;
++ struct ed_type_lflow_output *fo = data;
+ struct lflow_ctx_in l_ctx_in;
+ struct lflow_ctx_out l_ctx_out;
+ init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
+@@ -2502,12 +2296,12 @@ flow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
+ }
+
+ static bool
+-flow_output_sb_fdb_handler(struct engine_node *node, void *data)
++lflow_output_sb_fdb_handler(struct engine_node *node, void *data)
+ {
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+- struct ed_type_flow_output *fo = data;
++ struct ed_type_lflow_output *fo = data;
+ struct lflow_ctx_in l_ctx_in;
+ struct lflow_ctx_out l_ctx_out;
+ init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
+@@ -2518,6 +2312,205 @@ flow_output_sb_fdb_handler(struct engine_node *node, void *data)
+ return handled;
+ }
+
++struct ed_type_pflow_output {
++ /* Desired physical flows. */
++ struct ovn_desired_flow_table flow_table;
++};
++
++static void init_physical_ctx(struct engine_node *node,
++ struct ed_type_runtime_data *rt_data,
++ struct physical_ctx *p_ctx)
++{
++ struct ovsdb_idl_index *sbrec_port_binding_by_name =
++ engine_ovsdb_node_get_index(
++ engine_get_input("SB_port_binding", node),
++ "name");
++
++ struct sbrec_multicast_group_table *multicast_group_table =
++ (struct sbrec_multicast_group_table *)EN_OVSDB_GET(
++ engine_get_input("SB_multicast_group", node));
++
++ struct sbrec_port_binding_table *port_binding_table =
++ (struct sbrec_port_binding_table *)EN_OVSDB_GET(
++ engine_get_input("SB_port_binding", node));
++
++ struct sbrec_chassis_table *chassis_table =
++ (struct sbrec_chassis_table *)EN_OVSDB_GET(
++ engine_get_input("SB_chassis", node));
++
++ struct ed_type_mff_ovn_geneve *ed_mff_ovn_geneve =
++ engine_get_input_data("mff_ovn_geneve", node);
++
++ struct ovsrec_open_vswitch_table *ovs_table =
++ (struct ovsrec_open_vswitch_table *)EN_OVSDB_GET(
++ engine_get_input("OVS_open_vswitch", node));
++ struct ovsrec_bridge_table *bridge_table =
++ (struct ovsrec_bridge_table *)EN_OVSDB_GET(
++ engine_get_input("OVS_bridge", node));
++ const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
++ const char *chassis_id = get_ovs_chassis_id(ovs_table);
++ const struct sbrec_chassis *chassis = NULL;
++ struct ovsdb_idl_index *sbrec_chassis_by_name =
++ engine_ovsdb_node_get_index(
++ engine_get_input("SB_chassis", node),
++ "name");
++ if (chassis_id) {
++ chassis = chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
++ }
++
++ ovs_assert(br_int && chassis);
++
++ struct ovsrec_interface_table *iface_table =
++ (struct ovsrec_interface_table *)EN_OVSDB_GET(
++ engine_get_input("OVS_interface", node));
++
++ struct ed_type_ct_zones *ct_zones_data =
++ engine_get_input_data("ct_zones", node);
++ struct simap *ct_zones = &ct_zones_data->current;
++
++ p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
++ p_ctx->port_binding_table = port_binding_table;
++ p_ctx->mc_group_table = multicast_group_table;
++ p_ctx->br_int = br_int;
++ p_ctx->chassis_table = chassis_table;
++ p_ctx->iface_table = iface_table;
++ p_ctx->chassis = chassis;
++ p_ctx->active_tunnels = &rt_data->active_tunnels;
++ p_ctx->local_datapaths = &rt_data->local_datapaths;
++ p_ctx->local_lports = &rt_data->local_lports;
++ p_ctx->ct_zones = ct_zones;
++ p_ctx->mff_ovn_geneve = ed_mff_ovn_geneve->mff_ovn_geneve;
++ p_ctx->local_bindings = &rt_data->lbinding_data.bindings;
++}
++
++static void *
++en_pflow_output_init(struct engine_node *node OVS_UNUSED,
++ struct engine_arg *arg OVS_UNUSED)
++{
++ struct ed_type_pflow_output *data = xzalloc(sizeof *data);
++ ovn_desired_flow_table_init(&data->flow_table);
++ return data;
++}
++
++static void
++en_pflow_output_cleanup(void *data OVS_UNUSED)
++{
++ struct ed_type_pflow_output *pfo = data;
++ ovn_desired_flow_table_destroy(&pfo->flow_table);
++}
++
++static void
++en_pflow_output_run(struct engine_node *node, void *data)
++{
++ struct ed_type_pflow_output *pfo = data;
++ struct ovn_desired_flow_table *pflow_table = &pfo->flow_table;
++ static bool first_run = true;
++ if (first_run) {
++ first_run = false;
++ } else {
++ ovn_desired_flow_table_clear(pflow_table);
++ }
++
++ struct ed_type_runtime_data *rt_data =
++ engine_get_input_data("runtime_data", node);
++
++ struct physical_ctx p_ctx;
++ init_physical_ctx(node, rt_data, &p_ctx);
++ physical_run(&p_ctx, pflow_table);
++
++ engine_set_node_state(node, EN_UPDATED);
++}
++
++static bool
++pflow_output_sb_port_binding_handler(struct engine_node *node,
++ void *data)
++{
++ struct ed_type_runtime_data *rt_data =
++ engine_get_input_data("runtime_data", node);
++
++ struct ed_type_pflow_output *pfo = data;
++
++ struct physical_ctx p_ctx;
++ init_physical_ctx(node, rt_data, &p_ctx);
++
++    /* We handle port-binding changes for physical flow processing
++     * only. The lflow_output engine's runtime_data handler takes care of
++     * processing logical flows for any port binding changes.
++ */
++ physical_handle_port_binding_changes(&p_ctx, &pfo->flow_table);
++
++ engine_set_node_state(node, EN_UPDATED);
++ return true;
++}
++
++static bool
++pflow_output_sb_multicast_group_handler(struct engine_node *node, void *data)
++{
++ struct ed_type_runtime_data *rt_data =
++ engine_get_input_data("runtime_data", node);
++
++ struct ed_type_pflow_output *pfo = data;
++
++ struct physical_ctx p_ctx;
++ init_physical_ctx(node, rt_data, &p_ctx);
++
++ physical_handle_mc_group_changes(&p_ctx, &pfo->flow_table);
++
++ engine_set_node_state(node, EN_UPDATED);
++ return true;
++}
++
++static bool
++pflow_output_ovs_iface_handler(struct engine_node *node OVS_UNUSED,
++ void *data OVS_UNUSED)
++{
++ struct ed_type_runtime_data *rt_data =
++ engine_get_input_data("runtime_data", node);
++
++ struct ed_type_pflow_output *pfo = data;
++
++ struct physical_ctx p_ctx;
++ init_physical_ctx(node, rt_data, &p_ctx);
++
++ engine_set_node_state(node, EN_UPDATED);
++ return physical_handle_ovs_iface_changes(&p_ctx, &pfo->flow_table);
++}
++
++static void *
++en_flow_output_init(struct engine_node *node OVS_UNUSED,
++ struct engine_arg *arg OVS_UNUSED)
++{
++ return NULL;
++}
++
++static void
++en_flow_output_cleanup(void *data OVS_UNUSED)
++{
++
++}
++
++static void
++en_flow_output_run(struct engine_node *node OVS_UNUSED, void *data OVS_UNUSED)
++{
++ engine_set_node_state(node, EN_UPDATED);
++}
++
++static bool
++flow_output_pflow_output_handler(struct engine_node *node,
++ void *data OVS_UNUSED)
++{
++ engine_set_node_state(node, EN_UPDATED);
++ return true;
++}
++
++static bool
++flow_output_lflow_output_handler(struct engine_node *node,
++ void *data OVS_UNUSED)
++{
++ engine_set_node_state(node, EN_UPDATED);
++ return true;
++}
++
+ struct ovn_controller_exit_args {
+ bool *exiting;
+ bool *restart;
+@@ -2710,8 +2703,8 @@ main(int argc, char *argv[])
+ ENGINE_NODE_WITH_CLEAR_TRACK_DATA(runtime_data, "runtime_data");
+ ENGINE_NODE(mff_ovn_geneve, "mff_ovn_geneve");
+ ENGINE_NODE(ofctrl_is_connected, "ofctrl_is_connected");
+- ENGINE_NODE_WITH_CLEAR_TRACK_DATA(physical_flow_changes,
+- "physical_flow_changes");
++ ENGINE_NODE(pflow_output, "physical_flow_output");
++ ENGINE_NODE(lflow_output, "logical_flow_output");
+ ENGINE_NODE(flow_output, "flow_output");
+ ENGINE_NODE(addr_sets, "addr_sets");
+ ENGINE_NODE_WITH_CLEAR_TRACK_DATA(port_groups, "port_groups");
+@@ -2735,58 +2728,68 @@ main(int argc, char *argv[])
+ engine_add_input(&en_port_groups, &en_runtime_data,
+ port_groups_runtime_data_handler);
+
+- /* Engine node physical_flow_changes indicates whether
+- * we can recompute only physical flows or we can
+- * incrementally process the physical flows.
+- *
+- * Note: The order of inputs is important, all OVS interface changes must
++ /* Note: The order of inputs is important, all OVS interface changes must
+ * be handled before any ct_zone changes.
+ */
+- engine_add_input(&en_physical_flow_changes, &en_ovs_interface,
+- physical_flow_changes_ovs_iface_handler);
+- engine_add_input(&en_physical_flow_changes, &en_ct_zones,
+- physical_flow_changes_ct_zones_handler);
+-
+- engine_add_input(&en_flow_output, &en_addr_sets,
+- flow_output_addr_sets_handler);
+- engine_add_input(&en_flow_output, &en_port_groups,
+- flow_output_port_groups_handler);
+- engine_add_input(&en_flow_output, &en_runtime_data,
+- flow_output_runtime_data_handler);
+- engine_add_input(&en_flow_output, &en_mff_ovn_geneve, NULL);
+- engine_add_input(&en_flow_output, &en_physical_flow_changes,
+- flow_output_physical_flow_changes_handler);
+-
+- /* We need this input nodes for only data. Hence the noop handler. */
+- engine_add_input(&en_flow_output, &en_ct_zones, engine_noop_handler);
+- engine_add_input(&en_flow_output, &en_ovs_interface, engine_noop_handler);
+-
+- engine_add_input(&en_flow_output, &en_ovs_open_vswitch, NULL);
+- engine_add_input(&en_flow_output, &en_ovs_bridge, NULL);
+-
+- engine_add_input(&en_flow_output, &en_sb_chassis, NULL);
+- engine_add_input(&en_flow_output, &en_sb_encap, NULL);
+- engine_add_input(&en_flow_output, &en_sb_multicast_group,
+- flow_output_sb_multicast_group_handler);
+- engine_add_input(&en_flow_output, &en_sb_port_binding,
+- flow_output_sb_port_binding_handler);
+- engine_add_input(&en_flow_output, &en_sb_mac_binding,
+- flow_output_sb_mac_binding_handler);
+- engine_add_input(&en_flow_output, &en_sb_logical_flow,
+- flow_output_sb_logical_flow_handler);
++ engine_add_input(&en_pflow_output, &en_ovs_interface,
++ pflow_output_ovs_iface_handler);
++ engine_add_input(&en_pflow_output, &en_ct_zones, NULL);
++ engine_add_input(&en_pflow_output, &en_sb_chassis, NULL);
++ engine_add_input(&en_pflow_output, &en_sb_port_binding,
++ pflow_output_sb_port_binding_handler);
++ engine_add_input(&en_pflow_output, &en_sb_multicast_group,
++ pflow_output_sb_multicast_group_handler);
++
++ engine_add_input(&en_pflow_output, &en_runtime_data,
++ NULL);
++ engine_add_input(&en_pflow_output, &en_sb_encap, NULL);
++ engine_add_input(&en_pflow_output, &en_mff_ovn_geneve, NULL);
++ engine_add_input(&en_pflow_output, &en_ovs_open_vswitch, NULL);
++ engine_add_input(&en_pflow_output, &en_ovs_bridge, NULL);
++
++ engine_add_input(&en_lflow_output, &en_addr_sets,
++ lflow_output_addr_sets_handler);
++ engine_add_input(&en_lflow_output, &en_port_groups,
++ lflow_output_port_groups_handler);
++ engine_add_input(&en_lflow_output, &en_runtime_data,
++ lflow_output_runtime_data_handler);
++
++ /* We need these input nodes only for the data. Hence the noop handler.
++     * Changes to en_sb_multicast_group are handled by the pflow_output
++     * engine node.
++     */
++ engine_add_input(&en_lflow_output, &en_sb_multicast_group,
++ engine_noop_handler);
++
++ engine_add_input(&en_lflow_output, &en_sb_chassis, NULL);
++
++    /* Any changes to the port binding need not be handled
++     * by the lflow_output engine. We still need sb_port_binding
++     * as input to access the port binding data in lflow.c,
++     * hence the noop handler. */
++ engine_add_input(&en_lflow_output, &en_sb_port_binding,
++ engine_noop_handler);
++
++ engine_add_input(&en_lflow_output, &en_ovs_open_vswitch, NULL);
++ engine_add_input(&en_lflow_output, &en_ovs_bridge, NULL);
++
++ engine_add_input(&en_lflow_output, &en_sb_mac_binding,
++ lflow_output_sb_mac_binding_handler);
++ engine_add_input(&en_lflow_output, &en_sb_logical_flow,
++ lflow_output_sb_logical_flow_handler);
+ /* Using a noop handler since we don't really need any data from datapath
+ * groups or a full recompute. Update of a datapath group will put
+ * logical flow into the tracked list, so the logical flow handler will
+ * process all changes. */
+- engine_add_input(&en_flow_output, &en_sb_logical_dp_group,
++ engine_add_input(&en_lflow_output, &en_sb_logical_dp_group,
+ engine_noop_handler);
+- engine_add_input(&en_flow_output, &en_sb_dhcp_options, NULL);
+- engine_add_input(&en_flow_output, &en_sb_dhcpv6_options, NULL);
+- engine_add_input(&en_flow_output, &en_sb_dns, NULL);
+- engine_add_input(&en_flow_output, &en_sb_load_balancer,
+- flow_output_sb_load_balancer_handler);
+- engine_add_input(&en_flow_output, &en_sb_fdb,
+- flow_output_sb_fdb_handler);
++ engine_add_input(&en_lflow_output, &en_sb_dhcp_options, NULL);
++ engine_add_input(&en_lflow_output, &en_sb_dhcpv6_options, NULL);
++ engine_add_input(&en_lflow_output, &en_sb_dns, NULL);
++ engine_add_input(&en_lflow_output, &en_sb_load_balancer,
++ lflow_output_sb_load_balancer_handler);
++ engine_add_input(&en_lflow_output, &en_sb_fdb,
++ lflow_output_sb_fdb_handler);
+
+ engine_add_input(&en_ct_zones, &en_ovs_open_vswitch, NULL);
+ engine_add_input(&en_ct_zones, &en_ovs_bridge, NULL);
+@@ -2808,12 +2811,20 @@ main(int argc, char *argv[])
+ /* The OVS interface handler for runtime_data changes MUST be executed
+ * after the sb_port_binding_handler as port_binding deletes must be
+ * processed first.
++ *
++ * runtime_data needs to access the OVS Port data and hence a noop
++ * handler.
+ */
+ engine_add_input(&en_runtime_data, &en_ovs_port,
+ engine_noop_handler);
+ engine_add_input(&en_runtime_data, &en_ovs_interface,
+ runtime_data_ovs_interface_handler);
+
++ engine_add_input(&en_flow_output, &en_lflow_output,
++ flow_output_lflow_output_handler);
++ engine_add_input(&en_flow_output, &en_pflow_output,
++ flow_output_pflow_output_handler);
++
+ struct engine_arg engine_arg = {
+ .sb_idl = ovnsb_idl_loop.idl,
+ .ovs_idl = ovs_idl_loop.idl,
+@@ -2836,25 +2847,27 @@ main(int argc, char *argv[])
+ engine_ovsdb_node_add_index(&en_sb_datapath_binding, "key",
+ sbrec_datapath_binding_by_key);
+
+- struct ed_type_flow_output *flow_output_data =
+- engine_get_internal_data(&en_flow_output);
++ struct ed_type_lflow_output *lflow_output_data =
++ engine_get_internal_data(&en_lflow_output);
++    struct ed_type_pflow_output *pflow_output_data =
++ engine_get_internal_data(&en_pflow_output);
+ struct ed_type_ct_zones *ct_zones_data =
+ engine_get_internal_data(&en_ct_zones);
+ struct ed_type_runtime_data *runtime_data =
+ engine_get_internal_data(&en_runtime_data);
+
+- ofctrl_init(&flow_output_data->group_table,
+- &flow_output_data->meter_table,
++ ofctrl_init(&lflow_output_data->group_table,
++ &lflow_output_data->meter_table,
+ get_ofctrl_probe_interval(ovs_idl_loop.idl));
+ ofctrl_seqno_init();
+
+ unixctl_command_register("group-table-list", "", 0, 0,
+ extend_table_list,
+- &flow_output_data->group_table);
++ &lflow_output_data->group_table);
+
+ unixctl_command_register("meter-table-list", "", 0, 0,
+ extend_table_list,
+- &flow_output_data->meter_table);
++ &lflow_output_data->meter_table);
+
+ unixctl_command_register("ct-zone-list", "", 0, 0,
+ ct_zone_list,
+@@ -2868,14 +2881,14 @@ main(int argc, char *argv[])
+ NULL);
+ unixctl_command_register("lflow-cache/flush", "", 0, 0,
+ lflow_cache_flush_cmd,
+- &flow_output_data->pd);
++ &lflow_output_data->pd);
+ /* Keep deprecated 'flush-lflow-cache' command for now. */
+ unixctl_command_register("flush-lflow-cache", "[deprecated]", 0, 0,
+ lflow_cache_flush_cmd,
+- &flow_output_data->pd);
++ &lflow_output_data->pd);
+ unixctl_command_register("lflow-cache/show-stats", "", 0, 0,
+ lflow_cache_show_stats_cmd,
+- &flow_output_data->pd);
++ &lflow_output_data->pd);
+
+ bool reset_ovnsb_idl_min_index = false;
+ unixctl_command_register("sb-cluster-state-reset", "", 0, 0,
+@@ -2981,8 +2994,10 @@ main(int argc, char *argv[])
+ ovsrec_bridge_table_get(ovs_idl_loop.idl);
+ const struct ovsrec_open_vswitch_table *ovs_table =
+ ovsrec_open_vswitch_table_get(ovs_idl_loop.idl);
+- const struct ovsrec_bridge *br_int =
+- process_br_int(ovs_idl_txn, bridge_table, ovs_table);
++ const struct ovsrec_bridge *br_int = NULL;
++ const struct ovsrec_datapath *br_int_dp = NULL;
++ process_br_int(ovs_idl_txn, bridge_table, ovs_table,
++ &br_int, &br_int_dp);
+
+ if (ovsdb_idl_has_ever_connected(ovnsb_idl_loop.idl) &&
+ northd_version_match) {
+@@ -3013,6 +3028,13 @@ main(int argc, char *argv[])
+ &chassis_private);
+ }
+
++ /* If any OVS feature support changed, force a full recompute. */
++ if (br_int_dp
++ && ovs_feature_support_update(&br_int_dp->capabilities)) {
++ VLOG_INFO("OVS feature set changed, force recompute.");
++ engine_set_force_recompute(true);
++ }
++
+ if (br_int) {
+ ct_zones_data = engine_get_data(&en_ct_zones);
+ if (ct_zones_data) {
+@@ -3121,13 +3143,17 @@ main(int argc, char *argv[])
+ runtime_data ? &runtime_data->lbinding_data : NULL;
+ if_status_mgr_update(if_mgr, binding_data);
+
+- flow_output_data = engine_get_data(&en_flow_output);
+- if (flow_output_data && ct_zones_data) {
+- ofctrl_put(&flow_output_data->flow_table,
++ lflow_output_data = engine_get_data(&en_lflow_output);
++ pflow_output_data = engine_get_data(&en_pflow_output);
++ if (lflow_output_data && pflow_output_data &&
++ ct_zones_data) {
++ ofctrl_put(&lflow_output_data->flow_table,
++ &pflow_output_data->flow_table,
+ &ct_zones_data->pending,
+ sbrec_meter_table_get(ovnsb_idl_loop.idl),
+ ofctrl_seqno_get_req_cfg(),
+- engine_node_changed(&en_flow_output));
++ engine_node_changed(&en_lflow_output),
++ engine_node_changed(&en_pflow_output));
+ }
+ ofctrl_seqno_run(ofctrl_get_cur_cfg());
+ if_status_mgr_run(if_mgr, binding_data, !ovnsb_idl_txn,
+@@ -3495,7 +3521,7 @@ lflow_cache_flush_cmd(struct unixctl_conn *conn OVS_UNUSED,
+ void *arg_)
+ {
+ VLOG_INFO("User triggered lflow cache flush.");
+- struct flow_output_persistent_data *fo_pd = arg_;
++ struct lflow_output_persistent_data *fo_pd = arg_;
+ lflow_cache_flush(fo_pd->lflow_cache);
+ fo_pd->conj_id_ofs = 1;
+ engine_set_force_recompute(true);
+@@ -3507,7 +3533,7 @@ static void
+ lflow_cache_show_stats_cmd(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED, void *arg_)
+ {
+- struct flow_output_persistent_data *fo_pd = arg_;
++ struct lflow_output_persistent_data *fo_pd = arg_;
+ struct lflow_cache *lc = fo_pd->lflow_cache;
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+diff --git a/controller/ovn-controller.h b/controller/ovn-controller.h
+index 5d9466880..2bf1fecbf 100644
+--- a/controller/ovn-controller.h
++++ b/controller/ovn-controller.h
+@@ -67,6 +67,8 @@ struct local_datapath {
+
+ size_t n_peer_ports;
+ size_t n_allocated_peer_ports;
++
++ struct shash external_ports;
+ };
+
+ struct local_datapath *get_local_datapath(const struct hmap *,
+diff --git a/controller/physical.c b/controller/physical.c
+index 018e09540..a9a3dc720 100644
+--- a/controller/physical.c
++++ b/controller/physical.c
+@@ -1272,6 +1272,52 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
+ ofctrl_add_flow(flow_table, OFTABLE_CHECK_LOOPBACK, 160,
+ binding->header_.uuid.parts[0], &match,
+ ofpacts_p, &binding->header_.uuid);
++
++        /* localport traffic directed to external ports is *not* local */
++ struct shash_node *node;
++ SHASH_FOR_EACH (node, &ld->external_ports) {
++ const struct sbrec_port_binding *pb = node->data;
++
++ /* skip ports that are not claimed by this chassis */
++ if (!pb->chassis) {
++ continue;
++ }
++ if (strcmp(pb->chassis->name, chassis->name)) {
++ continue;
++ }
++
++ ofpbuf_clear(ofpacts_p);
++ for (int i = 0; i < MFF_N_LOG_REGS; i++) {
++ put_load(0, MFF_REG0 + i, 0, 32, ofpacts_p);
++ }
++ put_resubmit(OFTABLE_LOG_EGRESS_PIPELINE, ofpacts_p);
++
++ /* allow traffic directed to external MAC address */
++ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
++ for (int i = 0; i < pb->n_mac; i++) {
++ char *err_str;
++ struct eth_addr peer_mac;
++ if ((err_str = str_to_mac(pb->mac[i], &peer_mac))) {
++ VLOG_WARN_RL(
++ &rl, "Parsing MAC failed for external port: %s, "
++ "with error: %s", pb->logical_port, err_str);
++ free(err_str);
++ continue;
++ }
++
++ match_init_catchall(&match);
++ match_set_metadata(&match, htonll(dp_key));
++ match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0,
++ port_key);
++ match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
++ MLF_LOCALPORT, MLF_LOCALPORT);
++ match_set_dl_dst(&match, peer_mac);
++
++ ofctrl_add_flow(flow_table, OFTABLE_CHECK_LOOPBACK, 170,
++ binding->header_.uuid.parts[0], &match,
++ ofpacts_p, &binding->header_.uuid);
++ }
++ }
+ }
+
+ } else if (!tun && !is_ha_remote) {
+@@ -1953,22 +1999,3 @@ physical_clear_unassoc_flows_with_db(struct ovn_desired_flow_table *flow_table)
+ ofctrl_remove_flows(flow_table, hc_uuid);
+ }
+ }
+-
+-void
+-physical_clear_dp_flows(struct physical_ctx *p_ctx,
+- struct hmapx *ct_updated_datapaths,
+- struct ovn_desired_flow_table *flow_table)
+-{
+- const struct sbrec_port_binding *binding;
+- SBREC_PORT_BINDING_TABLE_FOR_EACH (binding, p_ctx->port_binding_table) {
+- if (!hmapx_find(ct_updated_datapaths, binding->datapath)) {
+- continue;
+- }
+- const struct sbrec_port_binding *peer =
+- get_binding_peer(p_ctx->sbrec_port_binding_by_name, binding);
+- ofctrl_remove_flows(flow_table, &binding->header_.uuid);
+- if (peer) {
+- ofctrl_remove_flows(flow_table, &peer->header_.uuid);
+- }
+- }
+-}
+diff --git a/controller/physical.h b/controller/physical.h
+index 0bf13f268..feab41df4 100644
+--- a/controller/physical.h
++++ b/controller/physical.h
+@@ -56,16 +56,12 @@ struct physical_ctx {
+ const struct simap *ct_zones;
+ enum mf_field_id mff_ovn_geneve;
+ struct shash *local_bindings;
+- struct hmapx *ct_updated_datapaths;
+ };
+
+ void physical_register_ovs_idl(struct ovsdb_idl *);
+ void physical_run(struct physical_ctx *,
+ struct ovn_desired_flow_table *);
+ void physical_clear_unassoc_flows_with_db(struct ovn_desired_flow_table *);
+-void physical_clear_dp_flows(struct physical_ctx *p_ctx,
+- struct hmapx *ct_updated_datapaths,
+- struct ovn_desired_flow_table *flow_table);
+ void physical_handle_port_binding_changes(struct physical_ctx *,
+ struct ovn_desired_flow_table *);
+ void physical_handle_mc_group_changes(struct physical_ctx *,
+diff --git a/include/ovn/actions.h b/include/ovn/actions.h
+index 040213177..f5eb01eb7 100644
+--- a/include/ovn/actions.h
++++ b/include/ovn/actions.h
+@@ -25,6 +25,7 @@
+ #include "openvswitch/hmap.h"
+ #include "openvswitch/uuid.h"
+ #include "util.h"
++#include "ovn/features.h"
+
+ struct expr;
+ struct lexer;
+diff --git a/include/ovn/features.h b/include/ovn/features.h
+index 10ee46fcd..c35d59b14 100644
+--- a/include/ovn/features.h
++++ b/include/ovn/features.h
+@@ -16,7 +16,25 @@
+ #ifndef OVN_FEATURES_H
+ #define OVN_FEATURES_H 1
+
++#include <stdbool.h>
++
++#include "smap.h"
++
+ /* ovn-controller supported feature names. */
+ #define OVN_FEATURE_PORT_UP_NOTIF "port-up-notif"
+
++/* OVS datapath supported features. Depending on their availability, OVN
++ * might generate different types of OpenFlow flows.
++ */
++enum ovs_feature_support_bits {
++ OVS_CT_ZERO_SNAT_SUPPORT_BIT,
++};
++
++enum ovs_feature_value {
++ OVS_CT_ZERO_SNAT_SUPPORT = (1 << OVS_CT_ZERO_SNAT_SUPPORT_BIT),
++};
++
++bool ovs_feature_is_supported(enum ovs_feature_value feature);
++bool ovs_feature_support_update(const struct smap *ovs_capabilities);
++
+ #endif
+diff --git a/lib/actions.c b/lib/actions.c
+index b3433f49e..7010fab2b 100644
+--- a/lib/actions.c
++++ b/lib/actions.c
+@@ -742,6 +742,22 @@ encode_CT_COMMIT_V1(const struct ovnact_ct_commit_v1 *cc,
+ ct->zone_src.ofs = 0;
+ ct->zone_src.n_bits = 16;
+
++ /* If the datapath supports all-zero SNAT then use it to avoid tuple
++ * collisions at commit time between NATed and firewalled-only sessions.
++ */
++
++ if (ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT)) {
++ size_t nat_offset = ofpacts->size;
++ ofpbuf_pull(ofpacts, nat_offset);
++
++ struct ofpact_nat *nat = ofpact_put_NAT(ofpacts);
++ nat->flags = 0;
++ nat->range_af = AF_UNSPEC;
++ nat->flags |= NX_NAT_F_SRC;
++ ofpacts->header = ofpbuf_push_uninit(ofpacts, nat_offset);
++ ct = ofpacts->header;
++ }
++
+ size_t set_field_offset = ofpacts->size;
+ ofpbuf_pull(ofpacts, set_field_offset);
+
+@@ -792,6 +808,21 @@ encode_CT_COMMIT_V2(const struct ovnact_nest *on,
+ ct->zone_src.ofs = 0;
+ ct->zone_src.n_bits = 16;
+
++ /* If the datapath supports all-zero SNAT then use it to avoid tuple
++ * collisions at commit time between NATed and firewalled-only sessions.
++ */
++ if (ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT)) {
++ size_t nat_offset = ofpacts->size;
++ ofpbuf_pull(ofpacts, nat_offset);
++
++ struct ofpact_nat *nat = ofpact_put_NAT(ofpacts);
++ nat->flags = 0;
++ nat->range_af = AF_UNSPEC;
++ nat->flags |= NX_NAT_F_SRC;
++ ofpacts->header = ofpbuf_push_uninit(ofpacts, nat_offset);
++ ct = ofpacts->header;
++ }
++
+ size_t set_field_offset = ofpacts->size;
+ ofpbuf_pull(ofpacts, set_field_offset);
+
+diff --git a/lib/automake.mk b/lib/automake.mk
+index 781be2109..917b28e1e 100644
+--- a/lib/automake.mk
++++ b/lib/automake.mk
+@@ -13,6 +13,7 @@ lib_libovn_la_SOURCES = \
+ lib/expr.c \
+ lib/extend-table.h \
+ lib/extend-table.c \
++ lib/features.c \
+ lib/ovn-parallel-hmap.h \
+ lib/ovn-parallel-hmap.c \
+ lib/ip-mcast-index.c \
+diff --git a/lib/features.c b/lib/features.c
+new file mode 100644
+index 000000000..87d04ee3f
+--- /dev/null
++++ b/lib/features.c
+@@ -0,0 +1,84 @@
++/* Copyright (c) 2021, Red Hat, Inc.
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at:
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++#include <config.h>
++#include <stdint.h>
++#include <stdlib.h>
++
++#include "lib/util.h"
++#include "openvswitch/vlog.h"
++#include "ovn/features.h"
++
++VLOG_DEFINE_THIS_MODULE(features);
++
++struct ovs_feature {
++ enum ovs_feature_value value;
++ const char *name;
++};
++
++static struct ovs_feature all_ovs_features[] = {
++ {
++ .value = OVS_CT_ZERO_SNAT_SUPPORT,
++ .name = "ct_zero_snat"
++ },
++};
++
++/* A bitmap of OVS features that have been detected as 'supported'. */
++static uint32_t supported_ovs_features;
++
++static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
++
++static bool
++ovs_feature_is_valid(enum ovs_feature_value feature)
++{
++ switch (feature) {
++ case OVS_CT_ZERO_SNAT_SUPPORT:
++ return true;
++ default:
++ return false;
++ }
++}
++
++bool
++ovs_feature_is_supported(enum ovs_feature_value feature)
++{
++ ovs_assert(ovs_feature_is_valid(feature));
++ return supported_ovs_features & feature;
++}
++
++/* Returns 'true' if the set of tracked OVS features has been updated. */
++bool
++ovs_feature_support_update(const struct smap *ovs_capabilities)
++{
++ bool updated = false;
++
++ for (size_t i = 0; i < ARRAY_SIZE(all_ovs_features); i++) {
++ enum ovs_feature_value value = all_ovs_features[i].value;
++ const char *name = all_ovs_features[i].name;
++ bool old_state = supported_ovs_features & value;
++ bool new_state = smap_get_bool(ovs_capabilities, name, false);
++ if (new_state != old_state) {
++ updated = true;
++ if (new_state) {
++ supported_ovs_features |= value;
++ } else {
++ supported_ovs_features &= ~value;
++ }
++ VLOG_INFO_RL(&rl, "OVS Feature: %s, state: %s", name,
++ new_state ? "supported" : "not supported");
++ }
++ }
++ return updated;
++}
+diff --git a/lib/test-ovn-features.c b/lib/test-ovn-features.c
+new file mode 100644
+index 000000000..deb97581e
+--- /dev/null
++++ b/lib/test-ovn-features.c
+@@ -0,0 +1,56 @@
++/* Copyright (c) 2021, Red Hat, Inc.
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at:
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++#include <config.h>
++
++#include "ovn/features.h"
++#include "tests/ovstest.h"
++
++static void
++test_ovn_features(struct ovs_cmdl_context *ctx OVS_UNUSED)
++{
++ ovs_assert(!ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT));
++
++ struct smap features = SMAP_INITIALIZER(&features);
++
++ smap_add(&features, "ct_zero_snat", "false");
++ ovs_assert(!ovs_feature_support_update(&features));
++ ovs_assert(!ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT));
++
++ smap_replace(&features, "ct_zero_snat", "true");
++ ovs_assert(ovs_feature_support_update(&features));
++ ovs_assert(ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT));
++
++ smap_add(&features, "unknown_feature", "true");
++ ovs_assert(!ovs_feature_support_update(&features));
++
++ smap_destroy(&features);
++}
++
++static void
++test_ovn_features_main(int argc, char *argv[])
++{
++ set_program_name(argv[0]);
++ static const struct ovs_cmdl_command commands[] = {
++ {"run", NULL, 0, 0, test_ovn_features, OVS_RO},
++ {NULL, NULL, 0, 0, NULL, OVS_RO},
++ };
++ struct ovs_cmdl_context ctx;
++ ctx.argc = argc - 1;
++ ctx.argv = argv + 1;
++ ovs_cmdl_run_command(&ctx, commands);
++}
++
++OVSTEST_REGISTER("test-ovn-features", test_ovn_features_main);
+diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
+index 407464602..890775797 100644
+--- a/northd/ovn-northd.8.xml
++++ b/northd/ovn-northd.8.xml
+@@ -1072,8 +1072,10 @@ output;
+           <code>localport</code> ports) that are down (unless
+           <code>ignore_lsp_down</code> is configured as true in <code>options</code>
+           column of <code>NB_Global</code> table of the Northbound
+-          database), for logical ports of type <code>virtual</code> and for
+-          logical ports with 'unknown' address set.
++          database), for logical ports of type <code>virtual</code>, for
++          logical ports with 'unknown' address set and for logical ports of
++          a logical switch configured with
++          <code>other_config:vlan-passthru=true</code>.
+
+
+
+@@ -3710,6 +3712,13 @@ icmp6 {
+         external ip and <var>D</var> is NAT external mac.
+
+
++      <li>
++        For each NAT rule in the OVN Northbound database that can
++        be handled in a distributed manner, a priority-80 logical flow
++        with drop action if the NAT logical port is a virtual port not
++        claimed by any chassis yet.
++      </li>
++
+       <li>
+         A priority-50 logical flow with match
+         <code>outport == <var>GW</var></code> has actions
+diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
+index 3dae7bb1c..576b6cbc9 100644
+--- a/northd/ovn-northd.c
++++ b/northd/ovn-northd.c
+@@ -7007,6 +7007,10 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
+ return;
+ }
+
++ if (is_vlan_transparent(op->od)) {
++ return;
++ }
++
+ for (size_t i = 0; i < op->n_lsp_addrs; i++) {
+ for (size_t j = 0; j < op->lsp_addrs[i].n_ipv4_addrs; j++) {
+ ds_clear(match);
+@@ -7371,6 +7375,7 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
+
+ struct mcast_switch_info *mcast_sw_info =
+ &igmp_group->datapath->mcast_info.sw;
++ uint64_t table_size = mcast_sw_info->table_size;
+
+ if (IN6_IS_ADDR_V4MAPPED(&igmp_group->address)) {
+ /* RFC 4541, section 2.1.2, item 2: Skip groups in the 224.0.0.X
+@@ -7381,10 +7386,8 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
+ if (ip_is_local_multicast(group_address)) {
+ return;
+ }
+-
+ if (atomic_compare_exchange_strong(
+- &mcast_sw_info->active_v4_flows,
+- (uint64_t *) &mcast_sw_info->table_size,
++ &mcast_sw_info->active_v4_flows, &table_size,
+ mcast_sw_info->table_size)) {
+ return;
+ }
+@@ -7399,8 +7402,7 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
+ return;
+ }
+ if (atomic_compare_exchange_strong(
+- &mcast_sw_info->active_v6_flows,
+- (uint64_t *) &mcast_sw_info->table_size,
++ &mcast_sw_info->active_v6_flows, &table_size,
+ mcast_sw_info->table_size)) {
+ return;
+ }
+@@ -11656,6 +11658,7 @@ lrouter_check_nat_entry(struct ovn_datapath *od, const struct nbrec_nat *nat,
+ static void
+ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od,
+ struct hmap *lflows,
++ struct hmap *ports,
+ struct shash *meter_groups,
+ struct hmap *lbs,
+ struct ds *match, struct ds *actions)
+@@ -11763,10 +11766,21 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od,
+ ds_clear(match);
+ ds_clear(actions);
+ ds_put_format(match,
+- "ip%s.src == %s && outport == %s && "
+- "is_chassis_resident(\"%s\")",
++ "ip%s.src == %s && outport == %s",
+ is_v6 ? "6" : "4", nat->logical_ip,
+- od->l3dgw_port->json_key, nat->logical_port);
++ od->l3dgw_port->json_key);
++            /* Add a rule to drop traffic from a distributed NAT if
++             * the virtual port has not been claimed yet because otherwise
++             * the traffic will be centralized, misconfiguring the TOR switch.
++             */
++ struct ovn_port *op = ovn_port_find(ports, nat->logical_port);
++ if (op && op->nbsp && !strcmp(op->nbsp->type, "virtual")) {
++ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT,
++ 80, ds_cstr(match), "drop;",
++ &nat->header_);
++ }
++ ds_put_format(match, " && is_chassis_resident(\"%s\")",
++ nat->logical_port);
+ ds_put_format(actions, "eth.src = %s; %s = %s; next;",
+ nat->external_mac,
+ is_v6 ? REG_SRC_IPV6 : REG_SRC_IPV4,
+@@ -11800,6 +11814,7 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od,
+ ds_put_format(actions,
+ "clone { ct_clear; "
+ "inport = outport; outport = \"\"; "
++ "eth.dst <-> eth.src; "
+ "flags = 0; flags.loopback = 1; ");
+ for (int j = 0; j < MFF_N_LOG_REGS; j++) {
+ ds_put_format(actions, "reg%d = 0; ", j);
+@@ -11925,8 +11940,9 @@ build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od,
+ &lsi->actions);
+ build_misc_local_traffic_drop_flows_for_lrouter(od, lsi->lflows);
+ build_lrouter_arp_nd_for_datapath(od, lsi->lflows);
+- build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->meter_groups,
+- lsi->lbs, &lsi->match, &lsi->actions);
++ build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->ports,
++ lsi->meter_groups, lsi->lbs, &lsi->match,
++ &lsi->actions);
+ }
+
+ /* Helper function to combine all lflow generation which is iterated by port.
+diff --git a/northd/ovn_northd.dl b/northd/ovn_northd.dl
+index 3afa80a3b..46da9a3a4 100644
+--- a/northd/ovn_northd.dl
++++ b/northd/ovn_northd.dl
+@@ -3309,7 +3309,8 @@ for (CheckLspIsUp[check_lsp_is_up]) {
+ ((lsp_is_up(lsp) or not check_lsp_is_up)
+ or lsp.__type == "router" or lsp.__type == "localport") and
+ lsp.__type != "external" and lsp.__type != "virtual" and
+- not lsp.addresses.contains("unknown"))
++ not lsp.addresses.contains("unknown") and
++ not sw.is_vlan_transparent)
+ {
+ var __match = "arp.tpa == ${addr.addr} && arp.op == 1" in
+ {
+@@ -3359,7 +3360,8 @@ for (SwitchPortIPv6Address(.port = &SwitchPort{.lsp = lsp, .json_name = json_nam
+ .ea = ea, .addr = addr)
+ if lsp.is_enabled() and
+ (lsp_is_up(lsp) or lsp.__type == "router" or lsp.__type == "localport") and
+- lsp.__type != "external" and lsp.__type != "virtual")
++ lsp.__type != "external" and lsp.__type != "virtual" and
++ not sw.is_vlan_transparent)
+ {
+ var __match = "nd_ns && ip6.dst == {${addr.addr}, ${addr.solicited_node()}} && nd.target == ${addr.addr}" in
+ var actions = "${if (lsp.__type == \"router\") \"nd_na_router\" else \"nd_na\"} { "
+@@ -5555,6 +5557,10 @@ for (rp in &RouterPort(.router = &Router{._uuid = lr_uuid, .options = lr_options
+ }
+ }
+
++relation VirtualLogicalPort(logical_port: Option<string>)
++VirtualLogicalPort(Some{logical_port}) :-
++ lsp in &nb::Logical_Switch_Port(.name = logical_port, .__type = "virtual").
++
+ /* NAT rules are only valid on Gateway routers and routers with
+ * l3dgw_port (router has a port with "redirect-chassis"
+ * specified). */
+@@ -5649,7 +5655,7 @@ for (r in &Router(._uuid = lr_uuid,
+ } in
+ if (nat.nat.__type == "dnat" or nat.nat.__type == "dnat_and_snat") {
+ None = l3dgw_port in
+- var __match = "ip && ip4.dst == ${nat.nat.external_ip}" in
++ var __match = "ip && ${ipX}.dst == ${nat.nat.external_ip}" in
+ (var ext_ip_match, var ext_flow) = lrouter_nat_add_ext_ip_match(
+ r, nat, __match, ipX, true, mask) in
+ {
+@@ -5900,6 +5906,17 @@ for (r in &Router(._uuid = lr_uuid,
+ .actions = actions,
+ .external_ids = stage_hint(nat.nat._uuid));
+
++ for (VirtualLogicalPort(nat.nat.logical_port)) {
++ Some{var gwport} = l3dgw_port in
++ Flow(.logical_datapath = lr_uuid,
++ .stage = s_ROUTER_IN_GW_REDIRECT(),
++ .priority = 80,
++ .__match = "${ipX}.src == ${nat.nat.logical_ip} && "
++ "outport == ${json_string_escape(gwport.name)}",
++ .actions = "drop;",
++ .external_ids = stage_hint(nat.nat._uuid))
++ };
++
+ /* Egress Loopback table: For NAT on a distributed router.
+ * If packets in the egress pipeline on the distributed
+ * gateway port have ip.dst matching a NAT external IP, then
+@@ -5925,6 +5942,7 @@ for (r in &Router(._uuid = lr_uuid,
+ var actions =
+ "clone { ct_clear; "
+ "inport = outport; outport = \"\"; "
++ "eth.dst <-> eth.src; "
+ "flags = 0; flags.loopback = 1; " ++
+ regs.join("") ++
+ "${rEGBIT_EGRESS_LOOPBACK()} = 1; "
+diff --git a/tests/automake.mk b/tests/automake.mk
+index 742e5cff2..a8ec64212 100644
+--- a/tests/automake.mk
++++ b/tests/automake.mk
+@@ -34,6 +34,7 @@ TESTSUITE_AT = \
+ tests/ovn-performance.at \
+ tests/ovn-ofctrl-seqno.at \
+ tests/ovn-ipam.at \
++ tests/ovn-features.at \
+ tests/ovn-lflow-cache.at \
+ tests/ovn-ipsec.at
+
+@@ -207,6 +208,7 @@ $(srcdir)/package.m4: $(top_srcdir)/configure.ac
+
+ noinst_PROGRAMS += tests/ovstest
+ tests_ovstest_SOURCES = \
++ include/ovn/features.h \
+ tests/ovstest.c \
+ tests/ovstest.h \
+ tests/test-utils.c \
+@@ -218,6 +220,7 @@ tests_ovstest_SOURCES = \
+ controller/lflow-cache.h \
+ controller/ofctrl-seqno.c \
+ controller/ofctrl-seqno.h \
++ lib/test-ovn-features.c \
+ northd/test-ipam.c \
+ northd/ipam.c \
+ northd/ipam.h
+diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
+index 72c07b3fa..9c25193e8 100644
+--- a/tests/ovn-controller.at
++++ b/tests/ovn-controller.at
+@@ -151,23 +151,24 @@ sysid=$(ovs-vsctl get Open_vSwitch . external_ids:system-id)
+ check_datapath_type () {
+ datapath_type=$1
+ chassis_datapath_type=$(ovn-sbctl get Chassis ${sysid} other_config:datapath-type | sed -e 's/"//g') #"
+- test "${datapath_type}" = "${chassis_datapath_type}"
++ ovs_datapath_type=$(ovs-vsctl get Bridge br-int datapath-type)
++ test "${datapath_type}" = "${chassis_datapath_type}" && test "${datapath_type}" = "${ovs_datapath_type}"
+ }
+
+-OVS_WAIT_UNTIL([check_datapath_type ""])
++OVS_WAIT_UNTIL([check_datapath_type system])
+
+ ovs-vsctl set Bridge br-int datapath-type=foo
+ OVS_WAIT_UNTIL([check_datapath_type foo])
+
+ # Change "ovn-bridge-mappings" value. It should not change the "datapath-type".
+ ovs-vsctl set Open_vSwitch . external_ids:ovn-bridge-mappings=foo-mapping
+-check_datapath_type foo
++AT_CHECK([check_datapath_type foo])
+
+ ovs-vsctl set Bridge br-int datapath-type=bar
+ OVS_WAIT_UNTIL([check_datapath_type bar])
+
+ ovs-vsctl set Bridge br-int datapath-type=\"\"
+-OVS_WAIT_UNTIL([check_datapath_type ""])
++OVS_WAIT_UNTIL([check_datapath_type system])
+
+ # Set the datapath_type in external_ids:ovn-bridge-datapath-type.
+ ovs-vsctl set Open_vSwitch . external_ids:ovn-bridge-datapath-type=foo
+@@ -176,11 +177,9 @@ OVS_WAIT_UNTIL([check_datapath_type foo])
+ # Change the br-int's datapath type to bar.
+ # It should be reset to foo since ovn-bridge-datapath-type is configured.
+ ovs-vsctl set Bridge br-int datapath-type=bar
+-OVS_WAIT_UNTIL([test foo = `ovs-vsctl get Bridge br-int datapath-type`])
+ OVS_WAIT_UNTIL([check_datapath_type foo])
+
+ ovs-vsctl set Open_vSwitch . external_ids:ovn-bridge-datapath-type=foobar
+-OVS_WAIT_UNTIL([test foobar = `ovs-vsctl get Bridge br-int datapath-type`])
+ OVS_WAIT_UNTIL([check_datapath_type foobar])
+
+ expected_iface_types=$(ovs-vsctl get Open_vSwitch . iface_types | tr -d '[[]] ""')
+diff --git a/tests/ovn-features.at b/tests/ovn-features.at
+new file mode 100644
+index 000000000..36bd83055
+--- /dev/null
++++ b/tests/ovn-features.at
+@@ -0,0 +1,8 @@
++#
++# Unit tests for the lib/features.c module.
++#
++AT_BANNER([OVN unit tests - features])
++
++AT_SETUP([ovn -- unit test -- OVS feature detection tests])
++AT_CHECK([ovstest test-ovn-features run], [0], [])
++AT_CLEANUP
+diff --git a/tests/ovn.at b/tests/ovn.at
+index aa80a7c48..450445036 100644
+--- a/tests/ovn.at
++++ b/tests/ovn.at
+@@ -3169,6 +3169,118 @@ OVN_CLEANUP([hv-1],[hv-2])
+ AT_CLEANUP
+ ])
+
++OVN_FOR_EACH_NORTHD([
++AT_SETUP([ovn -- VLAN transparency, passthru=true, ARP responder disabled])
++ovn_start
++
++net_add net
++check ovs-vsctl add-br br-phys
++ovn_attach net br-phys 192.168.0.1
++
++check ovn-nbctl ls-add ls
++check ovn-nbctl --wait=sb add Logical-Switch ls other_config vlan-passthru=true
++
++for i in 1 2; do
++ check ovn-nbctl lsp-add ls lsp$i
++ check ovn-nbctl lsp-set-addresses lsp$i "f0:00:00:00:00:0$i 10.0.0.$i"
++done
++
++for i in 1 2; do
++ check ovs-vsctl add-port br-int vif$i -- set Interface vif$i external-ids:iface-id=lsp$i \
++ options:tx_pcap=vif$i-tx.pcap \
++ options:rxq_pcap=vif$i-rx.pcap \
++ ofport-request=$i
++done
++
++wait_for_ports_up
++
++ovn-sbctl dump-flows ls > lsflows
++AT_CAPTURE_FILE([lsflows])
++
++AT_CHECK([grep -w "ls_in_arp_rsp" lsflows | sort], [0], [dnl
++ table=16(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;)
++])
++
++test_arp() {
++ local inport=$1 outport=$2 sha=$3 spa=$4 tpa=$5 reply_ha=$6
++ tag=8100fefe
++ local request=ffffffffffff${sha}${tag}08060001080006040001${sha}${spa}ffffffffffff${tpa}
++ ovs-appctl netdev-dummy/receive vif$inport $request
++ echo $request >> $outport.expected
++
++ local reply=${sha}${reply_ha}${tag}08060001080006040002${reply_ha}${tpa}${sha}${spa}
++ ovs-appctl netdev-dummy/receive vif$outport $reply
++ echo $reply >> $inport.expected
++}
++
++test_arp 1 2 f00000000001 0a000001 0a000002 f00000000002
++test_arp 2 1 f00000000002 0a000002 0a000001 f00000000001
++
++for i in 1 2; do
++ OVN_CHECK_PACKETS([vif$i-tx.pcap], [$i.expected])
++done
++
++AT_CLEANUP
++])
++
++OVN_FOR_EACH_NORTHD([
++AT_SETUP([ovn -- VLAN transparency, passthru=true, ND/NA responder disabled])
++ovn_start
++
++net_add net
++check ovs-vsctl add-br br-phys
++ovn_attach net br-phys 192.168.0.1
++
++check ovn-nbctl ls-add ls
++check ovn-nbctl --wait=sb add Logical-Switch ls other_config vlan-passthru=true
++
++for i in 1 2; do
++ check ovn-nbctl lsp-add ls lsp$i
++ check ovn-nbctl lsp-set-addresses lsp$i "f0:00:00:00:00:0$i fe00::$i"
++done
++
++for i in 1 2; do
++ check ovs-vsctl add-port br-int vif$i -- set Interface vif$i external-ids:iface-id=lsp$i \
++ options:tx_pcap=vif$i-tx.pcap \
++ options:rxq_pcap=vif$i-rx.pcap \
++ ofport-request=$i
++done
++
++wait_for_ports_up
++
++ovn-sbctl dump-flows ls > lsflows
++AT_CAPTURE_FILE([lsflows])
++
++AT_CHECK([grep -w "ls_in_arp_rsp" lsflows | sort], [0], [dnl
++ table=16(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;)
++])
++
++test_nd_na() {
++ local inport=$1 outport=$2 sha=$3 spa=$4 tpa=$5 reply_ha=$6
++ tag=8100fefe
++ icmp_type=87
++ local request=ffffffffffff${sha}${tag}86dd6000000000183aff${spa}ff0200000000000000000001ff${tpa: -6}${icmp_type}007ea100000000${tpa}
++ ovs-appctl netdev-dummy/receive vif$inport $request
++ echo $request >> $outport.expected
++ echo $request
++
++ icmp_type=88
++ local reply=${sha}${reply_ha}${tag}86dd6000000000183aff${tpa}${spa}${icmp_type}003da540000000${tpa}
++ ovs-appctl netdev-dummy/receive vif$outport $reply
++ echo $reply >> $inport.expected
++ echo $reply
++}
++
++test_nd_na 1 2 f00000000001 fe000000000000000000000000000001 fe000000000000000000000000000002 f00000000002
++test_nd_na 2 1 f00000000002 fe000000000000000000000000000002 fe000000000000000000000000000001 f00000000001
++
++for i in 1 2; do
++ OVN_CHECK_PACKETS([vif$i-tx.pcap], [$i.expected])
++done
++
++AT_CLEANUP
++])
++
+ OVN_FOR_EACH_NORTHD([
+ AT_SETUP([ovn -- VLAN transparency, passthru=true, multiple hosts])
+ ovn_start
+@@ -11260,7 +11372,7 @@ ovn-nbctl lsp-add foo ln-foo
+ ovn-nbctl lsp-set-addresses ln-foo unknown
+ ovn-nbctl lsp-set-options ln-foo network_name=public
+ ovn-nbctl lsp-set-type ln-foo localnet
+-AT_CHECK([ovn-nbctl set Logical_Switch_Port ln-foo tag=2])
++check ovn-nbctl set Logical_Switch_Port ln-foo tag_request=2
+
+ # Create localnet port in alice
+ ovn-nbctl lsp-add alice ln-alice
+@@ -12024,6 +12136,91 @@ OVN_CLEANUP([hv1])
+ AT_CLEANUP
+ ])
+
++OVN_FOR_EACH_NORTHD([
++AT_SETUP([localport doesn't suppress ARP directed to external port])
++
++ovn_start
++net_add n1
++
++check ovs-vsctl add-br br-phys
++check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
++ovn_attach n1 br-phys 192.168.0.1
++
++check ovn-nbctl ls-add ls
++
++# create topology to allow traffic from localport through localnet to external port
++check ovn-nbctl lsp-add ls lp
++check ovn-nbctl lsp-set-addresses lp "00:00:00:00:00:01 10.0.0.1"
++check ovn-nbctl lsp-set-type lp localport
++check ovs-vsctl add-port br-int lp -- set Interface lp external-ids:iface-id=lp
++
++check ovn-nbctl --wait=sb ha-chassis-group-add hagrp
++check ovn-nbctl --wait=sb ha-chassis-group-add-chassis hagrp main 10
++check ovn-nbctl lsp-add ls lext
++check ovn-nbctl lsp-set-addresses lext "00:00:00:00:00:02 10.0.0.2"
++check ovn-nbctl lsp-set-type lext external
++hagrp_uuid=`ovn-nbctl --bare --columns _uuid find ha_chassis_group name=hagrp`
++check ovn-nbctl set logical_switch_port lext ha_chassis_group=$hagrp_uuid
++
++check ovn-nbctl lsp-add ls ln
++check ovn-nbctl lsp-set-addresses ln unknown
++check ovn-nbctl lsp-set-type ln localnet
++check ovn-nbctl lsp-set-options ln network_name=phys
++check ovn-nbctl --wait=hv sync
++
++# also create second external port AFTER localnet to check that order is irrelevant
++check ovn-nbctl lsp-add ls lext2
++check ovn-nbctl lsp-set-addresses lext2 "00:00:00:00:00:10 10.0.0.10"
++check ovn-nbctl lsp-set-type lext2 external
++check ovn-nbctl set logical_switch_port lext2 ha_chassis_group=$hagrp_uuid
++check ovn-nbctl --wait=hv sync
++
++# create and immediately delete an external port to later check that flows for
++# deleted ports are not left over in flow table
++check ovn-nbctl lsp-add ls lext-deleted
++check ovn-nbctl lsp-set-addresses lext-deleted "00:00:00:00:00:03 10.0.0.3"
++check ovn-nbctl lsp-set-type lext-deleted external
++check ovn-nbctl set logical_switch_port lext-deleted ha_chassis_group=$hagrp_uuid
++check ovn-nbctl --wait=hv sync
++check ovn-nbctl lsp-del lext-deleted
++check ovn-nbctl --wait=hv sync
++
++send_garp() {
++ local inport=$1 eth_src=$2 eth_dst=$3 spa=$4 tpa=$5
++ local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
++ ovs-appctl netdev-dummy/receive $inport $request
++}
++
++spa=$(ip_to_hex 10 0 0 1)
++tpa=$(ip_to_hex 10 0 0 2)
++send_garp lp 000000000001 000000000002 $spa $tpa
++
++spa=$(ip_to_hex 10 0 0 1)
++tpa=$(ip_to_hex 10 0 0 10)
++send_garp lp 000000000001 000000000010 $spa $tpa
++
++spa=$(ip_to_hex 10 0 0 1)
++tpa=$(ip_to_hex 10 0 0 3)
++send_garp lp 000000000001 000000000003 $spa $tpa
++
++dnl external traffic from localport should be sent to localnet
++AT_CHECK([tcpdump -r main/br-phys_n1-tx.pcap arp[[24:4]]=0x0a000002 | wc -l],[0],[dnl
++1
++],[ignore])
++
++#dnl ...regardless of localnet / external ports creation order
++AT_CHECK([tcpdump -r main/br-phys_n1-tx.pcap arp[[24:4]]=0x0a00000a | wc -l],[0],[dnl
++1
++],[ignore])
++
++dnl traffic from localport should not be sent to deleted external port
++AT_CHECK([tcpdump -r main/br-phys_n1-tx.pcap arp[[24:4]]=0x0a000003 | wc -l],[0],[dnl
++0
++],[ignore])
++
++AT_CLEANUP
++])
++
+ OVN_FOR_EACH_NORTHD([
+ AT_SETUP([ovn -- 1 LR with HA distributed router gateway port])
+ ovn_start
+@@ -12668,7 +12865,7 @@ $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv2/br-phys_n1-tx.pcap | trim_zeros
+ AT_CHECK([grep $garp hv2_br_phys_tx | sort], [0], [])
+
+ # change localnet port tag.
+-AT_CHECK([ovn-nbctl set Logical_Switch_Port ln_port tag=2014])
++check ovn-nbctl set Logical_Switch_Port ln_port tag_request=2014
+
+ # wait for earlier changes to take effect
+ OVS_WAIT_UNTIL([test 1 = `as hv2 ovs-ofctl dump-flows br-int table=65 | \
+@@ -17172,6 +17369,16 @@ send_arp_reply() {
+ as hv$hv ovs-appctl netdev-dummy/receive hv${hv}-vif$inport $request
+ }
+
++send_icmp_packet() {
++ local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv4_src=$5 ipv4_dst=$6 ip_chksum=$7 data=$8
++ shift 8
++
++ local ip_ttl=ff
++ local ip_len=001c
++ local packet=${eth_dst}${eth_src}08004500${ip_len}00004000${ip_ttl}01${ip_chksum}${ipv4_src}${ipv4_dst}${data}
++ as hv$hv ovs-appctl netdev-dummy/receive hv${hv}-vif$inport $packet
++}
++
+ net_add n1
+
+ sim_add hv1
+@@ -17311,27 +17518,29 @@ logical_port=sw0-vir) = x])
+ as hv1
+ ovs-vsctl set interface hv1-vif3 external-ids:iface-id=sw0-vir
+
+-AT_CHECK([test x$(ovn-sbctl --bare --columns chassis find port_binding \
+-logical_port=sw0-vir) = x], [0], [])
++wait_column "" Port_Binding chassis logical_port=sw0-vir
+
+ # Cleanup hv1-vif3.
+ as hv1
+ ovs-vsctl del-port hv1-vif3
+
+-AT_CHECK([test x$(ovn-sbctl --bare --columns chassis find port_binding \
+-logical_port=sw0-vir) = x], [0], [])
++wait_column "" Port_Binding chassis logical_port=sw0-vir
+
+ check_virtual_offlows_present() {
+ hv=$1
+
+- AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | grep "priority=2000"], [0], [dnl
+- table=44, priority=2000,ip,metadata=0x1 actions=resubmit(,45)
+- table=44, priority=2000,ipv6,metadata=0x1 actions=resubmit(,45)
++ sw0_dp_key=$(printf "%x" $(fetch_column Datapath_Binding tunnel_key external_ids:name=sw0))
++ lr0_dp_key=$(printf "%x" $(fetch_column Datapath_Binding tunnel_key external_ids:name=lr0))
++ lr0_public_dp_key=$(printf "%x" $(fetch_column Port_Binding tunnel_key logical_port=lr0-public))
++
++ AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | grep "priority=2000"], [0], [dnl
++ table=44, priority=2000,ip,metadata=0x$sw0_dp_key actions=resubmit(,45)
++ table=44, priority=2000,ipv6,metadata=0x$sw0_dp_key actions=resubmit(,45)
+ ])
+
+- AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=11 | ofctl_strip_all | \
++ AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=11 | ofctl_strip_all | \
+ grep "priority=92" | grep 172.168.0.50], [0], [dnl
+- table=11, priority=92,arp,reg14=0x3,metadata=0x3,arp_tpa=172.168.0.50,arp_op=1 actions=move:NXM_OF_ETH_SRC[[]]->NXM_OF_ETH_DST[[]],mod_dl_src:10:54:00:00:00:10,load:0x2->NXM_OF_ARP_OP[[]],move:NXM_NX_ARP_SHA[[]]->NXM_NX_ARP_THA[[]],load:0x105400000010->NXM_NX_ARP_SHA[[]],push:NXM_OF_ARP_SPA[[]],push:NXM_OF_ARP_TPA[[]],pop:NXM_OF_ARP_SPA[[]],pop:NXM_OF_ARP_TPA[[]],move:NXM_NX_REG14[[]]->NXM_NX_REG15[[]],load:0x1->NXM_NX_REG10[[0]],resubmit(,37)
++ table=11, priority=92,arp,reg14=0x$lr0_public_dp_key,metadata=0x$lr0_dp_key,arp_tpa=172.168.0.50,arp_op=1 actions=move:NXM_OF_ETH_SRC[[]]->NXM_OF_ETH_DST[[]],mod_dl_src:10:54:00:00:00:10,load:0x2->NXM_OF_ARP_OP[[]],move:NXM_NX_ARP_SHA[[]]->NXM_NX_ARP_THA[[]],load:0x105400000010->NXM_NX_ARP_SHA[[]],push:NXM_OF_ARP_SPA[[]],push:NXM_OF_ARP_TPA[[]],pop:NXM_OF_ARP_SPA[[]],pop:NXM_OF_ARP_TPA[[]],move:NXM_NX_REG14[[]]->NXM_NX_REG15[[]],load:0x1->NXM_NX_REG10[[0]],resubmit(,37)
+ ])
+ }
+
+@@ -17384,6 +17593,22 @@ logical_port=sw0-vir) = x])
+ wait_row_count nb:Logical_Switch_Port 1 up=false name=sw0-vir
+
+ check ovn-nbctl --wait=hv sync
++
++# verify the traffic from virtual port is discarded if the port is not claimed
++AT_CHECK([grep lr_in_gw_redirect lr0-flows2 | grep "ip4.src == 10.0.0.10"], [0], [dnl
++ table=17(lr_in_gw_redirect ), priority=100 , match=(ip4.src == 10.0.0.10 && outport == "lr0-public" && is_chassis_resident("sw0-vir")), action=(eth.src = 10:54:00:00:00:10; reg1 = 172.168.0.50; next;)
++ table=17(lr_in_gw_redirect ), priority=80 , match=(ip4.src == 10.0.0.10 && outport == "lr0-public"), action=(drop;)
++])
++
++eth_src=505400000003
++eth_dst=00000000ff01
++ip_src=$(ip_to_hex 10 0 0 10)
++ip_dst=$(ip_to_hex 172 168 0 101)
++send_icmp_packet 1 1 $eth_src $eth_dst $ip_src $ip_dst c4c9 0000000000000000000000
++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | awk '/table=25, n_packets=1, n_bytes=45/{print $7" "$8}'],[0],[dnl
++priority=80,ip,reg15=0x3,metadata=0x3,nw_src=10.0.0.10 actions=drop
++])
++
+ # hv1 should remove the flow for the ACL with is_chassis_redirect check for sw0-vir.
+ check_virtual_offlows_not_present hv1
+
+@@ -23116,7 +23341,7 @@ AT_CHECK([
+ for hv in 1 2; do
+ grep table=15 hv${hv}flows | \
+ grep "priority=100" | \
+- grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
++ grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
+
+ grep table=22 hv${hv}flows | \
+ grep "priority=200" | \
+@@ -23241,7 +23466,7 @@ AT_CHECK([
+ for hv in 1 2; do
+ grep table=15 hv${hv}flows | \
+ grep "priority=100" | \
+- grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
++ grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
+
+ grep table=22 hv${hv}flows | \
+ grep "priority=200" | \
+@@ -26688,6 +26913,50 @@ OVN_CLEANUP([hv1])
+ AT_CLEANUP
+ ])
+
++# Tests that ACLs referencing port groups that include ports connected to
++# logical routers are correctly applied.
++OVN_FOR_EACH_NORTHD([
++AT_SETUP([ovn -- ACL with Port Group including router ports])
++ovn_start
++net_add n1
++
++sim_add hv1
++as hv1
++ovs-vsctl add-br br-phys
++ovn_attach n1 br-phys 192.168.0.1
++
++check ovn-nbctl \
++ -- lr-add lr \
++ -- ls-add ls \
++ -- lrp-add lr lrp_ls 00:00:00:00:00:01 42.42.42.1/24 \
++ -- lsp-add ls ls_lr \
++ -- lsp-set-addresses ls_lr router \
++ -- lsp-set-type ls_lr router \
++ -- lsp-set-options ls_lr router-port=lr_ls \
++ -- lsp-add ls vm1
++
++check ovn-nbctl pg-add pg ls_lr \
++ -- acl-add pg from-lport 1 'inport == @pg && ip4.dst == 42.42.42.42' drop
++
++check ovs-vsctl add-port br-int vm1 \
++ -- set interface vm1 external_ids:iface-id=vm1
++
++wait_for_ports_up
++check ovn-nbctl --wait=hv sync
++
++dp_key=$(fetch_column Datapath_Binding tunnel_key external_ids:name=ls)
++rtr_port_key=$(fetch_column Port_Binding tunnel_key logical_port=ls_lr)
++
++# Check that ovn-controller adds a flow to drop packets with dest IP
++# 42.42.42.42 coming from the router port.
++AT_CHECK([ovs-ofctl dump-flows br-int table=17 | grep "reg14=0x${rtr_port_key},metadata=0x${dp_key},nw_dst=42.42.42.42 actions=drop" -c], [0], [dnl
++1
++])
++
++OVN_CLEANUP([hv1])
++AT_CLEANUP
++])
++
+ OVN_FOR_EACH_NORTHD([
+ AT_SETUP([ovn -- Static route with discard nexthop])
+ ovn_start
+diff --git a/tests/system-common-macros.at b/tests/system-common-macros.at
+index c8fa6f03f..b742a2cb9 100644
+--- a/tests/system-common-macros.at
++++ b/tests/system-common-macros.at
+@@ -330,3 +330,7 @@ m4_define([OVS_CHECK_IPROUTE_ENCAP],
+ # OVS_CHECK_CT_CLEAR()
+ m4_define([OVS_CHECK_CT_CLEAR],
+ [AT_SKIP_IF([! grep -q "Datapath supports ct_clear action" ovs-vswitchd.log])])
++
++# OVS_CHECK_CT_ZERO_SNAT()
++m4_define([OVS_CHECK_CT_ZERO_SNAT],
++    [AT_SKIP_IF([! grep -q "Datapath supports ct_zero_snat" ovs-vswitchd.log])])
+diff --git a/tests/system-ovn.at b/tests/system-ovn.at
+index 310bd3d5a..56cd26535 100644
+--- a/tests/system-ovn.at
++++ b/tests/system-ovn.at
+@@ -1348,7 +1348,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -3121,7 +3121,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -4577,7 +4577,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -4663,7 +4663,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -4903,7 +4903,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -5287,7 +5287,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -5296,6 +5296,196 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+ AT_CLEANUP
+ ])
+
++OVN_FOR_EACH_NORTHD([
++AT_SETUP([ovn -- load-balancer and firewall tuple conflict IPv4])
++AT_SKIP_IF([test $HAVE_NC = no])
++AT_KEYWORDS([ovnlb])
++
++CHECK_CONNTRACK()
++CHECK_CONNTRACK_NAT()
++ovn_start
++OVS_TRAFFIC_VSWITCHD_START()
++OVS_CHECK_CT_ZERO_SNAT()
++ADD_BR([br-int])
++
++# Set external-ids in br-int needed for ovn-controller
++ovs-vsctl \
++ -- set Open_vSwitch . external-ids:system-id=hv1 \
++ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
++ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
++ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
++ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
++
++# Start ovn-controller
++start_daemon ovn-controller
++
++# Logical network:
++# 1 logical switch connected to one logical router.
++# 2 VMs, one used as a backend for a load balancer.
++
++check ovn-nbctl \
++ -- lr-add rtr \
++ -- lrp-add rtr rtr-ls 00:00:00:00:01:00 42.42.42.1/24 \
++ -- ls-add ls \
++ -- lsp-add ls ls-rtr \
++ -- lsp-set-addresses ls-rtr 00:00:00:00:01:00 \
++ -- lsp-set-type ls-rtr router \
++ -- lsp-set-options ls-rtr router-port=rtr-ls \
++ -- lsp-add ls vm1 -- lsp-set-addresses vm1 00:00:00:00:00:01 \
++ -- lsp-add ls vm2 -- lsp-set-addresses vm2 00:00:00:00:00:02 \
++ -- lb-add lb-test 66.66.66.66:666 42.42.42.2:4242 tcp \
++ -- ls-lb-add ls lb-test
++
++ADD_NAMESPACES(vm1)
++ADD_VETH(vm1, vm1, br-int, "42.42.42.2/24", "00:00:00:00:00:01", "42.42.42.1")
++
++ADD_NAMESPACES(vm2)
++ADD_VETH(vm2, vm2, br-int, "42.42.42.3/24", "00:00:00:00:00:02", "42.42.42.1")
++
++# Wait for ovn-controller to catch up.
++wait_for_ports_up
++check ovn-nbctl --wait=hv sync
++
++# Start IPv4 TCP server on vm1.
++NETNS_DAEMONIZE([vm1], [nc -k -l 42.42.42.2 4242], [nc-vm1.pid])
++
++# Make sure connecting to the VIP works.
++NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -p 2000 -z])
++
++# Start IPv4 TCP connection to VIP from vm2.
++NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -p 2001 -z])
++
++# Check conntrack. We expect two entries:
++# - one in vm1's zone (firewall)
++# - one in vm2's zone (dnat)
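++# The sed expressions below normalize the ephemeral ports, TCP state and
++# conntrack zone IDs so that the expected output is deterministic.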
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
++grep "orig=.src=42\.42\.42\.3" | \
++sed -e 's/port=2001/port=/g' \
++ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=/g' \
++ -e 's/state=[[0-9_A-Z]]*/state=/g' \
++ -e 's/zone=[[0-9]]*/zone=/' | sort], [0], [dnl
++tcp,orig=(src=42.42.42.3,dst=42.42.42.2,sport=,dport=4242),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=),zone=,protoinfo=(state=)
++tcp,orig=(src=42.42.42.3,dst=66.66.66.66,sport=,dport=666),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=),zone=,labels=0x2,protoinfo=(state=)
++])
++
++# Start IPv4 TCP connection to backend IP from vm2 which would require
++# additional source port translation to avoid a tuple conflict.
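++# Reusing source port 2001 towards the backend IP collides, in vm2's zone,
++# with the reply tuple of the DNATted VIP connection above; ovn-controller
++# is expected to commit this connection with an all-zero SNAT so that
++# conntrack picks a fresh source port instead of dropping the packet.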
++NS_CHECK_EXEC([vm2], [nc 42.42.42.2 4242 -p 2001 -z])
++
++# Check conntrack. We expect three entries:
++# - one in vm1's zone (firewall) - reused from the previous connection.
++# - one in vm2's zone (dnat) - still in TIME_WAIT after the previous connection.
++# - one in vm2's zone (firewall + additional all-zero SNAT)
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
++grep "orig=.src=42\.42\.42\.3" | \
++sed -e 's/port=2001/port=/g' \
++ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=/g' \
++ -e 's/state=[[0-9_A-Z]]*/state=/g' \
++ -e 's/zone=[[0-9]]*/zone=/' | sort], [0], [dnl
++tcp,orig=(src=42.42.42.3,dst=42.42.42.2,sport=,dport=4242),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=),zone=,protoinfo=(state=)
++tcp,orig=(src=42.42.42.3,dst=42.42.42.2,sport=,dport=4242),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=),zone=,protoinfo=(state=)
++tcp,orig=(src=42.42.42.3,dst=66.66.66.66,sport=,dport=666),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=),zone=,labels=0x2,protoinfo=(state=)
++])
++
++AT_CLEANUP
++])
++
++OVN_FOR_EACH_NORTHD([
++AT_SETUP([ovn -- load-balancer and firewall tuple conflict IPv6])
++AT_SKIP_IF([test $HAVE_NC = no])
++AT_KEYWORDS([ovnlb])
++
++CHECK_CONNTRACK()
++CHECK_CONNTRACK_NAT()
++ovn_start
++OVS_TRAFFIC_VSWITCHD_START()
++OVS_CHECK_CT_ZERO_SNAT()
++ADD_BR([br-int])
++
++# Set external-ids in br-int needed for ovn-controller
++ovs-vsctl \
++ -- set Open_vSwitch . external-ids:system-id=hv1 \
++ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
++ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
++ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
++ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
++
++# Start ovn-controller
++start_daemon ovn-controller
++
++# Logical network:
++# 1 logical switch connected to one logical router.
++# 2 VMs, one used as a backend for a load balancer.
++
++check ovn-nbctl \
++ -- lr-add rtr \
++ -- lrp-add rtr rtr-ls 00:00:00:00:01:00 4242::1/64 \
++ -- ls-add ls \
++ -- lsp-add ls ls-rtr \
++ -- lsp-set-addresses ls-rtr 00:00:00:00:01:00 \
++ -- lsp-set-type ls-rtr router \
++ -- lsp-set-options ls-rtr router-port=rtr-ls \
++ -- lsp-add ls vm1 -- lsp-set-addresses vm1 00:00:00:00:00:01 \
++ -- lsp-add ls vm2 -- lsp-set-addresses vm2 00:00:00:00:00:02 \
++ -- lb-add lb-test [[6666::1]]:666 [[4242::2]]:4242 tcp \
++ -- ls-lb-add ls lb-test
++
++ADD_NAMESPACES(vm1)
++ADD_VETH(vm1, vm1, br-int, "4242::2/64", "00:00:00:00:00:01", "4242::1")
++OVS_WAIT_UNTIL([test "$(ip netns exec vm1 ip a | grep 4242::2 | grep tentative)" = ""])
++
++ADD_NAMESPACES(vm2)
++ADD_VETH(vm2, vm2, br-int, "4242::3/64", "00:00:00:00:00:02", "4242::1")
++OVS_WAIT_UNTIL([test "$(ip netns exec vm2 ip a | grep 4242::3 | grep tentative)" = ""])
++
++# Wait for ovn-controller to catch up.
++wait_for_ports_up
++check ovn-nbctl --wait=hv sync
++
++# Start IPv6 TCP server on vm1.
++NETNS_DAEMONIZE([vm1], [nc -k -l 4242::2 4242], [nc-vm1.pid])
++
++# Make sure connecting to the VIP works.
++NS_CHECK_EXEC([vm2], [nc 6666::1 666 -p 2000 -z])
++
++# Start IPv6 TCP connection to VIP from vm2.
++NS_CHECK_EXEC([vm2], [nc 6666::1 666 -p 2001 -z])
++
++# Check conntrack. We expect two entries:
++# - one in vm1's zone (firewall)
++# - one in vm2's zone (dnat)
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
++grep "orig=.src=4242::3" | \
++sed -e 's/port=2001/port=/g' \
++ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=/g' \
++ -e 's/state=[[0-9_A-Z]]*/state=/g' \
++ -e 's/zone=[[0-9]]*/zone=/' | sort], [0], [dnl
++tcp,orig=(src=4242::3,dst=4242::2,sport=,dport=4242),reply=(src=4242::2,dst=4242::3,sport=4242,dport=),zone=,protoinfo=(state=)
++tcp,orig=(src=4242::3,dst=6666::1,sport=,dport=666),reply=(src=4242::2,dst=4242::3,sport=4242,dport=),zone=,labels=0x2,protoinfo=(state=)
++])
++
++# Start IPv6 TCP connection to backend IP from vm2 which would require
++# additional source port translation to avoid a tuple conflict.
++NS_CHECK_EXEC([vm2], [nc 4242::2 4242 -p 2001 -z])
++
++# Check conntrack. We expect three entries:
++# - one in vm1's zone (firewall) - reused from the previous connection.
++# - one in vm2's zone (dnat) - still in TIME_WAIT after the previous connection.
++# - one in vm2's zone (firewall + additional all-zero SNAT)
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
++grep "orig=.src=4242::3" | \
++sed -e 's/port=2001/port=/g' \
++ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=/g' \
++ -e 's/state=[[0-9_A-Z]]*/state=/g' \
++ -e 's/zone=[[0-9]]*/zone=/' | sort], [0], [dnl
++tcp,orig=(src=4242::3,dst=4242::2,sport=,dport=4242),reply=(src=4242::2,dst=4242::3,sport=4242,dport=),zone=,protoinfo=(state=)
++tcp,orig=(src=4242::3,dst=4242::2,sport=,dport=4242),reply=(src=4242::2,dst=4242::3,sport=4242,dport=),zone=,protoinfo=(state=)
++tcp,orig=(src=4242::3,dst=6666::1,sport=,dport=666),reply=(src=4242::2,dst=4242::3,sport=4242,dport=),zone=,labels=0x2,protoinfo=(state=)
++])
++
++AT_CLEANUP
++])
++
+ # When a lport is released on a chassis, ovn-controller was
+ # not clearing some of the flows in table 33, leading
+ # to packet drops if ct() is hit.
+@@ -5527,7 +5717,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -5689,7 +5879,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -5738,7 +5928,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+@@ -5831,7 +6021,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
+@@ -5893,7 +6083,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
+@@ -6044,7 +6234,7 @@ as ovn-nb
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+ as northd
+-OVS_APP_EXIT_AND_WAIT([ovn-northd])
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
+@@ -6091,7 +6281,6 @@ check ovn-nbctl pg-add pg1 sw1-p1
+ check ovn-nbctl acl-add pg1 from-lport 1002 "ip" allow-related
+ check ovn-nbctl acl-add pg1 to-lport 1002 "ip" allow-related
+
+-
+ OVN_POPULATE_ARP
+ ovn-nbctl --wait=hv sync
+
+@@ -6179,5 +6368,117 @@ OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+ as
+ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+ /connection dropped.*/d"])
++
++AT_CLEANUP
++])
++
++OVN_FOR_EACH_NORTHD([
++AT_SETUP(ovn -- DNAT LR hairpin IPv4)
++AT_KEYWORDS(hairpin)
++
++ovn_start
++
++OVS_TRAFFIC_VSWITCHD_START()
++ADD_BR([br-int])
++
++# Set external-ids in br-int needed for ovn-controller
++ovs-vsctl \
++ -- set Open_vSwitch . external-ids:system-id=hv1 \
++ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
++ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
++ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
++ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
++
++start_daemon ovn-controller
++
++# Logical network:
++# Two VMs
++# * VM1 with IP address 192.168.100.5
++# * VM2 with IP address 192.168.100.6
++# The VMs connect to logical switch ls1.
++#
++# An external router with IP address 172.18.1.2, simulated with a network namespace.
++# No traffic is sent to it in this test.
++# The external router connects to logical switch ls-pub
++#
++# One logical router (lr1) connects to ls1 and ls-pub. The router port connected to ls-pub is
++# a gateway port.
++# * The subnet connected to ls1 is 192.168.100.0/24. The Router IP address is 192.168.100.1
++# * The subnet connected to ls-pub is 172.18.1.0/24. The Router IP address is 172.18.1.1
++# lr1 has the following attributes:
++# * It has a "default" static route that sends traffic out the gateway router port.
++# * It has a DNAT rule that translates 172.18.2.10 to 192.168.100.6 (VM2)
++#
++# In this test, we want to ensure that a ping from VM1 to IP address 172.18.2.10 reaches VM2.
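++# The DNAT rule matches ip4.dst == 172.18.2.10 in lr1's pipeline, so the
++# packet from VM1 should be translated to 192.168.100.6 and routed straight
++# back into ls1 (a hairpin) rather than forwarded to the external router.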
++
++ovn-nbctl ls-add ls1
++ovn-nbctl lsp-add ls1 vm1 -- lsp-set-addresses vm1 "00:00:00:00:00:05 192.168.100.5"
++ovn-nbctl lsp-add ls1 vm2 -- lsp-set-addresses vm2 "00:00:00:00:00:06 192.168.100.6"
++
++ovn-nbctl ls-add ls-pub
++ovn-nbctl lsp-add ls-pub ext-router -- lsp-set-addresses ext-router "00:00:00:00:01:02 172.18.1.2"
++
++ovn-nbctl lr-add lr1
++ovn-nbctl lrp-add lr1 lr1-ls1 00:00:00:00:00:01 192.168.100.1/24
++ovn-nbctl lsp-add ls1 ls1-lr1 \
++ -- lsp-set-type ls1-lr1 router \
++ -- lsp-set-addresses ls1-lr1 00:00:00:00:00:01 \
++ -- lsp-set-options ls1-lr1 router-port=lr1-ls1
++
++ovn-nbctl lrp-add lr1 lr1-ls-pub 00:00:00:00:01:01 172.18.1.1/24
++ovn-nbctl lrp-set-gateway-chassis lr1-ls-pub hv1
++ovn-nbctl lsp-add ls-pub ls-pub-lr1 \
++ -- lsp-set-type ls-pub-lr1 router \
++ -- lsp-set-addresses ls-pub-lr1 00:00:00:00:01:01 \
++ -- lsp-set-options ls-pub-lr1 router-port=lr1-ls-pub
++
++ovn-nbctl lr-nat-add lr1 snat 172.18.1.1 192.168.100.0/24
++ovn-nbctl lr-nat-add lr1 dnat_and_snat 172.18.2.10 192.168.100.6
++ovn-nbctl lr-route-add lr1 0.0.0.0/0 172.18.1.2
++
++#ls1_uuid=$(fetch_column Port_Binding datapath logical_port=vm1)
++#ovn-sbctl create MAC_Binding ip=172.18.2.10 datapath=$ls1_uuid logical_port=vm2 mac="00:00:00:00:00:06"
++
++OVN_POPULATE_ARP
++ovn-nbctl --wait=hv sync
++
++ADD_NAMESPACES(vm1)
++ADD_VETH(vm1, vm1, br-int, "192.168.100.5/24", "00:00:00:00:00:05", \
++ "192.168.100.1")
++
++ADD_NAMESPACES(vm2)
++ADD_VETH(vm2, vm2, br-int, "192.168.100.6/24", "00:00:00:00:00:06", \
++ "192.168.100.1")
++
++ADD_NAMESPACES(ext-router)
++ADD_VETH(ext-router, ext-router, br-int, "172.18.1.2/24", "00:00:00:00:01:02", \
++ "172.18.1.1")
++
++# Let's take a quick look at the logical flows
++ovn-sbctl lflow-list
++
++# Let's check what ovn-trace says...
++ovn-trace ls1 'inport == "vm1" && eth.src == 00:00:00:00:00:05 && ip4.src == 192.168.100.5 && eth.dst == 00:00:00:00:00:01 && ip4.dst == 172.18.2.10 && ip.ttl == 32'
++
++# A ping from vm1 should hairpin in lr1 and successfully DNAT to vm2
++NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 172.18.2.10 | FORMAT_PING], \
++[0], [dnl
++3 packets transmitted, 3 received, 0% packet loss, time 0ms
++])
++kill $(pidof ovn-controller)
++
++as ovn-sb
++OVS_APP_EXIT_AND_WAIT([ovsdb-server])
++
++as ovn-nb
++OVS_APP_EXIT_AND_WAIT([ovsdb-server])
++
++as northd
++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
++
++as
++OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
++/.*terminating with signal 15.*/d"])
++
+ AT_CLEANUP
+ ])
+diff --git a/tests/testsuite.at b/tests/testsuite.at
+index ddc3f11d6..b716a1ad9 100644
+--- a/tests/testsuite.at
++++ b/tests/testsuite.at
+@@ -27,6 +27,7 @@ m4_include([tests/ovn.at])
+ m4_include([tests/ovn-performance.at])
+ m4_include([tests/ovn-northd.at])
+ m4_include([tests/ovn-nbctl.at])
++m4_include([tests/ovn-features.at])
+ m4_include([tests/ovn-lflow-cache.at])
+ m4_include([tests/ovn-ofctrl-seqno.at])
+ m4_include([tests/ovn-sbctl.at])
diff --git a/SPECS/ovn-2021.spec b/SPECS/ovn-2021.spec
index 4d13e3a..1936c34 100644
--- a/SPECS/ovn-2021.spec
+++ b/SPECS/ovn-2021.spec
@@ -50,8 +50,8 @@ Name: %{pkgname}
Summary: Open Virtual Network support
Group: System Environment/Daemons
URL: http://www.ovn.org/
-Version: 21.03.0
-Release: 40%{?commit0:.%{date}git%{shortcommit0}}%{?dist}
+Version: 21.06.0
+Release: 17%{?commit0:.%{date}git%{shortcommit0}}%{?dist}
Provides: openvswitch%{pkgver}-ovn-common = %{?epoch:%{epoch}:}%{version}-%{release}
Obsoletes: openvswitch%{pkgver}-ovn-common < 2.11.0-1
@@ -62,8 +62,8 @@ License: ASL 2.0 and LGPLv2+ and SISSL
# Always pull an upstream release, since this is what we rebase to.
Source: https://github.com/ovn-org/ovn/archive/v%{version}.tar.gz#/ovn-%{version}.tar.gz
-%define ovscommit ac85cdb38c1f33e7952bc4c0347d6c7873fb56a1
-%define ovsshortcommit ac85cdb
+%define ovscommit e6ad4d8d9c9273f226ec9a993b64fccfb50bdf4c
+%define ovsshortcommit e6ad4d8
Source10: https://github.com/openvswitch/ovs/archive/%{ovscommit}.tar.gz#/openvswitch-%{ovsshortcommit}.tar.gz
%define ovsdir ovs-%{ovscommit}
@@ -119,6 +119,7 @@ BuildRequires: procps-ng
%if 0%{?rhel} > 7 || 0%{?fedora}
BuildRequires: python3-pyOpenSSL
%endif
+BuildRequires: tcpdump
%if %{with libcapng}
BuildRequires: libcap-ng libcap-ng-devel
@@ -272,7 +273,7 @@ rm -f $RPM_BUILD_ROOT/%{_bindir}/ovn-docker-overlay-driver \
%if %{with check}
touch resolv.conf
export OVS_RESOLV_CONF=$(pwd)/resolv.conf
- if ! make check TESTSUITEFLAGS='%{_smp_mflags} -k ovn'; then
+ if ! make check TESTSUITEFLAGS='%{_smp_mflags}'; then
cat tests/testsuite.log
if ! make check TESTSUITEFLAGS='--recheck'; then
cat tests/testsuite.log
@@ -526,188 +527,73 @@ fi
%{_unitdir}/ovn-controller-vtep.service
%changelog
-* Thu May 27 2021 Dumitru Ceara - 21.03.0-40
-- if-status: Add OVS interface status management module. (#1952846)
- [Gerrit: 7272e3cb2866d65dfffda5fa0b6f062a086bcbcf]
- [Upstream: 5c3371922994c2d8a3610c9353902156db27d108]
-
-* Thu May 27 2021 Han Zhou - 21.03.0-39
-- ovn-controller.c: Remove extra local_lports_changed setting.
- [Gerrit: 8da45224b868d72afce47778a577ecb602fc8652]
- [Upstream: fa28ba6963650d5f8ed90865df3b81699a0a9b60]
-
-* Fri May 21 2021 Lorenzo Bianconi - 21.03.0-38
-- physical: do not forward traffic from localport to a localnet one
- [Gerrit: 89e27e959ab66592c3b716bec3e2e757161b2586]
- [Upstream: 96959e56d634c8d888af9e3ee340602593c7e4fa]
-
-* Thu May 20 2021 Lorenzo Bianconi - 21.03.0-37
-- ovn-nbctl: do not report an error for duplicated ecmp routes with --may-exist
- [Gerrit: 6d9a0af88ea8db8cc048fb913a4316e1fcbe32ad]
- [Upstream: f63b609a0610a8c9fcd13c38f3acd3526b8a8b0c]
-
-* Wed May 19 2021 Lorenzo Bianconi - 21.03.0-36
-- controller: fix physical flow update for localport
- [Gerrit: d040f88b0b0b5c5e42004996f128350e8fd420ca]
- [Upstream: 925ed83a6c8064fcb93250acd7493b59c034fa7b]
-
-* Tue May 18 2021 Mark Michelson - 21.03.0-35
-- expr: crush the result of a sorted OR expression.
- [Gerrit: 9cb6c3e6a3e7a21c07169cf631ebdcd94398025e]
- [Upstream: 3dab95aa5c8c6ea97395127dd2acf27487fd1cd5]
-
-* Fri May 14 2021 Numan Siddique - 21.03.0-34
-- Fix compilation error introduced in the previous commit.
- [Gerrit: 0cc5455f8ba4a57c153811b7e93bc4c4d0a4e97d]
- [Upstream: 0675bb01221b9b2d5b0b7b55715979204454cada]
-
-* Fri May 14 2021 Ilya Maximets - 21.03.0-33
-- northd: Combine router arp flows. (#1945415)
- [Gerrit: 0e60182997ecc3a6606772d78609040203dbe67e]
- [Upstream: ea6ee901ff9107a084bc830a8a38c4e0bd9f75f7]
-
-* Wed May 12 2021 Flavio Fernandes - 21.03.0-32
-- ovn-controller: Ensure br-int is using secure fail-mode (#1957025)
- [Gerrit: f56d885a7b4b9776a677a98ce758a177238e043f]
- [Upstream: 9cc334bc1a036a93cc1a541513d48f4df6933e9b]
-
-* Tue May 11 2021 Numan Siddique - 21.03.0-31
-- northd: Support flow offloading for logical switches with no ACLs. (#1955191)
- [Gerrit: 80f98b4a82f3a7cece6a33a5190b748b86cf868c]
- [Upstream: 127bf166ccf4a2509f670c48a00b0340039f20d2]
-
-* Tue May 11 2021 Numan Siddique - 21.03.0-30
-- northd: Provide the option to not use ct.inv in lflows.
- [Gerrit: 65cf2afebcdaa70941ba953b117da82e3f97f6fe]
- [Upstream: 3bb91366a6b0d60df5ce8f9c7f6427f7d37dfdd4]
-
-* Tue May 11 2021 Numan Siddique - 21.03.0-29
-- northd: Optimize ct nat for load balancer traffic.
- [Gerrit: 6e1a063b8e7f90ff7bfc95ec65347088d6ff8225]
- [Upstream: 0038579d192802fff03c3594e4f85dab4f7af2bd]
-
-* Fri Apr 30 2021 Dumitru Ceara - 21.03.0-28
-- binding: Don't reset expected seqno for interfaces already being installed. (#1946420)
- [Gerrit: 7126b44ee9a5d74d77d5b8326b2cf87630f92cec]
- [Upstream: 9c9b6b1d98e38d3d7a1dcf01741b095a6b9e8f0c]
-
-* Mon Apr 26 2021 Dumitru Ceara - 21.03.0-27
-- tests: Improve test "IGMP snoop/querier/relay". (#1941067)
- [Gerrit: 6100b80c194a9988f0967a2a232065eca5940fcd]
- [Upstream: f5a27f67d825eb306d3d39815293cb2191c89716]
-
-* Thu Apr 22 2021 Lorenzo Bianconi - 21.03.0-26
-- ovn-nbctl: dump next-hop for router policies
- [Gerrit: 6ebe05a0b9765f66a7f1350882c122cccd8f7304]
- [Upstream: d8b282b2852e2b0d4e44963b3b9ade8d28a0b899]
-
-* Wed Apr 21 2021 Dan Williams - 21.03.0-25
-- ovn-ctl: stop databases with stop_ovn_daemon() (#1944239)
- [Gerrit: dac053806bb3e061669fa449f2c704fbef9aff1d]
+* Tue Jul 27 2021 Numan Siddique - 21.06.0-17
+- ovn-controller: Split logical flow and physical flow processing. (#1986484)
+ [Gerrit: 6e1e90064ad1f5769fdc96e3b735ee236c30b7e2]
+ [Upstream: ceb12c9190a124c70bc938e8e1bea17612b498be]
+
+* Tue Jul 27 2021 Dumitru Ceara - 21.06.0-16
+- ovn.at: Fix "Symmetric IPv6 ECMP reply flows" test.
+ [Gerrit: 801f6c69c3bb45f981135ac6c197fdbd3f18118d]
+ [Upstream: 4e6c498068dc4fa9546d3661f78f0a42e99c74bb]
+
+* Tue Jul 27 2021 Dumitru Ceara - 21.06.0-15
+- ovn-controller: Handle DNAT/no-NAT conntrack tuple collisions. (#1939676)
+ [Gerrit: abfd62cb228b7d311ae7cae18adfe9cfcf68affc]
+ [Upstream: 58683a4271e6a885f2f2aea27f3df88e69a5c388]
+
+* Tue Jul 27 2021 Dumitru Ceara - 21.06.0-14
+- ovn-controller: Detect OVS datapath capabilities.
+ [Gerrit: ca1df0396e6e6eb016c3cad82db7c49cc05ec99a]
+ [Upstream: 56e2cd3a2f06b79b7d57cc8637fc0d258652aff5]
+
+* Mon Jul 26 2021 Lorenzo Bianconi - 21.06.0-13
+- northd: do not centralize traffic for unclaimed virtual ports
+ [Gerrit: 5b6826906a76779b527d72d1c49d211ce492e62e]
[Upstream: N/A]
-* Wed Apr 21 2021 Dan Williams - 21.03.0-24
-- ovn-lib: harmonize stop_ovn_daemon() with ovs-lib
- [Gerrit: 92d983366b8cbd513b1d69f729a920204a2c2973]
- [Upstream: N/A]
-
-* Wed Apr 21 2021 Numan Siddique - 21.03.0-23
-- tests: Fix frequent failure of "4 HV, 1 LS, 1 LR, packet test with HA distributed router gateway port:".
- [Gerrit: 56233e8004c1651add1211d8217cc23d9a74eea7]
- [Upstream: N/A]
-
-* Wed Apr 21 2021 Dumitru Ceara - 21.03.0-22
-- controller: Monitor all logical flows that refer to datapath groups. (#1947056)
- [Gerrit: ca662b1557adad61fba46ec249d5c5511738bcac]
- [Upstream: N/A]
-
-* Wed Apr 14 2021 Numan Siddique - 21.03.0-21
-- controller: Fix virtual lport I-P handling. (#1947823)
- [Gerrit: 0938c49138dac280bbc59148fe87dc0debed6f62]
- [Upstream: 1ad0a974b55dc6f31f7ea940e3b7d63368babb04]
-
-* Tue Apr 13 2021 Dumitru Ceara - 21.03.0-20
-- northd: Restore flows that recirculate packets in the router DNAT zone.
- [Gerrit: 4f5d3099d94c4860737546c4c1f6561f15dc7519]
- [Upstream: 82b4c619dd6c772a50d5403bf6d40aa4b4f7e38d]
-
-* Tue Apr 13 2021 Numan Siddique - 21.03.0-19
-- Merge "binding: Fix the crashes seen when port binding type changes." into ovn-2021
- [Gerrit: 5a15f57371ce318a382c0e3aa262e5dab790e168]
- [Upstream: N/A]
-
-* Tue Apr 13 2021 Lorenzo Bianconi - 21.03.0-18
-- Merge "northd: introduce per-lb lb_skip_snat option" into ovn-2021
- [Gerrit: 91e869a442511d649b123d42e69b789f5d3ee96e]
- [Upstream: N/A]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-17
-- northd: introduce lrouter_check_nat_entry routine
- [Gerrit: 0b3ca120ea30a2b1d34d5b55e9b7b953757da4dd]
- [Upstream: e02cd3d2001db87b92bd139eab533e69e0d48aee]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-16
-- northd: introduce build_lrouter_ingress_flow routine
- [Gerrit: db27342ff9355f45021013ff3aaf2ebafe71c47a]
- [Upstream: 0d16a8b64c5035529cbbbf245384618711024ae4]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-15
-- northd: introduce build_lrouter_out_snat_flow routine
- [Gerrit: 37b0ba144b999a089cd439023f90980651400616]
- [Upstream: 5e8fadf69161bc7e56b8f9f57124e5083b496b83]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-14
-- northd: introduce build_lrouter_out_undnat_flow routine
- [Gerrit: 68226a2a7dd698bb0097a87c2f2367c261f5f234]
- [Upstream: d8edf46f9e40791954d6bfc0231064e6e09252db]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-13
-- northd: introduce build_lrouter_in_dnat_flow routine
- [Gerrit: a5189c81e017ef54c5612072c025fb0b2b24f836]
- [Upstream: 225426081f8533e3d4df022b392105028f8bb37c]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-12
-- northd: introduce build_lrouter_in_unsnat_flow routine
- [Gerrit: 083b610a6cabac7bb5136e72a15e35ae8a95b6fe]
- [Upstream: fa91da7c9979d7b21b7a2a5557705c238cde97a0]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-11
-- northd: introduce build_lrouter_lb_flows routine
- [Gerrit: d737b0572331da6e54b7dfa6a941be48035e061f]
- [Upstream: 949e4319904938c1d83df2557c37b4bdfa6cbf25]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-10
-- northd: reduce indentation in build_lrouter_nat_defrag_and_lb
- [Gerrit: 2de341869c169b807c8ae0abb413d01e381e698b]
- [Upstream: 3ba84a110fd969d2c017070b5047f6df1129ac48]
-
-* Mon Mar 29 2021 Lorenzo Bianconi - 21.03.0-9
-- controller: introduce stats counters for ovn-controller incremental processing
- [Gerrit: eee1993bbf9812adbb3717a2868b87cb01124665]
- [Upstream: 0ddb8b2c979c1102d206b4f855eb5fbe1566768e]
-
-* Mon Mar 29 2021 Mark Michelson - 21.03.0-8
-- Add distgit syncing features.
- [Gerrit: fccf9eb76e6e89e51788a3c3decf02b3b7cf621d]
- [Upstream: N/A]
-
-* Thu Mar 25 2021 Michele Baldessari - 21.03.0-7
-- Fix connection string in case of changes in the ovndb-servers.ocf RA
- [Gerrit: aab170f85e7e271d199f62ca3f1d050531f124bf]
- [Upstream: 7f8bb3f2f77567d8fb30657ad5c3a9408692d6b5]
-
-* Thu Mar 25 2021 Daniel Alvarez Sanchez - 21.03.0-6
-- pinctrl: Don't send gARPs for localports (#1939470)
- [Gerrit: a49a1c229790e896d391fcc0a6ed07fbf977963f]
- [Upstream: 578238b36073256c524a4c2b6ed7521f73aa0019]
-
-* Mon Mar 15 2021 Ilya Maximets - 21.03.0-5
-- ci: Fix handling of python packages.
- [Gerrit: da028c72bdc7742b3065d1df95a3789fbc16b27a]
- [Upstream: 338a6ddb5ea1c89b48c484b0448a216a82225adc]
-
-* Fri Mar 12 2021 Mark Michelson - 21.03.0-4
-- Prepare for 21.03.1
- [Gerrit: 79d8c9d594f8cda5023d3e1fefbaf53e109de89b]
- [Upstream: N/A]
+* Thu Jul 15 2021 Ihar Hrachyshka - 21.06.0-12
+- Don't suppress localport traffic directed to external port (#1974062)
+ [Gerrit: 330e6e7400e1d5e4e6ef4fc6446eeaa945ac6a13]
+ [Upstream: 1148580290d0ace803f20aeaa0241dd51c100630]
+
+* Thu Jul 15 2021 Dumitru Ceara - 21.06.0-11
+- northd: Fix multicast table full comparison. (#1979870)
+ [Gerrit: 38f44df1b8a0ed1ebb86183de29d9e5c3423abdb]
+ [Upstream: 969c98d7297b526c704c6fd2a7138f584f9ad577]
+
+* Thu Jul 15 2021 Dumitru Ceara - 21.06.0-10
+- northd-ddlog: Fix IP family match for DNAT flows.
+ [Gerrit: 518ea2e15df2c77fc19afe74b68d616983638743]
+ [Upstream: 38467229905bdf09a3afa325eaa7a98183f44c72]
+
+* Thu Jul 15 2021 Ihar Hrachyshka - 21.06.0-9
+- Disable ARP/NA responders for vlan-passthru switches
+ [Gerrit: 56fbcfaf71d9a6df0b4cdee583c8d17ca7a82aab]
+ [Upstream: ea57f666f6eef1eb1d578f0e975baa14c5d23ec9]
+
+* Thu Jul 15 2021 Ben Pfaff - 21.06.0-8
+- tests: Fix "vlan traffic for external network with distributed..."
+ [Gerrit: ca26e77c4206a39ae6eab4a1d430ef04b726b640]
+ [Upstream: 5453cc8ca5535e3f33d1b191929e1a3c9ad30f20]
+
+* Thu Jul 15 2021 Dumitru Ceara - 21.06.0-7
+- ovn-controller: Fix port group I-P when they contain non-vif ports.
+ [Gerrit: 3c7f29238c889b248155cbb2c866c0adbf8b46c1]
+ [Upstream: 1bb32e0f8146d7f4fff84af5e3d2836ebe939e04]
+
+* Thu Jul 15 2021 Numan Siddique - 21.06.0-6
+- system-tests: Fix the test file.
+ [Gerrit: 85337cec3f2e5967a14afc5a552ac17dff6c15f6]
+ [Upstream: 9c1978300fa12709e01df07ed8403d8ad43f61fb]
+
+* Thu Jul 15 2021 Mark Michelson - 21.06.0-5
+- northd: Swap src and dst eth addresses in router egress loop.
+ [Gerrit: 86207fcac41b639d14de05e1b0965ad9d8293218]
+ [Upstream: 9be470dc69daf16ac1fbbe13cc295f46862226ad]
+
+* Tue Jun 29 2021 Han Zhou - 21.06.0-4
+- ovn.at: Fix test "virtual ports -- ovn-northd-ddlog".
+ [Gerrit: d61cfca4cadca33e598ba1a23cfdbe81a72d3501]
+ [Upstream: 9e3404e03620f183adc4f05db13bf5a38618b757]