diff --git a/AUTHORS.rst b/AUTHORS.rst
index 9f9b4fbaa..c243c5358 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -271,6 +271,7 @@ Miguel Angel Ajo majopela@redhat.com
Mijo Safradin mijo@linux.vnet.ibm.com
Mika Vaisanen mika.vaisanen@gmail.com
Minoru TAKAHASHI takahashi.minoru7@gmail.com
+Mohammad Heib mheib@redhat.com
Moshe Levi moshele@mellanox.com
Murphy McCauley murphy.mccauley@gmail.com
Natasha Gude
diff --git a/NEWS b/NEWS
index 839ab2cfe..237a9d8f6 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,7 @@
+OVN v21.06.1 - xx xxx xxxx
+--------------------------
+ - Allow static routes without nexthops.
+
OVN v21.06.0 - 18 Jun 2021
-------------------------
- ovn-northd-ddlog: New implementation of northd, based on DDlog. This
diff --git a/TODO.rst b/TODO.rst
index c89fe203e..618ea4844 100644
--- a/TODO.rst
+++ b/TODO.rst
@@ -164,3 +164,9 @@ OVN To-do List
to find a way of determining if routing has already been executed (on a
different hypervisor) for the IP multicast packet being processed locally
in the router pipeline.
+
+* ovn-controller Incremental processing
+
  * physical.c has a global simap - localvif_to_ofport which stores the
+ local OVS interfaces and the ofport numbers. Move this to the engine data
    of the engine node - ed_type_pflow_output.
diff --git a/configure.ac b/configure.ac
index 53034388a..a1cdcb7a9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -13,7 +13,7 @@
# limitations under the License.
AC_PREREQ(2.63)
-AC_INIT(ovn, 21.06.0, bugs@openvswitch.org)
+AC_INIT(ovn, 21.06.1, bugs@openvswitch.org)
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_HEADERS([config.h])
diff --git a/controller/binding.c b/controller/binding.c
index 7fde0fdbb..ba558efdb 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -22,6 +22,7 @@
#include "patch.h"
#include "lib/bitmap.h"
+#include "lib/hmapx.h"
#include "openvswitch/poll-loop.h"
#include "lib/sset.h"
#include "lib/util.h"
@@ -108,6 +109,7 @@ add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
hmap_insert(local_datapaths, &ld->hmap_node, dp_key);
ld->datapath = datapath;
ld->localnet_port = NULL;
+ shash_init(&ld->external_ports);
ld->has_local_l3gateway = has_local_l3gateway;
if (tracked_datapaths) {
@@ -474,6 +476,18 @@ is_network_plugged(const struct sbrec_port_binding *binding_rec,
return network ? !!shash_find_data(bridge_mappings, network) : false;
}
+static void
+update_ld_external_ports(const struct sbrec_port_binding *binding_rec,
+ struct hmap *local_datapaths)
+{
+ struct local_datapath *ld = get_local_datapath(
+ local_datapaths, binding_rec->datapath->tunnel_key);
+ if (ld) {
+ shash_replace(&ld->external_ports, binding_rec->logical_port,
+ binding_rec);
+ }
+}
+
static void
update_ld_localnet_port(const struct sbrec_port_binding *binding_rec,
struct shash *bridge_mappings,
@@ -531,38 +545,41 @@ remove_local_lports(const char *iface_id, struct binding_ctx_out *b_ctx)
}
}
-/* Add a port binding ID (of the form "dp-key"_"port-key") to the set of local
- * lport IDs. Also track if the set has changed.
+/* Add a port binding to the set of locally relevant lports.
+ * Also track if the set has changed.
*/
static void
-update_local_lport_ids(const struct sbrec_port_binding *pb,
- struct binding_ctx_out *b_ctx)
+update_related_lport(const struct sbrec_port_binding *pb,
+ struct binding_ctx_out *b_ctx)
{
char buf[16];
get_unique_lport_key(pb->datapath->tunnel_key, pb->tunnel_key,
buf, sizeof(buf));
- if (sset_add(b_ctx->local_lport_ids, buf) != NULL) {
- b_ctx->local_lport_ids_changed = true;
+ if (sset_add(&b_ctx->related_lports->lport_ids, buf) != NULL) {
+ b_ctx->related_lports_changed = true;
if (b_ctx->tracked_dp_bindings) {
/* Add the 'pb' to the tracked_datapaths. */
tracked_binding_datapath_lport_add(pb, b_ctx->tracked_dp_bindings);
}
}
+ sset_add(&b_ctx->related_lports->lport_names, pb->logical_port);
}
-/* Remove a port binding id from the set of local lport IDs. Also track if
- * the set has changed.
+/* Remove a port binding id from the set of locally relevant lports.
+ * Also track if the set has changed.
*/
static void
-remove_local_lport_ids(const struct sbrec_port_binding *pb,
- struct binding_ctx_out *b_ctx)
+remove_related_lport(const struct sbrec_port_binding *pb,
+ struct binding_ctx_out *b_ctx)
{
char buf[16];
get_unique_lport_key(pb->datapath->tunnel_key, pb->tunnel_key,
buf, sizeof(buf));
- if (sset_find_and_delete(b_ctx->local_lport_ids, buf)) {
- b_ctx->local_lport_ids_changed = true;
+ sset_find_and_delete(&b_ctx->related_lports->lport_names,
+ pb->logical_port);
+ if (sset_find_and_delete(&b_ctx->related_lports->lport_ids, buf)) {
+ b_ctx->related_lports_changed = true;
if (b_ctx->tracked_dp_bindings) {
/* Add the 'pb' to the tracked_datapaths. */
@@ -678,6 +695,20 @@ static struct binding_lport *binding_lport_check_and_cleanup(
static char *get_lport_type_str(enum en_lport_type lport_type);
+void
+related_lports_init(struct related_lports *rp)
+{
+ sset_init(&rp->lport_names);
+ sset_init(&rp->lport_ids);
+}
+
+void
+related_lports_destroy(struct related_lports *rp)
+{
+ sset_destroy(&rp->lport_names);
+ sset_destroy(&rp->lport_ids);
+}
+
void
local_binding_data_init(struct local_binding_data *lbinding_data)
{
@@ -1172,7 +1203,7 @@ release_binding_lport(const struct sbrec_chassis *chassis_rec,
struct binding_ctx_out *b_ctx_out)
{
if (is_binding_lport_this_chassis(b_lport, chassis_rec)) {
- remove_local_lport_ids(b_lport->pb, b_ctx_out);
+ remove_related_lport(b_lport->pb, b_ctx_out);
if (!release_lport(b_lport->pb, sb_readonly,
b_ctx_out->tracked_dp_bindings,
b_ctx_out->if_mgr)) {
@@ -1214,7 +1245,7 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
pb->datapath, false,
b_ctx_out->local_datapaths,
b_ctx_out->tracked_dp_bindings);
- update_local_lport_ids(pb, b_ctx_out);
+ update_related_lport(pb, b_ctx_out);
update_local_lports(pb->logical_port, b_ctx_out);
if (b_lport->lbinding->iface && qos_map && b_ctx_in->ovs_idl_txn) {
get_qos_params(pb, qos_map);
@@ -1405,7 +1436,7 @@ consider_virtual_lport(const struct sbrec_port_binding *pb,
* its entry from the local_lport_ids if present. This is required
* when a virtual port moves from one chassis to other.*/
if (!virtual_b_lport) {
- remove_local_lport_ids(pb, b_ctx_out);
+ remove_related_lport(pb, b_ctx_out);
}
return true;
@@ -1430,7 +1461,7 @@ consider_nonvif_lport_(const struct sbrec_port_binding *pb,
b_ctx_out->local_datapaths,
b_ctx_out->tracked_dp_bindings);
- update_local_lport_ids(pb, b_ctx_out);
+ update_related_lport(pb, b_ctx_out);
return claim_lport(pb, NULL, b_ctx_in->chassis_rec, NULL,
!b_ctx_in->ovnsb_idl_txn, false,
b_ctx_out->tracked_dp_bindings,
@@ -1482,7 +1513,7 @@ consider_localnet_lport(const struct sbrec_port_binding *pb,
get_qos_params(pb, qos_map);
}
- update_local_lport_ids(pb, b_ctx_out);
+ update_related_lport(pb, b_ctx_out);
}
static bool
@@ -1512,7 +1543,7 @@ consider_ha_lport(const struct sbrec_port_binding *pb,
pb->datapath, false,
b_ctx_out->local_datapaths,
b_ctx_out->tracked_dp_bindings);
- update_local_lport_ids(pb, b_ctx_out);
+ update_related_lport(pb, b_ctx_out);
}
return consider_nonvif_lport_(pb, our_chassis, false, b_ctx_in, b_ctx_out);
@@ -1614,8 +1645,9 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
!sset_is_empty(b_ctx_out->egress_ifaces) ? &qos_map : NULL;
struct ovs_list localnet_lports = OVS_LIST_INITIALIZER(&localnet_lports);
+ struct ovs_list external_lports = OVS_LIST_INITIALIZER(&external_lports);
- struct localnet_lport {
+ struct lport {
struct ovs_list list_node;
const struct sbrec_port_binding *pb;
};
@@ -1634,7 +1666,7 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
case LP_PATCH:
case LP_LOCALPORT:
case LP_VTEP:
- update_local_lport_ids(pb, b_ctx_out);
+ update_related_lport(pb, b_ctx_out);
break;
case LP_VIF:
@@ -1663,11 +1695,14 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
case LP_EXTERNAL:
consider_external_lport(pb, b_ctx_in, b_ctx_out);
+ struct lport *ext_lport = xmalloc(sizeof *ext_lport);
+ ext_lport->pb = pb;
+ ovs_list_push_back(&external_lports, &ext_lport->list_node);
break;
case LP_LOCALNET: {
consider_localnet_lport(pb, b_ctx_in, b_ctx_out, &qos_map);
- struct localnet_lport *lnet_lport = xmalloc(sizeof *lnet_lport);
+ struct lport *lnet_lport = xmalloc(sizeof *lnet_lport);
lnet_lport->pb = pb;
ovs_list_push_back(&localnet_lports, &lnet_lport->list_node);
break;
@@ -1694,7 +1729,7 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
/* Run through each localnet lport list to see if it is a localnet port
* on local datapaths discovered from above loop, and update the
* corresponding local datapath accordingly. */
- struct localnet_lport *lnet_lport;
+ struct lport *lnet_lport;
LIST_FOR_EACH_POP (lnet_lport, list_node, &localnet_lports) {
update_ld_localnet_port(lnet_lport->pb, &bridge_mappings,
b_ctx_out->egress_ifaces,
@@ -1702,6 +1737,15 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
free(lnet_lport);
}
+ /* Run through external lport list to see if these are external ports
+ * on local datapaths discovered from above loop, and update the
+ * corresponding local datapath accordingly. */
+ struct lport *ext_lport;
+ LIST_FOR_EACH_POP (ext_lport, list_node, &external_lports) {
+ update_ld_external_ports(ext_lport->pb, b_ctx_out->local_datapaths);
+ free(ext_lport);
+ }
+
shash_destroy(&bridge_mappings);
if (!sset_is_empty(b_ctx_out->egress_ifaces)
@@ -1895,7 +1939,7 @@ remove_pb_from_local_datapath(const struct sbrec_port_binding *pb,
struct binding_ctx_out *b_ctx_out,
struct local_datapath *ld)
{
- remove_local_lport_ids(pb, b_ctx_out);
+ remove_related_lport(pb, b_ctx_out);
if (!strcmp(pb->type, "patch") ||
!strcmp(pb->type, "l3gateway")) {
remove_local_datapath_peer_port(pb, ld, b_ctx_out->local_datapaths);
@@ -1904,6 +1948,8 @@ remove_pb_from_local_datapath(const struct sbrec_port_binding *pb,
pb->logical_port)) {
ld->localnet_port = NULL;
}
+ } else if (!strcmp(pb->type, "external")) {
+ shash_find_and_delete(&ld->external_ports, pb->logical_port);
}
if (!strcmp(pb->type, "l3gateway")) {
@@ -2407,6 +2453,9 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
shash_add(&deleted_virtual_pbs, pb->logical_port, pb);
} else {
shash_add(&deleted_other_pbs, pb->logical_port, pb);
+ if (lport_type == LP_EXTERNAL) {
+ hmapx_add(b_ctx_out->extport_updated_datapaths, pb->datapath);
+ }
}
}
@@ -2502,7 +2551,7 @@ delete_done:
case LP_PATCH:
case LP_LOCALPORT:
case LP_VTEP:
- update_local_lport_ids(pb, b_ctx_out);
+ update_related_lport(pb, b_ctx_out);
if (lport_type == LP_PATCH) {
if (!ld) {
/* If 'ld' for this lport is not present, then check if
@@ -2561,6 +2610,8 @@ delete_done:
case LP_EXTERNAL:
handled = consider_external_lport(pb, b_ctx_in, b_ctx_out);
+ update_ld_external_ports(pb, b_ctx_out->local_datapaths);
+ hmapx_add(b_ctx_out->extport_updated_datapaths, pb->datapath);
break;
case LP_LOCALNET: {
@@ -2926,23 +2977,3 @@ cleanup:
return b_lport;
}
-
-struct sset *
-binding_collect_local_binding_lports(struct local_binding_data *lbinding_data)
-{
- struct sset *lports = xzalloc(sizeof *lports);
- sset_init(lports);
- struct shash_node *shash_node;
- SHASH_FOR_EACH (shash_node, &lbinding_data->lports) {
- struct binding_lport *b_lport = shash_node->data;
- sset_add(lports, b_lport->name);
- }
- return lports;
-}
-
-void
-binding_destroy_local_binding_lports(struct sset *lports)
-{
- sset_destroy(lports);
- free(lports);
-}
diff --git a/controller/binding.h b/controller/binding.h
index 8f3289476..8fd54092e 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -22,6 +22,7 @@
#include "openvswitch/hmap.h"
#include "openvswitch/uuid.h"
#include "openvswitch/list.h"
+#include "sset.h"
struct hmap;
struct ovsdb_idl;
@@ -56,6 +57,19 @@ struct binding_ctx_in {
const struct ovsrec_interface_table *iface_table;
};
+/* Locally relevant port bindings, e.g., VIFs that might be bound locally,
+ * patch ports.
+ */
+struct related_lports {
+ struct sset lport_names; /* Set of port names. */
+ struct sset lport_ids; /* Set of <datapath-tunnel-key>_<port-tunnel-key>
+ * IDs for fast lookup.
+ */
+};
+
+void related_lports_init(struct related_lports *);
+void related_lports_destroy(struct related_lports *);
+
struct binding_ctx_out {
struct hmap *local_datapaths;
struct local_binding_data *lbinding_data;
@@ -65,11 +79,9 @@ struct binding_ctx_out {
/* Track if local_lports have been updated. */
bool local_lports_changed;
- /* sset of local lport ids in the format
- * <datapath-tunnel-key>_<port-tunnel-key>. */
- struct sset *local_lport_ids;
- /* Track if local_lport_ids has been updated. */
- bool local_lport_ids_changed;
+ /* Port bindings that are relevant to the local chassis. */
+ struct related_lports *related_lports;
+ bool related_lports_changed;
/* Track if non-vif port bindings (e.g., patch, external) have been
* added/deleted.
@@ -88,6 +100,8 @@ struct binding_ctx_out {
struct hmap *tracked_dp_bindings;
struct if_status_mgr *if_mgr;
+
+ struct hmapx *extport_updated_datapaths;
};
struct local_binding_data {
@@ -133,13 +147,4 @@ bool binding_handle_port_binding_changes(struct binding_ctx_in *,
void binding_tracked_dp_destroy(struct hmap *tracked_datapaths);
void binding_dump_local_bindings(struct local_binding_data *, struct ds *);
-
-/* Generates a sset of lport names from local_binding_data.
- * Note: the caller is responsible for destroying and freeing the returned
- * sset, by calling binding_detroy_local_binding_lports(). */
-struct sset *binding_collect_local_binding_lports(struct local_binding_data *);
-
-/* Destroy and free the lports sset returned by
- * binding_collect_local_binding_lports(). */
-void binding_destroy_local_binding_lports(struct sset *lports);
#endif /* controller/binding.h */
diff --git a/controller/lflow.c b/controller/lflow.c
index 680b8cca1..4270d0a33 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -611,7 +611,7 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
get_unique_lport_key(dp_id, port_id, buf, sizeof(buf));
lflow_resource_add(l_ctx_out->lfrr, REF_TYPE_PORTBINDING, buf,
&lflow->header_.uuid);
- if (!sset_contains(l_ctx_in->local_lport_ids, buf)) {
+ if (!sset_contains(l_ctx_in->related_lport_ids, buf)) {
VLOG_DBG("lflow "UUID_FMT
" port %s in match is not local, skip",
UUID_ARGS(&lflow->header_.uuid),
diff --git a/controller/lflow.h b/controller/lflow.h
index 3c929d8a6..076b05beb 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -143,7 +143,7 @@ struct lflow_ctx_in {
const struct shash *addr_sets;
const struct shash *port_groups;
const struct sset *active_tunnels;
- const struct sset *local_lport_ids;
+ const struct sset *related_lport_ids;
};
struct lflow_ctx_out {
diff --git a/controller/ofctrl.c b/controller/ofctrl.c
index c29c3d180..053631590 100644
--- a/controller/ofctrl.c
+++ b/controller/ofctrl.c
@@ -173,7 +173,7 @@ struct sb_flow_ref {
struct uuid sb_uuid;
};
-/* A installed flow, in static variable installed_flows.
+/* An installed flow, in static variables installed_lflows/installed_pflows.
*
* Installed flows are updated in ofctrl_put for maintaining the flow
* installation to OVS. They are updated according to desired flows: either by
@@ -234,7 +234,7 @@ static struct desired_flow *desired_flow_lookup_conjunctive(
static void desired_flow_destroy(struct desired_flow *);
static struct installed_flow *installed_flow_lookup(
- const struct ovn_flow *target);
+ const struct ovn_flow *target, struct hmap *installed_flows);
static void installed_flow_destroy(struct installed_flow *);
static struct installed_flow *installed_flow_dup(struct desired_flow *);
static struct desired_flow *installed_flow_get_active(struct installed_flow *);
@@ -302,9 +302,12 @@ static ovs_be32 xid, xid2;
* zero, to avoid unbounded buffering. */
static struct rconn_packet_counter *tx_counter;
-/* Flow table of "struct ovn_flow"s, that holds the flow table currently
- * installed in the switch. */
-static struct hmap installed_flows;
+/* Flow table of "struct ovn_flow"s, that holds the logical flow table
+ * currently installed in the switch. */
+static struct hmap installed_lflows;
+/* Flow table of "struct ovn_flow"s, that holds the physical flow table
+ * currently installed in the switch. */
+static struct hmap installed_pflows;
/* A reference to the group_table. */
static struct ovn_extend_table *groups;
@@ -343,7 +346,8 @@ ofctrl_init(struct ovn_extend_table *group_table,
swconn = rconn_create(inactivity_probe_interval, 0,
DSCP_DEFAULT, 1 << OFP15_VERSION);
tx_counter = rconn_packet_counter_create();
- hmap_init(&installed_flows);
+ hmap_init(&installed_lflows);
+ hmap_init(&installed_pflows);
ovs_list_init(&flow_updates);
ovn_init_symtab(&symtab);
groups = group_table;
@@ -1426,11 +1430,12 @@ desired_flow_lookup_conjunctive(struct ovn_desired_flow_table *flow_table,
/* Finds and returns an installed_flow in installed_flows whose key is
* identical to 'target''s key, or NULL if there is none. */
static struct installed_flow *
-installed_flow_lookup(const struct ovn_flow *target)
+installed_flow_lookup(const struct ovn_flow *target,
+ struct hmap *installed_flows)
{
struct installed_flow *i;
HMAP_FOR_EACH_WITH_HASH (i, match_hmap_node, target->hash,
- &installed_flows) {
+ installed_flows) {
struct ovn_flow *f = &i->flow;
if (f->table_id == target->table_id
&& f->priority == target->priority
@@ -1542,8 +1547,14 @@ static void
ovn_installed_flow_table_clear(void)
{
struct installed_flow *f, *next;
- HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_flows) {
- hmap_remove(&installed_flows, &f->match_hmap_node);
+ HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_lflows) {
+ hmap_remove(&installed_lflows, &f->match_hmap_node);
+ unlink_all_refs_for_installed_flow(f);
+ installed_flow_destroy(f);
+ }
+
+ HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_pflows) {
+ hmap_remove(&installed_pflows, &f->match_hmap_node);
unlink_all_refs_for_installed_flow(f);
installed_flow_destroy(f);
}
@@ -1553,7 +1564,8 @@ static void
ovn_installed_flow_table_destroy(void)
{
ovn_installed_flow_table_clear();
- hmap_destroy(&installed_flows);
+ hmap_destroy(&installed_lflows);
+ hmap_destroy(&installed_pflows);
}
/* Flow table update. */
@@ -1829,6 +1841,7 @@ installed_flow_del(struct ovn_flow *i,
static void
update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
struct ofputil_bundle_ctrl_msg *bc,
+ struct hmap *installed_flows,
struct ovs_list *msgs)
{
ovs_assert(ovs_list_is_empty(&flow_table->tracked_flows));
@@ -1836,7 +1849,7 @@ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
* longer desired, delete them; if any of them should have different
* actions, update them. */
struct installed_flow *i, *next;
- HMAP_FOR_EACH_SAFE (i, next, match_hmap_node, &installed_flows) {
+ HMAP_FOR_EACH_SAFE (i, next, match_hmap_node, installed_flows) {
unlink_all_refs_for_installed_flow(i);
struct desired_flow *d = desired_flow_lookup(flow_table, &i->flow);
if (!d) {
@@ -1845,7 +1858,7 @@ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
installed_flow_del(&i->flow, bc, msgs);
ovn_flow_log(&i->flow, "removing installed");
- hmap_remove(&installed_flows, &i->match_hmap_node);
+ hmap_remove(installed_flows, &i->match_hmap_node);
installed_flow_destroy(i);
} else {
if (!ofpacts_equal(i->flow.ofpacts, i->flow.ofpacts_len,
@@ -1863,14 +1876,14 @@ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
* in the installed flow table. */
struct desired_flow *d;
HMAP_FOR_EACH (d, match_hmap_node, &flow_table->match_flow_table) {
- i = installed_flow_lookup(&d->flow);
+ i = installed_flow_lookup(&d->flow, installed_flows);
if (!i) {
ovn_flow_log(&d->flow, "adding installed");
installed_flow_add(&d->flow, bc, msgs);
/* Copy 'd' from 'flow_table' to installed_flows. */
i = installed_flow_dup(d);
- hmap_insert(&installed_flows, &i->match_hmap_node, i->flow.hash);
+ hmap_insert(installed_flows, &i->match_hmap_node, i->flow.hash);
link_installed_to_desired(i, d);
} else if (!d->installed_flow) {
/* This is a desired_flow that conflicts with one installed
@@ -1961,6 +1974,7 @@ merge_tracked_flows(struct ovn_desired_flow_table *flow_table)
static void
update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
struct ofputil_bundle_ctrl_msg *bc,
+ struct hmap *installed_flows,
struct ovs_list *msgs)
{
merge_tracked_flows(flow_table);
@@ -1979,7 +1993,7 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
installed_flow_del(&i->flow, bc, msgs);
ovn_flow_log(&i->flow, "removing installed (tracked)");
- hmap_remove(&installed_flows, &i->match_hmap_node);
+ hmap_remove(installed_flows, &i->match_hmap_node);
installed_flow_destroy(i);
} else if (was_active) {
/* There are other desired flow(s) referencing this
@@ -1993,7 +2007,8 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
desired_flow_destroy(f);
} else {
/* The desired flow was added or modified. */
- struct installed_flow *i = installed_flow_lookup(&f->flow);
+ struct installed_flow *i = installed_flow_lookup(&f->flow,
+ installed_flows);
if (!i) {
/* Adding a new flow. */
installed_flow_add(&f->flow, bc, msgs);
@@ -2001,7 +2016,7 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
/* Copy 'f' from 'flow_table' to installed_flows. */
struct installed_flow *new_node = installed_flow_dup(f);
- hmap_insert(&installed_flows, &new_node->match_hmap_node,
+ hmap_insert(installed_flows, &new_node->match_hmap_node,
new_node->flow.hash);
link_installed_to_desired(new_node, f);
} else if (installed_flow_get_active(i) == f) {
@@ -2055,16 +2070,19 @@ ofctrl_can_put(void)
*
* This should be called after ofctrl_run() within the main loop. */
void
-ofctrl_put(struct ovn_desired_flow_table *flow_table,
+ofctrl_put(struct ovn_desired_flow_table *lflow_table,
+ struct ovn_desired_flow_table *pflow_table,
struct shash *pending_ct_zones,
const struct sbrec_meter_table *meter_table,
uint64_t req_cfg,
- bool flow_changed)
+ bool lflows_changed,
+ bool pflows_changed)
{
static bool skipped_last_time = false;
static uint64_t old_req_cfg = 0;
bool need_put = false;
- if (flow_changed || skipped_last_time || need_reinstall_flows) {
+ if (lflows_changed || pflows_changed || skipped_last_time ||
+ need_reinstall_flows) {
need_put = true;
old_req_cfg = req_cfg;
} else if (req_cfg != old_req_cfg) {
@@ -2093,7 +2111,6 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
return;
}
- skipped_last_time = false;
need_reinstall_flows = false;
/* OpenFlow messages to send to the switch to bring it up-to-date. */
@@ -2159,12 +2176,35 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
bundle_open = ofputil_encode_bundle_ctrl_request(OFP15_VERSION, &bc);
ovs_list_push_back(&msgs, &bundle_open->list_node);
- if (flow_table->change_tracked) {
- update_installed_flows_by_track(flow_table, &bc, &msgs);
- } else {
- update_installed_flows_by_compare(flow_table, &bc, &msgs);
+ /* If skipped last time, then process the flow table
+ * (tracked) flows even if lflows_changed is not set.
+ * Same for pflows_changed. */
+ if (lflows_changed || skipped_last_time) {
+ if (lflow_table->change_tracked) {
+ update_installed_flows_by_track(lflow_table, &bc,
+ &installed_lflows,
+ &msgs);
+ } else {
+ update_installed_flows_by_compare(lflow_table, &bc,
+ &installed_lflows,
+ &msgs);
+ }
+ }
+
+ if (pflows_changed || skipped_last_time) {
+ if (pflow_table->change_tracked) {
+ update_installed_flows_by_track(pflow_table, &bc,
+ &installed_pflows,
+ &msgs);
+ } else {
+ update_installed_flows_by_compare(pflow_table, &bc,
+ &installed_pflows,
+ &msgs);
+ }
}
+ skipped_last_time = false;
+
if (ovs_list_back(&msgs) == &bundle_open->list_node) {
/* No flow updates. Removing the bundle open request. */
ovs_list_pop_back(&msgs);
@@ -2287,8 +2327,11 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
cur_cfg = req_cfg;
}
- flow_table->change_tracked = true;
- ovs_assert(ovs_list_is_empty(&flow_table->tracked_flows));
+ lflow_table->change_tracked = true;
+ ovs_assert(ovs_list_is_empty(&lflow_table->tracked_flows));
+
+ pflow_table->change_tracked = true;
+ ovs_assert(ovs_list_is_empty(&pflow_table->tracked_flows));
}
/* Looks up the logical port with the name 'port_name' in 'br_int_'. If
diff --git a/controller/ofctrl.h b/controller/ofctrl.h
index 88769566a..ead8088c5 100644
--- a/controller/ofctrl.h
+++ b/controller/ofctrl.h
@@ -52,11 +52,13 @@ void ofctrl_init(struct ovn_extend_table *group_table,
void ofctrl_run(const struct ovsrec_bridge *br_int,
struct shash *pending_ct_zones);
enum mf_field_id ofctrl_get_mf_field_id(void);
-void ofctrl_put(struct ovn_desired_flow_table *,
+void ofctrl_put(struct ovn_desired_flow_table *lflow_table,
+ struct ovn_desired_flow_table *pflow_table,
struct shash *pending_ct_zones,
const struct sbrec_meter_table *,
uint64_t nb_cfg,
- bool flow_changed);
+ bool lflow_changed,
+ bool pflow_changed);
bool ofctrl_can_put(void);
void ofctrl_wait(void);
void ofctrl_destroy(void);
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 07c6fcfd1..ea03638a9 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -46,6 +46,7 @@
#include "openvswitch/vconn.h"
#include "openvswitch/vlog.h"
#include "ovn/actions.h"
+#include "ovn/features.h"
#include "lib/chassis-index.h"
#include "lib/extend-table.h"
#include "lib/ip-mcast-index.h"
@@ -88,6 +89,7 @@ static unixctl_cb_func lflow_cache_show_stats_cmd;
static unixctl_cb_func debug_delay_nb_cfg_report;
#define DEFAULT_BRIDGE_NAME "br-int"
+#define DEFAULT_DATAPATH "system"
#define DEFAULT_PROBE_INTERVAL_MSEC 5000
#define OFCTRL_DEFAULT_PROBE_INTERVAL_SEC 0
@@ -319,10 +321,6 @@ static const struct ovsrec_bridge *
create_br_int(struct ovsdb_idl_txn *ovs_idl_txn,
const struct ovsrec_open_vswitch_table *ovs_table)
{
- if (!ovs_idl_txn) {
- return NULL;
- }
-
const struct ovsrec_open_vswitch *cfg;
cfg = ovsrec_open_vswitch_table_first(ovs_table);
if (!cfg) {
@@ -386,6 +384,21 @@ create_br_int(struct ovsdb_idl_txn *ovs_idl_txn,
return bridge;
}
+static const struct ovsrec_datapath *
+create_br_datapath(struct ovsdb_idl_txn *ovs_idl_txn,
+ const struct ovsrec_open_vswitch *cfg,
+ const char *datapath_type)
+{
+ ovsdb_idl_txn_add_comment(ovs_idl_txn,
+ "ovn-controller: creating bridge datapath '%s'",
+ datapath_type);
+
+ struct ovsrec_datapath *dp = ovsrec_datapath_insert(ovs_idl_txn);
+ ovsrec_open_vswitch_verify_datapaths(cfg);
+ ovsrec_open_vswitch_update_datapaths_setkey(cfg, datapath_type, dp);
+ return dp;
+}
+
static const struct ovsrec_bridge *
get_br_int(const struct ovsrec_bridge_table *bridge_table,
const struct ovsrec_open_vswitch_table *ovs_table)
@@ -399,33 +412,69 @@ get_br_int(const struct ovsrec_bridge_table *bridge_table,
return get_bridge(bridge_table, br_int_name(cfg));
}
-static const struct ovsrec_bridge *
+static const struct ovsrec_datapath *
+get_br_datapath(const struct ovsrec_open_vswitch *cfg,
+ const char *datapath_type)
+{
+ for (size_t i = 0; i < cfg->n_datapaths; i++) {
+ if (!strcmp(cfg->key_datapaths[i], datapath_type)) {
+ return cfg->value_datapaths[i];
+ }
+ }
+ return NULL;
+}
+
+static void
process_br_int(struct ovsdb_idl_txn *ovs_idl_txn,
const struct ovsrec_bridge_table *bridge_table,
- const struct ovsrec_open_vswitch_table *ovs_table)
+ const struct ovsrec_open_vswitch_table *ovs_table,
+ const struct ovsrec_bridge **br_int_,
+ const struct ovsrec_datapath **br_int_dp_)
{
- const struct ovsrec_bridge *br_int = get_br_int(bridge_table,
- ovs_table);
- if (!br_int) {
- br_int = create_br_int(ovs_idl_txn, ovs_table);
- }
- if (br_int && ovs_idl_txn) {
- const struct ovsrec_open_vswitch *cfg;
- cfg = ovsrec_open_vswitch_table_first(ovs_table);
- ovs_assert(cfg);
- const char *datapath_type = smap_get(&cfg->external_ids,
- "ovn-bridge-datapath-type");
- /* Check for the datapath_type and set it only if it is defined in
- * cfg. */
- if (datapath_type && strcmp(br_int->datapath_type, datapath_type)) {
- ovsrec_bridge_set_datapath_type(br_int, datapath_type);
+ const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
+ const struct ovsrec_datapath *br_int_dp = NULL;
+
+ ovs_assert(br_int_ && br_int_dp_);
+ if (ovs_idl_txn) {
+ if (!br_int) {
+ br_int = create_br_int(ovs_idl_txn, ovs_table);
}
- if (!br_int->fail_mode || strcmp(br_int->fail_mode, "secure")) {
- ovsrec_bridge_set_fail_mode(br_int, "secure");
- VLOG_WARN("Integration bridge fail-mode changed to 'secure'.");
+
+ if (br_int) {
+ const struct ovsrec_open_vswitch *cfg =
+ ovsrec_open_vswitch_table_first(ovs_table);
+ ovs_assert(cfg);
+
+ /* Propagate "ovn-bridge-datapath-type" from OVS table, if any.
+ * Otherwise use the datapath-type set in br-int, if any.
+ * Finally, assume "system" datapath if none configured.
+ */
+ const char *datapath_type =
+ smap_get(&cfg->external_ids, "ovn-bridge-datapath-type");
+
+ if (!datapath_type) {
+ if (br_int->datapath_type[0]) {
+ datapath_type = br_int->datapath_type;
+ } else {
+ datapath_type = DEFAULT_DATAPATH;
+ }
+ }
+ if (strcmp(br_int->datapath_type, datapath_type)) {
+ ovsrec_bridge_set_datapath_type(br_int, datapath_type);
+ }
+ if (!br_int->fail_mode || strcmp(br_int->fail_mode, "secure")) {
+ ovsrec_bridge_set_fail_mode(br_int, "secure");
+ VLOG_WARN("Integration bridge fail-mode changed to 'secure'.");
+ }
+ br_int_dp = get_br_datapath(cfg, datapath_type);
+ if (!br_int_dp) {
+ br_int_dp = create_br_datapath(ovs_idl_txn, cfg,
+ datapath_type);
+ }
}
}
- return br_int;
+ *br_int_ = br_int;
+ *br_int_dp_ = br_int_dp;
}
static const char *
@@ -563,7 +612,7 @@ add_pending_ct_zone_entry(struct shash *pending_ct_zones,
static void
update_ct_zones(const struct sset *lports, const struct hmap *local_datapaths,
struct simap *ct_zones, unsigned long *ct_zone_bitmap,
- struct shash *pending_ct_zones, struct hmapx *updated_dps)
+ struct shash *pending_ct_zones)
{
struct simap_node *ct_zone, *ct_zone_next;
int scan_start = 1;
@@ -653,11 +702,6 @@ update_ct_zones(const struct sset *lports, const struct hmap *local_datapaths,
bitmap_set1(ct_zone_bitmap, snat_req_node->data);
simap_put(ct_zones, snat_req_node->name, snat_req_node->data);
- struct shash_node *ld_node = shash_find(&all_lds, snat_req_node->name);
- if (ld_node) {
- struct local_datapath *dp = ld_node->data;
- hmapx_add(updated_dps, (void *) dp->datapath);
- }
}
/* xxx This is wasteful to assign a zone to each port--even if no
@@ -686,12 +730,6 @@ update_ct_zones(const struct sset *lports, const struct hmap *local_datapaths,
bitmap_set1(ct_zone_bitmap, zone);
simap_put(ct_zones, user, zone);
-
- struct shash_node *ld_node = shash_find(&all_lds, user);
- if (ld_node) {
- struct local_datapath *dp = ld_node->data;
- hmapx_add(updated_dps, (void *) dp->datapath);
- }
}
simap_destroy(&req_snat_zones);
@@ -848,6 +886,7 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
ovsdb_idl_add_table(ovs_idl, &ovsrec_table_open_vswitch);
ovsdb_idl_add_column(ovs_idl, &ovsrec_open_vswitch_col_external_ids);
ovsdb_idl_add_column(ovs_idl, &ovsrec_open_vswitch_col_bridges);
+ ovsdb_idl_add_column(ovs_idl, &ovsrec_open_vswitch_col_datapaths);
ovsdb_idl_add_table(ovs_idl, &ovsrec_table_interface);
ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_name);
ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_bfd);
@@ -870,6 +909,8 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
ovsdb_idl_add_column(ovs_idl, &ovsrec_ssl_col_ca_cert);
ovsdb_idl_add_column(ovs_idl, &ovsrec_ssl_col_certificate);
ovsdb_idl_add_column(ovs_idl, &ovsrec_ssl_col_private_key);
+ ovsdb_idl_add_table(ovs_idl, &ovsrec_table_datapath);
+ ovsdb_idl_add_column(ovs_idl, &ovsrec_datapath_col_capabilities);
chassis_register_ovs_idl(ovs_idl);
encaps_register_ovs_idl(ovs_idl);
binding_register_ovs_idl(ovs_idl);
@@ -970,9 +1011,10 @@ struct ed_type_runtime_data {
* local hypervisor, and localnet ports. */
struct sset local_lports;
- /* Contains the same ports as local_lports, but in the format:
- * <datapath-tunnel-key>_<port-tunnel-key> */
- struct sset local_lport_ids;
+ /* Port bindings that are relevant to the local chassis (VIFs bound
+ * locally, patch ports).
+ */
+ struct related_lports related_lports;
struct sset active_tunnels;
/* runtime data engine private data. */
@@ -986,6 +1028,9 @@ struct ed_type_runtime_data {
/* CT zone data. Contains datapaths that had updated CT zones */
struct hmapx ct_updated_datapaths;
+
+ /* Contains datapaths that had updated external ports. */
+ struct hmapx extport_updated_datapaths;
};
/* struct ed_type_runtime_data has the below members for tracking the
@@ -1068,7 +1113,7 @@ en_runtime_data_init(struct engine_node *node OVS_UNUSED,
hmap_init(&data->local_datapaths);
sset_init(&data->local_lports);
- sset_init(&data->local_lport_ids);
+ related_lports_init(&data->related_lports);
sset_init(&data->active_tunnels);
sset_init(&data->egress_ifaces);
smap_init(&data->local_iface_ids);
@@ -1078,6 +1123,7 @@ en_runtime_data_init(struct engine_node *node OVS_UNUSED,
hmap_init(&data->tracked_dp_bindings);
hmapx_init(&data->ct_updated_datapaths);
+ hmapx_init(&data->extport_updated_datapaths);
return data;
}
@@ -1088,7 +1134,7 @@ en_runtime_data_cleanup(void *data)
struct ed_type_runtime_data *rt_data = data;
sset_destroy(&rt_data->local_lports);
- sset_destroy(&rt_data->local_lport_ids);
+ related_lports_destroy(&rt_data->related_lports);
sset_destroy(&rt_data->active_tunnels);
sset_destroy(&rt_data->egress_ifaces);
smap_destroy(&rt_data->local_iface_ids);
@@ -1096,12 +1142,14 @@ en_runtime_data_cleanup(void *data)
HMAP_FOR_EACH_SAFE (cur_node, next_node, hmap_node,
&rt_data->local_datapaths) {
free(cur_node->peer_ports);
+ shash_destroy(&cur_node->external_ports);
hmap_remove(&rt_data->local_datapaths, &cur_node->hmap_node);
free(cur_node);
}
hmap_destroy(&rt_data->local_datapaths);
local_binding_data_destroy(&rt_data->lbinding_data);
hmapx_destroy(&rt_data->ct_updated_datapaths);
+ hmapx_destroy(&rt_data->extport_updated_datapaths);
}
static void
@@ -1181,14 +1229,15 @@ init_binding_ctx(struct engine_node *node,
b_ctx_out->local_datapaths = &rt_data->local_datapaths;
b_ctx_out->local_lports = &rt_data->local_lports;
b_ctx_out->local_lports_changed = false;
- b_ctx_out->local_lport_ids = &rt_data->local_lport_ids;
- b_ctx_out->local_lport_ids_changed = false;
+ b_ctx_out->related_lports = &rt_data->related_lports;
+ b_ctx_out->related_lports_changed = false;
b_ctx_out->non_vif_ports_changed = false;
b_ctx_out->egress_ifaces = &rt_data->egress_ifaces;
b_ctx_out->lbinding_data = &rt_data->lbinding_data;
b_ctx_out->local_iface_ids = &rt_data->local_iface_ids;
b_ctx_out->tracked_dp_bindings = NULL;
b_ctx_out->if_mgr = ctrl_ctx->if_mgr;
+ b_ctx_out->extport_updated_datapaths = &rt_data->extport_updated_datapaths;
}
static void
@@ -1197,7 +1246,6 @@ en_runtime_data_run(struct engine_node *node, void *data)
struct ed_type_runtime_data *rt_data = data;
struct hmap *local_datapaths = &rt_data->local_datapaths;
struct sset *local_lports = &rt_data->local_lports;
- struct sset *local_lport_ids = &rt_data->local_lport_ids;
struct sset *active_tunnels = &rt_data->active_tunnels;
static bool first_run = true;
@@ -1208,23 +1256,25 @@ en_runtime_data_run(struct engine_node *node, void *data)
struct local_datapath *cur_node, *next_node;
HMAP_FOR_EACH_SAFE (cur_node, next_node, hmap_node, local_datapaths) {
free(cur_node->peer_ports);
+ shash_destroy(&cur_node->external_ports);
hmap_remove(local_datapaths, &cur_node->hmap_node);
free(cur_node);
}
hmap_clear(local_datapaths);
local_binding_data_destroy(&rt_data->lbinding_data);
sset_destroy(local_lports);
- sset_destroy(local_lport_ids);
+ related_lports_destroy(&rt_data->related_lports);
sset_destroy(active_tunnels);
sset_destroy(&rt_data->egress_ifaces);
smap_destroy(&rt_data->local_iface_ids);
sset_init(local_lports);
- sset_init(local_lport_ids);
+ related_lports_init(&rt_data->related_lports);
sset_init(active_tunnels);
sset_init(&rt_data->egress_ifaces);
smap_init(&rt_data->local_iface_ids);
local_binding_data_init(&rt_data->lbinding_data);
hmapx_clear(&rt_data->ct_updated_datapaths);
+ hmapx_clear(&rt_data->extport_updated_datapaths);
}
struct binding_ctx_in b_ctx_in;
@@ -1289,7 +1339,7 @@ runtime_data_sb_port_binding_handler(struct engine_node *node, void *data)
return false;
}
- if (b_ctx_out.local_lport_ids_changed ||
+ if (b_ctx_out.related_lports_changed ||
b_ctx_out.non_vif_ports_changed ||
!hmap_is_empty(b_ctx_out.tracked_dp_bindings)) {
engine_set_node_state(node, EN_UPDATED);
@@ -1599,11 +1649,8 @@ en_port_groups_run(struct engine_node *node, void *data)
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
- struct sset *local_b_lports = binding_collect_local_binding_lports(
- &rt_data->lbinding_data);
- port_groups_init(pg_table, local_b_lports, &pg->port_group_ssets,
- &pg->port_groups_cs_local);
- binding_destroy_local_binding_lports(local_b_lports);
+ port_groups_init(pg_table, &rt_data->related_lports.lport_names,
+ &pg->port_group_ssets, &pg->port_groups_cs_local);
engine_set_node_state(node, EN_UPDATED);
}
@@ -1620,12 +1667,9 @@ port_groups_sb_port_group_handler(struct engine_node *node, void *data)
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
- struct sset *local_b_lports = binding_collect_local_binding_lports(
- &rt_data->lbinding_data);
- port_groups_update(pg_table, local_b_lports, &pg->port_group_ssets,
- &pg->port_groups_cs_local, &pg->new, &pg->deleted,
- &pg->updated);
- binding_destroy_local_binding_lports(local_b_lports);
+ port_groups_update(pg_table, &rt_data->related_lports.lport_names,
+ &pg->port_group_ssets, &pg->port_groups_cs_local,
+ &pg->new, &pg->deleted, &pg->updated);
if (!sset_is_empty(&pg->new) || !sset_is_empty(&pg->deleted) ||
!sset_is_empty(&pg->updated)) {
@@ -1658,9 +1702,6 @@ port_groups_runtime_data_handler(struct engine_node *node, void *data)
goto out;
}
- struct sset *local_b_lports = binding_collect_local_binding_lports(
- &rt_data->lbinding_data);
-
const struct sbrec_port_group *pg_sb;
SBREC_PORT_GROUP_TABLE_FOR_EACH (pg_sb, pg_table) {
struct sset *pg_lports = shash_find_data(&pg->port_group_ssets,
@@ -1687,13 +1728,12 @@ port_groups_runtime_data_handler(struct engine_node *node, void *data)
if (need_update) {
expr_const_sets_add_strings(&pg->port_groups_cs_local, pg_sb->name,
(const char *const *) pg_sb->ports,
- pg_sb->n_ports, local_b_lports);
+ pg_sb->n_ports,
+ &rt_data->related_lports.lport_names);
sset_add(&pg->updated, pg_sb->name);
}
}
- binding_destroy_local_binding_lports(local_b_lports);
-
out:
if (!sset_is_empty(&pg->new) || !sset_is_empty(&pg->deleted) ||
!sset_is_empty(&pg->updated)) {
@@ -1748,10 +1788,9 @@ en_ct_zones_run(struct engine_node *node, void *data)
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
- hmapx_clear(&rt_data->ct_updated_datapaths);
update_ct_zones(&rt_data->local_lports, &rt_data->local_datapaths,
&ct_zones_data->current, ct_zones_data->bitmap,
- &ct_zones_data->pending, &rt_data->ct_updated_datapaths);
+ &ct_zones_data->pending);
engine_set_node_state(node, EN_UPDATED);
@@ -1794,107 +1833,13 @@ en_mff_ovn_geneve_run(struct engine_node *node, void *data)
engine_set_node_state(node, EN_UNCHANGED);
}
-/* Engine node en_physical_flow_changes indicates whether
- * there is a need to
- * - recompute only physical flows or
- * - we can incrementally process the physical flows.
- *
- * en_physical_flow_changes is an input to flow_output engine node.
- * If the engine node 'en_physical_flow_changes' gets updated during
- * engine run, it means the handler for this -
- * flow_output_physical_flow_changes_handler() will either
- * - recompute the physical flows by calling 'physical_run() or
- * - incrementlly process some of the changes for physical flow
- * calculation. Right now we handle OVS interfaces changes
- * for physical flow computation.
- *
- * When ever a port binding happens, the follow up
- * activity is the zone id allocation for that port binding.
- * With this intermediate engine node, we avoid full recomputation.
- * Instead we do physical flow computation (either full recomputation
- * by calling physical_run() or handling the changes incrementally.
- *
- * Hence this is an intermediate engine node to indicate the
- * flow_output engine to recomputes/compute the physical flows.
- *
- * TODO 1. Ideally this engine node should recompute/compute the physical
- * flows instead of relegating it to the flow_output node.
- * But this requires splitting the flow_output node to
- * logical_flow_output and physical_flow_output.
- *
- * TODO 2. We can further optimise the en_ct_zone changes to
- * compute the phsyical flows for changed zone ids.
- *
- * TODO 3: physical.c has a global simap -localvif_to_ofport which stores the
- * local OVS interfaces and the ofport numbers. Ideally this should be
- * part of the engine data.
- */
-struct ed_type_pfc_data {
- /* Both these variables are tracked and set in each engine run. */
- bool recompute_physical_flows;
- bool ovs_ifaces_changed;
-};
-
-static void
-en_physical_flow_changes_clear_tracked_data(void *data_)
-{
- struct ed_type_pfc_data *data = data_;
- data->recompute_physical_flows = false;
- data->ovs_ifaces_changed = false;
-}
-
-static void *
-en_physical_flow_changes_init(struct engine_node *node OVS_UNUSED,
- struct engine_arg *arg OVS_UNUSED)
-{
- struct ed_type_pfc_data *data = xzalloc(sizeof *data);
- return data;
-}
-
-static void
-en_physical_flow_changes_cleanup(void *data OVS_UNUSED)
-{
-}
-
-/* Indicate to the flow_output engine that we need to recompute physical
- * flows. */
-static void
-en_physical_flow_changes_run(struct engine_node *node, void *data)
-{
- struct ed_type_pfc_data *pfc_tdata = data;
- pfc_tdata->recompute_physical_flows = true;
- pfc_tdata->ovs_ifaces_changed = true;
- engine_set_node_state(node, EN_UPDATED);
-}
-
-/* ct_zone changes are not handled incrementally but a handler is required
- * to avoid skipping the ovs_iface incremental change handler.
- */
-static bool
-physical_flow_changes_ct_zones_handler(struct engine_node *node OVS_UNUSED,
- void *data OVS_UNUSED)
-{
- return false;
-}
-
-/* There are OVS interface changes. Indicate to the flow_output engine
- * to handle these OVS interface changes for physical flow computations. */
-static bool
-physical_flow_changes_ovs_iface_handler(struct engine_node *node, void *data)
-{
- struct ed_type_pfc_data *pfc_tdata = data;
- pfc_tdata->ovs_ifaces_changed = true;
- engine_set_node_state(node, EN_UPDATED);
- return true;
-}
-
-struct flow_output_persistent_data {
+struct lflow_output_persistent_data {
uint32_t conj_id_ofs;
struct lflow_cache *lflow_cache;
};
-struct ed_type_flow_output {
- /* desired flows */
+struct ed_type_lflow_output {
+ /* Logical flow table */
struct ovn_desired_flow_table flow_table;
/* group ids for load balancing */
struct ovn_extend_table group_table;
@@ -1905,81 +1850,15 @@ struct ed_type_flow_output {
/* Data which is persistent and not cleared during
* full recompute. */
- struct flow_output_persistent_data pd;
+ struct lflow_output_persistent_data pd;
};
-static void init_physical_ctx(struct engine_node *node,
- struct ed_type_runtime_data *rt_data,
- struct physical_ctx *p_ctx)
-{
- struct ovsdb_idl_index *sbrec_port_binding_by_name =
- engine_ovsdb_node_get_index(
- engine_get_input("SB_port_binding", node),
- "name");
-
- struct sbrec_multicast_group_table *multicast_group_table =
- (struct sbrec_multicast_group_table *)EN_OVSDB_GET(
- engine_get_input("SB_multicast_group", node));
-
- struct sbrec_port_binding_table *port_binding_table =
- (struct sbrec_port_binding_table *)EN_OVSDB_GET(
- engine_get_input("SB_port_binding", node));
-
- struct sbrec_chassis_table *chassis_table =
- (struct sbrec_chassis_table *)EN_OVSDB_GET(
- engine_get_input("SB_chassis", node));
-
- struct ed_type_mff_ovn_geneve *ed_mff_ovn_geneve =
- engine_get_input_data("mff_ovn_geneve", node);
-
- struct ovsrec_open_vswitch_table *ovs_table =
- (struct ovsrec_open_vswitch_table *)EN_OVSDB_GET(
- engine_get_input("OVS_open_vswitch", node));
- struct ovsrec_bridge_table *bridge_table =
- (struct ovsrec_bridge_table *)EN_OVSDB_GET(
- engine_get_input("OVS_bridge", node));
- const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
- const char *chassis_id = get_ovs_chassis_id(ovs_table);
- const struct sbrec_chassis *chassis = NULL;
- struct ovsdb_idl_index *sbrec_chassis_by_name =
- engine_ovsdb_node_get_index(
- engine_get_input("SB_chassis", node),
- "name");
- if (chassis_id) {
- chassis = chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
- }
-
- ovs_assert(br_int && chassis);
-
- struct ovsrec_interface_table *iface_table =
- (struct ovsrec_interface_table *)EN_OVSDB_GET(
- engine_get_input("OVS_interface", node));
-
- struct ed_type_ct_zones *ct_zones_data =
- engine_get_input_data("ct_zones", node);
- struct simap *ct_zones = &ct_zones_data->current;
-
- p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
- p_ctx->port_binding_table = port_binding_table;
- p_ctx->mc_group_table = multicast_group_table;
- p_ctx->br_int = br_int;
- p_ctx->chassis_table = chassis_table;
- p_ctx->iface_table = iface_table;
- p_ctx->chassis = chassis;
- p_ctx->active_tunnels = &rt_data->active_tunnels;
- p_ctx->local_datapaths = &rt_data->local_datapaths;
- p_ctx->local_lports = &rt_data->local_lports;
- p_ctx->ct_zones = ct_zones;
- p_ctx->mff_ovn_geneve = ed_mff_ovn_geneve->mff_ovn_geneve;
- p_ctx->local_bindings = &rt_data->lbinding_data.bindings;
- p_ctx->ct_updated_datapaths = &rt_data->ct_updated_datapaths;
-}
-
-static void init_lflow_ctx(struct engine_node *node,
- struct ed_type_runtime_data *rt_data,
- struct ed_type_flow_output *fo,
- struct lflow_ctx_in *l_ctx_in,
- struct lflow_ctx_out *l_ctx_out)
+static void
+init_lflow_ctx(struct engine_node *node,
+ struct ed_type_runtime_data *rt_data,
+ struct ed_type_lflow_output *fo,
+ struct lflow_ctx_in *l_ctx_in,
+ struct lflow_ctx_out *l_ctx_out)
{
struct ovsdb_idl_index *sbrec_port_binding_by_name =
engine_ovsdb_node_get_index(
@@ -2077,7 +1956,7 @@ static void init_lflow_ctx(struct engine_node *node,
l_ctx_in->addr_sets = addr_sets;
l_ctx_in->port_groups = port_groups;
l_ctx_in->active_tunnels = &rt_data->active_tunnels;
- l_ctx_in->local_lport_ids = &rt_data->local_lport_ids;
+ l_ctx_in->related_lport_ids = &rt_data->related_lports.lport_ids;
l_ctx_out->flow_table = &fo->flow_table;
l_ctx_out->group_table = &fo->group_table;
@@ -2089,11 +1968,10 @@ static void init_lflow_ctx(struct engine_node *node,
}
static void *
-en_flow_output_init(struct engine_node *node OVS_UNUSED,
- struct engine_arg *arg OVS_UNUSED)
+en_lflow_output_init(struct engine_node *node OVS_UNUSED,
+ struct engine_arg *arg OVS_UNUSED)
{
- struct ed_type_flow_output *data = xzalloc(sizeof *data);
-
+ struct ed_type_lflow_output *data = xzalloc(sizeof *data);
ovn_desired_flow_table_init(&data->flow_table);
ovn_extend_table_init(&data->group_table);
ovn_extend_table_init(&data->meter_table);
@@ -2103,9 +1981,9 @@ en_flow_output_init(struct engine_node *node OVS_UNUSED,
}
static void
-en_flow_output_cleanup(void *data)
+en_lflow_output_cleanup(void *data)
{
- struct ed_type_flow_output *flow_output_data = data;
+ struct ed_type_lflow_output *flow_output_data = data;
ovn_desired_flow_table_destroy(&flow_output_data->flow_table);
ovn_extend_table_destroy(&flow_output_data->group_table);
ovn_extend_table_destroy(&flow_output_data->meter_table);
@@ -2114,7 +1992,7 @@ en_flow_output_cleanup(void *data)
}
static void
-en_flow_output_run(struct engine_node *node, void *data)
+en_lflow_output_run(struct engine_node *node, void *data)
{
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
@@ -2140,8 +2018,8 @@ en_flow_output_run(struct engine_node *node, void *data)
ovs_assert(br_int && chassis);
- struct ed_type_flow_output *fo = data;
- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
+ struct ed_type_lflow_output *fo = data;
+ struct ovn_desired_flow_table *lflow_table = &fo->flow_table;
struct ovn_extend_table *group_table = &fo->group_table;
struct ovn_extend_table *meter_table = &fo->meter_table;
struct lflow_resource_ref *lfrr = &fo->lflow_resource_ref;
@@ -2150,7 +2028,7 @@ en_flow_output_run(struct engine_node *node, void *data)
if (first_run) {
first_run = false;
} else {
- ovn_desired_flow_table_clear(flow_table);
+ ovn_desired_flow_table_clear(lflow_table);
ovn_extend_table_clear(group_table, false /* desired */);
ovn_extend_table_clear(meter_table, false /* desired */);
lflow_resource_clear(lfrr);
@@ -2172,7 +2050,7 @@ en_flow_output_run(struct engine_node *node, void *data)
if (l_ctx_out.conj_id_overflow) {
/* Conjunction ids overflow. There can be many holes in between.
* Destroy lflow cache and call lflow_run() again. */
- ovn_desired_flow_table_clear(flow_table);
+ ovn_desired_flow_table_clear(lflow_table);
ovn_extend_table_clear(group_table, false /* desired */);
ovn_extend_table_clear(meter_table, false /* desired */);
lflow_resource_clear(lfrr);
@@ -2185,16 +2063,11 @@ en_flow_output_run(struct engine_node *node, void *data)
}
}
- struct physical_ctx p_ctx;
- init_physical_ctx(node, rt_data, &p_ctx);
-
- physical_run(&p_ctx, &fo->flow_table);
-
engine_set_node_state(node, EN_UPDATED);
}
static bool
-flow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
+lflow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
{
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
@@ -2207,7 +2080,7 @@ flow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
ovs_assert(br_int);
- struct ed_type_flow_output *fo = data;
+ struct ed_type_lflow_output *fo = data;
struct lflow_ctx_in l_ctx_in;
struct lflow_ctx_out l_ctx_out;
init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
@@ -2219,7 +2092,7 @@ flow_output_sb_logical_flow_handler(struct engine_node *node, void *data)
}
static bool
-flow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
+lflow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
{
struct ovsdb_idl_index *sbrec_port_binding_by_name =
engine_ovsdb_node_get_index(
@@ -2234,60 +2107,17 @@ flow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
engine_get_input_data("runtime_data", node);
const struct hmap *local_datapaths = &rt_data->local_datapaths;
- struct ed_type_flow_output *fo = data;
- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
+ struct ed_type_lflow_output *lfo = data;
lflow_handle_changed_neighbors(sbrec_port_binding_by_name,
- mac_binding_table, local_datapaths, flow_table);
+ mac_binding_table, local_datapaths, &lfo->flow_table);
engine_set_node_state(node, EN_UPDATED);
return true;
}
static bool
-flow_output_sb_port_binding_handler(struct engine_node *node,
- void *data)
-{
- struct ed_type_runtime_data *rt_data =
- engine_get_input_data("runtime_data", node);
-
- struct ed_type_flow_output *fo = data;
- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
-
- struct physical_ctx p_ctx;
- init_physical_ctx(node, rt_data, &p_ctx);
-
- /* We handle port-binding changes for physical flow processing
- * only. flow_output runtime data handler takes care of processing
- * logical flows for any port binding changes.
- */
- physical_handle_port_binding_changes(&p_ctx, flow_table);
-
- engine_set_node_state(node, EN_UPDATED);
- return true;
-}
-
-static bool
-flow_output_sb_multicast_group_handler(struct engine_node *node, void *data)
-{
- struct ed_type_runtime_data *rt_data =
- engine_get_input_data("runtime_data", node);
-
- struct ed_type_flow_output *fo = data;
- struct ovn_desired_flow_table *flow_table = &fo->flow_table;
-
- struct physical_ctx p_ctx;
- init_physical_ctx(node, rt_data, &p_ctx);
-
- physical_handle_mc_group_changes(&p_ctx, flow_table);
-
- engine_set_node_state(node, EN_UPDATED);
- return true;
-
-}
-
-static bool
-_flow_output_resource_ref_handler(struct engine_node *node, void *data,
+_lflow_output_resource_ref_handler(struct engine_node *node, void *data,
enum ref_type ref_type)
{
struct ed_type_runtime_data *rt_data =
@@ -2319,7 +2149,7 @@ _flow_output_resource_ref_handler(struct engine_node *node, void *data,
ovs_assert(br_int && chassis);
- struct ed_type_flow_output *fo = data;
+ struct ed_type_lflow_output *fo = data;
struct lflow_ctx_in l_ctx_in;
struct lflow_ctx_out l_ctx_out;
@@ -2388,53 +2218,20 @@ _flow_output_resource_ref_handler(struct engine_node *node, void *data,
}
static bool
-flow_output_addr_sets_handler(struct engine_node *node, void *data)
+lflow_output_addr_sets_handler(struct engine_node *node, void *data)
{
- return _flow_output_resource_ref_handler(node, data, REF_TYPE_ADDRSET);
+ return _lflow_output_resource_ref_handler(node, data, REF_TYPE_ADDRSET);
}
static bool
-flow_output_port_groups_handler(struct engine_node *node, void *data)
+lflow_output_port_groups_handler(struct engine_node *node, void *data)
{
- return _flow_output_resource_ref_handler(node, data, REF_TYPE_PORTGROUP);
+ return _lflow_output_resource_ref_handler(node, data, REF_TYPE_PORTGROUP);
}
static bool
-flow_output_physical_flow_changes_handler(struct engine_node *node, void *data)
-{
- struct ed_type_runtime_data *rt_data =
- engine_get_input_data("runtime_data", node);
-
- struct ed_type_flow_output *fo = data;
- struct physical_ctx p_ctx;
- init_physical_ctx(node, rt_data, &p_ctx);
-
- engine_set_node_state(node, EN_UPDATED);
- struct ed_type_pfc_data *pfc_data =
- engine_get_input_data("physical_flow_changes", node);
-
- /* If there are OVS interface changes. Try to handle them incrementally. */
- if (pfc_data->ovs_ifaces_changed) {
- if (!physical_handle_ovs_iface_changes(&p_ctx, &fo->flow_table)) {
- return false;
- }
- }
-
- if (pfc_data->recompute_physical_flows) {
- /* This indicates that we need to recompute the physical flows. */
- physical_clear_unassoc_flows_with_db(&fo->flow_table);
- physical_clear_dp_flows(&p_ctx, &rt_data->ct_updated_datapaths,
- &fo->flow_table);
- physical_run(&p_ctx, &fo->flow_table);
- return true;
- }
-
- return true;
-}
-
-static bool
-flow_output_runtime_data_handler(struct engine_node *node,
- void *data OVS_UNUSED)
+lflow_output_runtime_data_handler(struct engine_node *node,
+ void *data OVS_UNUSED)
{
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
@@ -2455,12 +2252,9 @@ flow_output_runtime_data_handler(struct engine_node *node,
struct lflow_ctx_in l_ctx_in;
struct lflow_ctx_out l_ctx_out;
- struct ed_type_flow_output *fo = data;
+ struct ed_type_lflow_output *fo = data;
init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
- struct physical_ctx p_ctx;
- init_physical_ctx(node, rt_data, &p_ctx);
-
struct tracked_binding_datapath *tdp;
HMAP_FOR_EACH (tdp, node, tracked_dp_bindings) {
if (tdp->is_new) {
@@ -2485,12 +2279,12 @@ flow_output_runtime_data_handler(struct engine_node *node,
}
static bool
-flow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
+lflow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
{
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
- struct ed_type_flow_output *fo = data;
+ struct ed_type_lflow_output *fo = data;
struct lflow_ctx_in l_ctx_in;
struct lflow_ctx_out l_ctx_out;
init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
@@ -2502,12 +2296,12 @@ flow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
}
static bool
-flow_output_sb_fdb_handler(struct engine_node *node, void *data)
+lflow_output_sb_fdb_handler(struct engine_node *node, void *data)
{
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
- struct ed_type_flow_output *fo = data;
+ struct ed_type_lflow_output *fo = data;
struct lflow_ctx_in l_ctx_in;
struct lflow_ctx_out l_ctx_out;
init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
@@ -2518,6 +2312,205 @@ flow_output_sb_fdb_handler(struct engine_node *node, void *data)
return handled;
}
+struct ed_type_pflow_output {
+ /* Desired physical flows. */
+ struct ovn_desired_flow_table flow_table;
+};
+
+static void init_physical_ctx(struct engine_node *node,
+ struct ed_type_runtime_data *rt_data,
+ struct physical_ctx *p_ctx)
+{
+ struct ovsdb_idl_index *sbrec_port_binding_by_name =
+ engine_ovsdb_node_get_index(
+ engine_get_input("SB_port_binding", node),
+ "name");
+
+ struct sbrec_multicast_group_table *multicast_group_table =
+ (struct sbrec_multicast_group_table *)EN_OVSDB_GET(
+ engine_get_input("SB_multicast_group", node));
+
+ struct sbrec_port_binding_table *port_binding_table =
+ (struct sbrec_port_binding_table *)EN_OVSDB_GET(
+ engine_get_input("SB_port_binding", node));
+
+ struct sbrec_chassis_table *chassis_table =
+ (struct sbrec_chassis_table *)EN_OVSDB_GET(
+ engine_get_input("SB_chassis", node));
+
+ struct ed_type_mff_ovn_geneve *ed_mff_ovn_geneve =
+ engine_get_input_data("mff_ovn_geneve", node);
+
+ struct ovsrec_open_vswitch_table *ovs_table =
+ (struct ovsrec_open_vswitch_table *)EN_OVSDB_GET(
+ engine_get_input("OVS_open_vswitch", node));
+ struct ovsrec_bridge_table *bridge_table =
+ (struct ovsrec_bridge_table *)EN_OVSDB_GET(
+ engine_get_input("OVS_bridge", node));
+ const struct ovsrec_bridge *br_int = get_br_int(bridge_table, ovs_table);
+ const char *chassis_id = get_ovs_chassis_id(ovs_table);
+ const struct sbrec_chassis *chassis = NULL;
+ struct ovsdb_idl_index *sbrec_chassis_by_name =
+ engine_ovsdb_node_get_index(
+ engine_get_input("SB_chassis", node),
+ "name");
+ if (chassis_id) {
+ chassis = chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
+ }
+
+ ovs_assert(br_int && chassis);
+
+ struct ovsrec_interface_table *iface_table =
+ (struct ovsrec_interface_table *)EN_OVSDB_GET(
+ engine_get_input("OVS_interface", node));
+
+ struct ed_type_ct_zones *ct_zones_data =
+ engine_get_input_data("ct_zones", node);
+ struct simap *ct_zones = &ct_zones_data->current;
+
+ p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
+ p_ctx->port_binding_table = port_binding_table;
+ p_ctx->mc_group_table = multicast_group_table;
+ p_ctx->br_int = br_int;
+ p_ctx->chassis_table = chassis_table;
+ p_ctx->iface_table = iface_table;
+ p_ctx->chassis = chassis;
+ p_ctx->active_tunnels = &rt_data->active_tunnels;
+ p_ctx->local_datapaths = &rt_data->local_datapaths;
+ p_ctx->local_lports = &rt_data->local_lports;
+ p_ctx->ct_zones = ct_zones;
+ p_ctx->mff_ovn_geneve = ed_mff_ovn_geneve->mff_ovn_geneve;
+ p_ctx->local_bindings = &rt_data->lbinding_data.bindings;
+}
+
+static void *
+en_pflow_output_init(struct engine_node *node OVS_UNUSED,
+ struct engine_arg *arg OVS_UNUSED)
+{
+ struct ed_type_pflow_output *data = xzalloc(sizeof *data);
+ ovn_desired_flow_table_init(&data->flow_table);
+ return data;
+}
+
+static void
+en_pflow_output_cleanup(void *data OVS_UNUSED)
+{
+ struct ed_type_pflow_output *pfo = data;
+ ovn_desired_flow_table_destroy(&pfo->flow_table);
+}
+
+static void
+en_pflow_output_run(struct engine_node *node, void *data)
+{
+ struct ed_type_pflow_output *pfo = data;
+ struct ovn_desired_flow_table *pflow_table = &pfo->flow_table;
+ static bool first_run = true;
+ if (first_run) {
+ first_run = false;
+ } else {
+ ovn_desired_flow_table_clear(pflow_table);
+ }
+
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+ struct physical_ctx p_ctx;
+ init_physical_ctx(node, rt_data, &p_ctx);
+ physical_run(&p_ctx, pflow_table);
+
+ engine_set_node_state(node, EN_UPDATED);
+}
+
+static bool
+pflow_output_sb_port_binding_handler(struct engine_node *node,
+ void *data)
+{
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+ struct ed_type_pflow_output *pfo = data;
+
+ struct physical_ctx p_ctx;
+ init_physical_ctx(node, rt_data, &p_ctx);
+
+ /* We handle port-binding changes for physical flow processing
+ * only. lflow_output runtime data handler takes care of processing
+ * logical flows for any port binding changes.
+ */
+ physical_handle_port_binding_changes(&p_ctx, &pfo->flow_table);
+
+ engine_set_node_state(node, EN_UPDATED);
+ return true;
+}
+
+static bool
+pflow_output_sb_multicast_group_handler(struct engine_node *node, void *data)
+{
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+ struct ed_type_pflow_output *pfo = data;
+
+ struct physical_ctx p_ctx;
+ init_physical_ctx(node, rt_data, &p_ctx);
+
+ physical_handle_mc_group_changes(&p_ctx, &pfo->flow_table);
+
+ engine_set_node_state(node, EN_UPDATED);
+ return true;
+}
+
+static bool
+pflow_output_ovs_iface_handler(struct engine_node *node,
+                               void *data)
+{
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+ struct ed_type_pflow_output *pfo = data;
+
+ struct physical_ctx p_ctx;
+ init_physical_ctx(node, rt_data, &p_ctx);
+
+ engine_set_node_state(node, EN_UPDATED);
+ return physical_handle_ovs_iface_changes(&p_ctx, &pfo->flow_table);
+}
+
+static void *
+en_flow_output_init(struct engine_node *node OVS_UNUSED,
+ struct engine_arg *arg OVS_UNUSED)
+{
+ return NULL;
+}
+
+static void
+en_flow_output_cleanup(void *data OVS_UNUSED)
+{
+
+}
+
+static void
+en_flow_output_run(struct engine_node *node OVS_UNUSED, void *data OVS_UNUSED)
+{
+ engine_set_node_state(node, EN_UPDATED);
+}
+
+static bool
+flow_output_pflow_output_handler(struct engine_node *node,
+ void *data OVS_UNUSED)
+{
+ engine_set_node_state(node, EN_UPDATED);
+ return true;
+}
+
+static bool
+flow_output_lflow_output_handler(struct engine_node *node,
+ void *data OVS_UNUSED)
+{
+ engine_set_node_state(node, EN_UPDATED);
+ return true;
+}
+
struct ovn_controller_exit_args {
bool *exiting;
bool *restart;
@@ -2710,8 +2703,8 @@ main(int argc, char *argv[])
ENGINE_NODE_WITH_CLEAR_TRACK_DATA(runtime_data, "runtime_data");
ENGINE_NODE(mff_ovn_geneve, "mff_ovn_geneve");
ENGINE_NODE(ofctrl_is_connected, "ofctrl_is_connected");
- ENGINE_NODE_WITH_CLEAR_TRACK_DATA(physical_flow_changes,
- "physical_flow_changes");
+ ENGINE_NODE(pflow_output, "physical_flow_output");
+ ENGINE_NODE(lflow_output, "logical_flow_output");
ENGINE_NODE(flow_output, "flow_output");
ENGINE_NODE(addr_sets, "addr_sets");
ENGINE_NODE_WITH_CLEAR_TRACK_DATA(port_groups, "port_groups");
@@ -2735,58 +2728,68 @@ main(int argc, char *argv[])
engine_add_input(&en_port_groups, &en_runtime_data,
port_groups_runtime_data_handler);
- /* Engine node physical_flow_changes indicates whether
- * we can recompute only physical flows or we can
- * incrementally process the physical flows.
- *
- * Note: The order of inputs is important, all OVS interface changes must
+ /* Note: The order of inputs is important, all OVS interface changes must
* be handled before any ct_zone changes.
*/
- engine_add_input(&en_physical_flow_changes, &en_ovs_interface,
- physical_flow_changes_ovs_iface_handler);
- engine_add_input(&en_physical_flow_changes, &en_ct_zones,
- physical_flow_changes_ct_zones_handler);
-
- engine_add_input(&en_flow_output, &en_addr_sets,
- flow_output_addr_sets_handler);
- engine_add_input(&en_flow_output, &en_port_groups,
- flow_output_port_groups_handler);
- engine_add_input(&en_flow_output, &en_runtime_data,
- flow_output_runtime_data_handler);
- engine_add_input(&en_flow_output, &en_mff_ovn_geneve, NULL);
- engine_add_input(&en_flow_output, &en_physical_flow_changes,
- flow_output_physical_flow_changes_handler);
-
- /* We need this input nodes for only data. Hence the noop handler. */
- engine_add_input(&en_flow_output, &en_ct_zones, engine_noop_handler);
- engine_add_input(&en_flow_output, &en_ovs_interface, engine_noop_handler);
-
- engine_add_input(&en_flow_output, &en_ovs_open_vswitch, NULL);
- engine_add_input(&en_flow_output, &en_ovs_bridge, NULL);
-
- engine_add_input(&en_flow_output, &en_sb_chassis, NULL);
- engine_add_input(&en_flow_output, &en_sb_encap, NULL);
- engine_add_input(&en_flow_output, &en_sb_multicast_group,
- flow_output_sb_multicast_group_handler);
- engine_add_input(&en_flow_output, &en_sb_port_binding,
- flow_output_sb_port_binding_handler);
- engine_add_input(&en_flow_output, &en_sb_mac_binding,
- flow_output_sb_mac_binding_handler);
- engine_add_input(&en_flow_output, &en_sb_logical_flow,
- flow_output_sb_logical_flow_handler);
+ engine_add_input(&en_pflow_output, &en_ovs_interface,
+ pflow_output_ovs_iface_handler);
+ engine_add_input(&en_pflow_output, &en_ct_zones, NULL);
+ engine_add_input(&en_pflow_output, &en_sb_chassis, NULL);
+ engine_add_input(&en_pflow_output, &en_sb_port_binding,
+ pflow_output_sb_port_binding_handler);
+ engine_add_input(&en_pflow_output, &en_sb_multicast_group,
+ pflow_output_sb_multicast_group_handler);
+
+ engine_add_input(&en_pflow_output, &en_runtime_data,
+ NULL);
+ engine_add_input(&en_pflow_output, &en_sb_encap, NULL);
+ engine_add_input(&en_pflow_output, &en_mff_ovn_geneve, NULL);
+ engine_add_input(&en_pflow_output, &en_ovs_open_vswitch, NULL);
+ engine_add_input(&en_pflow_output, &en_ovs_bridge, NULL);
+
+ engine_add_input(&en_lflow_output, &en_addr_sets,
+ lflow_output_addr_sets_handler);
+ engine_add_input(&en_lflow_output, &en_port_groups,
+ lflow_output_port_groups_handler);
+ engine_add_input(&en_lflow_output, &en_runtime_data,
+ lflow_output_runtime_data_handler);
+
+    /* We need these input nodes only for the data. Hence the noop handler.
+     * Changes to en_sb_multicast_group are handled by the pflow_output engine
+     * node.
+     */
+ engine_add_input(&en_lflow_output, &en_sb_multicast_group,
+ engine_noop_handler);
+
+ engine_add_input(&en_lflow_output, &en_sb_chassis, NULL);
+
+    /* Any changes to the port binding need not be handled
+     * for the lflow_output engine. We still need sb_port_binding
+     * as input to access the port binding data in lflow.c and
+     * hence the noop handler. */
+ engine_add_input(&en_lflow_output, &en_sb_port_binding,
+ engine_noop_handler);
+
+ engine_add_input(&en_lflow_output, &en_ovs_open_vswitch, NULL);
+ engine_add_input(&en_lflow_output, &en_ovs_bridge, NULL);
+
+ engine_add_input(&en_lflow_output, &en_sb_mac_binding,
+ lflow_output_sb_mac_binding_handler);
+ engine_add_input(&en_lflow_output, &en_sb_logical_flow,
+ lflow_output_sb_logical_flow_handler);
/* Using a noop handler since we don't really need any data from datapath
* groups or a full recompute. Update of a datapath group will put
* logical flow into the tracked list, so the logical flow handler will
* process all changes. */
- engine_add_input(&en_flow_output, &en_sb_logical_dp_group,
+ engine_add_input(&en_lflow_output, &en_sb_logical_dp_group,
engine_noop_handler);
- engine_add_input(&en_flow_output, &en_sb_dhcp_options, NULL);
- engine_add_input(&en_flow_output, &en_sb_dhcpv6_options, NULL);
- engine_add_input(&en_flow_output, &en_sb_dns, NULL);
- engine_add_input(&en_flow_output, &en_sb_load_balancer,
- flow_output_sb_load_balancer_handler);
- engine_add_input(&en_flow_output, &en_sb_fdb,
- flow_output_sb_fdb_handler);
+ engine_add_input(&en_lflow_output, &en_sb_dhcp_options, NULL);
+ engine_add_input(&en_lflow_output, &en_sb_dhcpv6_options, NULL);
+ engine_add_input(&en_lflow_output, &en_sb_dns, NULL);
+ engine_add_input(&en_lflow_output, &en_sb_load_balancer,
+ lflow_output_sb_load_balancer_handler);
+ engine_add_input(&en_lflow_output, &en_sb_fdb,
+ lflow_output_sb_fdb_handler);
engine_add_input(&en_ct_zones, &en_ovs_open_vswitch, NULL);
engine_add_input(&en_ct_zones, &en_ovs_bridge, NULL);
@@ -2808,12 +2811,20 @@ main(int argc, char *argv[])
/* The OVS interface handler for runtime_data changes MUST be executed
* after the sb_port_binding_handler as port_binding deletes must be
* processed first.
+ *
+ * runtime_data needs to access the OVS Port data and hence a noop
+ * handler.
*/
engine_add_input(&en_runtime_data, &en_ovs_port,
engine_noop_handler);
engine_add_input(&en_runtime_data, &en_ovs_interface,
runtime_data_ovs_interface_handler);
+ engine_add_input(&en_flow_output, &en_lflow_output,
+ flow_output_lflow_output_handler);
+ engine_add_input(&en_flow_output, &en_pflow_output,
+ flow_output_pflow_output_handler);
+
struct engine_arg engine_arg = {
.sb_idl = ovnsb_idl_loop.idl,
.ovs_idl = ovs_idl_loop.idl,
@@ -2836,25 +2847,27 @@ main(int argc, char *argv[])
engine_ovsdb_node_add_index(&en_sb_datapath_binding, "key",
sbrec_datapath_binding_by_key);
- struct ed_type_flow_output *flow_output_data =
- engine_get_internal_data(&en_flow_output);
+ struct ed_type_lflow_output *lflow_output_data =
+ engine_get_internal_data(&en_lflow_output);
+ struct ed_type_lflow_output *pflow_output_data =
+ engine_get_internal_data(&en_pflow_output);
struct ed_type_ct_zones *ct_zones_data =
engine_get_internal_data(&en_ct_zones);
struct ed_type_runtime_data *runtime_data =
engine_get_internal_data(&en_runtime_data);
- ofctrl_init(&flow_output_data->group_table,
- &flow_output_data->meter_table,
+ ofctrl_init(&lflow_output_data->group_table,
+ &lflow_output_data->meter_table,
get_ofctrl_probe_interval(ovs_idl_loop.idl));
ofctrl_seqno_init();
unixctl_command_register("group-table-list", "", 0, 0,
extend_table_list,
- &flow_output_data->group_table);
+ &lflow_output_data->group_table);
unixctl_command_register("meter-table-list", "", 0, 0,
extend_table_list,
- &flow_output_data->meter_table);
+ &lflow_output_data->meter_table);
unixctl_command_register("ct-zone-list", "", 0, 0,
ct_zone_list,
@@ -2868,14 +2881,14 @@ main(int argc, char *argv[])
NULL);
unixctl_command_register("lflow-cache/flush", "", 0, 0,
lflow_cache_flush_cmd,
- &flow_output_data->pd);
+ &lflow_output_data->pd);
/* Keep deprecated 'flush-lflow-cache' command for now. */
unixctl_command_register("flush-lflow-cache", "[deprecated]", 0, 0,
lflow_cache_flush_cmd,
- &flow_output_data->pd);
+ &lflow_output_data->pd);
unixctl_command_register("lflow-cache/show-stats", "", 0, 0,
lflow_cache_show_stats_cmd,
- &flow_output_data->pd);
+ &lflow_output_data->pd);
bool reset_ovnsb_idl_min_index = false;
unixctl_command_register("sb-cluster-state-reset", "", 0, 0,
@@ -2981,8 +2994,10 @@ main(int argc, char *argv[])
ovsrec_bridge_table_get(ovs_idl_loop.idl);
const struct ovsrec_open_vswitch_table *ovs_table =
ovsrec_open_vswitch_table_get(ovs_idl_loop.idl);
- const struct ovsrec_bridge *br_int =
- process_br_int(ovs_idl_txn, bridge_table, ovs_table);
+ const struct ovsrec_bridge *br_int = NULL;
+ const struct ovsrec_datapath *br_int_dp = NULL;
+ process_br_int(ovs_idl_txn, bridge_table, ovs_table,
+ &br_int, &br_int_dp);
if (ovsdb_idl_has_ever_connected(ovnsb_idl_loop.idl) &&
northd_version_match) {
@@ -3013,6 +3028,13 @@ main(int argc, char *argv[])
&chassis_private);
}
+ /* If any OVS feature support changed, force a full recompute. */
+ if (br_int_dp
+ && ovs_feature_support_update(&br_int_dp->capabilities)) {
+ VLOG_INFO("OVS feature set changed, force recompute.");
+ engine_set_force_recompute(true);
+ }
+
if (br_int) {
ct_zones_data = engine_get_data(&en_ct_zones);
if (ct_zones_data) {
@@ -3121,13 +3143,17 @@ main(int argc, char *argv[])
runtime_data ? &runtime_data->lbinding_data : NULL;
if_status_mgr_update(if_mgr, binding_data);
- flow_output_data = engine_get_data(&en_flow_output);
- if (flow_output_data && ct_zones_data) {
- ofctrl_put(&flow_output_data->flow_table,
+ lflow_output_data = engine_get_data(&en_lflow_output);
+ pflow_output_data = engine_get_data(&en_pflow_output);
+ if (lflow_output_data && pflow_output_data &&
+ ct_zones_data) {
+ ofctrl_put(&lflow_output_data->flow_table,
+ &pflow_output_data->flow_table,
&ct_zones_data->pending,
sbrec_meter_table_get(ovnsb_idl_loop.idl),
ofctrl_seqno_get_req_cfg(),
- engine_node_changed(&en_flow_output));
+ engine_node_changed(&en_lflow_output),
+ engine_node_changed(&en_pflow_output));
}
ofctrl_seqno_run(ofctrl_get_cur_cfg());
if_status_mgr_run(if_mgr, binding_data, !ovnsb_idl_txn,
@@ -3495,7 +3521,7 @@ lflow_cache_flush_cmd(struct unixctl_conn *conn OVS_UNUSED,
void *arg_)
{
VLOG_INFO("User triggered lflow cache flush.");
- struct flow_output_persistent_data *fo_pd = arg_;
+ struct lflow_output_persistent_data *fo_pd = arg_;
lflow_cache_flush(fo_pd->lflow_cache);
fo_pd->conj_id_ofs = 1;
engine_set_force_recompute(true);
@@ -3507,7 +3533,7 @@ static void
lflow_cache_show_stats_cmd(struct unixctl_conn *conn, int argc OVS_UNUSED,
const char *argv[] OVS_UNUSED, void *arg_)
{
- struct flow_output_persistent_data *fo_pd = arg_;
+ struct lflow_output_persistent_data *fo_pd = arg_;
struct lflow_cache *lc = fo_pd->lflow_cache;
struct ds ds = DS_EMPTY_INITIALIZER;
diff --git a/controller/ovn-controller.h b/controller/ovn-controller.h
index 5d9466880..2bf1fecbf 100644
--- a/controller/ovn-controller.h
+++ b/controller/ovn-controller.h
@@ -67,6 +67,8 @@ struct local_datapath {
size_t n_peer_ports;
size_t n_allocated_peer_ports;
+
+ struct shash external_ports;
};
struct local_datapath *get_local_datapath(const struct hmap *,
diff --git a/controller/physical.c b/controller/physical.c
index 018e09540..a9a3dc720 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -1272,6 +1272,52 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
ofctrl_add_flow(flow_table, OFTABLE_CHECK_LOOPBACK, 160,
binding->header_.uuid.parts[0], &match,
ofpacts_p, &binding->header_.uuid);
+
+ /* localport traffic directed to external is *not* local */
+ struct shash_node *node;
+ SHASH_FOR_EACH (node, &ld->external_ports) {
+ const struct sbrec_port_binding *pb = node->data;
+
+ /* skip ports that are not claimed by this chassis */
+ if (!pb->chassis) {
+ continue;
+ }
+ if (strcmp(pb->chassis->name, chassis->name)) {
+ continue;
+ }
+
+ ofpbuf_clear(ofpacts_p);
+ for (int i = 0; i < MFF_N_LOG_REGS; i++) {
+ put_load(0, MFF_REG0 + i, 0, 32, ofpacts_p);
+ }
+ put_resubmit(OFTABLE_LOG_EGRESS_PIPELINE, ofpacts_p);
+
+ /* allow traffic directed to external MAC address */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ for (int i = 0; i < pb->n_mac; i++) {
+ char *err_str;
+ struct eth_addr peer_mac;
+ if ((err_str = str_to_mac(pb->mac[i], &peer_mac))) {
+ VLOG_WARN_RL(
+ &rl, "Parsing MAC failed for external port: %s, "
+ "with error: %s", pb->logical_port, err_str);
+ free(err_str);
+ continue;
+ }
+
+ match_init_catchall(&match);
+ match_set_metadata(&match, htonll(dp_key));
+ match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0,
+ port_key);
+ match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
+ MLF_LOCALPORT, MLF_LOCALPORT);
+ match_set_dl_dst(&match, peer_mac);
+
+ ofctrl_add_flow(flow_table, OFTABLE_CHECK_LOOPBACK, 170,
+ binding->header_.uuid.parts[0], &match,
+ ofpacts_p, &binding->header_.uuid);
+ }
+ }
}
} else if (!tun && !is_ha_remote) {
@@ -1953,22 +1999,3 @@ physical_clear_unassoc_flows_with_db(struct ovn_desired_flow_table *flow_table)
ofctrl_remove_flows(flow_table, hc_uuid);
}
}
-
-void
-physical_clear_dp_flows(struct physical_ctx *p_ctx,
- struct hmapx *ct_updated_datapaths,
- struct ovn_desired_flow_table *flow_table)
-{
- const struct sbrec_port_binding *binding;
- SBREC_PORT_BINDING_TABLE_FOR_EACH (binding, p_ctx->port_binding_table) {
- if (!hmapx_find(ct_updated_datapaths, binding->datapath)) {
- continue;
- }
- const struct sbrec_port_binding *peer =
- get_binding_peer(p_ctx->sbrec_port_binding_by_name, binding);
- ofctrl_remove_flows(flow_table, &binding->header_.uuid);
- if (peer) {
- ofctrl_remove_flows(flow_table, &peer->header_.uuid);
- }
- }
-}
diff --git a/controller/physical.h b/controller/physical.h
index 0bf13f268..feab41df4 100644
--- a/controller/physical.h
+++ b/controller/physical.h
@@ -56,16 +56,12 @@ struct physical_ctx {
const struct simap *ct_zones;
enum mf_field_id mff_ovn_geneve;
struct shash *local_bindings;
- struct hmapx *ct_updated_datapaths;
};
void physical_register_ovs_idl(struct ovsdb_idl *);
void physical_run(struct physical_ctx *,
struct ovn_desired_flow_table *);
void physical_clear_unassoc_flows_with_db(struct ovn_desired_flow_table *);
-void physical_clear_dp_flows(struct physical_ctx *p_ctx,
- struct hmapx *ct_updated_datapaths,
- struct ovn_desired_flow_table *flow_table);
void physical_handle_port_binding_changes(struct physical_ctx *,
struct ovn_desired_flow_table *);
void physical_handle_mc_group_changes(struct physical_ctx *,
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 78ecfed84..1859d33d6 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -768,6 +768,13 @@ pinctrl_parse_dhcpv6_advt(struct rconn *swconn, const struct flow *ip_flow,
pfd->state = PREFIX_REQUEST;
+ char ip6_s[INET6_ADDRSTRLEN + 1];
+ if (ipv6_string_mapped(ip6_s, &ip_flow->ipv6_src)) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(20, 40);
+ VLOG_DBG_RL(&rl, "Received DHCPv6 advt from %s with aid %d"
+ " sending DHCPv6 request", ip6_s, aid);
+ }
+
uint64_t packet_stub[256 / 8];
struct dp_packet packet;
@@ -936,6 +943,14 @@ pinctrl_parse_dhcpv6_reply(struct dp_packet *pkt_in,
in_dhcpv6_data += opt_len;
}
if (status) {
+ char prefix[INET6_ADDRSTRLEN + 1];
+ char ip6_s[INET6_ADDRSTRLEN + 1];
+ if (ipv6_string_mapped(ip6_s, &ip_flow->ipv6_src) &&
+ ipv6_string_mapped(prefix, &ipv6)) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(20, 40);
+ VLOG_DBG_RL(&rl, "Received DHCPv6 reply from %s with prefix %s/%d"
+ " aid %d", ip6_s, prefix, prefix_len, aid);
+ }
pinctrl_prefixd_state_handler(ip_flow, ipv6, aid, eth->eth_src,
in_ip->ip6_src, prefix_len, t1, t2,
plife_time, vlife_time, uuid, uuid_len);
@@ -1226,18 +1241,26 @@ fill_ipv6_prefix_state(struct ovsdb_idl_txn *ovnsb_idl_txn,
}
} else if (pfd->state == PREFIX_PENDING && ovnsb_idl_txn) {
char prefix_str[INET6_ADDRSTRLEN + 1] = {};
- struct smap options;
+ if (!ipv6_string_mapped(prefix_str, &pfd->prefix)) {
+ goto out;
+ }
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(20, 40);
+ VLOG_DBG_RL(&rl, "updating port_binding for %s with prefix %s/%d"
+ " aid %d", pb->logical_port, prefix_str, pfd->plen,
+ pfd->aid);
pfd->state = PREFIX_DONE;
pfd->last_complete = time_msec();
pfd->next_announce = pfd->last_complete + pfd->t1;
- ipv6_string_mapped(prefix_str, &pfd->prefix);
+ struct smap options;
smap_clone(&options, &pb->options);
+ smap_remove(&options, "ipv6_ra_pd_list");
smap_add_format(&options, "ipv6_ra_pd_list", "%d:%s/%d",
pfd->aid, prefix_str, pfd->plen);
sbrec_port_binding_set_options(pb, &options);
smap_destroy(&options);
}
+out:
pfd->last_used = time_msec();
destroy_lport_addresses(&c_addrs);
}
@@ -1288,7 +1311,8 @@ prepare_ipv6_prefixd(struct ovsdb_idl_txn *ovnsb_idl_txn,
sbrec_port_binding_by_name, chassis, active_tunnels,
redirect_name);
free(redirect_name);
- if (!resident && strcmp(pb->type, "l3gateway")) {
+ if ((strcmp(pb->type, "l3gateway") || pb->chassis != chassis) &&
+ !resident) {
continue;
}
diff --git a/debian/changelog b/debian/changelog
index 9e6e5215d..42b952144 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+ovn (21.06.1-1) unstable; urgency=low
+
+ * New upstream version
+
+ -- OVN team <dev@openvswitch.org> Fri, 18 Jun 2021 13:21:08 -0400
+
ovn (21.06.0-1) unstable; urgency=low
* New upstream version
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index 040213177..f5eb01eb7 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -25,6 +25,7 @@
#include "openvswitch/hmap.h"
#include "openvswitch/uuid.h"
#include "util.h"
+#include "ovn/features.h"
struct expr;
struct lexer;
diff --git a/include/ovn/features.h b/include/ovn/features.h
index 10ee46fcd..c35d59b14 100644
--- a/include/ovn/features.h
+++ b/include/ovn/features.h
@@ -16,7 +16,25 @@
#ifndef OVN_FEATURES_H
#define OVN_FEATURES_H 1
+#include <stdbool.h>
+
+#include "smap.h"
+
/* ovn-controller supported feature names. */
#define OVN_FEATURE_PORT_UP_NOTIF "port-up-notif"
+/* OVS datapath supported features. Based on availability OVN might generate
+ * different types of openflows.
+ */
+enum ovs_feature_support_bits {
+ OVS_CT_ZERO_SNAT_SUPPORT_BIT,
+};
+
+enum ovs_feature_value {
+ OVS_CT_ZERO_SNAT_SUPPORT = (1 << OVS_CT_ZERO_SNAT_SUPPORT_BIT),
+};
+
+bool ovs_feature_is_supported(enum ovs_feature_value feature);
+bool ovs_feature_support_update(const struct smap *ovs_capabilities);
+
#endif
diff --git a/lib/actions.c b/lib/actions.c
index b3433f49e..7010fab2b 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -742,6 +742,22 @@ encode_CT_COMMIT_V1(const struct ovnact_ct_commit_v1 *cc,
ct->zone_src.ofs = 0;
ct->zone_src.n_bits = 16;
+ /* If the datapath supports all-zero SNAT then use it to avoid tuple
+ * collisions at commit time between NATed and firewalled-only sessions.
+ */
+
+ if (ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT)) {
+ size_t nat_offset = ofpacts->size;
+ ofpbuf_pull(ofpacts, nat_offset);
+
+ struct ofpact_nat *nat = ofpact_put_NAT(ofpacts);
+ nat->flags = 0;
+ nat->range_af = AF_UNSPEC;
+ nat->flags |= NX_NAT_F_SRC;
+ ofpacts->header = ofpbuf_push_uninit(ofpacts, nat_offset);
+ ct = ofpacts->header;
+ }
+
size_t set_field_offset = ofpacts->size;
ofpbuf_pull(ofpacts, set_field_offset);
@@ -792,6 +808,21 @@ encode_CT_COMMIT_V2(const struct ovnact_nest *on,
ct->zone_src.ofs = 0;
ct->zone_src.n_bits = 16;
+ /* If the datapath supports all-zero SNAT then use it to avoid tuple
+ * collisions at commit time between NATed and firewalled-only sessions.
+ */
+ if (ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT)) {
+ size_t nat_offset = ofpacts->size;
+ ofpbuf_pull(ofpacts, nat_offset);
+
+ struct ofpact_nat *nat = ofpact_put_NAT(ofpacts);
+ nat->flags = 0;
+ nat->range_af = AF_UNSPEC;
+ nat->flags |= NX_NAT_F_SRC;
+ ofpacts->header = ofpbuf_push_uninit(ofpacts, nat_offset);
+ ct = ofpacts->header;
+ }
+
size_t set_field_offset = ofpacts->size;
ofpbuf_pull(ofpacts, set_field_offset);
diff --git a/lib/automake.mk b/lib/automake.mk
index 781be2109..917b28e1e 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -13,6 +13,7 @@ lib_libovn_la_SOURCES = \
lib/expr.c \
lib/extend-table.h \
lib/extend-table.c \
+ lib/features.c \
lib/ovn-parallel-hmap.h \
lib/ovn-parallel-hmap.c \
lib/ip-mcast-index.c \
diff --git a/lib/features.c b/lib/features.c
new file mode 100644
index 000000000..87d04ee3f
--- /dev/null
+++ b/lib/features.c
@@ -0,0 +1,84 @@
+/* Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "lib/util.h"
+#include "openvswitch/vlog.h"
+#include "ovn/features.h"
+
+VLOG_DEFINE_THIS_MODULE(features);
+
+struct ovs_feature {
+ enum ovs_feature_value value;
+ const char *name;
+};
+
+static struct ovs_feature all_ovs_features[] = {
+ {
+ .value = OVS_CT_ZERO_SNAT_SUPPORT,
+ .name = "ct_zero_snat"
+ },
+};
+
+/* A bitmap of OVS features that have been detected as 'supported'. */
+static uint32_t supported_ovs_features;
+
+static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+
+static bool
+ovs_feature_is_valid(enum ovs_feature_value feature)
+{
+ switch (feature) {
+ case OVS_CT_ZERO_SNAT_SUPPORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+ovs_feature_is_supported(enum ovs_feature_value feature)
+{
+ ovs_assert(ovs_feature_is_valid(feature));
+ return supported_ovs_features & feature;
+}
+
+/* Returns 'true' if the set of tracked OVS features has been updated. */
+bool
+ovs_feature_support_update(const struct smap *ovs_capabilities)
+{
+ bool updated = false;
+
+ for (size_t i = 0; i < ARRAY_SIZE(all_ovs_features); i++) {
+ enum ovs_feature_value value = all_ovs_features[i].value;
+ const char *name = all_ovs_features[i].name;
+ bool old_state = supported_ovs_features & value;
+ bool new_state = smap_get_bool(ovs_capabilities, name, false);
+ if (new_state != old_state) {
+ updated = true;
+ if (new_state) {
+ supported_ovs_features |= value;
+ } else {
+ supported_ovs_features &= ~value;
+ }
+ VLOG_INFO_RL(&rl, "OVS Feature: %s, state: %s", name,
+ new_state ? "supported" : "not supported");
+ }
+ }
+ return updated;
+}
diff --git a/lib/test-ovn-features.c b/lib/test-ovn-features.c
new file mode 100644
index 000000000..deb97581e
--- /dev/null
+++ b/lib/test-ovn-features.c
@@ -0,0 +1,56 @@
+/* Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "ovn/features.h"
+#include "tests/ovstest.h"
+
+static void
+test_ovn_features(struct ovs_cmdl_context *ctx OVS_UNUSED)
+{
+ ovs_assert(!ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT));
+
+ struct smap features = SMAP_INITIALIZER(&features);
+
+ smap_add(&features, "ct_zero_snat", "false");
+ ovs_assert(!ovs_feature_support_update(&features));
+ ovs_assert(!ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT));
+
+ smap_replace(&features, "ct_zero_snat", "true");
+ ovs_assert(ovs_feature_support_update(&features));
+ ovs_assert(ovs_feature_is_supported(OVS_CT_ZERO_SNAT_SUPPORT));
+
+ smap_add(&features, "unknown_feature", "true");
+ ovs_assert(!ovs_feature_support_update(&features));
+
+ smap_destroy(&features);
+}
+
+static void
+test_ovn_features_main(int argc, char *argv[])
+{
+ set_program_name(argv[0]);
+ static const struct ovs_cmdl_command commands[] = {
+ {"run", NULL, 0, 0, test_ovn_features, OVS_RO},
+ {NULL, NULL, 0, 0, NULL, OVS_RO},
+ };
+ struct ovs_cmdl_context ctx;
+ ctx.argc = argc - 1;
+ ctx.argv = argv + 1;
+ ovs_cmdl_run_command(&ctx, commands);
+}
+
+OVSTEST_REGISTER("test-ovn-features", test_ovn_features_main);
diff --git a/northd/lrouter.dl b/northd/lrouter.dl
index 6c25b1ca9..6805b9036 100644
--- a/northd/lrouter.dl
+++ b/northd/lrouter.dl
@@ -692,6 +692,17 @@ relation &StaticRoute(lrsr: nb::Logical_Router_Static_Route,
},
var esr = lrsr.options.get_bool_def("ecmp_symmetric_reply", false).
+relation &StaticRouteEmptyNextHop(lrsr: nb::Logical_Router_Static_Route,
+ key: route_key,
+ output_port: Option<string>)
+&StaticRouteEmptyNextHop(.lrsr = lrsr,
+ .key = RouteKey{policy, ip_prefix, plen},
+ .output_port = lrsr.output_port) :-
+ lrsr in nb::Logical_Router_Static_Route(.nexthop = ""),
+ not StaticRouteDown(lrsr._uuid),
+ var policy = route_policy_from_string(lrsr.policy),
+ Some{(var ip_prefix, var plen)} = ip46_parse_cidr(lrsr.ip_prefix).
+
/* Returns the IP address of the router port 'op' that
* overlaps with 'ip'. If one is not found, returns None. */
function find_lrp_member_ip(networks: lport_addresses, ip: v46_ip): Option<v46_ip> =
@@ -743,6 +754,19 @@ RouterStaticRoute_(.router = router,
var route_id = FlatMap(routes),
route in &StaticRoute(.lrsr = nb::Logical_Router_Static_Route{._uuid = route_id}).
+relation RouterStaticRouteEmptyNextHop_(
+ router : Intern<Router>,
+ key : route_key,
+ output_port : Option<string>)
+
+RouterStaticRouteEmptyNextHop_(.router = router,
+ .key = route.key,
+ .output_port = route.output_port) :-
+ router in &Router(),
+ nb::Logical_Router(._uuid = router._uuid, .static_routes = routes),
+ var route_id = FlatMap(routes),
+ route in &StaticRouteEmptyNextHop(.lrsr = nb::Logical_Router_Static_Route{._uuid = route_id}).
+
/* Step-2: compute output_port for each pair */
typedef route_dst = RouteDst {
nexthop: v46_ip,
@@ -805,6 +829,42 @@ RouterStaticRoute(router, key, dsts) :-
},
var dsts = set_singleton(RouteDst{nexthop, src_ip, port, ecmp_symmetric_reply}).
+relation RouterStaticRouteEmptyNextHop(
+ router : Intern<Router>,
+ key : route_key,
+ dsts : Set<route_dst>)
+
+RouterStaticRouteEmptyNextHop(router, key, dsts) :-
+ RouterStaticRouteEmptyNextHop_(.router = router,
+ .key = key,
+ .output_port = Some{oport}),
+ /* output_port specified */
+ port in &RouterPort(.lrp = &nb::Logical_Router_Port{.name = oport},
+ .networks = networks),
+ /* There are no IP networks configured on the router's port via
+ * which 'route->nexthop' is theoretically reachable. But since
+ * 'out_port' has been specified, we honor it by trying to reach
+ * 'route->nexthop' via the first IP address of 'out_port'.
+ * (There are cases, e.g in GCE, where each VM gets a /32 IP
+ * address and the default gateway is still reachable from it.) */
+ Some{var src_ip} = match (key.ip_prefix) {
+ IPv4{_} -> match (networks.ipv4_addrs.nth(0)) {
+ Some{addr} -> Some{IPv4{addr.addr}},
+ None -> {
+ warn("No path for static route ${key.ip_prefix}");
+ None
+ }
+ },
+ IPv6{_} -> match (networks.ipv6_addrs.nth(0)) {
+ Some{addr} -> Some{IPv6{addr.addr}},
+ None -> {
+ warn("No path for static route ${key.ip_prefix}");
+ None
+ }
+ }
+ },
+ var dsts = set_singleton(RouteDst{src_ip, src_ip, port, false}).
+
/* compute route-route pairs for nexthop = "discard" routes */
relation &DiscardRoute(lrsr: nb::Logical_Router_Static_Route,
key: route_key)
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index 407464602..890775797 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -1072,8 +1072,10 @@ output;
<code>localport</code> ports) that are down (unless <code>
ignore_lsp_down</code> is configured as true in <code>options</code>
column of <code>NB_Global</code> table of the <code>Northbound</code>
- database), for logical ports of type <code>virtual</code> and for
- logical ports with 'unknown' address set.
+ database), for logical ports of type <code>virtual</code>, for
+ logical ports with 'unknown' address set and for logical ports of
+ a logical switch configured with
+ <code>other_config:vlan-passthru=true</code>.
</p>
</li>
@@ -3710,6 +3712,13 @@ icmp6 {
external ip and <var>D</var> is NAT external mac.
</li>
+ <li>
+ For each NAT rule in the OVN Northbound database that can
+ be handled in a distributed manner, a priority-80 logical flow
+ with drop action if the NAT logical port is a virtual port not
+ claimed by any chassis yet.
+ </li>
+
<li>
A priority-50 logical flow with match
<code>outport == <var>GW</var></code> has actions
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 3dae7bb1c..148e3ee21 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -7007,6 +7007,10 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
return;
}
+ if (is_vlan_transparent(op->od)) {
+ return;
+ }
+
for (size_t i = 0; i < op->n_lsp_addrs; i++) {
for (size_t j = 0; j < op->lsp_addrs[i].n_ipv4_addrs; j++) {
ds_clear(match);
@@ -7371,6 +7375,7 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
struct mcast_switch_info *mcast_sw_info =
&igmp_group->datapath->mcast_info.sw;
+ uint64_t table_size = mcast_sw_info->table_size;
if (IN6_IS_ADDR_V4MAPPED(&igmp_group->address)) {
/* RFC 4541, section 2.1.2, item 2: Skip groups in the 224.0.0.X
@@ -7381,10 +7386,8 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
if (ip_is_local_multicast(group_address)) {
return;
}
-
if (atomic_compare_exchange_strong(
- &mcast_sw_info->active_v4_flows,
- (uint64_t *) &mcast_sw_info->table_size,
+ &mcast_sw_info->active_v4_flows, &table_size,
mcast_sw_info->table_size)) {
return;
}
@@ -7399,8 +7402,7 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
return;
}
if (atomic_compare_exchange_strong(
- &mcast_sw_info->active_v6_flows,
- (uint64_t *) &mcast_sw_info->table_size,
+ &mcast_sw_info->active_v6_flows, &table_size,
mcast_sw_info->table_size)) {
return;
}
@@ -8039,10 +8041,16 @@ route_hash(struct parsed_route *route)
static struct ovs_mutex bfd_lock = OVS_MUTEX_INITIALIZER;
+static bool
+find_static_route_outport(struct ovn_datapath *od, struct hmap *ports,
+ const struct nbrec_logical_router_static_route *route, bool is_ipv4,
+ const char **p_lrp_addr_s, struct ovn_port **p_out_port);
+
/* Parse and validate the route. Return the parsed route if successful.
* Otherwise return NULL. */
static struct parsed_route *
-parsed_routes_add(struct ovs_list *routes,
+parsed_routes_add(struct ovn_datapath *od, struct hmap *ports,
+ struct ovs_list *routes,
const struct nbrec_logical_router_static_route *route,
struct hmap *bfd_connections)
{
@@ -8050,7 +8058,8 @@ parsed_routes_add(struct ovs_list *routes,
struct in6_addr nexthop;
unsigned int plen;
bool is_discard_route = !strcmp(route->nexthop, "discard");
- if (!is_discard_route) {
+ bool valid_nexthop = strlen(route->nexthop) && !is_discard_route;
+ if (valid_nexthop) {
if (!ip46_parse_cidr(route->nexthop, &nexthop, &plen)) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
VLOG_WARN_RL(&rl, "bad 'nexthop' %s in static route"
@@ -8079,7 +8088,7 @@ parsed_routes_add(struct ovs_list *routes,
}
/* Verify that ip_prefix and nexthop have same address familiy. */
- if (!is_discard_route) {
+ if (valid_nexthop) {
if (IN6_IS_ADDR_V4MAPPED(&prefix) != IN6_IS_ADDR_V4MAPPED(&nexthop)) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
VLOG_WARN_RL(&rl, "Address family doesn't match between 'ip_prefix'"
@@ -8090,6 +8099,14 @@ parsed_routes_add(struct ovs_list *routes,
}
}
+ /* Verify that ip_prefix and nexthop are on the same network. */
+ if (!is_discard_route &&
+ !find_static_route_outport(od, ports, route,
+ IN6_IS_ADDR_V4MAPPED(&prefix),
+ NULL, NULL)) {
+ return NULL;
+ }
+
const struct nbrec_bfd *nb_bt = route->bfd;
if (nb_bt && !strcmp(nb_bt->dst_ip, route->nexthop)) {
struct bfd_entry *bfd_e;
@@ -8364,8 +8381,12 @@ find_static_route_outport(struct ovn_datapath *od, struct hmap *ports,
route->ip_prefix, route->nexthop);
return false;
}
- *p_out_port = out_port;
- *p_lrp_addr_s = lrp_addr_s;
+ if (p_out_port) {
+ *p_out_port = out_port;
+ }
+ if (p_lrp_addr_s) {
+ *p_lrp_addr_s = lrp_addr_s;
+ }
return true;
}
@@ -8563,7 +8584,7 @@ add_route(struct hmap *lflows, struct ovn_datapath *od,
} else {
ds_put_format(&common_actions, REG_ECMP_GROUP_ID" = 0; %s = ",
is_ipv4 ? REG_NEXT_HOP_IPV4 : REG_NEXT_HOP_IPV6);
- if (gateway) {
+ if (gateway && strlen(gateway)) {
ds_put_cstr(&common_actions, gateway);
} else {
ds_put_format(&common_actions, "ip%s.dst", is_ipv4 ? "4" : "6");
@@ -9892,8 +9913,8 @@ build_static_route_flows_for_lrouter(
struct ecmp_groups_node *group;
for (int i = 0; i < od->nbr->n_static_routes; i++) {
struct parsed_route *route =
- parsed_routes_add(&parsed_routes, od->nbr->static_routes[i],
- bfd_connections);
+ parsed_routes_add(od, ports, &parsed_routes,
+ od->nbr->static_routes[i], bfd_connections);
if (!route) {
continue;
}
@@ -11656,6 +11677,7 @@ lrouter_check_nat_entry(struct ovn_datapath *od, const struct nbrec_nat *nat,
static void
build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od,
struct hmap *lflows,
+ struct hmap *ports,
struct shash *meter_groups,
struct hmap *lbs,
struct ds *match, struct ds *actions)
@@ -11763,10 +11785,21 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od,
ds_clear(match);
ds_clear(actions);
ds_put_format(match,
- "ip%s.src == %s && outport == %s && "
- "is_chassis_resident(\"%s\")",
+ "ip%s.src == %s && outport == %s",
is_v6 ? "6" : "4", nat->logical_ip,
- od->l3dgw_port->json_key, nat->logical_port);
+ od->l3dgw_port->json_key);
+ /* Add a rule to drop traffic from a distributed NAT if
+ * the virtual port has not been claimed yet because otherwise
+ * the traffic will be centralized, misconfiguring the TOR switch.
+ */
+ struct ovn_port *op = ovn_port_find(ports, nat->logical_port);
+ if (op && op->nbsp && !strcmp(op->nbsp->type, "virtual")) {
+ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT,
+ 80, ds_cstr(match), "drop;",
+ &nat->header_);
+ }
+ ds_put_format(match, " && is_chassis_resident(\"%s\")",
+ nat->logical_port);
ds_put_format(actions, "eth.src = %s; %s = %s; next;",
nat->external_mac,
is_v6 ? REG_SRC_IPV6 : REG_SRC_IPV4,
@@ -11800,6 +11833,7 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od,
ds_put_format(actions,
"clone { ct_clear; "
"inport = outport; outport = \"\"; "
+ "eth.dst <-> eth.src; "
"flags = 0; flags.loopback = 1; ");
for (int j = 0; j < MFF_N_LOG_REGS; j++) {
ds_put_format(actions, "reg%d = 0; ", j);
@@ -11925,8 +11959,9 @@ build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od,
&lsi->actions);
build_misc_local_traffic_drop_flows_for_lrouter(od, lsi->lflows);
build_lrouter_arp_nd_for_datapath(od, lsi->lflows);
- build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->meter_groups,
- lsi->lbs, &lsi->match, &lsi->actions);
+ build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->ports,
+ lsi->meter_groups, lsi->lbs, &lsi->match,
+ &lsi->actions);
}
/* Helper function to combine all lflow generation which is iterated by port.
@@ -13271,7 +13306,7 @@ ovnnb_db_run(struct northd_context *ctx,
struct smap options;
smap_clone(&options, &nb->options);
- smap_add(&options, "mac_prefix", mac_addr_prefix);
+ smap_replace(&options, "mac_prefix", mac_addr_prefix);
if (!monitor_mac) {
eth_addr_random(&svc_monitor_mac_ea);
@@ -13286,8 +13321,10 @@ ovnnb_db_run(struct northd_context *ctx,
smap_replace(&options, "northd_internal_version", ovn_internal_version);
- nbrec_nb_global_verify_options(nb);
- nbrec_nb_global_set_options(nb, &options);
+ if (!smap_equal(&nb->options, &options)) {
+ nbrec_nb_global_verify_options(nb);
+ nbrec_nb_global_set_options(nb, &options);
+ }
smap_destroy(&options);
diff --git a/northd/ovn_northd.dl b/northd/ovn_northd.dl
index 3afa80a3b..de6a0652e 100644
--- a/northd/ovn_northd.dl
+++ b/northd/ovn_northd.dl
@@ -3309,7 +3309,8 @@ for (CheckLspIsUp[check_lsp_is_up]) {
((lsp_is_up(lsp) or not check_lsp_is_up)
or lsp.__type == "router" or lsp.__type == "localport") and
lsp.__type != "external" and lsp.__type != "virtual" and
- not lsp.addresses.contains("unknown"))
+ not lsp.addresses.contains("unknown") and
+ not sw.is_vlan_transparent)
{
var __match = "arp.tpa == ${addr.addr} && arp.op == 1" in
{
@@ -3359,7 +3360,8 @@ for (SwitchPortIPv6Address(.port = &SwitchPort{.lsp = lsp, .json_name = json_nam
.ea = ea, .addr = addr)
if lsp.is_enabled() and
(lsp_is_up(lsp) or lsp.__type == "router" or lsp.__type == "localport") and
- lsp.__type != "external" and lsp.__type != "virtual")
+ lsp.__type != "external" and lsp.__type != "virtual" and
+ not sw.is_vlan_transparent)
{
var __match = "nd_ns && ip6.dst == {${addr.addr}, ${addr.solicited_node()}} && nd.target == ${addr.addr}" in
var actions = "${if (lsp.__type == \"router\") \"nd_na_router\" else \"nd_na\"} { "
@@ -5555,6 +5557,10 @@ for (rp in &RouterPort(.router = &Router{._uuid = lr_uuid, .options = lr_options
}
}
+relation VirtualLogicalPort(logical_port: Option<string>)
+VirtualLogicalPort(Some{logical_port}) :-
+ lsp in &nb::Logical_Switch_Port(.name = logical_port, .__type = "virtual").
+
/* NAT rules are only valid on Gateway routers and routers with
* l3dgw_port (router has a port with "redirect-chassis"
* specified). */
@@ -5649,7 +5655,7 @@ for (r in &Router(._uuid = lr_uuid,
} in
if (nat.nat.__type == "dnat" or nat.nat.__type == "dnat_and_snat") {
None = l3dgw_port in
- var __match = "ip && ip4.dst == ${nat.nat.external_ip}" in
+ var __match = "ip && ${ipX}.dst == ${nat.nat.external_ip}" in
(var ext_ip_match, var ext_flow) = lrouter_nat_add_ext_ip_match(
r, nat, __match, ipX, true, mask) in
{
@@ -5900,6 +5906,17 @@ for (r in &Router(._uuid = lr_uuid,
.actions = actions,
.external_ids = stage_hint(nat.nat._uuid));
+ for (VirtualLogicalPort(nat.nat.logical_port)) {
+ Some{var gwport} = l3dgw_port in
+ Flow(.logical_datapath = lr_uuid,
+ .stage = s_ROUTER_IN_GW_REDIRECT(),
+ .priority = 80,
+ .__match = "${ipX}.src == ${nat.nat.logical_ip} && "
+ "outport == ${json_string_escape(gwport.name)}",
+ .actions = "drop;",
+ .external_ids = stage_hint(nat.nat._uuid))
+ };
+
/* Egress Loopback table: For NAT on a distributed router.
* If packets in the egress pipeline on the distributed
* gateway port have ip.dst matching a NAT external IP, then
@@ -5925,6 +5942,7 @@ for (r in &Router(._uuid = lr_uuid,
var actions =
"clone { ct_clear; "
"inport = outport; outport = \"\"; "
+ "eth.dst <-> eth.src; "
"flags = 0; flags.loopback = 1; " ++
regs.join("") ++
"${rEGBIT_EGRESS_LOOPBACK()} = 1; "
@@ -6468,6 +6486,11 @@ Route(key, dst.port, dst.src_ip, Some{dst.nexthop}) :-
dsts.size() == 1,
Some{var dst} = dsts.nth(0).
+Route(key, dst.port, dst.src_ip, None) :-
+ RouterStaticRouteEmptyNextHop(.router = router, .key = key, .dsts = dsts),
+ dsts.size() == 1,
+ Some{var dst} = dsts.nth(0).
+
/* Return a vector of pairs (1, set[0]), ... (n, set[n - 1]). */
function numbered_vec(set: Set<'A>) : Vec<(bit<16>, 'A)> = {
var vec = vec_with_capacity(set.size());
diff --git a/tests/automake.mk b/tests/automake.mk
index 742e5cff2..a8ec64212 100644
--- a/tests/automake.mk
+++ b/tests/automake.mk
@@ -34,6 +34,7 @@ TESTSUITE_AT = \
tests/ovn-performance.at \
tests/ovn-ofctrl-seqno.at \
tests/ovn-ipam.at \
+ tests/ovn-features.at \
tests/ovn-lflow-cache.at \
tests/ovn-ipsec.at
@@ -207,6 +208,7 @@ $(srcdir)/package.m4: $(top_srcdir)/configure.ac
noinst_PROGRAMS += tests/ovstest
tests_ovstest_SOURCES = \
+ include/ovn/features.h \
tests/ovstest.c \
tests/ovstest.h \
tests/test-utils.c \
@@ -218,6 +220,7 @@ tests_ovstest_SOURCES = \
controller/lflow-cache.h \
controller/ofctrl-seqno.c \
controller/ofctrl-seqno.h \
+ lib/test-ovn-features.c \
northd/test-ipam.c \
northd/ipam.c \
northd/ipam.h
diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
index 72c07b3fa..1aab49ae8 100644
--- a/tests/ovn-controller.at
+++ b/tests/ovn-controller.at
@@ -151,23 +151,24 @@ sysid=$(ovs-vsctl get Open_vSwitch . external_ids:system-id)
check_datapath_type () {
datapath_type=$1
chassis_datapath_type=$(ovn-sbctl get Chassis ${sysid} other_config:datapath-type | sed -e 's/"//g') #"
- test "${datapath_type}" = "${chassis_datapath_type}"
+ ovs_datapath_type=$(ovs-vsctl get Bridge br-int datapath-type)
+ test "${datapath_type}" = "${chassis_datapath_type}" && test "${datapath_type}" = "${ovs_datapath_type}"
}
-OVS_WAIT_UNTIL([check_datapath_type ""])
+OVS_WAIT_UNTIL([check_datapath_type system])
ovs-vsctl set Bridge br-int datapath-type=foo
OVS_WAIT_UNTIL([check_datapath_type foo])
# Change "ovn-bridge-mappings" value. It should not change the "datapath-type".
ovs-vsctl set Open_vSwitch . external_ids:ovn-bridge-mappings=foo-mapping
-check_datapath_type foo
+AT_CHECK([check_datapath_type foo])
ovs-vsctl set Bridge br-int datapath-type=bar
OVS_WAIT_UNTIL([check_datapath_type bar])
ovs-vsctl set Bridge br-int datapath-type=\"\"
-OVS_WAIT_UNTIL([check_datapath_type ""])
+OVS_WAIT_UNTIL([check_datapath_type system])
# Set the datapath_type in external_ids:ovn-bridge-datapath-type.
ovs-vsctl set Open_vSwitch . external_ids:ovn-bridge-datapath-type=foo
@@ -176,11 +177,9 @@ OVS_WAIT_UNTIL([check_datapath_type foo])
# Change the br-int's datapath type to bar.
# It should be reset to foo since ovn-bridge-datapath-type is configured.
ovs-vsctl set Bridge br-int datapath-type=bar
-OVS_WAIT_UNTIL([test foo = `ovs-vsctl get Bridge br-int datapath-type`])
OVS_WAIT_UNTIL([check_datapath_type foo])
ovs-vsctl set Open_vSwitch . external_ids:ovn-bridge-datapath-type=foobar
-OVS_WAIT_UNTIL([test foobar = `ovs-vsctl get Bridge br-int datapath-type`])
OVS_WAIT_UNTIL([check_datapath_type foobar])
expected_iface_types=$(ovs-vsctl get Open_vSwitch . iface_types | tr -d '[[]] ""')
@@ -393,6 +392,37 @@ OVN_CLEANUP([hv])
AT_CLEANUP
])
+# check that nb_cfg overflow cases are handled properly
+AT_SETUP([ovn-controller - overflow the nb_cfg value across the tables])
+AT_KEYWORDS([ovn])
+ovn_start
+
+net_add n1
+sim_add hv
+as hv
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+check ovn-nbctl --wait=hv sync
+
+# overflow the NB_Global nb_cfg value
+check ovn-nbctl set NB_Global . nb_cfg=9223372036854775806
+
+# nb_cfg must be set to zero if it exceeds the value of LLONG_MAX.
+# The commands below try to increase the value of nb_cfg beyond LLONG_MAX
+# and expect zero as the resulting value.
+check ovn-nbctl --wait=hv sync
+check ovn-nbctl --wait=hv sync
+
+# nb_cfg should be set to 1 in the chassis_private/nb_global/sb_global table
+check_column 1 chassis_private nb_cfg
+check_column 1 sb_global nb_cfg
+check_column 1 nb:nb_global nb_cfg
+check_column 0 chassis nb_cfg
+
+OVN_CLEANUP([hv])
+AT_CLEANUP
+
# Test unix command: debug/delay-nb-cfg-report
OVN_FOR_EACH_NORTHD([
AT_SETUP([ovn-controller - debug/delay-nb-cfg-report])
diff --git a/tests/ovn-features.at b/tests/ovn-features.at
new file mode 100644
index 000000000..36bd83055
--- /dev/null
+++ b/tests/ovn-features.at
@@ -0,0 +1,8 @@
+#
+# Unit tests for the lib/features.c module.
+#
+AT_BANNER([OVN unit tests - features])
+
+AT_SETUP([ovn -- unit test -- OVS feature detection tests])
+AT_CHECK([ovstest test-ovn-features run], [0], [])
+AT_CLEANUP
diff --git a/tests/ovn-nbctl.at b/tests/ovn-nbctl.at
index 1058d418a..0922e1aa0 100644
--- a/tests/ovn-nbctl.at
+++ b/tests/ovn-nbctl.at
@@ -1442,11 +1442,16 @@ dnl ---------------------------------------------------------------------
OVN_NBCTL_TEST([ovn_nbctl_routes], [routes], [
AT_CHECK([ovn-nbctl lr-add lr0])
+AT_CHECK([ovn-nbctl lrp-add lr0 lp0 f0:00:00:00:00:01 10.0.0.254/24])
dnl Check IPv4 routes
AT_CHECK([ovn-nbctl lr-route-add lr0 0.0.0.0/0 192.168.0.1])
AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.1.0/24 11.0.1.1 lp0])
AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.0.1/24 11.0.0.2])
+AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.10.0/24 lp0])
+AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.10.0/24 lp1], [1], [],
+ [ovn-nbctl: bad IPv4 nexthop argument: lp1
+])
dnl Add overlapping route with 10.0.0.1/24
AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.0.111/24 11.0.0.1], [1], [],
@@ -1495,6 +1500,7 @@ AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
IPv4 Routes
10.0.0.0/24 11.0.0.1 dst-ip
10.0.1.0/24 11.0.1.1 dst-ip lp0
+ 10.0.10.0/24 dst-ip lp0
20.0.0.0/24 discard dst-ip
9.16.1.0/24 11.0.0.1 src-ip
10.0.0.0/24 11.0.0.2 src-ip
@@ -1502,11 +1508,13 @@ IPv4 Routes
0.0.0.0/0 192.168.0.1 dst-ip
])
+AT_CHECK([ovn-nbctl lrp-add lr0 lp1 f0:00:00:00:00:02 11.0.0.254/24])
AT_CHECK([ovn-nbctl --may-exist lr-route-add lr0 10.0.0.111/24 11.0.0.1 lp1])
AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
IPv4 Routes
10.0.0.0/24 11.0.0.1 dst-ip lp1
10.0.1.0/24 11.0.1.1 dst-ip lp0
+ 10.0.10.0/24 dst-ip lp0
20.0.0.0/24 discard dst-ip
9.16.1.0/24 11.0.0.1 src-ip
10.0.0.0/24 11.0.0.2 src-ip
@@ -1535,6 +1543,7 @@ AT_CHECK([ovn-nbctl --policy=src-ip lr-route-del lr0 9.16.1.0/24])
AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
IPv4 Routes
10.0.0.0/24 11.0.0.1 dst-ip lp1
+ 10.0.10.0/24 dst-ip lp0
10.0.0.0/24 11.0.0.2 src-ip
0.0.0.0/0 192.168.0.1 dst-ip
])
@@ -1544,6 +1553,7 @@ AT_CHECK([ovn-nbctl --policy=dst-ip lr-route-del lr0 10.0.0.0/24])
AT_CHECK([ovn-nbctl --policy=src-ip lr-route-del lr0 10.0.0.0/24])
AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
IPv4 Routes
+ 10.0.10.0/24 dst-ip lp0
0.0.0.0/0 192.168.0.1 dst-ip
])
@@ -1553,6 +1563,7 @@ AT_CHECK([ovn-nbctl --policy=src-ip lr-route-add lr0 10.0.0.0/24 11.0.0.2])
AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.0.0/24])
AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
IPv4 Routes
+ 10.0.10.0/24 dst-ip lp0
0.0.0.0/0 192.168.0.1 dst-ip
])
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index ad1732da3..2b70f48f6 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -3016,16 +3016,16 @@ for i in $(seq 1 5); do
check ovn-nbctl --wait=sb lsp-set-addresses sw$i-r0 00:00:00:00:00:0$i
done
-uuid=$(ovn-nbctl create bfd logical_port=r0-sw1 dst_ip=192.168.10.2 status=down min_tx=250 min_rx=250 detect_mult=10)
-ovn-nbctl create bfd logical_port=r0-sw2 dst_ip=192.168.20.2 status=down min_tx=500 min_rx=500 detect_mult=20
-ovn-nbctl create bfd logical_port=r0-sw3 dst_ip=192.168.30.2 status=down
-ovn-nbctl create bfd logical_port=r0-sw4 dst_ip=192.168.40.2 status=down min_tx=0 detect_mult=0
+uuid=$(ovn-nbctl create bfd logical_port=r0-sw1 dst_ip=192.168.1.2 status=down min_tx=250 min_rx=250 detect_mult=10)
+ovn-nbctl create bfd logical_port=r0-sw2 dst_ip=192.168.2.2 status=down min_tx=500 min_rx=500 detect_mult=20
+ovn-nbctl create bfd logical_port=r0-sw3 dst_ip=192.168.3.2 status=down
+ovn-nbctl create bfd logical_port=r0-sw4 dst_ip=192.168.4.2 status=down min_tx=0 detect_mult=0
-wait_row_count bfd 1 logical_port=r0-sw1 detect_mult=10 dst_ip=192.168.10.2 \
+wait_row_count bfd 1 logical_port=r0-sw1 detect_mult=10 dst_ip=192.168.1.2 \
min_rx=250 min_tx=250 status=admin_down
-wait_row_count bfd 1 logical_port=r0-sw2 detect_mult=20 dst_ip=192.168.20.2 \
+wait_row_count bfd 1 logical_port=r0-sw2 detect_mult=20 dst_ip=192.168.2.2 \
min_rx=500 min_tx=500 status=admin_down
-wait_row_count bfd 1 logical_port=r0-sw3 detect_mult=5 dst_ip=192.168.30.2 \
+wait_row_count bfd 1 logical_port=r0-sw3 detect_mult=5 dst_ip=192.168.3.2 \
min_rx=1000 min_tx=1000 status=admin_down
uuid=$(fetch_column nb:bfd _uuid logical_port=r0-sw1)
@@ -3036,17 +3036,17 @@ check ovn-nbctl clear bfd $uuid_2 min_rx
wait_row_count bfd 1 logical_port=r0-sw2 min_rx=1000
wait_row_count bfd 1 logical_port=r0-sw1 min_rx=1000 min_tx=1000 detect_mult=100
-check ovn-nbctl --bfd=$uuid lr-route-add r0 100.0.0.0/8 192.168.10.2
+check ovn-nbctl --bfd=$uuid lr-route-add r0 100.0.0.0/8 192.168.1.2
wait_column down bfd status logical_port=r0-sw1
-AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.10.2 | grep -q bfd],[0])
+AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.1.2 | grep -q bfd],[0])
-check ovn-nbctl --bfd lr-route-add r0 200.0.0.0/8 192.168.20.2
+check ovn-nbctl --bfd lr-route-add r0 200.0.0.0/8 192.168.2.2
wait_column down bfd status logical_port=r0-sw2
-AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.20.2 | grep -q bfd],[0])
+AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.2.2 | grep -q bfd],[0])
-check ovn-nbctl --bfd lr-route-add r0 240.0.0.0/8 192.168.50.2 r0-sw5
+check ovn-nbctl --bfd lr-route-add r0 240.0.0.0/8 192.168.5.2 r0-sw5
wait_column down bfd status logical_port=r0-sw5
-AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.50.2 | grep -q bfd],[0])
+AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.5.2 | grep -q bfd],[0])
route_uuid=$(fetch_column nb:logical_router_static_route _uuid ip_prefix="100.0.0.0/8")
check ovn-nbctl clear logical_router_static_route $route_uuid bfd
@@ -3659,3 +3659,73 @@ check ovn-nbctl --wait=sb sync
OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
AT_CLEANUP
])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn -- static routes flows])
+AT_KEYWORDS([static-routes-flows])
+ovn_start
+
+check ovn-sbctl chassis-add ch1 geneve 127.0.0.1
+
+check ovn-nbctl lr-add lr0
+check ovn-nbctl ls-add public
+check ovn-nbctl lrp-add lr0 lr0-public 00:00:20:20:12:13 192.168.0.1/24
+check ovn-nbctl lsp-add public public-lr0
+check ovn-nbctl lsp-set-type public-lr0 router
+check ovn-nbctl lsp-set-addresses public-lr0 router
+check ovn-nbctl lsp-set-options public-lr0 router-port=lr0-public
+
+check ovn-nbctl --wait=sb --ecmp-symmetric-reply lr-route-add lr0 1.0.0.1 192.168.0.10
+
+ovn-sbctl dump-flows lr0 > lr0flows
+
+AT_CHECK([grep -e "lr_in_ip_routing.*select" lr0flows |sort], [0], [dnl
+])
+AT_CHECK([grep -e "lr_in_ip_routing_ecmp" lr0flows |sort], [0], [dnl
+ table=11(lr_in_ip_routing_ecmp), priority=150 , match=(reg8[[0..15]] == 0), action=(next;)
+])
+
+check ovn-nbctl --wait=sb --ecmp-symmetric-reply lr-route-add lr0 1.0.0.1 192.168.0.20
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CHECK([grep -e "lr_in_ip_routing.*select" lr0flows |sort], [0], [dnl
+ table=10(lr_in_ip_routing ), priority=65 , match=(ip4.dst == 1.0.0.1/32), action=(ip.ttl--; flags.loopback = 1; reg8[[0..15]] = 1; reg8[[16..31]] = select(1, 2);)
+])
+AT_CHECK([grep -e "lr_in_ip_routing_ecmp" lr0flows | sed 's/192\.168\.0\..0/192.168.0.??/' |sort], [0], [dnl
+ table=11(lr_in_ip_routing_ecmp), priority=100 , match=(reg8[[0..15]] == 1 && reg8[[16..31]] == 1), action=(reg0 = 192.168.0.??; reg1 = 192.168.0.1; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; next;)
+ table=11(lr_in_ip_routing_ecmp), priority=100 , match=(reg8[[0..15]] == 1 && reg8[[16..31]] == 2), action=(reg0 = 192.168.0.??; reg1 = 192.168.0.1; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; next;)
+ table=11(lr_in_ip_routing_ecmp), priority=150 , match=(reg8[[0..15]] == 0), action=(next;)
+])
+
+# add ecmp route with wrong nexthop
+check ovn-nbctl --wait=sb --ecmp-symmetric-reply lr-route-add lr0 1.0.0.1 192.168.1.20
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CHECK([grep -e "lr_in_ip_routing.*select" lr0flows |sort], [0], [dnl
+ table=10(lr_in_ip_routing ), priority=65 , match=(ip4.dst == 1.0.0.1/32), action=(ip.ttl--; flags.loopback = 1; reg8[[0..15]] = 1; reg8[[16..31]] = select(1, 2);)
+])
+AT_CHECK([grep -e "lr_in_ip_routing_ecmp" lr0flows | sed 's/192\.168\.0\..0/192.168.0.??/' |sort], [0], [dnl
+ table=11(lr_in_ip_routing_ecmp), priority=100 , match=(reg8[[0..15]] == 1 && reg8[[16..31]] == 1), action=(reg0 = 192.168.0.??; reg1 = 192.168.0.1; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; next;)
+ table=11(lr_in_ip_routing_ecmp), priority=100 , match=(reg8[[0..15]] == 1 && reg8[[16..31]] == 2), action=(reg0 = 192.168.0.??; reg1 = 192.168.0.1; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; next;)
+ table=11(lr_in_ip_routing_ecmp), priority=150 , match=(reg8[[0..15]] == 0), action=(next;)
+])
+
+check ovn-nbctl lr-route-del lr0
+wait_row_count nb:Logical_Router_Static_Route 0
+
+check ovn-nbctl --wait=sb lr-route-add lr0 1.0.0.0/24 192.168.0.10
+ovn-sbctl dump-flows lr0 > lr0flows
+
+AT_CHECK([grep -e "lr_in_ip_routing.*192.168.0.10" lr0flows |sort], [0], [dnl
+ table=10(lr_in_ip_routing ), priority=49 , match=(ip4.dst == 1.0.0.0/24), action=(ip.ttl--; reg8[[0..15]] = 0; reg0 = 192.168.0.10; reg1 = 192.168.0.1; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+])
+
+check ovn-nbctl --wait=sb lr-route-add lr0 2.0.0.0/24 lr0-public
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CHECK([grep -e "lr_in_ip_routing.*2.0.0.0" lr0flows |sort], [0], [dnl
+ table=10(lr_in_ip_routing ), priority=49 , match=(ip4.dst == 2.0.0.0/24), action=(ip.ttl--; reg8[[0..15]] = 0; reg0 = ip4.dst; reg1 = 192.168.0.1; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+])
+
+AT_CLEANUP
+])
diff --git a/tests/ovn.at b/tests/ovn.at
index aa80a7c48..5eb7f8b7b 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -3169,6 +3169,118 @@ OVN_CLEANUP([hv-1],[hv-2])
AT_CLEANUP
])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn -- VLAN transparency, passthru=true, ARP responder disabled])
+ovn_start
+
+net_add net
+check ovs-vsctl add-br br-phys
+ovn_attach net br-phys 192.168.0.1
+
+check ovn-nbctl ls-add ls
+check ovn-nbctl --wait=sb add Logical-Switch ls other_config vlan-passthru=true
+
+for i in 1 2; do
+ check ovn-nbctl lsp-add ls lsp$i
+ check ovn-nbctl lsp-set-addresses lsp$i "f0:00:00:00:00:0$i 10.0.0.$i"
+done
+
+for i in 1 2; do
+ check ovs-vsctl add-port br-int vif$i -- set Interface vif$i external-ids:iface-id=lsp$i \
+ options:tx_pcap=vif$i-tx.pcap \
+ options:rxq_pcap=vif$i-rx.pcap \
+ ofport-request=$i
+done
+
+wait_for_ports_up
+
+ovn-sbctl dump-flows ls > lsflows
+AT_CAPTURE_FILE([lsflows])
+
+AT_CHECK([grep -w "ls_in_arp_rsp" lsflows | sort], [0], [dnl
+ table=16(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;)
+])
+
+test_arp() {
+ local inport=$1 outport=$2 sha=$3 spa=$4 tpa=$5 reply_ha=$6
+ tag=8100fefe
+ local request=ffffffffffff${sha}${tag}08060001080006040001${sha}${spa}ffffffffffff${tpa}
+ ovs-appctl netdev-dummy/receive vif$inport $request
+ echo $request >> $outport.expected
+
+ local reply=${sha}${reply_ha}${tag}08060001080006040002${reply_ha}${tpa}${sha}${spa}
+ ovs-appctl netdev-dummy/receive vif$outport $reply
+ echo $reply >> $inport.expected
+}
+
+test_arp 1 2 f00000000001 0a000001 0a000002 f00000000002
+test_arp 2 1 f00000000002 0a000002 0a000001 f00000000001
+
+for i in 1 2; do
+ OVN_CHECK_PACKETS([vif$i-tx.pcap], [$i.expected])
+done
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn -- VLAN transparency, passthru=true, ND/NA responder disabled])
+ovn_start
+
+net_add net
+check ovs-vsctl add-br br-phys
+ovn_attach net br-phys 192.168.0.1
+
+check ovn-nbctl ls-add ls
+check ovn-nbctl --wait=sb add Logical-Switch ls other_config vlan-passthru=true
+
+for i in 1 2; do
+ check ovn-nbctl lsp-add ls lsp$i
+ check ovn-nbctl lsp-set-addresses lsp$i "f0:00:00:00:00:0$i fe00::$i"
+done
+
+for i in 1 2; do
+ check ovs-vsctl add-port br-int vif$i -- set Interface vif$i external-ids:iface-id=lsp$i \
+ options:tx_pcap=vif$i-tx.pcap \
+ options:rxq_pcap=vif$i-rx.pcap \
+ ofport-request=$i
+done
+
+wait_for_ports_up
+
+ovn-sbctl dump-flows ls > lsflows
+AT_CAPTURE_FILE([lsflows])
+
+AT_CHECK([grep -w "ls_in_arp_rsp" lsflows | sort], [0], [dnl
+ table=16(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;)
+])
+
+test_nd_na() {
+ local inport=$1 outport=$2 sha=$3 spa=$4 tpa=$5 reply_ha=$6
+ tag=8100fefe
+ icmp_type=87
+ local request=ffffffffffff${sha}${tag}86dd6000000000183aff${spa}ff0200000000000000000001ff${tpa: -6}${icmp_type}007ea100000000${tpa}
+ ovs-appctl netdev-dummy/receive vif$inport $request
+ echo $request >> $outport.expected
+ echo $request
+
+ icmp_type=88
+ local reply=${sha}${reply_ha}${tag}86dd6000000000183aff${tpa}${spa}${icmp_type}003da540000000${tpa}
+ ovs-appctl netdev-dummy/receive vif$outport $reply
+ echo $reply >> $inport.expected
+ echo $reply
+}
+
+test_nd_na 1 2 f00000000001 fe000000000000000000000000000001 fe000000000000000000000000000002 f00000000002
+test_nd_na 2 1 f00000000002 fe000000000000000000000000000002 fe000000000000000000000000000001 f00000000001
+
+for i in 1 2; do
+ OVN_CHECK_PACKETS([vif$i-tx.pcap], [$i.expected])
+done
+
+AT_CLEANUP
+])
+
OVN_FOR_EACH_NORTHD([
AT_SETUP([ovn -- VLAN transparency, passthru=true, multiple hosts])
ovn_start
@@ -7821,6 +7933,19 @@ mac_prefix=$(ovn-nbctl --wait=sb get NB_Global . options:mac_prefix | tr -d \")
port_addr=$(ovn-nbctl get Logical-Switch-Port p91 dynamic_addresses | tr -d \")
AT_CHECK([test "$port_addr" = "${mac_prefix}:00:00:09"], [0], [])
+# set mac_prefix to all-zeroes and check it is allocated in a random manner
+ovn-nbctl --wait=hv set NB_Global . options:mac_prefix="00:00:00:00:00:00"
+ovn-nbctl ls-add sw14
+ovn-nbctl --wait=sb set Logical-Switch sw14 other_config:mac_only=true
+ovn-nbctl --wait=sb lsp-add sw14 p141 -- lsp-set-addresses p141 dynamic
+
+mac_prefix=$(ovn-nbctl --wait=sb get NB_Global . options:mac_prefix | tr -d \")
+port_addr=$(ovn-nbctl get Logical-Switch-Port p141 dynamic_addresses | tr -d \")
+AT_CHECK([test "$mac_prefix" != "00:00:00:00:00:00"], [0], [])
+AT_CHECK([test "$port_addr" = "${mac_prefix}:00:00:0a"], [0], [])
+ovn-nbctl --wait=sb lsp-del sw14 p141
+ovn-nbctl --wait=sb ls-del sw14
+
ovn-nbctl --wait=hv set NB_Global . options:mac_prefix="00:11:22"
ovn-nbctl ls-add sw10
ovn-nbctl --wait=sb set Logical-Switch sw10 other_config:ipv6_prefix="ae01::"
@@ -11260,7 +11385,7 @@ ovn-nbctl lsp-add foo ln-foo
ovn-nbctl lsp-set-addresses ln-foo unknown
ovn-nbctl lsp-set-options ln-foo network_name=public
ovn-nbctl lsp-set-type ln-foo localnet
-AT_CHECK([ovn-nbctl set Logical_Switch_Port ln-foo tag=2])
+check ovn-nbctl set Logical_Switch_Port ln-foo tag_request=2
# Create localnet port in alice
ovn-nbctl lsp-add alice ln-alice
@@ -12024,6 +12149,91 @@ OVN_CLEANUP([hv1])
AT_CLEANUP
])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([localport doesn't suppress ARP directed to external port])
+
+ovn_start
+net_add n1
+
+check ovs-vsctl add-br br-phys
+check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+check ovn-nbctl ls-add ls
+
+# create topology to allow to talk from localport through localnet to external port
+check ovn-nbctl lsp-add ls lp
+check ovn-nbctl lsp-set-addresses lp "00:00:00:00:00:01 10.0.0.1"
+check ovn-nbctl lsp-set-type lp localport
+check ovs-vsctl add-port br-int lp -- set Interface lp external-ids:iface-id=lp
+
+check ovn-nbctl --wait=sb ha-chassis-group-add hagrp
+check ovn-nbctl --wait=sb ha-chassis-group-add-chassis hagrp main 10
+check ovn-nbctl lsp-add ls lext
+check ovn-nbctl lsp-set-addresses lext "00:00:00:00:00:02 10.0.0.2"
+check ovn-nbctl lsp-set-type lext external
+hagrp_uuid=`ovn-nbctl --bare --columns _uuid find ha_chassis_group name=hagrp`
+check ovn-nbctl set logical_switch_port lext ha_chassis_group=$hagrp_uuid
+
+check ovn-nbctl lsp-add ls ln
+check ovn-nbctl lsp-set-addresses ln unknown
+check ovn-nbctl lsp-set-type ln localnet
+check ovn-nbctl lsp-set-options ln network_name=phys
+check ovn-nbctl --wait=hv sync
+
+# also create second external port AFTER localnet to check that order is irrelevant
+check ovn-nbctl lsp-add ls lext2
+check ovn-nbctl lsp-set-addresses lext2 "00:00:00:00:00:10 10.0.0.10"
+check ovn-nbctl lsp-set-type lext2 external
+check ovn-nbctl set logical_switch_port lext2 ha_chassis_group=$hagrp_uuid
+check ovn-nbctl --wait=hv sync
+
+# create and immediately delete an external port to later check that flows for
+# deleted ports are not left over in flow table
+check ovn-nbctl lsp-add ls lext-deleted
+check ovn-nbctl lsp-set-addresses lext-deleted "00:00:00:00:00:03 10.0.0.3"
+check ovn-nbctl lsp-set-type lext-deleted external
+check ovn-nbctl set logical_switch_port lext-deleted ha_chassis_group=$hagrp_uuid
+check ovn-nbctl --wait=hv sync
+check ovn-nbctl lsp-del lext-deleted
+check ovn-nbctl --wait=hv sync
+
+send_garp() {
+ local inport=$1 eth_src=$2 eth_dst=$3 spa=$4 tpa=$5
+ local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
+ ovs-appctl netdev-dummy/receive $inport $request
+}
+
+spa=$(ip_to_hex 10 0 0 1)
+tpa=$(ip_to_hex 10 0 0 2)
+send_garp lp 000000000001 000000000002 $spa $tpa
+
+spa=$(ip_to_hex 10 0 0 1)
+tpa=$(ip_to_hex 10 0 0 10)
+send_garp lp 000000000001 000000000010 $spa $tpa
+
+spa=$(ip_to_hex 10 0 0 1)
+tpa=$(ip_to_hex 10 0 0 3)
+send_garp lp 000000000001 000000000003 $spa $tpa
+
+dnl external traffic from localport should be sent to localnet
+AT_CHECK([tcpdump -r main/br-phys_n1-tx.pcap arp[[24:4]]=0x0a000002 | wc -l],[0],[dnl
+1
+],[ignore])
+
+dnl ...regardless of localnet / external port creation order
+AT_CHECK([tcpdump -r main/br-phys_n1-tx.pcap arp[[24:4]]=0x0a00000a | wc -l],[0],[dnl
+1
+],[ignore])
+
+dnl traffic from localport should not be sent to deleted external port
+AT_CHECK([tcpdump -r main/br-phys_n1-tx.pcap arp[[24:4]]=0x0a000003 | wc -l],[0],[dnl
+0
+],[ignore])
+
+AT_CLEANUP
+])
+
OVN_FOR_EACH_NORTHD([
AT_SETUP([ovn -- 1 LR with HA distributed router gateway port])
ovn_start
@@ -12668,7 +12878,7 @@ $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv2/br-phys_n1-tx.pcap | trim_zeros
AT_CHECK([grep $garp hv2_br_phys_tx | sort], [0], [])
# change localnet port tag.
-AT_CHECK([ovn-nbctl set Logical_Switch_Port ln_port tag=2014])
+check ovn-nbctl set Logical_Switch_Port ln_port tag_request=2014
# wait for earlier changes to take effect
OVS_WAIT_UNTIL([test 1 = `as hv2 ovs-ofctl dump-flows br-int table=65 | \
@@ -17172,6 +17382,16 @@ send_arp_reply() {
as hv$hv ovs-appctl netdev-dummy/receive hv${hv}-vif$inport $request
}
+send_icmp_packet() {
+ local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv4_src=$5 ipv4_dst=$6 ip_chksum=$7 data=$8
+ shift 8
+
+ local ip_ttl=ff
+ local ip_len=001c
+ local packet=${eth_dst}${eth_src}08004500${ip_len}00004000${ip_ttl}01${ip_chksum}${ipv4_src}${ipv4_dst}${data}
+ as hv$hv ovs-appctl netdev-dummy/receive hv${hv}-vif$inport $packet
+}
+
net_add n1
sim_add hv1
@@ -17311,27 +17531,29 @@ logical_port=sw0-vir) = x])
as hv1
ovs-vsctl set interface hv1-vif3 external-ids:iface-id=sw0-vir
-AT_CHECK([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x], [0], [])
+wait_column "" Port_Binding chassis logical_port=sw0-vir
# Cleanup hv1-vif3.
as hv1
ovs-vsctl del-port hv1-vif3
-AT_CHECK([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x], [0], [])
+wait_column "" Port_Binding chassis logical_port=sw0-vir
check_virtual_offlows_present() {
hv=$1
- AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | grep "priority=2000"], [0], [dnl
- table=44, priority=2000,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=2000,ipv6,metadata=0x1 actions=resubmit(,45)
+ sw0_dp_key=$(printf "%x" $(fetch_column Datapath_Binding tunnel_key external_ids:name=sw0))
+ lr0_dp_key=$(printf "%x" $(fetch_column Datapath_Binding tunnel_key external_ids:name=lr0))
+ lr0_public_dp_key=$(printf "%x" $(fetch_column Port_Binding tunnel_key logical_port=lr0-public))
+
+ AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | grep "priority=2000"], [0], [dnl
+ table=44, priority=2000,ip,metadata=0x$sw0_dp_key actions=resubmit(,45)
+ table=44, priority=2000,ipv6,metadata=0x$sw0_dp_key actions=resubmit(,45)
])
- AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=11 | ofctl_strip_all | \
+ AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=11 | ofctl_strip_all | \
grep "priority=92" | grep 172.168.0.50], [0], [dnl
- table=11, priority=92,arp,reg14=0x3,metadata=0x3,arp_tpa=172.168.0.50,arp_op=1 actions=move:NXM_OF_ETH_SRC[[]]->NXM_OF_ETH_DST[[]],mod_dl_src:10:54:00:00:00:10,load:0x2->NXM_OF_ARP_OP[[]],move:NXM_NX_ARP_SHA[[]]->NXM_NX_ARP_THA[[]],load:0x105400000010->NXM_NX_ARP_SHA[[]],push:NXM_OF_ARP_SPA[[]],push:NXM_OF_ARP_TPA[[]],pop:NXM_OF_ARP_SPA[[]],pop:NXM_OF_ARP_TPA[[]],move:NXM_NX_REG14[[]]->NXM_NX_REG15[[]],load:0x1->NXM_NX_REG10[[0]],resubmit(,37)
+ table=11, priority=92,arp,reg14=0x$lr0_public_dp_key,metadata=0x$lr0_dp_key,arp_tpa=172.168.0.50,arp_op=1 actions=move:NXM_OF_ETH_SRC[[]]->NXM_OF_ETH_DST[[]],mod_dl_src:10:54:00:00:00:10,load:0x2->NXM_OF_ARP_OP[[]],move:NXM_NX_ARP_SHA[[]]->NXM_NX_ARP_THA[[]],load:0x105400000010->NXM_NX_ARP_SHA[[]],push:NXM_OF_ARP_SPA[[]],push:NXM_OF_ARP_TPA[[]],pop:NXM_OF_ARP_SPA[[]],pop:NXM_OF_ARP_TPA[[]],move:NXM_NX_REG14[[]]->NXM_NX_REG15[[]],load:0x1->NXM_NX_REG10[[0]],resubmit(,37)
])
}
@@ -17384,6 +17606,22 @@ logical_port=sw0-vir) = x])
wait_row_count nb:Logical_Switch_Port 1 up=false name=sw0-vir
check ovn-nbctl --wait=hv sync
+
+# verify the traffic from virtual port is discarded if the port is not claimed
+AT_CHECK([grep lr_in_gw_redirect lr0-flows2 | grep "ip4.src == 10.0.0.10"], [0], [dnl
+ table=17(lr_in_gw_redirect ), priority=100 , match=(ip4.src == 10.0.0.10 && outport == "lr0-public" && is_chassis_resident("sw0-vir")), action=(eth.src = 10:54:00:00:00:10; reg1 = 172.168.0.50; next;)
+ table=17(lr_in_gw_redirect ), priority=80 , match=(ip4.src == 10.0.0.10 && outport == "lr0-public"), action=(drop;)
+])
+
+eth_src=505400000003
+eth_dst=00000000ff01
+ip_src=$(ip_to_hex 10 0 0 10)
+ip_dst=$(ip_to_hex 172 168 0 101)
+send_icmp_packet 1 1 $eth_src $eth_dst $ip_src $ip_dst c4c9 0000000000000000000000
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | awk '/table=25, n_packets=1, n_bytes=45/{print $7" "$8}'],[0],[dnl
+priority=80,ip,reg15=0x3,metadata=0x3,nw_src=10.0.0.10 actions=drop
+])
+
# hv1 should remove the flow for the ACL with is_chassis_redirect check for sw0-vir.
check_virtual_offlows_not_present hv1
@@ -23116,7 +23354,7 @@ AT_CHECK([
for hv in 1 2; do
grep table=15 hv${hv}flows | \
grep "priority=100" | \
- grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
+ grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
grep table=22 hv${hv}flows | \
grep "priority=200" | \
@@ -23241,7 +23479,7 @@ AT_CHECK([
for hv in 1 2; do
grep table=15 hv${hv}flows | \
grep "priority=100" | \
- grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
+ grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
grep table=22 hv${hv}flows | \
grep "priority=200" | \
@@ -26688,6 +26926,50 @@ OVN_CLEANUP([hv1])
AT_CLEANUP
])
+# Tests that ACLs referencing port groups that include ports connected to
+# logical routers are correctly applied.
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn -- ACL with Port Group including router ports])
+ovn_start
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+check ovn-nbctl \
+ -- lr-add lr \
+ -- ls-add ls \
+ -- lrp-add lr lrp_ls 00:00:00:00:00:01 42.42.42.1/24 \
+ -- lsp-add ls ls_lr \
+ -- lsp-set-addresses ls_lr router \
+ -- lsp-set-type ls_lr router \
+ -- lsp-set-options ls_lr router-port=lr_ls \
+ -- lsp-add ls vm1
+
+check ovn-nbctl pg-add pg ls_lr \
+ -- acl-add pg from-lport 1 'inport == @pg && ip4.dst == 42.42.42.42' drop
+
+check ovs-vsctl add-port br-int vm1 \
+ -- set interface vm1 external_ids:iface-id=vm1
+
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
+dp_key=$(fetch_column Datapath_Binding tunnel_key external_ids:name=ls)
+rtr_port_key=$(fetch_column Port_Binding tunnel_key logical_port=ls_lr)
+
+# Check that ovn-controller adds a flow to drop packets with dest IP
+# 42.42.42.42 coming from the router port.
+AT_CHECK([ovs-ofctl dump-flows br-int table=17 | grep "reg14=0x${rtr_port_key},metadata=0x${dp_key},nw_dst=42.42.42.42 actions=drop" -c], [0], [dnl
+1
+])
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+])
+
OVN_FOR_EACH_NORTHD([
AT_SETUP([ovn -- Static route with discard nexthop])
ovn_start
diff --git a/tests/system-common-macros.at b/tests/system-common-macros.at
index c8fa6f03f..b742a2cb9 100644
--- a/tests/system-common-macros.at
+++ b/tests/system-common-macros.at
@@ -330,3 +330,7 @@ m4_define([OVS_CHECK_IPROUTE_ENCAP],
# OVS_CHECK_CT_CLEAR()
m4_define([OVS_CHECK_CT_CLEAR],
[AT_SKIP_IF([! grep -q "Datapath supports ct_clear action" ovs-vswitchd.log])])
+
+# OVS_CHECK_CT_ZERO_SNAT()
+m4_define([OVS_CHECK_CT_ZERO_SNAT],
+  [AT_SKIP_IF([! grep -q "Datapath supports ct_zero_snat" ovs-vswitchd.log])])
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index 310bd3d5a..56cd26535 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -1348,7 +1348,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -3121,7 +3121,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -4577,7 +4577,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -4663,7 +4663,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -4903,7 +4903,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -5287,7 +5287,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -5296,6 +5296,196 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
AT_CLEANUP
])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn -- load-balancer and firewall tuple conflict IPv4])
+AT_SKIP_IF([test $HAVE_NC = no])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+OVS_CHECK_CT_ZERO_SNAT()
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+# Logical network:
+# 1 logical switch connetected to one logical router.
+# 2 VMs, one used as backend for a load balancer.
+
+check ovn-nbctl \
+ -- lr-add rtr \
+ -- lrp-add rtr rtr-ls 00:00:00:00:01:00 42.42.42.1/24 \
+ -- ls-add ls \
+ -- lsp-add ls ls-rtr \
+ -- lsp-set-addresses ls-rtr 00:00:00:00:01:00 \
+ -- lsp-set-type ls-rtr router \
+ -- lsp-set-options ls-rtr router-port=rtr-ls \
+ -- lsp-add ls vm1 -- lsp-set-addresses vm1 00:00:00:00:00:01 \
+ -- lsp-add ls vm2 -- lsp-set-addresses vm2 00:00:00:00:00:02 \
+ -- lb-add lb-test 66.66.66.66:666 42.42.42.2:4242 tcp \
+ -- ls-lb-add ls lb-test
+
+ADD_NAMESPACES(vm1)
+ADD_VETH(vm1, vm1, br-int, "42.42.42.2/24", "00:00:00:00:00:01", "42.42.42.1")
+
+ADD_NAMESPACES(vm2)
+ADD_VETH(vm2, vm2, br-int, "42.42.42.3/24", "00:00:00:00:00:02", "42.42.42.1")
+
+# Wait for ovn-controller to catch up.
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
+# Start IPv4 TCP server on vm1.
+NETNS_DAEMONIZE([vm1], [nc -k -l 42.42.42.2 4242], [nc-vm1.pid])
+
+# Make sure connecting to the VIP works.
+NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -p 2000 -z])
+
+# Start IPv4 TCP connection to VIP from vm2.
+NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -p 2001 -z])
+
+# Check conntrack. We expect two entries:
+# - one in vm1's zone (firewall)
+# - one in vm2's zone (dnat)
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
+grep "orig=.src=42\.42\.42\.3" | \
+sed -e 's/port=2001/port=<clnt_s_port>/g' \
+ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=<rnd_port>/g' \
+ -e 's/state=[[0-9_A-Z]]*/state=<cleared>/g' \
+ -e 's/zone=[[0-9]]*/zone=<cleared>/' | sort], [0], [dnl
+tcp,orig=(src=42.42.42.3,dst=42.42.42.2,sport=<clnt_s_port>,dport=4242),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=42.42.42.3,dst=66.66.66.66,sport=<clnt_s_port>,dport=666),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,labels=0x2,protoinfo=(state=<cleared>)
+])
+
+# Start IPv4 TCP connection to backend IP from vm2 which would require
+# additional source port translation to avoid a tuple conflict.
+NS_CHECK_EXEC([vm2], [nc 42.42.42.2 4242 -p 2001 -z])
+
+# Check conntrack. We expect three entries:
+# - one in vm1's zone (firewall) - reused from the previous connection.
+# - one in vm2's zone (dnat) - still in TIME_WAIT after the previous connection.
+# - one in vm2's zone (firewall + additional all-zero SNAT)
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
+grep "orig=.src=42\.42\.42\.3" | \
+sed -e 's/port=2001/port=<clnt_s_port>/g' \
+ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=<rnd_port>/g' \
+ -e 's/state=[[0-9_A-Z]]*/state=<cleared>/g' \
+ -e 's/zone=[[0-9]]*/zone=<cleared>/' | sort], [0], [dnl
+tcp,orig=(src=42.42.42.3,dst=42.42.42.2,sport=<clnt_s_port>,dport=4242),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=42.42.42.3,dst=42.42.42.2,sport=<clnt_s_port>,dport=4242),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=<rnd_port>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=42.42.42.3,dst=66.66.66.66,sport=<clnt_s_port>,dport=666),reply=(src=42.42.42.2,dst=42.42.42.3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,labels=0x2,protoinfo=(state=<cleared>)
+])
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn -- load-balancer and firewall tuple conflict IPv6])
+AT_SKIP_IF([test $HAVE_NC = no])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+OVS_CHECK_CT_ZERO_SNAT()
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+# Logical network:
+#  1 logical switch connected to one logical router.
+# 2 VMs, one used as backend for a load balancer.
+
+check ovn-nbctl \
+ -- lr-add rtr \
+ -- lrp-add rtr rtr-ls 00:00:00:00:01:00 4242::1/64 \
+ -- ls-add ls \
+ -- lsp-add ls ls-rtr \
+ -- lsp-set-addresses ls-rtr 00:00:00:00:01:00 \
+ -- lsp-set-type ls-rtr router \
+ -- lsp-set-options ls-rtr router-port=rtr-ls \
+ -- lsp-add ls vm1 -- lsp-set-addresses vm1 00:00:00:00:00:01 \
+ -- lsp-add ls vm2 -- lsp-set-addresses vm2 00:00:00:00:00:02 \
+ -- lb-add lb-test [[6666::1]]:666 [[4242::2]]:4242 tcp \
+ -- ls-lb-add ls lb-test
+
+ADD_NAMESPACES(vm1)
+ADD_VETH(vm1, vm1, br-int, "4242::2/64", "00:00:00:00:00:01", "4242::1")
+OVS_WAIT_UNTIL([test "$(ip netns exec vm1 ip a | grep 4242::2 | grep tentative)" = ""])
+
+ADD_NAMESPACES(vm2)
+ADD_VETH(vm2, vm2, br-int, "4242::3/64", "00:00:00:00:00:02", "4242::1")
+OVS_WAIT_UNTIL([test "$(ip netns exec vm2 ip a | grep 4242::3 | grep tentative)" = ""])
+
+# Wait for ovn-controller to catch up.
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
+# Start IPv6 TCP server on vm1.
+NETNS_DAEMONIZE([vm1], [nc -k -l 4242::2 4242], [nc-vm1.pid])
+
+# Make sure connecting to the VIP works.
+NS_CHECK_EXEC([vm2], [nc 6666::1 666 -p 2000 -z])
+
+# Start IPv6 TCP connection to VIP from vm2.
+NS_CHECK_EXEC([vm2], [nc 6666::1 666 -p 2001 -z])
+
+# Check conntrack. We expect two entries:
+# - one in vm1's zone (firewall)
+# - one in vm2's zone (dnat)
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
+grep "orig=.src=4242::3" | \
+sed -e 's/port=2001/port=<clnt_s_port>/g' \
+ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=<rnd_port>/g' \
+ -e 's/state=[[0-9_A-Z]]*/state=<cleared>/g' \
+ -e 's/zone=[[0-9]]*/zone=<cleared>/' | sort], [0], [dnl
+tcp,orig=(src=4242::3,dst=4242::2,sport=<clnt_s_port>,dport=4242),reply=(src=4242::2,dst=4242::3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=4242::3,dst=6666::1,sport=<clnt_s_port>,dport=666),reply=(src=4242::2,dst=4242::3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,labels=0x2,protoinfo=(state=<cleared>)
+])
+
+# Start IPv6 TCP connection to backend IP from vm2 which would require
+# additional source port translation to avoid a tuple conflict.
+NS_CHECK_EXEC([vm2], [nc 4242::2 4242 -p 2001 -z])
+
+# Check conntrack. We expect three entries:
+# - one in vm1's zone (firewall) - reused from the previous connection.
+# - one in vm2's zone (dnat) - still in TIME_WAIT after the previous connection.
+# - one in vm2's zone (firewall + additional all-zero SNAT)
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep 2001 | \
+grep "orig=.src=4242::3" | \
+sed -e 's/port=2001/port=<clnt_s_port>/g' \
+ -e 's/sport=4242,dport=[[0-9]]\+/sport=4242,dport=<rnd_port>/g' \
+ -e 's/state=[[0-9_A-Z]]*/state=<cleared>/g' \
+ -e 's/zone=[[0-9]]*/zone=<cleared>/' | sort], [0], [dnl
+tcp,orig=(src=4242::3,dst=4242::2,sport=<clnt_s_port>,dport=4242),reply=(src=4242::2,dst=4242::3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=4242::3,dst=4242::2,sport=<clnt_s_port>,dport=4242),reply=(src=4242::2,dst=4242::3,sport=4242,dport=<rnd_port>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=4242::3,dst=6666::1,sport=<clnt_s_port>,dport=666),reply=(src=4242::2,dst=4242::3,sport=4242,dport=<clnt_s_port>),zone=<cleared>,labels=0x2,protoinfo=(state=<cleared>)
+])
+
+AT_CLEANUP
+])
+
# When a lport is released on a chassis, ovn-controller was
# not clearing some of the flowss in the table 33 leading
# to packet drops if ct() is hit.
@@ -5527,7 +5717,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -5689,7 +5879,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -5738,7 +5928,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
@@ -5831,7 +6021,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
@@ -5893,7 +6083,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
@@ -6044,7 +6234,7 @@ as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
@@ -6091,7 +6281,6 @@ check ovn-nbctl pg-add pg1 sw1-p1
check ovn-nbctl acl-add pg1 from-lport 1002 "ip" allow-related
check ovn-nbctl acl-add pg1 to-lport 1002 "ip" allow-related
-
OVN_POPULATE_ARP
ovn-nbctl --wait=hv sync
@@ -6179,5 +6368,117 @@ OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
/connection dropped.*/d"])
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP(ovn -- DNAT LR hairpin IPv4)
+AT_KEYWORDS(hairpin)
+
+ovn_start
+
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+start_daemon ovn-controller
+
+# Logical network:
+# Two VMs
+# * VM1 with IP address 192.168.100.5
+# * VM2 with IP address 192.168.100.6
+# The VMs connect to logical switch ls1.
+#
+# An external router with IP address 172.18.1.2. We simulate this with a network namespace.
+# There will be no traffic going here in this test.
+# The external router connects to logical switch ls-pub
+#
+# One logical router (lr1) connects to ls1 and ls-pub. The router port connected to ls-pub is
+# a gateway port.
+# * The subnet connected to ls1 is 192.168.100.0/24. The Router IP address is 192.168.100.1
+#   * The subnet connected to ls-pub is 172.18.1.0/24. The Router IP address is 172.18.1.1
+# lr1 has the following attributes:
+# * It has a "default" static route that sends traffic out the gateway router port.
+# * It has a DNAT rule that translates 172.18.2.10 to 192.168.100.6 (VM2)
+#
+# In this test, we want to ensure that a ping from VM1 to IP address 172.18.2.10 reaches VM2.
+
+ovn-nbctl ls-add ls1
+ovn-nbctl lsp-add ls1 vm1 -- lsp-set-addresses vm1 "00:00:00:00:00:05 192.168.100.5"
+ovn-nbctl lsp-add ls1 vm2 -- lsp-set-addresses vm2 "00:00:00:00:00:06 192.168.100.6"
+
+ovn-nbctl ls-add ls-pub
+ovn-nbctl lsp-add ls-pub ext-router -- lsp-set-addresses ext-router "00:00:00:00:01:02 172.18.1.2"
+
+ovn-nbctl lr-add lr1
+ovn-nbctl lrp-add lr1 lr1-ls1 00:00:00:00:00:01 192.168.100.1/24
+ovn-nbctl lsp-add ls1 ls1-lr1 \
+ -- lsp-set-type ls1-lr1 router \
+ -- lsp-set-addresses ls1-lr1 00:00:00:00:00:01 \
+ -- lsp-set-options ls1-lr1 router-port=lr1-ls1
+
+ovn-nbctl lrp-add lr1 lr1-ls-pub 00:00:00:00:01:01 172.18.1.1/24
+ovn-nbctl lrp-set-gateway-chassis lr1-ls-pub hv1
+ovn-nbctl lsp-add ls-pub ls-pub-lr1 \
+ -- lsp-set-type ls-pub-lr1 router \
+ -- lsp-set-addresses ls-pub-lr1 00:00:00:00:01:01 \
+ -- lsp-set-options ls-pub-lr1 router-port=lr1-ls-pub
+
+ovn-nbctl lr-nat-add lr1 snat 172.18.1.1 192.168.100.0/24
+ovn-nbctl lr-nat-add lr1 dnat_and_snat 172.18.2.10 192.168.100.6
+ovn-nbctl lr-route-add lr1 0.0.0.0/0 172.18.1.2
+
+#ls1_uuid=$(fetch_column Port_Binding datapath logical_port=vm1)
+#ovn-sbctl create MAC_Binding ip=172.18.2.10 datapath=$ls1_uuid logical_port=vm2 mac="00:00:00:00:00:06"
+
+OVN_POPULATE_ARP
+ovn-nbctl --wait=hv sync
+
+ADD_NAMESPACES(vm1)
+ADD_VETH(vm1, vm1, br-int, "192.168.100.5/24", "00:00:00:00:00:05", \
+ "192.168.100.1")
+
+ADD_NAMESPACES(vm2)
+ADD_VETH(vm2, vm2, br-int, "192.168.100.6/24", "00:00:00:00:00:06", \
+ "192.168.100.1")
+
+ADD_NAMESPACES(ext-router)
+ADD_VETH(ext-router, ext-router, br-int, "172.18.1.2/24", "00:00:00:00:01:02", \
+ "172.18.1.1")
+
+# Let's take a quick look at the logical flows
+ovn-sbctl lflow-list
+
+# Let's check what ovn-trace says...
+ovn-trace ls1 'inport == "vm1" && eth.src == 00:00:00:00:00:05 && ip4.src == 192.168.100.5 && eth.dst == 00:00:00:00:00:01 && ip4.dst == 172.18.2.10 && ip.ttl == 32'
+
+# A ping from vm1 should hairpin in lr1 and successfully DNAT to vm2
+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 172.18.2.10 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+kill $(pidof ovn-controller)
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
+/.*terminating with signal 15.*/d"])
+
AT_CLEANUP
])
diff --git a/tests/testsuite.at b/tests/testsuite.at
index ddc3f11d6..b716a1ad9 100644
--- a/tests/testsuite.at
+++ b/tests/testsuite.at
@@ -27,6 +27,7 @@ m4_include([tests/ovn.at])
m4_include([tests/ovn-performance.at])
m4_include([tests/ovn-northd.at])
m4_include([tests/ovn-nbctl.at])
+m4_include([tests/ovn-features.at])
m4_include([tests/ovn-lflow-cache.at])
m4_include([tests/ovn-ofctrl-seqno.at])
m4_include([tests/ovn-sbctl.at])
diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c
index dc13fa9ca..3a0b7c3e3 100644
--- a/utilities/ovn-nbctl.c
+++ b/utilities/ovn-nbctl.c
@@ -805,6 +805,8 @@ static void
nbctl_pre_sync(struct ctl_context *base OVS_UNUSED)
{
force_wait = true;
+ /* Monitor nb_cfg to detect and handle potential overflows. */
+ ovsdb_idl_add_column(base->idl, &nbrec_nb_global_col_nb_cfg);
}
static void
@@ -3976,6 +3978,8 @@ nbctl_pre_lr_route_add(struct ctl_context *ctx)
ovsdb_idl_add_column(ctx->idl, &nbrec_logical_router_col_name);
ovsdb_idl_add_column(ctx->idl, &nbrec_logical_router_col_static_routes);
+ ovsdb_idl_add_column(ctx->idl, &nbrec_logical_router_port_col_name);
+
ovsdb_idl_add_column(ctx->idl, &nbrec_bfd_col_dst_ip);
ovsdb_idl_add_column(ctx->idl,
@@ -3992,6 +3996,10 @@ nbctl_pre_lr_route_add(struct ctl_context *ctx)
&nbrec_logical_router_static_route_col_options);
}
+static char * OVS_WARN_UNUSED_RESULT
+lrp_by_name_or_uuid(struct ctl_context *ctx, const char *id, bool must_exist,
+ const struct nbrec_logical_router_port **lrp_p);
+
static void
nbctl_lr_route_add(struct ctl_context *ctx)
{
@@ -4001,6 +4009,7 @@ nbctl_lr_route_add(struct ctl_context *ctx)
ctx->error = error;
return;
}
+ const struct nbrec_logical_router_port *out_lrp = NULL;
char *prefix = NULL, *next_hop = NULL;
const char *policy = shash_find_data(&ctx->options, "--policy");
@@ -4034,9 +4043,15 @@ nbctl_lr_route_add(struct ctl_context *ctx)
? normalize_ipv6_addr_str(ctx->argv[3])
: normalize_ipv4_addr_str(ctx->argv[3]);
if (!next_hop) {
- ctl_error(ctx, "bad %s nexthop argument: %s",
- v6_prefix ? "IPv6" : "IPv4", ctx->argv[3]);
- goto cleanup;
+            /* check if it is an output port. */
+ error = lrp_by_name_or_uuid(ctx, ctx->argv[3], true, &out_lrp);
+ if (error) {
+ ctl_error(ctx, "bad %s nexthop argument: %s",
+ v6_prefix ? "IPv6" : "IPv4", ctx->argv[3]);
+ free(error);
+ goto cleanup;
+ }
+ next_hop = "";
}
}
@@ -4063,6 +4078,15 @@ nbctl_lr_route_add(struct ctl_context *ctx)
}
}
+ if (ctx->argc == 5) {
+ /* validate output port. */
+ error = lrp_by_name_or_uuid(ctx, ctx->argv[4], true, &out_lrp);
+ if (error) {
+ ctx->error = error;
+ goto cleanup;
+ }
+ }
+
bool may_exist = shash_find(&ctx->options, "--may-exist") != NULL;
bool ecmp_symmetric_reply = shash_find(&ctx->options,
"--ecmp-symmetric-reply") != NULL;
@@ -4081,7 +4105,7 @@ nbctl_lr_route_add(struct ctl_context *ctx)
ctl_error(ctx, "bfd dst_ip cannot be discard.");
goto cleanup;
}
- if (ctx->argc == 5) {
+ if (out_lrp) {
if (is_discard_route) {
ctl_error(ctx, "outport is not valid for discard routes.");
goto cleanup;
@@ -4104,22 +4128,22 @@ nbctl_lr_route_add(struct ctl_context *ctx)
nbrec_logical_router_static_route_verify_nexthop(route);
nbrec_logical_router_static_route_set_ip_prefix(route, prefix);
nbrec_logical_router_static_route_set_nexthop(route, next_hop);
- if (ctx->argc == 5) {
+ if (out_lrp) {
nbrec_logical_router_static_route_set_output_port(
- route, ctx->argv[4]);
+ route, out_lrp->name);
}
if (policy) {
nbrec_logical_router_static_route_set_policy(route, policy);
}
if (bfd) {
if (!nb_bt) {
- if (ctx->argc != 5) {
+ if (!out_lrp) {
ctl_error(ctx, "insert entry in the BFD table failed");
goto cleanup;
}
nb_bt = nbrec_bfd_insert(ctx->txn);
nbrec_bfd_set_dst_ip(nb_bt, next_hop);
- nbrec_bfd_set_logical_port(nb_bt, ctx->argv[4]);
+ nbrec_bfd_set_logical_port(nb_bt, out_lrp->name);
}
nbrec_logical_router_static_route_set_bfd(route, nb_bt);
}
@@ -4142,8 +4166,9 @@ nbctl_lr_route_add(struct ctl_context *ctx)
route = nbrec_logical_router_static_route_insert(ctx->txn);
nbrec_logical_router_static_route_set_ip_prefix(route, prefix);
nbrec_logical_router_static_route_set_nexthop(route, next_hop);
- if (ctx->argc == 5) {
- nbrec_logical_router_static_route_set_output_port(route, ctx->argv[4]);
+ if (out_lrp) {
+ nbrec_logical_router_static_route_set_output_port(route,
+ out_lrp->name);
}
if (policy) {
nbrec_logical_router_static_route_set_policy(route, policy);
@@ -4159,19 +4184,21 @@ nbctl_lr_route_add(struct ctl_context *ctx)
nbrec_logical_router_update_static_routes_addvalue(lr, route);
if (bfd) {
if (!nb_bt) {
- if (ctx->argc != 5) {
+ if (!out_lrp) {
ctl_error(ctx, "insert entry in the BFD table failed");
goto cleanup;
}
nb_bt = nbrec_bfd_insert(ctx->txn);
nbrec_bfd_set_dst_ip(nb_bt, next_hop);
- nbrec_bfd_set_logical_port(nb_bt, ctx->argv[4]);
+ nbrec_bfd_set_logical_port(nb_bt, out_lrp->name);
}
nbrec_logical_router_static_route_set_bfd(route, nb_bt);
}
cleanup:
- free(next_hop);
+ if (next_hop && strlen(next_hop)) {
+ free(next_hop);
+ }
free(prefix);
}
@@ -5847,12 +5874,18 @@ print_route(const struct nbrec_logical_router_static_route *route,
{
char *prefix = normalize_prefix_str(route->ip_prefix);
- char *next_hop = !strcmp(route->nexthop, "discard")
- ? xasprintf("discard")
- : normalize_prefix_str(route->nexthop);
+ char *next_hop = "";
+
+ if (!strcmp(route->nexthop, "discard")) {
+ next_hop = xasprintf("discard");
+ } else if (strlen(route->nexthop)) {
+ next_hop = normalize_prefix_str(route->nexthop);
+ }
ds_put_format(s, "%25s %25s", prefix, next_hop);
free(prefix);
- free(next_hop);
+ if (strlen(next_hop)) {
+ free(next_hop);
+ }
if (route->policy) {
ds_put_format(s, " %s", route->policy);