diff --git a/SOURCES/openvswitch-3.1.0.patch b/SOURCES/openvswitch-3.1.0.patch
index 81d32a3..860eccb 100644
--- a/SOURCES/openvswitch-3.1.0.patch
+++ b/SOURCES/openvswitch-3.1.0.patch
@@ -49,6 +49,81 @@ index a5ad222c4..83cc8e010 100644
openvswitch (3.1.0-1) unstable; urgency=low
* New upstream version
+diff --git a/include/openvswitch/meta-flow.h b/include/openvswitch/meta-flow.h
+index 045dce8f5..3b0220aaa 100644
+--- a/include/openvswitch/meta-flow.h
++++ b/include/openvswitch/meta-flow.h
+@@ -2366,6 +2366,10 @@ void mf_format_subvalue(const union mf_subvalue *subvalue, struct ds *s);
+ void field_array_set(enum mf_field_id id, const union mf_value *,
+ struct field_array *);
+
++/* Mask the required l3 prerequisites if a 'set' action occurs. */
++void mf_set_mask_l3_prereqs(const struct mf_field *, const struct flow *,
++ struct flow_wildcards *);
++
+ #ifdef __cplusplus
+ }
+ #endif
+diff --git a/lib/classifier.c b/lib/classifier.c
+index 0a89626cc..18dbfc83a 100644
+--- a/lib/classifier.c
++++ b/lib/classifier.c
+@@ -1695,6 +1695,8 @@ find_match_wc(const struct cls_subtable *subtable, ovs_version_t version,
+ const struct cls_match *rule = NULL;
+ struct flowmap stages_map = FLOWMAP_EMPTY_INITIALIZER;
+ unsigned int mask_offset = 0;
++ bool adjust_ports_mask = false;
++ ovs_be32 ports_mask;
+ int i;
+
+ /* Try to finish early by checking fields in segments. */
+@@ -1722,6 +1724,9 @@ find_match_wc(const struct cls_subtable *subtable, ovs_version_t version,
+ subtable->index_maps[i], flow, wc)) {
+ goto no_match;
+ }
++ /* Accumulate the map used so far. */
++ stages_map = flowmap_or(stages_map, subtable->index_maps[i]);
++
+ hash = flow_hash_in_minimask_range(flow, &subtable->mask,
+ subtable->index_maps[i],
+ &mask_offset, &basis);
+@@ -1731,14 +1736,16 @@ find_match_wc(const struct cls_subtable *subtable, ovs_version_t version,
+ * unwildcarding all the ports bits, use the ports trie to figure out a
+ * smaller set of bits to unwildcard. */
+ unsigned int mbits;
+- ovs_be32 value, plens, mask;
++ ovs_be32 value, plens;
+
+- mask = miniflow_get_ports(&subtable->mask.masks);
+- value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
++ ports_mask = miniflow_get_ports(&subtable->mask.masks);
++ value = ((OVS_FORCE ovs_be32 *) flow)[TP_PORTS_OFS32] & ports_mask;
+ mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);
+
+- ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
+- mask & be32_prefix_mask(mbits);
++ ports_mask &= be32_prefix_mask(mbits);
++ ports_mask |= ((OVS_FORCE ovs_be32 *) &wc->masks)[TP_PORTS_OFS32];
++
++ adjust_ports_mask = true;
+
+ goto no_match;
+ }
+@@ -1751,6 +1758,14 @@ no_match:
+ /* Unwildcard the bits in stages so far, as they were used in determining
+ * there is no match. */
+ flow_wildcards_fold_minimask_in_map(wc, &subtable->mask, stages_map);
++ if (adjust_ports_mask) {
++ /* This has to be done after updating the flow wildcards, so that the
++ * narrower ports mask overwrites the one folded in from the stages
++ * map. We can't simply disable the corresponding bit in the stages
++ * map, because it has 64-bit resolution, i.e., one bit covers not
++ * only tp_src/dst, but also ct_tp_src/dst, which are not covered by
++ * the trie. */
++ ((OVS_FORCE ovs_be32 *) &wc->masks)[TP_PORTS_OFS32] = ports_mask;
++ }
+ return NULL;
+ }
+
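The hunk above defers writing the transport-ports mask until after the per-stage wildcards have been folded in. The underlying arithmetic is small: the ports trie reports how many prefix bits it examined, and only that many bits of the subtable's ports mask need to be unwildcarded. Below is a standalone sketch of that computation; prefix_mask_be32() is a hypothetical stand-in for OVS's own helper, and the values are illustrative only.

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* htonl(), ntohl() */

    /* Hypothetical stand-in for be32_prefix_mask(): a big-endian mask
     * covering the 'plen' most significant bits of a 32-bit value (here,
     * the combined tp_src/tp_dst pair). */
    static uint32_t
    prefix_mask_be32(unsigned int plen)
    {
        return plen ? htonl(~0u << (32 - plen)) : 0;
    }

    int
    main(void)
    {
        /* The subtable masks both ports fully; the trie says only 20 bits
         * of the value were needed to prove there is no match. */
        uint32_t subtable_ports_mask = htonl(0xffffffff);
        unsigned int mbits = 20;

        /* Narrow the unwildcarded bits to what the trie lookup actually
         * used, then OR in whatever was already unwildcarded. */
        uint32_t already_unwildcarded = htonl(0x00000000);
        uint32_t ports_mask = subtable_ports_mask & prefix_mask_be32(mbits);

        ports_mask |= already_unwildcarded;
        printf("final ports mask: %#010x\n", (unsigned) ntohl(ports_mask));
        return 0;
    }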
diff --git a/lib/conntrack.c b/lib/conntrack.c
index 524670e45..8cf7779c6 100644
--- a/lib/conntrack.c
@@ -68,6 +143,397 @@ index 524670e45..8cf7779c6 100644
}
ct->next_sweep = (i < N_EXP_LISTS) ? i : 0;
+diff --git a/lib/db-ctl-base.c b/lib/db-ctl-base.c
+index 134496ef3..5d2635946 100644
+--- a/lib/db-ctl-base.c
++++ b/lib/db-ctl-base.c
+@@ -1492,7 +1492,7 @@ cmd_add(struct ctl_context *ctx)
+ const struct ovsdb_idl_column *column;
+ const struct ovsdb_idl_row *row;
+ const struct ovsdb_type *type;
+- struct ovsdb_datum new;
++ struct ovsdb_datum old;
+ int i;
+
+ ctx->error = get_table(table_name, &table);
+@@ -1516,13 +1516,7 @@ cmd_add(struct ctl_context *ctx)
+ }
+
+ type = &column->type;
+-
+- if (ctx->last_command) {
+- ovsdb_datum_init_empty(&new);
+- } else {
+- ovsdb_datum_clone(&new, ovsdb_idl_read(row, column));
+- }
+-
++ ovsdb_datum_clone(&old, ovsdb_idl_read(row, column));
+ for (i = 4; i < ctx->argc; i++) {
+ struct ovsdb_type add_type;
+ struct ovsdb_datum add;
+@@ -1533,41 +1527,23 @@ cmd_add(struct ctl_context *ctx)
+ ctx->error = ovsdb_datum_from_string(&add, &add_type, ctx->argv[i],
+ ctx->symtab);
+ if (ctx->error) {
+- ovsdb_datum_destroy(&new, &column->type);
++ ovsdb_datum_destroy(&old, &column->type);
+ return;
+ }
+- ovsdb_datum_union(&new, &add, type);
++ ovsdb_datum_union(&old, &add, type);
+ ovsdb_datum_destroy(&add, type);
+ }
+-
+- if (!ctx->last_command && new.n > type->n_max) {
++ if (old.n > type->n_max) {
+ ctl_error(ctx, "\"add\" operation would put %u %s in column %s of "
+ "table %s but the maximum number is %u",
+- new.n,
++ old.n,
+ type->value.type == OVSDB_TYPE_VOID ? "values" : "pairs",
+ column->name, table->name, type->n_max);
+- ovsdb_datum_destroy(&new, &column->type);
++ ovsdb_datum_destroy(&old, &column->type);
+ return;
+ }
+-
+- if (ctx->last_command) {
+- /* Partial updates can only be made one by one. */
+- for (i = 0; i < new.n; i++) {
+- struct ovsdb_datum *datum = xmalloc(sizeof *datum);
+-
+- ovsdb_datum_init_empty(datum);
+- ovsdb_datum_add_from_index_unsafe(datum, &new, i, type);
+- if (ovsdb_type_is_map(type)) {
+- ovsdb_idl_txn_write_partial_map(row, column, datum);
+- } else {
+- ovsdb_idl_txn_write_partial_set(row, column, datum);
+- }
+- }
+- ovsdb_datum_destroy(&new, &column->type);
+- } else {
+- ovsdb_idl_txn_verify(row, column);
+- ovsdb_idl_txn_write(row, column, &new);
+- }
++ ovsdb_idl_txn_verify(row, column);
++ ovsdb_idl_txn_write(row, column, &old);
+
+ invalidate_cache(ctx);
+ }
+diff --git a/lib/dpctl.c b/lib/dpctl.c
+index d12d9b8a5..970373389 100644
+--- a/lib/dpctl.c
++++ b/lib/dpctl.c
+@@ -1713,10 +1713,16 @@ dpctl_flush_conntrack(int argc, const char *argv[],
+ uint16_t zone, *pzone = NULL;
+ int error;
+ int args = argc - 1;
++ int zone_pos = 1;
++
++ if (dp_arg_exists(argc, argv)) {
++ args--;
++ zone_pos = 2;
++ }
+
+ /* Parse zone. */
+- if (args && !strncmp(argv[1], "zone=", 5)) {
+- if (!ovs_scan(argv[1], "zone=%"SCNu16, &zone)) {
++ if (args && !strncmp(argv[zone_pos], "zone=", 5)) {
++ if (!ovs_scan(argv[zone_pos], "zone=%"SCNu16, &zone)) {
+ ds_put_cstr(&ds, "failed to parse zone");
+ error = EINVAL;
+ goto error;
+@@ -1744,7 +1750,7 @@ dpctl_flush_conntrack(int argc, const char *argv[],
+ }
+
+ /* Report error if there is more than one unparsed argument. */
+- if (args > 1) {
++ if (args > 0) {
+ ds_put_cstr(&ds, "invalid arguments");
+ error = EINVAL;
+ goto error;
+diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
+index c9f7179c3..aed2c8fbb 100644
+--- a/lib/dpif-netdev.c
++++ b/lib/dpif-netdev.c
+@@ -9616,6 +9616,7 @@ dpif_netdev_bond_stats_get(struct dpif *dpif, uint32_t bond_id,
+ const struct dpif_class dpif_netdev_class = {
+ "netdev",
+ true, /* cleanup_required */
++ true, /* synced_dp_layers */
+ dpif_netdev_init,
+ dpif_netdev_enumerate,
+ dpif_netdev_port_open_type,
+diff --git a/lib/dpif-netlink.c b/lib/dpif-netlink.c
+index 026b0daa8..2f829f589 100644
+--- a/lib/dpif-netlink.c
++++ b/lib/dpif-netlink.c
+@@ -2582,7 +2582,7 @@ dpif_netlink_calculate_n_handlers(void)
+ n_handlers = MIN(next_prime_num, total_cores);
+ }
+
+- return n_handlers;
++ return MAX(n_handlers, 1);
+ }
+
+ static int
+@@ -4515,6 +4515,7 @@ dpif_netlink_cache_set_size(struct dpif *dpif_, uint32_t level, uint32_t size)
+ const struct dpif_class dpif_netlink_class = {
+ "system",
+ false, /* cleanup_required */
++ false, /* synced_dp_layers */
+ NULL, /* init */
+ dpif_netlink_enumerate,
+ NULL,
+diff --git a/lib/dpif-provider.h b/lib/dpif-provider.h
+index 12477a24f..b8ead8a02 100644
+--- a/lib/dpif-provider.h
++++ b/lib/dpif-provider.h
+@@ -127,6 +127,14 @@ struct dpif_class {
+ * datapaths that can not exist without it (e.g. netdev datapath). */
+ bool cleanup_required;
+
++ /* If 'true', the specific dpif implementation synchronizes the various
++ * datapath implementation layers, i.e., the dpif's layer in combination
++ * with the underlying netdev offload layers. For example, dpif-netlink
++ * does not sync its kernel flows with the tc ones, i.e., only one gets
++ * installed. On the other hand, dpif-netdev installs both flows,
++ * internally keeps track of both, and represents them as one. */
++ bool synced_dp_layers;
++
+ /* Called when the dpif provider is registered, typically at program
+ * startup. Returning an error from this function will prevent any
+ * datapath with this class from being created.
+diff --git a/lib/dpif.c b/lib/dpif.c
+index fe4db83fb..3305401fe 100644
+--- a/lib/dpif.c
++++ b/lib/dpif.c
+@@ -2109,3 +2109,9 @@ dpif_cache_set_size(struct dpif *dpif, uint32_t level, uint32_t size)
+ ? dpif->dpif_class->cache_set_size(dpif, level, size)
+ : EOPNOTSUPP;
+ }
++
++bool
++dpif_synced_dp_layers(struct dpif *dpif)
++{
++ return dpif->dpif_class->synced_dp_layers;
++}
+diff --git a/lib/dpif.h b/lib/dpif.h
+index 6cb4dae6d..129cbf6a1 100644
+--- a/lib/dpif.h
++++ b/lib/dpif.h
+@@ -939,6 +939,7 @@ int dpif_get_pmds_for_port(const struct dpif * dpif, odp_port_t port_no,
+ char *dpif_get_dp_version(const struct dpif *);
+ bool dpif_supports_tnl_push_pop(const struct dpif *);
+ bool dpif_supports_explicit_drop_action(const struct dpif *);
++bool dpif_synced_dp_layers(struct dpif *);
+
+ /* Log functions. */
+ struct vlog_module;
+diff --git a/lib/meta-flow.c b/lib/meta-flow.c
+index c576ae620..474344194 100644
+--- a/lib/meta-flow.c
++++ b/lib/meta-flow.c
+@@ -3676,3 +3676,28 @@ mf_bitmap_not(struct mf_bitmap x)
+ bitmap_not(x.bm, MFF_N_IDS);
+ return x;
+ }
++
++void
++mf_set_mask_l3_prereqs(const struct mf_field *mf, const struct flow *fl,
++ struct flow_wildcards *wc)
++{
++ if (is_ip_any(fl) &&
++ ((mf->id == MFF_IPV4_SRC) ||
++ (mf->id == MFF_IPV4_DST) ||
++ (mf->id == MFF_IPV6_SRC) ||
++ (mf->id == MFF_IPV6_DST) ||
++ (mf->id == MFF_IPV6_LABEL) ||
++ (mf->id == MFF_IP_DSCP) ||
++ (mf->id == MFF_IP_ECN) ||
++ (mf->id == MFF_IP_TTL))) {
++ WC_MASK_FIELD(wc, nw_proto);
++ } else if ((fl->dl_type == htons(ETH_TYPE_ARP)) &&
++ ((mf->id == MFF_ARP_OP) ||
++ (mf->id == MFF_ARP_SHA) ||
++ (mf->id == MFF_ARP_THA) ||
++ (mf->id == MFF_ARP_SPA) ||
++ (mf->id == MFF_ARP_TPA))) {
++ /* Mask only the lower 8 bits; for ARP packets, nw_proto holds the
++ * low 8 bits of the ARP opcode. */
++ wc->masks.nw_proto = 0xff;
++ }
++}
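Unwildcarding a prerequisite field such as nw_proto amounts to setting every bit of that field's mask, which is what WC_MASK_FIELD() does in the real tree. The following is a simplified, self-contained model of that pattern; the mini_flow/mini_wildcards types and the MINI_WC_MASK_FIELD macro are illustrative stand-ins, not the OVS definitions.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Illustrative miniature of OVS's flow/wildcards pairing. */
    struct mini_flow {
        uint8_t nw_proto;   /* IP protocol, or low 8 bits of the ARP op. */
        uint32_t nw_src;
    };

    struct mini_wildcards {
        struct mini_flow masks;   /* 1-bits matched, 0-bits wildcarded. */
    };

    /* Same idea as WC_MASK_FIELD(): exact-match the whole field. */
    #define MINI_WC_MASK_FIELD(WC, FIELD) \
        memset(&(WC)->masks.FIELD, 0xff, sizeof (WC)->masks.FIELD)

    int
    main(void)
    {
        struct mini_wildcards wc;

        memset(&wc, 0, sizeof wc);          /* Start fully wildcarded. */
        MINI_WC_MASK_FIELD(&wc, nw_proto);  /* L3 prerequisite for a 'set'. */

        printf("nw_proto mask: %#x\n", (unsigned) wc.masks.nw_proto);
        return 0;
    }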
+diff --git a/lib/netdev-offload-tc.c b/lib/netdev-offload-tc.c
+index 4c78c4816..c38247423 100644
+--- a/lib/netdev-offload-tc.c
++++ b/lib/netdev-offload-tc.c
+@@ -276,8 +276,9 @@ del_filter_and_ufid_mapping(struct tcf_id *id, const ovs_u128 *ufid,
+ }
+
+ err = tc_del_flower_filter(id);
+- if (!err) {
++ if (!err || err == ENODEV) {
+ del_ufid_tc_mapping(ufid);
++ return 0;
+ }
+ return err;
+ }
+@@ -871,7 +872,7 @@ parse_tc_flower_to_actions__(struct tc_flower *flower, struct ofpbuf *buf,
+ outport =
+ netdev_ifindex_to_odp_port(action->out.ifindex_out);
+ if (!outport) {
+- return ENOENT;
++ return -ENOENT;
+ }
+ }
+ nl_msg_put_u32(buf, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(outport));
+@@ -964,7 +965,7 @@ parse_tc_flower_to_actions__(struct tc_flower *flower, struct ofpbuf *buf,
+ uint32_t meter_id;
+
+ if (police_idx_lookup(action->police.index, &meter_id)) {
+- return ENOENT;
++ return -ENOENT;
+ }
+ nl_msg_put_u32(buf, OVS_ACTION_ATTR_METER, meter_id);
+ }
+@@ -983,6 +984,9 @@ parse_tc_flower_to_actions__(struct tc_flower *flower, struct ofpbuf *buf,
+ buf, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
+ i = parse_tc_flower_to_actions__(flower, buf, i + 1,
+ action->police.result_jump);
++ if (i < 0) {
++ return i;
++ }
+ nl_msg_end_nested(buf, act_offset);
+
+ act_offset = nl_msg_start_nested(
+@@ -994,6 +998,9 @@ parse_tc_flower_to_actions__(struct tc_flower *flower, struct ofpbuf *buf,
+ }
+ if (jump != 0) {
+ i = parse_tc_flower_to_actions__(flower, buf, i, jump);
++ if (i < 0) {
++ return i;
++ }
+ }
+ nl_msg_end_nested(buf, act_offset);
+
+@@ -1013,11 +1020,11 @@ parse_tc_flower_to_actions__(struct tc_flower *flower, struct ofpbuf *buf,
+ return i;
+ }
+
+-static void
++static int
+ parse_tc_flower_to_actions(struct tc_flower *flower,
+ struct ofpbuf *buf)
+ {
+- parse_tc_flower_to_actions__(flower, buf, 0, 0);
++ return parse_tc_flower_to_actions__(flower, buf, 0, 0);
+ }
+
+ static int
+@@ -1030,9 +1037,10 @@ parse_tc_flower_to_match(const struct netdev *netdev,
+ struct ofpbuf *buf,
+ bool terse)
+ {
+- size_t act_off;
+ struct tc_flower_key *key = &flower->key;
+ struct tc_flower_key *mask = &flower->mask;
++ size_t act_off;
++ int err;
+
+ if (terse) {
+ return parse_tc_flower_terse_to_match(flower, match, stats, attrs);
+@@ -1229,7 +1237,10 @@ parse_tc_flower_to_match(const struct netdev *netdev,
+ }
+
+ act_off = nl_msg_start_nested(buf, OVS_FLOW_ATTR_ACTIONS);
+- parse_tc_flower_to_actions(flower, buf);
++ err = parse_tc_flower_to_actions(flower, buf);
++ if (err < 0) {
++ return -err;
++ }
+ nl_msg_end_nested(buf, act_off);
+
+ *actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));
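The hunks above switch parse_tc_flower_to_actions__() to the usual "non-negative index or negative errno" convention, so nested parse failures propagate instead of being silently treated as valid indexes. A minimal sketch of that convention follows, using an invented parse_actions() example rather than the real flower parser.

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative parser: a non-negative return is the index just past
     * the last consumed action, a negative return is -errno. */
    static int
    parse_actions(const int *actions, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (actions[i] < 0) {
                return -ENOENT;   /* Unknown action: report it. */
            }
        }
        return i;
    }

    int
    main(void)
    {
        int good[] = { 1, 2, 3 };
        int bad[] = { 1, -1, 3 };
        int ret = parse_actions(bad, 3);

        if (ret < 0) {
            /* Callers convert back to a positive errno, as the hunk above
             * does with 'return -err;'. */
            printf("parse failed: errno=%d\n", -ret);
        }
        printf("parsed %d actions\n", parse_actions(good, 3));
        return 0;
    }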
+@@ -1272,8 +1283,8 @@ netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
+ continue;
+ }
+
+- if (flower.act_cookie.len) {
+- *ufid = *((ovs_u128 *) flower.act_cookie.data);
++ if (flower.act_cookie.len >= sizeof *ufid) {
++ *ufid = get_32aligned_u128(flower.act_cookie.data);
+ } else if (!find_ufid(netdev, &id, ufid)) {
+ continue;
+ }
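The old cast assumed the action cookie was aligned strictly enough for a direct 128-bit load, while netlink attribute payloads only guarantee 4-byte alignment; get_32aligned_u128() avoids that. The portable idea behind such helpers is a memcpy-based read, sketched here with an illustrative u128_id type rather than ovs_u128.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Illustrative 128-bit ID, analogous to ovs_u128. */
    struct u128_id {
        uint64_t lo;
        uint64_t hi;
    };

    /* Read a 128-bit value from a buffer that is only 4-byte aligned.
     * memcpy lets the compiler pick a safe access pattern. */
    static struct u128_id
    read_u128_unaligned(const void *p)
    {
        struct u128_id id;

        memcpy(&id, p, sizeof id);
        return id;
    }

    int
    main(void)
    {
        /* 20-byte buffer: 4 bytes of netlink-ish header, then the 16-byte
         * ID, so the ID starts on a 4-byte (not 16-byte) boundary. */
        unsigned char buf[20] = { 0 };
        struct u128_id id;

        buf[4] = 0x2a;   /* First byte of the ID. */
        id = read_u128_unaligned(buf + 4);
        printf("lo=%#llx hi=%#llx\n",
               (unsigned long long) id.lo, (unsigned long long) id.hi);
        return 0;
    }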
+@@ -2490,15 +2501,23 @@ netdev_tc_flow_get(struct netdev *netdev,
+
+ err = tc_get_flower(&id, &flower);
+ if (err) {
+- VLOG_ERR_RL(&error_rl, "flow get failed (dev %s prio %d handle %d): %s",
++ VLOG_ERR_RL(&error_rl,
++ "flow get failed (dev %s prio %d handle %d): %s",
+ netdev_get_name(netdev), id.prio, id.handle,
+ ovs_strerror(err));
+ return err;
+ }
+
+ in_port = netdev_ifindex_to_odp_port(id.ifindex);
+- parse_tc_flower_to_match(netdev, &flower, match, actions,
+- stats, attrs, buf, false);
++ err = parse_tc_flower_to_match(netdev, &flower, match, actions,
++ stats, attrs, buf, false);
++ if (err) {
++ VLOG_ERR_RL(&error_rl,
++ "flow get parse failed (dev %s prio %d handle %d): %s",
++ netdev_get_name(netdev), id.prio, id.handle,
++ ovs_strerror(err));
++ return err;
++ }
+
+ if (stats) {
+ struct dpif_flow_stats adjust_stats;
+diff --git a/lib/netdev-windows.c b/lib/netdev-windows.c
+index 4ad45ffa1..3fad501e3 100644
+--- a/lib/netdev-windows.c
++++ b/lib/netdev-windows.c
+@@ -156,6 +156,7 @@ netdev_windows_system_construct(struct netdev *netdev_)
+ struct netdev_windows_netdev_info info;
+ struct ofpbuf *buf;
+ int ret;
++ const char *type = NULL;
+
+ /* Query the attributes and runtime status of the netdev. */
+ ret = query_netdev(netdev_get_name(&netdev->up), &info, &buf);
+@@ -167,6 +168,16 @@ netdev_windows_system_construct(struct netdev *netdev_)
+ }
+ ofpbuf_delete(buf);
+
++ /* Don't create the netdev if the OVS type is "internal"
++ * but the type of netdev->up is "system". */
++ type = netdev_get_type(&netdev->up);
++ if (type && !strcmp(type, "system") &&
++ (info.ovs_type == OVS_VPORT_TYPE_INTERNAL)) {
++ VLOG_DBG("construct device %s, ovs_type: %u failed",
++ netdev_get_name(&netdev->up), info.ovs_type);
++ return 1;
++ }
++
+ netdev->change_seq = 1;
+ netdev->dev_type = info.ovs_type;
+ netdev->port_no = info.port_no;
+diff --git a/lib/ovs-thread.c b/lib/ovs-thread.c
+index 2d382f1e8..ac5d2c3d0 100644
+--- a/lib/ovs-thread.c
++++ b/lib/ovs-thread.c
+@@ -674,7 +674,7 @@ count_cpu_cores(void)
+ static int cpu_cores;
+
+ ovs_mutex_lock(&cpu_cores_mutex);
+- if (now - last_updated >= COUNT_CPU_UPDATE_TIME_MS) {
++ if (!last_updated || now - last_updated >= COUNT_CPU_UPDATE_TIME_MS) {
+ last_updated = now;
+ cpu_cores = count_cpu_cores__();
+ }
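The added '!last_updated' check guarantees that the first call still performs the expensive recount, even when the monotonic clock has not yet advanced past the refresh interval. A standalone sketch of this cache-with-refresh-interval pattern follows; the names and the fixed core count are illustrative, and the mutex held by the real code is omitted.

    #include <stdio.h>
    #include <time.h>

    #define REFRESH_INTERVAL_MS 10000

    /* Illustrative stand-ins for time_msec() and count_cpu_cores__(). */
    static long long
    now_msec(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    static int
    expensive_count(void)
    {
        return 8;   /* Pretend this probes sysfs/affinity masks. */
    }

    static int
    cached_count(void)
    {
        static long long last_updated;
        static int value;
        long long now = now_msec();

        /* '!last_updated' forces the first call to populate the cache even
         * if 'now' is still smaller than the refresh interval. */
        if (!last_updated || now - last_updated >= REFRESH_INTERVAL_MS) {
            last_updated = now;
            value = expensive_count();
        }
        return value;
    }

    int
    main(void)
    {
        printf("cores: %d\n", cached_count());
        return 0;
    }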
diff --git a/ofproto/ofproto-dpif-ipfix.c b/ofproto/ofproto-dpif-ipfix.c
index 742eed399..f13478a88 100644
--- a/ofproto/ofproto-dpif-ipfix.c
@@ -362,3 +828,1244 @@ index 742eed399..f13478a88 100644
{
uint64_t export_time_usec;
uint32_t export_time_sec;
+diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
+index e05ffe312..89df92242 100644
+--- a/ofproto/ofproto-dpif-upcall.c
++++ b/ofproto/ofproto-dpif-upcall.c
+@@ -47,17 +47,20 @@
+
+ #define UPCALL_MAX_BATCH 64
+ #define REVALIDATE_MAX_BATCH 50
++#define UINT64_THREE_QUARTERS (UINT64_MAX / 4 * 3)
+
+ VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
+
+ COVERAGE_DEFINE(dumped_duplicate_flow);
+ COVERAGE_DEFINE(dumped_new_flow);
+ COVERAGE_DEFINE(handler_duplicate_upcall);
+-COVERAGE_DEFINE(upcall_ukey_contention);
+-COVERAGE_DEFINE(upcall_ukey_replace);
+ COVERAGE_DEFINE(revalidate_missed_dp_flow);
++COVERAGE_DEFINE(ukey_dp_change);
++COVERAGE_DEFINE(ukey_invalid_stat_reset);
+ COVERAGE_DEFINE(upcall_flow_limit_hit);
+ COVERAGE_DEFINE(upcall_flow_limit_kill);
++COVERAGE_DEFINE(upcall_ukey_contention);
++COVERAGE_DEFINE(upcall_ukey_replace);
+
+ /* A thread that reads upcalls from dpif, forwards each upcall's packet,
+ * and possibly sets up a kernel flow as a cache. */
+@@ -287,6 +290,7 @@ struct udpif_key {
+
+ struct ovs_mutex mutex; /* Guards the following. */
+ struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
++ const char *dp_layer OVS_GUARDED; /* Last known dp_layer. */
+ long long int created OVS_GUARDED; /* Estimate of creation time. */
+ uint64_t dump_seq OVS_GUARDED; /* Tracks udpif->dump_seq. */
+ uint64_t reval_seq OVS_GUARDED; /* Tracks udpif->reval_seq. */
+@@ -780,6 +784,17 @@ udpif_get_n_flows(struct udpif *udpif)
+ atomic_store_relaxed(&udpif->n_flows_timestamp, now);
+ dpif_get_dp_stats(udpif->dpif, &stats);
+ flow_count = stats.n_flows;
++
++ if (!dpif_synced_dp_layers(udpif->dpif)) {
++ /* If the dpif layer does not sync the flows, we need to include
++ * the hardware offloaded flows separately. */
++ uint64_t hw_flows;
++
++ if (!dpif_get_n_offloaded_flows(udpif->dpif, &hw_flows)) {
++ flow_count += hw_flows;
++ }
++ }
++
+ atomic_store_relaxed(&udpif->n_flows, flow_count);
+ ovs_mutex_unlock(&udpif->n_flows_mutex);
+ } else {
+@@ -1766,6 +1781,7 @@ ukey_create__(const struct nlattr *key, size_t key_len,
+ ukey->created = ukey->flow_time = time_msec();
+ memset(&ukey->stats, 0, sizeof ukey->stats);
+ ukey->stats.used = used;
++ ukey->dp_layer = NULL;
+ ukey->xcache = NULL;
+
+ ukey->offloaded = false;
+@@ -2095,10 +2111,12 @@ ukey_delete(struct umap *umap, struct udpif_key *ukey)
+ }
+
+ static bool
+-should_revalidate(const struct udpif *udpif, uint64_t packets,
+- long long int used)
++should_revalidate(const struct udpif *udpif, const struct udpif_key *ukey,
++ uint64_t packets)
++ OVS_REQUIRES(ukey->mutex)
+ {
+ long long int metric, now, duration;
++ long long int used = ukey->stats.used;
+
+ if (!used) {
+ /* Always revalidate the first time a flow is dumped. */
+@@ -2125,8 +2143,12 @@ should_revalidate(const struct udpif *udpif, uint64_t packets,
+ duration = now - used;
+ metric = duration / packets;
+
+- if (metric < 1000 / ofproto_min_revalidate_pps) {
+- /* The flow is receiving more than min-revalidate-pps, so keep it. */
++ if (metric < 1000 / ofproto_min_revalidate_pps ||
++ (ukey->offloaded && duration < ofproto_offloaded_stats_delay)) {
++ /* The flow is receiving more than min-revalidate-pps, so keep it.
++ * Or it's a hardware offloaded flow that might take up to
++ * offloaded-stats-delay milliseconds before its statistics are
++ * updated. Until we are sure the statistics had a chance to be
++ * updated, also keep it. */
+ return true;
+ }
+ return false;
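Expressed as plain arithmetic, the decision above compares the observed average gap between packets against the gap implied by min-revalidate-pps, and additionally keeps offloaded flows whose statistics may not have been synchronized yet. A hedged, simplified sketch of just that final threshold (it omits the earlier early-outs in should_revalidate(), and the parameter names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Keep (i.e., fully revalidate) a flow if it is busy enough, or if it
     * is offloaded and its statistics may simply not have caught up yet. */
    static bool
    keep_for_revalidation(uint64_t packets, long long duration_ms,
                          bool offloaded, unsigned min_revalidate_pps,
                          unsigned offloaded_stats_delay_ms)
    {
        long long metric;

        if (!packets) {
            /* No traffic since the last dump. */
            return false;
        }

        metric = duration_ms / packets;   /* Average ms per packet. */

        return metric < 1000 / min_revalidate_pps
               || (offloaded && duration_ms < offloaded_stats_delay_ms);
    }

    int
    main(void)
    {
        /* 5 packets over 1.5 s is below 5 pps, but the flow is offloaded
         * and younger than the 2000 ms statistics delay, so it is kept. */
        printf("%d\n", keep_for_revalidation(5, 1500, true, 5, 2000));
        /* A similarly slow, non-offloaded flow is not kept. */
        printf("%d\n", keep_for_revalidation(5, 3000, false, 5, 2000));
        return 0;
    }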
+@@ -2324,7 +2346,7 @@ static enum reval_result
+ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
+ const struct dpif_flow_stats *stats,
+ struct ofpbuf *odp_actions, uint64_t reval_seq,
+- struct recirc_refs *recircs, bool offloaded)
++ struct recirc_refs *recircs)
+ OVS_REQUIRES(ukey->mutex)
+ {
+ bool need_revalidate = ukey->reval_seq != reval_seq;
+@@ -2342,8 +2364,15 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
+ ? stats->n_bytes - ukey->stats.n_bytes
+ : 0);
+
++ if (stats->n_packets < ukey->stats.n_packets &&
++ ukey->stats.n_packets < UINT64_THREE_QUARTERS) {
++ /* Report cases where the packet counter is lower than the previous
++ * reading, but exclude the potential wrapping of a uint64_t. */
++ COVERAGE_INC(ukey_invalid_stat_reset);
++ }
++
+ if (need_revalidate) {
+- if (should_revalidate(udpif, push.n_packets, ukey->stats.used)) {
++ if (should_revalidate(udpif, ukey, push.n_packets)) {
+ if (!ukey->xcache) {
+ ukey->xcache = xlate_cache_new();
+ } else {
+@@ -2359,7 +2388,7 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
+
+ /* Stats for deleted flows will be attributed upon flow deletion. Skip. */
+ if (result != UKEY_DELETE) {
+- xlate_push_stats(ukey->xcache, &push, offloaded);
++ xlate_push_stats(ukey->xcache, &push, ukey->offloaded);
+ ukey->stats = *stats;
+ ukey->reval_seq = reval_seq;
+ }
+@@ -2455,6 +2484,15 @@ push_dp_ops(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
+ push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
+ push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
+ push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
++
++ if (stats->n_packets < op->ukey->stats.n_packets &&
++ op->ukey->stats.n_packets < UINT64_THREE_QUARTERS) {
++ /* Report cases where the packet counter is lower than the
++ * previous reading, but exclude the potential wrapping of a
++ * uint64_t. */
++ COVERAGE_INC(ukey_invalid_stat_reset);
++ }
++
+ ovs_mutex_unlock(&op->ukey->mutex);
+ } else {
+ push = stats;
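The UINT64_THREE_QUARTERS guard distinguishes an invalid counter reset from a legitimate uint64_t wrap-around: a wrap can only make the new value smaller when the previous value was already near the top of the range. The same heuristic as a tiny standalone check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define THREE_QUARTERS (UINT64_MAX / 4 * 3)

    /* Returns true when a counter going backwards looks like an invalid
     * reset rather than a plausible wrap of a uint64_t. */
    static bool
    counter_looks_reset(uint64_t previous, uint64_t current)
    {
        return current < previous && previous < THREE_QUARTERS;
    }

    int
    main(void)
    {
        /* Dropping from 1000 to 10 cannot be a wrap: report it. */
        printf("%d\n", counter_looks_reset(1000, 10));
        /* Dropping from near UINT64_MAX to 10 is plausibly a wrap. */
        printf("%d\n", counter_looks_reset(UINT64_MAX - 5, 10));
        return 0;
    }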
+@@ -2759,6 +2797,22 @@ revalidate(struct revalidator *revalidator)
+ continue;
+ }
+
++ ukey->offloaded = f->attrs.offloaded;
++ if (!ukey->dp_layer
++ || (!dpif_synced_dp_layers(udpif->dpif)
++ && strcmp(ukey->dp_layer, f->attrs.dp_layer))) {
++
++ if (ukey->dp_layer) {
++ /* The dp_layer has changed; this is probably due to an
++ * earlier revalidation cycle moving it to/from hardware offload.
++ * In this case we should reset the ukey's stored statistics,
++ * as they are from the deleted DP flow. */
++ COVERAGE_INC(ukey_dp_change);
++ memset(&ukey->stats, 0, sizeof ukey->stats);
++ }
++ ukey->dp_layer = f->attrs.dp_layer;
++ }
++
+ already_dumped = ukey->dump_seq == dump_seq;
+ if (already_dumped) {
+ /* The flow has already been handled during this flow dump
+@@ -2790,8 +2844,7 @@ revalidate(struct revalidator *revalidator)
+ result = UKEY_DELETE;
+ } else {
+ result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
+- reval_seq, &recircs,
+- f->attrs.offloaded);
++ reval_seq, &recircs);
+ }
+ ukey->dump_seq = dump_seq;
+
+@@ -2876,7 +2929,7 @@ revalidator_sweep__(struct revalidator *revalidator, bool purge)
+ COVERAGE_INC(revalidate_missed_dp_flow);
+ memcpy(&stats, &ukey->stats, sizeof stats);
+ result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
+- reval_seq, &recircs, false);
++ reval_seq, &recircs);
+ }
+ if (result != UKEY_KEEP) {
+ /* Clears 'recircs' if filled by revalidate_ukey(). */
+diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
+index a9cf3cbee..cffd733c5 100644
+--- a/ofproto/ofproto-dpif-xlate.c
++++ b/ofproto/ofproto-dpif-xlate.c
+@@ -5211,6 +5211,7 @@ compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
+ }
+
+ ctx->wc->masks.nw_ttl = 0xff;
++ WC_MASK_FIELD(ctx->wc, nw_proto);
+ if (flow->nw_ttl > 1) {
+ flow->nw_ttl--;
+ return false;
+@@ -7128,6 +7129,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+ case OFPACT_SET_IPV4_SRC:
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
++ WC_MASK_FIELD(wc, nw_proto);
+ flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
+ }
+ break;
+@@ -7135,12 +7137,14 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+ case OFPACT_SET_IPV4_DST:
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
++ WC_MASK_FIELD(wc, nw_proto);
+ flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
+ }
+ break;
+
+ case OFPACT_SET_IP_DSCP:
+ if (is_ip_any(flow)) {
++ WC_MASK_FIELD(wc, nw_proto);
+ wc->masks.nw_tos |= IP_DSCP_MASK;
+ flow->nw_tos &= ~IP_DSCP_MASK;
+ flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
+@@ -7149,6 +7153,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+
+ case OFPACT_SET_IP_ECN:
+ if (is_ip_any(flow)) {
++ WC_MASK_FIELD(wc, nw_proto);
+ wc->masks.nw_tos |= IP_ECN_MASK;
+ flow->nw_tos &= ~IP_ECN_MASK;
+ flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
+@@ -7157,6 +7162,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+
+ case OFPACT_SET_IP_TTL:
+ if (is_ip_any(flow)) {
++ WC_MASK_FIELD(wc, nw_proto);
+ wc->masks.nw_ttl = 0xff;
+ flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
+ }
+@@ -7224,6 +7230,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+
+ /* Set the field only if the packet actually has it. */
+ if (mf_are_prereqs_ok(mf, flow, wc)) {
++ mf_set_mask_l3_prereqs(mf, flow, wc);
+ mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
+ mf_set_flow_value_masked(mf, set_field->value,
+ ofpact_set_field_mask(set_field),
+@@ -7280,6 +7287,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+
+ case OFPACT_DEC_TTL:
+ wc->masks.nw_ttl = 0xff;
++ WC_MASK_FIELD(wc, nw_proto);
+ if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
+ return;
+ }
+diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
+index f87e27a8c..fad7342b0 100644
+--- a/ofproto/ofproto-dpif.c
++++ b/ofproto/ofproto-dpif.c
+@@ -714,12 +714,6 @@ close_dpif_backer(struct dpif_backer *backer, bool del)
+ free(backer);
+ }
+
+-/* Datapath port slated for removal from datapath. */
+-struct odp_garbage {
+- struct ovs_list list_node;
+- odp_port_t odp_port;
+-};
+-
+ static void check_support(struct dpif_backer *backer);
+
+ static int
+@@ -729,8 +723,6 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
+ struct dpif_port_dump port_dump;
+ struct dpif_port port;
+ struct shash_node *node;
+- struct ovs_list garbage_list;
+- struct odp_garbage *garbage;
+
+ struct sset names;
+ char *backer_name;
+@@ -792,25 +784,23 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
+ dpif_flow_flush(backer->dpif);
+ }
+
+- /* Loop through the ports already on the datapath and remove any
+- * that we don't need anymore. */
+- ovs_list_init(&garbage_list);
++ /* Loop through the ports already on the datapath and find ones that are
++ * not on the initial OpenFlow ports list. These are stale ports that we
++ * do not need anymore, or tunnel backing interfaces that do not generally
++ * match the names of OpenFlow tunnel ports, or both. Add all of them to
++ * the list of tunnel backers. type_run() will garbage collect those that
++ * are not active tunnel backing interfaces during revalidation. */
+ dpif_port_dump_start(&port_dump, backer->dpif);
+ while (dpif_port_dump_next(&port_dump, &port)) {
+ node = shash_find(&init_ofp_ports, port.name);
+ if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
+- garbage = xmalloc(sizeof *garbage);
+- garbage->odp_port = port.port_no;
+- ovs_list_push_front(&garbage_list, &garbage->list_node);
++ simap_put(&backer->tnl_backers, port.name,
++ odp_to_u32(port.port_no));
++ backer->need_revalidate = REV_RECONFIGURE;
+ }
+ }
+ dpif_port_dump_done(&port_dump);
+
+- LIST_FOR_EACH_POP (garbage, list_node, &garbage_list) {
+- dpif_port_del(backer->dpif, garbage->odp_port, false);
+- free(garbage);
+- }
+-
+ shash_add(&all_dpif_backers, type, backer);
+
+ check_support(backer);
+diff --git a/ofproto/ofproto-provider.h b/ofproto/ofproto-provider.h
+index a84ddc1d0..143ded690 100644
+--- a/ofproto/ofproto-provider.h
++++ b/ofproto/ofproto-provider.h
+@@ -541,6 +541,11 @@ extern unsigned ofproto_max_revalidator;
+ * duration exceeds half of max-revalidator config variable. */
+ extern unsigned ofproto_min_revalidate_pps;
+
++/* Worst case delay (in ms) it might take before statistics of offloaded flows
++ * are updated. Offloaded flows younger than this delay will always be
++ * revalidated regardless of ofproto_min_revalidate_pps. */
++extern unsigned ofproto_offloaded_stats_delay;
++
+ /* Number of upcall handler and revalidator threads. Only affects the
+ * ofproto-dpif implementation. */
+ extern uint32_t n_handlers, n_revalidators;
+diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
+index 17f636ed9..5fc1a7409 100644
+--- a/ofproto/ofproto.c
++++ b/ofproto/ofproto.c
+@@ -311,6 +311,7 @@ unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
+ unsigned ofproto_max_idle = OFPROTO_MAX_IDLE_DEFAULT;
+ unsigned ofproto_max_revalidator = OFPROTO_MAX_REVALIDATOR_DEFAULT;
+ unsigned ofproto_min_revalidate_pps = OFPROTO_MIN_REVALIDATE_PPS_DEFAULT;
++unsigned ofproto_offloaded_stats_delay = OFPROTO_OFFLOADED_STATS_DELAY;
+
+ uint32_t n_handlers, n_revalidators;
+
+@@ -727,6 +728,15 @@ ofproto_set_min_revalidate_pps(unsigned min_revalidate_pps)
+ ofproto_min_revalidate_pps = min_revalidate_pps ? min_revalidate_pps : 1;
+ }
+
++/* Set worst case delay (in ms) it might take before statistics of offloaded
++ * flows are updated. Offloaded flows younger than this delay will always be
++ * revalidated regardless of ofproto_min_revalidate_pps. */
++void
++ofproto_set_offloaded_stats_delay(unsigned offloaded_stats_delay)
++{
++ ofproto_offloaded_stats_delay = offloaded_stats_delay;
++}
++
+ /* If forward_bpdu is true, the NORMAL action will forward frames with
+ * reserved (e.g. STP) destination Ethernet addresses. if forward_bpdu is false,
+ * the NORMAL action will drop these frames. */
+diff --git a/ofproto/ofproto.h b/ofproto/ofproto.h
+index 4e15167ab..fa7973ac7 100644
+--- a/ofproto/ofproto.h
++++ b/ofproto/ofproto.h
+@@ -311,6 +311,7 @@ int ofproto_port_dump_done(struct ofproto_port_dump *);
+ #define OFPROTO_MAX_IDLE_DEFAULT 10000 /* ms */
+ #define OFPROTO_MAX_REVALIDATOR_DEFAULT 500 /* ms */
+ #define OFPROTO_MIN_REVALIDATE_PPS_DEFAULT 5
++#define OFPROTO_OFFLOADED_STATS_DELAY 2000 /* ms */
+
+ const char *ofproto_port_open_type(const struct ofproto *,
+ const char *port_type);
+@@ -340,6 +341,7 @@ void ofproto_set_flow_limit(unsigned limit);
+ void ofproto_set_max_idle(unsigned max_idle);
+ void ofproto_set_max_revalidator(unsigned max_revalidator);
+ void ofproto_set_min_revalidate_pps(unsigned min_revalidate_pps);
++void ofproto_set_offloaded_stats_delay(unsigned offloaded_stats_delay);
+ void ofproto_set_forward_bpdu(struct ofproto *, bool forward_bpdu);
+ void ofproto_set_mac_table_config(struct ofproto *, unsigned idle_time,
+ size_t max_entries);
+diff --git a/tests/classifier.at b/tests/classifier.at
+index f652b5983..de2705653 100644
+--- a/tests/classifier.at
++++ b/tests/classifier.at
+@@ -65,6 +65,94 @@ Datapath actions: 2
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+
++AT_SETUP([flow classifier - lookup segmentation - final stage])
++OVS_VSWITCHD_START
++add_of_ports br0 1 2 3
++AT_DATA([flows.txt], [dnl
++table=0 in_port=1 priority=33,tcp,tp_dst=80,tcp_flags=+psh,action=output(2)
++table=0 in_port=1 priority=0,ip,action=drop
++table=0 in_port=2 priority=16,icmp6,nw_ttl=255,icmp_type=135,icmp_code=0,nd_target=1000::1 ,action=output(1)
++table=0 in_port=2 priority=0,ip,action=drop
++table=0 in_port=3 action=resubmit(,1)
++table=1 in_port=3 priority=45,ct_state=+trk+rpl,ct_nw_proto=6,ct_tp_src=3/0x1,tcp,tp_dst=80,tcp_flags=+psh,action=output(2)
++table=1 in_port=3 priority=10,ip,action=drop
++])
++AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
++
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80,tcp_flags=syn'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_dst=80,tcp_flags=-psh
++Datapath actions: drop
++])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80,tcp_flags=syn|ack'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_dst=80,tcp_flags=-psh
++Datapath actions: drop
++])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80,tcp_flags=ack|psh'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_dst=80,tcp_flags=+psh
++Datapath actions: 2
++])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_dst=80,tcp_flags=-psh
++Datapath actions: drop
++])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=79'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_dst=0x40/0xfff0,tcp_flags=-psh
++Datapath actions: drop
++])
++
++dnl Having both the port and the tcp flags in the resulting megaflow below
++dnl is redundant, but that is how ports trie logic is implemented.
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=81'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_dst=81,tcp_flags=-psh
++Datapath actions: drop
++])
++
++dnl nd_target is redundant in the megaflow below and it is also not relevant
++dnl for an icmp reply. Datapath may discard that match, but it is OK as long
++dnl as we have prerequisites (icmp_type) in the match as well.
++AT_CHECK([ovs-appctl ofproto/trace br0 "in_port=2,eth_src=f6:d2:b0:19:5e:7b,eth_dst=d2:49:19:91:78:fe,dl_type=0x86dd,ipv6_src=1000::3,ipv6_dst=1000::4,nw_proto=58,nw_ttl=255,icmpv6_type=128,icmpv6_code=0"], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,icmp6,in_port=2,nw_ttl=255,nw_frag=no,icmp_type=0x80/0xfc,nd_target=::
++Datapath actions: drop
++])
++
++AT_CHECK([ovs-appctl ofproto/trace br0 "in_port=2,eth_src=f6:d2:b0:19:5e:7b,eth_dst=d2:49:19:91:78:fe,dl_type=0x86dd,ipv6_src=1000::3,ipv6_dst=1000::4,nw_proto=58,nw_ttl=255,icmpv6_type=135,icmpv6_code=0"], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,icmp6,in_port=2,nw_ttl=255,nw_frag=no,icmp_type=0x87/0xff,icmp_code=0x0/0xff,nd_target=::
++Datapath actions: drop
++])
++AT_CHECK([ovs-appctl ofproto/trace br0 "in_port=2,eth_src=f6:d2:b0:19:5e:7b,eth_dst=d2:49:19:91:78:fe,dl_type=0x86dd,ipv6_src=1000::3,ipv6_dst=1000::4,nw_proto=58,nw_ttl=255,icmpv6_type=135,icmpv6_code=0,nd_target=1000::1"], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,icmp6,in_port=2,nw_ttl=255,nw_frag=no,icmp_type=0x87/0xff,icmp_code=0x0/0xff,nd_target=1000::1
++Datapath actions: 1
++])
++AT_CHECK([ovs-appctl ofproto/trace br0 "in_port=2,eth_src=f6:d2:b0:19:5e:7b,eth_dst=d2:49:19:91:78:fe,dl_type=0x86dd,ipv6_src=1000::3,ipv6_dst=1000::4,nw_proto=58,nw_ttl=255,icmpv6_type=135,icmpv6_code=0,nd_target=1000::2"], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,eth,icmp6,in_port=2,nw_ttl=255,nw_frag=no,icmp_type=0x87/0xff,icmp_code=0x0/0xff,nd_target=1000::2
++Datapath actions: drop
++])
++
++dnl Check that ports' mask doesn't affect ct ports.
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=3,ct_state=trk|rpl,ct_nw_proto=6,ct_tp_src=3,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80,tcp_flags=psh'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,ct_state=+rpl+trk,ct_nw_proto=6,ct_tp_src=0x1/0x1,eth,tcp,in_port=3,nw_frag=no,tp_dst=80,tcp_flags=+psh
++Datapath actions: 2
++])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=3,ct_state=trk|rpl,ct_nw_proto=6,ct_tp_src=3,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=79,tcp_flags=psh'], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++ [Megaflow: recirc_id=0,ct_state=+rpl+trk,ct_nw_proto=6,ct_tp_src=0x1/0x1,eth,tcp,in_port=3,nw_frag=no,tp_dst=0x40/0xfff0,tcp_flags=+psh
++Datapath actions: drop
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_BANNER([flow classifier prefix lookup])
+ AT_SETUP([flow classifier - prefix lookup])
+ OVS_VSWITCHD_START
+diff --git a/tests/ofproto-dpif.at b/tests/ofproto-dpif.at
+index fa6111c1e..6b58cabec 100644
+--- a/tests/ofproto-dpif.at
++++ b/tests/ofproto-dpif.at
+@@ -849,7 +849,7 @@ table=2 ip actions=set_field:192.168.3.91->ip_src,output(11)
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt])
+ AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -2 stdout], [0],
+- [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_src=192.168.0.1,nw_frag=no
++ [Megaflow: recirc_id=0,eth,icmp,in_port=1,nw_src=192.168.0.1,nw_frag=no
+ Datapath actions: 10,set(ipv4(src=192.168.3.91)),11,set(ipv4(src=192.168.3.90)),13
+ ])
+ OVS_VSWITCHD_STOP
+@@ -912,7 +912,7 @@ AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_ds
+ # Must match on the source address to be able to restore it's value for
+ # the second bucket
+ AT_CHECK([tail -2 stdout], [0],
+- [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_src=192.168.0.1,nw_frag=no
++ [Megaflow: recirc_id=0,eth,icmp,in_port=1,nw_src=192.168.0.1,nw_frag=no
+ Datapath actions: set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),11
+ ])
+ OVS_VSWITCHD_STOP
+@@ -944,7 +944,7 @@ done
+ AT_CHECK([ovs-appctl dpctl/dump-flows | sed 's/dp_hash(.*\/0xf)/dp_hash(0xXXXX\/0xf)/' | sed 's/packets.*actions:/actions:/' | strip_ufid | strip_used | sort], [0], [dnl
+ flow-dump from the main thread:
+ recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), actions:hash(sym_l4(0)),recirc(0x1)
+-recirc_id(0x1),dp_hash(0xXXXX/0xf),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(src=192.168.0.1,frag=no), actions:set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),10
++recirc_id(0x1),dp_hash(0xXXXX/0xf),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(src=192.168.0.1,proto=1,frag=no), actions:set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),10
+ ])
+
+ OVS_VSWITCHD_STOP
+@@ -959,7 +959,7 @@ AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_ds
+ # Must match on the source address to be able to restore it's value for
+ # the third bucket
+ AT_CHECK([tail -2 stdout], [0],
+- [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_src=192.168.0.1,nw_frag=no
++ [Megaflow: recirc_id=0,eth,icmp,in_port=1,nw_src=192.168.0.1,nw_frag=no
+ Datapath actions: set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),11
+ ])
+ OVS_VSWITCHD_STOP
+@@ -1536,17 +1536,17 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=111,tos=0,ttl=2,frag=no)' -generate], [0], [stdout])
+ AT_CHECK([tail -4 stdout], [0], [
+ Final flow: ip,in_port=1,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=111,nw_tos=0,nw_ecn=0,nw_ttl=1,nw_frag=no
+-Megaflow: recirc_id=0,eth,ip,in_port=1,nw_ttl=2,nw_frag=no
++Megaflow: recirc_id=0,eth,ip,in_port=1,nw_proto=111,nw_ttl=2,nw_frag=no
+ Datapath actions: set(ipv4(ttl=1)),2,userspace(pid=0,controller(reason=2,dont_send=0,continuation=0,recirc_id=1,rule_cookie=0,controller_id=0,max_len=65535)),4
+ ])
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=111,tos=0,ttl=3,frag=no)'], [0], [stdout])
+ AT_CHECK([tail -2 stdout], [0],
+- [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_ttl=3,nw_frag=no
++ [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_proto=111,nw_ttl=3,nw_frag=no
+ Datapath actions: set(ipv4(ttl=2)),2,set(ipv4(ttl=1)),3,4
+ ])
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x86dd),ipv6(src=::1,dst=::2,label=0,proto=10,tclass=0x70,hlimit=128,frag=no)'], [0], [stdout])
+ AT_CHECK([tail -2 stdout], [0],
+- [Megaflow: recirc_id=0,eth,ipv6,in_port=1,nw_ttl=128,nw_frag=no
++ [Megaflow: recirc_id=0,eth,ipv6,in_port=1,nw_proto=10,nw_ttl=128,nw_frag=no
+ Datapath actions: set(ipv6(hlimit=127)),2,set(ipv6(hlimit=126)),3,4
+ ])
+
+@@ -1656,7 +1656,7 @@ AT_CHECK([ovs-vsctl -- \
+ --id=@q2 create Queue dscp=2], [0], [ignore])
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(9),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=1.1.1.1,dst=2.2.2.2,proto=1,tos=0xff,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout])
+ AT_CHECK([tail -2 stdout], [0],
+- [Megaflow: recirc_id=0,skb_priority=0,eth,ip,in_port=9,nw_tos=252,nw_frag=no
++ [Megaflow: recirc_id=0,skb_priority=0,eth,icmp,in_port=9,nw_tos=252,nw_frag=no
+ Datapath actions: dnl
+ 100,dnl
+ set(ipv4(tos=0x4/0xfc)),set(skb_priority(0x1)),1,dnl
+@@ -11884,7 +11884,7 @@ ovs-ofctl dump-flows br0
+
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.10.10.2,dst=10.10.10.1,proto=1,tos=1,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout])
+ AT_CHECK([tail -3 stdout], [0], [dnl
+-Megaflow: recirc_id=0,eth,ip,reg0=0/0x1,in_port=1,nw_src=10.10.10.2,nw_frag=no
++Megaflow: recirc_id=0,eth,icmp,reg0=0/0x1,in_port=1,nw_src=10.10.10.2,nw_frag=no
+ Datapath actions: drop
+ Translation failed (Recursion too deep), packet is dropped.
+ ])
+diff --git a/tests/ofproto.at b/tests/ofproto.at
+index a666bebca..2fa8486a8 100644
+--- a/tests/ofproto.at
++++ b/tests/ofproto.at
+@@ -6538,3 +6538,185 @@ verify_deleted
+
+ OVS_VSWITCHD_STOP(["/
++ Set worst case delay (in ms) it might take before statistics of
++ offloaded flows are updated. Offloaded flows younger than this
++ delay will always be revalidated regardless of
++ other_config:min-revalidate-pps.
++
++ The default is 2000.
++
+@@ -6296,6 +6309,12 @@ ovs-vsctl add-port br0 p0 -- set Interface p0 type=patch options:peer=p1 \
+ translated to an ephemeral port. If there is no collision, no SNAT
+ is performed.
+
++ The NXT_CT_FLUSH extension allows flushing CT entries based on
++ specified parameters.
++