diff --git a/SOURCES/openvswitch-2.13.0.patch b/SOURCES/openvswitch-2.13.0.patch
index 23ca3c0..fff50d5 100644
--- a/SOURCES/openvswitch-2.13.0.patch
+++ b/SOURCES/openvswitch-2.13.0.patch
@@ -80817,7 +80817,7 @@ index 68c33a0f96..9b251f81fa 100644
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
-index d393aab5e3..8b04f779e4 100644
+index d393aab5e3..d953da223f 100644
 --- a/lib/dpif-netdev.c
 +++ b/lib/dpif-netdev.c
 @@ -83,9 +83,9 @@
@@ -80914,6 +80914,24 @@ index d393aab5e3..8b04f779e4 100644
 
      cmap_remove(&flow_mark.mark_to_flow, mark_node, hash_int(mark, 0));
      flow->mark = INVALID_FLOW_MARK;
+@@ -2401,7 +2425,8 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
+     const struct dpif_class *dpif_class = pmd->dp->class;
+     struct dp_netdev_flow *flow = offload->flow;
+     odp_port_t in_port = flow->flow.in_port.odp_port;
+-    bool modification = offload->op == DP_NETDEV_FLOW_OFFLOAD_OP_MOD;
++    bool modification = offload->op == DP_NETDEV_FLOW_OFFLOAD_OP_MOD
++                        && flow->mark != INVALID_FLOW_MARK;
+     struct offload_info info;
+     struct netdev *port;
+     uint32_t mark;
+@@ -2413,7 +2438,6 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
+ 
+     if (modification) {
+         mark = flow->mark;
+-        ovs_assert(mark != INVALID_FLOW_MARK);
+     } else {
+         /*
+          * If a mega flow has already been offloaded (from other PMD
 @@ -2433,6 +2457,7 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
          mark = flow_mark_alloc();
          if (mark == INVALID_FLOW_MARK) {
@@ -80930,7 +80948,31 @@ index d393aab5e3..8b04f779e4 100644
      }
 
      return NULL;
-@@ -3032,9 +3058,56 @@ dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
+@@ -2538,10 +2564,9 @@ queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
+ static void
+ queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
+                       struct dp_netdev_flow *flow, struct match *match,
+-                      const struct nlattr *actions, size_t actions_len)
++                      const struct nlattr *actions, size_t actions_len, int op)
+ {
+     struct dp_flow_offload_item *offload;
+-    int op;
+ 
+     if (!netdev_is_flow_api_enabled()) {
+         return;
+@@ -2554,11 +2579,6 @@ queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
+         ovsthread_once_done(&offload_thread_once);
+     }
+ 
+-    if (flow->mark != INVALID_FLOW_MARK) {
+-        op = DP_NETDEV_FLOW_OFFLOAD_OP_MOD;
+-    } else {
+-        op = DP_NETDEV_FLOW_OFFLOAD_OP_ADD;
+-    }
+     offload = dp_netdev_alloc_flow_offload(pmd, flow, op);
+     offload->match = *match;
+     offload->actions = xmalloc(actions_len);
+@@ -3032,9 +3052,56 @@ dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
      return NULL;
  }
 
@@ -80988,7 +81030,7 @@ index d393aab5e3..8b04f779e4 100644
 dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
                                     struct dpif_flow_stats *stats,
                                     struct dpif_flow_attrs *attrs)
 {
-@@ -3056,11 +3129,31 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
+@@ -3056,11 +3123,31 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
      }
      ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
      /* Taking a global 'port_mutex' to fulfill thread safety
      netdev_close(netdev);
      if (ret) {
          return false;
-@@ -3329,6 +3422,9 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
+@@ -3329,6 +3416,9 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
      /* Do not allocate extra space. */
      flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
      memset(&flow->stats, 0, sizeof flow->stats);
      flow->dead = false;
      flow->batch = NULL;
      flow->mark = INVALID_FLOW_MARK;
+@@ -3360,7 +3450,8 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
+     cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
+                 dp_netdev_flow_hash(&flow->ufid));
+ 
+-    queue_netdev_flow_put(pmd, flow, match, actions, actions_len);
++    queue_netdev_flow_put(pmd, flow, match, actions, actions_len,
++                          DP_NETDEV_FLOW_OFFLOAD_OP_ADD);
+ 
+     if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl)))) {
+         struct ds ds = DS_EMPTY_INITIALIZER;
+@@ -3450,7 +3541,8 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
+     ovsrcu_set(&netdev_flow->actions, new_actions);
+ 
+-    queue_netdev_flow_put(pmd, netdev_flow, match,
+-                          put->actions, put->actions_len);
++    queue_netdev_flow_put(pmd, netdev_flow, match,
++                          put->actions, put->actions_len,
++                          DP_NETDEV_FLOW_OFFLOAD_OP_MOD);
+ 
+     if (stats) {
+         get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
-@@ -3506,6 +3602,15 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
+@@ -3506,6 +3598,15 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
          return error;
      }
 
      if (put->ufid) {
          ufid = *put->ufid;
      } else {
-@@ -3831,7 +3936,6 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
+@@ -3831,7 +3932,6 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
      }
 
      dp_packet_batch_init_packet(&pp, execute->packet);
-
      dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
                                execute->actions, execute->actions_len);
      dp_netdev_pmd_flush_output_packets(pmd, true);
-@@ -3875,11 +3979,12 @@ dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
+@@ -3875,11 +3975,12 @@ dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
 
  /* Enable or Disable PMD auto load balancing. */
  static void
      bool enable_alb = false;
      bool multi_rxq = false;
-@@ -3906,18 +4011,24 @@ set_pmd_auto_lb(struct dp_netdev *dp)
+@@ -3906,18 +4007,24 @@ set_pmd_auto_lb(struct dp_netdev *dp)
      enable_alb = enable_alb && pmd_rxq_assign_cyc &&
                       pmd_alb->auto_lb_requested;
 
  }
 
  /* Applies datapath configuration from the database. Some of the changes are
-@@ -3935,6 +4046,9 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
+@@ -3935,6 +4042,9 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
      uint32_t insert_min, cur_min;
      uint32_t tx_flush_interval, cur_tx_flush_interval;
      uint64_t rebalance_intvl;
 
      tx_flush_interval = smap_get_int(other_config, "tx-flush-interval",
                                       DEFAULT_TX_FLUSH_INTERVAL);
-@@ -4012,7 +4126,7 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
+@@ -4012,7 +4122,7 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
                              false);
 
      rebalance_intvl = smap_get_int(other_config, "pmd-auto-lb-rebal-interval",
      /* Input is in min, convert it to msec. */
      rebalance_intvl =
-@@ -4020,9 +4134,38 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
+@@ -4020,9 +4130,38 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
      if (pmd_alb->rebalance_intvl != rebalance_intvl) {
          pmd_alb->rebalance_intvl = rebalance_intvl;
 
      return 0;
  }
 
-@@ -4493,6 +4636,12 @@ struct rr_numa {
+@@ -4493,6 +4632,12 @@ struct rr_numa {
      bool idx_inc;
  };
 
  static struct rr_numa *
  rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id)
  {
-@@ -4940,9 +5089,17 @@ reconfigure_datapath(struct dp_netdev *dp)
+@@ -4940,9 +5085,17 @@ reconfigure_datapath(struct dp_netdev *dp)
      /* Check for all the ports that need reconfiguration.  We cache this in
       * 'port->need_reconfigure', because netdev_is_reconf_required() can
              port->need_reconfigure = true;
          }
      }
-@@ -5076,7 +5233,7 @@ reconfigure_datapath(struct dp_netdev *dp)
+@@ -5076,7 +5229,7 @@ reconfigure_datapath(struct dp_netdev *dp)
      reload_affected_pmds(dp);
 
      /* Check if PMD Auto LB is to be enabled */
  }
 
  /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
-@@ -5189,10 +5346,17 @@ get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
+@@ -5189,10 +5342,17 @@ get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
      for (int i = 0; i < n_rxqs; i++) {
          int numa_id = netdev_get_numa_id(rxqs[i]->port->netdev);
          numa = rr_numa_list_lookup(&rr, numa_id);
              goto cleanup;
          }
-@@ -5320,7 +5484,7 @@ pmd_rebalance_dry_run(struct dp_netdev *dp)
+@@ -5320,7 +5480,7 @@ pmd_rebalance_dry_run(struct dp_netdev *dp)
          improvement =
              ((curr_variance - new_variance) * 100) / curr_variance;
      }
              ret = false;
          }
      }
-@@ -5787,12 +5951,14 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
+@@ -5787,12 +5947,14 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
      /* Update all bands and find the one hit with the highest rate for each
       * packet (if any). */
      for (int m = 0; m < meter->n_bands; ++m) {
      }
 
      /* Drain the bucket for all the packets, if possible. */
-@@ -5810,8 +5976,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
+@@ -5810,8 +5972,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
               * (Only one band will be fired by a packet, and that
               * can be different for each packet.) */
              for (int i = band_exceeded_pkt; i < cnt; i++) {
                      exceeded_band[i] = m;
                  }
              }
-@@ -5830,8 +5996,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
+@@ -5830,8 +5992,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
                      /* Update the exceeding band for the exceeding packet.
                       * (Only one band will be fired by a packet, and that
                       * can be different for each packet.) */
                      if (band > exceeded_band[i]) {
                          exceeded_band[i] = m;
                      }
                  }
-@@ -5913,16 +6079,14 @@ dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
+@@ -5913,16 +6075,14 @@ dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
              config->bands[i].burst_size = config->bands[i].rate;
          }
 
          if (band_max_delta_t > meter->max_delta_t) {
              meter->max_delta_t = band_max_delta_t;
          }
-@@ -7800,6 +7964,7 @@ const struct dpif_class dpif_netdev_class = {
+@@ -7800,6 +7960,7 @@ const struct dpif_class dpif_netdev_class = {
      NULL,                       /* ct_timeout_policy_dump_next */
      NULL,                       /* ct_timeout_policy_dump_done */
      NULL,                       /* ct_get_timeout_policy_name */
      dpif_netdev_ipf_set_enabled,
      dpif_netdev_ipf_set_min_frag,
      dpif_netdev_ipf_set_max_nfrags,
-@@ -8040,6 +8205,7 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+@@ -8040,6 +8201,7 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
      if (pmd->ctx.now > pmd->rxq_next_cycle_store) {
          uint64_t curr_tsc;
          struct pmd_auto_lb *pmd_alb = &pmd->dp->pmd_alb;
          if (pmd_alb->is_enabled && !pmd->isolated
              && (pmd->perf_stats.counters.n[PMD_CYCLES_ITER_IDLE] >=
-@@ -8056,7 +8222,9 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+@@ -8056,7 +8218,9 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
              pmd_load = ((tot_proc * 100) / (tot_idle + tot_proc));
          }
diff --git a/SPECS/openvswitch2.13.spec b/SPECS/openvswitch2.13.spec
index effb36a..266565f 100644
--- a/SPECS/openvswitch2.13.spec
+++ b/SPECS/openvswitch2.13.spec
@@ -59,7 +59,7 @@ Summary: Open vSwitch
 Group: System Environment/Daemons daemon/database/utilities
 URL: http://www.openvswitch.org/
 Version: 2.13.0
-Release: 119%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist}
+Release: 120%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist}
 
 # Nearly all of openvswitch is ASL 2.0.  The bugtool is LGPLv2+, and the
 # lib/sflow*.[ch] files are SISSL
@@ -710,6 +710,10 @@ exit 0
 %endif
 
 %changelog
+* Mon Aug 02 2021 Open vSwitch CI - 2.13.0-120
+- Merging upstream branch-2.13
+  [eda2f50cf9764b167acbcaa4919f58d06b95f66d]
+
 * Mon Jul 26 2021 Open vSwitch CI - 2.13.0-119
 - Merging upstream branch-2.13
   [c2e53c31505b228aaa405be2bc36c25de4d9faeb]