diff --git a/SOURCES/openvswitch-2.15.0.patch b/SOURCES/openvswitch-2.15.0.patch
index d8b5512..dad3484 100644
--- a/SOURCES/openvswitch-2.15.0.patch
+++ b/SOURCES/openvswitch-2.15.0.patch
@@ -18617,7 +18617,7 @@ index 9e2d06b3dd..cb3f30e5b6 100644
 
 static inline void
 diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
-index 4381c618f1..f6fe441dda 100644
+index 4381c618f1..3e45c816a0 100644
 --- a/lib/dpif-netdev.c
 +++ b/lib/dpif-netdev.c
 @@ -279,8 +279,9 @@ static bool dpcls_lookup(struct dpcls *cls,
@@ -18678,31 +18678,31 @@ index 4381c618f1..f6fe441dda 100644
 -                                    "please specify an existing datapath");
 -        return;
 -    }
--
++    SHASH_FOR_EACH (node, &dp_netdevs) {
++        struct dp_netdev *dp = node->data;
+ 
 -    /* Get PMD threads list, required to get DPCLS instances. */
 -    size_t n;
 -    uint32_t lookup_dpcls_changed = 0;
 -    uint32_t lookup_subtable_changed = 0;
 -    struct dp_netdev_pmd_thread **pmd_list;
 -    sorted_poll_thread_list(dp, &pmd_list, &n);
-+    SHASH_FOR_EACH (node, &dp_netdevs) {
-+        struct dp_netdev *dp = node->data;
- 
--    /* take port mutex as HMAP iters over them. */
--    ovs_mutex_lock(&dp->port_mutex);
 +        /* Get PMD threads list, required to get DPCLS instances. */
 +        size_t n;
 +        struct dp_netdev_pmd_thread **pmd_list;
 +        sorted_poll_thread_list(dp, &pmd_list, &n);
 
+-    /* take port mutex as HMAP iters over them. */
+-    ovs_mutex_lock(&dp->port_mutex);
++        /* take port mutex as HMAP iters over them. */
++        ovs_mutex_lock(&dp->port_mutex);
+ 
 -    for (size_t i = 0; i < n; i++) {
 -        struct dp_netdev_pmd_thread *pmd = pmd_list[i];
 -        if (pmd->core_id == NON_PMD_CORE_ID) {
 -            continue;
 -        }
-+        /* take port mutex as HMAP iters over them. */
-+        ovs_mutex_lock(&dp->port_mutex);
- 
+-
 -        struct dp_netdev_port *port = NULL;
 -        HMAP_FOR_EACH (port, node, &dp->ports) {
 -            odp_port_t in_port = port->port_no;
@@ -18753,7 +18753,26 @@ index 4381c618f1..f6fe441dda 100644
                               NULL);
      unixctl_command_register("dpif-netdev/subtable-lookup-prio-get", "",
                               0, 0, dpif_netdev_subtable_lookup_get,
-@@ -2659,7 +2646,8 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
+@@ -2569,18 +2556,6 @@ mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,
+     return ret;
+ }
+ 
+-static void
+-flow_mark_flush(struct dp_netdev_pmd_thread *pmd)
+-{
+-    struct dp_netdev_flow *flow;
+-
+-    CMAP_FOR_EACH (flow, mark_node, &flow_mark.mark_to_flow) {
+-        if (flow->pmd_id == pmd->core_id) {
+-            queue_netdev_flow_del(pmd, flow);
+-        }
+-    }
+-}
+-
+ static struct dp_netdev_flow *
+ mark_to_flow_find(const struct dp_netdev_pmd_thread *pmd,
+                   const uint32_t mark)
+@@ -2659,7 +2634,8 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
      struct dp_netdev_flow *flow = offload->flow;
      odp_port_t in_port = flow->flow.in_port.odp_port;
      const char *dpif_type_str = dpif_normalize_type(pmd->dp->class->type);
@@ -18763,7 +18782,7 @@ index 4381c618f1..f6fe441dda 100644
      struct offload_info info;
      struct netdev *port;
      uint32_t mark;
-@@ -2671,7 +2659,6 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
+@@ -2671,7 +2647,6 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
 
      if (modification) {
          mark = flow->mark;
@@ -18771,7 +18790,7 @@ index 4381c618f1..f6fe441dda 100644
      } else {
          /*
          * If a mega flow has already been offloaded (from other PMD
-@@ -2798,10 +2785,9 @@ queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
+@@ -2798,10 +2773,9 @@ queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
  static void
  queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
                        struct dp_netdev_flow *flow, struct match *match,
@@ -18783,7 +18802,7 @@ index 4381c618f1..f6fe441dda 100644
 
      if (!netdev_is_flow_api_enabled()) {
          return;
-@@ -2814,11 +2800,6 @@ queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
+@@ -2814,11 +2788,6 @@ queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
          ovsthread_once_done(&offload_thread_once);
      }
 
@@ -18795,7 +18814,7 @@ index 4381c618f1..f6fe441dda 100644
      offload = dp_netdev_alloc_flow_offload(pmd, flow, op);
      offload->match = *match;
      offload->actions = xmalloc(actions_len);
-@@ -3691,7 +3672,8 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
+@@ -3691,7 +3660,8 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
      cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
                  dp_netdev_flow_hash(&flow->ufid));
 
@@ -18805,7 +18824,7 @@ index 4381c618f1..f6fe441dda 100644
 
      if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl)))) {
          struct ds ds = DS_EMPTY_INITIALIZER;
-@@ -3778,7 +3760,8 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
+@@ -3778,7 +3748,8 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
          ovsrcu_set(&netdev_flow->actions, new_actions);
 
          queue_netdev_flow_put(pmd, netdev_flow, match,
@@ -18815,7 +18834,7 @@ index 4381c618f1..f6fe441dda 100644
 
          if (stats) {
              get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
-@@ -3834,6 +3817,15 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
+@@ -3834,6 +3805,15 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
          return error;
      }
 
@@ -18831,7 +18850,7 @@ index 4381c618f1..f6fe441dda 100644
      if (put->ufid) {
          ufid = *put->ufid;
      } else {
-@@ -4159,7 +4151,6 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
+@@ -4159,7 +4139,6 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
      }
 
      dp_packet_batch_init_packet(&pp, execute->packet);
@@ -18839,7 +18858,7 @@ index 4381c618f1..f6fe441dda 100644
      dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
                                execute->actions, execute->actions_len);
      dp_netdev_pmd_flush_output_packets(pmd, true);
-@@ -4878,6 +4869,12 @@ struct rr_numa {
+@@ -4878,6 +4857,12 @@ struct rr_numa {
      bool idx_inc;
  };
 
@@ -18852,7 +18871,15 @@ index 4381c618f1..f6fe441dda 100644
  static struct rr_numa *
  rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id)
  {
-@@ -5590,10 +5587,17 @@ get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
+@@ -5149,7 +5134,6 @@ reload_affected_pmds(struct dp_netdev *dp)
+ 
+     CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+         if (pmd->need_reload) {
+-            flow_mark_flush(pmd);
+             dp_netdev_reload_pmd__(pmd);
+         }
+     }
+@@ -5590,10 +5574,17 @@ get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
      for (int i = 0; i < n_rxqs; i++) {
          int numa_id = netdev_get_numa_id(rxqs[i]->port->netdev);
          numa = rr_numa_list_lookup(&rr, numa_id);
@@ -18873,7 +18900,7 @@ index 4381c618f1..f6fe441dda 100644
 
          goto cleanup;
      }
-@@ -6203,12 +6207,14 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
+@@ -6203,12 +6194,14 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
      /* Update all bands and find the one hit with the highest rate for each
       * packet (if any). */
      for (int m = 0; m < meter->n_bands; ++m) {
@@ -18892,7 +18919,7 @@ index 4381c618f1..f6fe441dda 100644
      }
 
      /* Drain the bucket for all the packets, if possible. */
-@@ -6226,8 +6232,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
+@@ -6226,8 +6219,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
       * (Only one band will be fired by a packet, and that
       * can be different for each packet.) */
      for (int i = band_exceeded_pkt; i < cnt; i++) {
@@ -18903,7 +18930,7 @@ index 4381c618f1..f6fe441dda 100644
              exceeded_band[i] = m;
          }
      }
-@@ -6246,8 +6252,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
+@@ -6246,8 +6239,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
      /* Update the exceeding band for the exceeding packet.
       * (Only one band will be fired by a packet, and that
       * can be different for each packet.) */
@@ -18914,7 +18941,7 @@ index 4381c618f1..f6fe441dda 100644
              exceeded_band[i] = m;
          }
      }
-@@ -6329,16 +6335,14 @@ dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
+@@ -6329,16 +6322,14 @@ dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
          config->bands[i].burst_size = config->bands[i].rate;
      }
 
@@ -18936,7 +18963,7 @@ index 4381c618f1..f6fe441dda 100644
          if (band_max_delta_t > meter->max_delta_t) {
              meter->max_delta_t = band_max_delta_t;
          }
-@@ -8493,6 +8497,7 @@ const struct dpif_class dpif_netdev_class = {
+@@ -8493,6 +8484,7 @@ const struct dpif_class dpif_netdev_class = {
      NULL,                       /* ct_timeout_policy_dump_next */
      NULL,                       /* ct_timeout_policy_dump_done */
      dpif_netdev_ct_get_timeout_policy_name,
diff --git a/SPECS/openvswitch2.15.spec b/SPECS/openvswitch2.15.spec
index 4355da2..e015487 100644
--- a/SPECS/openvswitch2.15.spec
+++ b/SPECS/openvswitch2.15.spec
@@ -57,7 +57,7 @@ Summary: Open vSwitch
 Group: System Environment/Daemons daemon/database/utilities
 URL: http://www.openvswitch.org/
 Version: 2.15.0
-Release: 31%{?dist}
+Release: 32%{?dist}
 
 # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the
 # lib/sflow*.[ch] files are SISSL
@@ -697,6 +697,12 @@ exit 0
 %endif
 
 %changelog
+* Tue Aug 03 2021 Open vSwitch CI - 2.15.0-32
+- Merging upstream branch-2.15 [RH gerrit: 2cc833ce5b]
+  Commit list:
+90b219275d dpif-netdev: Do not flush PMD offloads on reload.
+
+
 * Mon Aug 02 2021 Open vSwitch CI - 2.15.0-31
 - Merging upstream branch-2.15 [RH gerrit: 88fb0bba8a]
   Commit list: