From a6552c7d13dd6c1c5b31f6a1aebfb03f5298454f Mon Sep 17 00:00:00 2001 From: Open vSwitch CI Date: Oct 13 2021 21:01:52 +0000 Subject: Import openvswitch2.13-2.13.0-130 from Fast DataPath --- diff --git a/SOURCES/openvswitch-2.13.0.patch b/SOURCES/openvswitch-2.13.0.patch index 323bfd3..b2dffa5 100644 --- a/SOURCES/openvswitch-2.13.0.patch +++ b/SOURCES/openvswitch-2.13.0.patch @@ -80892,7 +80892,7 @@ index 68c33a0f96..9b251f81fa 100644 * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c -index d393aab5e3..2fb0d418b1 100644 +index d393aab5e3..a73102ea98 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -83,9 +83,9 @@ @@ -81201,22 +81201,32 @@ index d393aab5e3..2fb0d418b1 100644 dp_netdev_execute_actions(pmd, &pp, false, execute->flow, execute->actions, execute->actions_len); dp_netdev_pmd_flush_output_packets(pmd, true); -@@ -3841,6 +3944,14 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) +@@ -3841,6 +3944,24 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) dp_netdev_pmd_unref(pmd); } -+ if (dp_packet_batch_size(&pp)) { ++ if (dp_packet_batch_size(&pp) == 1) { + /* Packet wasn't dropped during the execution. Swapping content with + * the original packet, because the caller might expect actions to -+ * modify it. */ -+ dp_packet_swap(execute->packet, packet_clone); ++ * modify it. Uisng the packet from a batch instead of 'packet_clone' ++ * because it maybe stolen and replaced by other packet, e.g. by ++ * the fragmentation engine. */ ++ dp_packet_swap(execute->packet, pp.packets[0]); ++ dp_packet_delete_batch(&pp, true); ++ } else if (dp_packet_batch_size(&pp)) { ++ /* FIXME: We have more packets than expected. Likely, we got IP ++ * fragments of the reassembled packet. Dropping them here as we have ++ * no way to get them to the caller. It might be that all the required ++ * actions with them are already executed, but it also might not be a ++ * case, e.g. if dpif_netdev_execute() called to execute a single ++ * tunnel push. */ + dp_packet_delete_batch(&pp, true); + } + return 0; } -@@ -3875,11 +3986,12 @@ dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops, +@@ -3875,11 +3996,12 @@ dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops, /* Enable or Disable PMD auto load balancing. */ static void @@ -81230,7 +81240,7 @@ index d393aab5e3..2fb0d418b1 100644 bool enable_alb = false; bool multi_rxq = false; -@@ -3906,18 +4018,24 @@ set_pmd_auto_lb(struct dp_netdev *dp) +@@ -3906,18 +4028,24 @@ set_pmd_auto_lb(struct dp_netdev *dp) enable_alb = enable_alb && pmd_rxq_assign_cyc && pmd_alb->auto_lb_requested; @@ -81259,7 +81269,7 @@ index d393aab5e3..2fb0d418b1 100644 } /* Applies datapath configuration from the database. 
Some of the changes are -@@ -3935,6 +4053,9 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) +@@ -3935,6 +4063,9 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) uint32_t insert_min, cur_min; uint32_t tx_flush_interval, cur_tx_flush_interval; uint64_t rebalance_intvl; @@ -81269,7 +81279,7 @@ index d393aab5e3..2fb0d418b1 100644 tx_flush_interval = smap_get_int(other_config, "tx-flush-interval", DEFAULT_TX_FLUSH_INTERVAL); -@@ -4012,7 +4133,7 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) +@@ -4012,7 +4143,7 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) false); rebalance_intvl = smap_get_int(other_config, "pmd-auto-lb-rebal-interval", @@ -81278,7 +81288,7 @@ index d393aab5e3..2fb0d418b1 100644 /* Input is in min, convert it to msec. */ rebalance_intvl = -@@ -4020,9 +4141,38 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) +@@ -4020,9 +4151,38 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) if (pmd_alb->rebalance_intvl != rebalance_intvl) { pmd_alb->rebalance_intvl = rebalance_intvl; @@ -81320,7 +81330,7 @@ index d393aab5e3..2fb0d418b1 100644 return 0; } -@@ -4493,6 +4643,12 @@ struct rr_numa { +@@ -4493,6 +4653,12 @@ struct rr_numa { bool idx_inc; }; @@ -81333,7 +81343,7 @@ index d393aab5e3..2fb0d418b1 100644 static struct rr_numa * rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id) { -@@ -4940,9 +5096,17 @@ reconfigure_datapath(struct dp_netdev *dp) +@@ -4940,9 +5106,17 @@ reconfigure_datapath(struct dp_netdev *dp) /* Check for all the ports that need reconfiguration. We cache this in * 'port->need_reconfigure', because netdev_is_reconf_required() can @@ -81353,7 +81363,7 @@ index d393aab5e3..2fb0d418b1 100644 port->need_reconfigure = true; } } -@@ -5076,7 +5240,7 @@ reconfigure_datapath(struct dp_netdev *dp) +@@ -5076,7 +5250,7 @@ reconfigure_datapath(struct dp_netdev *dp) reload_affected_pmds(dp); /* Check if PMD Auto LB is to be enabled */ @@ -81362,7 +81372,7 @@ index d393aab5e3..2fb0d418b1 100644 } /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */ -@@ -5189,10 +5353,17 @@ get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list, +@@ -5189,10 +5363,17 @@ get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list, for (int i = 0; i < n_rxqs; i++) { int numa_id = netdev_get_numa_id(rxqs[i]->port->netdev); numa = rr_numa_list_lookup(&rr, numa_id); @@ -81383,7 +81393,7 @@ index d393aab5e3..2fb0d418b1 100644 goto cleanup; } -@@ -5320,7 +5491,7 @@ pmd_rebalance_dry_run(struct dp_netdev *dp) +@@ -5320,7 +5501,7 @@ pmd_rebalance_dry_run(struct dp_netdev *dp) improvement = ((curr_variance - new_variance) * 100) / curr_variance; } @@ -81392,7 +81402,7 @@ index d393aab5e3..2fb0d418b1 100644 ret = false; } } -@@ -5787,12 +5958,14 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_, +@@ -5787,12 +5968,14 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_, /* Update all bands and find the one hit with the highest rate for each * packet (if any). */ for (int m = 0; m < meter->n_bands; ++m) { @@ -81411,7 +81421,7 @@ index d393aab5e3..2fb0d418b1 100644 } /* Drain the bucket for all the packets, if possible. 
*/ -@@ -5810,8 +5983,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_, +@@ -5810,8 +5993,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_, * (Only one band will be fired by a packet, and that * can be different for each packet.) */ for (int i = band_exceeded_pkt; i < cnt; i++) { @@ -81422,7 +81432,7 @@ index d393aab5e3..2fb0d418b1 100644 exceeded_band[i] = m; } } -@@ -5830,8 +6003,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_, +@@ -5830,8 +6013,8 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_, /* Update the exceeding band for the exceeding packet. * (Only one band will be fired by a packet, and that * can be different for each packet.) */ @@ -81433,7 +81443,7 @@ index d393aab5e3..2fb0d418b1 100644 exceeded_band[i] = m; } } -@@ -5913,16 +6086,14 @@ dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id meter_id, +@@ -5913,16 +6096,14 @@ dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id meter_id, config->bands[i].burst_size = config->bands[i].rate; } @@ -81455,7 +81465,7 @@ index d393aab5e3..2fb0d418b1 100644 if (band_max_delta_t > meter->max_delta_t) { meter->max_delta_t = band_max_delta_t; } -@@ -7800,6 +7971,7 @@ const struct dpif_class dpif_netdev_class = { +@@ -7800,6 +7981,7 @@ const struct dpif_class dpif_netdev_class = { NULL, /* ct_timeout_policy_dump_next */ NULL, /* ct_timeout_policy_dump_done */ NULL, /* ct_get_timeout_policy_name */ @@ -81463,7 +81473,7 @@ index d393aab5e3..2fb0d418b1 100644 dpif_netdev_ipf_set_enabled, dpif_netdev_ipf_set_min_frag, dpif_netdev_ipf_set_max_nfrags, -@@ -8040,6 +8212,7 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, +@@ -8040,6 +8222,7 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, if (pmd->ctx.now > pmd->rxq_next_cycle_store) { uint64_t curr_tsc; @@ -81471,7 +81481,7 @@ index d393aab5e3..2fb0d418b1 100644 struct pmd_auto_lb *pmd_alb = &pmd->dp->pmd_alb; if (pmd_alb->is_enabled && !pmd->isolated && (pmd->perf_stats.counters.n[PMD_CYCLES_ITER_IDLE] >= -@@ -8056,7 +8229,9 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, +@@ -8056,7 +8239,9 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, pmd_load = ((tot_proc * 100) / (tot_idle + tot_proc)); } @@ -91139,10 +91149,10 @@ index 4af44200e8..68ce2c5442 100644 return (error || !stream) ? 1 : 0; } diff --git a/tests/tunnel-push-pop.at b/tests/tunnel-push-pop.at -index b92c23fde8..9feb1c5fec 100644 +index b92c23fde8..320ae5c9c2 100644 --- a/tests/tunnel-push-pop.at +++ b/tests/tunnel-push-pop.at -@@ -573,6 +573,62 @@ OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep 50540000000a5054000000091235 | wc +@@ -573,6 +573,64 @@ OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep 50540000000a5054000000091235 | wc OVS_VSWITCHD_STOP AT_CLEANUP @@ -91179,20 +91189,22 @@ index b92c23fde8..9feb1c5fec 100644 +AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap]) + +packet=50540000000a505400000009123 -+encap=f8bc124434b6aa55aa5500000800450000320000400040113406010102580101025c83a917c1001e00000000655800007b00 ++dnl Source port is based on a packet hash, so it may differ depending on the ++dnl compiler flags and CPU type. Masked with '....'. ++encap=f8bc124434b6aa55aa5500000800450000320000400040113406010102580101025c....17c1001e00000000655800007b00 + +dnl Output to tunnel from a int-br internal port. +dnl Checking that the packet arrived and it was correctly encapsulated. 
+AT_CHECK([ovs-ofctl add-flow int-br "in_port=LOCAL,actions=debug_slow,output:2"]) +AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}4"]) -+OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep "${encap}${packet}4" | wc -l` -ge 1]) ++OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | egrep "${encap}${packet}4" | wc -l` -ge 1]) +dnl Sending again to exercise the non-miss upcall path. +AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}4"]) -+OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep "${encap}${packet}4" | wc -l` -ge 2]) ++OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | egrep "${encap}${packet}4" | wc -l` -ge 2]) + +dnl Output to tunnel from the controller. +AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out int-br CONTROLLER "debug_slow,output:2" "${packet}5"]) -+OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep "${encap}${packet}5" | wc -l` -ge 1]) ++OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | egrep "${encap}${packet}5" | wc -l` -ge 1]) + +dnl Datapath actions should not have tunnel push action. +AT_CHECK([ovs-appctl dpctl/dump-flows | grep -q tnl_push], [1]) diff --git a/SPECS/openvswitch2.13.spec b/SPECS/openvswitch2.13.spec index e1d1384..b1fad2f 100644 --- a/SPECS/openvswitch2.13.spec +++ b/SPECS/openvswitch2.13.spec @@ -59,7 +59,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 2.13.0 -Release: 129%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist} +Release: 130%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -712,6 +712,13 @@ exit 0 %endif %changelog +* Wed Oct 13 2021 Open vSwitch CI - 2.13.0-130 +- Merging upstream branch-2.13 [RH git: beb8cdaec6] + Commit list: + ceb395773d dpif-netdev: Fix use-after-free on PACKET_OUT of IP fragments. + fed4df9bb7 tunnel-push-pop.at: Mask source port in tunnel header. + + * Tue Oct 12 2021 Open vSwitch CI - 2.13.0-129 - Merging upstream branch-2.13 [RH git: fc819dabd3] Commit list:
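Note on the first commit listed above (ceb395773d, visible in the dpif_netdev_execute() hunk earlier in this patch): after executing the actions it takes the resulting packet from the batch slot (pp.packets[0]) rather than from the 'packet_clone' pointer, because actions such as IP reassembly may free the clone and put a different packet into the batch. The sketch below is only an illustrative model of that ownership rule under hypothetical names -- 'toy_packet', 'toy_batch' and 'toy_execute' are stand-ins, not OVS API -- and is not part of the patch itself.

```c
/* Minimal, self-contained model of the ownership rule the fix relies on:
 * the execution path may replace the packet inside the batch, so the caller
 * must read the result back from the batch slot, not from the clone pointer
 * it handed in.  All names here are hypothetical, not OVS data structures. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_packet {
    char data[64];
};

struct toy_batch {
    struct toy_packet *packets[4];
    size_t count;
};

/* Models an action pipeline that may "steal" the packet it was given and
 * place a different packet into the batch slot, the way a reassembly step
 * can replace a fragment with the reassembled packet. */
static void
toy_execute(struct toy_batch *batch, int steal)
{
    if (steal) {
        struct toy_packet *replacement = calloc(1, sizeof *replacement);
        snprintf(replacement->data, sizeof replacement->data, "reassembled");
        free(batch->packets[0]);   /* The caller's clone no longer exists. */
        batch->packets[0] = replacement;
    } else {
        strncat(batch->packets[0]->data, "+modified",
                sizeof batch->packets[0]->data
                - strlen(batch->packets[0]->data) - 1);
    }
}

int
main(void)
{
    struct toy_packet original = { .data = "original" };

    /* Execute actions on a clone of the caller's packet. */
    struct toy_packet *clone = malloc(sizeof *clone);
    *clone = original;
    struct toy_batch batch = { .packets = { clone }, .count = 1 };

    toy_execute(&batch, /* steal= */ 1);

    if (batch.count == 1) {
        /* Correct: read the result back from the batch slot.  Reading
         * through 'clone' here would be the use-after-free being fixed. */
        memcpy(original.data, batch.packets[0]->data, sizeof original.data);
        free(batch.packets[0]);
    }
    printf("caller sees: %s\n", original.data);
    return 0;
}
```

Under this rule, the dp_packet_swap(execute->packet, pp.packets[0]) call in the hunk above stays valid even when the clone was stolen, and the multi-packet case (leftover IP fragments) is simply dropped, as the FIXME comment in the same hunk notes.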