diff --git a/SOURCES/openvswitch-2.16.0.patch b/SOURCES/openvswitch-2.16.0.patch
index a40e55e..c96bcfb 100644
--- a/SOURCES/openvswitch-2.16.0.patch
+++ b/SOURCES/openvswitch-2.16.0.patch
@@ -2731,7 +2731,7 @@ index bddce75b63..5f90dd4ceb 100644
      return subtables_changed;
  }
 diff --git a/lib/dpif-netlink.c b/lib/dpif-netlink.c
-index 34fc042373..a681fa1b33 100644
+index 34fc042373..537d3951af 100644
 --- a/lib/dpif-netlink.c
 +++ b/lib/dpif-netlink.c
 @@ -84,6 +84,8 @@ enum { MAX_PORTS = USHRT_MAX };
@@ -2828,7 +2828,67 @@ index 34fc042373..a681fa1b33 100644
      ofpbuf_delete(buf);
 
      if (create) {
-@@ -1036,7 +1064,7 @@ dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
+@@ -763,14 +791,28 @@ dpif_netlink_set_handler_pids(struct dpif *dpif_, const uint32_t *upcall_pids,
+                               uint32_t n_upcall_pids)
+ {
+     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
++    int largest_cpu_id = ovs_numa_get_largest_core_id();
+     struct dpif_netlink_dp request, reply;
+     struct ofpbuf *bufp;
+-    int error;
+-    int n_cores;
+ 
+-    n_cores = count_cpu_cores();
+-    ovs_assert(n_cores == n_upcall_pids);
+-    VLOG_DBG("Dispatch mode(per-cpu): Number of CPUs is %d", n_cores);
++    uint32_t *corrected;
++    int error, i, n_cores;
++
++    if (largest_cpu_id == OVS_NUMA_UNSPEC) {
++        largest_cpu_id = -1;
++    }
++
++    /* Some systems have non-continuous cpu core ids. count_total_cores()
++     * would return an accurate number, however, this number cannot be used.
++     * e.g. If the largest core_id of a system is cpu9, but the system only
++     * has 4 cpus then the OVS kernel module would throw a "CPU mismatch"
++     * warning. With the MAX() in place in this example we send an array of
++     * size 10 and prevent the warning. This has no bearing on the number of
++     * threads created.
++     */
++    n_cores = MAX(count_total_cores(), largest_cpu_id + 1);
++    VLOG_DBG("Dispatch mode(per-cpu): Setting up handler PIDs for %d cores",
++             n_cores);
+ 
+     dpif_netlink_dp_init(&request);
+     request.cmd = OVS_DP_CMD_SET;
+@@ -779,7 +821,12 @@ dpif_netlink_set_handler_pids(struct dpif *dpif_, const uint32_t *upcall_pids,
+     request.user_features = dpif->user_features |
+                             OVS_DP_F_DISPATCH_UPCALL_PER_CPU;
+ 
+-    request.upcall_pids = upcall_pids;
++    corrected = xcalloc(n_cores, sizeof *corrected);
++
++    for (i = 0; i < n_cores; i++) {
++        corrected[i] = upcall_pids[i % n_upcall_pids];
++    }
++    request.upcall_pids = corrected;
+     request.n_upcall_pids = n_cores;
+ 
+     error = dpif_netlink_dp_transact(&request, &reply, &bufp);
+@@ -787,9 +834,10 @@ dpif_netlink_set_handler_pids(struct dpif *dpif_, const uint32_t *upcall_pids,
+         dpif->user_features = reply.user_features;
+         ofpbuf_delete(bufp);
+         if (!dpif_netlink_upcall_per_cpu(dpif)) {
+-            return -EOPNOTSUPP;
++            error = -EOPNOTSUPP;
+         }
+     }
++    free(corrected);
+     return error;
+ }
+ 
+@@ -1036,7 +1084,7 @@ dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
 
              ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
              for (i = 0; i < 32; i++) {
@@ -2837,6 +2897,102 @@ index 34fc042373..a681fa1b33 100644
                      nl_msg_put_flag(&options, i);
                  }
              }
+@@ -2453,6 +2501,77 @@ dpif_netlink_handler_uninit(struct dpif_handler *handler)
+ }
+ #endif
+ 
++/* Returns true if num is a prime number,
++ * otherwise, return false.
++ */
++static bool
++is_prime(uint32_t num)
++{
++    if (num == 2) {
++        return true;
++    }
++
++    if (num < 2) {
++        return false;
++    }
++
++    if (num % 2 == 0) {
++        return false;
++    }
++
++    for (uint64_t i = 3; i * i <= num; i += 2) {
++        if (num % i == 0) {
++            return false;
++        }
++    }
++
++    return true;
++}
++
++/* Returns start if start is a prime number. Otherwise returns the next
++ * prime greater than start. Search is limited by UINT32_MAX.
++ *
++ * Returns 0 if no prime has been found between start and UINT32_MAX.
++ */
++static uint32_t
++next_prime(uint32_t start)
++{
++    if (start <= 2) {
++        return 2;
++    }
++
++    for (uint32_t i = start; i < UINT32_MAX; i++) {
++        if (is_prime(i)) {
++            return i;
++        }
++    }
++
++    return 0;
++}
++
++/* Calculates and returns the number of handler threads needed based
++ * the following formula:
++ *
++ * handlers_n = min(next_prime(active_cores + 1), total_cores)
++ */
++static uint32_t
++dpif_netlink_calculate_n_handlers(void)
++{
++    uint32_t total_cores = count_total_cores();
++    uint32_t n_handlers = count_cpu_cores();
++    uint32_t next_prime_num;
++
++    /* If not all cores are available to OVS, create additional handler
++     * threads to ensure more fair distribution of load between them.
++     */
++    if (n_handlers < total_cores && total_cores > 2) {
++        next_prime_num = next_prime(n_handlers + 1);
++        n_handlers = MIN(next_prime_num, total_cores);
++    }
++
++    return n_handlers;
++}
++
+ static int
+ dpif_netlink_refresh_handlers_cpu_dispatch(struct dpif_netlink *dpif)
+     OVS_REQ_WRLOCK(dpif->upcall_lock)
+@@ -2462,7 +2581,7 @@ dpif_netlink_refresh_handlers_cpu_dispatch(struct dpif_netlink *dpif)
+     uint32_t n_handlers;
+     uint32_t *upcall_pids;
+ 
+-    n_handlers = count_cpu_cores();
++    n_handlers = dpif_netlink_calculate_n_handlers();
+     if (dpif->n_handlers != n_handlers) {
+         VLOG_DBG("Dispatch mode(per-cpu): initializing %d handlers",
+                  n_handlers);
+@@ -2702,7 +2821,7 @@ dpif_netlink_number_handlers_required(struct dpif *dpif_, uint32_t *n_handlers)
+     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
+ 
+     if (dpif_netlink_upcall_per_cpu(dpif)) {
+-        *n_handlers = count_cpu_cores();
++        *n_handlers = dpif_netlink_calculate_n_handlers();
+         return true;
+     }
+ 
 diff --git a/lib/flow.c b/lib/flow.c
 index 89837de95d..a021bc0eba 100644
 --- a/lib/flow.c
@@ -3816,11 +3972,59 @@ index 4edb3c114a..05c0b5711d 100644
      ofpbuf_prealloc_tailroom(b, size);
      b->data = (char*)b->data + size;
  }
+diff --git a/lib/ovs-numa.c b/lib/ovs-numa.c
+index 9e3fa54216..6a197772c1 100644
+--- a/lib/ovs-numa.c
++++ b/lib/ovs-numa.c
+@@ -387,6 +387,35 @@ ovs_numa_get_n_cores_on_numa(int numa_id)
+     return OVS_CORE_UNSPEC;
+ }
+ 
++/* Returns the largest core_id.
++ *
++ * Return OVS_CORE_UNSPEC, if core_id information is not found.
++ *
++ * Returning OVS_CORE_UNSPEC comes at a caveat. The caller function
++ * must remember to check the return value of this callee function
++ * against OVS_CORE_UNSPEC. OVS_CORE_UNSPEC is a positive integer
++ * INT_MAX, which the caller may interpret it as the largest
++ * core_id if it's not checking for it.
++ */
++unsigned
++ovs_numa_get_largest_core_id(void)
++{
++    struct cpu_core *core;
++    unsigned max_id = 0;
++
++    if (!found_numa_and_core) {
++        return OVS_CORE_UNSPEC;
++    }
++
++    HMAP_FOR_EACH (core, hmap_node, &all_cpu_cores) {
++        if (core->core_id > max_id) {
++            max_id = core->core_id;
++        }
++    }
++
++    return max_id;
++}
++
+ static struct ovs_numa_dump *
+ ovs_numa_dump_create(void)
+ {
 diff --git a/lib/ovs-numa.h b/lib/ovs-numa.h
-index ecc251a7ff..83bd10cca5 100644
+index ecc251a7ff..02c9e84cf5 100644
 --- a/lib/ovs-numa.h
 +++ b/lib/ovs-numa.h
-@@ -68,9 +68,9 @@ void ovs_numa_dump_destroy(struct ovs_numa_dump *);
+@@ -56,6 +56,7 @@ int ovs_numa_get_n_numas(void);
+ int ovs_numa_get_n_cores(void);
+ int ovs_numa_get_numa_id(unsigned core_id);
+ int ovs_numa_get_n_cores_on_numa(int numa_id);
++unsigned ovs_numa_get_largest_core_id(void);
+ struct ovs_numa_dump *ovs_numa_dump_cores_on_numa(int numa_id);
+ struct ovs_numa_dump *ovs_numa_dump_cores_with_cmask(const char *cmask);
+ struct ovs_numa_dump *ovs_numa_dump_n_cores_per_numa(int n);
+@@ -68,9 +69,9 @@ void ovs_numa_dump_destroy(struct ovs_numa_dump *);
  int ovs_numa_thread_setaffinity_core(unsigned core_id);
 
  #define FOR_EACH_CORE_ON_DUMP(ITER, DUMP) \
@@ -3908,6 +4112,46 @@ index ecc4c92010..8b397b7fb0 100644
 +void ovsrcu_barrier(void);
 +
  #endif /* ovs-rcu.h */
+diff --git a/lib/ovs-thread.c b/lib/ovs-thread.c
+index b686e45481..10a5b1b4a8 100644
+--- a/lib/ovs-thread.c
++++ b/lib/ovs-thread.c
+@@ -624,6 +624,23 @@ count_cpu_cores(void)
+     return n_cores > 0 ? n_cores : 0;
+ }
+ 
++/* Returns the total number of cores on the system, or 0 if the
++ * number cannot be determined. */
++int
++count_total_cores(void)
++{
++    long int n_cores;
++
++#ifndef _WIN32
++    n_cores = sysconf(_SC_NPROCESSORS_CONF);
++#else
++    n_cores = 0;
++    errno = ENOTSUP;
++#endif
++
++    return n_cores > 0 ? n_cores : 0;
++}
++
+ /* Returns 'true' if current thread is PMD thread. */
+ bool
+ thread_is_pmd(void)
+diff --git a/lib/ovs-thread.h b/lib/ovs-thread.h
+index 7ee98bd4e2..85546f54b1 100644
+--- a/lib/ovs-thread.h
++++ b/lib/ovs-thread.h
+@@ -522,6 +522,7 @@ bool may_fork(void);
+ /* Useful functions related to threading. */
+ 
+ int count_cpu_cores(void);
++int count_total_cores(void);
+ bool thread_is_pmd(void);
+ 
+ #endif /* ovs-thread.h */
 diff --git a/lib/ovsdb-cs.c b/lib/ovsdb-cs.c
 index 659d49dbf7..dead31275d 100644
 --- a/lib/ovsdb-cs.c
diff --git a/SPECS/openvswitch2.16.spec b/SPECS/openvswitch2.16.spec
index 3c23f39..1caa38a 100644
--- a/SPECS/openvswitch2.16.spec
+++ b/SPECS/openvswitch2.16.spec
@@ -57,7 +57,7 @@ Summary: Open vSwitch
 Group: System Environment/Daemons daemon/database/utilities
 URL: http://www.openvswitch.org/
 Version: 2.16.0
-Release: 95%{?dist}
+Release: 96%{?dist}
 
 # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the
 # lib/sflow*.[ch] files are SISSL
@@ -699,6 +699,13 @@ exit 0
 %endif
 
 %changelog
+* Mon Aug 15 2022 Open vSwitch CI - 2.16.0-96
+- Merging upstream branch-2.16 [RH git: ddb62f8750]
+    Commit list:
+    f68ae52392 handlers: Fix handlers mapping.
+    3cbadc0b74 handlers: Create additional handler threads when using CPU isolation.
+
+
 * Wed Aug 10 2022 Timothy Redaelli - 2.16.0-95
 - pkgtool: keep %{?dist} before added bz string [RH git: 339efe77c4]
     Signed-off-by: Timothy Redaelli