From fbbb37b8c3cb420551279a18c3c235c119a85436 Mon Sep 17 00:00:00 2001 From: Open vSwitch CI Date: Apr 06 2021 16:45:28 +0000 Subject: Import openvswitch2.13-2.13.0-103 from Fast DataPath --- diff --git a/SOURCES/arm64-armv8a-linuxapp-gcc-config b/SOURCES/arm64-armv8a-linuxapp-gcc-config index 06a3d70..3db2efb 100644 --- a/SOURCES/arm64-armv8a-linuxapp-gcc-config +++ b/SOURCES/arm64-armv8a-linuxapp-gcc-config @@ -1,4 +1,4 @@ -# -*- cfg-sha: bfd08c718502ce9a9d75d102e9b680c4ecf9fb2b14b112aa45899a016d3bc7bb +# -*- cfg-sha: b96dcc8ca3ed08c34a442991fd5e08c64629dfcf8cc5833bd9c311dd0b7e0f77 # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2015 Cavium, Inc # SPDX-License-Identifier: BSD-3-Clause @@ -12,7 +12,7 @@ CONFIG_RTE_VER_PREFIX="DPDK" # Version information completed when this file is processed for a build CONFIG_RTE_VER_YEAR=19 CONFIG_RTE_VER_MONTH=11 -CONFIG_RTE_VER_MINOR=3 +CONFIG_RTE_VER_MINOR=7 CONFIG_RTE_VER_SUFFIX="" CONFIG_RTE_VER_RELEASE=99 # RTE_EXEC_ENV values are the directories in mk/exec-env/ @@ -197,7 +197,6 @@ CONFIG_RTE_LIBRTE_ICE_PMD=n CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n -CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n # Compile burst-oriented IAVF PMD driver CONFIG_RTE_LIBRTE_IAVF_PMD=n @@ -328,7 +327,6 @@ CONFIG_RTE_LIBRTE_CRYPTODEV=n CONFIG_RTE_CRYPTO_MAX_DEVS=64 # Compile PMD for ARMv8 Crypto device CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n -CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n # Compile NXP CAAM JR crypto Driver CONFIG_RTE_LIBRTE_PMD_CAAM_JR=n CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE=n @@ -604,3 +602,4 @@ CONFIG_RTE_ARCH_ARM64_MEMCPY=n #CONFIG_RTE_ARM64_MEMCPY_STRICT_ALIGN=n # NXP PFE PMD Driver CONFIG_RTE_TOOLCHAIN_GCC=y +CONFIG_RTE_LIBRTE_PMD_XENVIRT=n diff --git a/SOURCES/openvswitch-2.13.0.patch b/SOURCES/openvswitch-2.13.0.patch index af1fc0b..36605d6 100644 --- a/SOURCES/openvswitch-2.13.0.patch +++ b/SOURCES/openvswitch-2.13.0.patch @@ -1788,19 +1788,83 @@ index 8f90d06f28..77ac26dd85 100644 - env: DEF_LIB="static" BUILD_32BIT=1 compiler: gcc diff --git a/dpdk/MAINTAINERS b/dpdk/MAINTAINERS -index 4395d8df14..10c4e1a613 100644 +index 4395d8df14..952ded7b00 100644 --- a/dpdk/MAINTAINERS +++ b/dpdk/MAINTAINERS -@@ -370,7 +370,7 @@ F: devtools/test-null.sh +@@ -46,7 +46,7 @@ M: Jerin Jacob + T: git://dpdk.org/next/dpdk-next-net-mrvl + + Next-net-mlx Tree +-M: Raslan Darawsheh ++M: Raslan Darawsheh + T: git://dpdk.org/next/dpdk-next-net-mlx + + Next-virtio Tree +@@ -128,8 +128,11 @@ F: meson.build + F: lib/librte_eal/freebsd/BSDmakefile.meson + F: meson_options.txt + F: config/rte_config.h ++F: buildtools/call-sphinx-build.py + F: buildtools/gen-pmdinfo-cfile.sh + F: buildtools/map_to_def.py ++F: buildtools/list-dir-globs.py ++F: buildtools/pkg-config/ + F: buildtools/symlink-drivers-solibs.sh + + Public CI +@@ -370,7 +373,7 @@ F: devtools/test-null.sh F: doc/guides/prog_guide/switch_representation.rst Flow API -M: Adrien Mazarguil -+M: Ori Kam ++M: Ori Kam T: git://dpdk.org/next/dpdk-next-net F: app/test-pmd/cmdline_flow.c F: doc/guides/prog_guide/rte_flow.rst -@@ -910,7 +910,7 @@ F: drivers/net/null/ +@@ -456,8 +459,8 @@ F: lib/librte_eventdev/*crypto_adapter* + F: app/test/test_event_crypto_adapter.c + F: doc/guides/prog_guide/event_crypto_adapter.rst + +-Raw device API - EXPERIMENTAL +-M: Shreyansh Jain ++Raw device API ++M: Nipun Gupta + M: Hemant Agrawal + F: lib/librte_rawdev/ + F: drivers/raw/skeleton/ +@@ -728,17 +731,17 @@ F: 
doc/guides/nics/features/octeontx2*.ini + F: doc/guides/nics/octeontx2.rst + + Mellanox mlx4 +-M: Matan Azrad +-M: Shahaf Shuler ++M: Matan Azrad ++M: Shahaf Shuler + T: git://dpdk.org/next/dpdk-next-net-mlx + F: drivers/net/mlx4/ + F: doc/guides/nics/mlx4.rst + F: doc/guides/nics/features/mlx4.ini + + Mellanox mlx5 +-M: Matan Azrad +-M: Shahaf Shuler +-M: Viacheslav Ovsiienko ++M: Matan Azrad ++M: Shahaf Shuler ++M: Viacheslav Ovsiienko + T: git://dpdk.org/next/dpdk-next-net-mlx + F: drivers/net/mlx5/ + F: buildtools/options-ibverbs-static.sh +@@ -746,7 +749,7 @@ F: doc/guides/nics/mlx5.rst + F: doc/guides/nics/features/mlx5.ini + + Microsoft vdev_netvsc - EXPERIMENTAL +-M: Matan Azrad ++M: Matan Azrad + F: drivers/net/vdev_netvsc/ + F: doc/guides/nics/vdev_netvsc.rst + F: doc/guides/nics/features/vdev_netvsc.ini +@@ -910,7 +913,7 @@ F: drivers/net/null/ F: doc/guides/nics/features/null.ini Fail-safe PMD @@ -1809,7 +1873,7 @@ index 4395d8df14..10c4e1a613 100644 F: drivers/net/failsafe/ F: doc/guides/nics/fail_safe.rst F: doc/guides/nics/features/failsafe.ini -@@ -1373,7 +1373,7 @@ F: app/test/test_rcu* +@@ -1373,7 +1376,7 @@ F: app/test/test_rcu* F: doc/guides/prog_guide/rcu_lib.rst PCI @@ -1818,7 +1882,7 @@ index 4395d8df14..10c4e1a613 100644 F: lib/librte_pci/ Power management -@@ -1434,6 +1434,7 @@ Unit tests framework +@@ -1434,6 +1437,7 @@ Unit tests framework F: app/test/Makefile F: app/test/autotest* F: app/test/commands.c @@ -1826,13 +1890,56 @@ index 4395d8df14..10c4e1a613 100644 F: app/test/packet_burst_generator.c F: app/test/packet_burst_generator.h F: app/test/process.h +@@ -1490,7 +1494,7 @@ M: Marko Kovacevic + F: examples/fips_validation/ + F: doc/guides/sample_app_ug/fips_validation.rst + +-M: Ori Kam ++M: Ori Kam + F: examples/flow_filtering/ + F: doc/guides/sample_app_ug/flow_filtering.rst + diff --git a/dpdk/VERSION b/dpdk/VERSION -index 22131b00aa..a43c349903 100644 +index 22131b00aa..8bda73742f 100644 --- a/dpdk/VERSION +++ b/dpdk/VERSION @@ -1 +1 @@ -19.11.0 -+19.11.3 ++19.11.7 +diff --git a/dpdk/app/meson.build b/dpdk/app/meson.build +index 71109cc422..c7f689eb79 100644 +--- a/dpdk/app/meson.build ++++ b/dpdk/app/meson.build +@@ -22,6 +22,10 @@ apps = [ + lib_execinfo = cc.find_library('execinfo', required: false) + + default_cflags = machine_args ++default_ldflags = [] ++if get_option('default_library') == 'static' and not is_windows ++ default_ldflags += ['-Wl,--export-dynamic'] ++endif + + foreach app:apps + build = true +@@ -30,6 +34,7 @@ foreach app:apps + sources = [] + includes = [] + cflags = default_cflags ++ ldflags = default_ldflags + objs = [] # other object files to link against, used e.g. 
for + # instruction-set optimized versions of code + +@@ -60,8 +65,10 @@ foreach app:apps + executable('dpdk-' + name, + sources, + c_args: cflags, ++ link_args: ldflags, + link_whole: link_libs, + dependencies: dep_objs, ++ include_directories: includes, + install_rpath: join_paths(get_option('prefix'), + driver_install_path), + install: true) diff --git a/dpdk/app/pdump/main.c b/dpdk/app/pdump/main.c index 903d02f482..c38c53719e 100644 --- a/dpdk/app/pdump/main.c @@ -1855,6 +1962,49 @@ index 903d02f482..c38c53719e 100644 ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC_PER_QUEUE, rte_eth_dev_socket_id(port_id), NULL); if (ret < 0) +diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c +index abeca4aab4..f6d19cdac2 100644 +--- a/dpdk/app/proc-info/main.c ++++ b/dpdk/app/proc-info/main.c +@@ -310,14 +310,13 @@ proc_info_parse_args(int argc, char **argv) + } else if (!strncmp(long_option[option_index].name, + "xstats-ids", + MAX_LONG_OPT_SZ)) { +- nb_xstats_ids = parse_xstats_ids(optarg, ++ int ret = parse_xstats_ids(optarg, + xstats_ids, MAX_NB_XSTATS_IDS); +- +- if (nb_xstats_ids <= 0) { ++ if (ret <= 0) { + printf("xstats-id list parse error.\n"); + return -1; + } +- ++ nb_xstats_ids = ret; + } + break; + default: +@@ -429,11 +428,9 @@ static void collectd_resolve_cnt_type(char *cnt_type, size_t cnt_type_len, + } else if ((type_end != NULL) && + (strncmp(cnt_name, "flow_", strlen("flow_"))) == 0) { + if (strncmp(type_end, "_filters", strlen("_filters")) == 0) +- strlcpy(cnt_type, "operations", cnt_type_len); ++ strlcpy(cnt_type, "filter_result", cnt_type_len); + else if (strncmp(type_end, "_errors", strlen("_errors")) == 0) + strlcpy(cnt_type, "errors", cnt_type_len); +- else if (strncmp(type_end, "_filters", strlen("_filters")) == 0) +- strlcpy(cnt_type, "filter_result", cnt_type_len); + } else if ((type_end != NULL) && + (strncmp(cnt_name, "mac_", strlen("mac_"))) == 0) { + if (strncmp(type_end, "_errors", strlen("_errors")) == 0) +@@ -1110,7 +1107,6 @@ show_crypto(void) + + display_crypto_feature_info(dev_info.feature_flags); + +- memset(&stats, 0, sizeof(0)); + if (rte_cryptodev_stats_get(i, &stats) == 0) { + printf("\t -- stats\n"); + printf("\t\t + enqueue count (%"PRIu64")" diff --git a/dpdk/app/test-acl/main.c b/dpdk/app/test-acl/main.c index 57f23942eb..08f06c1fa3 100644 --- a/dpdk/app/test-acl/main.c @@ -1868,10 +2018,126 @@ index 57f23942eb..08f06c1fa3 100644 #define RTE_LOGTYPE_TESTACL RTE_LOGTYPE_USER1 +diff --git a/dpdk/app/test-bbdev/ldpc_enc_default.data b/dpdk/app/test-bbdev/ldpc_enc_default.data +index 371cbc692d..52d51ae330 120000 +--- a/dpdk/app/test-bbdev/ldpc_enc_default.data ++++ b/dpdk/app/test-bbdev/ldpc_enc_default.data +@@ -1 +1 @@ +-test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data +\ No newline at end of file ++test_vectors/ldpc_enc_v2342.data +\ No newline at end of file +diff --git a/dpdk/app/test-crypto-perf/cperf_options_parsing.c b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +index f43c5bede7..49b469781c 100644 +--- a/dpdk/app/test-crypto-perf/cperf_options_parsing.c ++++ b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +@@ -23,7 +23,7 @@ usage(char *progname) + { + printf("%s [EAL options] --\n" + " --silent: disable options dump\n" +- " --ptest throughput / latency / verify / pmd-cycleount :" ++ " --ptest throughput / latency / verify / pmd-cyclecount :" + " set test type\n" + " --pool_sz N: set the number of crypto ops/mbufs allocated\n" + " --total-ops N: set the number of total operations performed\n" 
+diff --git a/dpdk/app/test-crypto-perf/cperf_test_latency.c b/dpdk/app/test-crypto-perf/cperf_test_latency.c +index 62478a2df5..951b4d10ac 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_latency.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_latency.c +@@ -313,11 +313,11 @@ cperf_latency_test_runner(void *arg) + if (ctx->options->csv) { + if (rte_atomic16_test_and_set(&display_once)) + printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, " +- "Packet Size, cycles, time (us)"); ++ "cycles, time (us)"); + + for (i = 0; i < ctx->options->total_ops; i++) { + +- printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f", ++ printf("\n%u,%u,%u,%"PRIu64",%"PRIu64",%.3f", + ctx->lcore_id, ctx->options->test_buffer_size, + test_burst_size, i + 1, + ctx->res[i].tsc_end - ctx->res[i].tsc_start, +diff --git a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +index 74371faa8d..de01e3bc51 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +@@ -16,7 +16,7 @@ + #define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n" + #define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n" + #define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" +-#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.3f;%.3f;%.3f\n" ++#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n" + + struct cperf_pmd_cyclecount_ctx { + uint8_t dev_id; +diff --git a/dpdk/app/test-crypto-perf/cperf_test_throughput.c b/dpdk/app/test-crypto-perf/cperf_test_throughput.c +index 35c51026fe..2528f39571 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_throughput.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_throughput.c +@@ -298,8 +298,8 @@ cperf_throughput_test_runner(void *test_ctx) + "Failed Deq,Ops(Millions),Throughput(Gbps)," + "Cycles/Buf\n\n"); + +- printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";" +- "%.3f;%.3f;%.3f\n", ++ printf("%u,%u,%u,%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"," ++ "%.3f,%.3f,%.3f\n", + ctx->lcore_id, + ctx->options->test_buffer_size, + test_burst_size, +diff --git a/dpdk/app/test-crypto-perf/cperf_test_verify.c b/dpdk/app/test-crypto-perf/cperf_test_verify.c +index 833bc9a552..2939aeaa93 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_verify.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_verify.c +@@ -406,7 +406,7 @@ cperf_verify_test_runner(void *test_ctx) + "Burst Size,Enqueued,Dequeued,Failed Enq," + "Failed Deq,Failed Ops\n"); + +- printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";" ++ printf("%10u,%10u,%u,%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"," + "%"PRIu64"\n", + ctx->lcore_id, + ctx->options->max_buffer_size, diff --git a/dpdk/app/test-crypto-perf/main.c b/dpdk/app/test-crypto-perf/main.c -index 52a1860fbf..7bb286ccbe 100644 +index 52a1860fbf..048b10c652 100644 --- a/dpdk/app/test-crypto-perf/main.c +++ b/dpdk/app/test-crypto-perf/main.c +@@ -380,7 +380,7 @@ cperf_check_test_vector(struct cperf_options *opts, + if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { + if (test_vec->plaintext.data == NULL) + return -1; +- } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) { ++ } else { + if (test_vec->plaintext.data == NULL) + return -1; + if (test_vec->plaintext.length < opts->max_buffer_size) +@@ -430,7 +430,7 @@ cperf_check_test_vector(struct cperf_options *opts, + return -1; + if (test_vec->plaintext.length < opts->max_buffer_size) + return -1; +- } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) { ++ } else { + if 
(test_vec->plaintext.data == NULL) + return -1; + if (test_vec->plaintext.length < opts->max_buffer_size) +@@ -520,14 +520,14 @@ main(int argc, char **argv) + + ret = cperf_options_parse(&opts, argc, argv); + if (ret) { +- RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n"); ++ RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n"); + goto err; + } + + ret = cperf_options_check(&opts); + if (ret) { + RTE_LOG(ERR, USER1, +- "Checking on or more user options failed\n"); ++ "Checking one or more user options failed\n"); + goto err; + } + @@ -582,7 +582,8 @@ main(int argc, char **argv) goto err; } @@ -1882,6 +2148,32 @@ index 52a1860fbf..7bb286ccbe 100644 show_test_vector(t_vec); total_nb_qps = nb_cryptodevs * opts.nb_qps; +diff --git a/dpdk/app/test-crypto-perf/meson.build b/dpdk/app/test-crypto-perf/meson.build +index 0674396da8..dcc4bf9cbc 100644 +--- a/dpdk/app/test-crypto-perf/meson.build ++++ b/dpdk/app/test-crypto-perf/meson.build +@@ -13,3 +13,6 @@ sources = files('cperf_ops.c', + 'cperf_test_verify.c', + 'main.c') + deps += ['cryptodev', 'security'] ++if dpdk_conf.has('RTE_LIBRTE_CRYPTO_SCHEDULER_PMD') ++ deps += 'pmd_crypto_scheduler' ++endif +diff --git a/dpdk/app/test-eventdev/evt_options.c b/dpdk/app/test-eventdev/evt_options.c +index c60b61a904..4f4800d99d 100644 +--- a/dpdk/app/test-eventdev/evt_options.c ++++ b/dpdk/app/test-eventdev/evt_options.c +@@ -197,6 +197,10 @@ evt_parse_nb_timer_adptrs(struct evt_options *opt, const char *arg) + int ret; + + ret = parser_read_uint8(&(opt->nb_timer_adptrs), arg); ++ if (opt->nb_timer_adptrs <= 0) { ++ evt_err("Number of timer adapters cannot be <= 0"); ++ return -EINVAL; ++ } + + return ret; + } diff --git a/dpdk/app/test-eventdev/meson.build b/dpdk/app/test-eventdev/meson.build index 7ff2b786cf..9e588d9ec7 100644 --- a/dpdk/app/test-eventdev/meson.build @@ -1896,6 +2188,59 @@ index 7ff2b786cf..9e588d9ec7 100644 + 'test_pipeline_atq.c', + 'test_pipeline_queue.c') deps += 'eventdev' +diff --git a/dpdk/app/test-eventdev/test_perf_common.h b/dpdk/app/test-eventdev/test_perf_common.h +index d8fbee6d89..e095da9a47 100644 +--- a/dpdk/app/test-eventdev/test_perf_common.h ++++ b/dpdk/app/test-eventdev/test_perf_common.h +@@ -97,8 +97,13 @@ perf_process_last_stage(struct rte_mempool *const pool, + void *bufs[], int const buf_sz, uint8_t count) + { + bufs[count++] = ev->event_ptr; +- w->processed_pkts++; ++ ++ /* wmb here ensures event_prt is stored before ++ * updating the number of processed packets ++ * for worker lcores ++ */ + rte_smp_wmb(); ++ w->processed_pkts++; + + if (unlikely(count == buf_sz)) { + count = 0; +@@ -116,6 +121,12 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, + struct perf_elt *const m = ev->event_ptr; + + bufs[count++] = ev->event_ptr; ++ ++ /* wmb here ensures event_prt is stored before ++ * updating the number of processed packets ++ * for worker lcores ++ */ ++ rte_smp_wmb(); + w->processed_pkts++; + + if (unlikely(count == buf_sz)) { +@@ -127,7 +138,6 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, + } + + w->latency += latency; +- rte_smp_wmb(); + return count; + } + +diff --git a/dpdk/app/test-eventdev/test_pipeline_atq.c b/dpdk/app/test-eventdev/test_pipeline_atq.c +index 8e8686c145..0872b25b53 100644 +--- a/dpdk/app/test-eventdev/test_pipeline_atq.c ++++ b/dpdk/app/test-eventdev/test_pipeline_atq.c +@@ -495,6 +495,8 @@ pipeline_atq_capability_check(struct evt_options *opt) + evt_nr_active_lcores(opt->wlcores), + dev_info.max_event_ports); + } ++ if 
(!evt_has_all_types_queue(opt->dev_id)) ++ return false; + + return true; + } diff --git a/dpdk/app/test-eventdev/test_pipeline_common.c b/dpdk/app/test-eventdev/test_pipeline_common.c index fa91bf2290..126e2165a3 100644 --- a/dpdk/app/test-eventdev/test_pipeline_common.c @@ -1920,6 +2265,89 @@ index fa91bf2290..126e2165a3 100644 return ret; } } +diff --git a/dpdk/app/test-eventdev/test_pipeline_queue.c b/dpdk/app/test-eventdev/test_pipeline_queue.c +index 7bebac34fc..9a9febb199 100644 +--- a/dpdk/app/test-eventdev/test_pipeline_queue.c ++++ b/dpdk/app/test-eventdev/test_pipeline_queue.c +@@ -83,16 +83,15 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg) + rte_prefetch0(ev[i + 1].mbuf); + if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) { + pipeline_event_tx(dev, port, &ev[i]); +- ev[i].op = RTE_EVENT_OP_RELEASE; + w->processed_pkts++; + } else { + ev[i].queue_id++; + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); ++ pipeline_event_enqueue_burst(dev, port, ev, ++ nb_rx); + } + } +- +- pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +@@ -180,13 +179,13 @@ pipeline_queue_worker_multi_stage_fwd(void *arg) + ev.queue_id = tx_queue[ev.mbuf->port]; + rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0); + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); ++ pipeline_event_enqueue(dev, port, &ev); + w->processed_pkts++; + } else { + ev.queue_id++; + pipeline_fwd_event(&ev, sched_type_list[cq_id]); ++ pipeline_event_enqueue(dev, port, &ev); + } +- +- pipeline_event_enqueue(dev, port, &ev); + } + + return 0; +@@ -213,7 +212,6 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg) + + if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) { + pipeline_event_tx(dev, port, &ev[i]); +- ev[i].op = RTE_EVENT_OP_RELEASE; + w->processed_pkts++; + continue; + } +@@ -222,9 +220,8 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg) + pipeline_fwd_event(&ev[i], cq_id != last_queue ? 
+ sched_type_list[cq_id] : + RTE_SCHED_TYPE_ATOMIC); ++ pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } +- +- pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +@@ -237,6 +234,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg) + const uint8_t *tx_queue = t->tx_evqueue_id; + + while (t->done == false) { ++ uint16_t processed_pkts = 0; + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + +@@ -254,7 +252,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg) + rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0); + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); +- w->processed_pkts++; ++ processed_pkts++; + } else { + ev[i].queue_id++; + pipeline_fwd_event(&ev[i], +@@ -263,6 +261,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg) + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); ++ w->processed_pkts += processed_pkts; + } + + return 0; diff --git a/dpdk/app/test-pipeline/config.c b/dpdk/app/test-pipeline/config.c index 28ac9fcc0e..33f3f1c827 100644 --- a/dpdk/app/test-pipeline/config.c @@ -1933,8 +2361,30 @@ index 28ac9fcc0e..33f3f1c827 100644 static const char usage[] = "\n"; void +diff --git a/dpdk/app/test-pmd/bpf_cmd.c b/dpdk/app/test-pmd/bpf_cmd.c +index 830bfc13a5..d2deadd4e6 100644 +--- a/dpdk/app/test-pmd/bpf_cmd.c ++++ b/dpdk/app/test-pmd/bpf_cmd.c +@@ -55,7 +55,7 @@ static const struct rte_bpf_xsym bpf_xsym[] = { + struct cmd_bpf_ld_result { + cmdline_fixed_string_t bpf; + cmdline_fixed_string_t dir; +- uint8_t port; ++ uint16_t port; + uint16_t queue; + cmdline_fixed_string_t op; + cmdline_fixed_string_t flags; +@@ -153,7 +153,7 @@ cmdline_parse_inst_t cmd_operate_bpf_ld_parse = { + struct cmd_bpf_unld_result { + cmdline_fixed_string_t bpf; + cmdline_fixed_string_t dir; +- uint8_t port; ++ uint16_t port; + uint16_t queue; + }; + diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c -index 9f3e0b251b..d508d1e26d 100644 +index 9f3e0b251b..9a9da744a1 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c @@ -94,7 +94,7 @@ static void cmd_help_brief_parsed(__attribute__((unused)) void *parsed_result, @@ -1946,6 +2396,15 @@ index 9f3e0b251b..d508d1e26d 100644 " help devices : Device related cmds.\n" " help all : All of the above sections.\n\n" ); +@@ -614,7 +614,7 @@ static void cmd_help_long_parsed(void *parsed_result, + "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)" + " Set Aggregation mode for IEEE802.3AD (mode 4)" + +- "set bonding xmit_balance_policy (port_id) (l2|l23|l34)\n" ++ "set bonding balance_xmit_policy (port_id) (l2|l23|l34)\n" + " Set the transmit balance policy for bonded device running in balance mode.\n\n" + + "set bonding mon_period (port_id) (value)\n" @@ -1437,7 +1437,7 @@ cmdline_parse_inst_t cmd_set_port_setup_on = { struct cmd_operate_attach_port_result { cmdline_fixed_string_t port; @@ -1988,7 +2447,101 @@ index 9f3e0b251b..d508d1e26d 100644 else printf("Unknown parameter\n"); } -@@ -5120,7 +5122,7 @@ cmd_gso_size_parsed(void *parsed_result, +@@ -1911,18 +1913,13 @@ cmd_config_rx_tx_parsed(void *parsed_result, + nb_txq = res->value; + } + else if (!strcmp(res->name, "rxd")) { +- if (res->value <= 0 || res->value > RTE_TEST_RX_DESC_MAX) { +- printf("rxd %d invalid - must be > 0 && <= %d\n", +- res->value, RTE_TEST_RX_DESC_MAX); ++ if (check_nb_rxd(res->value) != 0) + return; +- } + nb_rxd = res->value; + } else if (!strcmp(res->name, "txd")) { +- if (res->value <= 0 || res->value > RTE_TEST_TX_DESC_MAX) { +- 
printf("txd %d invalid - must be > 0 && <= %d\n", +- res->value, RTE_TEST_TX_DESC_MAX); ++ if (check_nb_txd(res->value) != 0) + return; +- } ++ + nb_txd = res->value; + } else { + printf("Unknown parameter\n"); +@@ -1977,7 +1974,9 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, + __attribute__((unused)) void *data) + { + struct cmd_config_max_pkt_len_result *res = parsed_result; ++ uint32_t max_rx_pkt_len_backup = 0; + portid_t pid; ++ int ret; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); +@@ -1986,7 +1985,6 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_port *port = &ports[pid]; +- uint64_t rx_offloads = port->dev_conf.rxmode.offloads; + + if (!strcmp(res->name, "max-pkt-len")) { + if (res->value < RTE_ETHER_MIN_LEN) { +@@ -1997,12 +1995,18 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, + if (res->value == port->dev_conf.rxmode.max_rx_pkt_len) + return; + ++ ret = eth_dev_info_get_print_err(pid, &port->dev_info); ++ if (ret != 0) { ++ printf("rte_eth_dev_info_get() failed for port %u\n", ++ pid); ++ return; ++ } ++ ++ max_rx_pkt_len_backup = port->dev_conf.rxmode.max_rx_pkt_len; ++ + port->dev_conf.rxmode.max_rx_pkt_len = res->value; +- if (res->value > RTE_ETHER_MAX_LEN) +- rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; +- else +- rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +- port->dev_conf.rxmode.offloads = rx_offloads; ++ if (update_jumbo_frame_offload(pid) != 0) ++ port->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len_backup; + } else { + printf("Unknown parameter\n"); + return; +@@ -4171,6 +4175,9 @@ cmd_tx_vlan_set_parsed(void *parsed_result, + { + struct cmd_tx_vlan_set_result *res = parsed_result; + ++ if (port_id_is_invalid(res->port_id, ENABLED_WARN)) ++ return; ++ + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; +@@ -4225,6 +4232,9 @@ cmd_tx_vlan_set_qinq_parsed(void *parsed_result, + { + struct cmd_tx_vlan_set_qinq_result *res = parsed_result; + ++ if (port_id_is_invalid(res->port_id, ENABLED_WARN)) ++ return; ++ + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; +@@ -4338,6 +4348,9 @@ cmd_tx_vlan_reset_parsed(void *parsed_result, + { + struct cmd_tx_vlan_reset_result *res = parsed_result; + ++ if (port_id_is_invalid(res->port_id, ENABLED_WARN)) ++ return; ++ + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; +@@ -5120,7 +5133,7 @@ cmd_gso_size_parsed(void *parsed_result, if (test_done == 0) { printf("Before setting GSO segsz, please first" @@ -1997,7 +2550,7 @@ index 9f3e0b251b..d508d1e26d 100644 return; } -@@ -7078,9 +7080,10 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result, +@@ -7078,9 +7091,10 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result, * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side. */ static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = { @@ -2009,7 +2562,7 @@ index 9f3e0b251b..d508d1e26d 100644 rx_fc_enable = (!strncmp(res->rx_pfc_mode, "on",2)) ? 1 : 0; tx_fc_enable = (!strncmp(res->tx_pfc_mode, "on",2)) ? 
1 : 0; pfc_conf.fc.mode = rx_tx_onoff_2_pfc_mode[rx_fc_enable][tx_fc_enable]; -@@ -16802,8 +16805,10 @@ cmd_ddp_get_list_parsed( +@@ -16802,8 +16816,10 @@ cmd_ddp_get_list_parsed( #ifdef RTE_LIBRTE_I40E_PMD size = PROFILE_INFO_SIZE * MAX_PROFILE_NUM + 4; p_list = (struct rte_pmd_i40e_profile_list *)malloc(size); @@ -2021,8 +2574,42 @@ index 9f3e0b251b..d508d1e26d 100644 if (ret == -ENOTSUP) ret = rte_pmd_i40e_get_ddp_list(res->port_id, +@@ -19494,6 +19510,7 @@ cmdline_read_from_file(const char *filename) + void + prompt(void) + { ++ int ret; + /* initialize non-constant commands */ + cmd_set_fwd_mode_init(); + cmd_set_fwd_retry_mode_init(); +@@ -19501,15 +19518,23 @@ prompt(void) + testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> "); + if (testpmd_cl == NULL) + return; ++ ++ ret = atexit(prompt_exit); ++ if (ret != 0) ++ printf("Cannot set exit function for cmdline\n"); ++ + cmdline_interact(testpmd_cl); +- cmdline_stdin_exit(testpmd_cl); ++ if (ret != 0) ++ cmdline_stdin_exit(testpmd_cl); + } + + void + prompt_exit(void) + { +- if (testpmd_cl != NULL) ++ if (testpmd_cl != NULL) { + cmdline_quit(testpmd_cl); ++ cmdline_stdin_exit(testpmd_cl); ++ } + } + + static void diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c -index 99dade7d8c..deced65016 100644 +index 99dade7d8c..da3533c557 100644 --- a/dpdk/app/test-pmd/cmdline_flow.c +++ b/dpdk/app/test-pmd/cmdline_flow.c @@ -1005,7 +1005,6 @@ static const enum index item_pppoes[] = { @@ -2050,7 +2637,52 @@ index 99dade7d8c..deced65016 100644 .call = parse_vc, }, [ITEM_HIGIG2] = { -@@ -4534,7 +4536,9 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, +@@ -2766,7 +2768,10 @@ static const struct token token_list[] = { + .name = "key", + .help = "RSS hash key", + .next = NEXT(action_rss, NEXT_ENTRY(HEX)), +- .args = ARGS(ARGS_ENTRY_ARB(0, 0), ++ .args = ARGS(ARGS_ENTRY_ARB ++ (offsetof(struct action_rss_data, conf) + ++ offsetof(struct rte_flow_action_rss, key), ++ sizeof(((struct rte_flow_action_rss *)0)->key)), + ARGS_ENTRY_ARB + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, key_len), +@@ -3898,30 +3903,15 @@ parse_vc_action_rss(struct context *ctx, const struct token *token, + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = rss_hf, +- .key_len = sizeof(action_rss_data->key), ++ .key_len = 0, + .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM), +- .key = action_rss_data->key, ++ .key = NULL, + .queue = action_rss_data->queue, + }, +- .key = "testpmd's default RSS hash key, " +- "override it for better balancing", + .queue = { 0 }, + }; + for (i = 0; i < action_rss_data->conf.queue_num; ++i) + action_rss_data->queue[i] = i; +- if (!port_id_is_invalid(ctx->port, DISABLED_WARN) && +- ctx->port != (portid_t)RTE_PORT_ALL) { +- struct rte_eth_dev_info info; +- int ret2; +- +- ret2 = rte_eth_dev_info_get(ctx->port, &info); +- if (ret2 != 0) +- return ret2; +- +- action_rss_data->conf.key_len = +- RTE_MIN(sizeof(action_rss_data->key), +- info.hash_key_size); +- } + action->conf = &action_rss_data->conf; + return ret; + } +@@ -4534,7 +4524,9 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, struct rte_flow_item_gre gre = { .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST), }; @@ -2061,7 +2693,7 @@ index 99dade7d8c..deced65016 100644 uint8_t *header; int ret; -@@ -6236,6 +6240,9 @@ flow_item_default_mask(const struct rte_flow_item *item) +@@ -6236,6 +6228,9 @@ flow_item_default_mask(const struct 
rte_flow_item *item) case RTE_FLOW_ITEM_TYPE_GTP_PSC: mask = &rte_flow_item_gtp_psc_mask; break; @@ -2071,11 +2703,234 @@ index 99dade7d8c..deced65016 100644 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID: mask = &rte_flow_item_pppoe_proto_id_mask; default: +diff --git a/dpdk/app/test-pmd/cmdline_mtr.c b/dpdk/app/test-pmd/cmdline_mtr.c +index ab5c8642db..c6e7529b3d 100644 +--- a/dpdk/app/test-pmd/cmdline_mtr.c ++++ b/dpdk/app/test-pmd/cmdline_mtr.c +@@ -312,7 +312,7 @@ static void cmd_show_port_meter_cap_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_show_port_meter_cap = { + .f = cmd_show_port_meter_cap_parsed, + .data = NULL, +- .help_str = "Show port meter cap", ++ .help_str = "show port meter cap ", + .tokens = { + (void *)&cmd_show_port_meter_cap_show, + (void *)&cmd_show_port_meter_cap_port, +@@ -408,7 +408,7 @@ static void cmd_add_port_meter_profile_srtcm_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm = { + .f = cmd_add_port_meter_profile_srtcm_parsed, + .data = NULL, +- .help_str = "Add port meter profile srtcm (rfc2697)", ++ .help_str = "add port meter profile srtcm_rfc2697 ", + .tokens = { + (void *)&cmd_add_port_meter_profile_srtcm_add, + (void *)&cmd_add_port_meter_profile_srtcm_port, +@@ -515,7 +515,7 @@ static void cmd_add_port_meter_profile_trtcm_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm = { + .f = cmd_add_port_meter_profile_trtcm_parsed, + .data = NULL, +- .help_str = "Add port meter profile trtcm (rfc2698)", ++ .help_str = "add port meter profile trtcm_rfc2698 ", + .tokens = { + (void *)&cmd_add_port_meter_profile_trtcm_add, + (void *)&cmd_add_port_meter_profile_trtcm_port, +@@ -627,7 +627,7 @@ static void cmd_add_port_meter_profile_trtcm_rfc4115_parsed( + cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115 = { + .f = cmd_add_port_meter_profile_trtcm_rfc4115_parsed, + .data = NULL, +- .help_str = "Add port meter profile trtcm (rfc4115)", ++ .help_str = "add port meter profile trtcm_rfc4115 ", + .tokens = { + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_add, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port, +@@ -702,7 +702,7 @@ static void cmd_del_port_meter_profile_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_del_port_meter_profile = { + .f = cmd_del_port_meter_profile_parsed, + .data = NULL, +- .help_str = "Delete port meter profile", ++ .help_str = "del port meter profile ", + .tokens = { + (void *)&cmd_del_port_meter_profile_del, + (void *)&cmd_del_port_meter_profile_port, +@@ -827,7 +827,10 @@ static void cmd_create_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_create_port_meter = { + .f = cmd_create_port_meter_parsed, + .data = NULL, +- .help_str = "Create port meter", ++ .help_str = "create port meter (yes|no) " ++ "(R|Y|G|D) (R|Y|G|D) (R|Y|G|D) " ++ " " ++ "[ ...]", + .tokens = { + (void *)&cmd_create_port_meter_create, + (void *)&cmd_create_port_meter_port, +@@ -896,7 +899,7 @@ static void cmd_enable_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_enable_port_meter = { + .f = cmd_enable_port_meter_parsed, + .data = NULL, +- .help_str = "Enable port meter", ++ .help_str = "enable port meter ", + .tokens = { + (void *)&cmd_enable_port_meter_enable, + (void *)&cmd_enable_port_meter_port, +@@ -957,7 +960,7 @@ static void cmd_disable_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_disable_port_meter = { + .f = cmd_disable_port_meter_parsed, + .data = NULL, +- .help_str = "Disable port meter", ++ .help_str 
= "disable port meter ", + .tokens = { + (void *)&cmd_disable_port_meter_disable, + (void *)&cmd_disable_port_meter_port, +@@ -1018,7 +1021,7 @@ static void cmd_del_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_del_port_meter = { + .f = cmd_del_port_meter_parsed, + .data = NULL, +- .help_str = "Delete port meter", ++ .help_str = "del port meter ", + .tokens = { + (void *)&cmd_del_port_meter_del, + (void *)&cmd_del_port_meter_port, +@@ -1089,7 +1092,7 @@ static void cmd_set_port_meter_profile_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_profile = { + .f = cmd_set_port_meter_profile_parsed, + .data = NULL, +- .help_str = "Set port meter profile", ++ .help_str = "set port meter profile ", + .tokens = { + (void *)&cmd_set_port_meter_profile_set, + (void *)&cmd_set_port_meter_profile_port, +@@ -1163,7 +1166,8 @@ static void cmd_set_port_meter_dscp_table_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_dscp_table = { + .f = cmd_set_port_meter_dscp_table_parsed, + .data = NULL, +- .help_str = "Update port meter dscp table", ++ .help_str = "set port meter dscp table " ++ "[ ... ]", + .tokens = { + (void *)&cmd_set_port_meter_dscp_table_set, + (void *)&cmd_set_port_meter_dscp_table_port, +@@ -1262,6 +1266,7 @@ static void cmd_set_port_meter_policer_action_parsed(void *parsed_result, + ret = rte_mtr_policer_actions_update(port_id, mtr_id, + action_mask, actions, &error); + if (ret != 0) { ++ free(actions); + print_err_msg(&error); + return; + } +@@ -1272,7 +1277,8 @@ static void cmd_set_port_meter_policer_action_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_policer_action = { + .f = cmd_set_port_meter_policer_action_parsed, + .data = NULL, +- .help_str = "Set port meter policer action", ++ .help_str = "set port meter policer action " ++ " [ ]", + .tokens = { + (void *)&cmd_set_port_meter_policer_action_set, + (void *)&cmd_set_port_meter_policer_action_port, +@@ -1349,7 +1355,7 @@ static void cmd_set_port_meter_stats_mask_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_stats_mask = { + .f = cmd_set_port_meter_stats_mask_parsed, + .data = NULL, +- .help_str = "Set port meter stats mask", ++ .help_str = "set port meter stats mask ", + .tokens = { + (void *)&cmd_set_port_meter_stats_mask_set, + (void *)&cmd_set_port_meter_stats_mask_port, +@@ -1453,7 +1459,7 @@ static void cmd_show_port_meter_stats_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_show_port_meter_stats = { + .f = cmd_show_port_meter_stats_parsed, + .data = NULL, +- .help_str = "Show port meter stats", ++ .help_str = "show port meter stats (yes|no)", + .tokens = { + (void *)&cmd_show_port_meter_stats_show, + (void *)&cmd_show_port_meter_stats_port, diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c -index d599682788..42eba68b35 100644 +index d599682788..e14ff42745 100644 --- a/dpdk/app/test-pmd/config.c +++ b/dpdk/app/test-pmd/config.c -@@ -223,11 +223,26 @@ nic_stats_display(portid_t port_id) +@@ -53,6 +53,14 @@ + + #include "testpmd.h" + ++#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ ++#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW ++#else ++#define CLOCK_TYPE_ID CLOCK_MONOTONIC ++#endif ++ ++#define NS_PER_SEC 1E9 ++ + static char *flowtype_to_str(uint16_t flow_type); + + static const struct { +@@ -125,9 +133,10 @@ nic_stats_display(portid_t port_id) + static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; + static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; + static uint64_t 
prev_bytes_tx[RTE_MAX_ETHPORTS]; +- static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; ++ static uint64_t prev_ns[RTE_MAX_ETHPORTS]; ++ struct timespec cur_time; + uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, +- diff_cycles; ++ diff_ns; + uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; + struct rte_eth_stats stats; + struct rte_port *port = &ports[port_id]; +@@ -184,10 +193,17 @@ nic_stats_display(portid_t port_id) + } + } + +- diff_cycles = prev_cycles[port_id]; +- prev_cycles[port_id] = rte_rdtsc(); +- if (diff_cycles > 0) +- diff_cycles = prev_cycles[port_id] - diff_cycles; ++ diff_ns = 0; ++ if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { ++ uint64_t ns; ++ ++ ns = cur_time.tv_sec * NS_PER_SEC; ++ ns += cur_time.tv_nsec; ++ ++ if (prev_ns[port_id] != 0) ++ diff_ns = ns - prev_ns[port_id]; ++ prev_ns[port_id] = ns; ++ } + + diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? + (stats.ipackets - prev_pkts_rx[port_id]) : 0; +@@ -195,10 +211,10 @@ nic_stats_display(portid_t port_id) + (stats.opackets - prev_pkts_tx[port_id]) : 0; + prev_pkts_rx[port_id] = stats.ipackets; + prev_pkts_tx[port_id] = stats.opackets; +- mpps_rx = diff_cycles > 0 ? +- diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; +- mpps_tx = diff_cycles > 0 ? +- diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; ++ mpps_rx = diff_ns > 0 ? ++ (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; ++ mpps_tx = diff_ns > 0 ? ++ (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; + + diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? + (stats.ibytes - prev_bytes_rx[port_id]) : 0; +@@ -206,10 +222,10 @@ nic_stats_display(portid_t port_id) + (stats.obytes - prev_bytes_tx[port_id]) : 0; + prev_bytes_rx[port_id] = stats.ibytes; + prev_bytes_tx[port_id] = stats.obytes; +- mbps_rx = diff_cycles > 0 ? +- diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; +- mbps_tx = diff_cycles > 0 ? +- diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0; ++ mbps_rx = diff_ns > 0 ? ++ (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; ++ mbps_tx = diff_ns > 0 ? 
++ (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; + + printf("\n Throughput (since last show)\n"); + printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" +@@ -223,11 +239,28 @@ nic_stats_display(portid_t port_id) void nic_stats_clear(portid_t port_id) { @@ -2090,12 +2945,14 @@ index d599682788..42eba68b35 100644 + ret = rte_eth_stats_reset(port_id); + if (ret != 0) { + printf("%s: Error: failed to reset stats (port %u): %s", -+ __func__, port_id, strerror(ret)); ++ __func__, port_id, strerror(-ret)); + return; + } + + ret = rte_eth_stats_get(port_id, &ports[port_id].stats); + if (ret != 0) { ++ if (ret < 0) ++ ret = -ret; + printf("%s: Error: failed to get stats (port %u): %s", + __func__, port_id, strerror(ret)); + return; @@ -2103,7 +2960,7 @@ index d599682788..42eba68b35 100644 printf("\n NIC statistics for port %d cleared\n", port_id); } -@@ -303,10 +318,19 @@ nic_xstats_clear(portid_t port_id) +@@ -303,10 +336,21 @@ nic_xstats_clear(portid_t port_id) print_valid_ports(); return; } @@ -2111,19 +2968,21 @@ index d599682788..42eba68b35 100644 ret = rte_eth_xstats_reset(port_id); if (ret != 0) { printf("%s: Error: failed to reset xstats (port %u): %s", - __func__, port_id, strerror(ret)); ++ __func__, port_id, strerror(-ret)); + return; + } + + ret = rte_eth_stats_get(port_id, &ports[port_id].stats); + if (ret != 0) { ++ if (ret < 0) ++ ret = -ret; + printf("%s: Error: failed to get stats (port %u): %s", -+ __func__, port_id, strerror(ret)); + __func__, port_id, strerror(ret)); + return; } } -@@ -1216,7 +1240,9 @@ void +@@ -1216,7 +1260,9 @@ void port_mtu_set(portid_t port_id, uint16_t mtu) { int diag; @@ -2133,20 +2992,23 @@ index d599682788..42eba68b35 100644 int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) -@@ -1232,8 +1258,25 @@ port_mtu_set(portid_t port_id, uint16_t mtu) +@@ -1232,9 +1278,24 @@ port_mtu_set(portid_t port_id, uint16_t mtu) return; } diag = rte_eth_dev_set_mtu(port_id, mtu); - if (diag == 0) -+ if (diag == 0 && -+ dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- return; +- printf("Set MTU failed. diag=%d\n", diag); ++ if (diag) ++ printf("Set MTU failed. diag=%d\n", diag); ++ else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { + /* + * Ether overhead in driver is equal to the difference of + * max_rx_pktlen and max_mtu in rte_eth_dev_info when the + * device supports jumbo frame. + */ + eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; -+ if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { ++ if (mtu > RTE_ETHER_MTU) { + rte_port->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rte_port->dev_conf.rxmode.max_rx_pkt_len = @@ -2154,13 +3016,393 @@ index d599682788..42eba68b35 100644 + } else + rte_port->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; ++ } + } + + /* Generic flow management functions. */ +@@ -1507,7 +1568,7 @@ port_flow_query(portid_t port_id, uint32_t rule, + + /** List flow rules. 
*/ + void +-port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) ++port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) + { + struct rte_port *port; + struct port_flow *pf; +@@ -1624,22 +1685,102 @@ tx_queue_id_is_invalid(queueid_t txq_id) + } + + static int +-rx_desc_id_is_invalid(uint16_t rxdesc_id) ++get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) + { +- if (rxdesc_id < nb_rxd) ++ struct rte_port *port = &ports[port_id]; ++ struct rte_eth_rxq_info rx_qinfo; ++ int ret; ++ ++ ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); ++ if (ret == 0) { ++ *ring_size = rx_qinfo.nb_desc; ++ return ret; ++ } ++ ++ if (ret != -ENOTSUP) ++ return ret; ++ /* ++ * If the rte_eth_rx_queue_info_get is not support for this PMD, ++ * ring_size stored in testpmd will be used for validity verification. ++ * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc ++ * being 0, it will use a default value provided by PMDs to setup this ++ * rxq. If the default value is 0, it will use the ++ * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. ++ */ ++ if (port->nb_rx_desc[rxq_id]) ++ *ring_size = port->nb_rx_desc[rxq_id]; ++ else if (port->dev_info.default_rxportconf.ring_size) ++ *ring_size = port->dev_info.default_rxportconf.ring_size; ++ else ++ *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; ++ return 0; ++} ++ ++static int ++get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) ++{ ++ struct rte_port *port = &ports[port_id]; ++ struct rte_eth_txq_info tx_qinfo; ++ int ret; ++ ++ ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); ++ if (ret == 0) { ++ *ring_size = tx_qinfo.nb_desc; ++ return ret; ++ } ++ ++ if (ret != -ENOTSUP) ++ return ret; ++ /* ++ * If the rte_eth_tx_queue_info_get is not support for this PMD, ++ * ring_size stored in testpmd will be used for validity verification. ++ * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc ++ * being 0, it will use a default value provided by PMDs to setup this ++ * txq. If the default value is 0, it will use the ++ * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 
++ */ ++ if (port->nb_tx_desc[txq_id]) ++ *ring_size = port->nb_tx_desc[txq_id]; ++ else if (port->dev_info.default_txportconf.ring_size) ++ *ring_size = port->dev_info.default_txportconf.ring_size; ++ else ++ *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; ++ return 0; ++} ++ ++static int ++rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) ++{ ++ uint16_t ring_size; ++ int ret; ++ ++ ret = get_rx_ring_size(port_id, rxq_id, &ring_size); ++ if (ret) ++ return 1; ++ ++ if (rxdesc_id < ring_size) + return 0; +- printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", +- rxdesc_id, nb_rxd); ++ ++ printf("Invalid RX descriptor %u (must be < ring_size=%u)\n", ++ rxdesc_id, ring_size); + return 1; + } + + static int +-tx_desc_id_is_invalid(uint16_t txdesc_id) ++tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) + { +- if (txdesc_id < nb_txd) ++ uint16_t ring_size; ++ int ret; ++ ++ ret = get_tx_ring_size(port_id, txq_id, &ring_size); ++ if (ret) ++ return 1; ++ ++ if (txdesc_id < ring_size) + return 0; +- printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", +- txdesc_id, nb_txd); ++ ++ printf("Invalid TX descriptor %u (must be < ring_size=%u)\n", ++ txdesc_id, ring_size); + return 1; + } + +@@ -1760,11 +1901,7 @@ rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) + { + const struct rte_memzone *rx_mz; + +- if (port_id_is_invalid(port_id, ENABLED_WARN)) +- return; +- if (rx_queue_id_is_invalid(rxq_id)) +- return; +- if (rx_desc_id_is_invalid(rxd_id)) ++ if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) + return; + rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); + if (rx_mz == NULL) +@@ -1777,11 +1914,7 @@ tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) + { + const struct rte_memzone *tx_mz; + +- if (port_id_is_invalid(port_id, ENABLED_WARN)) +- return; +- if (tx_queue_id_is_invalid(txq_id)) +- return; +- if (tx_desc_id_is_invalid(txd_id)) ++ if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) + return; + tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); + if (tx_mz == NULL) +@@ -1822,10 +1955,17 @@ rxtx_config_display(void) + struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; + uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; + uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; +- uint16_t nb_rx_desc_tmp; +- uint16_t nb_tx_desc_tmp; + struct rte_eth_rxq_info rx_qinfo; + struct rte_eth_txq_info tx_qinfo; ++ uint16_t rx_free_thresh_tmp; ++ uint16_t tx_free_thresh_tmp; ++ uint16_t tx_rs_thresh_tmp; ++ uint16_t nb_rx_desc_tmp; ++ uint16_t nb_tx_desc_tmp; ++ uint64_t offloads_tmp; ++ uint8_t pthresh_tmp; ++ uint8_t hthresh_tmp; ++ uint8_t wthresh_tmp; + int32_t rc; + + /* per port config */ +@@ -1839,41 +1979,64 @@ rxtx_config_display(void) + /* per rx queue config only for first queue to be less verbose */ + for (qid = 0; qid < 1; qid++) { + rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); +- if (rc) ++ if (rc) { + nb_rx_desc_tmp = nb_rx_desc[qid]; +- else ++ rx_free_thresh_tmp = ++ rx_conf[qid].rx_free_thresh; ++ pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; ++ hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; ++ wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; ++ offloads_tmp = rx_conf[qid].offloads; ++ } else { + nb_rx_desc_tmp = rx_qinfo.nb_desc; ++ rx_free_thresh_tmp = ++ rx_qinfo.conf.rx_free_thresh; ++ pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; ++ hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; ++ wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; ++ offloads_tmp 
= rx_qinfo.conf.offloads; ++ } + + printf(" RX queue: %d\n", qid); + printf(" RX desc=%d - RX free threshold=%d\n", +- nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); ++ nb_rx_desc_tmp, rx_free_thresh_tmp); + printf(" RX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", +- rx_conf[qid].rx_thresh.pthresh, +- rx_conf[qid].rx_thresh.hthresh, +- rx_conf[qid].rx_thresh.wthresh); +- printf(" RX Offloads=0x%"PRIx64"\n", +- rx_conf[qid].offloads); ++ pthresh_tmp, hthresh_tmp, wthresh_tmp); ++ printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); + } + + /* per tx queue config only for first queue to be less verbose */ + for (qid = 0; qid < 1; qid++) { + rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); +- if (rc) ++ if (rc) { + nb_tx_desc_tmp = nb_tx_desc[qid]; +- else ++ tx_free_thresh_tmp = ++ tx_conf[qid].tx_free_thresh; ++ pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; ++ hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; ++ wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; ++ offloads_tmp = tx_conf[qid].offloads; ++ tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; ++ } else { + nb_tx_desc_tmp = tx_qinfo.nb_desc; ++ tx_free_thresh_tmp = ++ tx_qinfo.conf.tx_free_thresh; ++ pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; ++ hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; ++ wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; ++ offloads_tmp = tx_qinfo.conf.offloads; ++ tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; ++ } + + printf(" TX queue: %d\n", qid); + printf(" TX desc=%d - TX free threshold=%d\n", +- nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); ++ nb_tx_desc_tmp, tx_free_thresh_tmp); + printf(" TX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", +- tx_conf[qid].tx_thresh.pthresh, +- tx_conf[qid].tx_thresh.hthresh, +- tx_conf[qid].tx_thresh.wthresh); ++ pthresh_tmp, hthresh_tmp, wthresh_tmp); + printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", +- tx_conf[qid].offloads, tx_conf->tx_rs_thresh); ++ offloads_tmp, tx_rs_thresh_tmp); + } + } + } +@@ -2518,6 +2681,10 @@ set_fwd_lcores_mask(uint64_t lcoremask) + void + set_fwd_lcores_number(uint16_t nb_lc) + { ++ if (test_done == 0) { ++ printf("Please stop forwarding first\n"); ++ return; ++ } + if (nb_lc > nb_cfg_lcores) { + printf("nb fwd cores %u > %u (max. 
number of configured " + "lcores) - ignored\n", +@@ -2665,17 +2832,41 @@ show_tx_pkt_segments(void) + printf("Split packet: %s\n", split); + } + ++static bool ++nb_segs_is_invalid(unsigned int nb_segs) ++{ ++ uint16_t ring_size; ++ uint16_t queue_id; ++ uint16_t port_id; ++ int ret; ++ ++ RTE_ETH_FOREACH_DEV(port_id) { ++ for (queue_id = 0; queue_id < nb_txq; queue_id++) { ++ ret = get_tx_ring_size(port_id, queue_id, &ring_size); ++ ++ if (ret) ++ return true; ++ ++ if (ring_size < nb_segs) { ++ printf("nb segments per TX packets=%u >= " ++ "TX queue(%u) ring_size=%u - ignored\n", ++ nb_segs, queue_id, ring_size); ++ return true; ++ } ++ } ++ } ++ ++ return false; ++} + + void + set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) + { + uint16_t tx_pkt_len; + unsigned i; + +- if (nb_segs >= (unsigned) nb_txd) { +- printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", +- nb_segs, (unsigned int) nb_txd); ++ if (nb_segs_is_invalid(nb_segs)) return; +- } + + /* + * Check that each segment length is greater or equal than +@@ -3019,9 +3210,11 @@ vlan_extend_set(portid_t port_id, int on) + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); +- if (diag < 0) ++ if (diag < 0) { + printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " + "diag=%d\n", port_id, on, diag); ++ return; ++ } + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; + } + +@@ -3046,9 +3239,11 @@ rx_vlan_strip_set(portid_t port_id, int on) + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); +- if (diag < 0) ++ if (diag < 0) { + printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " + "diag=%d\n", port_id, on, diag); ++ return; ++ } + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; + } + +@@ -3087,9 +3282,11 @@ rx_vlan_filter_set(portid_t port_id, int on) + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); +- if (diag < 0) ++ if (diag < 0) { + printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " + "diag=%d\n", port_id, on, diag); ++ return; ++ } + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; + } + +@@ -3114,9 +3311,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on) + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); +- if (diag < 0) ++ if (diag < 0) { + printf("%s(port_pi=%d, on=%d) failed " + "diag=%d\n", __func__, port_id, on, diag); ++ return; + } - printf("Set MTU failed. 
diag=%d\n", diag); + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } -@@ -3707,6 +3750,14 @@ mcast_addr_pool_extend(struct rte_port *port) +@@ -3174,8 +3373,6 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id) + struct rte_eth_dev_info dev_info; + int ret; + +- if (port_id_is_invalid(port_id, ENABLED_WARN)) +- return; + if (vlan_id_is_invalid(vlan_id)) + return; + +@@ -3206,8 +3403,6 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) + struct rte_eth_dev_info dev_info; + int ret; + +- if (port_id_is_invalid(port_id, ENABLED_WARN)) +- return; + if (vlan_id_is_invalid(vlan_id)) + return; + if (vlan_id_is_invalid(vlan_id_outer)) +@@ -3233,8 +3428,6 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) + void + tx_vlan_reset(portid_t port_id) + { +- if (port_id_is_invalid(port_id, ENABLED_WARN)) +- return; + ports[port_id].dev_conf.txmode.offloads &= + ~(DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT); +@@ -3707,6 +3900,14 @@ mcast_addr_pool_extend(struct rte_port *port) } @@ -2175,7 +3417,7 @@ index d599682788..42eba68b35 100644 static void mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) { -@@ -3725,7 +3776,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) +@@ -3725,7 +3926,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); } @@ -2184,7 +3426,7 @@ index d599682788..42eba68b35 100644 eth_port_multicast_addr_list_set(portid_t port_id) { struct rte_port *port; -@@ -3734,10 +3785,11 @@ eth_port_multicast_addr_list_set(portid_t port_id) +@@ -3734,10 +3935,11 @@ eth_port_multicast_addr_list_set(portid_t port_id) port = &ports[port_id]; diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, port->mc_addr_nb); @@ -2200,7 +3442,7 @@ index d599682788..42eba68b35 100644 } void -@@ -3762,10 +3814,10 @@ mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) +@@ -3762,10 +3964,10 @@ mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) } } @@ -2215,7 +3457,7 @@ index d599682788..42eba68b35 100644 } void -@@ -3792,7 +3844,9 @@ mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) +@@ -3792,7 +3994,9 @@ mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) } mcast_addr_pool_remove(port, i); @@ -2261,7 +3503,7 @@ index 25091de881..7b92ab1195 100644 info->ethertype = vlan_hdr->eth_proto; } diff --git a/dpdk/app/test-pmd/flowgen.c b/dpdk/app/test-pmd/flowgen.c -index 03b72aaa56..68931fdea6 100644 +index 03b72aaa56..3e1335b627 100644 --- a/dpdk/app/test-pmd/flowgen.c +++ b/dpdk/app/test-pmd/flowgen.c @@ -1,35 +1,5 @@ @@ -2302,6 +3544,28 @@ index 03b72aaa56..68931fdea6 100644 */ #include +@@ -83,8 +53,11 @@ static struct rte_ether_addr cfg_ether_dst = + + #define IP_DEFTTL 64 /* from RFC 1340. */ + ++/* Use this type to inform GCC that ip_sum violates aliasing rules. */ ++typedef unaligned_uint16_t alias_int16_t __attribute__((__may_alias__)); ++ + static inline uint16_t +-ip_sum(const unaligned_uint16_t *hdr, int hdr_len) ++ip_sum(const alias_int16_t *hdr, int hdr_len) + { + uint32_t sum = 0; + +@@ -186,7 +159,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs) + next_flow); + ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_size - + sizeof(*eth_hdr)); +- ip_hdr->hdr_checksum = ip_sum((unaligned_uint16_t *)ip_hdr, ++ ip_hdr->hdr_checksum = ip_sum((const alias_int16_t *)ip_hdr, + sizeof(*ip_hdr)); + + /* Initialize UDP header. 
*/ diff --git a/dpdk/app/test-pmd/macswap.c b/dpdk/app/test-pmd/macswap.c index 71af916fc3..8428c26d85 100644 --- a/dpdk/app/test-pmd/macswap.c @@ -2343,8 +3607,31 @@ index 71af916fc3..8428c26d85 100644 */ #include +diff --git a/dpdk/app/test-pmd/meson.build b/dpdk/app/test-pmd/meson.build +index 6006c60f99..b0249bdb3c 100644 +--- a/dpdk/app/test-pmd/meson.build ++++ b/dpdk/app/test-pmd/meson.build +@@ -28,6 +28,18 @@ deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci'] + if dpdk_conf.has('RTE_LIBRTE_PDUMP') + deps += 'pdump' + endif ++if dpdk_conf.has('RTE_LIBRTE_BITRATESTATS') ++ deps += 'bitratestats' ++endif ++if dpdk_conf.has('RTE_LIBRTE_LATENCYSTATS') ++ deps += 'latencystats' ++endif ++if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER') ++ deps += 'pmd_crypto_scheduler' ++endif ++if dpdk_conf.has('RTE_LIBRTE_BOND_PMD') ++ deps += 'pmd_bond' ++endif + if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD') + deps += 'pmd_bnxt' + endif diff --git a/dpdk/app/test-pmd/parameters.c b/dpdk/app/test-pmd/parameters.c -index 2e7a504415..0eb7844783 100644 +index 2e7a504415..a1c08a411a 100644 --- a/dpdk/app/test-pmd/parameters.c +++ b/dpdk/app/test-pmd/parameters.c @@ -49,7 +49,7 @@ @@ -2356,11 +3643,291 @@ index 2e7a504415..0eb7844783 100644 #ifdef RTE_LIBRTE_CMDLINE "[--interactive|-i] " "[--cmdline-file=FILENAME] " +@@ -884,12 +884,9 @@ launch_args_parse(int argc, char** argv) + } + if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) { + n = atoi(optarg); +- if (n >= RTE_ETHER_MIN_LEN) { ++ if (n >= RTE_ETHER_MIN_LEN) + rx_mode.max_rx_pkt_len = (uint32_t) n; +- if (n > RTE_ETHER_MAX_LEN) +- rx_offloads |= +- DEV_RX_OFFLOAD_JUMBO_FRAME; +- } else ++ else + rte_exit(EXIT_FAILURE, + "Invalid max-pkt-len=%d - should be > %d\n", + n, RTE_ETHER_MIN_LEN); diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c -index b374682236..0b126594b7 100644 +index b374682236..0c3361e817 100644 --- a/dpdk/app/test-pmd/testpmd.c +++ b/dpdk/app/test-pmd/testpmd.c -@@ -2549,32 +2549,17 @@ setup_attached_port(portid_t pi) +@@ -421,8 +421,11 @@ lcoreid_t latencystats_lcore_id = -1; + * Ethernet device configuration. + */ + struct rte_eth_rxmode rx_mode = { +- .max_rx_pkt_len = RTE_ETHER_MAX_LEN, +- /**< Default maximum frame length. */ ++ /* Default maximum frame length. ++ * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead" ++ * in init_config(). ++ */ ++ .max_rx_pkt_len = 0, + }; + + struct rte_eth_txmode tx_mode = { +@@ -1071,6 +1074,177 @@ check_nb_txq(queueid_t txq) + return 0; + } + ++/* ++ * Get the allowed maximum number of RXDs of every rx queue. ++ * *pid return the port id which has minimal value of ++ * max_rxd in all queues of all ports. ++ */ ++static uint16_t ++get_allowed_max_nb_rxd(portid_t *pid) ++{ ++ uint16_t allowed_max_rxd = UINT16_MAX; ++ portid_t pi; ++ struct rte_eth_dev_info dev_info; ++ ++ RTE_ETH_FOREACH_DEV(pi) { ++ if (eth_dev_info_get_print_err(pi, &dev_info) != 0) ++ continue; ++ ++ if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) { ++ allowed_max_rxd = dev_info.rx_desc_lim.nb_max; ++ *pid = pi; ++ } ++ } ++ return allowed_max_rxd; ++} ++ ++/* ++ * Get the allowed minimal number of RXDs of every rx queue. ++ * *pid return the port id which has minimal value of ++ * min_rxd in all queues of all ports. 
++ */ ++static uint16_t ++get_allowed_min_nb_rxd(portid_t *pid) ++{ ++ uint16_t allowed_min_rxd = 0; ++ portid_t pi; ++ struct rte_eth_dev_info dev_info; ++ ++ RTE_ETH_FOREACH_DEV(pi) { ++ if (eth_dev_info_get_print_err(pi, &dev_info) != 0) ++ continue; ++ ++ if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) { ++ allowed_min_rxd = dev_info.rx_desc_lim.nb_min; ++ *pid = pi; ++ } ++ } ++ ++ return allowed_min_rxd; ++} ++ ++/* ++ * Check input rxd is valid or not. ++ * If input rxd is not greater than any of maximum number ++ * of RXDs of every Rx queues and is not less than any of ++ * minimal number of RXDs of every Rx queues, it is valid. ++ * if valid, return 0, else return -1 ++ */ ++int ++check_nb_rxd(queueid_t rxd) ++{ ++ uint16_t allowed_max_rxd; ++ uint16_t allowed_min_rxd; ++ portid_t pid = 0; ++ ++ allowed_max_rxd = get_allowed_max_nb_rxd(&pid); ++ if (rxd > allowed_max_rxd) { ++ printf("Fail: input rxd (%u) can't be greater " ++ "than max_rxds (%u) of port %u\n", ++ rxd, ++ allowed_max_rxd, ++ pid); ++ return -1; ++ } ++ ++ allowed_min_rxd = get_allowed_min_nb_rxd(&pid); ++ if (rxd < allowed_min_rxd) { ++ printf("Fail: input rxd (%u) can't be less " ++ "than min_rxds (%u) of port %u\n", ++ rxd, ++ allowed_min_rxd, ++ pid); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Get the allowed maximum number of TXDs of every rx queues. ++ * *pid return the port id which has minimal value of ++ * max_txd in every tx queue. ++ */ ++static uint16_t ++get_allowed_max_nb_txd(portid_t *pid) ++{ ++ uint16_t allowed_max_txd = UINT16_MAX; ++ portid_t pi; ++ struct rte_eth_dev_info dev_info; ++ ++ RTE_ETH_FOREACH_DEV(pi) { ++ if (eth_dev_info_get_print_err(pi, &dev_info) != 0) ++ continue; ++ ++ if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) { ++ allowed_max_txd = dev_info.tx_desc_lim.nb_max; ++ *pid = pi; ++ } ++ } ++ return allowed_max_txd; ++} ++ ++/* ++ * Get the allowed maximum number of TXDs of every tx queues. ++ * *pid return the port id which has minimal value of ++ * min_txd in every tx queue. ++ */ ++static uint16_t ++get_allowed_min_nb_txd(portid_t *pid) ++{ ++ uint16_t allowed_min_txd = 0; ++ portid_t pi; ++ struct rte_eth_dev_info dev_info; ++ ++ RTE_ETH_FOREACH_DEV(pi) { ++ if (eth_dev_info_get_print_err(pi, &dev_info) != 0) ++ continue; ++ ++ if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) { ++ allowed_min_txd = dev_info.tx_desc_lim.nb_min; ++ *pid = pi; ++ } ++ } ++ ++ return allowed_min_txd; ++} ++ ++/* ++ * Check input txd is valid or not. ++ * If input txd is not greater than any of maximum number ++ * of TXDs of every Rx queues, it is valid. ++ * if valid, return 0, else return -1 ++ */ ++int ++check_nb_txd(queueid_t txd) ++{ ++ uint16_t allowed_max_txd; ++ uint16_t allowed_min_txd; ++ portid_t pid = 0; ++ ++ allowed_max_txd = get_allowed_max_nb_txd(&pid); ++ if (txd > allowed_max_txd) { ++ printf("Fail: input txd (%u) can't be greater " ++ "than max_txds (%u) of port %u\n", ++ txd, ++ allowed_max_txd, ++ pid); ++ return -1; ++ } ++ ++ allowed_min_txd = get_allowed_min_nb_txd(&pid); ++ if (txd < allowed_min_txd) { ++ printf("Fail: input txd (%u) can't be less " ++ "than min_txds (%u) of port %u\n", ++ txd, ++ allowed_min_txd, ++ pid); ++ return -1; ++ } ++ return 0; ++} ++ ++ + /* + * Get the allowed maximum number of hairpin queues. 
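The check_nb_rxd()/check_nb_txd() helpers added above all enforce one rule: a configured descriptor count must fit inside the tightest [nb_min, nb_max] window reported by any port, because the same count is applied to every queue of every port. Reduced to a standalone sketch (struct and helper names are made up for illustration):

#include <stdint.h>
#include <stdio.h>

struct desc_lim { uint16_t nb_min, nb_max; };	/* per-port limits */

static int
check_desc_count(uint16_t nb_desc, const struct desc_lim *lim, int n_ports)
{
	uint16_t lo = 0, hi = UINT16_MAX;

	for (int i = 0; i < n_ports; i++) {
		if (lim[i].nb_min > lo)
			lo = lim[i].nb_min;	/* largest minimum wins */
		if (lim[i].nb_max < hi)
			hi = lim[i].nb_max;	/* smallest maximum wins */
	}
	if (nb_desc < lo || nb_desc > hi) {
		fprintf(stderr, "descriptor count %u outside [%u, %u]\n",
			nb_desc, lo, hi);
		return -1;
	}
	return 0;
}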
+ * *pid return the port id which has minimal value of +@@ -1166,6 +1340,11 @@ init_config(void) + rte_exit(EXIT_FAILURE, + "rte_eth_dev_info_get() failed\n"); + ++ ret = update_jumbo_frame_offload(pid); ++ if (ret != 0) ++ printf("Updating jumbo frame offload failed for port %u\n", ++ pid); ++ + if (!(port->dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + port->dev_conf.txmode.offloads &= +@@ -1430,9 +1609,9 @@ init_fwd_streams(void) + static void + pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) + { +- unsigned int total_burst; +- unsigned int nb_burst; +- unsigned int burst_stats[3]; ++ uint64_t total_burst; ++ uint64_t nb_burst; ++ uint64_t burst_stats[3]; + uint16_t pktnb_stats[3]; + uint16_t nb_pkt; + int burst_percent[3]; +@@ -1461,8 +1640,8 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) + } + if (total_burst == 0) + return; +- burst_percent[0] = (burst_stats[0] * 100) / total_burst; +- printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, ++ burst_percent[0] = (double)burst_stats[0] / total_burst * 100; ++ printf(" %s-bursts : %"PRIu64" [%d%% of %d pkts", rx_tx, total_burst, + burst_percent[0], (int) pktnb_stats[0]); + if (burst_stats[0] == total_burst) { + printf("]\n"); +@@ -1473,7 +1652,7 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) + 100 - burst_percent[0], pktnb_stats[1]); + return; + } +- burst_percent[1] = (burst_stats[1] * 100) / total_burst; ++ burst_percent[1] = (double)burst_stats[1] / total_burst * 100; + burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); + if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { + printf(" + %d%% of others]\n", 100 - burst_percent[0]); +@@ -1698,11 +1877,22 @@ fwd_stats_display(void) + "%s\n", + acc_stats_border, acc_stats_border); + #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES +- if (total_recv > 0) +- printf("\n CPU cycles/packet=%u (total cycles=" +- "%"PRIu64" / total RX packets=%"PRIu64")\n", +- (unsigned int)(fwd_cycles / total_recv), +- fwd_cycles, total_recv); ++#define CYC_PER_MHZ 1E6 ++ if (total_recv > 0 || total_xmit > 0) { ++ uint64_t total_pkts = 0; ++ if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || ++ strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) ++ total_pkts = total_xmit; ++ else ++ total_pkts = total_recv; ++ ++ printf("\n CPU cycles/packet=%.2F (total cycles=" ++ "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 ++ " MHz Clock\n", ++ (double) fwd_cycles / total_pkts, ++ fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, ++ (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); ++ } + #endif + } + +@@ -2549,32 +2739,17 @@ setup_attached_port(portid_t pi) printf("Done\n"); } @@ -2396,7 +3963,7 @@ index b374682236..0b126594b7 100644 if (rte_dev_remove(dev) < 0) { TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); -@@ -2592,14 +2577,33 @@ detach_port_device(portid_t port_id) +@@ -2592,14 +2767,33 @@ detach_port_device(portid_t port_id) remove_invalid_ports(); @@ -2432,7 +3999,7 @@ index b374682236..0b126594b7 100644 { struct rte_dev_iterator iterator; struct rte_devargs da; -@@ -2748,7 +2752,7 @@ check_all_ports_link_status(uint32_t port_mask) +@@ -2748,7 +2942,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up. speed %u Mbps- %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
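The pkt_burst_stats_display() change above is an overflow fix: with 32-bit counters, burst_stats[0] * 100 wraps once a counter passes roughly 43 million bursts, so the counters become uint64_t and the share is computed in floating point before conversion. A minimal sketch of the safe form, assuming nothing beyond stdint:

#include <stdint.h>

/* Divide before scaling so the intermediate cannot overflow, which the
 * old 32-bit "part * 100" eventually did on long runs. */
static inline int
pct_of(uint64_t part, uint64_t total)
{
	return total == 0 ? 0 : (int)((double)part / (double)total * 100.0);
}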
@@ -2441,7 +4008,7 @@ index b374682236..0b126594b7 100644 else printf("Port %d Link Down\n", portid); continue; -@@ -2790,6 +2794,7 @@ rmv_port_callback(void *arg) +@@ -2790,6 +2984,7 @@ rmv_port_callback(void *arg) int need_to_start = 0; int org_no_link_check = no_link_check; portid_t port_id = (intptr_t)arg; @@ -2449,7 +4016,7 @@ index b374682236..0b126594b7 100644 RTE_ETH_VALID_PORTID_OR_RET(port_id); -@@ -2800,8 +2805,12 @@ rmv_port_callback(void *arg) +@@ -2800,8 +2995,12 @@ rmv_port_callback(void *arg) no_link_check = 1; stop_port(port_id); no_link_check = org_no_link_check; @@ -2463,7 +4030,88 @@ index b374682236..0b126594b7 100644 if (need_to_start) start_packet_forwarding(0); } -@@ -3184,6 +3193,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, +@@ -3049,6 +3248,80 @@ rxtx_port_config(struct rte_port *port) + } + } + ++/* ++ * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload, ++ * MTU is also aligned if JUMBO_FRAME offload is not set. ++ * ++ * port->dev_info should be set before calling this function. ++ * ++ * return 0 on success, negative on error ++ */ ++int ++update_jumbo_frame_offload(portid_t portid) ++{ ++ struct rte_port *port = &ports[portid]; ++ uint32_t eth_overhead; ++ uint64_t rx_offloads; ++ int ret; ++ bool on; ++ ++ /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */ ++ if (port->dev_info.max_mtu != UINT16_MAX && ++ port->dev_info.max_rx_pktlen > port->dev_info.max_mtu) ++ eth_overhead = port->dev_info.max_rx_pktlen - ++ port->dev_info.max_mtu; ++ else ++ eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; ++ ++ rx_offloads = port->dev_conf.rxmode.offloads; ++ ++ /* Default config value is 0 to use PMD specific overhead */ ++ if (port->dev_conf.rxmode.max_rx_pkt_len == 0) ++ port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead; ++ ++ if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) { ++ rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; ++ on = false; ++ } else { ++ if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { ++ printf("Frame size (%u) is not supported by port %u\n", ++ port->dev_conf.rxmode.max_rx_pkt_len, ++ portid); ++ return -1; ++ } ++ rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; ++ on = true; ++ } ++ ++ if (rx_offloads != port->dev_conf.rxmode.offloads) { ++ uint16_t qid; ++ ++ port->dev_conf.rxmode.offloads = rx_offloads; ++ ++ /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */ ++ for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) { ++ if (on) ++ port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; ++ else ++ port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; ++ } ++ } ++ ++ /* If JUMBO_FRAME is set MTU conversion done by ethdev layer, ++ * if unset do it here ++ */ ++ if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { ++ ret = rte_eth_dev_set_mtu(portid, ++ port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead); ++ if (ret) ++ printf("Failed to set MTU to %u for port %u\n", ++ port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead, ++ portid); ++ } ++ ++ return 0; ++} ++ + void + init_port_config(void) + { +@@ -3184,6 +3457,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, struct rte_eth_dcb_tx_conf *tx_conf = ð_conf->tx_adv_conf.dcb_tx_conf; @@ -2472,7 +4120,7 @@ index b374682236..0b126594b7 100644 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); if (rc != 0) return rc; -@@ -3570,5 +3581,10 @@ main(int argc, char** argv) +@@ -3570,5 +3845,10 @@ main(int argc, char** argv) return 1; } @@ -2485,7 
+4133,7 @@ index b374682236..0b126594b7 100644 + return EXIT_SUCCESS; } diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h -index 217d577018..0694e1ef8b 100644 +index 217d577018..4dbcee3a62 100644 --- a/dpdk/app/test-pmd/testpmd.h +++ b/dpdk/app/test-pmd/testpmd.h @@ -797,7 +797,7 @@ void stop_port(portid_t pid); @@ -2497,8 +4145,26 @@ index 217d577018..0694e1ef8b 100644 void detach_port_device(portid_t port_id); int all_ports_stopped(void); int port_is_stopped(portid_t port_id); +@@ -859,6 +859,8 @@ queueid_t get_allowed_max_nb_rxq(portid_t *pid); + int check_nb_rxq(queueid_t rxq); + queueid_t get_allowed_max_nb_txq(portid_t *pid); + int check_nb_txq(queueid_t txq); ++int check_nb_rxd(queueid_t rxd); ++int check_nb_txd(queueid_t txd); + queueid_t get_allowed_max_nb_hairpinq(portid_t *pid); + int check_nb_hairpinq(queueid_t hairpinq); + +@@ -881,6 +883,8 @@ uint16_t tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue, + void add_tx_md_callback(portid_t portid); + void remove_tx_md_callback(portid_t portid); + ++int update_jumbo_frame_offload(portid_t portid); ++ + /* + * Work-around of a compilation error with ICC on invocations of the + * rte_be_to_cpu_16() function. diff --git a/dpdk/app/test-pmd/txonly.c b/dpdk/app/test-pmd/txonly.c -index 3caf281cb8..8ed436def5 100644 +index 3caf281cb8..a1822c631d 100644 --- a/dpdk/app/test-pmd/txonly.c +++ b/dpdk/app/test-pmd/txonly.c @@ -45,8 +45,8 @@ uint16_t tx_udp_src_port = 9; @@ -2512,7 +4178,41 @@ index 3caf281cb8..8ed436def5 100644 #define IP_DEFTTL 64 /* from RFC 1340. */ -@@ -153,7 +153,6 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, +@@ -147,13 +147,40 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr, + ip_hdr->hdr_checksum = (uint16_t) ip_cksum; + } + ++static inline void ++update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len) ++{ ++ struct rte_ipv4_hdr *ip_hdr; ++ struct rte_udp_hdr *udp_hdr; ++ uint16_t pkt_data_len; ++ uint16_t pkt_len; ++ ++ pkt_data_len = (uint16_t) (total_pkt_len - ( ++ sizeof(struct rte_ether_hdr) + ++ sizeof(struct rte_ipv4_hdr) + ++ sizeof(struct rte_udp_hdr))); ++ /* updata udp pkt length */ ++ udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *, ++ sizeof(struct rte_ether_hdr) + ++ sizeof(struct rte_ipv4_hdr)); ++ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr)); ++ udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len); ++ ++ /* updata ip pkt length and csum */ ++ ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *, ++ sizeof(struct rte_ether_hdr)); ++ ip_hdr->hdr_checksum = 0; ++ pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr)); ++ ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len); ++ ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr); ++} ++ + static inline bool + pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, + struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci, const uint16_t vlan_tci_outer, const uint64_t ol_flags) { struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT]; @@ -2520,7 +4220,7 @@ index 3caf281cb8..8ed436def5 100644 struct rte_mbuf *pkt_seg; uint32_t nb_segs, pkt_len; uint8_t i; -@@ -192,6 +191,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, +@@ -192,6 +219,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt, sizeof(struct rte_ether_hdr)); if (txonly_multi_flow) { @@ -2528,7 +4228,7 @@ index 3caf281cb8..8ed436def5 100644 struct rte_ipv4_hdr *ip_hdr; uint32_t addr; -@@ -207,6 +207,7 @@ 
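update_jumbo_frame_offload(), added and declared above, centralizes a rule that was previously scattered through option parsing: JUMBO_FRAME is required only when the configured frame length exceeds RTE_ETHER_MTU plus the PMD's layer-2 overhead, and a configured length of zero means "derive it from the standard MTU". The decision logic as a sketch, with local constants standing in for the rte_ether values:

#include <stdbool.h>
#include <stdint.h>

#define ETHER_HDR_LEN	14	/* stand-ins for the rte_ether constants */
#define ETHER_CRC_LEN	4
#define ETHER_MTU	1500

static bool
needs_jumbo(uint32_t max_rx_pkt_len, uint16_t max_mtu, uint32_t max_rx_pktlen)
{
	uint32_t overhead;

	/* Prefer the overhead the PMD implies via its own limits. */
	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead = max_rx_pktlen - max_mtu;
	else
		overhead = ETHER_HDR_LEN + ETHER_CRC_LEN;

	if (max_rx_pkt_len == 0)	/* 0 == "standard MTU + PMD overhead" */
		max_rx_pkt_len = ETHER_MTU + overhead;

	return max_rx_pkt_len > ETHER_MTU + overhead;
}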
pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, +@@ -207,10 +235,15 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, */ addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id(); ip_hdr->src_addr = rte_cpu_to_be_32(addr); @@ -2536,7 +4236,15 @@ index 3caf281cb8..8ed436def5 100644 } copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt, sizeof(struct rte_ether_hdr) + -@@ -314,7 +315,7 @@ pkt_burst_transmit(struct fwd_stream *fs) + sizeof(struct rte_ipv4_hdr)); ++ ++ if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow) ++ update_pkt_header(pkt, pkt_len); ++ + /* + * Complete first mbuf of packet and append it to the + * burst of packets to be transmitted. +@@ -314,7 +347,7 @@ pkt_burst_transmit(struct fwd_stream *fs) fs->tx_packets += nb_tx; if (txonly_multi_flow) @@ -2546,7 +4254,7 @@ index 3caf281cb8..8ed436def5 100644 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; diff --git a/dpdk/app/test-pmd/util.c b/dpdk/app/test-pmd/util.c -index b514be5e16..4e4ead3075 100644 +index b514be5e16..487260d59d 100644 --- a/dpdk/app/test-pmd/util.c +++ b/dpdk/app/test-pmd/util.c @@ -1,6 +1,6 @@ @@ -2557,10 +4265,230 @@ index b514be5e16..4e4ead3075 100644 */ #include +@@ -14,12 +14,23 @@ + + #include "testpmd.h" + ++#define MAX_STRING_LEN 8192 ++ ++#define MKDUMPSTR(buf, buf_size, cur_len, ...) \ ++do { \ ++ if (cur_len >= buf_size) \ ++ break; \ ++ cur_len += snprintf(buf + cur_len, buf_size - cur_len, __VA_ARGS__); \ ++} while (0) ++ + static inline void +-print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr) ++print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr, ++ char print_buf[], size_t buf_size, size_t *cur_len) + { + char buf[RTE_ETHER_ADDR_FMT_SIZE]; ++ + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); +- printf("%s%s", what, buf); ++ MKDUMPSTR(print_buf, buf_size, *cur_len, "%s%s", what, buf); + } + + static inline void +@@ -39,13 +50,15 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + uint16_t udp_port; + uint32_t vx_vni; + const char *reason; ++ char print_buf[MAX_STRING_LEN]; ++ size_t buf_size = MAX_STRING_LEN; ++ size_t cur_len = 0; + + if (!nb_pkts) + return; +- printf("port %u/queue %u: %s %u packets\n", +- port_id, queue, +- is_rx ? "received" : "sent", +- (unsigned int) nb_pkts); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "port %u/queue %u: %s %u packets\n", port_id, queue, ++ is_rx ? 
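update_pkt_header(), introduced in the txonly hunks above and called when TX splitting or multi-flow changes the packet size, has to redo three pieces of bookkeeping: the UDP datagram length, the IPv4 total length, and the IPv4 checksum. The same bookkeeping in plain C; the local struct definitions stand in for the rte_*_hdr types:

#include <arpa/inet.h>
#include <stdint.h>

struct ipv4_hdr {		/* stand-in for rte_ipv4_hdr */
	uint8_t  vihl, tos;
	uint16_t total_length, id, frag;
	uint8_t  ttl, proto;
	uint16_t hdr_checksum;
	uint32_t src, dst;
};
struct udp_hdr {		/* stand-in for rte_udp_hdr */
	uint16_t sport, dport, dgram_len, cksum;
};

static void
fix_lengths(struct ipv4_hdr *ip, struct udp_hdr *udp, uint16_t payload_len)
{
	uint16_t udp_len = (uint16_t)(payload_len + sizeof(*udp));

	udp->dgram_len = htons(udp_len);
	ip->total_length = htons((uint16_t)(udp_len + sizeof(*ip)));
	ip->hdr_checksum = 0;	/* recompute afterwards, e.g. with ip_sum() */
}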
"received" : "sent", (unsigned int) nb_pkts); + for (i = 0; i < nb_pkts; i++) { + mb = pkts[i]; + eth_hdr = rte_pktmbuf_read(mb, 0, sizeof(_eth_hdr), &_eth_hdr); +@@ -54,62 +67,84 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + packet_type = mb->packet_type; + is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type); + +- print_ether_addr(" src=", ð_hdr->s_addr); +- print_ether_addr(" - dst=", ð_hdr->d_addr); +- printf(" - type=0x%04x - length=%u - nb_segs=%d", +- eth_type, (unsigned int) mb->pkt_len, +- (int)mb->nb_segs); ++ print_ether_addr(" src=", ð_hdr->s_addr, ++ print_buf, buf_size, &cur_len); ++ print_ether_addr(" - dst=", ð_hdr->d_addr, ++ print_buf, buf_size, &cur_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - type=0x%04x - length=%u - nb_segs=%d", ++ eth_type, (unsigned int) mb->pkt_len, ++ (int)mb->nb_segs); + if (ol_flags & PKT_RX_RSS_HASH) { +- printf(" - RSS hash=0x%x", (unsigned int) mb->hash.rss); +- printf(" - RSS queue=0x%x", (unsigned int) queue); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - RSS hash=0x%x", ++ (unsigned int) mb->hash.rss); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - RSS queue=0x%x", (unsigned int) queue); + } + if (ol_flags & PKT_RX_FDIR) { +- printf(" - FDIR matched "); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - FDIR matched "); + if (ol_flags & PKT_RX_FDIR_ID) +- printf("ID=0x%x", +- mb->hash.fdir.hi); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "ID=0x%x", mb->hash.fdir.hi); + else if (ol_flags & PKT_RX_FDIR_FLX) +- printf("flex bytes=0x%08x %08x", +- mb->hash.fdir.hi, mb->hash.fdir.lo); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "flex bytes=0x%08x %08x", ++ mb->hash.fdir.hi, mb->hash.fdir.lo); + else +- printf("hash=0x%x ID=0x%x ", +- mb->hash.fdir.hash, mb->hash.fdir.id); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "hash=0x%x ID=0x%x ", ++ mb->hash.fdir.hash, mb->hash.fdir.id); + } + if (ol_flags & PKT_RX_TIMESTAMP) +- printf(" - timestamp %"PRIu64" ", mb->timestamp); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - timestamp %"PRIu64" ", mb->timestamp); + if (ol_flags & PKT_RX_QINQ) +- printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x", +- mb->vlan_tci, mb->vlan_tci_outer); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x", ++ mb->vlan_tci, mb->vlan_tci_outer); + else if (ol_flags & PKT_RX_VLAN) +- printf(" - VLAN tci=0x%x", mb->vlan_tci); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - VLAN tci=0x%x", mb->vlan_tci); + if (!is_rx && (ol_flags & PKT_TX_DYNF_METADATA)) +- printf(" - Tx metadata: 0x%x", +- *RTE_FLOW_DYNF_METADATA(mb)); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - Tx metadata: 0x%x", ++ *RTE_FLOW_DYNF_METADATA(mb)); + if (is_rx && (ol_flags & PKT_RX_DYNF_METADATA)) +- printf(" - Rx metadata: 0x%x", +- *RTE_FLOW_DYNF_METADATA(mb)); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - Rx metadata: 0x%x", ++ *RTE_FLOW_DYNF_METADATA(mb)); + if (mb->packet_type) { + rte_get_ptype_name(mb->packet_type, buf, sizeof(buf)); +- printf(" - hw ptype: %s", buf); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - hw ptype: %s", buf); + } + sw_packet_type = rte_net_get_ptype(mb, &hdr_lens, + RTE_PTYPE_ALL_MASK); + rte_get_ptype_name(sw_packet_type, buf, sizeof(buf)); +- printf(" - sw ptype: %s", buf); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - sw ptype: %s", buf); + if (sw_packet_type & RTE_PTYPE_L2_MASK) +- printf(" - l2_len=%d", hdr_lens.l2_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - l2_len=%d", ++ hdr_lens.l2_len); + if 
(sw_packet_type & RTE_PTYPE_L3_MASK) +- printf(" - l3_len=%d", hdr_lens.l3_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - l3_len=%d", ++ hdr_lens.l3_len); + if (sw_packet_type & RTE_PTYPE_L4_MASK) +- printf(" - l4_len=%d", hdr_lens.l4_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - l4_len=%d", ++ hdr_lens.l4_len); + if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK) +- printf(" - tunnel_len=%d", hdr_lens.tunnel_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - tunnel_len=%d", hdr_lens.tunnel_len); + if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK) +- printf(" - inner_l2_len=%d", hdr_lens.inner_l2_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - inner_l2_len=%d", hdr_lens.inner_l2_len); + if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK) +- printf(" - inner_l3_len=%d", hdr_lens.inner_l3_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - inner_l3_len=%d", hdr_lens.inner_l3_len); + if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK) +- printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - inner_l4_len=%d", hdr_lens.inner_l4_len); + if (is_encapsulation) { + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; +@@ -146,18 +181,27 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + l2_len + l3_len + l4_len); + udp_port = RTE_BE_TO_CPU_16(udp_hdr->dst_port); + vx_vni = rte_be_to_cpu_32(vxlan_hdr->vx_vni); +- printf(" - VXLAN packet: packet type =%d, " +- "Destination UDP port =%d, VNI = %d", +- packet_type, udp_port, vx_vni >> 8); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - VXLAN packet: packet type =%d, " ++ "Destination UDP port =%d, VNI = %d", ++ packet_type, udp_port, vx_vni >> 8); + } + } +- printf(" - %s queue=0x%x", is_rx ? "Receive" : "Send", +- (unsigned int) queue); +- printf("\n"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - %s queue=0x%x", is_rx ? 
"Receive" : "Send", ++ (unsigned int) queue); ++ MKDUMPSTR(print_buf, buf_size, cur_len, "\n"); + rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf)); +- printf(" ol_flags: %s\n", buf); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " ol_flags: %s\n", buf); + if (rte_mbuf_check(mb, 1, &reason) < 0) +- printf("INVALID mbuf: %s\n", reason); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "INVALID mbuf: %s\n", reason); ++ if (cur_len >= buf_size) ++ printf("%s ...\n", print_buf); ++ else ++ printf("%s", print_buf); ++ cur_len = 0; + } + } + +diff --git a/dpdk/app/test-sad/main.c b/dpdk/app/test-sad/main.c +index b01e84c570..8380fad744 100644 +--- a/dpdk/app/test-sad/main.c ++++ b/dpdk/app/test-sad/main.c +@@ -617,7 +617,7 @@ main(int argc, char **argv) + { + int ret; + struct rte_ipsec_sad *sad; +- struct rte_ipsec_sad_conf conf; ++ struct rte_ipsec_sad_conf conf = {0}; + unsigned int lcore_id; + + ret = rte_eal_init(argc, argv); diff --git a/dpdk/app/test/Makefile b/dpdk/app/test/Makefile -index 57930c00b1..1ee1550094 100644 +index 57930c00b1..30eff33206 100644 --- a/dpdk/app/test/Makefile +++ b/dpdk/app/test/Makefile +@@ -122,7 +122,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_perf.c + SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_functions.c + SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_multiwriter.c + SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_readwrite.c +-SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_readwrite_lf.c ++SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_readwrite_lf_perf.c + + SRCS-$(CONFIG_RTE_LIBRTE_RIB) += test_rib.c + SRCS-$(CONFIG_RTE_LIBRTE_RIB) += test_rib6.c @@ -151,8 +151,12 @@ SRCS-y += test_func_reentrancy.c SRCS-y += test_service_cores.c @@ -2609,6 +4537,21 @@ index 57930c00b1..1ee1550094 100644 ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y) ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y) +diff --git a/dpdk/app/test/autotest_data.py b/dpdk/app/test/autotest_data.py +index 6deb97bcc1..ca29b09f31 100644 +--- a/dpdk/app/test/autotest_data.py ++++ b/dpdk/app/test/autotest_data.py +@@ -670,8 +670,8 @@ + "Report": None, + }, + { +- "Name": "Hash read-write lock-free concurrency autotest", +- "Command": "hash_readwrite_lf_autotest", ++ "Name": "Hash read-write lock-free concurrency perf autotest", ++ "Command": "hash_readwrite_lf_perf_autotest", + "Func": default_autotest, + "Report": None, + }, diff --git a/dpdk/app/test/get-coremask.sh b/dpdk/app/test/get-coremask.sh new file mode 100755 index 0000000000..bb8cf404d2 @@ -2629,7 +4572,7 @@ index 0000000000..bb8cf404d2 + echo 0-3 +fi diff --git a/dpdk/app/test/meson.build b/dpdk/app/test/meson.build -index fb49d804ba..8524a986a1 100644 +index fb49d804ba..24fb59f74f 100644 --- a/dpdk/app/test/meson.build +++ b/dpdk/app/test/meson.build @@ -7,13 +7,11 @@ endif @@ -2654,7 +4597,14 @@ index fb49d804ba..8524a986a1 100644 'test_event_timer_adapter.c', 'test_eventdev.c', 'test_external_mem.c', -@@ -65,9 +62,7 @@ test_sources = files('commands.c', +@@ -59,15 +56,13 @@ test_sources = files('commands.c', + 'test_hash_multiwriter.c', + 'test_hash_readwrite.c', + 'test_hash_perf.c', +- 'test_hash_readwrite_lf.c', ++ 'test_hash_readwrite_lf_perf.c', + 'test_interrupts.c', + 'test_ipsec.c', 'test_ipsec_sad.c', 'test_kni.c', 'test_kvargs.c', @@ -2676,34 +4626,183 @@ index fb49d804ba..8524a986a1 100644 'test_power.c', 'test_power_cpufreq.c', 'test_power_kvm_vm.c', -@@ -212,7 +204,6 @@ fast_test_names = [ - 'rib_autotest', - 'rib6_autotest', - 'ring_autotest', +@@ -162,92 +154,89 @@ test_deps = ['acl', + 'timer' + ] + +-fast_test_names = [ +- 
'acl_autotest', +- 'alarm_autotest', +- 'atomic_autotest', +- 'byteorder_autotest', +- 'cmdline_autotest', +- 'common_autotest', +- 'cpuflags_autotest', +- 'cycles_autotest', +- 'debug_autotest', +- 'eal_flags_c_opt_autotest', +- 'eal_flags_master_opt_autotest', +- 'eal_flags_n_opt_autotest', +- 'eal_flags_hpet_autotest', +- 'eal_flags_no_huge_autotest', +- 'eal_flags_w_opt_autotest', +- 'eal_flags_b_opt_autotest', +- 'eal_flags_vdev_opt_autotest', +- 'eal_flags_r_opt_autotest', +- 'eal_flags_mem_autotest', +- 'eal_flags_file_prefix_autotest', +- 'eal_flags_misc_autotest', +- 'eal_fs_autotest', +- 'errno_autotest', +- 'event_ring_autotest', +- 'fib_autotest', +- 'fib6_autotest', +- 'func_reentrancy_autotest', +- 'flow_classify_autotest', +- 'hash_autotest', +- 'interrupt_autotest', +- 'logs_autotest', +- 'lpm_autotest', +- 'lpm6_autotest', +- 'malloc_autotest', +- 'mbuf_autotest', +- 'mcslock_autotest', +- 'memcpy_autotest', +- 'memory_autotest', +- 'mempool_autotest', +- 'memzone_autotest', +- 'meter_autotest', +- 'multiprocess_autotest', +- 'per_lcore_autotest', +- 'prefetch_autotest', +- 'rcu_qsbr_autotest', +- 'red_autotest', +- 'rib_autotest', +- 'rib6_autotest', +- 'ring_autotest', - 'ring_pmd_autotest', - 'rwlock_test1_autotest', - 'rwlock_rda_autotest', - 'rwlock_rds_wrm_autotest', -@@ -227,7 +218,6 @@ fast_test_names = [ - 'timer_autotest', - 'user_delay_us', - 'version_autotest', +- 'rwlock_test1_autotest', +- 'rwlock_rda_autotest', +- 'rwlock_rds_wrm_autotest', +- 'rwlock_rde_wro_autotest', +- 'sched_autotest', +- 'spinlock_autotest', +- 'stack_autotest', +- 'stack_lf_autotest', +- 'string_autotest', +- 'table_autotest', +- 'tailq_autotest', +- 'timer_autotest', +- 'user_delay_us', +- 'version_autotest', - 'bitratestats_autotest', - 'crc_autotest', - 'delay_us_sleep_autotest', - 'distributor_autotest', -@@ -238,10 +228,8 @@ fast_test_names = [ - 'ipsec_autotest', - 'kni_autotest', - 'kvargs_autotest', +- 'crc_autotest', +- 'delay_us_sleep_autotest', +- 'distributor_autotest', +- 'eventdev_common_autotest', +- 'fbarray_autotest', +- 'hash_readwrite_autotest', +- 'hash_readwrite_lf_autotest', +- 'ipsec_autotest', +- 'kni_autotest', +- 'kvargs_autotest', - 'latencystats_autotest', - 'member_autotest', - 'metrics_autotest', +- 'member_autotest', +- 'metrics_autotest', - 'pdump_autotest', - 'power_cpufreq_autotest', - 'power_autotest', - 'power_kvm_vm_autotest', -@@ -277,7 +265,6 @@ perf_test_names = [ +- 'power_cpufreq_autotest', +- 'power_autotest', +- 'power_kvm_vm_autotest', +- 'reorder_autotest', +- 'service_autotest', +- 'thash_autotest', ++# Each test is marked with flag true/false ++# to indicate whether it can run in no-huge mode. 
++fast_tests = [ ++ ['acl_autotest', true], ++ ['alarm_autotest', false], ++ ['atomic_autotest', false], ++ ['byteorder_autotest', true], ++ ['cmdline_autotest', true], ++ ['common_autotest', true], ++ ['cpuflags_autotest', true], ++ ['cycles_autotest', true], ++ ['debug_autotest', true], ++ ['eal_flags_c_opt_autotest', false], ++ ['eal_flags_master_opt_autotest', false], ++ ['eal_flags_n_opt_autotest', false], ++ ['eal_flags_hpet_autotest', false], ++ ['eal_flags_no_huge_autotest', false], ++ ['eal_flags_w_opt_autotest', false], ++ ['eal_flags_b_opt_autotest', false], ++ ['eal_flags_vdev_opt_autotest', false], ++ ['eal_flags_r_opt_autotest', false], ++ ['eal_flags_mem_autotest', false], ++ ['eal_flags_file_prefix_autotest', false], ++ ['eal_flags_misc_autotest', false], ++ ['eal_fs_autotest', true], ++ ['errno_autotest', true], ++ ['event_ring_autotest', true], ++ ['fib_autotest', true], ++ ['fib6_autotest', true], ++ ['func_reentrancy_autotest', false], ++ ['flow_classify_autotest', false], ++ ['hash_autotest', true], ++ ['interrupt_autotest', true], ++ ['logs_autotest', true], ++ ['lpm_autotest', true], ++ ['lpm6_autotest', true], ++ ['malloc_autotest', false], ++ ['mbuf_autotest', false], ++ ['mcslock_autotest', false], ++ ['memcpy_autotest', true], ++ ['memory_autotest', false], ++ ['mempool_autotest', false], ++ ['memzone_autotest', false], ++ ['meter_autotest', true], ++ ['multiprocess_autotest', false], ++ ['per_lcore_autotest', true], ++ ['prefetch_autotest', true], ++ ['rcu_qsbr_autotest', true], ++ ['red_autotest', true], ++ ['rib_autotest', true], ++ ['rib6_autotest', true], ++ ['ring_autotest', true], ++ ['rwlock_test1_autotest', true], ++ ['rwlock_rda_autotest', true], ++ ['rwlock_rds_wrm_autotest', true], ++ ['rwlock_rde_wro_autotest', true], ++ ['sched_autotest', true], ++ ['spinlock_autotest', true], ++ ['stack_autotest', false], ++ ['stack_lf_autotest', false], ++ ['string_autotest', true], ++ ['table_autotest', true], ++ ['tailq_autotest', true], ++ ['timer_autotest', false], ++ ['user_delay_us', true], ++ ['version_autotest', true], ++ ['crc_autotest', true], ++ ['delay_us_sleep_autotest', true], ++ ['distributor_autotest', false], ++ ['eventdev_common_autotest', true], ++ ['fbarray_autotest', true], ++ ['hash_readwrite_autotest', false], ++ ['ipsec_autotest', true], ++ ['kni_autotest', false], ++ ['kvargs_autotest', true], ++ ['member_autotest', true], ++ ['metrics_autotest', true], ++ ['power_cpufreq_autotest', false], ++ ['power_autotest', true], ++ ['power_kvm_vm_autotest', false], ++ ['reorder_autotest', true], ++ ['service_autotest', true], ++ ['thash_autotest', true], + ] + + perf_test_names = [ +@@ -277,11 +266,11 @@ perf_test_names = [ 'rcu_qsbr_perf_autotest', 'red_perf', 'distributor_perf_autotest', @@ -2711,7 +4810,12 @@ index fb49d804ba..8524a986a1 100644 'pmd_perf_autotest', 'stack_perf_autotest', 'stack_lf_perf_autotest', -@@ -302,7 +289,6 @@ driver_test_names = [ + 'rand_perf_autotest', ++ 'hash_readwrite_lf_perf_autotest', + ] + + driver_test_names = [ +@@ -302,7 +291,6 @@ driver_test_names = [ 'eventdev_selftest_octeontx', 'eventdev_selftest_sw', 'link_bonding_autotest', @@ -2719,7 +4823,14 @@ index fb49d804ba..8524a986a1 100644 'link_bonding_rssconf_autotest', 'rawdev_autotest', ] -@@ -339,6 +325,21 @@ if dpdk_conf.has('RTE_LIBRTE_BOND_PMD') +@@ -336,9 +324,26 @@ endif + # they are used via a driver-specific API. 
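The meson change above turns the flat fast-test list into (name, can-run-without-hugepages) pairs so the harness can either add "--no-huge -m 2048" or skip a test outright. The same data-driven shape in C, with a hypothetical runner; has_hugepage would come from probing /proc/sys/vm/nr_hugepages, as the meson code does:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct test_case { const char *name; bool no_huge_ok; };

static const struct test_case fast_tests[] = {
	{ "acl_autotest",   true  },
	{ "alarm_autotest", false },	/* needs real hugepages */
};

static void
run_all(bool has_hugepage)
{
	for (size_t i = 0; i < sizeof(fast_tests) / sizeof(fast_tests[0]); i++) {
		if (!has_hugepage && !fast_tests[i].no_huge_ok) {
			printf("SKIP %s (needs hugepages)\n",
			       fast_tests[i].name);
			continue;
		}
		printf("RUN  %s%s\n", fast_tests[i].name,
		       has_hugepage ? "" : " --no-huge -m 2048");
	}
}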
+ if dpdk_conf.has('RTE_LIBRTE_BOND_PMD') + test_deps += 'pmd_bond' ++ if dpdk_conf.has('RTE_LIBRTE_RING_PMD') ++ test_sources += 'test_link_bonding_mode4.c' ++ driver_test_names += 'link_bonding_mode4_autotest' ++ endif endif if dpdk_conf.has('RTE_LIBRTE_RING_PMD') test_deps += 'pmd_ring' @@ -2728,24 +4839,79 @@ index fb49d804ba..8524a986a1 100644 + test_sources += 'test_event_eth_tx_adapter.c' + test_sources += 'test_bitratestats.c' + test_sources += 'test_latencystats.c' -+ test_sources += 'test_link_bonding_mode4.c' + test_sources += 'sample_packet_forward.c' + test_sources += 'test_pdump.c' -+ fast_test_names += 'ring_pmd_autotest' ++ fast_tests += [['ring_pmd_autotest', true]] + perf_test_names += 'ring_pmd_perf_autotest' -+ fast_test_names += 'event_eth_tx_adapter_autotest' -+ fast_test_names += 'bitratestats_autotest' -+ fast_test_names += 'latencystats_autotest' -+ driver_test_names += 'link_bonding_mode4_autotest' -+ fast_test_names += 'pdump_autotest' ++ fast_tests += [['event_eth_tx_adapter_autotest', false]] ++ fast_tests += [['bitratestats_autotest', true]] ++ fast_tests += [['latencystats_autotest', true]] ++ fast_tests += [['pdump_autotest', true]] endif if dpdk_conf.has('RTE_LIBRTE_POWER') -@@ -398,45 +399,36 @@ dpdk_test = executable('dpdk-test', - timeout_seconds = 600 - timeout_seconds_fast = 10 +@@ -359,19 +364,23 @@ endif + # specify -D_GNU_SOURCE unconditionally + cflags += '-D_GNU_SOURCE' + ++# Enable using internal APIs in unit tests ++cflags += ['-DALLOW_INTERNAL_API'] ++ + test_dep_objs = [] + if dpdk_conf.has('RTE_LIBRTE_COMPRESSDEV') +- compress_test_dep = dependency('zlib', required: false) ++ compress_test_dep = dependency('zlib', required: false, method: 'pkg-config') + if compress_test_dep.found() + test_dep_objs += compress_test_dep + test_sources += 'test_compressdev.c' + test_deps += 'compressdev' +- fast_test_names += 'compressdev_autotest' ++ fast_tests += [['compressdev_autotest', false]] + endif + endif --# Retrieve the number of CPU cores, defaulting to 4. +-if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER') ++if dpdk_conf.has('RTE_LIBRTE_CRYPTO_SCHEDULER_PMD') + driver_test_names += 'cryptodev_scheduler_autotest' ++ test_deps += 'pmd_crypto_scheduler' + endif + + foreach d:test_deps +@@ -382,7 +391,7 @@ test_dep_objs += cc.find_library('execinfo', required: false) + + link_libs = [] + if get_option('default_library') == 'static' +- link_libs = dpdk_drivers ++ link_libs = dpdk_static_libraries + dpdk_drivers + endif + + dpdk_test = executable('dpdk-test', +@@ -390,53 +399,66 @@ dpdk_test = executable('dpdk-test', + link_whole: link_libs, + dependencies: test_dep_objs, + c_args: [cflags, '-DALLOW_EXPERIMENTAL_API'], +- install_rpath: driver_install_path, ++ install_rpath: join_paths(get_option('prefix'), ++ driver_install_path), + install: true) + ++has_hugepage = true ++if is_linux ++ check_hugepage = run_command('cat', ++ '/proc/sys/vm/nr_hugepages') ++ if (check_hugepage.returncode() != 0 or ++ check_hugepage.stdout().strip() == '0') ++ has_hugepage = false ++ endif ++endif ++message('hugepage availability: @0@'.format(has_hugepage)) ++ + # some perf tests (eg: memcpy perf autotest)take very long + # to complete, so timeout to 10 minutes + timeout_seconds = 600 + timeout_seconds_fast = 10 + +-# Retrieve the number of CPU cores, defaulting to 4. 
-num_cores = '0-3' -if host_machine.system() == 'linux' - num_cores = run_command('cat', @@ -2764,7 +4930,7 @@ index fb49d804ba..8524a986a1 100644 +default_test_args = [num_cores_arg] -test_args = [num_cores_arg] - foreach arg : fast_test_names +-foreach arg : fast_test_names - if host_machine.system() == 'linux' - test(arg, dpdk_test, - env : ['DPDK_TEST=' + arg], @@ -2776,26 +4942,38 @@ index fb49d804ba..8524a986a1 100644 - else - test(arg, dpdk_test, - env : ['DPDK_TEST=' + arg], -- args : test_args, ++foreach arg : fast_tests + test_args = default_test_args ++ run_test = true ++ if not has_hugepage ++ if arg[1] ++ test_args += ['--no-huge', '-m', '2048'] ++ else ++ run_test = false ++ endif ++ endif + + if (get_option('default_library') == 'shared' and -+ arg == 'event_eth_tx_adapter_autotest') ++ arg[0] == 'event_eth_tx_adapter_autotest') + foreach drv:dpdk_drivers + test_args += ['-d', drv.full_path().split('.a')[0] + '.so'] + endforeach + endif + if is_linux -+ test_args += ['--file-prefix=@0@'.format(arg)] ++ test_args += ['--file-prefix=@0@'.format(arg[0])] + endif + -+ test(arg, dpdk_test, -+ env : ['DPDK_TEST=' + arg], -+ args : test_args, - timeout : timeout_seconds_fast, - is_parallel : false, - suite : 'fast-tests') -- endif ++ if run_test ++ test(arg[0], dpdk_test, ++ env : ['DPDK_TEST=' + arg[0]], + args : test_args, +- timeout : timeout_seconds_fast, +- is_parallel : false, +- suite : 'fast-tests') ++ timeout : timeout_seconds_fast, ++ is_parallel : false, ++ suite : 'fast-tests') + endif endforeach foreach arg : perf_test_names @@ -2806,7 +4984,7 @@ index fb49d804ba..8524a986a1 100644 timeout : timeout_seconds, is_parallel : false, suite : 'perf-tests') -@@ -445,7 +437,7 @@ endforeach +@@ -445,7 +467,7 @@ endforeach foreach arg : driver_test_names test(arg, dpdk_test, env : ['DPDK_TEST=' + arg], @@ -2815,7 +4993,7 @@ index fb49d804ba..8524a986a1 100644 timeout : timeout_seconds, is_parallel : false, suite : 'driver-tests') -@@ -454,7 +446,7 @@ endforeach +@@ -454,7 +476,7 @@ endforeach foreach arg : dump_test_names test(arg, dpdk_test, env : ['DPDK_TEST=' + arg], @@ -2874,7 +5052,7 @@ index 191d2796a9..c3b3780337 100644 return status; } diff --git a/dpdk/app/test/test.c b/dpdk/app/test/test.c -index cd7aaf645f..d0826ca69e 100644 +index cd7aaf645f..4736a17ff3 100644 --- a/dpdk/app/test/test.c +++ b/dpdk/app/test/test.c @@ -53,7 +53,9 @@ do_recursive_call(void) @@ -2887,6 +5065,56 @@ index cd7aaf645f..d0826ca69e 100644 #endif { "test_missing_c_flag", no_action }, { "test_master_lcore_flag", no_action }, +@@ -162,29 +164,38 @@ main(int argc, char **argv) + + + #ifdef RTE_LIBRTE_CMDLINE +- cl = cmdline_stdin_new(main_ctx, "RTE>>"); +- if (cl == NULL) { +- ret = -1; +- goto out; +- } +- + char *dpdk_test = getenv("DPDK_TEST"); + if (dpdk_test && strlen(dpdk_test)) { + char buf[1024]; ++ ++ cl = cmdline_new(main_ctx, "RTE>>", 0, 1); ++ if (cl == NULL) { ++ ret = -1; ++ goto out; ++ } ++ + snprintf(buf, sizeof(buf), "%s\n", dpdk_test); + if (cmdline_in(cl, buf, strlen(buf)) < 0) { + printf("error on cmdline input\n"); ++ ++ ret = -1; ++ } else { ++ ret = last_test_result; ++ } ++ cmdline_free(cl); ++ goto out; ++ } else { ++ /* if no DPDK_TEST env variable, go interactive */ ++ cl = cmdline_stdin_new(main_ctx, "RTE>>"); ++ if (cl == NULL) { + ret = -1; + goto out; + } + ++ cmdline_interact(cl); + cmdline_stdin_exit(cl); +- ret = last_test_result; +- goto out; ++ cmdline_free(cl); + } +- /* if no DPDK_TEST env variable, go interactive */ +- cmdline_interact(cl); +- 
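The test.c rework in the hunk just above and below splits the two modes cleanly: with DPDK_TEST set, exactly one command is fed to a fresh cmdline instance and its result becomes the exit status; otherwise the binary drops into the interactive prompt. A sketch of that control flow, with static stubs standing in for the cmdline library calls:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
run_command(const char *line)	/* stub for cmdline_new() + cmdline_in() */
{
	printf("would execute: %s", line);
	return 0;		/* stands in for last_test_result */
}

static void
interact(void)			/* stub for cmdline_interact() */
{
	printf("interactive prompt here\n");
}

static int
harness(void)
{
	const char *name = getenv("DPDK_TEST");

	if (name != NULL && strlen(name) != 0) {
		char buf[1024];

		snprintf(buf, sizeof(buf), "%s\n", name);
		return run_command(buf);	/* one command, then exit */
	}
	interact();				/* no env var: go interactive */
	return 0;
}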
cmdline_stdin_exit(cl); + #endif + ret = 0; + diff --git a/dpdk/app/test/test.h b/dpdk/app/test/test.h index ac0c50616c..b07f6c1ef0 100644 --- a/dpdk/app/test/test.h @@ -2933,6 +5161,94 @@ index 9cd9e37dbe..b78b67193a 100644 } /* NULL name */ +diff --git a/dpdk/app/test/test_bpf.c b/dpdk/app/test/test_bpf.c +index ee534687a6..4a61a7d7cb 100644 +--- a/dpdk/app/test/test_bpf.c ++++ b/dpdk/app/test/test_bpf.c +@@ -1797,13 +1797,6 @@ test_call1_check(uint64_t rc, const void *arg) + dummy_func1(arg, &v32, &v64); + v64 += v32; + +- if (v64 != rc) { +- printf("%s@%d: invalid return value " +- "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n", +- __func__, __LINE__, v64, rc); +- return -1; +- } +- return 0; + return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv)); + } + +@@ -1934,13 +1927,7 @@ test_call2_check(uint64_t rc, const void *arg) + dummy_func2(&a, &b); + v = a.u64 + a.u32 + b.u16 + b.u8; + +- if (v != rc) { +- printf("%s@%d: invalid return value " +- "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n", +- __func__, __LINE__, v, rc); +- return -1; +- } +- return 0; ++ return cmp_res(__func__, v, rc, arg, arg, 0); + } + + static const struct rte_bpf_xsym test_call2_xsym[] = { +@@ -2429,7 +2416,6 @@ test_call5_check(uint64_t rc, const void *arg) + v = 0; + + fail: +- + return cmp_res(__func__, v, rc, &v, &rc, sizeof(v)); + } + +@@ -2458,6 +2444,7 @@ static const struct rte_bpf_xsym test_call5_xsym[] = { + }, + }; + ++/* all bpf test cases */ + static const struct bpf_test tests[] = { + { + .name = "test_store1", +@@ -2738,7 +2725,6 @@ run_test(const struct bpf_test *tst) + } + + tst->prepare(tbuf); +- + rc = rte_bpf_exec(bpf, tbuf); + ret = tst->check_result(rc, tbuf); + if (ret != 0) { +@@ -2746,17 +2732,20 @@ run_test(const struct bpf_test *tst) + __func__, __LINE__, tst->name, ret, strerror(ret)); + } + ++ /* repeat the same test with jit, when possible */ + rte_bpf_get_jit(bpf, &jit); +- if (jit.func == NULL) +- return 0; +- +- tst->prepare(tbuf); +- rc = jit.func(tbuf); +- rv = tst->check_result(rc, tbuf); +- ret |= rv; +- if (rv != 0) { +- printf("%s@%d: check_result(%s) failed, error: %d(%s);\n", +- __func__, __LINE__, tst->name, rv, strerror(ret)); ++ if (jit.func != NULL) { ++ ++ tst->prepare(tbuf); ++ rc = jit.func(tbuf); ++ rv = tst->check_result(rc, tbuf); ++ ret |= rv; ++ if (rv != 0) { ++ printf("%s@%d: check_result(%s) failed, " ++ "error: %d(%s);\n", ++ __func__, __LINE__, tst->name, ++ rv, strerror(ret)); ++ } + } + + rte_bpf_destroy(bpf); diff --git a/dpdk/app/test/test_common.c b/dpdk/app/test/test_common.c index 2b856f8ba5..12bd1cad90 100644 --- a/dpdk/app/test/test_common.c @@ -3172,7 +5488,7 @@ index c0492f89a2..d241602445 100644 #endif /* TEST_COMPRESSDEV_TEST_BUFFERS_H_ */ diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c -index 1b561456d7..db9dd3aecb 100644 +index 1b561456d7..a852040ec2 100644 --- a/dpdk/app/test/test_cryptodev.c +++ b/dpdk/app/test/test_cryptodev.c @@ -143,7 +143,7 @@ static struct rte_crypto_op * @@ -3196,13 +5512,38 @@ index 1b561456d7..db9dd3aecb 100644 return op; } -@@ -2823,9 +2828,18 @@ create_wireless_algo_auth_cipher_session(uint8_t dev_id, +@@ -638,7 +643,7 @@ test_device_configure_invalid_dev_id(void) + "Need at least %d devices for test", 1); + + /* valid dev_id values */ +- dev_id = ts_params->valid_devs[ts_params->valid_dev_count - 1]; ++ dev_id = ts_params->valid_devs[0]; + + /* Stop the device in case it's started so it can be configured */ + rte_cryptodev_stop(dev_id); +@@ -2696,13 +2701,15 @@ 
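The test_bpf.c hunks above replace several hand-rolled expected-versus-actual checks with calls to the existing cmp_res() helper. The essence of such a helper, as a minimal sketch (the real one also hexdumps the differing data):

#include <inttypes.h>
#include <stdio.h>

static int
cmp_res(const char *func, uint64_t expected, uint64_t actual)
{
	if (expected != actual) {
		printf("%s: expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
		       func, expected, actual);
		return -1;
	}
	return 0;
}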
create_wireless_algo_cipher_auth_session(uint8_t dev_id, + /* Create Crypto session*/ ut_params->sess = rte_cryptodev_sym_session_create( ts_params->session_mpool); ++ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); -- status = rte_cryptodev_sym_session_init(dev_id, ut_params->sess, -- &ut_params->auth_xform, -- ts_params->session_priv_mpool); + status = rte_cryptodev_sym_session_init(dev_id, ut_params->sess, + &ut_params->cipher_xform, + ts_params->session_priv_mpool); ++ if (status == -ENOTSUP) ++ return status; + + TEST_ASSERT_EQUAL(status, 0, "session init failed"); +- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); + return 0; + } + +@@ -2822,12 +2829,24 @@ create_wireless_algo_auth_cipher_session(uint8_t dev_id, + /* Create Crypto session*/ + ut_params->sess = rte_cryptodev_sym_session_create( + ts_params->session_mpool); ++ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); ++ + if (cipher_op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { + ut_params->auth_xform.next = NULL; + ut_params->cipher_xform.next = &ut_params->auth_xform; @@ -3215,10 +5556,83 @@ index 1b561456d7..db9dd3aecb 100644 + &ut_params->auth_xform, + ts_params->session_priv_mpool); + ++ if (status == -ENOTSUP) ++ return status; + +- status = rte_cryptodev_sym_session_init(dev_id, ut_params->sess, +- &ut_params->auth_xform, +- ts_params->session_priv_mpool); TEST_ASSERT_EQUAL(status, 0, "session init failed"); - TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); +- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); + + return 0; + } +@@ -2971,6 +2990,11 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag, + struct crypto_testsuite_params *ts_params = &testsuite_params; + struct crypto_unittest_params *ut_params = &unittest_params; + ++ enum rte_crypto_cipher_algorithm cipher_algo = ++ ut_params->cipher_xform.cipher.algo; ++ enum rte_crypto_auth_algorithm auth_algo = ++ ut_params->auth_xform.auth.algo; ++ + /* Generate Crypto op data structure */ + ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool, + RTE_CRYPTO_OP_TYPE_SYMMETRIC); +@@ -2991,8 +3015,22 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag, + TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data, + "no room to append auth tag"); + ut_params->digest = sym_op->auth.digest.data; +- sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset( +- ut_params->ibuf, data_pad_len); ++ ++ if (rte_pktmbuf_is_contiguous(ut_params->ibuf)) { ++ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset( ++ ut_params->ibuf, data_pad_len); ++ } else { ++ struct rte_mbuf *m = ut_params->ibuf; ++ unsigned int offset = data_pad_len; ++ ++ while (offset > m->data_len && m->next != NULL) { ++ offset -= m->data_len; ++ m = m->next; ++ } ++ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset( ++ m, offset); ++ } ++ + if (op == RTE_CRYPTO_AUTH_OP_GENERATE) + memset(sym_op->auth.digest.data, 0, auth_tag_len); + else +@@ -3009,22 +3047,38 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag, + iv_ptr += cipher_iv_len; + rte_memcpy(iv_ptr, auth_iv, auth_iv_len); + +- sym_op->cipher.data.length = cipher_len; +- sym_op->cipher.data.offset = cipher_offset; +- sym_op->auth.data.length = auth_len; +- sym_op->auth.data.offset = auth_offset; ++ if (cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || ++ cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 || ++ cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) { ++ sym_op->cipher.data.length = cipher_len; ++ sym_op->cipher.data.offset = 
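The digest-address fix above handles segmented mbufs: an absolute byte offset is only meaningful within one segment, so the code first walks the chain until the offset lands inside the current segment before taking the IOVA. The same walk over a generic segment list:

#include <stddef.h>

struct seg {			/* stand-in for a chained rte_mbuf */
	struct seg *next;
	unsigned int data_len;
	char *data;
};

static char *
seg_at(struct seg *m, unsigned int offset)
{
	/* Walk until the remaining offset falls inside this segment. */
	while (offset >= m->data_len && m->next != NULL) {
		offset -= m->data_len;
		m = m->next;
	}
	return m->data + offset;	/* caller ensures offset < data_len */
}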
cipher_offset; ++ } else { ++ sym_op->cipher.data.length = cipher_len >> 3; ++ sym_op->cipher.data.offset = cipher_offset >> 3; ++ } ++ ++ if (auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || ++ auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 || ++ auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { ++ sym_op->auth.data.length = auth_len; ++ sym_op->auth.data.offset = auth_offset; ++ } else { ++ sym_op->auth.data.length = auth_len >> 3; ++ sym_op->auth.data.offset = auth_offset >> 3; ++ } -@@ -3018,13 +3032,14 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag, + return 0; } static int @@ -3235,7 +5649,7 @@ index 1b561456d7..db9dd3aecb 100644 { struct crypto_testsuite_params *ts_params = &testsuite_params; struct crypto_unittest_params *ut_params = &unittest_params; -@@ -3081,6 +3096,10 @@ create_wireless_algo_auth_cipher_operation(unsigned int auth_tag_len, +@@ -3081,6 +3135,10 @@ create_wireless_algo_auth_cipher_operation(unsigned int auth_tag_len, } } @@ -3246,7 +5660,7 @@ index 1b561456d7..db9dd3aecb 100644 /* Copy cipher and auth IVs at the end of the crypto operation */ uint8_t *iv_ptr = rte_crypto_op_ctod_offset( ut_params->op, uint8_t *, IV_OFFSET); -@@ -4643,7 +4662,7 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata, +@@ -4643,7 +4701,7 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata, /* Create SNOW 3G operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3255,7 +5669,7 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->auth_iv.data, tdata->auth_iv.len, (tdata->digest.offset_bytes == 0 ? -@@ -4653,7 +4672,7 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata, +@@ -4653,7 +4711,7 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata, tdata->cipher.offset_bits, tdata->validAuthLenInBits.len, tdata->auth.offset_bits, @@ -3264,7 +5678,7 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -4819,7 +4838,7 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata, +@@ -4819,7 +4877,7 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata, /* Create SNOW 3G operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3273,7 +5687,7 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->auth_iv.data, tdata->auth_iv.len, (tdata->digest.offset_bytes == 0 ? -@@ -4829,7 +4848,7 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata, +@@ -4829,7 +4887,7 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata, tdata->cipher.offset_bits, tdata->validAuthLenInBits.len, tdata->auth.offset_bits, @@ -3282,7 +5696,7 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -4988,7 +5007,7 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata, +@@ -4988,7 +5046,7 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata, /* Create KASUMI operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3291,7 +5705,7 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, NULL, 0, (tdata->digest.offset_bytes == 0 ? 
-@@ -4998,7 +5017,7 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata, +@@ -4998,7 +5056,7 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata, tdata->validCipherOffsetInBits.len, tdata->validAuthLenInBits.len, 0, @@ -3300,7 +5714,7 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -5165,7 +5184,7 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata, +@@ -5165,7 +5223,7 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata, /* Create KASUMI operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3309,7 +5723,7 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, NULL, 0, (tdata->digest.offset_bytes == 0 ? -@@ -5175,7 +5194,7 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata, +@@ -5175,7 +5233,7 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata, tdata->validCipherOffsetInBits.len, tdata->validAuthLenInBits.len, 0, @@ -3318,7 +5732,7 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -5666,7 +5685,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, +@@ -5666,7 +5724,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, /* Create ZUC operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3327,7 +5741,7 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->auth_iv.data, tdata->auth_iv.len, (tdata->digest.offset_bytes == 0 ? -@@ -5676,7 +5695,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, +@@ -5676,7 +5734,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, tdata->validCipherOffsetInBits.len, tdata->validAuthLenInBits.len, 0, @@ -3336,7 +5750,7 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -5852,7 +5871,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -5852,7 +5910,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, /* Create ZUC operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3345,7 +5759,7 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, NULL, 0, (tdata->digest.offset_bytes == 0 ? -@@ -5862,7 +5881,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -5862,7 +5920,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, tdata->validCipherOffsetInBits.len, tdata->validAuthLenInBits.len, 0, @@ -3354,7 +5768,57 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -6643,7 +6662,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, +@@ -6576,8 +6634,9 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, + unsigned int ciphertext_len; + + struct rte_cryptodev_info dev_info; ++ struct rte_crypto_op *op; + +- /* Check if device supports particular algorithms */ ++ /* Check if device supports particular algorithms separately */ + if (test_mixed_check_if_unsupported(tdata)) + return -ENOTSUP; + +@@ -6593,18 +6652,26 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, + } + + /* Create the session */ +- retval = create_wireless_algo_auth_cipher_session( +- ts_params->valid_devs[0], +- (verify ? RTE_CRYPTO_CIPHER_OP_DECRYPT +- : RTE_CRYPTO_CIPHER_OP_ENCRYPT), +- (verify ? 
RTE_CRYPTO_AUTH_OP_VERIFY +- : RTE_CRYPTO_AUTH_OP_GENERATE), +- tdata->auth_algo, +- tdata->cipher_algo, +- tdata->auth_key.data, tdata->auth_key.len, +- tdata->auth_iv.len, tdata->digest_enc.len, +- tdata->cipher_iv.len); +- ++ if (verify) ++ retval = create_wireless_algo_cipher_auth_session( ++ ts_params->valid_devs[0], ++ RTE_CRYPTO_CIPHER_OP_DECRYPT, ++ RTE_CRYPTO_AUTH_OP_VERIFY, ++ tdata->auth_algo, ++ tdata->cipher_algo, ++ tdata->auth_key.data, tdata->auth_key.len, ++ tdata->auth_iv.len, tdata->digest_enc.len, ++ tdata->cipher_iv.len); ++ else ++ retval = create_wireless_algo_auth_cipher_session( ++ ts_params->valid_devs[0], ++ RTE_CRYPTO_CIPHER_OP_ENCRYPT, ++ RTE_CRYPTO_AUTH_OP_GENERATE, ++ tdata->auth_algo, ++ tdata->cipher_algo, ++ tdata->auth_key.data, tdata->auth_key.len, ++ tdata->auth_iv.len, tdata->digest_enc.len, ++ tdata->cipher_iv.len); + if (retval < 0) + return retval; + +@@ -6643,24 +6710,34 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, /* Create the operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3363,7 +5827,10 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->auth_iv.data, tdata->auth_iv.len, (tdata->digest_enc.offset == 0 ? -@@ -6653,7 +6672,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, +- (verify ? ciphertext_pad_len : plaintext_pad_len) ++ plaintext_pad_len + : tdata->digest_enc.offset), + tdata->validCipherLen.len_bits, tdata->cipher.offset_bits, tdata->validAuthLen.len_bits, tdata->auth.offset_bits, @@ -3372,7 +5839,107 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -6827,7 +6846,7 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + +- ut_params->op = process_crypto_request(ts_params->valid_devs[0], ++ op = process_crypto_request(ts_params->valid_devs[0], + ut_params->op); + ++ /* Check if the op failed because the device doesn't */ ++ /* support this particular combination of algorithms */ ++ if (op == NULL && ut_params->op->status == ++ RTE_CRYPTO_OP_STATUS_INVALID_SESSION) { ++ printf("Device doesn't support this mixed combination. " ++ "Test Skipped.\n"); ++ return -ENOTSUP; ++ } ++ ut_params->op = op; ++ + TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); + + ut_params->obuf = (op_mode == IN_PLACE ? 
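Both mixed auth-cipher tests above adopt the same skip convention: when the PMD returns no completed operation and the submitted op is left with status INVALID_SESSION, the combination is treated as unsupported and the test is skipped rather than failed. A generic sketch of that three-way outcome; the enum and struct are illustrative, not the rte_crypto types:

#include <stdio.h>

enum op_status { OP_SUCCESS, OP_INVALID_SESSION, OP_ERROR };

struct crypto_op { enum op_status status; };

/* 1 = completed, 0 = combination unsupported (skip), -1 = real failure */
static int
classify_result(const struct crypto_op *returned,
		const struct crypto_op *submitted)
{
	if (returned == NULL) {
		if (submitted->status == OP_INVALID_SESSION) {
			printf("combination unsupported, skipping test\n");
			return 0;
		}
		return -1;
	}
	return 1;
}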
+@@ -6675,12 +6752,10 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, + (tdata->cipher.offset_bits >> 3); + + debug_hexdump(stdout, "plaintext:", plaintext, +- (tdata->plaintext.len_bits >> 3) - +- tdata->digest_enc.len); ++ tdata->plaintext.len_bits >> 3); + debug_hexdump(stdout, "plaintext expected:", + tdata->plaintext.data, +- (tdata->plaintext.len_bits >> 3) - +- tdata->digest_enc.len); ++ tdata->plaintext.len_bits >> 3); + } else { + if (ut_params->obuf) + ciphertext = rte_pktmbuf_mtod(ut_params->obuf, +@@ -6725,6 +6800,10 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, + DIGEST_BYTE_LENGTH_SNOW3G_UIA2, + "Generated auth tag not as expected"); + } ++ ++ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS, ++ "crypto op processing failed"); ++ + return 0; + } + +@@ -6748,6 +6827,7 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + uint8_t digest_buffer[10000]; + + struct rte_cryptodev_info dev_info; ++ struct rte_crypto_op *op; + + /* Check if device supports particular algorithms */ + if (test_mixed_check_if_unsupported(tdata)) +@@ -6776,18 +6856,26 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + } + + /* Create the session */ +- retval = create_wireless_algo_auth_cipher_session( +- ts_params->valid_devs[0], +- (verify ? RTE_CRYPTO_CIPHER_OP_DECRYPT +- : RTE_CRYPTO_CIPHER_OP_ENCRYPT), +- (verify ? RTE_CRYPTO_AUTH_OP_VERIFY +- : RTE_CRYPTO_AUTH_OP_GENERATE), +- tdata->auth_algo, +- tdata->cipher_algo, +- tdata->auth_key.data, tdata->auth_key.len, +- tdata->auth_iv.len, tdata->digest_enc.len, +- tdata->cipher_iv.len); +- ++ if (verify) ++ retval = create_wireless_algo_cipher_auth_session( ++ ts_params->valid_devs[0], ++ RTE_CRYPTO_CIPHER_OP_DECRYPT, ++ RTE_CRYPTO_AUTH_OP_VERIFY, ++ tdata->auth_algo, ++ tdata->cipher_algo, ++ tdata->auth_key.data, tdata->auth_key.len, ++ tdata->auth_iv.len, tdata->digest_enc.len, ++ tdata->cipher_iv.len); ++ else ++ retval = create_wireless_algo_auth_cipher_session( ++ ts_params->valid_devs[0], ++ RTE_CRYPTO_CIPHER_OP_ENCRYPT, ++ RTE_CRYPTO_AUTH_OP_GENERATE, ++ tdata->auth_algo, ++ tdata->cipher_algo, ++ tdata->auth_key.data, tdata->auth_key.len, ++ tdata->auth_iv.len, tdata->digest_enc.len, ++ tdata->cipher_iv.len); + if (retval < 0) + return retval; + +@@ -6797,7 +6885,7 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16); + + ut_params->ibuf = create_segmented_mbuf(ts_params->mbuf_pool, +- plaintext_pad_len, 15, 0); ++ ciphertext_pad_len, 15, 0); + TEST_ASSERT_NOT_NULL(ut_params->ibuf, + "Failed to allocate input buffer in mempool"); + +@@ -6827,24 +6915,35 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, /* Create the operation */ retval = create_wireless_algo_auth_cipher_operation( @@ -3381,7 +5948,10 @@ index 1b561456d7..db9dd3aecb 100644 tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->auth_iv.data, tdata->auth_iv.len, (tdata->digest_enc.offset == 0 ? -@@ -6837,7 +6856,7 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, +- (verify ? 
ciphertext_pad_len : plaintext_pad_len) ++ plaintext_pad_len + : tdata->digest_enc.offset), + tdata->validCipherLen.len_bits, tdata->cipher.offset_bits, tdata->validAuthLen.len_bits, tdata->auth.offset_bits, @@ -3390,7 +5960,214 @@ index 1b561456d7..db9dd3aecb 100644 if (retval < 0) return retval; -@@ -9139,8 +9158,10 @@ test_stats(void) + +- ut_params->op = process_crypto_request(ts_params->valid_devs[0], ++ op = process_crypto_request(ts_params->valid_devs[0], + ut_params->op); + ++ /* Check if the op failed because the device doesn't */ ++ /* support this particular combination of algorithms */ ++ if (op == NULL && ut_params->op->status == ++ RTE_CRYPTO_OP_STATUS_INVALID_SESSION) { ++ printf("Device doesn't support this mixed combination. " ++ "Test Skipped.\n"); ++ return -ENOTSUP; ++ } ++ ++ ut_params->op = op; ++ + TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); + + ut_params->obuf = (op_mode == IN_PLACE ? +@@ -6917,6 +7016,10 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + tdata->digest_enc.len, + "Generated auth tag not as expected"); + } ++ ++ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS, ++ "crypto op processing failed"); ++ + return 0; + } + +@@ -6978,6 +7081,176 @@ test_verify_aes_cmac_aes_ctr_digest_enc_test_case_1_oop_sgl(void) + &auth_aes_cmac_cipher_aes_ctr_test_case_1, OUT_OF_PLACE, 1); + } + ++/** MIXED AUTH + CIPHER */ ++ ++static int ++test_auth_zuc_cipher_snow_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_zuc_cipher_snow_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_zuc_cipher_snow_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_zuc_cipher_snow_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_aes_cmac_cipher_snow_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_aes_cmac_cipher_snow_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_aes_cmac_cipher_snow_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_aes_cmac_cipher_snow_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_zuc_cipher_aes_ctr_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_zuc_cipher_aes_ctr_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_zuc_cipher_aes_ctr_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_zuc_cipher_aes_ctr_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_snow_cipher_aes_ctr_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_snow_cipher_aes_ctr_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_snow_cipher_aes_ctr_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_snow_cipher_aes_ctr_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_snow_cipher_zuc_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_snow_cipher_zuc_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_snow_cipher_zuc_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_snow_cipher_zuc_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_aes_cmac_cipher_zuc_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_aes_cmac_cipher_zuc_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_aes_cmac_cipher_zuc_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_aes_cmac_cipher_zuc_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_null_cipher_snow_test_case_1(void) ++{ ++ return 
test_mixed_auth_cipher( ++ &auth_null_cipher_snow_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_null_cipher_snow_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_null_cipher_snow_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_null_cipher_zuc_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_null_cipher_zuc_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_null_cipher_zuc_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_null_cipher_zuc_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_snow_cipher_null_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_snow_cipher_null_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_snow_cipher_null_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_snow_cipher_null_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_zuc_cipher_null_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_zuc_cipher_null_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_zuc_cipher_null_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_zuc_cipher_null_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_null_cipher_aes_ctr_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_null_cipher_aes_ctr_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_null_cipher_aes_ctr_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_null_cipher_aes_ctr_test_case_1, OUT_OF_PLACE, 1); ++} ++ ++static int ++test_auth_aes_cmac_cipher_null_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_aes_cmac_cipher_null_test_case_1, OUT_OF_PLACE, 0); ++} ++ ++static int ++test_verify_auth_aes_cmac_cipher_null_test_case_1(void) ++{ ++ return test_mixed_auth_cipher( ++ &auth_aes_cmac_cipher_null_test_case_1, OUT_OF_PLACE, 1); ++} ++ + static int + test_3DES_chain_qat_all(void) + { +@@ -9139,8 +9412,10 @@ test_stats(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; struct rte_cryptodev_stats stats; @@ -3403,7 +6180,7 @@ index 1b561456d7..db9dd3aecb 100644 rte_cryptodev_stats_reset(ts_params->valid_devs[0]); TEST_ASSERT((rte_cryptodev_stats_get(ts_params->valid_devs[0] + 600, -@@ -9148,13 +9169,6 @@ test_stats(void) +@@ -9148,18 +9423,9 @@ test_stats(void) "rte_cryptodev_stats_get invalid dev failed"); TEST_ASSERT((rte_cryptodev_stats_get(ts_params->valid_devs[0], 0) != 0), "rte_cryptodev_stats_get invalid Param failed"); @@ -3416,8 +6193,35 @@ index 1b561456d7..db9dd3aecb 100644 - dev->dev_ops->stats_get = temp_pfn; /* Test expected values */ - ut_setup(); -@@ -10818,13 +10832,8 @@ test_authentication_verify_fail_when_data_corruption( +- ut_setup(); + test_AES_CBC_HMAC_SHA1_encrypt_digest(); +- ut_teardown(); + TEST_ASSERT_SUCCESS(rte_cryptodev_stats_get(ts_params->valid_devs[0], + &stats), + "rte_cryptodev_stats_get failed"); +@@ -10450,7 +10716,7 @@ aes128cbc_hmac_sha1_test_vector = { + static const struct test_crypto_vector + aes128cbc_hmac_sha1_aad_test_vector = { + .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC, +- .cipher_offset = 12, ++ .cipher_offset = 8, + .cipher_len = 496, + .cipher_key = { + .data = { +@@ -10486,9 +10752,9 @@ aes128cbc_hmac_sha1_aad_test_vector = { + }, + .digest = { + .data = { +- 0x1F, 0x6A, 0xD2, 0x8B, 0x4B, 0xB3, 0xC0, 0x9E, +- 0x86, 0x9B, 0x3A, 0xF2, 0x00, 0x5B, 0x4F, 0x08, +- 0x62, 0x8D, 0x62, 0x65 ++ 0x6D, 0xF3, 0x50, 0x79, 0x7A, 0x2A, 0xAC, 0x7F, ++ 0xA6, 0xF0, 0xC6, 0x38, 
0x1F, 0xA4, 0xDD, 0x9B, ++ 0x62, 0x0F, 0xFB, 0x10 + }, + .len = 20 + } +@@ -10818,13 +11084,8 @@ test_authentication_verify_fail_when_data_corruption( ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); @@ -3432,7 +6236,7 @@ index 1b561456d7..db9dd3aecb 100644 return 0; } -@@ -10879,13 +10888,8 @@ test_authentication_verify_GMAC_fail_when_corruption( +@@ -10879,13 +11140,8 @@ test_authentication_verify_GMAC_fail_when_corruption( ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); @@ -3447,7 +6251,7 @@ index 1b561456d7..db9dd3aecb 100644 return 0; } -@@ -10940,13 +10944,7 @@ test_authenticated_decryption_fail_when_corruption( +@@ -10940,13 +11196,7 @@ test_authenticated_decryption_fail_when_corruption( ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); @@ -3462,7 +6266,7 @@ index 1b561456d7..db9dd3aecb 100644 return 0; } -@@ -11149,6 +11147,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op, +@@ -11149,6 +11399,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op, const unsigned int auth_tag_len = tdata->auth_tag.len; const unsigned int iv_len = tdata->iv.len; unsigned int aad_len = tdata->aad.len; @@ -3470,7 +6274,7 @@ index 1b561456d7..db9dd3aecb 100644 /* Generate Crypto op data structure */ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool, -@@ -11203,8 +11202,10 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op, +@@ -11203,8 +11454,10 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op, rte_memcpy(iv_ptr, tdata->iv.data, iv_len); @@ -3482,7 +6286,7 @@ index 1b561456d7..db9dd3aecb 100644 TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data, "no room to prepend aad"); sym_op->aead.aad.phys_addr = rte_pktmbuf_iova( -@@ -11219,7 +11220,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op, +@@ -11219,7 +11472,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op, } sym_op->aead.data.length = tdata->plaintext.len; @@ -3491,7 +6295,7 @@ index 1b561456d7..db9dd3aecb 100644 return 0; } -@@ -11252,7 +11253,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, +@@ -11252,7 +11505,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, int ecx = 0; void *digest_mem = NULL; @@ -3500,7 +6304,7 @@ index 1b561456d7..db9dd3aecb 100644 if (tdata->plaintext.len % fragsz != 0) { if (tdata->plaintext.len / fragsz + 1 > SGL_MAX_NO) -@@ -11915,6 +11916,8 @@ static struct unit_test_suite cryptodev_qat_testsuite = { +@@ -11915,6 +12168,8 @@ static struct unit_test_suite cryptodev_qat_testsuite = { test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B), TEST_CASE_ST(ut_setup, ut_teardown, test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B), @@ -3509,6 +6313,228 @@ index 1b561456d7..db9dd3aecb 100644 TEST_CASE_ST(ut_setup, ut_teardown, test_AES_GCM_authenticated_encryption_test_case_1), TEST_CASE_ST(ut_setup, ut_teardown, +@@ -12288,6 +12543,68 @@ static struct unit_test_suite cryptodev_qat_testsuite = { + TEST_CASE_ST(ut_setup, ut_teardown, + test_verify_aes_cmac_aes_ctr_digest_enc_test_case_1_oop_sgl), + ++ /** AUTH ZUC + CIPHER SNOW3G */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_zuc_cipher_snow_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_zuc_cipher_snow_test_case_1), ++ /** AUTH AES CMAC + CIPHER SNOW3G */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_aes_cmac_cipher_snow_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ 
test_verify_auth_aes_cmac_cipher_snow_test_case_1), ++ /** AUTH ZUC + CIPHER AES CTR */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_zuc_cipher_aes_ctr_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_zuc_cipher_aes_ctr_test_case_1), ++ /** AUTH SNOW3G + CIPHER AES CTR */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_snow_cipher_aes_ctr_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_snow_cipher_aes_ctr_test_case_1), ++ /** AUTH SNOW3G + CIPHER ZUC */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_snow_cipher_zuc_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_snow_cipher_zuc_test_case_1), ++ /** AUTH AES CMAC + CIPHER ZUC */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_aes_cmac_cipher_zuc_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_aes_cmac_cipher_zuc_test_case_1), ++ ++ /** AUTH NULL + CIPHER SNOW3G */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_null_cipher_snow_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_null_cipher_snow_test_case_1), ++ /** AUTH NULL + CIPHER ZUC */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_null_cipher_zuc_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_null_cipher_zuc_test_case_1), ++ /** AUTH SNOW3G + CIPHER NULL */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_snow_cipher_null_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_snow_cipher_null_test_case_1), ++ /** AUTH ZUC + CIPHER NULL */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_zuc_cipher_null_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_zuc_cipher_null_test_case_1), ++ /** AUTH NULL + CIPHER AES CTR */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_null_cipher_aes_ctr_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_null_cipher_aes_ctr_test_case_1), ++ /** AUTH AES CMAC + CIPHER NULL */ ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_auth_aes_cmac_cipher_null_test_case_1), ++ TEST_CASE_ST(ut_setup, ut_teardown, ++ test_verify_auth_aes_cmac_cipher_null_test_case_1), ++ + TEST_CASES_END() /**< NULL terminate unit test array */ + } + }; +diff --git a/dpdk/app/test/test_cryptodev_aes_test_vectors.h b/dpdk/app/test/test_cryptodev_aes_test_vectors.h +index 8307fcf9ae..66994b659a 100644 +--- a/dpdk/app/test/test_cryptodev_aes_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_aes_test_vectors.h +@@ -358,69 +358,69 @@ static const struct blockcipher_test_data null_test_data_chain_x1_multiple = { + + static const uint8_t ciphertext512_aes128cbc_aad[] = { + 0x57, 0x68, 0x61, 0x74, 0x20, 0x61, 0x20, 0x6C, +- 0x6F, 0x75, 0x73, 0x79, 0x6D, 0x70, 0xB4, 0xAD, +- 0x09, 0x7C, 0xD7, 0x52, 0xD6, 0xF2, 0xBF, 0xD1, +- 0x9D, 0x79, 0xC6, 0xB6, 0x8F, 0x94, 0xEB, 0xD8, +- 0xBA, 0x5E, 0x01, 0x49, 0x7D, 0xB3, 0xC5, 0xFE, +- 0x18, 0xF4, 0xE3, 0x60, 0x8C, 0x84, 0x68, 0x13, +- 0x33, 0x06, 0x85, 0x60, 0xD3, 0xE7, 0x8A, 0xB5, +- 0x23, 0xA2, 0xDE, 0x52, 0x5C, 0xB6, 0x26, 0x37, +- 0xBB, 0x23, 0x8A, 0x38, 0x07, 0x85, 0xB6, 0x2E, +- 0xC3, 0x69, 0x57, 0x79, 0x6B, 0xE4, 0xD7, 0x86, +- 0x23, 0x72, 0x4C, 0x65, 0x49, 0x08, 0x1E, 0xF3, +- 0xCC, 0x71, 0x4C, 0x45, 0x97, 0x03, 0xBC, 0xA0, +- 0x9D, 0xF0, 0x4F, 0x5D, 0xEC, 0x40, 0x6C, 0xC6, +- 0x52, 0xC0, 0x9D, 0x1C, 0xDC, 0x8B, 0xC2, 0xFA, +- 0x35, 0xA7, 0x3A, 0x00, 0x04, 0x1C, 0xA6, 0x91, +- 0x5D, 0xEB, 0x07, 0xA1, 0xB9, 0x3E, 0xD1, 0xB6, +- 0xCA, 0x96, 0xEC, 0x71, 0xF7, 0x7D, 0xB6, 0x09, +- 0x3D, 0x19, 0x6E, 0x75, 
0x03, 0xC3, 0x1A, 0x4E, +- 0x5B, 0x4D, 0xEA, 0xD9, 0x92, 0x96, 0x01, 0xFB, +- 0xA3, 0xC2, 0x6D, 0xC4, 0x17, 0x6B, 0xB4, 0x3B, +- 0x1E, 0x87, 0x54, 0x26, 0x95, 0x63, 0x07, 0x73, +- 0xB6, 0xBA, 0x52, 0xD7, 0xA7, 0xD0, 0x9C, 0x75, +- 0x8A, 0xCF, 0xC4, 0x3C, 0x4A, 0x55, 0x0E, 0x53, +- 0xEC, 0xE0, 0x31, 0x51, 0xB7, 0xB7, 0xD2, 0xB4, +- 0xF3, 0x2B, 0x70, 0x6D, 0x15, 0x9E, 0x57, 0x30, +- 0x72, 0xE5, 0xA4, 0x71, 0x5F, 0xA4, 0xE8, 0x7C, +- 0x46, 0x58, 0x36, 0x71, 0x91, 0x55, 0xAA, 0x99, +- 0x3B, 0x3F, 0xF6, 0xA2, 0x9D, 0x27, 0xBF, 0xC2, +- 0x62, 0x2C, 0x85, 0xB7, 0x51, 0xDD, 0xFD, 0x7B, +- 0x8B, 0xB5, 0xDD, 0x2A, 0x73, 0xF8, 0x93, 0x9A, +- 0x3F, 0xAD, 0x1D, 0xF0, 0x46, 0xD1, 0x76, 0x83, +- 0x71, 0x4E, 0xD3, 0x0D, 0x64, 0x8C, 0xC3, 0xE6, +- 0x03, 0xED, 0xE8, 0x53, 0x23, 0x1A, 0xC7, 0x86, +- 0xEB, 0x87, 0xD6, 0x78, 0xF9, 0xFB, 0x9C, 0x1D, +- 0xE7, 0x4E, 0xC0, 0x70, 0x27, 0x7A, 0x43, 0xE2, +- 0x5D, 0xA4, 0x10, 0x40, 0xBE, 0x61, 0x0D, 0x2B, +- 0x25, 0x08, 0x75, 0x91, 0xB5, 0x5A, 0x26, 0xC8, +- 0x32, 0xA7, 0xC6, 0x88, 0xBF, 0x75, 0x94, 0xCC, +- 0x58, 0xA4, 0xFE, 0x2F, 0xF7, 0x5C, 0xD2, 0x36, +- 0x66, 0x55, 0xF0, 0xEA, 0xF5, 0x64, 0x43, 0xE7, +- 0x6D, 0xE0, 0xED, 0xA1, 0x10, 0x0A, 0x84, 0x07, +- 0x11, 0x88, 0xFA, 0xA1, 0xD3, 0xA0, 0x00, 0x5D, +- 0xEB, 0xB5, 0x62, 0x01, 0x72, 0xC1, 0x9B, 0x39, +- 0x0B, 0xD3, 0xAF, 0x04, 0x19, 0x42, 0xEC, 0xFF, +- 0x4B, 0xB3, 0x5E, 0x87, 0x27, 0xE4, 0x26, 0x57, +- 0x76, 0xCD, 0x36, 0x31, 0x5B, 0x94, 0x74, 0xFF, +- 0x33, 0x91, 0xAA, 0xD1, 0x45, 0x34, 0xC2, 0x11, +- 0xF0, 0x35, 0x44, 0xC9, 0xD5, 0xA2, 0x5A, 0xC2, +- 0xE9, 0x9E, 0xCA, 0xE2, 0x6F, 0xD2, 0x40, 0xB4, +- 0x93, 0x42, 0x78, 0x20, 0x92, 0x88, 0xC7, 0x16, +- 0xCF, 0x15, 0x54, 0x7B, 0xE1, 0x46, 0x38, 0x69, +- 0xB8, 0xE4, 0xF1, 0x81, 0xF0, 0x08, 0x6F, 0x92, +- 0x6D, 0x1A, 0xD9, 0x93, 0xFA, 0xD7, 0x35, 0xFE, +- 0x7F, 0x59, 0x43, 0x1D, 0x3A, 0x3B, 0xFC, 0xD0, +- 0x14, 0x95, 0x1E, 0xB2, 0x04, 0x08, 0x4F, 0xC6, +- 0xEA, 0xE8, 0x22, 0xF3, 0xD7, 0x66, 0x93, 0xAA, +- 0xFD, 0xA0, 0xFE, 0x03, 0x96, 0x54, 0x78, 0x35, +- 0x18, 0xED, 0xB7, 0x2F, 0x40, 0xE3, 0x8E, 0x22, +- 0xC6, 0xDA, 0xB0, 0x8E, 0xA0, 0xA1, 0x62, 0x03, +- 0x63, 0x34, 0x11, 0xF5, 0x9E, 0xAA, 0x6B, 0xC4, +- 0x14, 0x75, 0x4C, 0xF4, 0xD8, 0xD9, 0xF1, 0x76, +- 0xE3, 0xD3, 0x55, 0xCE, 0x22, 0x7D, 0x4A, 0xB7, +- 0xBB, 0x7F, 0x4F, 0x09, 0x88, 0x70, 0x6E, 0x09, +- 0x84, 0x6B, 0x24, 0x19, 0x2C, 0x20, 0x73, 0x75 ++ 0x1D, 0x7C, 0x76, 0xED, 0xC2, 0x10, 0x3C, 0xB5, ++ 0x14, 0x07, 0x3C, 0x33, 0x7B, 0xBE, 0x9E, 0xA9, ++ 0x01, 0xC5, 0xAA, 0xA6, 0xB6, 0x7A, 0xE1, 0xDB, ++ 0x39, 0xAA, 0xAA, 0xF4, 0xEE, 0xA7, 0x71, 0x71, ++ 0x78, 0x0D, 0x5A, 0xD4, 0xF9, 0xCD, 0x75, 0xD1, ++ 0x9C, 0x7F, 0xC8, 0x58, 0x46, 0x7A, 0xD1, 0x81, ++ 0xEA, 0xCC, 0x08, 0xDC, 0x82, 0x73, 0x22, 0x08, ++ 0x11, 0x73, 0x7C, 0xB1, 0x84, 0x6A, 0x8E, 0x67, ++ 0x3F, 0x5D, 0xDB, 0x0E, 0xE2, 0xC2, 0xCB, 0x6D, ++ 0x88, 0xEC, 0x3F, 0x50, 0x44, 0xD3, 0x47, 0x6E, ++ 0xDD, 0x42, 0xDC, 0x2A, 0x5E, 0x5C, 0x50, 0x24, ++ 0x57, 0x8A, 0xE7, 0xC5, 0x53, 0x6D, 0x89, 0x33, ++ 0x21, 0x65, 0x82, 0xD6, 0xE9, 0xE7, 0x77, 0x10, ++ 0xC2, 0x09, 0x91, 0xC1, 0x42, 0x62, 0x36, 0xF4, ++ 0x43, 0x37, 0x95, 0xB3, 0x7E, 0x21, 0xC5, 0x3E, ++ 0x65, 0xCB, 0xB6, 0xAA, 0xEC, 0xA5, 0xC6, 0x5C, ++ 0x4D, 0xBE, 0x14, 0xF1, 0x98, 0xBF, 0x6C, 0x8A, ++ 0x9E, 0x9F, 0xD4, 0xB4, 0xF2, 0x22, 0x96, 0x99, ++ 0x37, 0x32, 0xB6, 0xC1, 0x04, 0x66, 0x52, 0x37, ++ 0x5D, 0x5F, 0x58, 0x92, 0xC9, 0x97, 0xEA, 0x60, ++ 0x60, 0x27, 0x57, 0xF9, 0x47, 0x4F, 0xBC, 0xDF, ++ 0x05, 0xBD, 0x37, 0x87, 0xBB, 0x09, 0xA5, 0xBE, ++ 0xC1, 0xFC, 0x32, 0x86, 0x6A, 0xB7, 0x8B, 0x1E, ++ 0x6B, 
0xCE, 0x8D, 0x81, 0x63, 0x4C, 0xF2, 0x7F, ++ 0xD1, 0x45, 0x82, 0xE8, 0x0D, 0x1C, 0x4D, 0xA8, ++ 0xBF, 0x2D, 0x2B, 0x52, 0xE5, 0xDB, 0xAB, 0xFD, ++ 0x04, 0xA2, 0xA1, 0x1E, 0x21, 0x1D, 0x06, 0x9A, ++ 0xC2, 0x7D, 0x99, 0xFC, 0xB4, 0x72, 0x89, 0x41, ++ 0x55, 0x69, 0xFA, 0x1F, 0x78, 0x2F, 0x35, 0x59, ++ 0xD7, 0x59, 0x6D, 0xA6, 0x45, 0xC9, 0x2B, 0x06, ++ 0x6C, 0xEC, 0x83, 0x34, 0xA5, 0x08, 0xDB, 0x6F, ++ 0xDE, 0x75, 0x21, 0x9B, 0xB0, 0xCB, 0x0A, 0xAE, ++ 0x22, 0x99, 0x74, 0x1C, 0x9D, 0x37, 0x0E, 0xC6, ++ 0x3A, 0x45, 0x49, 0xE5, 0xE3, 0x21, 0x11, 0xEA, ++ 0x34, 0x25, 0xD5, 0x76, 0xB0, 0x30, 0x19, 0x87, ++ 0x14, 0x3A, 0x10, 0x6F, 0x6D, 0xDD, 0xE9, 0x60, ++ 0x6A, 0x00, 0x6A, 0x4C, 0x5B, 0x85, 0x3E, 0x1A, ++ 0x41, 0xFA, 0xDE, 0x2D, 0x2F, 0x2E, 0x5B, 0x79, ++ 0x09, 0x66, 0x65, 0xD0, 0xDB, 0x32, 0x05, 0xB5, ++ 0xEA, 0xFB, 0x6A, 0xD5, 0x43, 0xF8, 0xBD, 0x98, ++ 0x7B, 0x8E, 0x3B, 0x85, 0x89, 0x5D, 0xC5, 0x59, ++ 0x54, 0x22, 0x75, 0xA8, 0x60, 0xDC, 0x0A, 0x37, ++ 0x8C, 0xD8, 0x05, 0xEA, 0x62, 0x62, 0x71, 0x98, ++ 0x0C, 0xCB, 0xCE, 0x0A, 0xD9, 0xE6, 0xE8, 0xA7, ++ 0xB3, 0x2D, 0x89, 0xA7, 0x60, 0xF0, 0x42, 0xA7, ++ 0x3D, 0x80, 0x44, 0xE7, 0xC1, 0xA6, 0x88, 0xB1, ++ 0x4F, 0xC0, 0xB1, 0xAF, 0x40, 0xF3, 0x54, 0x72, ++ 0x8F, 0xAF, 0x47, 0x96, 0x19, 0xEB, 0xA5, 0x5C, ++ 0x00, 0x3B, 0x36, 0xC8, 0x3F, 0x1E, 0x63, 0x54, ++ 0xF3, 0x3D, 0x85, 0x44, 0x9B, 0x9B, 0x20, 0xE3, ++ 0x9D, 0xEF, 0x62, 0x21, 0xA1, 0x0B, 0x78, 0xF4, ++ 0x2B, 0x89, 0x66, 0x5E, 0x97, 0xC6, 0xC4, 0x55, ++ 0x35, 0x32, 0xD7, 0x44, 0x95, 0x9A, 0xE7, 0xF2, ++ 0x57, 0x52, 0x5B, 0x92, 0x86, 0x8F, 0x8B, 0xCF, ++ 0x41, 0x89, 0xF6, 0x2A, 0xD3, 0x42, 0x87, 0x43, ++ 0x56, 0x1F, 0x0E, 0x49, 0xF1, 0x32, 0x6D, 0xA8, ++ 0x62, 0xDF, 0x47, 0xBB, 0xB6, 0x53, 0xF8, 0x5C, ++ 0x36, 0xDA, 0x34, 0x34, 0x2D, 0x2E, 0x1D, 0x33, ++ 0xAF, 0x6A, 0x1E, 0xF1, 0xC9, 0x72, 0xB5, 0x3C, ++ 0x64, 0x4C, 0x96, 0x12, 0x78, 0x67, 0x6A, 0xE5, ++ 0x8B, 0x05, 0x80, 0xAE, 0x7D, 0xE5, 0x9B, 0x24, ++ 0xDB, 0xFF, 0x1E, 0xB8, 0x36, 0x6D, 0x3D, 0x5D, ++ 0x73, 0x65, 0x72, 0x73, 0x2C, 0x20, 0x73, 0x75 + }; + + /* AES128-CTR-SHA1 test vector */ +diff --git a/dpdk/app/test/test_cryptodev_asym.c b/dpdk/app/test/test_cryptodev_asym.c +index 69df293041..a0802994fa 100644 +--- a/dpdk/app/test/test_cryptodev_asym.c ++++ b/dpdk/app/test/test_cryptodev_asym.c +@@ -933,8 +933,9 @@ testsuite_setup(void) + } + + /* setup asym session pool */ +- unsigned int session_size = +- rte_cryptodev_asym_get_private_session_size(dev_id); ++ unsigned int session_size = RTE_MAX( ++ rte_cryptodev_asym_get_private_session_size(dev_id), ++ rte_cryptodev_asym_get_header_session_size()); + /* + * Create mempool with TEST_NUM_SESSIONS * 2, + * to include the session headers diff --git a/dpdk/app/test/test_cryptodev_blockcipher.c b/dpdk/app/test/test_cryptodev_blockcipher.c index 5bfe2d009f..2f91d000a2 100644 --- a/dpdk/app/test/test_cryptodev_blockcipher.c @@ -3606,6 +6632,2028 @@ index cff2831185..394bb6b60b 100644 BLOCKCIPHER_TEST_TARGET_PMD_CCP | BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | #if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0) +diff --git a/dpdk/app/test/test_cryptodev_mixed_test_vectors.h b/dpdk/app/test/test_cryptodev_mixed_test_vectors.h +index bca47c05c8..f50dcb0457 100644 +--- a/dpdk/app/test/test_cryptodev_mixed_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_mixed_test_vectors.h +@@ -126,9 +126,9 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_aes_ctr_test_case_1 = { + 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, + 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, + 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 
0x5A, 0x5A, +- 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, ++ 0x5A, 0x5A, 0x5A, 0x5A + }, +- .len_bits = 128 << 3, ++ .len_bits = 124 << 3, + }, + .ciphertext = { + .data = { +@@ -169,4 +169,1320 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_aes_ctr_test_case_1 = { + } + }; + ++struct mixed_cipher_auth_test_data auth_zuc_cipher_snow_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_ZUC_EIA3, ++ .auth_key = { ++ .data = { ++ 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb, ++ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0xa9, 0x40, 0x59, 0xda, 0x50, 0x00, 0x00, 0x00, ++ 0x29, 0x40, 0x59, 0xda, 0x50, 0x00, 0x80, 0x00 ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 73 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, ++ .cipher_key = { ++ .data = { ++ 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb, ++ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0xa9, 0x40, 0x59, 0xda, 0x50, 0x00, 0x00, 0x00, ++ 0x29, 0x40, 0x59, 0xda, 0x50, 0x00, 0x80, 0x00 ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 77 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0x98, 0x3b, 0x41, 0xd4, 0x7d, 0x78, 0x0c, 0x9e, ++ 0x1a, 0xd1, 0x1d, 0x7e, 0xb7, 0x03, 0x91, 0xb1, ++ 0xde, 0x0b, 0x35, 0xda, 0x2d, 0xc6, 0x2f, 0x83, ++ 0xe7, 0xb7, 0x8d, 0x63, 0x06, 0xca, 0x0e, 0xa0, ++ 0x7e, 0x94, 0x1b, 0x7b, 0xe9, 0x13, 0x48, 0xf9, ++ 0xfc, 0xb1, 0x70, 0xe2, 0x21, 0x7f, 0xec, 0xd9, ++ 0x7f, 0x9f, 0x68, 0xad, 0xb1, 0x6e, 0x5d, 0x7d, ++ 0x21, 0xe5, 0x69, 0xd2, 0x80, 0xed, 0x77, 0x5c, ++ 0xeb, 0xde, 0x3f, 0x40, 0x93, 0xc5, 0x38, 0x81, ++ 0x00 ++ }, ++ .len_bits = 73 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x18, 0x46, 0xE1, 0xC5, 0x2C, 0x85, 0x93, 0x22, ++ 0x84, 0x80, 0xD6, 0x84, 0x5C, 0x99, 0x55, 0xE0, ++ 0xD5, 0x02, 0x41, 0x74, 0x4A, 0xD2, 0x8E, 0x7E, ++ 0xB9, 0x79, 0xD3, 0xE5, 0x76, 0x75, 0xD5, 0x59, ++ 0x26, 0xD7, 0x06, 0x2D, 0xF4, 0x71, 0x26, 0x40, ++ 0xAC, 0x77, 0x62, 0xAC, 0x35, 0x0D, 0xC5, 0x35, ++ 0xF8, 0x03, 0x54, 0x52, 0x2E, 0xCA, 0x14, 0xD8, ++ 0x2E, 0x6C, 0x0E, 0x7A, 0x09, 0xE7, 0x20, 0xDD, ++ 0x7C, 0xE3, 0x28, 0x77, 0x53, 0x65, 0xBA, 0x54, ++ 0xE8, 0x25, 0x04, 0x52, 0xFD ++ }, ++ .len_bits = 77 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x25, 0x04, 0x52, 0xFD ++ }, ++ .len = 4, ++ .offset = 73, ++ }, ++ .validDataLen = { ++ .len_bits = 77 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 77 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 73 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_snow_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC, ++ .auth_key = { ++ .data = { ++ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, ++ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ }, ++ .len = 0, ++ }, ++ .auth = { ++ .len_bits = 512 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, ++ .cipher_key = { ++ .data = { ++ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, ++ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ }, ++ .len = 0, ++ }, ++ .cipher = { ++ .len_bits = 516 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0x57, 0x68, 0x61, 0x74, 0x20, 0x61, 0x20, 0x6C, ++ 0x6F, 0x75, 0x73, 0x79, 0x20, 0x65, 0x61, 0x72, ++ 0x74, 0x68, 0x21, 0x20, 0x48, 0x65, 0x20, 0x77, ++ 0x6F, 0x6E, 0x64, 0x65, 0x72, 0x65, 0x64, 0x20, ++ 0x68, 0x6F, 
0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, ++ 0x20, 0x70, 0x65, 0x6F, 0x70, 0x6C, 0x65, 0x20, ++ 0x77, 0x65, 0x72, 0x65, 0x20, 0x64, 0x65, 0x73, ++ 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x20, 0x74, ++ 0x68, 0x61, 0x74, 0x20, 0x73, 0x61, 0x6D, 0x65, ++ 0x20, 0x6E, 0x69, 0x67, 0x68, 0x74, 0x20, 0x65, ++ 0x76, 0x65, 0x6E, 0x20, 0x69, 0x6E, 0x20, 0x68, ++ 0x69, 0x73, 0x20, 0x6F, 0x77, 0x6E, 0x20, 0x70, ++ 0x72, 0x6F, 0x73, 0x70, 0x65, 0x72, 0x6F, 0x75, ++ 0x73, 0x20, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x72, ++ 0x79, 0x2C, 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, ++ 0x61, 0x6E, 0x79, 0x20, 0x68, 0x6F, 0x6D, 0x65, ++ 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x73, ++ 0x68, 0x61, 0x6E, 0x74, 0x69, 0x65, 0x73, 0x2C, ++ 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, ++ 0x79, 0x20, 0x68, 0x75, 0x73, 0x62, 0x61, 0x6E, ++ 0x64, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, ++ 0x64, 0x72, 0x75, 0x6E, 0x6B, 0x20, 0x61, 0x6E, ++ 0x64, 0x20, 0x77, 0x69, 0x76, 0x65, 0x73, 0x20, ++ 0x73, 0x6F, 0x63, 0x6B, 0x65, 0x64, 0x2C, 0x20, ++ 0x61, 0x6E, 0x64, 0x20, 0x68, 0x6F, 0x77, 0x20, ++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63, 0x68, 0x69, ++ 0x6C, 0x64, 0x72, 0x65, 0x6E, 0x20, 0x77, 0x65, ++ 0x72, 0x65, 0x20, 0x62, 0x75, 0x6C, 0x6C, 0x69, ++ 0x65, 0x64, 0x2C, 0x20, 0x61, 0x62, 0x75, 0x73, ++ 0x65, 0x64, 0x2C, 0x20, 0x6F, 0x72, 0x20, 0x61, ++ 0x62, 0x61, 0x6E, 0x64, 0x6F, 0x6E, 0x65, 0x64, ++ 0x2E, 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, ++ 0x6E, 0x79, 0x20, 0x66, 0x61, 0x6D, 0x69, 0x6C, ++ 0x69, 0x65, 0x73, 0x20, 0x68, 0x75, 0x6E, 0x67, ++ 0x65, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72, ++ 0x20, 0x66, 0x6F, 0x6F, 0x64, 0x20, 0x74, 0x68, ++ 0x65, 0x79, 0x20, 0x63, 0x6F, 0x75, 0x6C, 0x64, ++ 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x61, 0x66, 0x66, ++ 0x6F, 0x72, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x62, ++ 0x75, 0x79, 0x3F, 0x20, 0x48, 0x6F, 0x77, 0x20, ++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x68, 0x65, 0x61, ++ 0x72, 0x74, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, ++ 0x20, 0x62, 0x72, 0x6F, 0x6B, 0x65, 0x6E, 0x3F, ++ 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, ++ 0x79, 0x20, 0x73, 0x75, 0x69, 0x63, 0x69, 0x64, ++ 0x65, 0x73, 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64, ++ 0x20, 0x74, 0x61, 0x6B, 0x65, 0x20, 0x70, 0x6C, ++ 0x61, 0x63, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, ++ 0x20, 0x73, 0x61, 0x6D, 0x65, 0x20, 0x6E, 0x69, ++ 0x67, 0x68, 0x74, 0x2C, 0x20, 0x68, 0x6F, 0x77, ++ 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x70, 0x65, ++ 0x6F, 0x70, 0x6C, 0x65, 0x20, 0x77, 0x6F, 0x75, ++ 0x6C, 0x64, 0x20, 0x67, 0x6F, 0x20, 0x69, 0x6E, ++ 0x73, 0x61, 0x6E, 0x65, 0x3F, 0x20, 0x48, 0x6F, ++ 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63, ++ 0x6F, 0x63, 0x6B, 0x72, 0x6F, 0x61, 0x63, 0x68, ++ 0x65, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x6C, ++ 0x61, 0x6E, 0x64, 0x6C, 0x6F, 0x72, 0x64, 0x73, ++ 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64, 0x20, 0x74, ++ 0x72, 0x69, 0x75, 0x6D, 0x70, 0x68, 0x3F, 0x20, ++ 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, ++ 0x20, 0x77, 0x69, 0x6E, 0x6E, 0x65, 0x72, 0x73, ++ 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x6C, 0x6F, ++ 0x73, 0x65, 0x72, 0x73, 0x2C, 0x20, 0x73, 0x75 ++ }, ++ .len_bits = 512 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x8A, 0xA9, 0x74, 0x31, 0xB1, 0xF2, 0xAB, 0x00, ++ 0xD6, 0x3D, 0xFA, 0xBD, 0xD9, 0x65, 0x52, 0x80, ++ 0xB5, 0x98, 0x20, 0xFF, 0x8D, 0x1C, 0x0F, 0x53, ++ 0xDD, 0x79, 0xCC, 0x9D, 0x7A, 0x6D, 0x76, 0x06, ++ 0xB6, 0xF4, 0xAC, 0xDA, 0xF2, 0x24, 0x02, 0x58, ++ 0x5F, 0xE3, 0xD4, 0xF7, 0x0B, 0x3B, 0x1C, 0x4C, ++ 0x0B, 0x4C, 0xC7, 0x4D, 0x3D, 0xFA, 0x28, 0xD9, ++ 0xA0, 0x90, 0x3E, 0x91, 0xDC, 0xC4, 0xE1, 0x2E, ++ 0x7C, 0xB4, 0xBD, 0xE0, 0x9E, 
0xC8, 0x33, 0x42, ++ 0x0E, 0x84, 0xEF, 0x3C, 0xF1, 0x8B, 0x2C, 0xBD, ++ 0x33, 0x70, 0x22, 0xBA, 0xD4, 0x0B, 0xB2, 0x83, ++ 0x7F, 0x27, 0x51, 0x92, 0xD1, 0x40, 0x1E, 0xCD, ++ 0x62, 0x0F, 0x61, 0x5F, 0xB4, 0xB1, 0x0D, 0x1A, ++ 0x16, 0x1B, 0xE8, 0xA8, 0x2B, 0x45, 0xBA, 0x56, ++ 0x30, 0xD0, 0xE3, 0xCA, 0x4D, 0x23, 0xA3, 0x38, ++ 0xD6, 0x2C, 0xE4, 0x8D, 0xFF, 0x23, 0x97, 0x9E, ++ 0xE9, 0xBD, 0x70, 0xAF, 0x6B, 0x68, 0xA7, 0x21, ++ 0x3C, 0xFB, 0xB2, 0x99, 0x4D, 0xE9, 0x70, 0x56, ++ 0x36, 0xB8, 0xD7, 0xE0, 0xEB, 0x62, 0xA1, 0x79, ++ 0xF9, 0xD6, 0xAD, 0x83, 0x75, 0x54, 0xF5, 0x45, ++ 0x82, 0xE8, 0xD6, 0xA9, 0x76, 0x11, 0xC7, 0x81, ++ 0x2C, 0xBA, 0x67, 0xB5, 0xDB, 0xE5, 0xF2, 0x6B, ++ 0x7D, 0x9F, 0x4E, 0xDC, 0xA1, 0x62, 0xF1, 0xF0, ++ 0xAD, 0xD4, 0x7A, 0xA3, 0xF3, 0x76, 0x29, 0xA4, ++ 0xB7, 0xF3, 0x31, 0x84, 0xE7, 0x1F, 0x0D, 0x01, ++ 0xBD, 0x46, 0x07, 0x51, 0x05, 0x76, 0xE2, 0x95, ++ 0xF8, 0x48, 0x18, 0x8A, 0x1E, 0x92, 0x8B, 0xBC, ++ 0x30, 0x05, 0xF5, 0xD6, 0x96, 0xEF, 0x78, 0xB6, ++ 0xF3, 0xEC, 0x4C, 0xB1, 0x88, 0x8B, 0x63, 0x40, ++ 0x07, 0x37, 0xB4, 0x1A, 0xBD, 0xE9, 0x38, 0xB4, ++ 0x31, 0x35, 0x9D, 0x0C, 0xF1, 0x24, 0x0E, 0xD2, ++ 0xAE, 0x39, 0xA6, 0x41, 0x3C, 0x91, 0x6A, 0x4B, ++ 0xEC, 0x46, 0x76, 0xB4, 0x15, 0xC3, 0x58, 0x96, ++ 0x69, 0x02, 0x21, 0x37, 0x65, 0xDF, 0xA6, 0x43, ++ 0x78, 0x81, 0x8B, 0x39, 0x37, 0xE3, 0xF3, 0xD9, ++ 0xA2, 0xAA, 0x3F, 0xA9, 0x21, 0x24, 0x93, 0x4A, ++ 0xB0, 0xDE, 0x22, 0x5F, 0xF8, 0xD3, 0xCC, 0x13, ++ 0x5C, 0xC2, 0x5C, 0x98, 0x6D, 0xFB, 0x34, 0x26, ++ 0xE2, 0xC9, 0x26, 0x23, 0x41, 0xAB, 0xC3, 0x8A, ++ 0xEC, 0x62, 0xA9, 0x5B, 0x51, 0xB9, 0x10, 0x9D, ++ 0xB1, 0xBB, 0xDE, 0x78, 0xDE, 0xE7, 0xF0, 0x9F, ++ 0x91, 0x6C, 0x4D, 0xFC, 0xB3, 0x9C, 0xFF, 0xA4, ++ 0x9D, 0xB8, 0xCD, 0xF6, 0xA8, 0x6A, 0xDB, 0x3B, ++ 0x82, 0xFE, 0xCD, 0x6B, 0x08, 0x0A, 0x5E, 0x76, ++ 0xE9, 0xB3, 0xA2, 0x78, 0x25, 0xDB, 0xB1, 0x76, ++ 0x42, 0x2C, 0xFB, 0x20, 0x87, 0x81, 0x76, 0x17, ++ 0x99, 0xFD, 0x56, 0x52, 0xE2, 0xB0, 0x8E, 0x1B, ++ 0x99, 0xB3, 0x6B, 0x16, 0xC5, 0x4F, 0x0D, 0xBB, ++ 0x0E, 0xB7, 0x54, 0x63, 0xD9, 0x67, 0xD9, 0x85, ++ 0x1F, 0xA8, 0xF0, 0xF0, 0xB0, 0x41, 0xDC, 0xBC, ++ 0x75, 0xEE, 0x23, 0x7D, 0x40, 0xCE, 0xB8, 0x0A, ++ 0x6D, 0xC1, 0xD7, 0xCB, 0xAE, 0xCE, 0x91, 0x9E, ++ 0x3E, 0x5A, 0x76, 0xF8, 0xC0, 0xF2, 0x7F, 0x0B, ++ 0xD2, 0x5F, 0x63, 0xBE, 0xB2, 0x81, 0x8E, 0x6D, ++ 0xB3, 0x6B, 0x67, 0x9D, 0xAC, 0xE2, 0xDB, 0x7C, ++ 0x11, 0x19, 0x55, 0x55, 0x11, 0xED, 0x7F, 0x4E, ++ 0x9E, 0x4B, 0x6E, 0x01, 0x74, 0x4A, 0xE8, 0x78, ++ 0xEC, 0xCD, 0xF7, 0xA2, 0x6E, 0xDB, 0xB6, 0x3B, ++ 0x4D, 0x2C, 0x09, 0x62, 0x57, 0x6E, 0x38, 0x8A, ++ 0x61, 0x17, 0x00, 0xE9, 0x86, 0x7F, 0x3D, 0x93, ++ 0xBC, 0xC3, 0x27, 0x90, 0x7E, 0x41, 0x81, 0xBA, ++ 0x74, 0x70, 0x19, 0xE8, 0xD2, 0x88, 0x61, 0xDF, ++ 0xB4, 0xED, 0xA4, 0x9D, 0x3D, 0xED, 0x95, 0x65, ++ 0xCA, 0xFF, 0x8D, 0x58, 0x63, 0x10, 0x9D, 0xBE, ++ 0x78, 0x81, 0x47, 0x38 ++ }, ++ .len_bits = 516 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x78, 0x81, 0x47, 0x38 ++ }, ++ .len = 4, ++ .offset = 512, ++ }, ++ .validDataLen = { ++ .len_bits = 516 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 516 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 512 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_zuc_cipher_aes_ctr_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_ZUC_EIA3, ++ .auth_key = { ++ .data = { ++ 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb, ++ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0xa9, 0x40, 0x59, 0xda, 0x50, 0x00, 0x00, 0x00, ++ 0x29, 0x40, 0x59, 0xda, 0x50, 0x00, 0x80, 
0x00 ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 73 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR, ++ .cipher_key = { ++ .data = { ++ 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb, ++ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0xa9, 0x40, 0x59, 0xda, 0x50, 0x00, 0x00, 0x00, ++ 0x29, 0x40, 0x59, 0xda, 0x50, 0x00, 0x80, 0x00 ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 77 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0x98, 0x3b, 0x41, 0xd4, 0x7d, 0x78, 0x0c, 0x9e, ++ 0x1a, 0xd1, 0x1d, 0x7e, 0xb7, 0x03, 0x91, 0xb1, ++ 0xde, 0x0b, 0x35, 0xda, 0x2d, 0xc6, 0x2f, 0x83, ++ 0xe7, 0xb7, 0x8d, 0x63, 0x06, 0xca, 0x0e, 0xa0, ++ 0x7e, 0x94, 0x1b, 0x7b, 0xe9, 0x13, 0x48, 0xf9, ++ 0xfc, 0xb1, 0x70, 0xe2, 0x21, 0x7f, 0xec, 0xd9, ++ 0x7f, 0x9f, 0x68, 0xad, 0xb1, 0x6e, 0x5d, 0x7d, ++ 0x21, 0xe5, 0x69, 0xd2, 0x80, 0xed, 0x77, 0x5c, ++ 0xeb, 0xde, 0x3f, 0x40, 0x93, 0xc5, 0x38, 0x81, ++ 0x00 ++ }, ++ .len_bits = 73 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x53, 0x92, 0x9F, 0x88, 0x32, 0xA1, 0x6D, 0x66, ++ 0x00, 0x32, 0x29, 0xF9, 0x14, 0x75, 0x6D, 0xB3, ++ 0xEB, 0x64, 0x25, 0x09, 0xE1, 0x80, 0x31, 0x8C, ++ 0xF8, 0x47, 0x64, 0xAA, 0x07, 0x8E, 0x06, 0xBF, ++ 0x05, 0xD7, 0x43, 0xEE, 0xFF, 0x11, 0x33, 0x4A, ++ 0x82, 0xCF, 0x88, 0x6F, 0x33, 0xB2, 0xB5, 0x67, ++ 0x50, 0x0A, 0x74, 0x2D, 0xE4, 0x56, 0x40, 0x31, ++ 0xEE, 0xB3, 0x6C, 0x6E, 0x6A, 0x7B, 0x20, 0xBA, ++ 0x4E, 0x44, 0x34, 0xC8, 0x62, 0x21, 0x8C, 0x45, ++ 0xD7, 0x85, 0x44, 0xF4, 0x7E ++ }, ++ .len_bits = 77 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x85, 0x44, 0xF4, 0x7E ++ }, ++ .len = 4, ++ .offset = 73, ++ }, ++ .validDataLen = { ++ .len_bits = 77 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 77 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 73 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_snow_cipher_aes_ctr_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, ++ .auth_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 48 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR, ++ .cipher_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 52 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, ++ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, ++ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, ++ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0, ++ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20, ++ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09 ++ }, ++ .len_bits = 48 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x91, 0x96, 0x28, 0xB4, 0x89, 0x74, 0xF6, 0x5E, ++ 0x98, 0x58, 0xA1, 0xD3, 0x0E, 0xE3, 0xFC, 0x39, ++ 0xDB, 0x36, 0xE4, 0x97, 0x74, 0x5B, 0x5E, 0xD4, ++ 0x1B, 0x8A, 0xC5, 0x9D, 0xDF, 0x96, 0x97, 0x5F, ++ 0x58, 0x4A, 0x75, 0x74, 0x27, 0x07, 0xF3, 0x7F, ++ 0xCE, 0x2C, 0x4A, 0x6C, 0xE5, 0x19, 0xE7, 0x8B, ++ 0xF3, 0x21, 0x84, 0x6C ++ }, ++ 
.len_bits = 52 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0xF3, 0x21, 0x84, 0x6C ++ }, ++ .len = 4, ++ .offset = 48, ++ }, ++ .validDataLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 48 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_snow_cipher_zuc_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, ++ .auth_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 48 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, ++ .cipher_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 52 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, ++ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, ++ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, ++ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0, ++ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20, ++ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09 ++ }, ++ .len_bits = 48 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x52, 0x11, 0xCD, 0xFF, 0xF8, 0x88, 0x61, 0x1E, ++ 0xF5, 0xD2, 0x8E, 0xEB, 0x2A, 0x49, 0x18, 0x1F, ++ 0xF4, 0xDA, 0x8B, 0x19, 0x60, 0x0B, 0x92, 0x9E, ++ 0x79, 0x2A, 0x5B, 0x0B, 0x7E, 0xC6, 0x22, 0x36, ++ 0x74, 0xA4, 0x6C, 0xBC, 0xF5, 0x25, 0x69, 0xAE, ++ 0xDA, 0x04, 0xB9, 0xAF, 0x16, 0x42, 0x0F, 0xCB, ++ 0x3E, 0xC9, 0x49, 0xE9 ++ }, ++ .len_bits = 52 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x3E, 0xC9, 0x49, 0xE9 ++ }, ++ .len = 4, ++ .offset = 48, ++ }, ++ .validDataLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 48 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_zuc_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC, ++ .auth_key = { ++ .data = { ++ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, ++ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ }, ++ .len = 0, ++ }, ++ .auth = { ++ .len_bits = 512 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, ++ .cipher_key = { ++ .data = { ++ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, ++ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ }, ++ .len = 0, ++ }, ++ .cipher = { ++ .len_bits = 516 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0x57, 0x68, 0x61, 0x74, 0x20, 0x61, 0x20, 0x6C, ++ 0x6F, 0x75, 0x73, 0x79, 0x20, 0x65, 0x61, 0x72, ++ 0x74, 0x68, 0x21, 0x20, 0x48, 0x65, 0x20, 0x77, ++ 0x6F, 0x6E, 0x64, 0x65, 0x72, 0x65, 0x64, 0x20, ++ 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, ++ 0x20, 0x70, 0x65, 0x6F, 0x70, 0x6C, 0x65, 0x20, ++ 0x77, 0x65, 0x72, 0x65, 0x20, 0x64, 0x65, 0x73, ++ 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x20, 0x74, ++ 0x68, 0x61, 0x74, 0x20, 0x73, 0x61, 0x6D, 0x65, ++ 0x20, 0x6E, 0x69, 0x67, 0x68, 0x74, 0x20, 0x65, ++ 0x76, 0x65, 0x6E, 0x20, 0x69, 0x6E, 
0x20, 0x68, ++ 0x69, 0x73, 0x20, 0x6F, 0x77, 0x6E, 0x20, 0x70, ++ 0x72, 0x6F, 0x73, 0x70, 0x65, 0x72, 0x6F, 0x75, ++ 0x73, 0x20, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x72, ++ 0x79, 0x2C, 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, ++ 0x61, 0x6E, 0x79, 0x20, 0x68, 0x6F, 0x6D, 0x65, ++ 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x73, ++ 0x68, 0x61, 0x6E, 0x74, 0x69, 0x65, 0x73, 0x2C, ++ 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, ++ 0x79, 0x20, 0x68, 0x75, 0x73, 0x62, 0x61, 0x6E, ++ 0x64, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, ++ 0x64, 0x72, 0x75, 0x6E, 0x6B, 0x20, 0x61, 0x6E, ++ 0x64, 0x20, 0x77, 0x69, 0x76, 0x65, 0x73, 0x20, ++ 0x73, 0x6F, 0x63, 0x6B, 0x65, 0x64, 0x2C, 0x20, ++ 0x61, 0x6E, 0x64, 0x20, 0x68, 0x6F, 0x77, 0x20, ++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63, 0x68, 0x69, ++ 0x6C, 0x64, 0x72, 0x65, 0x6E, 0x20, 0x77, 0x65, ++ 0x72, 0x65, 0x20, 0x62, 0x75, 0x6C, 0x6C, 0x69, ++ 0x65, 0x64, 0x2C, 0x20, 0x61, 0x62, 0x75, 0x73, ++ 0x65, 0x64, 0x2C, 0x20, 0x6F, 0x72, 0x20, 0x61, ++ 0x62, 0x61, 0x6E, 0x64, 0x6F, 0x6E, 0x65, 0x64, ++ 0x2E, 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, ++ 0x6E, 0x79, 0x20, 0x66, 0x61, 0x6D, 0x69, 0x6C, ++ 0x69, 0x65, 0x73, 0x20, 0x68, 0x75, 0x6E, 0x67, ++ 0x65, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72, ++ 0x20, 0x66, 0x6F, 0x6F, 0x64, 0x20, 0x74, 0x68, ++ 0x65, 0x79, 0x20, 0x63, 0x6F, 0x75, 0x6C, 0x64, ++ 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x61, 0x66, 0x66, ++ 0x6F, 0x72, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x62, ++ 0x75, 0x79, 0x3F, 0x20, 0x48, 0x6F, 0x77, 0x20, ++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x68, 0x65, 0x61, ++ 0x72, 0x74, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, ++ 0x20, 0x62, 0x72, 0x6F, 0x6B, 0x65, 0x6E, 0x3F, ++ 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, ++ 0x79, 0x20, 0x73, 0x75, 0x69, 0x63, 0x69, 0x64, ++ 0x65, 0x73, 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64, ++ 0x20, 0x74, 0x61, 0x6B, 0x65, 0x20, 0x70, 0x6C, ++ 0x61, 0x63, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, ++ 0x20, 0x73, 0x61, 0x6D, 0x65, 0x20, 0x6E, 0x69, ++ 0x67, 0x68, 0x74, 0x2C, 0x20, 0x68, 0x6F, 0x77, ++ 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x70, 0x65, ++ 0x6F, 0x70, 0x6C, 0x65, 0x20, 0x77, 0x6F, 0x75, ++ 0x6C, 0x64, 0x20, 0x67, 0x6F, 0x20, 0x69, 0x6E, ++ 0x73, 0x61, 0x6E, 0x65, 0x3F, 0x20, 0x48, 0x6F, ++ 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63, ++ 0x6F, 0x63, 0x6B, 0x72, 0x6F, 0x61, 0x63, 0x68, ++ 0x65, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x6C, ++ 0x61, 0x6E, 0x64, 0x6C, 0x6F, 0x72, 0x64, 0x73, ++ 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64, 0x20, 0x74, ++ 0x72, 0x69, 0x75, 0x6D, 0x70, 0x68, 0x3F, 0x20, ++ 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, ++ 0x20, 0x77, 0x69, 0x6E, 0x6E, 0x65, 0x72, 0x73, ++ 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x6C, 0x6F, ++ 0x73, 0x65, 0x72, 0x73, 0x2C, 0x20, 0x73, 0x75 ++ }, ++ .len_bits = 512 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x3C, 0x89, 0x1C, 0xE5, 0xB7, 0xDE, 0x61, 0x4D, ++ 0x05, 0x37, 0x3F, 0x40, 0xC9, 0xCF, 0x10, 0x07, ++ 0x7F, 0x18, 0xC5, 0x96, 0x21, 0xA9, 0xCF, 0xF5, ++ 0xBB, 0x9C, 0x22, 0x72, 0x00, 0xBE, 0xAC, 0x4B, ++ 0x55, 0x02, 0x19, 0x2B, 0x37, 0x64, 0x15, 0x6B, ++ 0x54, 0x74, 0xAE, 0x0F, 0xE7, 0x68, 0xB3, 0x92, ++ 0x17, 0x26, 0x75, 0xEE, 0x0B, 0xE9, 0x46, 0x3C, ++ 0x6E, 0x76, 0x52, 0x14, 0x2B, 0xD0, 0xB6, 0xD0, ++ 0x09, 0x07, 0x17, 0x12, 0x58, 0x61, 0xE8, 0x2A, ++ 0x7C, 0x55, 0x67, 0x66, 0x49, 0xD1, 0x4E, 0x2F, ++ 0x06, 0x96, 0x3A, 0xF7, 0x05, 0xE3, 0x65, 0x47, ++ 0x7C, 0xBB, 0x66, 0x25, 0xC4, 0x73, 0xB3, 0x7B, ++ 0x3D, 0x1D, 0x59, 0x54, 0x4E, 0x38, 0x9C, 0x4D, ++ 0x10, 0x4B, 0x49, 0xA4, 0x92, 0xC7, 0xD7, 0x17, ++ 0x6F, 0xC0, 0xEE, 0x8D, 0xBE, 0xA5, 0xE3, 0xB9, ++ 
0xBA, 0x5E, 0x88, 0x36, 0x06, 0x19, 0xB7, 0x86, ++ 0x66, 0x19, 0x90, 0xC4, 0xAE, 0xB3, 0xFE, 0xA7, ++ 0xCF, 0x2A, 0xD8, 0x6C, 0x0E, 0xD5, 0x24, 0x2A, ++ 0x92, 0x93, 0xB9, 0x12, 0xCB, 0x50, 0x0A, 0x22, ++ 0xB0, 0x09, 0x06, 0x17, 0x85, 0xC9, 0x03, 0x70, ++ 0x18, 0xF2, 0xD5, 0x6A, 0x66, 0xC2, 0xB6, 0xC6, ++ 0xA5, 0xA3, 0x24, 0xEC, 0xB9, 0x07, 0xD5, 0x8A, ++ 0xA0, 0x44, 0x54, 0xD7, 0x21, 0x9F, 0x02, 0x83, ++ 0x78, 0x7B, 0x78, 0x9C, 0x97, 0x2A, 0x36, 0x51, ++ 0xAF, 0xE1, 0x79, 0x81, 0x07, 0x53, 0xE4, 0xA0, ++ 0xC7, 0xCF, 0x10, 0x7C, 0xB2, 0xE6, 0xA1, 0xFD, ++ 0x81, 0x0B, 0x96, 0x50, 0x5D, 0xFE, 0xB3, 0xC6, ++ 0x75, 0x00, 0x0C, 0x56, 0x83, 0x9B, 0x7B, 0xF4, ++ 0xE0, 0x3A, 0xC0, 0xE1, 0xA9, 0xEC, 0xAC, 0x47, ++ 0x24, 0xF5, 0x12, 0x1B, 0xD0, 0x28, 0x32, 0xE2, ++ 0x3B, 0x42, 0xC1, 0x5B, 0x98, 0x98, 0x78, 0x2D, ++ 0xC1, 0x69, 0x05, 0x37, 0x24, 0xF0, 0x73, 0xBA, ++ 0xBE, 0x57, 0xAC, 0x40, 0x9A, 0x91, 0x42, 0x49, ++ 0x31, 0x0F, 0xED, 0x45, 0xA8, 0x25, 0xFF, 0x1B, ++ 0xF4, 0x2F, 0x61, 0x7A, 0xB0, 0x60, 0xC6, 0x5E, ++ 0x0E, 0xF6, 0x96, 0x35, 0x90, 0xAF, 0x3B, 0x9D, ++ 0x4D, 0x6C, 0xE7, 0xF2, 0x4F, 0xC0, 0xBA, 0x57, ++ 0x92, 0x18, 0xB7, 0xF5, 0x1D, 0x06, 0x81, 0xF6, ++ 0xE3, 0xF4, 0x66, 0x8C, 0x33, 0x74, 0xBE, 0x64, ++ 0x8C, 0x18, 0xED, 0x7F, 0x68, 0x2A, 0xE4, 0xAF, ++ 0xF1, 0x02, 0x07, 0x51, 0x22, 0x96, 0xC8, 0x9E, ++ 0x23, 0x7F, 0x6A, 0xD7, 0x80, 0x0F, 0x2D, 0xFC, ++ 0xCC, 0xD0, 0x95, 0x86, 0x00, 0x2A, 0x77, 0xDD, ++ 0xA2, 0x60, 0x1E, 0x0F, 0x8E, 0x42, 0x44, 0x37, ++ 0x7E, 0x33, 0xC4, 0xE0, 0x04, 0x53, 0xF6, 0x3F, ++ 0xDD, 0x1D, 0x5E, 0x24, 0xDA, 0xAE, 0xEF, 0x06, ++ 0x06, 0x05, 0x13, 0x3A, 0x1E, 0xFF, 0xAD, 0xAD, ++ 0xEE, 0x0F, 0x6F, 0x05, 0xA5, 0xFB, 0x3B, 0xC3, ++ 0xDB, 0xA0, 0x20, 0xC1, 0x65, 0x8B, 0x39, 0xAB, ++ 0xC9, 0xEC, 0xA8, 0x31, 0x85, 0x6C, 0xD2, 0xE4, ++ 0x76, 0x77, 0x76, 0xD5, 0x81, 0x01, 0x73, 0x36, ++ 0x08, 0x8C, 0xC3, 0xD4, 0x70, 0x7A, 0xA3, 0xDF, ++ 0xAD, 0x3A, 0x00, 0x46, 0x88, 0x65, 0x10, 0xBE, ++ 0xD8, 0x1C, 0x19, 0x98, 0xE9, 0x29, 0xDD, 0x58, ++ 0x46, 0x31, 0xEB, 0x3D, 0xD0, 0x12, 0x02, 0x83, ++ 0x15, 0xDD, 0x70, 0x27, 0x0D, 0xB5, 0xBB, 0x0C, ++ 0xE3, 0xF1, 0x02, 0xF2, 0xD7, 0x1D, 0x17, 0x6D, ++ 0xDF, 0x2A, 0x42, 0x1F, 0x01, 0x5C, 0x68, 0xB1, ++ 0x64, 0x74, 0xCE, 0x74, 0xB1, 0x3C, 0x2F, 0x43, ++ 0x5F, 0xB7, 0x7E, 0x3E, 0x6F, 0xE3, 0xDC, 0x03, ++ 0xD9, 0x0C, 0xDD, 0x42, 0x65, 0x7F, 0xEA, 0x69, ++ 0x6F, 0xDB, 0xD7, 0xFB, 0xFF, 0x4D, 0xB4, 0x48, ++ 0xFE, 0x0F, 0x59, 0x24, 0x8F, 0x13, 0xA8, 0x60, ++ 0xF7, 0x13, 0xE5, 0xB1, 0x8D, 0xB7, 0x70, 0xEE, ++ 0x82, 0x8F, 0xCF, 0x7E ++ }, ++ .len_bits = 516 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x82, 0x8F, 0xCF, 0x7E ++ }, ++ .len = 4, ++ .offset = 512, ++ }, ++ .validDataLen = { ++ .len_bits = 516 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 516 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 512 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_null_cipher_snow_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_NULL, ++ .auth_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 44 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, ++ .cipher_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { 
++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 48 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, ++ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, ++ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, ++ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0, ++ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20, ++ 0xA4, 0x99, 0x27, 0x6A, ++ }, ++ .len_bits = 44 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x95, 0x2E, 0x5A, 0xE1, 0x50, 0xB8, 0x59, 0x2A, ++ 0x9B, 0xA0, 0x38, 0xA9, 0x8E, 0x2F, 0xED, 0xAB, ++ 0xFD, 0xC8, 0x3B, 0x47, 0x46, 0x0B, 0x50, 0x16, ++ 0xEC, 0x88, 0x45, 0xB6, 0x05, 0xC7, 0x54, 0xF8, ++ 0xBD, 0x91, 0xAA, 0xB6, 0xA4, 0xDC, 0x64, 0xB4, ++ 0xCB, 0xEB, 0x97, 0x06, 0x1C, 0xB5, 0x72, 0x34 ++ }, ++ .len_bits = 48 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x1C, 0xB5, 0x72, 0x34 ++ }, ++ .len = 4, ++ .offset = 44, ++ }, ++ .validDataLen = { ++ .len_bits = 48 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 48 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 44 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_null_cipher_zuc_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_NULL, ++ .auth_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 48 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, ++ .cipher_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 52 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, ++ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, ++ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, ++ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0, ++ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20, ++ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09 ++ }, ++ .len_bits = 48 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x52, 0x11, 0xCD, 0xFF, 0xF8, 0x88, 0x61, 0x1E, ++ 0xF5, 0xD2, 0x8E, 0xEB, 0x2A, 0x49, 0x18, 0x1F, ++ 0xF4, 0xDA, 0x8B, 0x19, 0x60, 0x0B, 0x92, 0x9E, ++ 0x79, 0x2A, 0x5B, 0x0B, 0x7E, 0xC6, 0x22, 0x36, ++ 0x74, 0xA4, 0x6C, 0xBC, 0xF5, 0x25, 0x69, 0xAE, ++ 0xDA, 0x04, 0xB9, 0xAF, 0x16, 0x42, 0x0F, 0xCB, ++ 0x06, 0x7C, 0x1D, 0x29 ++ }, ++ .len_bits = 52 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x06, 0x7C, 0x1D, 0x29 ++ }, ++ .len = 4, ++ .offset = 48, ++ }, ++ .validDataLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 48 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_snow_cipher_null_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, ++ .auth_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 
16, ++ }, ++ .auth = { ++ .len_bits = 48 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_NULL, ++ .cipher_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 52 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, ++ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, ++ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, ++ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0, ++ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20, ++ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09 ++ }, ++ .len_bits = 48 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, ++ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, ++ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, ++ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0, ++ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20, ++ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09, ++ 0x38, 0xB5, 0x54, 0xC0 ++ }, ++ .len_bits = 52 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0x38, 0xB5, 0x54, 0xC0 ++ }, ++ .len = 4, ++ .offset = 48, ++ }, ++ .validDataLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 48 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_zuc_cipher_null_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_ZUC_EIA3, ++ .auth_key = { ++ .data = { ++ 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb, ++ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0xa9, 0x40, 0x59, 0xda, 0x50, 0x00, 0x00, 0x00, ++ 0x29, 0x40, 0x59, 0xda, 0x50, 0x00, 0x80, 0x00 ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 73 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_NULL, ++ .cipher_key = { ++ .data = { ++ 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb, ++ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0xa9, 0x40, 0x59, 0xda, 0x50, 0x00, 0x00, 0x00, ++ 0x29, 0x40, 0x59, 0xda, 0x50, 0x00, 0x80, 0x00 ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 77 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0x98, 0x3b, 0x41, 0xd4, 0x7d, 0x78, 0x0c, 0x9e, ++ 0x1a, 0xd1, 0x1d, 0x7e, 0xb7, 0x03, 0x91, 0xb1, ++ 0xde, 0x0b, 0x35, 0xda, 0x2d, 0xc6, 0x2f, 0x83, ++ 0xe7, 0xb7, 0x8d, 0x63, 0x06, 0xca, 0x0e, 0xa0, ++ 0x7e, 0x94, 0x1b, 0x7b, 0xe9, 0x13, 0x48, 0xf9, ++ 0xfc, 0xb1, 0x70, 0xe2, 0x21, 0x7f, 0xec, 0xd9, ++ 0x7f, 0x9f, 0x68, 0xad, 0xb1, 0x6e, 0x5d, 0x7d, ++ 0x21, 0xe5, 0x69, 0xd2, 0x80, 0xed, 0x77, 0x5c, ++ 0xeb, 0xde, 0x3f, 0x40, 0x93, 0xc5, 0x38, 0x81, ++ 0x00 ++ }, ++ .len_bits = 73 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x98, 0x3b, 0x41, 0xd4, 0x7d, 0x78, 0x0c, 0x9e, ++ 0x1a, 0xd1, 0x1d, 0x7e, 0xb7, 0x03, 0x91, 0xb1, ++ 0xde, 0x0b, 0x35, 0xda, 0x2d, 0xc6, 0x2f, 0x83, ++ 0xe7, 0xb7, 0x8d, 0x63, 0x06, 0xca, 0x0e, 0xa0, ++ 0x7e, 0x94, 0x1b, 0x7b, 0xe9, 0x13, 0x48, 0xf9, ++ 0xfc, 0xb1, 0x70, 0xe2, 0x21, 0x7f, 0xec, 0xd9, ++ 0x7f, 0x9f, 0x68, 0xad, 0xb1, 0x6e, 0x5d, 0x7d, ++ 0x21, 0xe5, 0x69, 0xd2, 0x80, 0xed, 0x77, 0x5c, ++ 0xeb, 0xde, 0x3f, 0x40, 0x93, 0xc5, 0x38, 0x81, ++ 0x00, 0x24, 0xa8, 0x42, 0xb3 ++ }, ++ .len_bits = 77 << 3, ++ }, ++ 
.digest_enc = { ++ .data = { ++ 0x24, 0xa8, 0x42, 0xb3 ++ }, ++ .len = 4, ++ .offset = 73, ++ }, ++ .validDataLen = { ++ .len_bits = 77 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 77 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 73 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_null_cipher_aes_ctr_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_NULL, ++ .auth_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .auth = { ++ .len_bits = 48 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR, ++ .cipher_key = { ++ .data = { ++ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, ++ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, ++ 0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD ++ }, ++ .len = 16, ++ }, ++ .cipher = { ++ .len_bits = 52 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, ++ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, ++ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, ++ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0, ++ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20, ++ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09 ++ }, ++ .len_bits = 48 << 3, ++ }, ++ .ciphertext = { ++ .data = { ++ 0x91, 0x96, 0x28, 0xB4, 0x89, 0x74, 0xF6, 0x5E, ++ 0x98, 0x58, 0xA1, 0xD3, 0x0E, 0xE3, 0xFC, 0x39, ++ 0xDB, 0x36, 0xE4, 0x97, 0x74, 0x5B, 0x5E, 0xD4, ++ 0x1B, 0x8A, 0xC5, 0x9D, 0xDF, 0x96, 0x97, 0x5F, ++ 0x58, 0x4A, 0x75, 0x74, 0x27, 0x07, 0xF3, 0x7F, ++ 0xCE, 0x2C, 0x4A, 0x6C, 0xE5, 0x19, 0xE7, 0x8B, ++ 0xCB, 0x94, 0xD0, 0xAC ++ }, ++ .len_bits = 52 << 3, ++ }, ++ .digest_enc = { ++ .data = { ++ 0xCB, 0x94, 0xD0, 0xAC ++ }, ++ .len = 4, ++ .offset = 48, ++ }, ++ .validDataLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validCipherLen = { ++ .len_bits = 52 << 3, ++ }, ++ .validAuthLen = { ++ .len_bits = 48 << 3, ++ } ++}; ++ ++struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_null_test_case_1 = { ++ .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC, ++ .auth_key = { ++ .data = { ++ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, ++ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C ++ }, ++ .len = 16, ++ }, ++ .auth_iv = { ++ .data = { ++ }, ++ .len = 0, ++ }, ++ .auth = { ++ .len_bits = 512 << 3, ++ .offset_bits = 0, ++ }, ++ .cipher_algo = RTE_CRYPTO_CIPHER_NULL, ++ .cipher_key = { ++ .data = { ++ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, ++ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C ++ }, ++ .len = 16, ++ }, ++ .cipher_iv = { ++ .data = { ++ }, ++ .len = 0, ++ }, ++ .cipher = { ++ .len_bits = 516 << 3, ++ .offset_bits = 0, ++ }, ++ .plaintext = { ++ .data = { ++ 0x57, 0x68, 0x61, 0x74, 0x20, 0x61, 0x20, 0x6C, ++ 0x6F, 0x75, 0x73, 0x79, 0x20, 0x65, 0x61, 0x72, ++ 0x74, 0x68, 0x21, 0x20, 0x48, 0x65, 0x20, 0x77, ++ 0x6F, 0x6E, 0x64, 0x65, 0x72, 0x65, 0x64, 0x20, ++ 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, ++ 0x20, 0x70, 0x65, 0x6F, 0x70, 0x6C, 0x65, 0x20, ++ 0x77, 0x65, 0x72, 0x65, 0x20, 0x64, 0x65, 0x73, ++ 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x20, 0x74, ++ 0x68, 0x61, 0x74, 0x20, 0x73, 0x61, 0x6D, 0x65, ++ 0x20, 0x6E, 0x69, 0x67, 0x68, 0x74, 0x20, 0x65, ++ 0x76, 0x65, 0x6E, 0x20, 0x69, 0x6E, 0x20, 0x68, ++ 0x69, 0x73, 0x20, 0x6F, 
0x77, 0x6E, 0x20, 0x70,
++ 0x72, 0x6F, 0x73, 0x70, 0x65, 0x72, 0x6F, 0x75,
++ 0x73, 0x20, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x72,
++ 0x79, 0x2C, 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D,
++ 0x61, 0x6E, 0x79, 0x20, 0x68, 0x6F, 0x6D, 0x65,
++ 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x73,
++ 0x68, 0x61, 0x6E, 0x74, 0x69, 0x65, 0x73, 0x2C,
++ 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E,
++ 0x79, 0x20, 0x68, 0x75, 0x73, 0x62, 0x61, 0x6E,
++ 0x64, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20,
++ 0x64, 0x72, 0x75, 0x6E, 0x6B, 0x20, 0x61, 0x6E,
++ 0x64, 0x20, 0x77, 0x69, 0x76, 0x65, 0x73, 0x20,
++ 0x73, 0x6F, 0x63, 0x6B, 0x65, 0x64, 0x2C, 0x20,
++ 0x61, 0x6E, 0x64, 0x20, 0x68, 0x6F, 0x77, 0x20,
++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63, 0x68, 0x69,
++ 0x6C, 0x64, 0x72, 0x65, 0x6E, 0x20, 0x77, 0x65,
++ 0x72, 0x65, 0x20, 0x62, 0x75, 0x6C, 0x6C, 0x69,
++ 0x65, 0x64, 0x2C, 0x20, 0x61, 0x62, 0x75, 0x73,
++ 0x65, 0x64, 0x2C, 0x20, 0x6F, 0x72, 0x20, 0x61,
++ 0x62, 0x61, 0x6E, 0x64, 0x6F, 0x6E, 0x65, 0x64,
++ 0x2E, 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61,
++ 0x6E, 0x79, 0x20, 0x66, 0x61, 0x6D, 0x69, 0x6C,
++ 0x69, 0x65, 0x73, 0x20, 0x68, 0x75, 0x6E, 0x67,
++ 0x65, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72,
++ 0x20, 0x66, 0x6F, 0x6F, 0x64, 0x20, 0x74, 0x68,
++ 0x65, 0x79, 0x20, 0x63, 0x6F, 0x75, 0x6C, 0x64,
++ 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x61, 0x66, 0x66,
++ 0x6F, 0x72, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x62,
++ 0x75, 0x79, 0x3F, 0x20, 0x48, 0x6F, 0x77, 0x20,
++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x68, 0x65, 0x61,
++ 0x72, 0x74, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65,
++ 0x20, 0x62, 0x72, 0x6F, 0x6B, 0x65, 0x6E, 0x3F,
++ 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E,
++ 0x79, 0x20, 0x73, 0x75, 0x69, 0x63, 0x69, 0x64,
++ 0x65, 0x73, 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64,
++ 0x20, 0x74, 0x61, 0x6B, 0x65, 0x20, 0x70, 0x6C,
++ 0x61, 0x63, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74,
++ 0x20, 0x73, 0x61, 0x6D, 0x65, 0x20, 0x6E, 0x69,
++ 0x67, 0x68, 0x74, 0x2C, 0x20, 0x68, 0x6F, 0x77,
++ 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x70, 0x65,
++ 0x6F, 0x70, 0x6C, 0x65, 0x20, 0x77, 0x6F, 0x75,
++ 0x6C, 0x64, 0x20, 0x67, 0x6F, 0x20, 0x69, 0x6E,
++ 0x73, 0x61, 0x6E, 0x65, 0x3F, 0x20, 0x48, 0x6F,
++ 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63,
++ 0x6F, 0x63, 0x6B, 0x72, 0x6F, 0x61, 0x63, 0x68,
++ 0x65, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x6C,
++ 0x61, 0x6E, 0x64, 0x6C, 0x6F, 0x72, 0x64, 0x73,
++ 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64, 0x20, 0x74,
++ 0x72, 0x69, 0x75, 0x6D, 0x70, 0x68, 0x3F, 0x20,
++ 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79,
++ 0x20, 0x77, 0x69, 0x6E, 0x6E, 0x65, 0x72, 0x73,
++ 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x6C, 0x6F,
++ 0x73, 0x65, 0x72, 0x73, 0x2C, 0x20, 0x73, 0x75
++ },
++ .len_bits = 512 << 3,
++ },
++ .ciphertext = {
++ .data = {
++ 0x57, 0x68, 0x61, 0x74, 0x20, 0x61, 0x20, 0x6C,
++ 0x6F, 0x75, 0x73, 0x79, 0x20, 0x65, 0x61, 0x72,
++ 0x74, 0x68, 0x21, 0x20, 0x48, 0x65, 0x20, 0x77,
++ 0x6F, 0x6E, 0x64, 0x65, 0x72, 0x65, 0x64, 0x20,
++ 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79,
++ 0x20, 0x70, 0x65, 0x6F, 0x70, 0x6C, 0x65, 0x20,
++ 0x77, 0x65, 0x72, 0x65, 0x20, 0x64, 0x65, 0x73,
++ 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x20, 0x74,
++ 0x68, 0x61, 0x74, 0x20, 0x73, 0x61, 0x6D, 0x65,
++ 0x20, 0x6E, 0x69, 0x67, 0x68, 0x74, 0x20, 0x65,
++ 0x76, 0x65, 0x6E, 0x20, 0x69, 0x6E, 0x20, 0x68,
++ 0x69, 0x73, 0x20, 0x6F, 0x77, 0x6E, 0x20, 0x70,
++ 0x72, 0x6F, 0x73, 0x70, 0x65, 0x72, 0x6F, 0x75,
++ 0x73, 0x20, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x72,
++ 0x79, 0x2C, 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D,
++ 0x61, 0x6E, 0x79, 0x20, 0x68, 0x6F, 0x6D, 0x65,
++ 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x73,
++ 0x68, 0x61, 0x6E, 0x74, 0x69, 0x65, 0x73, 0x2C,
++ 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E,
++ 0x79, 0x20, 0x68, 0x75, 0x73, 0x62, 0x61, 0x6E,
++ 0x64, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20,
++ 0x64, 0x72, 0x75, 0x6E, 0x6B, 0x20, 0x61, 0x6E,
++ 0x64, 0x20, 0x77, 0x69, 0x76, 0x65, 0x73, 0x20,
++ 0x73, 0x6F, 0x63, 0x6B, 0x65, 0x64, 0x2C, 0x20,
++ 0x61, 0x6E, 0x64, 0x20, 0x68, 0x6F, 0x77, 0x20,
++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63, 0x68, 0x69,
++ 0x6C, 0x64, 0x72, 0x65, 0x6E, 0x20, 0x77, 0x65,
++ 0x72, 0x65, 0x20, 0x62, 0x75, 0x6C, 0x6C, 0x69,
++ 0x65, 0x64, 0x2C, 0x20, 0x61, 0x62, 0x75, 0x73,
++ 0x65, 0x64, 0x2C, 0x20, 0x6F, 0x72, 0x20, 0x61,
++ 0x62, 0x61, 0x6E, 0x64, 0x6F, 0x6E, 0x65, 0x64,
++ 0x2E, 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61,
++ 0x6E, 0x79, 0x20, 0x66, 0x61, 0x6D, 0x69, 0x6C,
++ 0x69, 0x65, 0x73, 0x20, 0x68, 0x75, 0x6E, 0x67,
++ 0x65, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72,
++ 0x20, 0x66, 0x6F, 0x6F, 0x64, 0x20, 0x74, 0x68,
++ 0x65, 0x79, 0x20, 0x63, 0x6F, 0x75, 0x6C, 0x64,
++ 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x61, 0x66, 0x66,
++ 0x6F, 0x72, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x62,
++ 0x75, 0x79, 0x3F, 0x20, 0x48, 0x6F, 0x77, 0x20,
++ 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x68, 0x65, 0x61,
++ 0x72, 0x74, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65,
++ 0x20, 0x62, 0x72, 0x6F, 0x6B, 0x65, 0x6E, 0x3F,
++ 0x20, 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E,
++ 0x79, 0x20, 0x73, 0x75, 0x69, 0x63, 0x69, 0x64,
++ 0x65, 0x73, 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64,
++ 0x20, 0x74, 0x61, 0x6B, 0x65, 0x20, 0x70, 0x6C,
++ 0x61, 0x63, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74,
++ 0x20, 0x73, 0x61, 0x6D, 0x65, 0x20, 0x6E, 0x69,
++ 0x67, 0x68, 0x74, 0x2C, 0x20, 0x68, 0x6F, 0x77,
++ 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x70, 0x65,
++ 0x6F, 0x70, 0x6C, 0x65, 0x20, 0x77, 0x6F, 0x75,
++ 0x6C, 0x64, 0x20, 0x67, 0x6F, 0x20, 0x69, 0x6E,
++ 0x73, 0x61, 0x6E, 0x65, 0x3F, 0x20, 0x48, 0x6F,
++ 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x63,
++ 0x6F, 0x63, 0x6B, 0x72, 0x6F, 0x61, 0x63, 0x68,
++ 0x65, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x6C,
++ 0x61, 0x6E, 0x64, 0x6C, 0x6F, 0x72, 0x64, 0x73,
++ 0x20, 0x77, 0x6F, 0x75, 0x6C, 0x64, 0x20, 0x74,
++ 0x72, 0x69, 0x75, 0x6D, 0x70, 0x68, 0x3F, 0x20,
++ 0x48, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79,
++ 0x20, 0x77, 0x69, 0x6E, 0x6E, 0x65, 0x72, 0x73,
++ 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x6C, 0x6F,
++ 0x73, 0x65, 0x72, 0x73, 0x2C, 0x20, 0x73, 0x75,
++ 0x4C, 0x77, 0x87, 0xA0
++ },
++ .len_bits = 516 << 3,
++ },
++ .digest_enc = {
++ .data = {
++ 0x4C, 0x77, 0x87, 0xA0
++ },
++ .len = 4,
++ .offset = 512,
++ },
++ .validDataLen = {
++ .len_bits = 516 << 3,
++ },
++ .validCipherLen = {
++ .len_bits = 516 << 3,
++ },
++ .validAuthLen = {
++ .len_bits = 512 << 3,
++ }
++};
++
+ #endif /* TEST_CRYPTODEV_MIXED_TEST_VECTORS_H_ */
+diff --git a/dpdk/app/test/test_cycles.c b/dpdk/app/test/test_cycles.c
+index c78e6a5b12..97d42f3032 100644
+--- a/dpdk/app/test/test_cycles.c
++++ b/dpdk/app/test/test_cycles.c
+@@ -79,8 +79,14 @@ REGISTER_TEST_COMMAND(cycles_autotest, test_cycles);
+ static int
+ test_delay_us_sleep(void)
+ {
++ int rv;
++
+ rte_delay_us_callback_register(rte_delay_us_sleep);
+- return check_wait_one_second();
++ rv = check_wait_one_second();
++ /* restore original delay function */
++ rte_delay_us_callback_register(rte_delay_us_block);
++
++ return rv;
+ }
+
+ REGISTER_TEST_COMMAND(delay_us_sleep_autotest, test_delay_us_sleep);
+diff --git a/dpdk/app/test/test_distributor.c b/dpdk/app/test/test_distributor.c
+index ba1f81cf8d..acfe728f0c 100644
+--- a/dpdk/app/test/test_distributor.c
++++ b/dpdk/app/test/test_distributor.c
+@@ -27,7 +27,9 @@ struct worker_params worker_params;
+ /* statics - all zero-initialized by default */
+ static volatile int quit; /**< general quit variable for all threads */
+ static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
++static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
+ static volatile unsigned worker_idx;
++static volatile unsigned zero_idx;
+
+ struct worker_stats {
+ volatile unsigned handled_packets;
+@@ -42,7 +44,8 @@ total_packet_count(void)
+ {
+ unsigned i, count = 0;
+ for (i = 0; i < worker_idx; i++)
+- count += worker_stats[i].handled_packets;
++ count += __atomic_load_n(&worker_stats[i].handled_packets,
++ __ATOMIC_RELAXED);
+ return count;
+ }
+
+@@ -50,7 +53,10 @@ total_packet_count(void)
+ static inline void
+ clear_packet_count(void)
+ {
+- memset(&worker_stats, 0, sizeof(worker_stats));
++ unsigned int i;
++ for (i = 0; i < RTE_MAX_LCORE; i++)
++ __atomic_store_n(&worker_stats[i].handled_packets, 0,
++ __ATOMIC_RELAXED);
+ }
+
+ /* this is the basic worker function for sanity test
+@@ -62,23 +68,18 @@ handle_work(void *arg)
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+ struct worker_params *wp = arg;
+ struct rte_distributor *db = wp->dist;
+- unsigned int count = 0, num = 0;
++ unsigned int num;
+ unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+- int i;
+
+- for (i = 0; i < 8; i++)
+- buf[i] = NULL;
+- num = rte_distributor_get_pkt(db, id, buf, buf, num);
++ num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
+ while (!quit) {
+ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
+ __ATOMIC_RELAXED);
+- count += num;
+ num = rte_distributor_get_pkt(db, id,
+ buf, buf, num);
+ }
+ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
+ __ATOMIC_RELAXED);
+- count += num;
+ rte_distributor_return_pkt(db, id, buf, num);
+ return 0;
+ }
+@@ -102,6 +103,7 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ struct rte_mbuf *returns[BURST*2];
+ unsigned int i, count;
+ unsigned int retries;
++ unsigned int processed;
+
+ printf("=== Basic distributor sanity tests ===\n");
+ clear_packet_count();
+@@ -115,7 +117,11 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ for (i = 0; i < BURST; i++)
+ bufs[i]->hash.usr = 0;
+
+- rte_distributor_process(db, bufs, BURST);
++ processed = 0;
++ while (processed < BURST)
++ processed += rte_distributor_process(db, &bufs[processed],
++ BURST - processed);
++
+ count = 0;
+ do {
+
+@@ -128,12 +134,14 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ printf("Line %d: Error, not all packets flushed. "
+ "Expected %u, got %u\n",
+ __LINE__, BURST, total_packet_count());
++ rte_mempool_put_bulk(p, (void *)bufs, BURST);
+ return -1;
+ }
+
+ for (i = 0; i < rte_lcore_count() - 1; i++)
+ printf("Worker %u handled %u packets\n", i,
+- worker_stats[i].handled_packets);
++ __atomic_load_n(&worker_stats[i].handled_packets,
++ __ATOMIC_RELAXED));
+ printf("Sanity test with all zero hashes done.\n");
+
+ /* pick two flows and check they go correctly */
+@@ -153,12 +161,15 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ printf("Line %d: Error, not all packets flushed. "
+ "Expected %u, got %u\n",
+ __LINE__, BURST, total_packet_count());
++ rte_mempool_put_bulk(p, (void *)bufs, BURST);
+ return -1;
+ }
+
+ for (i = 0; i < rte_lcore_count() - 1; i++)
+ printf("Worker %u handled %u packets\n", i,
+- worker_stats[i].handled_packets);
++ __atomic_load_n(
++ &worker_stats[i].handled_packets,
++ __ATOMIC_RELAXED));
+ printf("Sanity test with two hash values done\n");
+ }
+
+@@ -179,12 +190,14 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ printf("Line %d: Error, not all packets flushed. "
+ "Expected %u, got %u\n",
+ __LINE__, BURST, total_packet_count());
++ rte_mempool_put_bulk(p, (void *)bufs, BURST);
+ return -1;
+ }
+
+ for (i = 0; i < rte_lcore_count() - 1; i++)
+ printf("Worker %u handled %u packets\n", i,
+- worker_stats[i].handled_packets);
++ __atomic_load_n(&worker_stats[i].handled_packets,
++ __ATOMIC_RELAXED));
+ printf("Sanity test with non-zero hashes done\n");
+
+ rte_mempool_put_bulk(p, (void *)bufs, BURST);
+@@ -194,6 +207,8 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ clear_packet_count();
+ struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
+ unsigned num_returned = 0;
++ unsigned int num_being_processed = 0;
++ unsigned int return_buffer_capacity = 127;/* RTE_DISTRIB_RETURNS_MASK */
+
+ /* flush out any remaining packets */
+ rte_distributor_flush(db);
+@@ -210,16 +225,16 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ for (i = 0; i < BIG_BATCH/BURST; i++) {
+ rte_distributor_process(db,
+ &many_bufs[i*BURST], BURST);
+- count = rte_distributor_returned_pkts(db,
+- &return_bufs[num_returned],
+- BIG_BATCH - num_returned);
+- num_returned += count;
++ num_being_processed += BURST;
++ do {
++ count = rte_distributor_returned_pkts(db,
++ &return_bufs[num_returned],
++ BIG_BATCH - num_returned);
++ num_being_processed -= count;
++ num_returned += count;
++ rte_distributor_flush(db);
++ } while (num_being_processed + BURST > return_buffer_capacity);
+ }
+- rte_distributor_flush(db);
+- count = rte_distributor_returned_pkts(db,
+- &return_bufs[num_returned],
+- BIG_BATCH - num_returned);
+- num_returned += count;
+ retries = 0;
+ do {
+ rte_distributor_flush(db);
+@@ -233,6 +248,7 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+ if (num_returned != BIG_BATCH) {
+ printf("line %d: Missing packets, expected %d\n",
+ __LINE__, num_returned);
++ rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
+ return -1;
+ }
+
+@@ -247,6 +263,7 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
+
+ if (j == BIG_BATCH) {
+ printf("Error: could not find source packet #%u\n", i);
++ rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
+ return -1;
+ }
+ }
+@@ -270,24 +287,20 @@ handle_work_with_free_mbufs(void *arg)
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+ struct worker_params *wp = arg;
+ struct rte_distributor *d = wp->dist;
+- unsigned int count = 0;
+ unsigned int i;
+- unsigned int num = 0;
++ unsigned int num;
+ unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+
+- for (i = 0; i < 8; i++)
+- buf[i] = NULL;
+- num = rte_distributor_get_pkt(d, id, buf, buf, num);
++ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
+ while (!quit) {
+- worker_stats[id].handled_packets += num;
+- count += num;
++ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
++ __ATOMIC_RELAXED);
+ for (i = 0; i < num; i++)
+ rte_pktmbuf_free(buf[i]);
+- num = rte_distributor_get_pkt(d,
+- id, buf, buf, num);
++ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
+ }
+- worker_stats[id].handled_packets += num;
+- count += num;
++ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
++ __ATOMIC_RELAXED);
+ rte_distributor_return_pkt(d, id, buf, num);
+ return 0;
+ }
+@@ -303,6 +316,7 @@ sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
+ struct rte_distributor *d = wp->dist;
+ unsigned i;
+ struct rte_mbuf *bufs[BURST];
++ unsigned int processed;
+
+ printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);
+
+@@ -313,10 +327,12 @@ sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
+ rte_distributor_process(d, NULL, 0);
+ for (j = 0; j < BURST; j++) {
+ bufs[j]->hash.usr = (i+j) << 1;
+- rte_mbuf_refcnt_set(bufs[j], 1);
+ }
+
+- rte_distributor_process(d, bufs, BURST);
++ processed = 0;
++ while (processed < BURST)
++ processed += rte_distributor_process(d,
++ &bufs[processed], BURST - processed);
+ }
+
+ rte_distributor_flush(d);
+@@ -337,55 +353,61 @@ sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
+ static int
+ handle_work_for_shutdown_test(void *arg)
+ {
+- struct rte_mbuf *pkt = NULL;
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+ struct worker_params *wp = arg;
+ struct rte_distributor *d = wp->dist;
+- unsigned int count = 0;
+- unsigned int num = 0;
+- unsigned int total = 0;
+- unsigned int i;
+- unsigned int returned = 0;
++ unsigned int num;
++ unsigned int zero_id = 0;
++ unsigned int zero_unset;
+ const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
+ __ATOMIC_RELAXED);
+
+- num = rte_distributor_get_pkt(d, id, buf, buf, num);
++ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
++
++ if (num > 0) {
++ zero_unset = RTE_MAX_LCORE;
++ __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
++ 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
++ }
++ zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+
+ /* wait for quit single globally, or for worker zero, wait
+ * for zero_quit */
+- while (!quit && !(id == 0 && zero_quit)) {
+- worker_stats[id].handled_packets += num;
+- count += num;
+- for (i = 0; i < num; i++)
+- rte_pktmbuf_free(buf[i]);
+- num = rte_distributor_get_pkt(d,
+- id, buf, buf, num);
+- total += num;
++ while (!quit && !(id == zero_id && zero_quit)) {
++ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
++ __ATOMIC_RELAXED);
++ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
++
++ if (num > 0) {
++ zero_unset = RTE_MAX_LCORE;
++ __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
++ 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
++ }
++ zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ }
+- worker_stats[id].handled_packets += num;
+- count += num;
+- returned = rte_distributor_return_pkt(d, id, buf, num);
+
+- if (id == 0) {
++ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
++ __ATOMIC_RELAXED);
++ if (id == zero_id) {
++ rte_distributor_return_pkt(d, id, NULL, 0);
++
+ /* for worker zero, allow it to restart to pick up last packet
+ * when all workers are shutting down.
+ */
++ __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
+ while (zero_quit)
+ usleep(100);
++ __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
+
+- num = rte_distributor_get_pkt(d,
+- id, buf, buf, num);
++ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
+
+ while (!quit) {
+- worker_stats[id].handled_packets += num;
+- count += num;
+- rte_pktmbuf_free(pkt);
+- num = rte_distributor_get_pkt(d, id, buf, buf, num);
++ __atomic_fetch_add(&worker_stats[id].handled_packets,
++ num, __ATOMIC_RELAXED);
++ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
+ }
+- returned = rte_distributor_return_pkt(d,
+- id, buf, num);
+- printf("Num returned = %d\n", returned);
+ }
++ rte_distributor_return_pkt(d, id, buf, num);
+ return 0;
+ }
+
+@@ -401,7 +423,10 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
+ {
+ struct rte_distributor *d = wp->dist;
+ struct rte_mbuf *bufs[BURST];
+- unsigned i;
++ struct rte_mbuf *bufs2[BURST];
++ unsigned int i;
++ unsigned int failed = 0;
++ unsigned int processed = 0;
+
+ printf("=== Sanity test of worker shutdown ===\n");
+
+@@ -419,7 +444,10 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
+ for (i = 0; i < BURST; i++)
+ bufs[i]->hash.usr = 1;
+
+- rte_distributor_process(d, bufs, BURST);
++ processed = 0;
++ while (processed < BURST)
++ processed += rte_distributor_process(d, &bufs[processed],
++ BURST - processed);
+ rte_distributor_flush(d);
+
+ /* at this point, we will have processed some packets and have a full
+@@ -427,32 +455,45 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
+ */
+
+ /* get more buffers to queue up, again setting them to the same flow */
+- if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
++ if (rte_mempool_get_bulk(p, (void *)bufs2, BURST) != 0) {
+ printf("line %d: Error getting mbufs from pool\n", __LINE__);
++ rte_mempool_put_bulk(p, (void *)bufs, BURST);
+ return -1;
+ }
+ for (i = 0; i < BURST; i++)
+- bufs[i]->hash.usr = 1;
++ bufs2[i]->hash.usr = 1;
+
+ /* get worker zero to quit */
+ zero_quit = 1;
+- rte_distributor_process(d, bufs, BURST);
++ rte_distributor_process(d, bufs2, BURST);
+
+ /* flush the distributor */
+ rte_distributor_flush(d);
+- rte_delay_us(10000);
++ while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
++ rte_distributor_flush(d);
++
++ zero_quit = 0;
++ while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
++ rte_delay_us(100);
+
+ for (i = 0; i < rte_lcore_count() - 1; i++)
+ printf("Worker %u handled %u packets\n", i,
+- worker_stats[i].handled_packets);
++ __atomic_load_n(&worker_stats[i].handled_packets,
++ __ATOMIC_RELAXED));
+
+ if (total_packet_count() != BURST * 2) {
+ printf("Line %d: Error, not all packets flushed. "
+ "Expected %u, got %u\n",
+ __LINE__, BURST * 2, total_packet_count());
+- return -1;
++ failed = 1;
+ }
+
++ rte_mempool_put_bulk(p, (void *)bufs, BURST);
++ rte_mempool_put_bulk(p, (void *)bufs2, BURST);
++
++ if (failed)
++ return -1;
++
+ printf("Sanity test with worker shutdown passed\n\n");
+ return 0;
+ }
+@@ -466,7 +507,9 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
+ {
+ struct rte_distributor *d = wp->dist;
+ struct rte_mbuf *bufs[BURST];
+- unsigned i;
++ unsigned int i;
++ unsigned int failed = 0;
++ unsigned int processed;
+
+ printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);
+
+@@ -481,7 +524,10 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
+ for (i = 0; i < BURST; i++)
+ bufs[i]->hash.usr = 0;
+
+- rte_distributor_process(d, bufs, BURST);
++ processed = 0;
++ while (processed < BURST)
++ processed += rte_distributor_process(d, &bufs[processed],
++ BURST - processed);
+ /* at this point, we will have processed some packets and have a full
+ * backlog for the other ones at worker 0.
+ */
+@@ -492,20 +538,31 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
+ /* flush the distributor */
+ rte_distributor_flush(d);
+
+- rte_delay_us(10000);
++ while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
++ rte_distributor_flush(d);
+
+ zero_quit = 0;
++
++ while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
++ rte_delay_us(100);
++
+ for (i = 0; i < rte_lcore_count() - 1; i++)
+ printf("Worker %u handled %u packets\n", i,
+- worker_stats[i].handled_packets);
++ __atomic_load_n(&worker_stats[i].handled_packets,
++ __ATOMIC_RELAXED));
+
+ if (total_packet_count() != BURST) {
+ printf("Line %d: Error, not all packets flushed. "
+ "Expected %u, got %u\n",
+ __LINE__, BURST, total_packet_count());
+- return -1;
++ failed = 1;
+ }
+
++ rte_mempool_put_bulk(p, (void *)bufs, BURST);
++
++ if (failed)
++ return -1;
++
+ printf("Flush test with worker shutdown passed\n\n");
+ return 0;
+ }
+@@ -571,21 +628,34 @@ quit_workers(struct worker_params *wp, struct rte_mempool *p)
+ const unsigned num_workers = rte_lcore_count() - 1;
+ unsigned i;
+ struct rte_mbuf *bufs[RTE_MAX_LCORE];
+- rte_mempool_get_bulk(p, (void *)bufs, num_workers);
++ struct rte_mbuf *returns[RTE_MAX_LCORE];
++ if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
++ printf("line %d: Error getting mbufs from pool\n", __LINE__);
++ return;
++ }
+
+ zero_quit = 0;
+ quit = 1;
+- for (i = 0; i < num_workers; i++)
++ for (i = 0; i < num_workers; i++) {
+ bufs[i]->hash.usr = i << 1;
+- rte_distributor_process(d, bufs, num_workers);
+-
+- rte_mempool_put_bulk(p, (void *)bufs, num_workers);
++ rte_distributor_process(d, &bufs[i], 1);
++ }
+
+ rte_distributor_process(d, NULL, 0);
+ rte_distributor_flush(d);
+ rte_eal_mp_wait_lcore();
++
++ while (rte_distributor_returned_pkts(d, returns, RTE_MAX_LCORE))
++ ;
++
++ rte_distributor_clear_returns(d);
++ rte_mempool_put_bulk(p, (void *)bufs, num_workers);
++
+ quit = 0;
+ worker_idx = 0;
++ zero_idx = RTE_MAX_LCORE;
++ zero_quit = 0;
++ zero_sleep = 0;
+ }
+
+ static int
+diff --git a/dpdk/app/test/test_event_crypto_adapter.c b/dpdk/app/test/test_event_crypto_adapter.c
+index 8d42462d87..1c0a0fa5e3 100644
+--- a/dpdk/app/test/test_event_crypto_adapter.c
++++ b/dpdk/app/test/test_event_crypto_adapter.c
+@@ -171,7 +171,6 @@ test_op_forward_mode(uint8_t session_less)
+ struct rte_event ev;
+ uint32_t cap;
+ int ret;
+- uint8_t cipher_key[17];
+
+ memset(&m_data, 0, sizeof(m_data));
+
+@@ -183,15 +182,9 @@ test_op_forward_mode(uint8_t session_less)
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+-
+- cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
++ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+- cipher_xform.cipher.key.data = cipher_key;
+- cipher_xform.cipher.key.length = 16;
+- cipher_xform.cipher.iv.offset = IV_OFFSET;
+- cipher_xform.cipher.iv.length = 16;
+-
+ op = rte_crypto_op_alloc(params.op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op,
+@@ -209,8 +202,8 @@ test_op_forward_mode(uint8_t session_less)
+ &cipher_xform, params.session_priv_mpool);
+ TEST_ASSERT_SUCCESS(ret, "Failed to init session\n");
+
+- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
+- evdev, &cap);
++ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
++ &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
+@@ -296,7 +289,7 @@ test_sessionless_with_op_forward_mode(void)
+ uint32_t cap;
+ int ret;
+
+- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
++ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+@@ -321,7 +314,7 @@ test_session_with_op_forward_mode(void)
+ uint32_t cap;
+ int ret;
+
+- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
++ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+@@ -378,7 +371,6 @@ test_op_new_mode(uint8_t session_less)
+ struct rte_mbuf *m;
+ uint32_t cap;
+ int ret;
+- uint8_t cipher_key[17];
+
+ memset(&m_data, 0, sizeof(m_data));
+
+@@ -390,15 +382,9 @@ test_op_new_mode(uint8_t session_less)
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+-
+- cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
++ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+- cipher_xform.cipher.key.data = cipher_key;
+- cipher_xform.cipher.key.length = 16;
+- cipher_xform.cipher.iv.offset = IV_OFFSET;
+- cipher_xform.cipher.iv.length = 16;
+-
+ op = rte_crypto_op_alloc(params.op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op, "Failed to allocate crypto_op!\n");
+@@ -410,8 +396,8 @@ test_op_new_mode(uint8_t session_less)
+ params.session_mpool);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
+
+- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
+- evdev, &cap);
++ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
++ &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
+@@ -460,7 +446,7 @@ test_sessionless_with_op_new_mode(void)
+ uint32_t cap;
+ int ret;
+
+- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
++ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+@@ -486,7 +472,7 @@ test_session_with_op_new_mode(void)
+ uint32_t cap;
+ int ret;
+
+- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
++ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+@@ -564,7 +550,9 @@ configure_cryptodev(void)
+
+ params.session_mpool = rte_cryptodev_sym_session_pool_create(
+ "CRYPTO_ADAPTER_SESSION_MP",
+- MAX_NB_SESSIONS, 0, 0, 0, SOCKET_ID_ANY);
++ MAX_NB_SESSIONS, 0, 0,
++ sizeof(union rte_event_crypto_metadata),
++ SOCKET_ID_ANY);
+ TEST_ASSERT_NOT_NULL(params.session_mpool,
+ "session mempool allocation failed\n");
+
+@@ -706,7 +694,7 @@ test_crypto_adapter_create(void)
+
+ /* Create adapter with default port creation callback */
+ ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
+- TEST_CDEV_ID,
++ evdev,
+ &conf, 0);
+ TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
+
+@@ -719,7 +707,7 @@ test_crypto_adapter_qp_add_del(void)
+ uint32_t cap;
+ int ret;
+
+- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
++ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+diff --git a/dpdk/app/test/test_event_eth_tx_adapter.c b/dpdk/app/test/test_event_eth_tx_adapter.c
+index 3af749280a..7073030902 100644
+--- a/dpdk/app/test/test_event_eth_tx_adapter.c
++++ b/dpdk/app/test/test_event_eth_tx_adapter.c
+@@ -45,7 +45,7 @@ static uint64_t eid = ~0ULL;
+ static uint32_t tid;
+
+ static inline int
+-port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
++port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
+ struct rte_mempool *mp)
+ {
+ const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
+@@ -104,7 +104,7 @@ port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
+ }
+
+ static inline int
+-port_init(uint8_t port, struct rte_mempool *mp)
++port_init(uint16_t port, struct rte_mempool *mp)
+ {
+ struct rte_eth_conf conf = { 0 };
+ return port_init_common(port, &conf, mp);
diff --git a/dpdk/app/test/test_eventdev.c b/dpdk/app/test/test_eventdev.c
index 427dbbf77f..43ccb1ce97 100644
--- a/dpdk/app/test/test_eventdev.c
@@ -3678,8 +8726,40 @@ index 0052dce2de..2ac298e21e 100644
 handle = rte_fbk_hash_create(&invalid_params_same_name_1);
 RETURN_IF_ERROR_FBK(handle == NULL,
 "fbk hash creation should have succeeded");
+diff --git a/dpdk/app/test/test_hash_readwrite_lf.c b/dpdk/app/test/test_hash_readwrite_lf_perf.c
+similarity index 99%
+rename from dpdk/app/test/test_hash_readwrite_lf.c
+rename to dpdk/app/test/test_hash_readwrite_lf_perf.c
+index 97c304054c..7bfc067f4e 100644
+--- a/dpdk/app/test/test_hash_readwrite_lf.c
++++ b/dpdk/app/test/test_hash_readwrite_lf_perf.c
+@@ -1241,7 +1241,7 @@ test_hash_add_ks_lookup_hit_extbkt(struct rwc_perf *rwc_perf_results,
+ }
+
+ static int
+-test_hash_readwrite_lf_main(void)
++test_hash_readwrite_lf_perf_main(void)
+ {
+ /*
+ * Variables used to choose different tests.
+@@ -1254,7 +1254,7 @@ test_hash_readwrite_lf_main(void) + int ext_bkt = 0; + + if (rte_lcore_count() < 2) { +- printf("Not enough cores for hash_readwrite_lf_autotest, expecting at least 2\n"); ++ printf("Not enough cores for hash_readwrite_lf_perf_autotest, expecting at least 2\n"); + return TEST_SKIPPED; + } + +@@ -1431,4 +1431,5 @@ test_hash_readwrite_lf_main(void) + return 0; + } + +-REGISTER_TEST_COMMAND(hash_readwrite_lf_autotest, test_hash_readwrite_lf_main); ++REGISTER_TEST_COMMAND(hash_readwrite_lf_perf_autotest, ++ test_hash_readwrite_lf_perf_main); diff --git a/dpdk/app/test/test_ipsec.c b/dpdk/app/test/test_ipsec.c -index 7dc83fee7e..79d00d7e02 100644 +index 7dc83fee7e..6a4bd12f7f 100644 --- a/dpdk/app/test/test_ipsec.c +++ b/dpdk/app/test/test_ipsec.c @@ -237,7 +237,7 @@ fill_crypto_xform(struct ipsec_unitest_params *ut_params, @@ -3700,6 +8780,15 @@ index 7dc83fee7e..79d00d7e02 100644 if (rc == 0) { ts_params->valid_dev = i; ts_params->valid_dev_found = 1; +@@ -743,7 +743,7 @@ create_sa(enum rte_security_session_action_type action_type, + ut->ss[j].type = action_type; + rc = create_session(ut, &ts->qp_conf, ts->valid_dev, j); + if (rc != 0) +- return TEST_FAILED; ++ return rc; + + rc = rte_ipsec_sa_init(ut->ss[j].sa, &ut->sa_prm, sz); + rc = (rc > 0 && (uint32_t)rc <= sz) ? 0 : -EINVAL; @@ -1167,6 +1167,34 @@ test_ipsec_dump_buffers(struct ipsec_unitest_params *ut_params, int i) } } @@ -3747,6 +8836,141 @@ index 7dc83fee7e..79d00d7e02 100644 } static int +@@ -1219,7 +1246,7 @@ test_ipsec_crypto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1321,7 +1348,7 @@ test_ipsec_crypto_outb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate input mbuf data */ +@@ -1430,7 +1457,7 @@ test_ipsec_inline_crypto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -1508,7 +1535,7 @@ test_ipsec_inline_proto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -1616,7 +1643,7 @@ test_ipsec_inline_crypto_outb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1694,7 +1721,7 @@ test_ipsec_inline_proto_outb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1770,7 +1797,7 @@ test_ipsec_lksd_proto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1883,7 +1910,7 @@ test_ipsec_replay_inb_inside_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + 
RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -1976,7 +2003,7 @@ test_ipsec_replay_inb_outside_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -2076,7 +2103,7 @@ test_ipsec_replay_inb_repeat_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -2177,7 +2204,7 @@ test_ipsec_replay_inb_inside_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -2310,7 +2337,7 @@ test_ipsec_crypto_inb_burst_2sa_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 0 failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* create second rte_ipsec_sa */ +@@ -2320,7 +2347,7 @@ test_ipsec_crypto_inb_burst_2sa_null_null(int i) + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 1 failed, cfg %d\n", i); + destroy_sa(0); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -2396,7 +2423,7 @@ test_ipsec_crypto_inb_burst_2sa_4grp_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 0 failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* create second rte_ipsec_sa */ +@@ -2406,7 +2433,7 @@ test_ipsec_crypto_inb_burst_2sa_4grp_null_null(int i) + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 1 failed, cfg %d\n", i); + destroy_sa(0); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ diff --git a/dpdk/app/test/test_kvargs.c b/dpdk/app/test/test_kvargs.c index a42056f361..2a2dae43a0 100644 --- a/dpdk/app/test/test_kvargs.c @@ -3861,7 +9085,7 @@ index a16e28cc32..57f796f9e5 100644 err_return: diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c -index 61ecffc184..f2922e05e0 100644 +index 61ecffc184..a5bd1693b2 100644 --- a/dpdk/app/test/test_mbuf.c +++ b/dpdk/app/test/test_mbuf.c @@ -1144,7 +1144,7 @@ test_refcnt_mbuf(void) @@ -3873,8 +9097,89 @@ index 61ecffc184..f2922e05e0 100644 tref, refcnt_lcore[master]); rte_mempool_dump(stdout, refcnt_pool); +@@ -2481,9 +2481,13 @@ test_mbuf_dyn(struct rte_mempool *pktmbuf_pool) + + offset3 = rte_mbuf_dynfield_register_offset(&dynfield3, + offsetof(struct rte_mbuf, dynfield1[1])); +- if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) +- GOTO_FAIL("failed to register dynamic field 3, offset=%d: %s", +- offset3, strerror(errno)); ++ if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) { ++ if (rte_errno == EBUSY) ++ printf("mbuf test error skipped: dynfield is busy\n"); ++ else ++ GOTO_FAIL("failed to register dynamic field 3, offset=" ++ "%d: %s", offset3, strerror(errno)); ++ } + + printf("dynfield: offset=%d, offset2=%d, offset3=%d\n", + offset, offset2, offset3); +@@ -2519,7 +2523,7 @@ test_mbuf_dyn(struct rte_mempool *pktmbuf_pool) + flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3, + rte_bsf64(PKT_LAST_FREE)); + if (flag3 != rte_bsf64(PKT_LAST_FREE)) +- GOTO_FAIL("failed to register dynamic flag 3, flag2=%d: %s", ++ GOTO_FAIL("failed to register 
dynamic flag 3, flag3=%d: %s", + flag3, strerror(errno)); + + printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3); +diff --git a/dpdk/app/test/test_mcslock.c b/dpdk/app/test/test_mcslock.c +index e9359df2ee..b70dd4775b 100644 +--- a/dpdk/app/test/test_mcslock.c ++++ b/dpdk/app/test/test_mcslock.c +@@ -37,10 +37,6 @@ + * lock multiple times. + */ + +-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me); +-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me); +-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me); +- + rte_mcslock_t *p_ml; + rte_mcslock_t *p_ml_try; + rte_mcslock_t *p_ml_perf; +@@ -53,7 +49,7 @@ static int + test_mcslock_per_core(__attribute__((unused)) void *arg) + { + /* Per core me node. */ +- rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me); ++ rte_mcslock_t ml_me; + + rte_mcslock_lock(&p_ml, &ml_me); + printf("MCS lock taken on core %u\n", rte_lcore_id()); +@@ -77,7 +73,7 @@ load_loop_fn(void *func_param) + const unsigned int lcore = rte_lcore_id(); + + /**< Per core me node. */ +- rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me); ++ rte_mcslock_t ml_perf_me; + + /* wait synchro */ + while (rte_atomic32_read(&synchro) == 0) +@@ -151,8 +147,8 @@ static int + test_mcslock_try(__attribute__((unused)) void *arg) + { + /**< Per core me node. */ +- rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me); +- rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me); ++ rte_mcslock_t ml_me; ++ rte_mcslock_t ml_try_me; + + /* Locked ml_try in the master lcore, so it should fail + * when trying to lock it in the slave lcore. +@@ -178,8 +174,8 @@ test_mcslock(void) + int i; + + /* Define per core me node. */ +- rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me); +- rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me); ++ rte_mcslock_t ml_me; ++ rte_mcslock_t ml_try_me; + + /* + * Test mcs lock & unlock on each core diff --git a/dpdk/app/test/test_pmd_perf.c b/dpdk/app/test/test_pmd_perf.c -index d61be58bb3..352cd47156 100644 +index d61be58bb3..de7e726429 100644 --- a/dpdk/app/test/test_pmd_perf.c +++ b/dpdk/app/test/test_pmd_perf.c @@ -151,7 +151,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask) @@ -3886,6 +9191,276 @@ index d61be58bb3..352cd47156 100644 if (link_mbps == 0) link_mbps = link.link_speed; } else +@@ -609,10 +609,10 @@ poll_burst(void *args) + static int + exec_burst(uint32_t flags, int lcore) + { +- unsigned i, portid, nb_tx = 0; ++ unsigned int portid, nb_tx = 0; + struct lcore_conf *conf; + uint32_t pkt_per_port; +- int num, idx = 0; ++ int num, i, idx = 0; + int diff_tsc; + + conf = &lcore_conf[lcore]; +@@ -631,16 +631,14 @@ exec_burst(uint32_t flags, int lcore) + rte_atomic64_set(&start, 1); + + /* start xmit */ ++ i = 0; + while (num) { + nb_tx = RTE_MIN(MAX_PKT_BURST, num); +- for (i = 0; i < conf->nb_ports; i++) { +- portid = conf->portlist[i]; +- nb_tx = rte_eth_tx_burst(portid, 0, +- &tx_burst[idx], nb_tx); +- idx += nb_tx; +- num -= nb_tx; +- } +- ++ portid = conf->portlist[i]; ++ nb_tx = rte_eth_tx_burst(portid, 0, &tx_burst[idx], nb_tx); ++ idx += nb_tx; ++ num -= nb_tx; ++ i = (i >= conf->nb_ports - 1) ? 
0 : (i + 1); + } + + sleep(5); +diff --git a/dpdk/app/test/test_rcu_qsbr.c b/dpdk/app/test/test_rcu_qsbr.c +index b60dc5099c..5542b3c175 100644 +--- a/dpdk/app/test/test_rcu_qsbr.c ++++ b/dpdk/app/test/test_rcu_qsbr.c +@@ -273,13 +273,13 @@ static int + test_rcu_qsbr_start(void) + { + uint64_t token; +- int i; ++ unsigned int i; + + printf("\nTest rte_rcu_qsbr_start()\n"); + + rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE); + +- for (i = 0; i < 3; i++) ++ for (i = 0; i < num_cores; i++) + rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]); + + token = rte_rcu_qsbr_start(t[0]); +@@ -293,14 +293,18 @@ test_rcu_qsbr_check_reader(void *arg) + { + struct rte_rcu_qsbr *temp; + uint8_t read_type = (uint8_t)((uintptr_t)arg); ++ unsigned int i; + + temp = t[read_type]; + + /* Update quiescent state counter */ +- rte_rcu_qsbr_quiescent(temp, enabled_core_ids[0]); +- rte_rcu_qsbr_quiescent(temp, enabled_core_ids[1]); +- rte_rcu_qsbr_thread_unregister(temp, enabled_core_ids[2]); +- rte_rcu_qsbr_quiescent(temp, enabled_core_ids[3]); ++ for (i = 0; i < num_cores; i++) { ++ if (i % 2 == 0) ++ rte_rcu_qsbr_quiescent(temp, enabled_core_ids[i]); ++ else ++ rte_rcu_qsbr_thread_unregister(temp, ++ enabled_core_ids[i]); ++ } + return 0; + } + +@@ -311,7 +315,8 @@ test_rcu_qsbr_check_reader(void *arg) + static int + test_rcu_qsbr_check(void) + { +- int i, ret; ++ int ret; ++ unsigned int i; + uint64_t token; + + printf("\nTest rte_rcu_qsbr_check()\n"); +@@ -329,7 +334,7 @@ test_rcu_qsbr_check(void) + ret = rte_rcu_qsbr_check(t[0], token, true); + TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Blocking QSBR check"); + +- for (i = 0; i < 3; i++) ++ for (i = 0; i < num_cores; i++) + rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]); + + ret = rte_rcu_qsbr_check(t[0], token, false); +@@ -344,7 +349,7 @@ test_rcu_qsbr_check(void) + /* Threads are offline, hence this should pass */ + TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Non-blocking QSBR check"); + +- for (i = 0; i < 3; i++) ++ for (i = 0; i < num_cores; i++) + rte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[i]); + + ret = rte_rcu_qsbr_check(t[0], token, true); +@@ -352,7 +357,7 @@ test_rcu_qsbr_check(void) + + rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE); + +- for (i = 0; i < 4; i++) ++ for (i = 0; i < num_cores; i++) + rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]); + + token = rte_rcu_qsbr_start(t[0]); +@@ -591,7 +596,7 @@ test_rcu_qsbr_thread_offline(void) + static int + test_rcu_qsbr_dump(void) + { +- int i; ++ unsigned int i; + + printf("\nTest rte_rcu_qsbr_dump()\n"); + +@@ -608,7 +613,7 @@ test_rcu_qsbr_dump(void) + + rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]); + +- for (i = 1; i < 3; i++) ++ for (i = 1; i < num_cores; i++) + rte_rcu_qsbr_thread_register(t[1], enabled_core_ids[i]); + + rte_rcu_qsbr_dump(stdout, t[0]); +@@ -758,7 +763,7 @@ test_rcu_qsbr_sw_sv_3qs(void) + { + uint64_t token[3]; + uint32_t c; +- int i; ++ int i, num_readers; + int32_t pos[3]; + + writer_done = 0; +@@ -781,7 +786,11 @@ test_rcu_qsbr_sw_sv_3qs(void) + thread_info[0].ih = 0; + + /* Reader threads are launched */ +- for (i = 0; i < 4; i++) ++ /* Keep the number of reader threads low to reduce ++ * the execution time. ++ */ ++ num_readers = num_cores < 4 ? 
num_cores : 4; ++ for (i = 0; i < num_readers; i++) + rte_eal_remote_launch(test_rcu_qsbr_reader, &thread_info[0], + enabled_core_ids[i]); + +@@ -814,7 +823,7 @@ test_rcu_qsbr_sw_sv_3qs(void) + + /* Check the quiescent state status */ + rte_rcu_qsbr_check(t[0], token[0], true); +- for (i = 0; i < 4; i++) { ++ for (i = 0; i < num_readers; i++) { + c = hash_data[0][0][enabled_core_ids[i]]; + if (c != COUNTER_VALUE && c != 0) { + printf("Reader lcore %d did not complete #0 = %d\n", +@@ -832,7 +841,7 @@ test_rcu_qsbr_sw_sv_3qs(void) + + /* Check the quiescent state status */ + rte_rcu_qsbr_check(t[0], token[1], true); +- for (i = 0; i < 4; i++) { ++ for (i = 0; i < num_readers; i++) { + c = hash_data[0][3][enabled_core_ids[i]]; + if (c != COUNTER_VALUE && c != 0) { + printf("Reader lcore %d did not complete #3 = %d\n", +@@ -850,7 +859,7 @@ test_rcu_qsbr_sw_sv_3qs(void) + + /* Check the quiescent state status */ + rte_rcu_qsbr_check(t[0], token[2], true); +- for (i = 0; i < 4; i++) { ++ for (i = 0; i < num_readers; i++) { + c = hash_data[0][6][enabled_core_ids[i]]; + if (c != COUNTER_VALUE && c != 0) { + printf("Reader lcore %d did not complete #6 = %d\n", +@@ -869,7 +878,7 @@ test_rcu_qsbr_sw_sv_3qs(void) + writer_done = 1; + + /* Wait and check return value from reader threads */ +- for (i = 0; i < 4; i++) ++ for (i = 0; i < num_readers; i++) + if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0) + goto error; + rte_hash_free(h[0]); +@@ -899,6 +908,12 @@ test_rcu_qsbr_mw_mv_mqs(void) + unsigned int i, j; + unsigned int test_cores; + ++ if (RTE_MAX_LCORE < 5 || num_cores < 4) { ++ printf("Not enough cores for %s, expecting at least 5\n", ++ __func__); ++ return TEST_SKIPPED; ++ } ++ + writer_done = 0; + test_cores = num_cores / 4; + test_cores = test_cores * 4; +@@ -984,11 +999,6 @@ test_rcu_qsbr_main(void) + { + uint16_t core_id; + +- if (rte_lcore_count() < 5) { +- printf("Not enough cores for rcu_qsbr_autotest, expecting at least 5\n"); +- return TEST_SKIPPED; +- } +- + num_cores = 0; + RTE_LCORE_FOREACH_SLAVE(core_id) { + enabled_core_ids[num_cores] = core_id; +diff --git a/dpdk/app/test/test_ring.c b/dpdk/app/test/test_ring.c +index aaf1e70ad8..4825c9e2e9 100644 +--- a/dpdk/app/test/test_ring.c ++++ b/dpdk/app/test/test_ring.c +@@ -696,7 +696,7 @@ test_ring_basic_ex(void) + + printf("%u ring entries are now free\n", rte_ring_free_count(rp)); + +- for (i = 0; i < RING_SIZE; i ++) { ++ for (i = 0; i < RING_SIZE - 1; i ++) { + rte_ring_enqueue(rp, obj[i]); + } + +@@ -705,7 +705,7 @@ test_ring_basic_ex(void) + goto fail_test; + } + +- for (i = 0; i < RING_SIZE; i ++) { ++ for (i = 0; i < RING_SIZE - 1; i ++) { + rte_ring_dequeue(rp, &obj[i]); + } + +diff --git a/dpdk/app/test/test_ring_perf.c b/dpdk/app/test/test_ring_perf.c +index 70ee46ffe6..3cf27965de 100644 +--- a/dpdk/app/test/test_ring_perf.c ++++ b/dpdk/app/test/test_ring_perf.c +@@ -296,12 +296,13 @@ load_loop_fn(void *p) + static int + run_on_all_cores(struct rte_ring *r) + { +- uint64_t total = 0; ++ uint64_t total; + struct thread_params param; + unsigned int i, c; + + memset(¶m, 0, sizeof(struct thread_params)); + for (i = 0; i < RTE_DIM(bulk_sizes); i++) { ++ total = 0; + printf("\nBulk enq/dequeue count on size %u\n", bulk_sizes[i]); + param.size = bulk_sizes[i]; + param.r = r; +diff --git a/dpdk/app/test/test_service_cores.c b/dpdk/app/test/test_service_cores.c +index a922c7ddcc..2a4978e29a 100644 +--- a/dpdk/app/test/test_service_cores.c ++++ b/dpdk/app/test/test_service_cores.c +@@ -114,6 +114,7 @@ unregister_all(void) + } + + 
rte_service_lcore_reset_all(); ++ rte_eal_mp_wait_lcore(); + + return TEST_SUCCESS; + } diff --git a/dpdk/app/test/test_table_pipeline.c b/dpdk/app/test/test_table_pipeline.c index 441338ac01..bc412c3081 100644 --- a/dpdk/app/test/test_table_pipeline.c @@ -3909,11 +9484,48 @@ index 441338ac01..bc412c3081 100644 } /* Check pipeline consistency */ +diff --git a/dpdk/buildtools/call-sphinx-build.py b/dpdk/buildtools/call-sphinx-build.py +new file mode 100755 +index 0000000000..b9a3994e17 +--- /dev/null ++++ b/dpdk/buildtools/call-sphinx-build.py +@@ -0,0 +1,31 @@ ++#! /usr/bin/env python3 ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2019 Intel Corporation ++# ++ ++import sys ++import os ++from os.path import join ++from subprocess import run, PIPE ++from distutils.version import StrictVersion ++ ++(sphinx, src, dst) = sys.argv[1:] # assign parameters to variables ++ ++# for sphinx version >= 1.7 add parallelism using "-j auto" ++ver = run([sphinx, '--version'], stdout=PIPE).stdout.decode().split()[-1] ++sphinx_cmd = [sphinx] ++if StrictVersion(ver) >= StrictVersion('1.7'): ++ sphinx_cmd += ['-j', 'auto'] ++ ++# find all the files sphinx will process so we can write them as dependencies ++srcfiles = [] ++for root, dirs, files in os.walk(src): ++ srcfiles.extend([join(root, f) for f in files]) ++ ++# run sphinx, putting the html output in a "html" directory ++process = run(sphinx_cmd + ['-b', 'html', src, join(dst, 'html')], check=True) ++print(str(process.args) + ' Done OK') ++ ++# create a gcc format .d file giving all the dependencies of this doc build ++with open(join(dst, '.html.d'), 'w') as d: ++ d.write('html: ' + ' '.join(srcfiles) + '\n') diff --git a/dpdk/buildtools/meson.build b/dpdk/buildtools/meson.build -index 6ef2c5721c..cd6f4c1af0 100644 +index 6ef2c5721c..ea13d9fc3f 100644 --- a/dpdk/buildtools/meson.build +++ b/dpdk/buildtools/meson.build -@@ -3,9 +3,11 @@ +@@ -3,17 +3,21 @@ subdir('pmdinfogen') @@ -3925,6 +9537,18 @@ index 6ef2c5721c..cd6f4c1af0 100644 # set up map-to-def script using python, either built-in or external python3 = import('python').find_installation(required: false) + if python3.found() +- map_to_def_cmd = [python3, files('map_to_def.py')] ++ py3 = [python3] + else +- map_to_def_cmd = ['meson', 'runpython', files('map_to_def.py')] ++ py3 = ['meson', 'runpython'] + endif ++map_to_def_cmd = py3 + files('map_to_def.py') ++sphinx_wrapper = py3 + files('call-sphinx-build.py') + + # stable ABI always starts with "DPDK_" + is_experimental_cmd = [find_program('grep', 'findstr'), '^DPDK_'] diff --git a/dpdk/buildtools/options-ibverbs-static.sh b/dpdk/buildtools/options-ibverbs-static.sh index 0f285a343b..0740a711ff 100755 --- a/dpdk/buildtools/options-ibverbs-static.sh @@ -3945,8 +9569,130 @@ index 0f285a343b..0740a711ff 100755 + tac | + awk "/^-l:$lib.a/&&c++ {next} 1" | # drop first duplicates of main lib + tac +diff --git a/dpdk/buildtools/pkg-config/meson.build b/dpdk/buildtools/pkg-config/meson.build +new file mode 100644 +index 0000000000..39a8fd1c8e +--- /dev/null ++++ b/dpdk/buildtools/pkg-config/meson.build +@@ -0,0 +1,59 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2020 Intel Corporation ++ ++pkg = import('pkgconfig') ++pkg_extra_cflags = ['-include', 'rte_config.h'] + machine_args ++if is_freebsd ++ pkg_extra_cflags += ['-D__BSD_VISIBLE'] ++endif ++ ++# When calling pkg-config --static --libs, pkg-config will always output the ++# regular libs first, and then the extra libs from Libs.private field, ++# since the assumption is 
that those are additional dependencies for building ++# statically that the .a files depend upon. The output order of .pc fields is: ++# Libs Libs.private Requires Requires.private ++# The fields Requires* are for package names. ++# The flags of the DPDK libraries must be defined in Libs* fields. ++# However, the DPDK drivers are linked only in static builds (Libs.private), ++# and those need to come *before* the regular libraries (Libs field). ++# This requirement is satisfied by moving the regular libs in a separate file ++# included in the field Requires (after Libs.private). ++# Another requirement is to allow linking dependencies as shared libraries, ++# while linking static DPDK libraries and drivers. It is satisfied by ++# listing the static files in Libs.private with the explicit syntax -l:libfoo.a. ++# As a consequence, the regular DPDK libraries are already listed as static ++# in the field Libs.private. The second occurences of DPDK libraries, ++# included from Requires and used for shared library linkage case, ++# are skipped in the case of static linkage thanks to the flag --as-needed. ++ ++ ++pkg.generate(name: 'dpdk-libs', ++ filebase: 'libdpdk-libs', ++ description: '''Internal-only DPDK pkgconfig file. Not for direct use. ++Use libdpdk.pc instead of this file to query DPDK compile/link arguments''', ++ version: meson.project_version(), ++ subdirs: [get_option('include_subdir_arch'), '.'], ++ extra_cflags: pkg_extra_cflags, ++ libraries: ['-Wl,--as-needed'] + dpdk_libraries, ++ libraries_private: dpdk_extra_ldflags) ++ ++platform_flags = [] ++if not is_windows ++ platform_flags += ['-Wl,--export-dynamic'] # ELF only ++endif ++pkg.generate(name: 'DPDK', # main DPDK pkgconfig file ++ filebase: 'libdpdk', ++ version: meson.project_version(), ++ description: '''The Data Plane Development Kit (DPDK). ++Note that CFLAGS might contain an -march flag higher than typical baseline. ++This is required for a number of static inline functions in the public headers.''', ++ requires: ['libdpdk-libs', libbsd], # may need libbsd for string funcs ++ # if libbsd is not enabled, then this is blank ++ libraries_private: ['-Wl,--whole-archive'] + ++ dpdk_drivers + dpdk_static_libraries + ++ ['-Wl,--no-whole-archive'] + platform_flags ++) ++ ++# For static linking with dependencies as shared libraries, ++# the internal static libraries must be flagged explicitly. 
++run_command(py3, 'set-static-linker-flags.py', check: true) +diff --git a/dpdk/buildtools/pkg-config/set-static-linker-flags.py b/dpdk/buildtools/pkg-config/set-static-linker-flags.py +new file mode 100644 +index 0000000000..2745db34c2 +--- /dev/null ++++ b/dpdk/buildtools/pkg-config/set-static-linker-flags.py +@@ -0,0 +1,38 @@ ++#!/usr/bin/env python3 ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2020 Intel Corporation ++ ++# Script to fix flags for static linking in pkgconfig files from meson ++# Should be called from meson build itself ++import os ++import sys ++ ++ ++def fix_ldflag(f): ++ if not f.startswith('-lrte_'): ++ return f ++ return '-l:lib' + f[2:] + '.a' ++ ++ ++def fix_libs_private(line): ++ if not line.startswith('Libs.private'): ++ return line ++ ldflags = [fix_ldflag(flag) for flag in line.split()] ++ return ' '.join(ldflags) + '\n' ++ ++ ++def process_pc_file(filepath): ++ print('Processing', filepath) ++ with open(filepath) as src: ++ lines = src.readlines() ++ with open(filepath, 'w') as dst: ++ dst.writelines([fix_libs_private(line) for line in lines]) ++ ++ ++if 'MESON_BUILD_ROOT' not in os.environ: ++ print('This script must be called from a meson build environment') ++ sys.exit(1) ++for root, dirs, files in os.walk(os.environ['MESON_BUILD_ROOT']): ++ pc_files = [f for f in files if f.endswith('.pc')] ++ for f in pc_files: ++ process_pc_file(os.path.join(root, f)) +diff --git a/dpdk/buildtools/pmdinfogen/pmdinfogen.h b/dpdk/buildtools/pmdinfogen/pmdinfogen.h +index c8a9e2136a..467216d12b 100644 +--- a/dpdk/buildtools/pmdinfogen/pmdinfogen.h ++++ b/dpdk/buildtools/pmdinfogen/pmdinfogen.h +@@ -82,7 +82,7 @@ if ((fend) == ELFDATA2LSB) \ + ___x = le##width##toh(x); \ + else \ + ___x = be##width##toh(x); \ +- ___x; \ ++___x; \ + }) + + #define TO_NATIVE(fend, width, x) CONVERT_NATIVE(fend, width, x) diff --git a/dpdk/config/common_base b/dpdk/config/common_base -index 7dec7ed457..861f7d1a0b 100644 +index 7dec7ed457..3406146372 100644 --- a/dpdk/config/common_base +++ b/dpdk/config/common_base @@ -328,7 +328,6 @@ CONFIG_RTE_LIBRTE_ICE_PMD=y @@ -3966,8 +9712,63 @@ index 7dec7ed457..861f7d1a0b 100644 # CONFIG_RTE_LIBRTE_MLX5_PMD=n CONFIG_RTE_LIBRTE_MLX5_DEBUG=n +@@ -573,7 +572,6 @@ CONFIG_RTE_CRYPTO_MAX_DEVS=64 + # Compile PMD for ARMv8 Crypto device + # + CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n +-CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n + + # + # Compile NXP CAAM JR crypto Driver +diff --git a/dpdk/config/defconfig_arm-armv7a-linuxapp-gcc b/dpdk/config/defconfig_arm-armv7a-linuxapp-gcc +index c91423f0e6..749f9924d5 100644 +--- a/dpdk/config/defconfig_arm-armv7a-linuxapp-gcc ++++ b/dpdk/config/defconfig_arm-armv7a-linuxapp-gcc +@@ -45,7 +45,6 @@ CONFIG_RTE_LIBRTE_CXGBE_PMD=n + CONFIG_RTE_LIBRTE_E1000_PMD=n + CONFIG_RTE_LIBRTE_ENIC_PMD=n + CONFIG_RTE_LIBRTE_FM10K_PMD=n +-CONFIG_RTE_LIBRTE_I40E_PMD=n + CONFIG_RTE_LIBRTE_IXGBE_PMD=n + CONFIG_RTE_LIBRTE_MLX4_PMD=n + CONFIG_RTE_LIBRTE_VMXNET3_PMD=n +diff --git a/dpdk/config/defconfig_arm64-graviton2-linux-gcc b/dpdk/config/defconfig_arm64-graviton2-linux-gcc +new file mode 120000 +index 0000000000..80ac94d54d +--- /dev/null ++++ b/dpdk/config/defconfig_arm64-graviton2-linux-gcc +@@ -0,0 +1 @@ ++defconfig_arm64-graviton2-linuxapp-gcc +\ No newline at end of file +diff --git a/dpdk/config/defconfig_arm64-graviton2-linuxapp-gcc b/dpdk/config/defconfig_arm64-graviton2-linuxapp-gcc +new file mode 100644 +index 0000000000..e99fef3073 +--- /dev/null ++++ b/dpdk/config/defconfig_arm64-graviton2-linuxapp-gcc +@@ -0,0 +1,13 
@@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) Amazon.com, Inc or its affiliates ++# ++ ++#include "defconfig_arm64-armv8a-linux-gcc" ++ ++CONFIG_RTE_MACHINE="graviton2" ++CONFIG_RTE_MAX_LCORE=64 ++CONFIG_RTE_CACHE_LINE_SIZE=64 ++CONFIG_RTE_MAX_MEM_MB=1048576 ++CONFIG_RTE_MAX_NUMA_NODES=1 ++CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n ++CONFIG_RTE_LIBRTE_VHOST_NUMA=n +diff --git a/dpdk/config/defconfig_graviton2 b/dpdk/config/defconfig_graviton2 +new file mode 120000 +index 0000000000..80ac94d54d +--- /dev/null ++++ b/dpdk/config/defconfig_graviton2 +@@ -0,0 +1 @@ ++defconfig_arm64-graviton2-linuxapp-gcc +\ No newline at end of file diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build -index 364a8d7394..78bfdf3094 100644 +index 364a8d7394..b1f728ee86 100644 --- a/dpdk/config/meson.build +++ b/dpdk/config/meson.build @@ -14,6 +14,10 @@ foreach env:supported_exec_envs @@ -3981,7 +9782,22 @@ index 364a8d7394..78bfdf3094 100644 # set the major version, which might be used by drivers and libraries # depending on the configuration options pver = meson.project_version().split('.') -@@ -98,14 +102,18 @@ dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1) +@@ -50,9 +54,11 @@ eal_pmd_path = join_paths(get_option('prefix'), driver_install_path) + # driver .so files often depend upon the bus drivers for their connect bus, + # e.g. ixgbe depends on librte_bus_pci. This means that the bus drivers need + # to be in the library path, so symlink the drivers from the main lib directory. +-meson.add_install_script('../buildtools/symlink-drivers-solibs.sh', +- get_option('libdir'), +- pmd_subdir_opt) ++if not is_windows ++ meson.add_install_script('../buildtools/symlink-drivers-solibs.sh', ++ get_option('libdir'), ++ pmd_subdir_opt) ++endif + + # set the machine type and cflags for it + if meson.is_cross_build() +@@ -98,14 +104,18 @@ dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1) dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8) @@ -4005,7 +9821,47 @@ index 364a8d7394..78bfdf3094 100644 # some libs depend on maths lib add_project_link_arguments('-lm', language: 'c') dpdk_extra_ldflags += '-lm' -@@ -183,6 +191,10 @@ warning_flags = [ +@@ -136,18 +146,25 @@ if numa_dep.found() and cc.has_header('numaif.h') + dpdk_extra_ldflags += '-lnuma' + endif + ++has_libfdt = 0 ++fdt_dep = cc.find_library('libfdt', required: false) ++if fdt_dep.found() and cc.has_header('fdt.h') ++ dpdk_conf.set10('RTE_HAS_LIBFDT', true) ++ has_libfdt = 1 ++ add_project_link_arguments('-lfdt', language: 'c') ++ dpdk_extra_ldflags += '-lfdt' ++endif ++ + # check for libbsd +-libbsd = dependency('libbsd', required: false) ++libbsd = dependency('libbsd', required: false, method: 'pkg-config') + if libbsd.found() + dpdk_conf.set('RTE_USE_LIBBSD', 1) + endif + + # check for pcap +-pcap_dep = dependency('pcap', required: false) +-if pcap_dep.found() +- # pcap got a pkg-config file only in 1.9.0 and before that meson uses +- # an internal pcap-config finder, which is not compatible with +- # cross-compilation, so try to fallback to find_library ++pcap_dep = dependency('libpcap', required: false, method: 'pkg-config') ++if not pcap_dep.found() ++ # pcap got a pkg-config file only in 1.9.0 + pcap_dep = cc.find_library('pcap', required: false) + endif + if pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep) +@@ -166,6 +183,7 @@ warning_flags = [ + # additional warnings in alphabetical order + '-Wcast-qual', + '-Wdeprecated', ++ '-Wformat', + '-Wformat-nonliteral', + '-Wformat-security', + 
'-Wmissing-declarations', +@@ -183,6 +201,10 @@ warning_flags = [ '-Wno-packed-not-aligned', '-Wno-missing-field-initializers' ] @@ -4016,7 +9872,19 @@ index 364a8d7394..78bfdf3094 100644 if not dpdk_conf.get('RTE_ARCH_64') # for 32-bit, don't warn about casting a 32-bit pointer to 64-bit int - it's fine!! warning_flags += '-Wno-pointer-to-int-cast' -@@ -231,6 +243,16 @@ if is_freebsd +@@ -202,6 +224,11 @@ dpdk_conf.set('RTE_LIBEAL_USE_HPET', get_option('use_hpet')) + dpdk_conf.set('RTE_MAX_VFIO_GROUPS', 64) + dpdk_conf.set('RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB', 64) + dpdk_conf.set('RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', true) ++if dpdk_conf.get('RTE_ARCH_64') ++ dpdk_conf.set('RTE_MAX_MEM_MB', 524288) ++else # for 32-bit we need smaller reserved memory areas ++ dpdk_conf.set('RTE_MAX_MEM_MB', 2048) ++endif + + + compile_time_cpuflags = [] +@@ -231,6 +258,16 @@ if is_freebsd add_project_arguments('-D__BSD_VISIBLE', language: 'c') endif @@ -4033,6 +9901,50 @@ index 364a8d7394..78bfdf3094 100644 if get_option('b_lto') if cc.has_argument('-ffat-lto-objects') add_project_arguments('-ffat-lto-objects', language: 'c') +@@ -243,3 +280,12 @@ if get_option('b_lto') + add_project_link_arguments('-Wno-lto-type-mismatch', language: 'c') + endif + endif ++ ++if get_option('default_library') == 'both' ++ error( ''' ++ Unsupported value "both" for "default_library" option. ++ ++ NOTE: DPDK always builds both shared and static libraries. Please set ++ "default_library" to either "static" or "shared" to select default linkage ++ for apps and any examples.''') ++endif +diff --git a/dpdk/config/rte_config.h b/dpdk/config/rte_config.h +index d30786bc08..8ec0a58f19 100644 +--- a/dpdk/config/rte_config.h ++++ b/dpdk/config/rte_config.h +@@ -38,7 +38,6 @@ + #define RTE_MAX_MEM_MB_PER_LIST 32768 + #define RTE_MAX_MEMSEG_PER_TYPE 32768 + #define RTE_MAX_MEM_MB_PER_TYPE 65536 +-#define RTE_MAX_MEM_MB 524288 + #define RTE_MAX_MEMZONE 2560 + #define RTE_MAX_TAILQ 32 + #define RTE_LOG_DP_LEVEL RTE_LOG_INFO +@@ -95,11 +94,18 @@ + #define RTE_SCHED_PORT_N_GRINDERS 8 + #undef RTE_SCHED_VECTOR + ++#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_PMD ++#define RTE_LIBRTE_PMD_CRYPTO_SCHEDULER 1 ++#endif ++ + /* KNI defines */ + #define RTE_KNI_PREEMPT_DEFAULT 1 + + /****** driver defines ********/ + ++/* Packet prefetching in PMDs */ ++#define RTE_PMD_PACKET_PREFETCH 1 ++ + /* QuickAssist device */ + /* Max. 
number of QuickAssist devices which can be attached */ + #define RTE_PMD_QAT_MAX_PCI_DEVICES 48 diff --git a/dpdk/config/x86/meson.build b/dpdk/config/x86/meson.build index 8b0fa3e6f1..adc857ba28 100644 --- a/dpdk/config/x86/meson.build @@ -4051,6 +9963,28 @@ index 8b0fa3e6f1..adc857ba28 100644 endif base_flags = ['SSE', 'SSE2', 'SSE3','SSSE3', 'SSE4_1', 'SSE4_2'] +diff --git a/dpdk/devtools/check-forbidden-tokens.awk b/dpdk/devtools/check-forbidden-tokens.awk +index 8c89de3d4e..61ba707c9b 100755 +--- a/dpdk/devtools/check-forbidden-tokens.awk ++++ b/dpdk/devtools/check-forbidden-tokens.awk +@@ -54,7 +54,7 @@ BEGIN { + } + for (i in deny_folders) { + re = "^\\+\\+\\+ b/" deny_folders[i]; +- if ($0 ~ deny_folders[i]) { ++ if ($0 ~ re) { + in_file = 1 + last_file = $0 + } +@@ -62,7 +62,7 @@ BEGIN { + } + END { + if (count > 0) { +- print "Warning in " substr(last_file,6) ":" ++ print "Warning in " substr(last_file,7) ":" + print MESSAGE + exit RET_ON_FAIL + } diff --git a/dpdk/devtools/check-symbol-change.sh b/dpdk/devtools/check-symbol-change.sh index c5434f3bb0..ed2178e36e 100755 --- a/dpdk/devtools/check-symbol-change.sh @@ -4182,8 +10116,59 @@ index be565a1bea..52305fbb8c 100755 sed -ri="" 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config ) # Automatic configuration +diff --git a/dpdk/devtools/test-meson-builds.sh b/dpdk/devtools/test-meson-builds.sh +index 688567714b..8678a3d824 100755 +--- a/dpdk/devtools/test-meson-builds.sh ++++ b/dpdk/devtools/test-meson-builds.sh +@@ -38,20 +38,21 @@ else + fi + + default_path=$PATH +-default_pkgpath=$PKG_CONFIG_PATH + default_cppflags=$CPPFLAGS + default_cflags=$CFLAGS + default_ldflags=$LDFLAGS ++default_meson_options=$DPDK_MESON_OPTIONS + + load_env () # + { + targetcc=$1 ++ # reset variables before target-specific config + export PATH=$default_path +- export PKG_CONFIG_PATH=$default_pkgpath ++ unset PKG_CONFIG_PATH # global default makes no sense + export CPPFLAGS=$default_cppflags + export CFLAGS=$default_cflags + export LDFLAGS=$default_ldflags +- unset DPDK_MESON_OPTIONS ++ export DPDK_MESON_OPTIONS=$default_meson_options + command -v $targetcc >/dev/null 2>&1 || return 1 + DPDK_TARGET=$($targetcc -v 2>&1 | sed -n 's,^Target: ,,p') + . 
$srcdir/devtools/load-devel-config +@@ -134,19 +135,17 @@ done + + # Test installation of the x86-default target, to be used for checking + # the sample apps build using the pkg-config file for cflags and libs ++load_env cc + build_path=$(readlink -f $builds_dir/build-x86-default) + export DESTDIR=$build_path/install-root + $ninja_cmd -C $build_path install +- +-load_env cc + pc_file=$(find $DESTDIR -name libdpdk.pc) + export PKG_CONFIG_PATH=$(dirname $pc_file):$PKG_CONFIG_PATH +- + # if pkg-config defines the necessary flags, test building some examples + if pkg-config --define-prefix libdpdk >/dev/null 2>&1; then + export PKGCONF="pkg-config --define-prefix" + for example in cmdline helloworld l2fwd l3fwd skeleton timer; do + echo "## Building $example" +- $MAKE -C $DESTDIR/usr/local/share/dpdk/examples/$example clean all ++ $MAKE -C $DESTDIR/usr/local/share/dpdk/examples/$example clean shared static + done + fi diff --git a/dpdk/doc/api/doxy-api-index.md b/dpdk/doc/api/doxy-api-index.md -index dff496be09..d7c8bd24db 100644 +index dff496be09..5568dbc616 100644 --- a/dpdk/doc/api/doxy-api-index.md +++ b/dpdk/doc/api/doxy-api-index.md @@ -1,4 +1,4 @@ @@ -4192,6 +10177,30 @@ index dff496be09..d7c8bd24db 100644 === IPv4 */ +diff --git a/dpdk/drivers/net/ice/ice_rxtx.h b/dpdk/drivers/net/ice/ice_rxtx.h +index 9e3d2cd076..f80fdc1d6b 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx.h ++++ b/dpdk/drivers/net/ice/ice_rxtx.h +@@ -31,7 +31,7 @@ + + #define ICE_VPMD_RX_BURST 32 + #define ICE_VPMD_TX_BURST 32 +-#define ICE_RXQ_REARM_THRESH 32 ++#define ICE_RXQ_REARM_THRESH 64 + #define ICE_MAX_RX_BURST ICE_RXQ_REARM_THRESH + #define ICE_TX_MAX_FREE_BUF_SZ 64 + #define ICE_DESCS_PER_LOOP 4 +@@ -66,7 +66,7 @@ struct ice_rx_queue { + uint16_t rxrearm_start; /**< the idx we start the re-arming from */ + uint64_t mbuf_initializer; /**< value to init mbufs */ + +- uint8_t port_id; /* device port ID */ ++ uint16_t port_id; /* device port ID */ + uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ + uint16_t queue_id; /* RX queue index */ + uint16_t reg_idx; /* RX queue register index */ +@@ -109,7 +109,7 @@ struct ice_tx_queue { + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold reg. */ +- uint8_t port_id; /* Device port identifier. */ ++ uint16_t port_id; /* Device port identifier. */ + uint16_t queue_id; /* TX queue index. */ + uint32_t q_teid; /* TX schedule node id. */ + uint16_t reg_idx; +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +index be50677c2f..1acda1a383 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +@@ -232,43 +232,88 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = +- _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); ++ _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ +- const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- /* second 128-bits */ +- 0, 0, 0, 0, 0, 0, 0, 0, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); ++ const __m256i l3_l4_flags_shuf = ++ _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * second 128-bits ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ 
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = +- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | +- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_EIP_CKSUM_BAD); ++ _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK | ++ PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_OUTER_L4_CKSUM_MASK); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, +@@ -450,6 +495,15 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); ++ ++ __m256i l4_outer_mask = _mm256_set1_epi32(0x6); ++ __m256i l4_outer_flags = ++ _mm256_and_si256(l3_l4_flags, l4_outer_mask); ++ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); ++ ++ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); ++ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); ++ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h -index 5e6f89642a..46e3be98a6 100644 +index 5e6f89642a..97e26d2968 100644 --- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h @@ -29,6 +29,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs, @@ -21136,28 +45279,545 @@ index 5e6f89642a..46e3be98a6 100644 start->ol_flags = end->ol_flags; /* we need to strip crc for the whole packet */ start->pkt_len -= rxq->crc_len; -@@ -245,6 +246,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq) +@@ -243,8 +244,10 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq) + #define ICE_NO_VECTOR_FLAGS ( \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ DEV_TX_OFFLOAD_VLAN_INSERT | \ ++ DEV_TX_OFFLOAD_IPV4_CKSUM | \ DEV_TX_OFFLOAD_SCTP_CKSUM | \ DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ DEV_TX_OFFLOAD_TCP_CKSUM) static inline int +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c b/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c +index 9d5f1f194f..4e6b0a0aa8 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c ++++ 
b/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c +@@ -95,39 +95,67 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4], + * bit12 for RSS indication. + * bit13 for VLAN indication. + */ +- const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070, +- 0x3070, 0x3070); +- ++ const __m128i desc_mask = _mm_set_epi32(0x30f0, 0x30f0, ++ 0x30f0, 0x30f0); + const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD); + + /* map the checksum, rss and vlan fields to the checksum, rss + * and vlan flag + */ +- const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); ++ const __m128i cksum_flags = ++ _mm_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1); + + const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, +@@ -140,13 +168,21 @@ 
ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4], + flags = _mm_unpackhi_epi32(descs[0], descs[1]); + tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]); + tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc); +- tmp_desc = _mm_and_si128(flags, desc_mask); ++ tmp_desc = _mm_and_si128(tmp_desc, desc_mask); + + /* checksum flags */ + tmp_desc = _mm_srli_epi32(tmp_desc, 4); + flags = _mm_shuffle_epi8(cksum_flags, tmp_desc); + /* then we shift left 1 bit */ + flags = _mm_slli_epi32(flags, 1); ++ ++ __m128i l4_outer_mask = _mm_set_epi32(0x6, 0x6, 0x6, 0x6); ++ __m128i l4_outer_flags = _mm_and_si128(flags, l4_outer_mask); ++ l4_outer_flags = _mm_slli_epi32(l4_outer_flags, 20); ++ ++ __m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6); ++ __m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask); ++ flags = _mm_or_si128(l3_l4_flags, l4_outer_flags); + /* we need to mask out the reduntant bits introduced by RSS or + * VLAN fields. + */ +@@ -168,10 +204,10 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4], + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. + */ +- rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10); +- rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10); +- rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10); +- rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10); ++ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x30); ++ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x30); ++ rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x30); ++ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x30); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != +@@ -188,10 +224,10 @@ static inline void + ice_rx_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, + uint32_t *ptype_tbl) + { +- const __m128i ptype_mask = _mm_set_epi16(0, ICE_RX_FLEX_DESC_PTYPE_M, +- 0, ICE_RX_FLEX_DESC_PTYPE_M, +- 0, ICE_RX_FLEX_DESC_PTYPE_M, +- 0, ICE_RX_FLEX_DESC_PTYPE_M); ++ const __m128i ptype_mask = _mm_set_epi16(ICE_RX_FLEX_DESC_PTYPE_M, 0, ++ ICE_RX_FLEX_DESC_PTYPE_M, 0, ++ ICE_RX_FLEX_DESC_PTYPE_M, 0, ++ ICE_RX_FLEX_DESC_PTYPE_M, 0); + __m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]); + __m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]); + __m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23); +@@ -205,10 +241,11 @@ ice_rx_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, + } + + /** ++ * vPMD raw receive routine, only accept(nb_pkts >= ICE_DESCS_PER_LOOP) ++ * + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST +- * numbers of DD bits ++ * - floor align nb_pkts to a ICE_DESCS_PER_LOOP power-of-two + */ + static inline uint16_t + _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, +@@ -264,9 +301,6 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL, + 0x0000000200000002LL); + +- /* nb_pkts shall be less equal than ICE_MAX_RX_BURST */ +- nb_pkts = RTE_MIN(nb_pkts, ICE_MAX_RX_BURST); +- + /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP); + +@@ -454,15 +488,15 @@ ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, 
nb_pkts, NULL); + } + +-/* vPMD receive routine that reassembles scattered packets ++/** ++ * vPMD receive routine that reassembles single burst of 32 scattered packets ++ * + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST +- * numbers of DD bits + */ +-uint16_t +-ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +- uint16_t nb_pkts) ++static uint16_t ++ice_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) + { + struct ice_rx_queue *rxq = rx_queue; + uint8_t split_flags[ICE_VPMD_RX_BURST] = {0}; +@@ -496,6 +530,32 @@ ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + &split_flags[i]); + } + ++/** ++ * vPMD receive routine that reassembles scattered packets. ++ */ ++uint16_t ++ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) ++{ ++ uint16_t retval = 0; ++ ++ while (nb_pkts > ICE_VPMD_RX_BURST) { ++ uint16_t burst; ++ ++ burst = ice_recv_scattered_burst_vec(rx_queue, ++ rx_pkts + retval, ++ ICE_VPMD_RX_BURST); ++ retval += burst; ++ nb_pkts -= burst; ++ if (burst < ICE_VPMD_RX_BURST) ++ return retval; ++ } ++ ++ return retval + ice_recv_scattered_burst_vec(rx_queue, ++ rx_pkts + retval, ++ nb_pkts); ++} ++ + static inline void + ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt, + uint64_t flags) diff --git a/dpdk/drivers/net/ice/ice_switch_filter.c b/dpdk/drivers/net/ice/ice_switch_filter.c -index 4a9356b317..6c24731638 100644 +index 4a9356b317..03493ee464 100644 --- a/dpdk/drivers/net/ice/ice_switch_filter.c +++ b/dpdk/drivers/net/ice/ice_switch_filter.c -@@ -871,7 +871,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], +@@ -25,7 +25,8 @@ + #include "ice_generic_flow.h" + + +-#define MAX_QGRP_NUM_TYPE 7 ++#define MAX_QGRP_NUM_TYPE 7 ++#define MAX_INPUT_SET_BYTE 32 + + #define ICE_SW_INSET_ETHER ( \ + ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) +@@ -97,12 +98,47 @@ struct sw_meta { + + static struct ice_flow_parser ice_switch_dist_parser_os; + static struct ice_flow_parser ice_switch_dist_parser_comms; +-static struct ice_flow_parser ice_switch_perm_parser; ++static struct ice_flow_parser ice_switch_perm_parser_os; ++static struct ice_flow_parser ice_switch_perm_parser_comms; ++ ++static struct ++ice_pattern_match_item ice_switch_pattern_dist_os[] = { ++ {pattern_ethertype, ++ ICE_SW_INSET_ETHER, ICE_INSET_NONE}, ++ {pattern_eth_arp, ++ ICE_INSET_NONE, ICE_INSET_NONE}, ++ {pattern_eth_ipv4, ++ ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_udp, ++ ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_tcp, ++ ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, ++ {pattern_eth_ipv6, ++ ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, ++ {pattern_eth_ipv6_udp, ++ ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, ++ {pattern_eth_ipv6_tcp, ++ ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ++ ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ++ ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ++ ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_nvgre_eth_ipv4, ++ ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ++ ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, ++ {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ++ ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, ++}; + + 
static struct + ice_pattern_match_item ice_switch_pattern_dist_comms[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, ++ {pattern_eth_arp, ++ ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, +@@ -138,7 +174,7 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = { + }; + + static struct +-ice_pattern_match_item ice_switch_pattern_dist_os[] = { ++ice_pattern_match_item ice_switch_pattern_perm_os[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, + {pattern_eth_arp, +@@ -156,21 +192,25 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = { + {pattern_eth_ipv6_tcp, + ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, +- ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, ++ ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, +- ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, ++ ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, +- ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, ++ ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, +- ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, ++ ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, +- ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, ++ ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, +- ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, ++ ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, + }; + + static struct +-ice_pattern_match_item ice_switch_pattern_perm[] = { ++ice_pattern_match_item ice_switch_pattern_perm_comms[] = { ++ {pattern_ethertype, ++ ICE_SW_INSET_ETHER, ICE_INSET_NONE}, ++ {pattern_eth_arp, ++ ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, +@@ -320,6 +360,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + uint64_t input_set = ICE_INSET_NONE; ++ uint16_t input_set_byte = 0; + uint16_t j, t = 0; + uint16_t tunnel_valid = 0; + +@@ -369,6 +410,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + m->src_addr[j] = + eth_mask->src.addr_bytes[j]; + i = 1; ++ input_set_byte++; + } + if (eth_mask->dst.addr_bytes[j] == + UINT8_MAX) { +@@ -377,6 +419,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + m->dst_addr[j] = + eth_mask->dst.addr_bytes[j]; + i = 1; ++ input_set_byte++; + } + } + if (i) +@@ -387,6 +430,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + eth_spec->type; + list[t].m_u.ethertype.ethtype_id = + UINT16_MAX; ++ input_set_byte += 2; + t++; + } + } else if (!eth_spec && !eth_mask) { +@@ -458,30 +502,35 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + ipv4_spec->hdr.src_addr; + list[t].m_u.ipv4_hdr.src_addr = + UINT32_MAX; ++ input_set_byte += 2; + } + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) { + list[t].h_u.ipv4_hdr.dst_addr = + ipv4_spec->hdr.dst_addr; + list[t].m_u.ipv4_hdr.dst_addr = + UINT32_MAX; ++ input_set_byte += 2; + } + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) { + list[t].h_u.ipv4_hdr.time_to_live = + ipv4_spec->hdr.time_to_live; + list[t].m_u.ipv4_hdr.time_to_live = + UINT8_MAX; ++ input_set_byte++; + } + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) { + list[t].h_u.ipv4_hdr.protocol = + 
ipv4_spec->hdr.next_proto_id; + list[t].m_u.ipv4_hdr.protocol = + UINT8_MAX; ++ input_set_byte++; + } + if (ipv4_mask->hdr.type_of_service == + UINT8_MAX) { + list[t].h_u.ipv4_hdr.tos = + ipv4_spec->hdr.type_of_service; + list[t].m_u.ipv4_hdr.tos = UINT8_MAX; ++ input_set_byte++; + } + t++; + } else if (!ipv4_spec && !ipv4_mask) { +@@ -563,6 +612,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + ipv6_spec->hdr.src_addr[j]; + s->src_addr[j] = + ipv6_mask->hdr.src_addr[j]; ++ input_set_byte++; + } + if (ipv6_mask->hdr.dst_addr[j] == + UINT8_MAX) { +@@ -570,17 +620,20 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + ipv6_spec->hdr.dst_addr[j]; + s->dst_addr[j] = + ipv6_mask->hdr.dst_addr[j]; ++ input_set_byte++; + } + } + if (ipv6_mask->hdr.proto == UINT8_MAX) { + f->next_hdr = + ipv6_spec->hdr.proto; + s->next_hdr = UINT8_MAX; ++ input_set_byte++; + } + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) { + f->hop_limit = + ipv6_spec->hdr.hop_limits; + s->hop_limit = UINT8_MAX; ++ input_set_byte++; + } + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32 +@@ -597,6 +650,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val); + vtf.u.fld.tc = UINT8_MAX; + s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val); ++ input_set_byte += 4; + } + t++; + } else if (!ipv6_spec && !ipv6_mask) { +@@ -648,14 +702,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + udp_spec->hdr.src_port; + list[t].m_u.l4_hdr.src_port = + udp_mask->hdr.src_port; ++ input_set_byte += 2; + } + if (udp_mask->hdr.dst_port == UINT16_MAX) { + list[t].h_u.l4_hdr.dst_port = + udp_spec->hdr.dst_port; + list[t].m_u.l4_hdr.dst_port = + udp_mask->hdr.dst_port; ++ input_set_byte += 2; + } +- t++; ++ t++; + } else if (!udp_spec && !udp_mask) { + list[t].type = ICE_UDP_ILOS; + } +@@ -705,12 +761,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + tcp_spec->hdr.src_port; + list[t].m_u.l4_hdr.src_port = + tcp_mask->hdr.src_port; ++ input_set_byte += 2; + } + if (tcp_mask->hdr.dst_port == UINT16_MAX) { + list[t].h_u.l4_hdr.dst_port = + tcp_spec->hdr.dst_port; + list[t].m_u.l4_hdr.dst_port = + tcp_mask->hdr.dst_port; ++ input_set_byte += 2; + } + t++; + } else if (!tcp_spec && !tcp_mask) { +@@ -756,12 +814,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + sctp_spec->hdr.src_port; + list[t].m_u.sctp_hdr.src_port = + sctp_mask->hdr.src_port; ++ input_set_byte += 2; + } + if (sctp_mask->hdr.dst_port == UINT16_MAX) { + list[t].h_u.sctp_hdr.dst_port = + sctp_spec->hdr.dst_port; + list[t].m_u.sctp_hdr.dst_port = + sctp_mask->hdr.dst_port; ++ input_set_byte += 2; + } + t++; + } else if (!sctp_spec && !sctp_mask) { +@@ -799,6 +859,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + UINT32_MAX; + input_set |= + ICE_INSET_TUN_VXLAN_VNI; ++ input_set_byte += 2; + } + t++; + } else if (!vxlan_spec && !vxlan_mask) { +@@ -835,6 +896,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + UINT32_MAX; + input_set |= + ICE_INSET_TUN_NVGRE_TNI; ++ input_set_byte += 2; + } + t++; + } else if (!nvgre_spec && !nvgre_mask) { +@@ -865,13 +927,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + list[t].m_u.vlan_hdr.vlan = + UINT16_MAX; + input_set |= ICE_INSET_VLAN_OUTER; ++ input_set_byte += 2; + } + if (vlan_mask->inner_type == UINT16_MAX) { + list[t].h_u.vlan_hdr.type = vlan_spec->inner_type; list[t].m_u.vlan_hdr.type = UINT16_MAX; - input_set |= ICE_INSET_VLAN_OUTER; + input_set |= ICE_INSET_ETHERTYPE; ++ 
input_set_byte += 2; } t++; } else if (!vlan_spec && !vlan_mask) { -@@ -937,6 +937,8 @@ ice_switch_parse_action(struct ice_pf *pf, +@@ -906,6 +970,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], + } + } + ++ if (input_set_byte > MAX_INPUT_SET_BYTE) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "too much input set"); ++ return -ENOTSUP; ++ } ++ + *lkups_num = t; + + return input_set; +@@ -937,6 +1009,8 @@ ice_switch_parse_action(struct ice_pf *pf, switch (action_type) { case RTE_FLOW_ACTION_TYPE_RSS: act_qgrop = action->conf; @@ -21166,7 +45826,7 @@ index 4a9356b317..6c24731638 100644 rule_info->sw_act.fltr_act = ICE_FWD_TO_QGRP; rule_info->sw_act.fwd_id.q_id = -@@ -997,6 +999,46 @@ ice_switch_parse_action(struct ice_pf *pf, +@@ -997,6 +1071,46 @@ ice_switch_parse_action(struct ice_pf *pf, return -rte_errno; } @@ -21199,7 +45859,7 @@ index 4a9356b317..6c24731638 100644 + } + } + -+ if (actions_num > 1) { ++ if (actions_num != 1) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, @@ -21213,7 +45873,7 @@ index 4a9356b317..6c24731638 100644 static int ice_switch_parse_pattern_action(struct ice_adapter *ad, struct ice_pattern_match_item *array, -@@ -1015,7 +1057,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, +@@ -1015,7 +1129,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, uint16_t lkups_num = 0; const struct rte_flow_item *item = pattern; uint16_t item_num = 0; @@ -21223,7 +45883,7 @@ index 4a9356b317..6c24731638 100644 struct ice_pattern_match_item *pattern_match_item = NULL; for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { -@@ -1051,6 +1094,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, +@@ -1051,6 +1166,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, return -rte_errno; } @@ -21231,7 +45891,7 @@ index 4a9356b317..6c24731638 100644 rule_info.tun_type = tun_type; sw_meta_ptr = -@@ -1081,6 +1125,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, +@@ -1081,6 +1197,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, goto error; } @@ -21246,7 +45906,7 @@ index 4a9356b317..6c24731638 100644 ret = ice_switch_parse_action(pf, actions, error, &rule_info); if (ret) { rte_flow_error_set(error, EINVAL, -@@ -1088,10 +1140,17 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, +@@ -1088,10 +1212,17 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, "Invalid input action"); goto error; } @@ -21268,8 +45928,116 @@ index 4a9356b317..6c24731638 100644 rte_free(pattern_match_item); return 0; +@@ -1123,7 +1254,7 @@ ice_switch_init(struct ice_adapter *ad) + { + int ret = 0; + struct ice_flow_parser *dist_parser; +- struct ice_flow_parser *perm_parser = &ice_switch_perm_parser; ++ struct ice_flow_parser *perm_parser; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + dist_parser = &ice_switch_dist_parser_comms; +@@ -1132,10 +1263,16 @@ ice_switch_init(struct ice_adapter *ad) + else + return -EINVAL; + +- if (ad->devargs.pipe_mode_support) ++ if (ad->devargs.pipe_mode_support) { ++ if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) ++ perm_parser = &ice_switch_perm_parser_comms; ++ else ++ perm_parser = &ice_switch_perm_parser_os; ++ + ret = ice_register_parser(perm_parser, ad); +- else ++ } else { + ret = ice_register_parser(dist_parser, ad); ++ } + return ret; + } + +@@ -1143,17 +1280,25 @@ static void + ice_switch_uninit(struct ice_adapter *ad) + { + struct ice_flow_parser *dist_parser; +- struct ice_flow_parser *perm_parser = 
&ice_switch_perm_parser; ++ struct ice_flow_parser *perm_parser; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + dist_parser = &ice_switch_dist_parser_comms; +- else ++ else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT) + dist_parser = &ice_switch_dist_parser_os; ++ else ++ return; ++ ++ if (ad->devargs.pipe_mode_support) { ++ if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) ++ perm_parser = &ice_switch_perm_parser_comms; ++ else ++ perm_parser = &ice_switch_perm_parser_os; + +- if (ad->devargs.pipe_mode_support) + ice_unregister_parser(perm_parser, ad); +- else ++ } else { + ice_unregister_parser(dist_parser, ad); ++ } + } + + static struct +@@ -1186,10 +1331,19 @@ ice_flow_parser ice_switch_dist_parser_comms = { + }; + + static struct +-ice_flow_parser ice_switch_perm_parser = { ++ice_flow_parser ice_switch_perm_parser_os = { ++ .engine = &ice_switch_engine, ++ .array = ice_switch_pattern_perm_os, ++ .array_len = RTE_DIM(ice_switch_pattern_perm_os), ++ .parse_pattern_action = ice_switch_parse_pattern_action, ++ .stage = ICE_FLOW_STAGE_PERMISSION, ++}; ++ ++static struct ++ice_flow_parser ice_switch_perm_parser_comms = { + .engine = &ice_switch_engine, +- .array = ice_switch_pattern_perm, +- .array_len = RTE_DIM(ice_switch_pattern_perm), ++ .array = ice_switch_pattern_perm_comms, ++ .array_len = RTE_DIM(ice_switch_pattern_perm_comms), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_PERMISSION, + }; +diff --git a/dpdk/drivers/net/ifc/base/ifcvf.h b/dpdk/drivers/net/ifc/base/ifcvf.h +index 9be2770fea..3f7497bd02 100644 +--- a/dpdk/drivers/net/ifc/base/ifcvf.h ++++ b/dpdk/drivers/net/ifc/base/ifcvf.h +@@ -13,7 +13,10 @@ + #define IFCVF_SUBSYS_DEVICE_ID 0x001A + + #define IFCVF_MAX_QUEUES 1 ++ ++#ifndef VIRTIO_F_IOMMU_PLATFORM + #define VIRTIO_F_IOMMU_PLATFORM 33 ++#endif + + /* Common configuration */ + #define IFCVF_PCI_CAP_COMMON_CFG 1 +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h +index 9b0cf309c8..a6815a9cca 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h +@@ -640,6 +640,7 @@ ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev, + */ + #define IPN3KE_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2) ++#define IPN3KE_ETH_MAX_LEN (RTE_ETHER_MTU + IPN3KE_ETH_OVERHEAD) + + #define IPN3KE_MAC_FRAME_SIZE_MAX 9728 + #define IPN3KE_MAC_RX_FRAME_MAXLENGTH 0x00AE diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c -index 8d9ebef915..b673c49149 100644 +index 8d9ebef915..d7dada7bde 100644 --- a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +++ b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c @@ -701,7 +701,7 @@ struct ipn3ke_rpst_hw_port_stats *hw_stats) @@ -21291,11 +46059,33 @@ index 8d9ebef915..b673c49149 100644 NULL, ipn3ke_rpst_scan_handle_request, NULL); if (ret) { +@@ -2793,7 +2794,7 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu) + return -EBUSY; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > IPN3KE_ETH_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME); + else diff --git a/dpdk/drivers/net/ipn3ke/meson.build b/dpdk/drivers/net/ipn3ke/meson.build -index e3c8a6768c..bfec592aba 100644 +index e3c8a6768c..0d1dc9866b 100644 --- a/dpdk/drivers/net/ipn3ke/meson.build +++ b/dpdk/drivers/net/ipn3ke/meson.build -@@ -21,9 +21,11 @@ endif +@@ -9,11 +9,7 @@ + # rte_eth_switch_domain_free() + # + +-dep = 
dependency('libfdt', required: false) +-if not dep.found() +- dep = cc.find_library('libfdt', required: false) +-endif +-if not dep.found() ++if has_libfdt == 0 + build = false + reason = 'missing dependency, "libfdt"' + endif +@@ -21,9 +17,11 @@ endif if build allow_experimental_apis = true @@ -21398,7 +46188,7 @@ index ff8f7b2611..33e7c3c215 100644 #ifndef _IXGBE_API_H_ diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_common.c b/dpdk/drivers/net/ixgbe/base/ixgbe_common.c -index 62ff767230..4eb98dc198 100644 +index 62ff767230..9e7182eb33 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_common.c +++ b/dpdk/drivers/net/ixgbe/base/ixgbe_common.c @@ -1,5 +1,5 @@ @@ -21408,6 +46198,30 @@ index 62ff767230..4eb98dc198 100644 */ #include "ixgbe_common.h" +@@ -3777,11 +3777,11 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); +- mpsar_lo = 0; ++ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); +- mpsar_hi = 0; ++ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + } + } else if (vmdq < 32) { + mpsar_lo &= ~(1 << vmdq); +@@ -4600,7 +4600,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + * Read Flash command requires reading buffer length from + * two byes instead of one byte + */ +- if (resp->cmd == 0x30) { ++ if (resp->cmd == IXGBE_HOST_INTERFACE_FLASH_READ_CMD || ++ resp->cmd == IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD) { + for (; bi < dword_len + 2; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + bi); diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_common.h b/dpdk/drivers/net/ixgbe/base/ixgbe_common.h index 3bb2475119..7a31f088c4 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_common.h @@ -21442,7 +46256,7 @@ index 503d06018f..c2a1013ac0 100644 #ifndef _IXGBE_DCB_H_ diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c b/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c -index d87cb58857..bb309e28fd 100644 +index d87cb58857..dc7b905c5d 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c +++ b/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c @@ -1,5 +1,5 @@ @@ -21452,6 +46266,21 @@ index d87cb58857..bb309e28fd 100644 */ +@@ -36,14 +36,6 @@ s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); +- +-#if 0 +- /* Can we get rid of these?? Consequently, getting rid +- * of the tc_stats structure. 
+- */ +- tc_stats_array[up]->in_overflow_discards = 0; +- tc_stats_array[up]->out_overflow_discards = 0; +-#endif + } + + return IXGBE_SUCCESS; diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h b/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h index 1a14744482..8f36881378 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h @@ -21563,7 +46392,7 @@ index f1605f2cc9..a06c3be170 100644 #ifndef _IXGBE_PHY_H_ diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h -index 077b8f01c7..15e9370105 100644 +index 077b8f01c7..bc927a34ee 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h +++ b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h @@ -1,5 +1,5 @@ @@ -21573,6 +46402,23 @@ index 077b8f01c7..15e9370105 100644 */ #ifndef _IXGBE_TYPE_H_ +@@ -4364,4 +4364,16 @@ struct ixgbe_hw { + #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) + ++/* Code Command (Flash I/F Interface) */ ++#define IXGBE_HOST_INTERFACE_FLASH_READ_CMD 0x30 ++#define IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD 0x31 ++#define IXGBE_HOST_INTERFACE_FLASH_WRITE_CMD 0x32 ++#define IXGBE_HOST_INTERFACE_SHADOW_RAM_WRITE_CMD 0x33 ++#define IXGBE_HOST_INTERFACE_FLASH_MODULE_UPDATE_CMD 0x34 ++#define IXGBE_HOST_INTERFACE_FLASH_BLOCK_EREASE_CMD 0x35 ++#define IXGBE_HOST_INTERFACE_SHADOW_RAM_DUMP_CMD 0x36 ++#define IXGBE_HOST_INTERFACE_FLASH_INFO_CMD 0x37 ++#define IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD 0x38 ++#define IXGBE_HOST_INTERFACE_MASK_CMD 0x000000FF ++ + #endif /* _IXGBE_TYPE_H_ */ diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c index aac37822e4..7f69ece107 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c @@ -21596,7 +46442,7 @@ index dba643fced..be58b4f76e 100644 #ifndef _IXGBE_VF_H_ diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c b/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c -index f00f0eae7e..d65f47c181 100644 +index f00f0eae7e..d91633a2da 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c +++ b/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c @@ -1,5 +1,5 @@ @@ -21606,6 +46452,15 @@ index f00f0eae7e..d65f47c181 100644 */ #include "ixgbe_x540.h" +@@ -784,7 +784,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) + * bits in the SW_FW_SYNC register. 
+ */ + if (ixgbe_get_swfw_sync_semaphore(hw)) { +- DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); ++ DEBUGOUT("Failed to get NVM semaphore and register semaphore while forcefully ignoring FW semaphore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); + return IXGBE_ERR_SWFW_SYNC; + } + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h b/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h index 231dfe56e5..ba79847d11 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h @@ -21618,7 +46473,7 @@ index 231dfe56e5..ba79847d11 100644 #ifndef _IXGBE_X540_H_ diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c b/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c -index 930a61a20b..3de406fd35 100644 +index 930a61a20b..9fa999e01d 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c +++ b/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c @@ -1,5 +1,5 @@ @@ -21628,6 +46483,29 @@ index 930a61a20b..3de406fd35 100644 */ #include "ixgbe_x550.h" +@@ -1891,7 +1891,14 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { ++ *autoneg = true; ++ + switch (hw->phy.type) { ++ case ixgbe_phy_x550em_xfi: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL | ++ IXGBE_LINK_SPEED_10GB_FULL; ++ *autoneg = false; ++ break; + case ixgbe_phy_ext_1g_t: + #ifdef PREBOOT_SUPPORT + *speed = IXGBE_LINK_SPEED_1GB_FULL; +@@ -1925,7 +1932,6 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + IXGBE_LINK_SPEED_1GB_FULL; + break; + } +- *autoneg = true; + } + + return IXGBE_SUCCESS; diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h b/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h index 3bd98f243d..10086ab423 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h @@ -21651,7 +46529,7 @@ index bbd0f51ea5..20677ab034 100644 sources = [ 'ixgbe_82598.c', diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -index 2c6fd0f131..50edb69cb2 100644 +index 2c6fd0f131..dcd7291b97 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c @@ -229,7 +229,9 @@ static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); @@ -21681,6 +46559,15 @@ index 2c6fd0f131..50edb69cb2 100644 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = +@@ -1091,7 +1095,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); + uint32_t ctrl_ext; + uint16_t csum; +- int diag, i; ++ int diag, i, ret; + + PMD_INIT_FUNC_TRACE(); + @@ -1126,6 +1130,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) return 0; } @@ -21708,7 +46595,23 @@ index 2c6fd0f131..50edb69cb2 100644 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ /* -@@ -1301,8 +1305,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -1270,7 +1274,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + memset(hwstrip, 0, sizeof(*hwstrip)); + + /* initialize PF if max_vfs not zero */ +- ixgbe_pf_host_init(eth_dev); ++ ret = ixgbe_pf_host_init(eth_dev); ++ if (ret) { ++ rte_free(eth_dev->data->mac_addrs); ++ eth_dev->data->mac_addrs = NULL; ++ rte_free(eth_dev->data->hash_mac_addrs); ++ eth_dev->data->hash_mac_addrs = NULL; ++ 
return ret; ++ } + + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + /* let hardware know driver is loaded */ +@@ -1301,8 +1312,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* enable support intr */ ixgbe_enable_intr(eth_dev); @@ -21717,7 +46620,7 @@ index 2c6fd0f131..50edb69cb2 100644 /* initialize filter info */ memset(filter_info, 0, sizeof(struct ixgbe_filter_info)); -@@ -1564,6 +1566,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) +@@ -1564,6 +1573,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) { int diag; uint32_t tc, tcs; @@ -21725,7 +46628,7 @@ index 2c6fd0f131..50edb69cb2 100644 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = -@@ -1604,6 +1607,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) +@@ -1604,6 +1614,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) return 0; } @@ -21733,7 +46636,7 @@ index 2c6fd0f131..50edb69cb2 100644 ixgbevf_parse_devargs(eth_dev->data->dev_private, pci_dev->device.devargs); -@@ -2530,6 +2534,41 @@ ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, +@@ -2530,6 +2541,41 @@ ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, return 0; } @@ -21775,7 +46678,7 @@ index 2c6fd0f131..50edb69cb2 100644 /* * Configure device link speed and setup link. * It returns 0 on success. -@@ -2558,19 +2597,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) +@@ -2558,19 +2604,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -21796,7 +46699,7 @@ index 2c6fd0f131..50edb69cb2 100644 /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); -@@ -2666,6 +2694,12 @@ ixgbe_dev_start(struct rte_eth_dev *dev) +@@ -2666,6 +2701,12 @@ ixgbe_dev_start(struct rte_eth_dev *dev) ixgbe_restore_statistics_mapping(dev); @@ -21809,7 +46712,7 @@ index 2c6fd0f131..50edb69cb2 100644 err = ixgbe_dev_rxtx_start(dev); if (err < 0) { PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); -@@ -2724,7 +2758,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev) +@@ -2724,7 +2765,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev) } link_speeds = &dev->data->dev_conf.link_speeds; @@ -21822,7 +46725,7 @@ index 2c6fd0f131..50edb69cb2 100644 PMD_INIT_LOG(ERR, "Invalid link setting"); goto error; } -@@ -2801,6 +2839,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev) +@@ -2801,6 +2846,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev) "please call hierarchy_commit() " "before starting the port"); @@ -21834,7 +46737,7 @@ index 2c6fd0f131..50edb69cb2 100644 /* * Update link status right before return, because it may * start link configuration process in a separate thread. 
-@@ -2842,7 +2885,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) +@@ -2842,7 +2892,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -21843,16 +46746,7 @@ index 2c6fd0f131..50edb69cb2 100644 /* disable interrupts */ ixgbe_disable_intr(hw); -@@ -2893,6 +2936,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) - - adapter->rss_reta_updated = 0; - -+ adapter->mac_ctrl_frame_fwd = 0; -+ - hw->adapter_stopped = true; - } - -@@ -4095,16 +4140,46 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, +@@ -4095,16 +4145,46 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, return ret_val; } @@ -21901,7 +46795,7 @@ index 2c6fd0f131..50edb69cb2 100644 speed = hw->phy.autoneg_advertised; if (!speed) ixgbe_get_link_capabilities(hw, &speed, &autoneg); -@@ -4112,6 +4187,40 @@ ixgbe_dev_setup_link_alarm_handler(void *param) +@@ -4112,6 +4192,40 @@ ixgbe_dev_setup_link_alarm_handler(void *param) ixgbe_setup_link(hw, speed, true); intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; @@ -21942,7 +46836,7 @@ index 2c6fd0f131..50edb69cb2 100644 } /* return 0 means link status changed, -1 means not changed */ -@@ -4120,6 +4229,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -4120,6 +4234,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait_to_complete, int vf) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -21950,7 +46844,7 @@ index 2c6fd0f131..50edb69cb2 100644 struct rte_eth_link link; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; struct ixgbe_interrupt *intr = -@@ -4133,7 +4243,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -4133,7 +4248,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, link.link_status = ETH_LINK_DOWN; link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; @@ -21960,7 +46854,7 @@ index 2c6fd0f131..50edb69cb2 100644 hw->mac.get_link_status = true; -@@ -4144,6 +4255,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -4144,6 +4260,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) wait = 0; @@ -21972,7 +46866,7 @@ index 2c6fd0f131..50edb69cb2 100644 if (vf) diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); else -@@ -4155,15 +4271,34 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -4155,15 +4276,34 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &link); } @@ -22013,7 +46907,30 @@ index 2c6fd0f131..50edb69cb2 100644 } return rte_eth_linkstatus_set(dev, &link); } -@@ -4646,10 +4781,10 @@ static int +@@ -4181,6 +4321,10 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + link.link_speed = ETH_SPEED_NUM_100M; + break; + ++ case IXGBE_LINK_SPEED_10_FULL: ++ link.link_speed = ETH_SPEED_NUM_10M; ++ break; ++ + case IXGBE_LINK_SPEED_100_FULL: + link.link_speed = ETH_SPEED_NUM_100M; + break; +@@ -4615,6 +4759,11 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + * MFLCN register. 
+ */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); ++ if (mflcn_reg & IXGBE_MFLCN_PMCF) ++ fc_conf->mac_ctrl_frame_fwd = 1; ++ else ++ fc_conf->mac_ctrl_frame_fwd = 0; ++ + if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) + rx_pause = 1; + else +@@ -4646,10 +4795,10 @@ static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { struct ixgbe_hw *hw; @@ -22025,7 +46942,7 @@ index 2c6fd0f131..50edb69cb2 100644 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { ixgbe_fc_none, ixgbe_fc_rx_pause, -@@ -4682,31 +4817,14 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +@@ -4682,31 +4831,14 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) hw->fc.low_water[0] = fc_conf->low_water; hw->fc.send_xon = fc_conf->send_xon; hw->fc.disable_fc_autoneg = !fc_conf->autoneg; @@ -22063,7 +46980,16 @@ index 2c6fd0f131..50edb69cb2 100644 } /** -@@ -5207,7 +5325,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) +@@ -5090,7 +5222,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + + /* switch to jumbo mode if needed */ +- if (frame_size > RTE_ETHER_MAX_LEN) { ++ if (frame_size > IXGBE_ETH_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + hlreg0 |= IXGBE_HLREG0_JUMBOEN; +@@ -5207,13 +5339,19 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Stop the link setup handler before resetting the HW. */ @@ -22071,8 +46997,21 @@ index 2c6fd0f131..50edb69cb2 100644 + ixgbe_dev_wait_setup_link_complete(dev, 0); err = hw->mac.ops.reset_hw(hw); - if (err) { -@@ -5305,7 +5423,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) +- if (err) { ++ ++ /** ++ * In this case, reuses the MAC address assigned by VF ++ * initialization. ++ */ ++ if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { + PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); + return err; + } ++ + hw->mac.get_link_status = true; + + /* negotiate mailbox API version to use with the PF. */ +@@ -5305,7 +5443,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -22081,11 +47020,31 @@ index 2c6fd0f131..50edb69cb2 100644 ixgbevf_intr_disable(dev); +@@ -6520,7 +6658,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + * prior to 3.11.33 which contains the following change: + * "ixgbe: Enable jumbo frames support w/ SR-IOV" + */ +- ixgbevf_rlpml_set_vf(hw, max_frame); ++ if (ixgbevf_rlpml_set_vf(hw, max_frame)) ++ return -EINVAL; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h -index 76a1b9d184..0334c226be 100644 +index 76a1b9d184..e406b754e8 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h -@@ -510,6 +510,9 @@ struct ixgbe_adapter { +@@ -104,6 +104,9 @@ + /* The overhead from MTU to max frame size. */ + #define IXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + ++/* The max frame size with default MTU */ ++#define IXGBE_ETH_MAX_LEN (RTE_ETHER_MTU + IXGBE_ETH_OVERHEAD) ++ + /* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/ + #define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000 + /* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/ +@@ -510,6 +513,9 @@ struct ixgbe_adapter { * mailbox status) link status. 
*/ uint8_t pflink_fullchk; @@ -22095,8 +47054,242 @@ index 76a1b9d184..0334c226be 100644 }; struct ixgbe_vf_representor { +@@ -709,7 +715,7 @@ void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev); + + void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev); + +-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev); ++int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev); + + void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev); + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_fdir.c b/dpdk/drivers/net/ixgbe/ixgbe_fdir.c +index 166dae1e03..9ff5aa8c72 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_fdir.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_fdir.c +@@ -515,9 +515,30 @@ ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset) + { + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct ixgbe_hw_fdir_info *fdir_info = ++ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint32_t fdirctrl; + int i; + ++ if (fdir_info->flex_bytes_offset == offset) ++ return 0; ++ ++ /** ++ * 82599 adapters flow director init flow cannot be restarted, ++ * Workaround 82599 silicon errata by performing the following steps ++ * before re-writing the FDIRCTRL control register with the same value. ++ * - write 1 to bit 8 of FDIRCMD register & ++ * - write 0 to bit 8 of FDIRCMD register ++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | ++ IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ++ ~IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + + fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK; +@@ -532,6 +553,14 @@ ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + break; + msec_delay(1); + } ++ ++ if (i >= IXGBE_FDIR_INIT_DONE_POLL) { ++ PMD_DRV_LOG(ERR, "Flow Director poll time exceeded!"); ++ return -ETIMEDOUT; ++ } ++ ++ fdir_info->flex_bytes_offset = offset; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/dpdk/drivers/net/ixgbe/ixgbe_flow.c +index b2a2bfc02f..d539951896 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_flow.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_flow.c +@@ -870,15 +870,6 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, + if (ret) + return ret; + +- /* Ixgbe doesn't support MAC address. 
*/ +- if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { +- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); +- rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ITEM, +- NULL, "Not supported by ethertype filter"); +- return -rte_errno; +- } +- + if (filter->queue >= dev->data->nb_rx_queues) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, +@@ -3146,13 +3137,13 @@ ixgbe_flow_create(struct rte_eth_dev *dev, + rte_memcpy(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct ixgbe_hw_fdir_mask)); +- fdir_info->flex_bytes_offset = +- fdir_rule.flex_bytes_offset; + +- if (fdir_rule.mask.flex_bytes_mask) +- ixgbe_fdir_set_flexbytes_offset(dev, ++ if (fdir_rule.mask.flex_bytes_mask) { ++ ret = ixgbe_fdir_set_flexbytes_offset(dev, + fdir_rule.flex_bytes_offset); +- ++ if (ret) ++ goto out; ++ } + ret = ixgbe_fdir_set_input_mask(dev); + if (ret) + goto out; +@@ -3170,8 +3161,9 @@ ixgbe_flow_create(struct rte_eth_dev *dev, + if (ret) + goto out; + +- if (fdir_info->flex_bytes_offset != +- fdir_rule.flex_bytes_offset) ++ if (fdir_rule.mask.flex_bytes_mask && ++ fdir_info->flex_bytes_offset != ++ fdir_rule.flex_bytes_offset) + goto out; + } + } +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_pf.c b/dpdk/drivers/net/ixgbe/ixgbe_pf.c +index d0d85e1386..2e46e30b79 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_pf.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_pf.c +@@ -66,7 +66,7 @@ ixgbe_mb_intr_setup(struct rte_eth_dev *dev) + return 0; + } + +-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) ++int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) + { + struct ixgbe_vf_info **vfinfo = + IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); +@@ -78,19 +78,27 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint16_t vf_num; + uint8_t nb_queue; ++ int ret = 0; + + PMD_INIT_FUNC_TRACE(); + + RTE_ETH_DEV_SRIOV(eth_dev).active = 0; + vf_num = dev_num_vf(eth_dev); + if (vf_num == 0) +- return; ++ return ret; + + *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0); + if (*vfinfo == NULL) + rte_panic("Cannot allocate memory for private VF data\n"); + +- rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id); ++ ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id); ++ if (ret) { ++ PMD_INIT_LOG(ERR, ++ "failed to allocate switch domain for device %d", ret); ++ rte_free(*vfinfo); ++ *vfinfo = NULL; ++ return ret; ++ } + + memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info)); + memset(uta_info, 0, sizeof(struct ixgbe_uta_info)); +@@ -118,6 +126,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) + + /* set mb interrupt mask */ + ixgbe_mb_intr_setup(eth_dev); ++ ++ return ret; + } + + void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) +@@ -542,16 +552,44 @@ static int + ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) + { + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint32_t new_mtu = msgbuf[1]; ++ uint32_t max_frame = msgbuf[1]; + uint32_t max_frs; +- int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; ++ uint32_t hlreg0; + + /* X540 and X550 support jumbo frames in IOV mode */ + if (hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && +- hw->mac.type != ixgbe_mac_X550EM_a) +- return -1; ++ hw->mac.type != ixgbe_mac_X550EM_a) { ++ struct ixgbe_vf_info *vfinfo = ++ 
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); ++ ++ switch (vfinfo[vf].api_version) { ++ case ixgbe_mbox_api_11: ++ case ixgbe_mbox_api_12: ++ case ixgbe_mbox_api_13: ++ /** ++ * Version 1.1&1.2&1.3 supports jumbo frames on VFs ++ * if PF has jumbo frames enabled which means legacy ++ * VFs are disabled. ++ */ ++ if (dev->data->dev_conf.rxmode.max_rx_pkt_len > ++ IXGBE_ETH_MAX_LEN) ++ break; ++ /* fall through */ ++ default: ++ /** ++ * If the PF or VF are running w/ jumbo frames enabled, ++ * we return -1 as we cannot support jumbo frames on ++ * legacy VFs. ++ */ ++ if (max_frame > IXGBE_ETH_MAX_LEN || ++ dev->data->dev_conf.rxmode.max_rx_pkt_len > ++ IXGBE_ETH_MAX_LEN) ++ return -1; ++ break; ++ } ++ } + + if (max_frame < RTE_ETHER_MIN_LEN || + max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) +@@ -559,8 +597,20 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms + + max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & + IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; +- if (max_frs < new_mtu) { +- max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; ++ if (max_frs < max_frame) { ++ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); ++ if (max_frame > IXGBE_ETH_MAX_LEN) { ++ dev->data->dev_conf.rxmode.offloads |= ++ DEV_RX_OFFLOAD_JUMBO_FRAME; ++ hlreg0 |= IXGBE_HLREG0_JUMBOEN; ++ } else { ++ dev->data->dev_conf.rxmode.offloads &= ++ ~DEV_RX_OFFLOAD_JUMBO_FRAME; ++ hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); ++ ++ max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -index fa572d184d..a7b24cd053 100644 +index fa572d184d..8b9b009396 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c @@ -87,11 +87,6 @@ @@ -22124,8 +47317,114 @@ index fa572d184d..a7b24cd053 100644 #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { +@@ -1439,7 +1440,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) + } + + static inline uint64_t +-rx_desc_error_to_pkt_flags(uint32_t rx_status) ++rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info, ++ uint8_t rx_udp_csum_zero_err) + { + uint64_t pkt_flags; + +@@ -1456,6 +1458,15 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) + pkt_flags = error_to_pkt_flags_map[(rx_status >> + IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; + ++ /* Mask out the bad UDP checksum error if the hardware has UDP zero ++ * checksum error issue, so that the software application will then ++ * have to recompute the checksum itself if needed. 
++ */ ++ if ((rx_status & IXGBE_RXDADV_ERR_TCPE) && ++ (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && ++ rx_udp_csum_zero_err) ++ pkt_flags &= ~PKT_RX_L4_CKSUM_BAD; ++ + if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) && + (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) { + pkt_flags |= PKT_RX_EIP_CKSUM_BAD; +@@ -1542,7 +1553,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) + /* convert descriptor fields to rte mbuf flags */ + pkt_flags = rx_desc_status_to_pkt_flags(s[j], + vlan_flags); +- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); ++ pkt_flags |= rx_desc_error_to_pkt_flags(s[j], ++ (uint16_t)pkt_info[j], ++ rxq->rx_udp_csum_zero_err); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags + ((uint16_t)pkt_info[j]); + mb->ol_flags = pkt_flags; +@@ -1875,7 +1888,9 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + + pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); +- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); ++ pkt_flags = pkt_flags | ++ rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, ++ rxq->rx_udp_csum_zero_err); + pkt_flags = pkt_flags | + ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + rxm->ol_flags = pkt_flags; +@@ -1968,7 +1983,8 @@ ixgbe_fill_cluster_head_buf( + head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); + pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); + pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags); +- pkt_flags |= rx_desc_error_to_pkt_flags(staterr); ++ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, ++ rxq->rx_udp_csum_zero_err); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + head->ol_flags = pkt_flags; + head->packet_type = +@@ -2980,6 +2996,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + else + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599; + ++ /* ++ * 82599 errata, UDP frames with a 0 checksum can be marked as checksum ++ * errors. ++ */ ++ if (hw->mac.type == ixgbe_mac_82599EB) ++ rxq->rx_udp_csum_zero_err = 1; ++ + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for +@@ -4785,15 +4808,11 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) + /* RFCTL configuration */ + rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); + if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) +- /* +- * Since NFS packets coalescing is not supported - clear +- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is +- * enabled. +- */ +- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS | +- IXGBE_RFCTL_NFSR_DIS); ++ rfctl &= ~IXGBE_RFCTL_RSC_DIS; + else + rfctl |= IXGBE_RFCTL_RSC_DIS; ++ /* disable NFS filtering */ ++ rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS; + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); + + /* If LRO hasn't been requested - we are done here. */ +@@ -5521,8 +5540,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) + * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way, + * VF packets received can work in all cases. 
+ */ +- ixgbevf_rlpml_set_vf(hw, +- (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); ++ if (ixgbevf_rlpml_set_vf(hw, ++ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) { ++ PMD_INIT_LOG(ERR, "Set max packet length to %d failed.", ++ dev->data->dev_conf.rxmode.max_rx_pkt_len); ++ return -EINVAL; ++ } + + /* + * Assume no header split and no VLAN strip support diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h -index 505d344b9c..5e1ac44942 100644 +index 505d344b9c..4adfbb3089 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h @@ -53,6 +53,8 @@ @@ -22137,6 +47436,470 @@ index 505d344b9c..5e1ac44942 100644 #define IXGBE_PACKET_TYPE_MASK_82599 0X7F #define IXGBE_PACKET_TYPE_MASK_X550 0X10FF #define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF +@@ -127,6 +129,8 @@ struct ixgbe_rx_queue { + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ + uint8_t rx_deferred_start; /**< not in global dev start. */ ++ /** UDP frames with a 0 checksum can be marked as checksum errors. */ ++ uint8_t rx_udp_csum_zero_err; + /** flags to set in mbuf when a vlan is detected. */ + uint64_t vlan_flags; + uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */ +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c +index feb86c61ee..d6d941cbc6 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c +@@ -5,13 +5,12 @@ + #include + #include + #include ++#include + + #include "ixgbe_ethdev.h" + #include "ixgbe_rxtx.h" + #include "ixgbe_rxtx_vec_common.h" + +-#include +- + #pragma GCC diagnostic ignored "-Wcast-qual" + + static inline void +@@ -82,27 +81,20 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + } + +-#define VTAG_SHIFT (3) +- + static inline void + desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2, +- uint8x16_t staterr, struct rte_mbuf **rx_pkts) ++ uint8x16_t staterr, uint8_t vlan_flags, struct rte_mbuf **rx_pkts) + { + uint8x16_t ptype; +- uint8x16_t vtag; ++ uint8x16_t vtag_lo, vtag_hi, vtag; ++ uint8x16_t temp_csum; ++ uint32x4_t csum = {0, 0, 0, 0}; + + union { +- uint8_t e[4]; +- uint32_t word; ++ uint16_t e[4]; ++ uint64_t word; + } vol; + +- const uint8x16_t pkttype_msk = { +- PKT_RX_VLAN, PKT_RX_VLAN, +- PKT_RX_VLAN, PKT_RX_VLAN, +- 0x00, 0x00, 0x00, 0x00, +- 0x00, 0x00, 0x00, 0x00, +- 0x00, 0x00, 0x00, 0x00}; +- + const uint8x16_t rsstype_msk = { + 0x0F, 0x0F, 0x0F, 0x0F, + 0x00, 0x00, 0x00, 0x00, +@@ -115,15 +107,67 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2, + PKT_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, PKT_RX_FDIR}; + ++ /* mask everything except vlan present and l4/ip csum error */ ++ const uint8x16_t vlan_csum_msk = { ++ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP, ++ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP, ++ 0, 0, 0, 0, ++ 0, 0, 0, 0, ++ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24, ++ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24, ++ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24, ++ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24}; ++ ++ /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */ ++ const uint8x16_t vlan_csum_map_lo = { ++ PKT_RX_IP_CKSUM_GOOD, ++ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, ++ PKT_RX_IP_CKSUM_BAD, ++ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, ++ 0, 0, 0, 0, ++ vlan_flags | PKT_RX_IP_CKSUM_GOOD, ++ vlan_flags 
| PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, ++ vlan_flags | PKT_RX_IP_CKSUM_BAD, ++ vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, ++ 0, 0, 0, 0}; ++ ++ const uint8x16_t vlan_csum_map_hi = { ++ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, ++ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, ++ 0, 0, 0, 0, ++ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, ++ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, ++ 0, 0, 0, 0}; ++ + ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0]; + ptype = vandq_u8(ptype, rsstype_msk); + ptype = vqtbl1q_u8(rss_flags, ptype); + +- vtag = vshrq_n_u8(staterr, VTAG_SHIFT); +- vtag = vandq_u8(vtag, pkttype_msk); +- vtag = vorrq_u8(ptype, vtag); ++ /* extract vlan_flags and csum_error from staterr */ ++ vtag = vandq_u8(staterr, vlan_csum_msk); ++ ++ /* csum bits are in the most significant, to use shuffle we need to ++ * shift them. Change mask from 0xc0 to 0x03. ++ */ ++ temp_csum = vshrq_n_u8(vtag, 6); ++ ++ /* 'OR' the most significant 32 bits containing the checksum ++ * flags with the vlan present flags ++ * Then bits layout of each lane(8bits) will be 'xxxx,VP,x,IPE,L4E' ++ */ ++ csum = vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u8(temp_csum), 3), csum, 0); ++ vtag = vorrq_u8(vreinterpretq_u8_u32(csum), vtag); + +- vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0); ++ /* convert L4 checksum correct type to vtag_hi */ ++ vtag_hi = vqtbl1q_u8(vlan_csum_map_hi, vtag); ++ vtag_hi = vshrq_n_u8(vtag_hi, 7); ++ ++ /* convert VP, IPE, L4E to vtag_lo */ ++ vtag_lo = vqtbl1q_u8(vlan_csum_map_lo, vtag); ++ vtag_lo = vorrq_u8(ptype, vtag_lo); ++ ++ vtag = vzipq_u8(vtag_lo, vtag_hi).val[0]; ++ vol.word = vgetq_lane_u64(vreinterpretq_u64_u8(vtag), 0); + + rx_pkts[0]->ol_flags = vol.e[0]; + rx_pkts[1]->ol_flags = vol.e[1]; +@@ -131,17 +175,6 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2, + rx_pkts[3]->ol_flags = vol.e[3]; + } + +-/* +- * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) +- * +- * Notice: +- * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST +- * numbers of DD bit +- * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two +- * - don't support ol_flags for rss and csum err +- */ +- + #define IXGBE_VPMD_DESC_EOP_MASK 0x02020202 + #define IXGBE_UINT8_BIT (CHAR_BIT * sizeof(uint8_t)) + +@@ -207,6 +240,13 @@ desc_to_ptype_v(uint64x2_t descs[4], uint16_t pkt_type_mask, + vgetq_lane_u32(tunnel_check, 3)); + } + ++/** ++ * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) ++ * ++ * Notice: ++ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet ++ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two ++ */ + static inline uint16_t + _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +@@ -226,9 +266,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + }; + uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0, + rxq->crc_len, 0, 0, 0}; +- +- /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ +- nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); ++ uint8_t vlan_flags; + + /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); +@@ -258,6 +296,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + ++ /* ensure these 2 
flags are in the lower 8 bits */ ++ RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX); ++ vlan_flags = rxq->vlan_flags & UINT8_MAX; ++ + /* A. load 4 packet in one loop + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets +@@ -319,7 +361,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0]; + + /* set ol_flags with vlan packet type */ +- desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, ++ desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, vlan_flags, + &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ +@@ -383,15 +425,12 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + return nb_pkts_recd; + } + +-/* ++/** + * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST +- * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two +- * - don't support ol_flags for rss and csum err + */ + uint16_t + ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -400,19 +439,16 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); + } + +-/* ++/** + * vPMD receive routine that reassembles scattered packets + * + * Notice: +- * - don't support ol_flags for rss and csum err + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST +- * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +-uint16_t +-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +- uint16_t nb_pkts) ++static uint16_t ++ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) + { + struct ixgbe_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0}; +@@ -444,6 +480,32 @@ ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + &split_flags[i]); + } + ++/** ++ * vPMD receive routine that reassembles scattered packets. 
++ */ ++uint16_t ++ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) ++{ ++ uint16_t retval = 0; ++ ++ while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) { ++ uint16_t burst; ++ ++ burst = ixgbe_recv_scattered_burst_vec(rx_queue, ++ rx_pkts + retval, ++ RTE_IXGBE_MAX_RX_BURST); ++ retval += burst; ++ nb_pkts -= burst; ++ if (burst < RTE_IXGBE_MAX_RX_BURST) ++ return retval; ++ } ++ ++ return retval + ixgbe_recv_scattered_burst_vec(rx_queue, ++ rx_pkts + retval, ++ nb_pkts); ++} ++ + static inline void + vtx1(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +@@ -576,11 +638,5 @@ ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq) + int __attribute__((cold)) + ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) + { +- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; +- +- /* no csum error report support */ +- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) +- return -1; +- + return ixgbe_rx_vec_dev_conf_condition_check_default(dev); + } +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +index 599ba30e51..4b658605bf 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +@@ -132,9 +132,9 @@ desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts) + + static inline void + desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, +- struct rte_mbuf **rx_pkts) ++ uint16_t udp_p_flag, struct rte_mbuf **rx_pkts) + { +- __m128i ptype0, ptype1, vtag0, vtag1, csum; ++ __m128i ptype0, ptype1, vtag0, vtag1, csum, udp_csum_skip; + __m128i rearm0, rearm1, rearm2, rearm3; + + /* mask everything except rss type */ +@@ -161,6 +161,7 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, + (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP); ++ + /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */ + const __m128i vlan_csum_map_lo = _mm_set_epi8( + 0, 0, 0, 0, +@@ -182,12 +183,23 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, + 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, + PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t)); + ++ /* mask everything except UDP header present if specified */ ++ const __m128i udp_hdr_p_msk = _mm_set_epi16 ++ (0, 0, 0, 0, ++ udp_p_flag, udp_p_flag, udp_p_flag, udp_p_flag); ++ ++ const __m128i udp_csum_bad_shuf = _mm_set_epi8 ++ (0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, ~(uint8_t)PKT_RX_L4_CKSUM_BAD, 0xFF); ++ + ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]); + ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]); + vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]); + vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]); + + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); ++ /* save the UDP header present information */ ++ udp_csum_skip = _mm_and_si128(ptype0, udp_hdr_p_msk); + ptype0 = _mm_and_si128(ptype0, rsstype_msk); + ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); + +@@ -215,6 +227,15 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, + + vtag1 = _mm_or_si128(ptype0, vtag1); + ++ /* convert the UDP header present 0x200 to 0x1 for aligning with each ++ * PKT_RX_L4_CKSUM_BAD value in low byte of 16 bits word ol_flag in ++ * vtag1 (4x16). Then mask out the bad checksum value by shuffle and ++ * bit-mask. 
++ */ ++ udp_csum_skip = _mm_srli_epi16(udp_csum_skip, 9); ++ udp_csum_skip = _mm_shuffle_epi8(udp_csum_bad_shuf, udp_csum_skip); ++ vtag1 = _mm_and_si128(vtag1, udp_csum_skip); ++ + /* + * At this point, we have the 4 sets of flags in the low 64-bits + * of vtag1 (4x16). +@@ -302,13 +323,11 @@ desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask, + get_packet_type(3, pkt_info, etqf_check, tunnel_check); + } + +-/* ++/** + * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST +- * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ + static inline uint16_t +@@ -343,9 +362,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + __m128i dd_check, eop_check; + __m128i mbuf_init; + uint8_t vlan_flags; +- +- /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ +- nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); ++ uint16_t udp_p_flag = 0; /* Rx Descriptor UDP header present */ + + /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); +@@ -370,6 +387,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + ++ if (rxq->rx_udp_csum_zero_err) ++ udp_p_flag = IXGBE_RXDADV_PKTTYPE_UDP; ++ + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + +@@ -482,7 +502,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + /* set ol_flags with vlan packet type */ +- desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]); ++ desc_to_olflags_v(descs, mbuf_init, vlan_flags, udp_p_flag, ++ &rx_pkts[pos]); + + #ifdef RTE_LIBRTE_SECURITY + if (unlikely(use_ipsec)) +@@ -556,13 +577,11 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + return nb_pkts_recd; + } + +-/* ++/** + * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST +- * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ + uint16_t +@@ -572,18 +591,16 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); + } + +-/* ++/** + * vPMD receive routine that reassembles scattered packets + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST +- * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +-uint16_t +-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +- uint16_t nb_pkts) ++static uint16_t ++ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) + { + struct ixgbe_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0}; +@@ -615,6 +632,32 @@ ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + &split_flags[i]); + } + ++/** ++ * vPMD receive routine that reassembles scattered packets. 
++ */ ++uint16_t ++ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) ++{ ++ uint16_t retval = 0; ++ ++ while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) { ++ uint16_t burst; ++ ++ burst = ixgbe_recv_scattered_burst_vec(rx_queue, ++ rx_pkts + retval, ++ RTE_IXGBE_MAX_RX_BURST); ++ retval += burst; ++ nb_pkts -= burst; ++ if (burst < RTE_IXGBE_MAX_RX_BURST) ++ return retval; ++ } ++ ++ return retval + ixgbe_recv_scattered_burst_vec(rx_queue, ++ rx_pkts + retval, ++ nb_pkts); ++} ++ + static inline void + vtx1(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) diff --git a/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c b/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c index 8bcaded6e5..9bff557f97 100644 --- a/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c @@ -22161,6 +47924,80 @@ index 8bcaded6e5..9bff557f97 100644 ixgbe_dev_macsec_setting_reset(dev); ixgbe_dev_macsec_register_disable(dev); +diff --git a/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h b/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h +index f62fd761dd..d1017c7b1a 100644 +--- a/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h ++++ b/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h +@@ -11,7 +11,9 @@ + #ifndef _PMD_IXGBE_H_ + #define _PMD_IXGBE_H_ + +-#include ++#include ++#include ++#include + + /** + * Notify VF when PF link status changes. +diff --git a/dpdk/drivers/net/kni/rte_eth_kni.c b/dpdk/drivers/net/kni/rte_eth_kni.c +index d88cb1778e..c3345f5cb7 100644 +--- a/dpdk/drivers/net/kni/rte_eth_kni.c ++++ b/dpdk/drivers/net/kni/rte_eth_kni.c +@@ -47,6 +47,7 @@ struct pmd_queue { + + struct pmd_internals { + struct rte_kni *kni; ++ uint16_t port_id; + int is_kni_started; + + pthread_t thread; +@@ -78,8 +79,11 @@ eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + struct pmd_queue *kni_q = q; + struct rte_kni *kni = kni_q->internals->kni; + uint16_t nb_pkts; ++ int i; + + nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs); ++ for (i = 0; i < nb_pkts; i++) ++ bufs[i]->port = kni_q->internals->port_id; + + kni_q->rx.pkts += nb_pkts; + +@@ -372,6 +376,7 @@ eth_kni_create(struct rte_vdev_device *vdev, + return NULL; + + internals = eth_dev->data->dev_private; ++ internals->port_id = eth_dev->data->port_id; + data = eth_dev->data; + data->nb_rx_queues = 1; + data->nb_tx_queues = 1; +diff --git a/dpdk/drivers/net/liquidio/lio_ethdev.c b/dpdk/drivers/net/liquidio/lio_ethdev.c +index ad4a51ecda..ac0472967c 100644 +--- a/dpdk/drivers/net/liquidio/lio_ethdev.c ++++ b/dpdk/drivers/net/liquidio/lio_ethdev.c +@@ -484,7 +484,7 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + return -1; + } + +- if (frame_len > RTE_ETHER_MAX_LEN) ++ if (frame_len > LIO_ETH_MAX_LEN) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/liquidio/lio_ethdev.h b/dpdk/drivers/net/liquidio/lio_ethdev.h +index 74cd2fb6c6..d33be1c44d 100644 +--- a/dpdk/drivers/net/liquidio/lio_ethdev.h ++++ b/dpdk/drivers/net/liquidio/lio_ethdev.h +@@ -13,6 +13,9 @@ + #define LIO_LSC_TIMEOUT 100000 /* 100000us (100ms) */ + #define LIO_MAX_CMD_TIMEOUT 10000 /* 10000ms (10s) */ + ++/* The max frame size with default MTU */ ++#define LIO_ETH_MAX_LEN (RTE_ETHER_MTU + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) ++ + #define LIO_DEV(_eth_dev) ((_eth_dev)->data->dev_private) + + /* LIO Response condition variable */ diff --git a/dpdk/drivers/net/memif/memif_socket.c b/dpdk/drivers/net/memif/memif_socket.c index ad5e30b96e..c1967c67bf 100644 --- a/dpdk/drivers/net/memif/memif_socket.c @@ -22201,10 +48038,54 @@ 
index ad5e30b96e..c1967c67bf 100644 memif_intr_handler, cc, diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c -index 8dd1d0d63d..aa83df3652 100644 +index 8dd1d0d63d..aa75a04278 100644 --- a/dpdk/drivers/net/memif/rte_eth_memif.c +++ b/dpdk/drivers/net/memif/rte_eth_memif.c -@@ -1501,7 +1501,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role, +@@ -398,7 +398,11 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + + refill: + if (type == MEMIF_RING_M2S) { +- head = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE); ++ /* ring->head is updated by the receiver and this function ++ * is called in the context of receiver thread. The loads in ++ * the receiver do not need to synchronize with its own stores. ++ */ ++ head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED); + n_slots = ring_size - head + mq->last_tail; + + while (n_slots--) { +@@ -561,14 +565,24 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + ring_size = 1 << mq->log2_ring_size; + mask = ring_size - 1; + +- n_free = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE) - mq->last_tail; +- mq->last_tail += n_free; +- + if (type == MEMIF_RING_S2M) { +- slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE); +- n_free = ring_size - slot + mq->last_tail; ++ /* For S2M queues ring->head is updated by the sender and ++ * this function is called in the context of sending thread. ++ * The loads in the sender do not need to synchronize with ++ * its own stores. Hence, the following load can be a ++ * relaxed load. ++ */ ++ slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED); ++ n_free = ring_size - slot + ++ __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); + } else { +- slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); ++ /* For M2S queues ring->tail is updated by the sender and ++ * this function is called in the context of sending thread. ++ * The loads in the sender do not need to synchronize with ++ * its own stores. Hence, the following load can be a ++ * relaxed load. 
++ */ ++ slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED); + n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot; + } + +@@ -1501,7 +1515,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role, } @@ -22214,7 +48095,7 @@ index 8dd1d0d63d..aa83df3652 100644 rte_eth_dev_probing_finish(eth_dev); diff --git a/dpdk/drivers/net/mlx4/meson.build b/dpdk/drivers/net/mlx4/meson.build -index 9eb4988420..ff7386aee2 100644 +index 9eb4988420..a03ab930cc 100644 --- a/dpdk/drivers/net/mlx4/meson.build +++ b/dpdk/drivers/net/mlx4/meson.build @@ -9,11 +9,12 @@ if not is_linux @@ -22232,13 +48113,14 @@ index 9eb4988420..ff7386aee2 100644 dpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1) cflags += [ '-DMLX4_GLUE="@0@"'.format(LIB_GLUE), -@@ -24,12 +25,15 @@ endif +@@ -24,12 +25,16 @@ endif libnames = [ 'mlx4', 'ibverbs' ] libs = [] foreach libname:libnames - lib = dependency('lib' + libname, required:false) - if not lib.found() -+ lib = dependency('lib' + libname, static:static_ibverbs, required:false) ++ lib = dependency('lib' + libname, static:static_ibverbs, ++ required:false, method: 'pkg-config') + if not lib.found() and not static_ibverbs lib = cc.find_library(libname, required:false) endif @@ -22251,7 +48133,7 @@ index 9eb4988420..ff7386aee2 100644 else build = false reason = 'missing dependency, "' + libname + '"' -@@ -37,8 +41,17 @@ foreach libname:libnames +@@ -37,8 +42,17 @@ foreach libname:libnames endforeach if build @@ -22270,7 +48152,7 @@ index 9eb4988420..ff7386aee2 100644 sources = files( 'mlx4.c', 'mlx4_ethdev.c', -@@ -51,7 +64,7 @@ if build +@@ -51,7 +65,7 @@ if build 'mlx4_txq.c', 'mlx4_utils.c', ) @@ -22279,7 +48161,7 @@ index 9eb4988420..ff7386aee2 100644 sources += files('mlx4_glue.c') endif cflags_options = [ -@@ -103,7 +116,7 @@ if build +@@ -103,7 +117,7 @@ if build configure_file(output : 'mlx4_autoconf.h', configuration : config) endif # Build Glue Library @@ -22289,7 +48171,7 @@ index 9eb4988420..ff7386aee2 100644 dlopen_lib_name = driver_name_fmt.format(dlopen_name) dlopen_so_version = LIB_GLUE_VERSION diff --git a/dpdk/drivers/net/mlx4/mlx4.c b/dpdk/drivers/net/mlx4/mlx4.c -index ab5e6c66cb..7771417ebe 100644 +index ab5e6c66cb..4479022a42 100644 --- a/dpdk/drivers/net/mlx4/mlx4.c +++ b/dpdk/drivers/net/mlx4/mlx4.c @@ -49,6 +49,10 @@ @@ -22303,6 +48185,215 @@ index ab5e6c66cb..7771417ebe 100644 static const char *MZ_MLX4_PMD_SHARED_DATA = "mlx4_pmd_shared_data"; /* Shared memory between primary and secondary processes. */ +@@ -194,7 +198,7 @@ mlx4_free_verbs_buf(void *ptr, void *data __rte_unused) + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +-static int ++int + mlx4_proc_priv_init(struct rte_eth_dev *dev) + { + struct mlx4_proc_priv *ppriv; +@@ -206,13 +210,13 @@ mlx4_proc_priv_init(struct rte_eth_dev *dev) + */ + ppriv_size = sizeof(struct mlx4_proc_priv) + + dev->data->nb_tx_queues * sizeof(void *); +- ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size, +- RTE_CACHE_LINE_SIZE, dev->device->numa_node); ++ ppriv = rte_zmalloc_socket("mlx4_proc_priv", ppriv_size, ++ RTE_CACHE_LINE_SIZE, dev->device->numa_node); + if (!ppriv) { + rte_errno = ENOMEM; + return -rte_errno; + } +- ppriv->uar_table_sz = ppriv_size; ++ ppriv->uar_table_sz = dev->data->nb_tx_queues; + dev->process_private = ppriv; + return 0; + } +@@ -223,7 +227,7 @@ mlx4_proc_priv_init(struct rte_eth_dev *dev) + * @param dev + * Pointer to Ethernet device structure. 
+ */ +-static void ++void + mlx4_proc_priv_uninit(struct rte_eth_dev *dev) + { + if (!dev->process_private) +@@ -248,9 +252,6 @@ mlx4_dev_configure(struct rte_eth_dev *dev) + struct rte_flow_error error; + int ret; + +- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) +- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; +- + /* Prepare internal flow rules. */ + ret = mlx4_flow_sync(priv, &error); + if (ret) { +@@ -461,6 +462,7 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device, + { + FILE *file; + char line[32]; ++ int rc = -ENOENT; + MKSTR(path, "%s/device/uevent", device->ibdev_path); + + file = fopen(path, "rb"); +@@ -470,16 +472,18 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device, + } + while (fgets(line, sizeof(line), file) == line) { + size_t len = strlen(line); +- int ret; + + /* Truncate long lines. */ +- if (len == (sizeof(line) - 1)) ++ if (len == (sizeof(line) - 1)) { + while (line[(len - 1)] != '\n') { +- ret = fgetc(file); ++ int ret = fgetc(file); + if (ret == EOF) +- break; ++ goto exit; + line[(len - 1)] = ret; + } ++ /* No match for long lines. */ ++ continue; ++ } + /* Extract information. */ + if (sscanf(line, + "PCI_SLOT_NAME=" +@@ -488,12 +492,15 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device, + &pci_addr->bus, + &pci_addr->devid, + &pci_addr->function) == 4) { +- ret = 0; ++ rc = 0; + break; + } + } ++exit: + fclose(file); +- return 0; ++ if (rc) ++ rte_errno = -rc; ++ return rc; + } + + /** +@@ -760,6 +767,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + struct ibv_context *attr_ctx = NULL; + struct ibv_device_attr device_attr; + struct ibv_device_attr_ex device_attr_ex; ++ struct rte_eth_dev *prev_dev = NULL; + struct mlx4_conf conf = { + .ports.present = 0, + .mr_ext_memseg_en = 1, +@@ -874,7 +882,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + ERROR("can not attach rte ethdev"); + rte_errno = ENOMEM; + err = rte_errno; +- goto error; ++ goto err_secondary; + } + priv = eth_dev->data->dev_private; + if (!priv->verbs_alloc_ctx.enabled) { +@@ -883,24 +891,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + " from Verbs"); + rte_errno = ENOTSUP; + err = rte_errno; +- goto error; ++ goto err_secondary; + } + eth_dev->device = &pci_dev->device; + eth_dev->dev_ops = &mlx4_dev_sec_ops; + err = mlx4_proc_priv_init(eth_dev); + if (err) +- goto error; ++ goto err_secondary; + /* Receive command fd from primary process. */ + err = mlx4_mp_req_verbs_cmd_fd(eth_dev); + if (err < 0) { + err = rte_errno; +- goto error; ++ goto err_secondary; + } + /* Remap UAR for Tx queues. */ + err = mlx4_tx_uar_init_secondary(eth_dev, err); + if (err) { + err = rte_errno; +- goto error; ++ goto err_secondary; + } + /* + * Ethdev pointer is still required as input since +@@ -912,7 +920,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + claim_zero(mlx4_glue->close_device(ctx)); + rte_eth_copy_pci_info(eth_dev, pci_dev); + rte_eth_dev_probing_finish(eth_dev); ++ prev_dev = eth_dev; + continue; ++err_secondary: ++ claim_zero(mlx4_glue->close_device(ctx)); ++ rte_eth_dev_release_port(eth_dev); ++ if (prev_dev) ++ rte_eth_dev_release_port(prev_dev); ++ break; + } + /* Check port status. 
*/ + err = mlx4_glue->query_port(ctx, port, &port_attr); +@@ -1029,10 +1044,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + eth_dev->device = &pci_dev->device; + rte_eth_copy_pci_info(eth_dev, pci_dev); + /* Initialize local interrupt handle for current port. */ +- priv->intr_handle = (struct rte_intr_handle){ +- .fd = -1, +- .type = RTE_INTR_HANDLE_EXT, +- }; ++ memset(&priv->intr_handle, 0, sizeof(struct rte_intr_handle)); ++ priv->intr_handle.fd = -1; ++ priv->intr_handle.type = RTE_INTR_HANDLE_EXT; + /* + * Override ethdev interrupt handle pointer with private + * handle instead of that of the parent PCI device used by +@@ -1088,6 +1102,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock); + rte_eth_dev_probing_finish(eth_dev); ++ prev_dev = eth_dev; + continue; + port_error: + rte_free(priv); +@@ -1102,14 +1117,10 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + eth_dev->data->mac_addrs = NULL; + rte_eth_dev_release_port(eth_dev); + } ++ if (prev_dev) ++ mlx4_dev_close(prev_dev); + break; + } +- /* +- * XXX if something went wrong in the loop above, there is a resource +- * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as +- * long as the dpdk does not provide a way to deallocate a ethdev and a +- * way to enumerate the registered ethdevs to free the previous ones. +- */ + error: + if (attr_ctx) + claim_zero(mlx4_glue->close_device(attr_ctx)); +diff --git a/dpdk/drivers/net/mlx4/mlx4.h b/dpdk/drivers/net/mlx4/mlx4.h +index c6cb29493e..87710d3996 100644 +--- a/dpdk/drivers/net/mlx4/mlx4.h ++++ b/dpdk/drivers/net/mlx4/mlx4.h +@@ -197,6 +197,10 @@ struct mlx4_priv { + #define PORT_ID(priv) ((priv)->dev_data->port_id) + #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)]) + ++int mlx4_proc_priv_init(struct rte_eth_dev *dev); ++void mlx4_proc_priv_uninit(struct rte_eth_dev *dev); ++ ++ + /* mlx4_ethdev.c */ + + int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]); diff --git a/dpdk/drivers/net/mlx4/mlx4_flow.c b/dpdk/drivers/net/mlx4/mlx4_flow.c index 96479b83dd..2da4f6d965 100644 --- a/dpdk/drivers/net/mlx4/mlx4_flow.c @@ -22338,8 +48429,64 @@ index 668ca86700..5d9e985495 100644 +extern const struct mlx4_glue *mlx4_glue; #endif /* MLX4_GLUE_H_ */ +diff --git a/dpdk/drivers/net/mlx4/mlx4_mp.c b/dpdk/drivers/net/mlx4/mlx4_mp.c +index cdb648517a..4da743d9e3 100644 +--- a/dpdk/drivers/net/mlx4/mlx4_mp.c ++++ b/dpdk/drivers/net/mlx4/mlx4_mp.c +@@ -112,6 +112,9 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + const struct mlx4_mp_param *param = + (const struct mlx4_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; ++#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET ++ struct mlx4_proc_priv *ppriv; ++#endif + int ret; + + assert(rte_eal_process_type() == RTE_PROC_SECONDARY); +@@ -127,6 +130,21 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + rte_mb(); + dev->tx_pkt_burst = mlx4_tx_burst; + dev->rx_pkt_burst = mlx4_rx_burst; ++#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET ++ ppriv = (struct mlx4_proc_priv *)dev->process_private; ++ if (ppriv->uar_table_sz != dev->data->nb_tx_queues) { ++ mlx4_tx_uar_uninit_secondary(dev); ++ mlx4_proc_priv_uninit(dev); ++ ret = mlx4_proc_priv_init(dev); ++ if (ret) ++ return -rte_errno; ++ ret = mlx4_tx_uar_init_secondary(dev, mp_msg->fds[0]); ++ if (ret) { ++ mlx4_proc_priv_uninit(dev); ++ 
return -rte_errno; ++ } ++ } ++#endif + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); +@@ -164,6 +182,7 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx4_mp_req_type type) + struct rte_mp_reply mp_rep; + struct mlx4_mp_param *res __rte_unused; + struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0}; ++ struct mlx4_priv *priv; + int ret; + int i; + +@@ -176,6 +195,11 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx4_mp_req_type type) + return; + } + mp_init_msg(dev, &mp_req, type); ++ if (type == MLX4_MP_REQ_START_RXTX) { ++ priv = dev->data->dev_private; ++ mp_req.num_fds = 1; ++ mp_req.fds[0] = priv->ctx->cmd_fd; ++ } + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + if (rte_errno != ENOTSUP) diff --git a/dpdk/drivers/net/mlx4/mlx4_rxtx.h b/dpdk/drivers/net/mlx4/mlx4_rxtx.h -index 8baf33fa94..9de6c59411 100644 +index 8baf33fa94..136ca56ca4 100644 --- a/dpdk/drivers/net/mlx4/mlx4_rxtx.h +++ b/dpdk/drivers/net/mlx4/mlx4_rxtx.h @@ -124,7 +124,7 @@ struct txq { @@ -22351,6 +48498,60 @@ index 8baf33fa94..9de6c59411 100644 int mlx4_rss_init(struct mlx4_priv *priv); void mlx4_rss_deinit(struct mlx4_priv *priv); struct mlx4_rss *mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields, +@@ -157,6 +157,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, + /* mlx4_txq.c */ + + int mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd); ++void mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev); + uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv); + int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, +diff --git a/dpdk/drivers/net/mlx4/mlx4_txq.c b/dpdk/drivers/net/mlx4/mlx4_txq.c +index 01a5efd80d..824ddbd827 100644 +--- a/dpdk/drivers/net/mlx4/mlx4_txq.c ++++ b/dpdk/drivers/net/mlx4/mlx4_txq.c +@@ -158,6 +158,27 @@ mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd) + } while (i--); + return -rte_errno; + } ++ ++void ++mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev) ++{ ++ struct mlx4_proc_priv *ppriv = ++ (struct mlx4_proc_priv *)dev->process_private; ++ const size_t page_size = sysconf(_SC_PAGESIZE); ++ void *addr; ++ size_t i; ++ ++ if (page_size == (size_t)-1) { ++ ERROR("Failed to get mem page size"); ++ return; ++ } ++ for (i = 0; i < ppriv->uar_table_sz; i++) { ++ addr = ppriv->uar_table[i]; ++ if (addr) ++ munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size); ++ } ++} ++ + #else + int + mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused, +@@ -168,6 +189,13 @@ mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused, + rte_errno = ENOTSUP; + return -rte_errno; + } ++ ++void ++mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev __rte_unused) ++{ ++ assert(rte_eal_process_type() == RTE_PROC_SECONDARY); ++ ERROR("UAR remap is not supported"); ++} + #endif + + /** diff --git a/dpdk/drivers/net/mlx4/mlx4_utils.h b/dpdk/drivers/net/mlx4/mlx4_utils.h index 74b9d2ecdc..5718b9c742 100644 --- a/dpdk/drivers/net/mlx4/mlx4_utils.h @@ -22385,7 +48586,7 @@ index c5cf4397ac..605975c245 100644 HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR \ infiniband/mlx5dv.h \ diff --git a/dpdk/drivers/net/mlx5/meson.build b/dpdk/drivers/net/mlx5/meson.build -index d6b32db794..a5775d18e3 100644 +index d6b32db794..139056cbe8 100644 --- a/dpdk/drivers/net/mlx5/meson.build +++ b/dpdk/drivers/net/mlx5/meson.build @@ -9,11 +9,12 @@ if not is_linux @@ -22403,13 +48604,14 @@ index d6b32db794..a5775d18e3 100644 
dpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1) cflags += [ '-DMLX5_GLUE="@0@"'.format(LIB_GLUE), -@@ -24,12 +25,15 @@ endif +@@ -24,12 +25,16 @@ endif libnames = [ 'mlx5', 'ibverbs' ] libs = [] foreach libname:libnames - lib = dependency('lib' + libname, required:false) - if not lib.found() -+ lib = dependency('lib' + libname, static:static_ibverbs, required:false) ++ lib = dependency('lib' + libname, static:static_ibverbs, ++ required:false, method: 'pkg-config') + if not lib.found() and not static_ibverbs lib = cc.find_library(libname, required:false) endif @@ -22421,7 +48623,7 @@ index d6b32db794..a5775d18e3 100644 else build = false reason = 'missing dependency, "' + libname + '"' -@@ -37,9 +41,18 @@ foreach libname:libnames +@@ -37,9 +42,18 @@ foreach libname:libnames endforeach if build @@ -22441,7 +48643,7 @@ index d6b32db794..a5775d18e3 100644 sources = files( 'mlx5.c', 'mlx5_ethdev.c', -@@ -67,7 +80,7 @@ if build +@@ -67,7 +81,7 @@ if build or dpdk_conf.has('RTE_ARCH_PPC_64')) sources += files('mlx5_rxtx_vec.c') endif @@ -22450,7 +48652,7 @@ index d6b32db794..a5775d18e3 100644 sources += files('mlx5_glue.c') endif cflags_options = [ -@@ -130,6 +143,8 @@ if build +@@ -130,6 +144,8 @@ if build 'MLX5DV_FLOW_ACTION_COUNTERS_DEVX' ], [ 'HAVE_IBV_DEVX_ASYNC', 'infiniband/mlx5dv.h', 'mlx5dv_devx_obj_query_async' ], @@ -22459,7 +48661,7 @@ index d6b32db794..a5775d18e3 100644 [ 'HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR', 'infiniband/mlx5dv.h', 'mlx5dv_dr_action_create_dest_devx_tir' ], [ 'HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER', 'infiniband/mlx5dv.h', -@@ -200,7 +215,7 @@ if build +@@ -200,7 +216,7 @@ if build configure_file(output : 'mlx5_autoconf.h', configuration : config) endif # Build Glue Library @@ -22469,9 +48671,18 @@ index d6b32db794..a5775d18e3 100644 dlopen_lib_name = driver_name_fmt.format(dlopen_name) dlopen_so_version = LIB_GLUE_VERSION diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c -index d84a6f91b4..8879df317d 100644 +index d84a6f91b4..f8de9e329e 100644 --- a/dpdk/drivers/net/mlx5/mlx5.c +++ b/dpdk/drivers/net/mlx5/mlx5.c +@@ -12,7 +12,7 @@ + #include + #include + #include +-#include ++#include + #include + + /* Verbs header. */ @@ -62,6 +62,9 @@ /* Device parameter to configure log 2 of the number of strides for MPRQ. */ #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" @@ -22526,7 +48737,101 @@ index d84a6f91b4..8879df317d 100644 rte_errno = ENOMEM; DRV_LOG(ERR, "no free id"); return -rte_errno; -@@ -590,7 +601,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn, +@@ -459,6 +470,85 @@ mlx5_restore_doorbell_mapping_env(int value) + setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1); + } + ++/** ++ * Install shared asynchronous device events handler. ++ * This function is implemented to support event sharing ++ * between multiple ports of single IB device. ++ * ++ * @param sh ++ * Pointer to mlx5_ibv_shared object. 
++ */ ++static void ++mlx5_dev_shared_handler_install(struct mlx5_ibv_shared *sh) ++{ ++ int ret; ++ int flags; ++ ++ sh->intr_handle.fd = -1; ++ flags = fcntl(sh->ctx->async_fd, F_GETFL); ++ ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); ++ if (ret) { ++ DRV_LOG(INFO, "failed to change file descriptor async event" ++ " queue"); ++ } else { ++ sh->intr_handle.fd = sh->ctx->async_fd; ++ sh->intr_handle.type = RTE_INTR_HANDLE_EXT; ++ if (rte_intr_callback_register(&sh->intr_handle, ++ mlx5_dev_interrupt_handler, sh)) { ++ DRV_LOG(INFO, "Fail to install the shared interrupt."); ++ sh->intr_handle.fd = -1; ++ } ++ } ++ if (sh->devx) { ++#ifdef HAVE_IBV_DEVX_ASYNC ++ sh->intr_handle_devx.fd = -1; ++ sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx); ++ if (!sh->devx_comp) { ++ DRV_LOG(INFO, "failed to allocate devx_comp."); ++ return; ++ } ++ flags = fcntl(sh->devx_comp->fd, F_GETFL); ++ ret = fcntl(sh->devx_comp->fd, F_SETFL, flags | O_NONBLOCK); ++ if (ret) { ++ DRV_LOG(INFO, "failed to change file descriptor" ++ " devx comp"); ++ return; ++ } ++ sh->intr_handle_devx.fd = sh->devx_comp->fd; ++ sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT; ++ if (rte_intr_callback_register(&sh->intr_handle_devx, ++ mlx5_dev_interrupt_handler_devx, sh)) { ++ DRV_LOG(INFO, "Fail to install the devx shared" ++ " interrupt."); ++ sh->intr_handle_devx.fd = -1; ++ } ++#endif /* HAVE_IBV_DEVX_ASYNC */ ++ } ++} ++ ++/** ++ * Uninstall shared asynchronous device events handler. ++ * This function is implemented to support event sharing ++ * between multiple ports of single IB device. ++ * ++ * @param dev ++ * Pointer to mlx5_ibv_shared object. ++ */ ++static void ++mlx5_dev_shared_handler_uninstall(struct mlx5_ibv_shared *sh) ++{ ++ if (sh->intr_handle.fd >= 0) ++ mlx5_intr_callback_unregister(&sh->intr_handle, ++ mlx5_dev_interrupt_handler, sh); ++#ifdef HAVE_IBV_DEVX_ASYNC ++ if (sh->intr_handle_devx.fd >= 0) ++ rte_intr_callback_unregister(&sh->intr_handle_devx, ++ mlx5_dev_interrupt_handler_devx, sh); ++ if (sh->devx_comp) ++ mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp); ++#endif ++} ++ + /** + * Allocate shared IB device context. If there is multiport device the + * master and representors will share this context, if there is single +@@ -553,7 +643,6 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn, + sizeof(sh->ibdev_name)); + strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path, + sizeof(sh->ibdev_path)); +- pthread_mutex_init(&sh->intr_mutex, NULL); + /* + * Setting port_id to max unallowed value means + * there is no interrupt subhandler installed for +@@ -590,13 +679,19 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn, goto error; } } @@ -22535,7 +48840,27 @@ index d84a6f91b4..8879df317d 100644 if (!sh->flow_id_pool) { DRV_LOG(ERR, "can't create flow id pool"); err = ENOMEM; -@@ -673,12 +684,12 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh) + goto error; + } + #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ ++#ifndef RTE_ARCH_64 ++ /* Initialize UAR access locks for 32bit implementations. 
*/ ++ rte_spinlock_init(&sh->uar_lock_cq); ++ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) ++ rte_spinlock_init(&sh->uar_lock[i]); ++#endif + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded +@@ -613,6 +708,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn, + err = rte_errno; + goto error; + } ++ mlx5_dev_shared_handler_install(sh); + mlx5_flow_counters_mng_init(sh); + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); +@@ -673,12 +769,12 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh) assert(rte_eal_process_type() == RTE_PROC_PRIMARY); if (--sh->refcnt) goto exit; @@ -22550,22 +48875,175 @@ index d84a6f91b4..8879df317d 100644 /* Remove context from the global device list. */ LIST_REMOVE(sh, next); /* -@@ -868,8 +879,13 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) +@@ -686,20 +782,7 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh) + * Only primary process handles async device events. + **/ + mlx5_flow_counters_mng_close(sh); +- assert(!sh->intr_cnt); +- if (sh->intr_cnt) +- mlx5_intr_callback_unregister +- (&sh->intr_handle, mlx5_dev_interrupt_handler, sh); +-#ifdef HAVE_MLX5_DEVX_ASYNC_SUPPORT +- if (sh->devx_intr_cnt) { +- if (sh->intr_handle_devx.fd) +- rte_intr_callback_unregister(&sh->intr_handle_devx, +- mlx5_dev_interrupt_handler_devx, sh); +- if (sh->devx_comp) +- mlx5dv_devx_destroy_cmd_comp(sh->devx_comp); +- } +-#endif +- pthread_mutex_destroy(&sh->intr_mutex); ++ mlx5_dev_shared_handler_uninstall(sh); + if (sh->pd) + claim_zero(mlx5_glue->dealloc_pd(sh->pd)); + if (sh->tis) +@@ -789,7 +872,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv) + snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); + sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE); + if (!sh->flow_tbls) { +- DRV_LOG(ERR, "flow tables with hash creation failed.\n"); ++ DRV_LOG(ERR, "flow tables with hash creation failed."); + err = ENOMEM; + return err; + } +@@ -868,27 +951,25 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) { struct mlx5_ibv_shared *sh = priv->sh; char s[MLX5_HLIST_NAMESIZE]; - int err = mlx5_alloc_table_hash_list(priv); -+ int err = 0; ++ int err; -+ if (!sh->flow_tbls) -+ err = mlx5_alloc_table_hash_list(priv); -+ else -+ DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse\n", -+ (void *)sh->flow_tbls); ++ assert(sh && sh->refcnt); ++ if (sh->refcnt > 1) ++ return 0; ++ err = mlx5_alloc_table_hash_list(priv); if (err) return err; /* Create tags hash list table. */ -@@ -1490,6 +1506,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) + snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); + sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE); + if (!sh->tag_table) { +- DRV_LOG(ERR, "tags with hash creation failed.\n"); ++ DRV_LOG(ERR, "tags with hash creation failed."); + err = ENOMEM; + goto error; + } + #ifdef HAVE_MLX5DV_DR + void *domain; + +- if (sh->dv_refcnt) { +- /* Shared DV/DR structures is already initialized. */ +- sh->dv_refcnt++; +- priv->dr_shared = 1; +- return 0; +- } + /* Reference counter is zero, we should initialize structures. 
*/ + domain = mlx5_glue->dr_create_domain(sh->ctx, + MLX5DV_DR_DOMAIN_TYPE_NIC_RX); +@@ -922,8 +1003,6 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) + #endif + sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan(); + #endif /* HAVE_MLX5DV_DR */ +- sh->dv_refcnt++; +- priv->dr_shared = 1; + return 0; + error: + /* Rollback the created objects. */ +@@ -965,17 +1044,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) + static void + mlx5_free_shared_dr(struct mlx5_priv *priv) + { +- struct mlx5_ibv_shared *sh; ++ struct mlx5_ibv_shared *sh = priv->sh; + +- if (!priv->dr_shared) ++ assert(sh && sh->refcnt); ++ if (sh->refcnt > 1) + return; +- priv->dr_shared = 0; +- sh = priv->sh; +- assert(sh); + #ifdef HAVE_MLX5DV_DR +- assert(sh->dv_refcnt); +- if (sh->dv_refcnt && --sh->dv_refcnt) +- return; + if (sh->rx_domain) { + mlx5_glue->dr_destroy_domain(sh->rx_domain); + sh->rx_domain = NULL; +@@ -1177,13 +1251,13 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + */ + ppriv_size = + sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); +- ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size, ++ ppriv = rte_zmalloc_socket("mlx5_proc_priv", ppriv_size, + RTE_CACHE_LINE_SIZE, dev->device->numa_node); + if (!ppriv) { + rte_errno = ENOMEM; + return -rte_errno; + } +- ppriv->uar_table_sz = ppriv_size; ++ ppriv->uar_table_sz = priv->txqs_n; + dev->process_private = ppriv; + return 0; + } +@@ -1194,7 +1268,7 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + * @param dev + * Pointer to Ethernet device structure. + */ +-static void ++void + mlx5_proc_priv_uninit(struct rte_eth_dev *dev) + { + if (!dev->process_private) +@@ -1218,12 +1292,20 @@ mlx5_dev_close(struct rte_eth_dev *dev) + unsigned int i; + int ret; + ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ /* Check if process_private released. */ ++ if (!dev->process_private) ++ return; ++ mlx5_tx_uar_uninit_secondary(dev); ++ mlx5_proc_priv_uninit(dev); ++ rte_eth_dev_release_port(dev); ++ return; ++ } ++ if (!priv->sh) ++ return; + DRV_LOG(DEBUG, "port %u closing device \"%s\"", + dev->data->port_id, + ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : "")); +- /* In case mlx5_dev_stop() has not been called. */ +- mlx5_dev_interrupt_handler_uninstall(dev); +- mlx5_dev_interrupt_handler_devx_uninstall(dev); + mlx5_traffic_disable(dev); + mlx5_flow_flush(dev, NULL); + mlx5_flow_meter_flush(dev, NULL); +@@ -1266,16 +1348,13 @@ mlx5_dev_close(struct rte_eth_dev *dev) + close(priv->nl_socket_rdma); + if (priv->vmwa_context) + mlx5_vlan_vmwa_exit(priv->vmwa_context); +- if (priv->sh) { +- /* +- * Free the shared context in last turn, because the cleanup +- * routines above may use some shared fields, like +- * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing +- * ifindex if Netlink fails. +- */ +- mlx5_free_shared_ibctx(priv->sh); +- priv->sh = NULL; +- } ++ /* ++ * Free the shared context in last turn, because the cleanup ++ * routines above may use some shared fields, like ++ * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing ++ * ifindex if Netlink fails. 
++ */ ++ mlx5_free_shared_ibctx(priv->sh); + ret = mlx5_hrxq_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some hash Rx queue still remain", +@@ -1490,6 +1569,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->mprq.enabled = !!tmp; } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { config->mprq.stride_num_n = tmp; @@ -22574,7 +49052,7 @@ index d84a6f91b4..8879df317d 100644 } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { config->mprq.max_memcpy_len = tmp; } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { -@@ -1582,6 +1600,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) +@@ -1582,6 +1663,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) MLX5_RXQ_PKT_PAD_EN, MLX5_RX_MPRQ_EN, MLX5_RX_MPRQ_LOG_STRIDE_NUM, @@ -22582,7 +49060,7 @@ index d84a6f91b4..8879df317d 100644 MLX5_RX_MPRQ_MAX_MEMCPY_LEN, MLX5_RXQS_MIN_MPRQ, MLX5_TXQ_INLINE, -@@ -1697,7 +1716,7 @@ mlx5_init_once(void) +@@ -1697,7 +1779,7 @@ mlx5_init_once(void) * key is specified in devargs * - if DevX is enabled the inline mode is queried from the * device (HCA attributes and NIC vport context if needed). @@ -22591,7 +49069,7 @@ index d84a6f91b4..8879df317d 100644 * and none (0 bytes) for other NICs * * @param spawn -@@ -1931,9 +1950,9 @@ mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page) +@@ -1931,12 +2013,12 @@ mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page) i++) ; /* Empty. */ /* Find the first clear bit. */ @@ -22602,8 +49080,12 @@ index d84a6f91b4..8879df317d 100644 + page->dbr_bitmap[i] |= (UINT64_C(1) << j); page->dbr_count++; *dbr_page = page; - return (((i * 64) + j) * sizeof(uint64_t)); -@@ -1978,7 +1997,7 @@ mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) +- return (((i * 64) + j) * sizeof(uint64_t)); ++ return (i * CHAR_BIT * sizeof(uint64_t) + j) * MLX5_DBR_SIZE; + } + + /** +@@ -1978,7 +2060,7 @@ mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) int i = offset / 64; int j = offset % 64; @@ -22612,7 +49094,31 @@ index d84a6f91b4..8879df317d 100644 } return ret; } -@@ -2236,8 +2255,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -2145,11 +2227,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + /* Receive command fd from primary process */ + err = mlx5_mp_req_verbs_cmd_fd(eth_dev); + if (err < 0) +- return NULL; ++ goto err_secondary; + /* Remap UAR for Tx queues. 
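The remap has to happen before the Rx/Tx burst functions are selected; on failure, the new err_secondary label releases the per-process resources through mlx5_dev_close(). 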
*/ + err = mlx5_tx_uar_init_secondary(eth_dev, err); + if (err) +- return NULL; ++ goto err_secondary; + /* + * Ethdev pointer is still required as input since + * the primary device is not accessible from the +@@ -2158,6 +2240,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); + eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); + return eth_dev; ++err_secondary: ++ mlx5_dev_close(eth_dev); ++ return NULL; + } + /* + * Some parameters ("tx_db_nc" in particularly) are needed in +@@ -2236,8 +2321,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, mprq_caps.min_single_wqe_log_num_of_strides; mprq_max_stride_num_n = mprq_caps.max_single_wqe_log_num_of_strides; @@ -22621,7 +49127,20 @@ index d84a6f91b4..8879df317d 100644 } #endif if (RTE_CACHE_LINE_SIZE == 128 && -@@ -2543,6 +2560,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -2307,12 +2390,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + priv->ibv_port = spawn->ibv_port; + priv->pci_dev = spawn->pci_dev; + priv->mtu = RTE_ETHER_MTU; +-#ifndef RTE_ARCH_64 +- /* Initialize UAR access locks for 32bit implementations. */ +- rte_spinlock_init(&priv->uar_lock_cq); +- for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) +- rte_spinlock_init(&priv->uar_lock[i]); +-#endif + /* Some internal functions rely on Netlink sockets, open them now. */ + priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); + priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); +@@ -2543,6 +2620,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, priv->mtr_color_reg = ffs(reg_c_mask) - 1 + REG_C_0; priv->mtr_en = 1; @@ -22630,7 +49149,7 @@ index d84a6f91b4..8879df317d 100644 DRV_LOG(DEBUG, "The REG_C meter uses is %d", priv->mtr_color_reg); } -@@ -2550,17 +2569,32 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -2550,17 +2629,32 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, #endif } if (config.mprq.enabled && mprq) { @@ -22667,7 +49186,7 @@ index d84a6f91b4..8879df317d 100644 config.mprq.min_stride_size_n = mprq_min_stride_size_n; config.mprq.max_stride_size_n = mprq_max_stride_size_n; } else if (config.mprq.enabled && !mprq) { -@@ -2675,7 +2709,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -2675,7 +2769,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, err = mlx5_alloc_shared_dr(priv); if (err) goto error; @@ -22681,7 +49200,7 @@ index d84a6f91b4..8879df317d 100644 if (!priv->qrss_id_pool) { DRV_LOG(ERR, "can't create flow id pool"); err = ENOMEM; -@@ -3074,7 +3113,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, +@@ -3074,7 +3173,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, /* * Single IB device with multiple ports found, * it may be E-Switch master device and representors. @@ -22690,7 +49209,7 @@ index d84a6f91b4..8879df317d 100644 */ assert(nl_rdma >= 0); assert(ns == 0); -@@ -3274,7 +3313,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, +@@ -3274,7 +3373,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, .mr_ext_memseg_en = 1, .mprq = { .enabled = 0, /* Disabled by default. 
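MPRQ remains opt-in; judging by the devargs handling added earlier in this patch, the stride size now gets an explicit tunable default alongside the stride count. 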
*/ @@ -22700,10 +49219,64 @@ index d84a6f91b4..8879df317d 100644 .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, }, +@@ -3289,7 +3389,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: +- case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF: ++ case PCI_DEVICE_ID_MELLANOX_CONNECTXVF: + dev_config.vf = 1; + break; + default: +@@ -3311,7 +3411,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); + /* Restore non-PCI flags cleared by the above call. */ + list[i].eth_dev->data->dev_flags |= restore; +- mlx5_dev_interrupt_handler_devx_install(list[i].eth_dev); + rte_eth_dev_probing_finish(list[i].eth_dev); + } + if (i != ns) { +@@ -3405,8 +3504,16 @@ mlx5_pci_remove(struct rte_pci_device *pci_dev) + { + uint16_t port_id; + +- RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) +- rte_eth_dev_close(port_id); ++ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) { ++ /* ++ * mlx5_dev_close() is not registered to secondary process, ++ * call the close function explicitly for secondary process. ++ */ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ mlx5_dev_close(&rte_eth_devices[port_id]); ++ else ++ rte_eth_dev_close(port_id); ++ } + return 0; + } + +@@ -3465,7 +3572,7 @@ static const struct rte_pci_id mlx5_pci_id_map[] = { + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, +- PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF) ++ PCI_DEVICE_ID_MELLANOX_CONNECTXVF) + }, + { + .vendor_id = 0 diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h -index 0c3a90e1bf..e4af5d40db 100644 +index 0c3a90e1bf..0e4a5f870d 100644 --- a/dpdk/drivers/net/mlx5/mlx5.h +++ b/dpdk/drivers/net/mlx5/mlx5.h +@@ -57,7 +57,7 @@ enum { + PCI_DEVICE_ID_MELLANOX_CONNECTX6 = 0x101b, + PCI_DEVICE_ID_MELLANOX_CONNECTX6VF = 0x101c, + PCI_DEVICE_ID_MELLANOX_CONNECTX6DX = 0x101d, +- PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF = 0x101e, ++ PCI_DEVICE_ID_MELLANOX_CONNECTXVF = 0x101e, + }; + + /* Request types for IPC. */ @@ -148,12 +148,15 @@ struct mlx5_xstats_ctrl { /* Index in the device counters table. */ uint16_t dev_table_idx[MLX5_MAX_XSTATS]; @@ -22746,7 +49319,55 @@ index 0c3a90e1bf..e4af5d40db 100644 struct mlx5_rx_hash_field_select rx_hash_field_selector_outer; struct mlx5_rx_hash_field_select rx_hash_field_selector_inner; }; -@@ -626,6 +632,7 @@ struct mlx5_flow_id_pool { +@@ -480,7 +486,8 @@ struct mlx5_flow_counter { + uint32_t shared:1; /**< Share counter ID with other flow rules. */ + uint32_t batch: 1; + /**< Whether the counter was allocated by batch command. */ +- uint32_t ref_cnt:30; /**< Reference counter. */ ++ uint32_t ref_cnt:29; /**< Reference counter. */ ++ uint32_t skipped:1; /* This counter is skipped or not. */ + uint32_t id; /**< Counter ID. */ + union { /**< Holds the counters for the rule. */ + #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) +@@ -512,6 +519,7 @@ struct mlx5_flow_counter_pool { + /* The devx object of the minimum counter ID. */ + rte_atomic64_t query_gen; + uint32_t n_counters: 16; /* Number of devx allocated counters. */ ++ uint32_t skip_cnt:1; /* Pool contains skipped counter. */ + rte_spinlock_t sl; /* The pool lock. */ + struct mlx5_counter_stats_raw *raw; + struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. 
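That is, the raw counter buffer an in-flight hardware query is writing into. 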
*/ +@@ -594,20 +602,22 @@ struct mlx5_flow_tbl_resource { + }; + + #define MLX5_MAX_TABLES UINT16_MAX +-#define MLX5_FLOW_TABLE_LEVEL_METER (UINT16_MAX - 3) +-#define MLX5_FLOW_TABLE_LEVEL_SUFFIX (UINT16_MAX - 2) + #define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1) + /* Reserve the last two tables for metadata register copy. */ + #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1) + #define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2) + /* Tables for metering splits should be added here. */ +-#define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3) ++#define MLX5_FLOW_TABLE_LEVEL_SUFFIX (MLX5_MAX_TABLES - 3) ++#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4) ++#define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_METER + #define MLX5_MAX_TABLES_FDB UINT16_MAX + +-#define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */ +-#define MLX5_DBR_SIZE 8 +-#define MLX5_DBR_PER_PAGE (MLX5_DBR_PAGE_SIZE / MLX5_DBR_SIZE) +-#define MLX5_DBR_BITMAP_SIZE (MLX5_DBR_PER_PAGE / 64) ++#define MLX5_DBR_SIZE RTE_CACHE_LINE_SIZE ++#define MLX5_DBR_PER_PAGE 64 ++/* Must be >= CHAR_BIT * sizeof(uint64_t) */ ++#define MLX5_DBR_PAGE_SIZE (MLX5_DBR_PER_PAGE * MLX5_DBR_SIZE) ++/* Page size must be >= 512. */ ++#define MLX5_DBR_BITMAP_SIZE (MLX5_DBR_PER_PAGE / (CHAR_BIT * sizeof(uint64_t))) + + struct mlx5_devx_dbr_page { + /* Door-bell records, must be first member in structure. */ +@@ -626,6 +636,7 @@ struct mlx5_flow_id_pool { /**< The next index that can be used without any free elements. */ uint32_t *curr; /**< Pointer to the index to pop. */ uint32_t *last; /**< Pointer to the last element in the empty arrray. */ @@ -22754,9 +49375,11 @@ index 0c3a90e1bf..e4af5d40db 100644 }; /* -@@ -660,14 +667,8 @@ struct mlx5_ibv_shared { +@@ -658,16 +669,14 @@ struct mlx5_ibv_shared { + uint32_t dv_meta_mask; /* flow META metadata supported mask. */ + uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */ uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */ - uint32_t dv_refcnt; /* DV/DR data reference counter. */ +- uint32_t dv_refcnt; /* DV/DR data reference counter. */ void *fdb_domain; /* FDB Direct Rules name space handle. */ - struct mlx5_flow_tbl_resource *fdb_mtr_sfx_tbl; - /* FDB meter suffix rules table. */ @@ -22766,26 +49389,80 @@ index 0c3a90e1bf..e4af5d40db 100644 void *tx_domain; /* TX Direct Rules name space handle. */ - struct mlx5_flow_tbl_resource *tx_mtr_sfx_tbl; - /* TX meter suffix rules table. */ ++#ifndef RTE_ARCH_64 ++ rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */ ++ rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX]; ++ /* UAR same-page access control required in 32bit implementations. */ ++#endif struct mlx5_hlist *flow_tbls; /* Direct Rules tables for FDB, NIC TX+RX */ void *esw_drop_action; /* Pointer to DR E-Switch drop action. */ -@@ -727,6 +728,7 @@ struct mlx5_priv { - unsigned int dr_shared:1; /* DV/DR data is shared. */ +@@ -681,10 +690,7 @@ struct mlx5_ibv_shared { + push_vlan_action_list; /* List of push VLAN actions. */ + struct mlx5_flow_counter_mng cmng; /* Counters management structure. */ + /* Shared interrupt handler section. */ +- pthread_mutex_t intr_mutex; /* Interrupt config mutex. */ +- uint32_t intr_cnt; /* Interrupt handler reference counter. */ + struct rte_intr_handle intr_handle; /* Interrupt handler for device. */ +- uint32_t devx_intr_cnt; /* Devx interrupt handler reference counter. */ + struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. 
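Now installed once per shared IB context; the per-port intr_mutex and the intr_cnt/devx_intr_cnt reference counters above are removed. 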
*/
 struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */
 struct mlx5_devx_obj *tis; /* TIS object. */
@@ -693,7 +699,10 @@ struct mlx5_ibv_shared {
 struct mlx5_ibv_shared_port port[]; /* per device port data array. */
 };
 
-/* Per-process private structure. */
+/*
+ * Per-process private structure.
+ * Caution: the secondary process may rebuild the struct during port start.
+ */
 struct mlx5_proc_priv {
 size_t uar_table_sz;
 /* Size of UAR register table. */
@@ -724,9 +733,9 @@ struct mlx5_priv {
 unsigned int isolated:1; /* Whether isolated mode is enabled. */
 unsigned int representor:1; /* Device is a port representor. */
 unsigned int master:1; /* Device is an E-Switch master. */
- unsigned int dr_shared:1; /* DV/DR data is shared. */
 unsigned int counter_fallback:1; /* Use counter fallback management. */
 unsigned int mtr_en:1; /* Whether support meter. */
+ unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
 uint16_t domain_id; /* Switch domain identifier. */
 uint16_t vport_id; /* Associated VF vport index (if any). */
 uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
@@ -778,12 +787,8 @@ struct mlx5_priv {
 uint8_t mtr_color_reg; /* Meter color match REG_C. */
 struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */
 struct mlx5_flow_meters flow_meters; /* MTR list. */
-#ifndef RTE_ARCH_64
- rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
- rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
- /* UAR same-page access control required in 32bit implementations. */
-#endif
 uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
+ uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. 
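Presumably set once the E-Switch table-zero default jump rule (see mlx5_flow_create_esw_table_zero_flow()) has been installed. 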
*/ }; #define PORT_ID(priv) ((priv)->dev_data->port_id) -@@ -972,6 +975,7 @@ struct mlx5_flow_counter *mlx5_counter_alloc(struct rte_eth_dev *dev); +@@ -797,6 +802,7 @@ int64_t mlx5_get_dbr(struct rte_eth_dev *dev, + struct mlx5_devx_dbr_page **dbr_page); + int32_t mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, + uint64_t offset); ++void mlx5_proc_priv_uninit(struct rte_eth_dev *dev); + int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); + uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev); +@@ -835,8 +841,6 @@ void mlx5_dev_interrupt_handler(void *arg); + void mlx5_dev_interrupt_handler_devx(void *arg); + void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev); + void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev); +-void mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev); +-void mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev); + int mlx5_set_link_down(struct rte_eth_dev *dev); + int mlx5_set_link_up(struct rte_eth_dev *dev); + int mlx5_is_removed(struct rte_eth_dev *dev); +@@ -972,6 +976,7 @@ struct mlx5_flow_counter *mlx5_counter_alloc(struct rte_eth_dev *dev); void mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt); int mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, bool clear, uint64_t *pkts, uint64_t *bytes); @@ -22794,9 +49471,18 @@ index 0c3a90e1bf..e4af5d40db 100644 /* mlx5_mp.c */ void mlx5_mp_req_start_rxtx(struct rte_eth_dev *dev); diff --git a/dpdk/drivers/net/mlx5/mlx5_defs.h b/dpdk/drivers/net/mlx5/mlx5_defs.h -index 042e1f31ee..418e744d65 100644 +index 042e1f31ee..2836099b75 100644 --- a/dpdk/drivers/net/mlx5/mlx5_defs.h +++ b/dpdk/drivers/net/mlx5/mlx5_defs.h +@@ -61,7 +61,7 @@ + + /* Switch port ID parameters for bonding configurations. */ + #define MLX5_PORT_ID_BONDING_PF_MASK 0xf +-#define MLX5_PORT_ID_BONDING_PF_SHIFT 0xf ++#define MLX5_PORT_ID_BONDING_PF_SHIFT 12 + + /* Alarm timeout. */ + #define MLX5_ALARM_TIMEOUT_US 100000 @@ -146,6 +146,9 @@ /* Log 2 of the default number of strides per WQE for Multi-Packet RQ. 
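A companion stride-size default is introduced in this hunk, consumed through the new MLX5_RX_MPRQ_LOG_STRIDE_SIZE devargs key handled in mlx5_args_check(). 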
*/ #define MLX5_MPRQ_STRIDE_NUM_N 6U @@ -22819,7 +49505,7 @@ index 042e1f31ee..418e744d65 100644 #ifndef HAVE_STATIC_ASSERT #define static_assert _Static_assert diff --git a/dpdk/drivers/net/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/net/mlx5/mlx5_devx_cmds.c -index 9893287ba8..e223ee9b18 100644 +index 9893287ba8..f9c4043c11 100644 --- a/dpdk/drivers/net/mlx5/mlx5_devx_cmds.c +++ b/dpdk/drivers/net/mlx5/mlx5_devx_cmds.c @@ -362,6 +362,8 @@ mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx, @@ -22855,11 +49541,30 @@ index 9893287ba8..e223ee9b18 100644 outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer); MLX5_SET(rx_hash_field_select, outer, l3_prot_type, tir_attr->rx_hash_field_selector_outer.l3_prot_type); +@@ -779,7 +778,7 @@ mlx5_devx_cmd_create_sq(struct ibv_context *ctx, + MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre); + MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en); + MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe, +- sq_attr->flush_in_error_en); ++ sq_attr->allow_multi_pkt_send_wqe); + MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode, + sq_attr->min_wqe_inline_mode); + MLX5_SET(sqc, sq_ctx, state, sq_attr->state); diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c -index d80ae458bc..3b4c5dbe7a 100644 +index d80ae458bc..efcc69ca44 100644 --- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c +++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c -@@ -476,7 +476,7 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev) +@@ -405,9 +405,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev) + return -rte_errno; + } + +- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) +- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; +- + memcpy(priv->rss_conf.rss_key, + use_app_rss_key ? + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key : +@@ -476,7 +473,7 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev) rxq_data = (*priv->rxqs)[i]; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); @@ -22868,8 +49573,349 @@ index d80ae458bc..3b4c5dbe7a 100644 rss_queue_arr[j++] = i; } rss_queue_n = j; +@@ -649,14 +646,22 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) + * representors (more than 4K) or PFs (more than 15) + * this approach must be reconsidered. + */ +- if ((info->switch_info.port_id >> +- MLX5_PORT_ID_BONDING_PF_SHIFT) || ++ /* Switch port ID for VF representors: 0 - 0xFFE */ ++ if ((info->switch_info.port_id != 0xffff && ++ info->switch_info.port_id >= ++ ((1 << MLX5_PORT_ID_BONDING_PF_SHIFT) - 1)) || + priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) { + DRV_LOG(ERR, "can't update switch port ID" + " for bonding device"); + assert(false); + return -ENODEV; + } ++ /* ++ * Switch port ID for Host PF representor ++ * (representor_id is -1) , set to 0xFFF ++ */ ++ if (info->switch_info.port_id == 0xffff) ++ info->switch_info.port_id = 0xfff; + info->switch_info.port_id |= + priv->pf_bond << MLX5_PORT_ID_BONDING_PF_SHIFT; + } +@@ -1226,6 +1231,7 @@ mlx5_dev_to_pci_addr(const char *dev_path, + { + FILE *file; + char line[32]; ++ int rc = -ENOENT; + MKSTR(path, "%s/device/uevent", dev_path); + + file = fopen(path, "rb"); +@@ -1235,16 +1241,18 @@ mlx5_dev_to_pci_addr(const char *dev_path, + } + while (fgets(line, sizeof(line), file) == line) { + size_t len = strlen(line); +- int ret; + + /* Truncate long lines. 
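Over-long lines are now drained and skipped rather than matched, and the function returns -ENOENT (also setting rte_errno) when no PCI_SLOT_NAME entry is found, instead of silently reporting success. 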
*/ +- if (len == (sizeof(line) - 1)) ++ if (len == (sizeof(line) - 1)) { + while (line[(len - 1)] != '\n') { +- ret = fgetc(file); ++ int ret = fgetc(file); + if (ret == EOF) +- break; ++ goto exit; + line[(len - 1)] = ret; + } ++ /* No match for long lines. */ ++ continue; ++ } + /* Extract information. */ + if (sscanf(line, + "PCI_SLOT_NAME=" +@@ -1253,12 +1261,15 @@ mlx5_dev_to_pci_addr(const char *dev_path, + &pci_addr->bus, + &pci_addr->devid, + &pci_addr->function) == 4) { +- ret = 0; ++ rc = 0; + break; + } + } ++exit: + fclose(file); +- return 0; ++ if (rc) ++ rte_errno = -rc; ++ return rc; + } + + /** +@@ -1475,249 +1486,6 @@ mlx5_dev_interrupt_handler_devx(void *cb_arg) + #endif /* HAVE_IBV_DEVX_ASYNC */ + } + +-/** +- * Uninstall shared asynchronous device events handler. +- * This function is implemented to support event sharing +- * between multiple ports of single IB device. +- * +- * @param dev +- * Pointer to Ethernet device. +- */ +-static void +-mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev) +-{ +- struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_ibv_shared *sh = priv->sh; +- +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return; +- pthread_mutex_lock(&sh->intr_mutex); +- assert(priv->ibv_port); +- assert(priv->ibv_port <= sh->max_port); +- assert(dev->data->port_id < RTE_MAX_ETHPORTS); +- if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS) +- goto exit; +- assert(sh->port[priv->ibv_port - 1].ih_port_id == +- (uint32_t)dev->data->port_id); +- assert(sh->intr_cnt); +- sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS; +- if (!sh->intr_cnt || --sh->intr_cnt) +- goto exit; +- mlx5_intr_callback_unregister(&sh->intr_handle, +- mlx5_dev_interrupt_handler, sh); +- sh->intr_handle.fd = 0; +- sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; +-exit: +- pthread_mutex_unlock(&sh->intr_mutex); +-} +- +-/** +- * Uninstall devx shared asynchronous device events handler. +- * This function is implemeted to support event sharing +- * between multiple ports of single IB device. +- * +- * @param dev +- * Pointer to Ethernet device. +- */ +-static void +-mlx5_dev_shared_handler_devx_uninstall(struct rte_eth_dev *dev) +-{ +- struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_ibv_shared *sh = priv->sh; +- +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return; +- pthread_mutex_lock(&sh->intr_mutex); +- assert(priv->ibv_port); +- assert(priv->ibv_port <= sh->max_port); +- assert(dev->data->port_id < RTE_MAX_ETHPORTS); +- if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS) +- goto exit; +- assert(sh->port[priv->ibv_port - 1].devx_ih_port_id == +- (uint32_t)dev->data->port_id); +- sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS; +- if (!sh->devx_intr_cnt || --sh->devx_intr_cnt) +- goto exit; +- if (sh->intr_handle_devx.fd) { +- rte_intr_callback_unregister(&sh->intr_handle_devx, +- mlx5_dev_interrupt_handler_devx, +- sh); +- sh->intr_handle_devx.fd = 0; +- sh->intr_handle_devx.type = RTE_INTR_HANDLE_UNKNOWN; +- } +- if (sh->devx_comp) { +- mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp); +- sh->devx_comp = NULL; +- } +-exit: +- pthread_mutex_unlock(&sh->intr_mutex); +-} +- +-/** +- * Install shared asynchronous device events handler. +- * This function is implemented to support event sharing +- * between multiple ports of single IB device. +- * +- * @param dev +- * Pointer to Ethernet device. 
+- */ +-static void +-mlx5_dev_shared_handler_install(struct rte_eth_dev *dev) +-{ +- struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_ibv_shared *sh = priv->sh; +- int ret; +- int flags; +- +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return; +- pthread_mutex_lock(&sh->intr_mutex); +- assert(priv->ibv_port); +- assert(priv->ibv_port <= sh->max_port); +- assert(dev->data->port_id < RTE_MAX_ETHPORTS); +- if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) { +- /* The handler is already installed for this port. */ +- assert(sh->intr_cnt); +- goto exit; +- } +- if (sh->intr_cnt) { +- sh->port[priv->ibv_port - 1].ih_port_id = +- (uint32_t)dev->data->port_id; +- sh->intr_cnt++; +- goto exit; +- } +- /* No shared handler installed. */ +- assert(sh->ctx->async_fd > 0); +- flags = fcntl(sh->ctx->async_fd, F_GETFL); +- ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); +- if (ret) { +- DRV_LOG(INFO, "failed to change file descriptor async event" +- " queue"); +- /* Indicate there will be no interrupts. */ +- dev->data->dev_conf.intr_conf.lsc = 0; +- dev->data->dev_conf.intr_conf.rmv = 0; +- } else { +- sh->intr_handle.fd = sh->ctx->async_fd; +- sh->intr_handle.type = RTE_INTR_HANDLE_EXT; +- rte_intr_callback_register(&sh->intr_handle, +- mlx5_dev_interrupt_handler, sh); +- sh->intr_cnt++; +- sh->port[priv->ibv_port - 1].ih_port_id = +- (uint32_t)dev->data->port_id; +- } +-exit: +- pthread_mutex_unlock(&sh->intr_mutex); +-} +- +-/** +- * Install devx shared asyncronous device events handler. +- * This function is implemeted to support event sharing +- * between multiple ports of single IB device. +- * +- * @param dev +- * Pointer to Ethernet device. +- */ +-static void +-mlx5_dev_shared_handler_devx_install(struct rte_eth_dev *dev) +-{ +- struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_ibv_shared *sh = priv->sh; +- +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return; +- pthread_mutex_lock(&sh->intr_mutex); +- assert(priv->ibv_port); +- assert(priv->ibv_port <= sh->max_port); +- assert(dev->data->port_id < RTE_MAX_ETHPORTS); +- if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) { +- /* The handler is already installed for this port. */ +- assert(sh->devx_intr_cnt); +- goto exit; +- } +- if (sh->devx_intr_cnt) { +- sh->devx_intr_cnt++; +- sh->port[priv->ibv_port - 1].devx_ih_port_id = +- (uint32_t)dev->data->port_id; +- goto exit; +- } +- if (priv->config.devx) { +-#ifndef HAVE_IBV_DEVX_ASYNC +- goto exit; +-#else +- sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx); +- if (sh->devx_comp) { +- int flags = fcntl(sh->devx_comp->fd, F_GETFL); +- int ret = fcntl(sh->devx_comp->fd, F_SETFL, +- flags | O_NONBLOCK); +- +- if (ret) { +- DRV_LOG(INFO, "failed to change file descriptor" +- " devx async event queue"); +- } else { +- sh->intr_handle_devx.fd = sh->devx_comp->fd; +- sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT; +- rte_intr_callback_register +- (&sh->intr_handle_devx, +- mlx5_dev_interrupt_handler_devx, sh); +- sh->devx_intr_cnt++; +- sh->port[priv->ibv_port - 1].devx_ih_port_id = +- (uint32_t)dev->data->port_id; +- } +- } +-#endif /* HAVE_IBV_DEVX_ASYNC */ +- } +-exit: +- pthread_mutex_unlock(&sh->intr_mutex); +-} +- +-/** +- * Uninstall interrupt handler. +- * +- * @param dev +- * Pointer to Ethernet device. +- */ +-void +-mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) +-{ +- mlx5_dev_shared_handler_uninstall(dev); +-} +- +-/** +- * Install interrupt handler. 
+- * +- * @param dev +- * Pointer to Ethernet device. +- */ +-void +-mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) +-{ +- mlx5_dev_shared_handler_install(dev); +-} +- +-/** +- * Devx uninstall interrupt handler. +- * +- * @param dev +- * Pointer to Ethernet device. +- */ +-void +-mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev) +-{ +- mlx5_dev_shared_handler_devx_uninstall(dev); +-} +- +-/** +- * Devx install interrupt handler. +- * +- * @param dev +- * Pointer to Ethernet device. +- */ +-void +-mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev) +-{ +- mlx5_dev_shared_handler_devx_install(dev); +-} +- + /** + * DPDK callback to bring the link DOWN. + * +@@ -2189,12 +1957,13 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev, + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +-int mlx5_hairpin_cap_get(struct rte_eth_dev *dev, +- struct rte_eth_hairpin_cap *cap) ++int ++mlx5_hairpin_cap_get(struct rte_eth_dev *dev, struct rte_eth_hairpin_cap *cap) + { + struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_dev_config *config = &priv->config; + +- if (priv->sh->devx == 0) { ++ if (!priv->sh->devx || !config->dest_tir || !config->dv_flow_en) { + rte_errno = ENOTSUP; + return -rte_errno; + } diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c -index 008716367c..e05c35a417 100644 +index 008716367c..74f17d3710 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -165,7 +165,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { @@ -22900,7 +49946,7 @@ index 008716367c..e05c35a417 100644 switch (feature) { case MLX5_HAIRPIN_RX: -@@ -383,29 +386,36 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, +@@ -383,30 +386,37 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, return REG_C_0; } break; @@ -22941,12 +49987,14 @@ index 008716367c..e05c35a417 100644 - REG_C_4; - else - start_reg = REG_C_2; +- if (id > (REG_C_7 - start_reg)) + start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 : + (priv->mtr_reg_share ? REG_C_3 : REG_C_4); + skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2); - if (id > (REG_C_7 - start_reg)) ++ if (id > (uint32_t)(REG_C_7 - start_reg)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); @@ -420,12 +430,16 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, * If the available index REG_C_y >= REG_C_x, skip the * color register. 
@@ -22957,7 +50005,7 @@ index 008716367c..e05c35a417 100644 - REG_NONE) + if (skip_mtr_reg && config->flow_mreg_c + [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { -+ if (id >= (REG_C_7 - start_reg)) ++ if (id >= (uint32_t)(REG_C_7 - start_reg)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); @@ -23054,7 +50102,40 @@ index 008716367c..e05c35a417 100644 if (attr->egress) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, -@@ -1634,7 +1654,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, +@@ -1101,6 +1121,7 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_rss *rss = action->conf; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); ++ enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED; + unsigned int i; + + if (action_flags & MLX5_FLOW_FATE_ACTIONS) +@@ -1159,6 +1180,8 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No queues configured"); + for (i = 0; i != rss->queue_num; ++i) { ++ struct mlx5_rxq_ctrl *rxq_ctrl; ++ + if (rss->queue[i] >= priv->rxqs_n) + return rte_flow_error_set + (error, EINVAL, +@@ -1168,6 +1191,15 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], "queue is not configured"); ++ rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]], ++ struct mlx5_rxq_ctrl, rxq); ++ if (i == 0) ++ rxq_type = rxq_ctrl->type; ++ if (rxq_type != rxq_ctrl->type) ++ return rte_flow_error_set ++ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ &rss->queue[i], ++ "combining hairpin and regular RSS queues is not supported"); + } + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, +@@ -1634,7 +1666,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, "\xff\xff\xff\xff\xff\xff\xff\xff", .vtc_flow = RTE_BE32(0xffffffff), .proto = 0xff, @@ -23062,7 +50143,7 @@ index 008716367c..e05c35a417 100644 }, }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); -@@ -1831,7 +1850,6 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, +@@ -1831,7 +1862,6 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; @@ -23070,7 +50151,7 @@ index 008716367c..e05c35a417 100644 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) -@@ -1858,23 +1876,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, +@@ -1858,23 +1888,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, return ret; if (spec) { memcpy(&id.vni[1], spec->vni, 3); @@ -23094,7 +50175,7 @@ index 008716367c..e05c35a417 100644 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, -@@ -1913,7 +1916,6 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, +@@ -1913,7 +1928,6 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; @@ -23102,7 +50183,7 @@ index 008716367c..e05c35a417 100644 if (!priv->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, -@@ -1951,22 +1953,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, +@@ -1951,22 +1965,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, "VxLAN-GPE protocol" " not supported"); 
memcpy(&id.vni[1], spec->vni, 3); @@ -23125,7 +50206,7 @@ index 008716367c..e05c35a417 100644 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, -@@ -2131,9 +2119,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, +@@ -2131,9 +2131,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, .protocol = RTE_BE16(UINT16_MAX), }; @@ -23136,7 +50217,31 @@ index 008716367c..e05c35a417 100644 return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 Geneve is not enabled by device" -@@ -2349,6 +2335,7 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, +@@ -2223,7 +2221,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, + /* MPLS over IP, UDP, GRE is allowed */ + if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4_UDP | +- MLX5_FLOW_LAYER_GRE))) ++ MLX5_FLOW_LAYER_GRE | ++ MLX5_FLOW_LAYER_GRE_KEY))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" +@@ -2244,11 +2243,12 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, + if (ret < 0) + return ret; + return 0; +-#endif ++#else + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "MPLS is not supported by Verbs, please" + " update."); ++#endif + } + + /** +@@ -2349,6 +2349,7 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, bool external __rte_unused, @@ -23144,7 +50249,7 @@ index 008716367c..e05c35a417 100644 struct rte_flow_error *error) { return rte_flow_error_set(error, ENOTSUP, -@@ -2463,6 +2450,8 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) +@@ -2463,6 +2464,8 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) * Pointer to the list of actions. * @param[in] external * This flow rule is created by request external to PMD. @@ -23153,7 +50258,7 @@ index 008716367c..e05c35a417 100644 * @param[out] error * Pointer to the error structure. * -@@ -2474,13 +2463,14 @@ flow_drv_validate(struct rte_eth_dev *dev, +@@ -2474,13 +2477,14 @@ flow_drv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], @@ -23170,7 +50275,7 @@ index 008716367c..e05c35a417 100644 } /** -@@ -2638,47 +2628,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +@@ -2638,47 +2642,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) fops->destroy(dev, flow); } @@ -23218,7 +50323,7 @@ index 008716367c..e05c35a417 100644 /** * Get RSS action from the action list. * -@@ -2723,7 +2672,44 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +@@ -2723,7 +2686,44 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) } /** @@ -23264,7 +50369,7 @@ index 008716367c..e05c35a417 100644 * * @param[in] actions * Pointer to the list of actions. -@@ -2732,18 +2718,38 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +@@ -2732,18 +2732,38 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) * @param[out] qrss_type * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned * if no QUEUE/RSS is found. 
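The returned total includes the terminating END action. 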
@@ -23305,7 +50410,7 @@ index 008716367c..e05c35a417 100644 case RTE_FLOW_ACTION_TYPE_QUEUE: case RTE_FLOW_ACTION_TYPE_RSS: *qrss = actions; -@@ -2753,6 +2759,8 @@ flow_parse_qrss_action(const struct rte_flow_action actions[], +@@ -2753,6 +2773,8 @@ flow_parse_qrss_action(const struct rte_flow_action actions[], } actions_n++; } @@ -23314,7 +50419,69 @@ index 008716367c..e05c35a417 100644 /* Count RTE_FLOW_ACTION_TYPE_END. */ return actions_n + 1; } -@@ -2958,18 +2966,21 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, +@@ -2790,10 +2812,10 @@ flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) + } + + /** +- * Check if the flow should be splited due to hairpin. ++ * Check if the flow should be split due to hairpin. + * The reason for the split is that in current HW we can't +- * support encap on Rx, so if a flow have encap we move it +- * to Tx. ++ * support encap and push-vlan on Rx, so if a flow contains ++ * these actions we move it to Tx. + * + * @param dev + * Pointer to Ethernet device. +@@ -2813,7 +2835,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + { + int queue_action = 0; + int action_n = 0; +- int encap = 0; ++ int split = 0; + const struct rte_flow_action_queue *queue; + const struct rte_flow_action_rss *rss; + const struct rte_flow_action_raw_encap *raw_encap; +@@ -2844,7 +2866,10 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: +- encap = 1; ++ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: ++ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: ++ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: ++ split++; + action_n++; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: +@@ -2852,7 +2877,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + if (raw_encap->size > + (sizeof(struct rte_flow_item_eth) + + sizeof(struct rte_flow_item_ipv4))) +- encap = 1; ++ split++; + action_n++; + break; + default: +@@ -2860,7 +2885,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + break; + } + } +- if (encap == 1 && queue_action) ++ if (split && queue_action) + return action_n; + return 0; + } +@@ -2922,7 +2947,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, + }; + struct mlx5_flow_action_copy_mreg cp_mreg = { + .dst = REG_B, +- .src = 0, ++ .src = REG_NONE, + }; + struct rte_flow_action_jump jump = { + .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, +@@ -2958,18 +2983,21 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, /* Build a new flow. */ if (mark_id != MLX5_DEFAULT_COPY_ID) { items[0] = (struct rte_flow_item){ @@ -23339,7 +50506,7 @@ index 008716367c..e05c35a417 100644 .conf = &cp_mreg, }; actions[2] = (struct rte_flow_action){ -@@ -2986,7 +2997,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, +@@ -2986,7 +3014,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, .type = RTE_FLOW_ITEM_TYPE_END, }; actions[0] = (struct rte_flow_action){ @@ -23349,7 +50516,27 @@ index 008716367c..e05c35a417 100644 .conf = &cp_mreg, }; actions[1] = (struct rte_flow_action){ -@@ -3360,7 +3372,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, +@@ -3263,7 +3292,8 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, + + /** + * Split the hairpin flow. +- * Since HW can't support encap on Rx we move the encap to Tx. ++ * Since HW can't support encap and push-vlan on Rx, we move these ++ * actions to Tx. + * If the count action is after the encap then we also + * move the count action. 
in this case the count will also measure + * the outer bytes. +@@ -3307,6 +3337,9 @@ flow_hairpin_split(struct rte_eth_dev *dev, + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: ++ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: ++ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: ++ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + rte_memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); + actions_tx++; +@@ -3360,7 +3393,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, } /* Add set meta action and end action for the Rx flow. */ tag_action = actions_rx; @@ -23359,7 +50546,7 @@ index 008716367c..e05c35a417 100644 actions_rx++; rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); actions_rx++; -@@ -3373,7 +3386,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, +@@ -3373,7 +3407,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); addr = (void *)&pattern_tx[2]; item = pattern_tx; @@ -23369,7 +50556,15 @@ index 008716367c..e05c35a417 100644 tag_item = (void *)addr; tag_item->data = *flow_id; tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); -@@ -3401,6 +3415,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, +@@ -3384,7 +3419,6 @@ flow_hairpin_split(struct rte_eth_dev *dev, + tag_item->data = UINT32_MAX; + tag_item->id = UINT16_MAX; + item->mask = tag_item; +- addr += sizeof(struct mlx5_rte_flow_item_tag); + item->last = NULL; + item++; + item->type = RTE_FLOW_ITEM_TYPE_END; +@@ -3401,6 +3435,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, * Parent flow structure pointer. * @param[in, out] sub_flow * Pointer to return the created subflow, may be NULL. @@ -23378,7 +50573,7 @@ index 008716367c..e05c35a417 100644 * @param[in] attr * Flow rule attributes. * @param[in] items -@@ -3418,6 +3434,7 @@ static int +@@ -3418,6 +3454,7 @@ static int flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow *flow, struct mlx5_flow **sub_flow, @@ -23386,7 +50581,7 @@ index 008716367c..e05c35a417 100644 const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], -@@ -3432,6 +3449,12 @@ flow_create_split_inner(struct rte_eth_dev *dev, +@@ -3432,6 +3469,12 @@ flow_create_split_inner(struct rte_eth_dev *dev, dev_flow->external = external; /* Subflow object was created, we must include one in the list. */ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); @@ -23399,7 +50594,7 @@ index 008716367c..e05c35a417 100644 if (sub_flow) *sub_flow = dev_flow; return flow_drv_translate(dev, dev_flow, attr, items, actions, error); -@@ -3451,6 +3474,10 @@ flow_create_split_inner(struct rte_eth_dev *dev, +@@ -3451,6 +3494,10 @@ flow_create_split_inner(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. @@ -23410,7 +50605,7 @@ index 008716367c..e05c35a417 100644 * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] actions_sfx -@@ -3467,66 +3494,61 @@ flow_create_split_inner(struct rte_eth_dev *dev, +@@ -3467,66 +3514,61 @@ flow_create_split_inner(struct rte_eth_dev *dev, */ static int flow_meter_split_prep(struct rte_eth_dev *dev, @@ -23506,7 +50701,7 @@ index 008716367c..e05c35a417 100644 } /* Add end action to the actions. 
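The suffix action list is terminated here; the tag id drawn from the qrss pool below lets the meter suffix flow share its id with the Q/RSS split. 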
*/ actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; -@@ -3539,8 +3561,47 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -3539,8 +3581,47 @@ flow_meter_split_prep(struct rte_eth_dev *dev, * Get the id from the qrss_pool to make qrss share the id with meter. */ tag_id = flow_qrss_get_id(dev); @@ -23555,7 +50750,7 @@ index 008716367c..e05c35a417 100644 return tag_id; } -@@ -3640,7 +3701,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, +@@ -3640,7 +3721,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, /* Construct new actions array. */ /* Replace QUEUE/RSS action. */ split_actions[qrss_idx] = (struct rte_flow_action){ @@ -23565,7 +50760,7 @@ index 008716367c..e05c35a417 100644 .conf = set_tag, }; } -@@ -3673,6 +3735,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, +@@ -3673,6 +3755,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * Number of actions in the list. * @param[out] error * Perform verbose error reporting if not NULL. @@ -23574,7 +50769,7 @@ index 008716367c..e05c35a417 100644 * * @return * 0 on success, negative value otherwise -@@ -3681,7 +3745,8 @@ static int +@@ -3681,7 +3765,8 @@ static int flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, struct rte_flow_action *ext_actions, const struct rte_flow_action *actions, @@ -23584,7 +50779,7 @@ index 008716367c..e05c35a417 100644 { struct mlx5_flow_action_copy_mreg *cp_mreg = (struct mlx5_flow_action_copy_mreg *) -@@ -3696,15 +3761,26 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, +@@ -3696,15 +3781,26 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, if (ret < 0) return ret; cp_mreg->src = ret; @@ -23620,7 +50815,7 @@ index 008716367c..e05c35a417 100644 return 0; } -@@ -3722,6 +3798,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, +@@ -3722,6 +3818,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. @@ -23629,7 +50824,7 @@ index 008716367c..e05c35a417 100644 * @param[in] attr * Flow rule attributes. * @param[in] items -@@ -3738,6 +3816,7 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, +@@ -3738,6 +3836,7 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, static int flow_create_split_metadata(struct rte_eth_dev *dev, struct rte_flow *flow, @@ -23637,7 +50832,7 @@ index 008716367c..e05c35a417 100644 const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], -@@ -3752,15 +3831,18 @@ flow_create_split_metadata(struct rte_eth_dev *dev, +@@ -3752,15 +3851,18 @@ flow_create_split_metadata(struct rte_eth_dev *dev, int mtr_sfx = 0; size_t act_size; int actions_n; @@ -23659,7 +50854,7 @@ index 008716367c..e05c35a417 100644 if (qrss) { /* Exclude hairpin flows from splitting. */ if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { -@@ -3807,6 +3889,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, +@@ -3807,6 +3909,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, RTE_FLOW_ACTION_TYPE_VOID; else ext_actions[qrss - actions].type = @@ -23667,7 +50862,7 @@ index 008716367c..e05c35a417 100644 MLX5_RTE_FLOW_ACTION_TYPE_TAG; /* * Create the new actions list with removed Q/RSS action -@@ -3835,14 +3918,14 @@ flow_create_split_metadata(struct rte_eth_dev *dev, +@@ -3835,14 +3938,14 @@ flow_create_split_metadata(struct rte_eth_dev *dev, "metadata flow"); /* Create the action list appended with copy register. 
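The appended copy action, built by flow_mreg_tx_copy_prep() above, keeps the metadata register contents intact across the split. 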
*/ ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, @@ -23686,7 +50881,12 @@ index 008716367c..e05c35a417 100644 if (ret < 0) goto exit; assert(dev_flow); -@@ -3858,7 +3941,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, +@@ -3854,11 +3957,12 @@ flow_create_split_metadata(struct rte_eth_dev *dev, + /* Internal PMD action to set register. */ + struct mlx5_rte_flow_item_tag q_tag_spec = { + .data = qrss_id, +- .id = 0, ++ .id = REG_NONE, }; struct rte_flow_item q_items[] = { { @@ -23696,7 +50896,7 @@ index 008716367c..e05c35a417 100644 .spec = &q_tag_spec, .last = NULL, .mask = NULL, -@@ -3876,7 +3960,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, +@@ -3876,7 +3980,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, .type = RTE_FLOW_ACTION_TYPE_END, }, }; @@ -23705,7 +50905,7 @@ index 008716367c..e05c35a417 100644 /* * Configure the tag item only if there is no meter subflow. -@@ -3903,14 +3987,13 @@ flow_create_split_metadata(struct rte_eth_dev *dev, +@@ -3903,14 +4007,13 @@ flow_create_split_metadata(struct rte_eth_dev *dev, } dev_flow = NULL; /* Add suffix subflow to execute Q/RSS. */ @@ -23721,7 +50921,7 @@ index 008716367c..e05c35a417 100644 } exit: -@@ -3963,7 +4046,6 @@ flow_create_split_meter(struct rte_eth_dev *dev, +@@ -3963,7 +4066,6 @@ flow_create_split_meter(struct rte_eth_dev *dev, struct rte_flow_action *sfx_actions = NULL; struct rte_flow_action *pre_actions = NULL; struct rte_flow_item *sfx_items = NULL; @@ -23729,7 +50929,7 @@ index 008716367c..e05c35a417 100644 struct mlx5_flow *dev_flow = NULL; struct rte_flow_attr sfx_attr = *attr; uint32_t mtr = 0; -@@ -3976,63 +4058,47 @@ flow_create_split_meter(struct rte_eth_dev *dev, +@@ -3976,63 +4078,47 @@ flow_create_split_meter(struct rte_eth_dev *dev, if (priv->mtr_en) actions_n = flow_check_meter_action(actions, &mtr); if (mtr) { @@ -23808,7 +51008,7 @@ index 008716367c..e05c35a417 100644 sfx_items ? sfx_items : items, sfx_actions ? sfx_actions : actions, external, error); -@@ -4146,14 +4212,18 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, +@@ -4146,14 +4232,18 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, } items_tx; struct rte_flow_expand_rss *buf = &expand_buffer.buf; const struct rte_flow_action *p_actions_rx = actions; @@ -23829,7 +51029,7 @@ index 008716367c..e05c35a417 100644 if (hairpin_flow > 0) { if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { rte_errno = EINVAL; -@@ -4164,10 +4234,6 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, +@@ -4164,10 +4254,6 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, &hairpin_id); p_actions_rx = actions_rx.actions; } @@ -23840,7 +51040,7 @@ index 008716367c..e05c35a417 100644 flow_size = sizeof(struct rte_flow); rss = flow_get_rss_action(p_actions_rx); if (rss) -@@ -4334,6 +4400,26 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) +@@ -4334,6 +4420,26 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) actions, false, &error); } @@ -23867,7 +51067,7 @@ index 008716367c..e05c35a417 100644 /** * Create a flow. 
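Creation goes through flow_list_create(), which validates, handles hairpin flows, and applies the metadata/meter splits before the driver ops run. 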
* -@@ -4518,7 +4604,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, +@@ -4518,7 +4624,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, }; struct rte_flow_item items[] = { { @@ -23877,7 +51077,7 @@ index 008716367c..e05c35a417 100644 .spec = &queue_spec, .last = NULL, .mask = &queue_mask, -@@ -4623,6 +4710,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, +@@ -4623,6 +4730,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, if (!priv->reta_idx_n || !priv->rxqs_n) { return 0; } @@ -23886,7 +51086,27 @@ index 008716367c..e05c35a417 100644 for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; flow = flow_list_create(dev, &priv->ctrl_flows, -@@ -5570,6 +5659,8 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, +@@ -5498,6 +5607,11 @@ mlx5_flow_query_alarm(void *arg) + goto set_alarm; + dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read + (&pool->a64_dcs); ++ if (dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) { ++ /* Pool without valid counter. */ ++ pool->raw_hw = NULL; ++ goto next_pool; ++ } + offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; + ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - + offset, NULL, NULL, +@@ -5515,6 +5629,7 @@ mlx5_flow_query_alarm(void *arg) + pool->raw_hw->min_dcs_id = dcs->id; + LIST_REMOVE(pool->raw_hw, next); + sh->cmng.pending_queries++; ++next_pool: + pool_index++; + if (pool_index >= rte_atomic16_read(&cont->n_valid)) { + batch ^= 0x1; +@@ -5570,6 +5685,8 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, * Value is part of flow rule created by request external to PMD. * @param[in] group * rte_flow group index value. @@ -23895,7 +51115,7 @@ index 008716367c..e05c35a417 100644 * @param[out] table * HW table value. * @param[out] error -@@ -5580,10 +5671,10 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, +@@ -5580,10 +5697,10 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, */ int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external, @@ -23908,7 +51128,7 @@ index 008716367c..e05c35a417 100644 if (group == UINT32_MAX) return rte_flow_error_set (error, EINVAL, -@@ -5633,7 +5724,8 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) +@@ -5633,7 +5750,8 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) }; struct rte_flow_action actions[] = { [0] = { @@ -23919,7 +51139,7 @@ index 008716367c..e05c35a417 100644 .src = REG_C_1, .dst = idx, diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h -index 3fff5dd7da..f8046119ec 100644 +index 3fff5dd7da..4300e62fad 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.h +++ b/dpdk/drivers/net/mlx5/mlx5_flow.h @@ -33,6 +33,7 @@ enum mlx5_rte_flow_item_type { @@ -24019,8 +51239,8 @@ index 3fff5dd7da..f8046119ec 100644 #define MLX5_GENEVE_OPT_LEN_0 14 #define MLX5_GENEVE_OPT_LEN_1 63 -+#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \ -+ sizeof(struct rte_flow_item_ipv4)) ++#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \ ++ sizeof(struct rte_ipv4_hdr)) + +/* Software header modify action numbers of a flow. 
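These MLX5_ACT_NUM_* constants give each action type its own bound on the number of modify-header commands it may emit. 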
*/ +#define MLX5_ACT_NUM_MDF_IPV4 1 @@ -24118,10 +51338,18 @@ index 3fff5dd7da..f8046119ec 100644 uint32_t id, struct rte_flow_error *error); diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -index 73aaea4536..d83e49f954 100644 +index 73aaea4536..a1b805c105 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -@@ -51,8 +51,6 @@ +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + + #include "mlx5.h" + #include "mlx5_defs.h" +@@ -51,8 +52,6 @@ #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1 #endif @@ -24130,7 +51358,7 @@ index 73aaea4536..d83e49f954 100644 /* VLAN header definitions */ #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) -@@ -72,6 +70,10 @@ union flow_dv_attr { +@@ -72,6 +71,10 @@ union flow_dv_attr { uint32_t attr; }; @@ -24141,7 +51369,7 @@ index 73aaea4536..d83e49f954 100644 /** * Initialize flow attributes structure according to flow items' types. * -@@ -82,19 +84,74 @@ union flow_dv_attr { +@@ -82,19 +85,74 @@ union flow_dv_attr { * Pointer to item specification. * @param[out] attr * Pointer to flow attributes structure. @@ -24217,7 +51445,25 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ITEM_TYPE_UDP: if (!attr->tcp) -@@ -363,7 +420,7 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, +@@ -222,7 +280,7 @@ flow_dv_shared_lock(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + +- if (sh->dv_refcnt > 1) { ++ if (sh->refcnt > 1) { + int ret; + + ret = pthread_mutex_lock(&sh->dv_mutex); +@@ -237,7 +295,7 @@ flow_dv_shared_unlock(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + +- if (sh->dv_refcnt > 1) { ++ if (sh->refcnt > 1) { + int ret; + + ret = pthread_mutex_unlock(&sh->dv_mutex); +@@ -363,7 +421,7 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, uint32_t mask; uint32_t data; @@ -24226,7 +51472,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many items to modify"); -@@ -380,10 +437,12 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, +@@ -380,10 +438,12 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, off_b - __builtin_clz(mask); assert(size_b); size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b; @@ -24243,7 +51489,7 @@ index 73aaea4536..d83e49f954 100644 /* Convert entire record to expected big-endian format. 
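The command words are assembled in host byte order above and byte-swapped exactly once here. 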
*/ actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); if (type == MLX5_MODIFICATION_TYPE_COPY) { -@@ -404,11 +463,11 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, +@@ -404,11 +464,11 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, ++i; ++field; } while (field->size); @@ -24257,7 +51503,7 @@ index 73aaea4536..d83e49f954 100644 return 0; } -@@ -566,17 +625,19 @@ flow_dv_convert_action_modify_vlan_vid +@@ -566,17 +626,19 @@ flow_dv_convert_action_modify_vlan_vid const struct rte_flow_action_of_set_vlan_vid *conf = (const struct rte_flow_action_of_set_vlan_vid *)(action->conf); int i = resource->actions_num; @@ -24283,7 +51529,7 @@ index 73aaea4536..d83e49f954 100644 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); actions[i].data1 = conf->vlan_vid; actions[i].data1 = actions[i].data1 << 16; -@@ -595,6 +656,10 @@ flow_dv_convert_action_modify_vlan_vid +@@ -595,6 +657,10 @@ flow_dv_convert_action_modify_vlan_vid * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. @@ -24294,7 +51540,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to the error structure. * -@@ -606,8 +671,8 @@ flow_dv_convert_action_modify_tp +@@ -606,8 +672,8 @@ flow_dv_convert_action_modify_tp (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_action *action, const struct rte_flow_item *items, @@ -24305,7 +51551,7 @@ index 73aaea4536..d83e49f954 100644 { const struct rte_flow_action_set_tp *conf = (const struct rte_flow_action_set_tp *)(action->conf); -@@ -619,7 +684,7 @@ flow_dv_convert_action_modify_tp +@@ -619,7 +685,7 @@ flow_dv_convert_action_modify_tp struct field_modify_info *field; if (!attr->valid) @@ -24314,7 +51560,7 @@ index 73aaea4536..d83e49f954 100644 if (attr->udp) { memset(&udp, 0, sizeof(udp)); memset(&udp_mask, 0, sizeof(udp_mask)); -@@ -636,8 +701,8 @@ flow_dv_convert_action_modify_tp +@@ -636,8 +702,8 @@ flow_dv_convert_action_modify_tp item.spec = &udp; item.mask = &udp_mask; field = modify_udp; @@ -24325,7 +51571,7 @@ index 73aaea4536..d83e49f954 100644 memset(&tcp, 0, sizeof(tcp)); memset(&tcp_mask, 0, sizeof(tcp_mask)); if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { -@@ -669,6 +734,10 @@ flow_dv_convert_action_modify_tp +@@ -669,6 +735,10 @@ flow_dv_convert_action_modify_tp * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. @@ -24336,7 +51582,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to the error structure. 
* -@@ -680,8 +749,8 @@ flow_dv_convert_action_modify_ttl +@@ -680,8 +750,8 @@ flow_dv_convert_action_modify_ttl (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_action *action, const struct rte_flow_item *items, @@ -24347,7 +51593,7 @@ index 73aaea4536..d83e49f954 100644 { const struct rte_flow_action_set_ttl *conf = (const struct rte_flow_action_set_ttl *)(action->conf); -@@ -693,7 +762,7 @@ flow_dv_convert_action_modify_ttl +@@ -693,7 +763,7 @@ flow_dv_convert_action_modify_ttl struct field_modify_info *field; if (!attr->valid) @@ -24356,7 +51602,7 @@ index 73aaea4536..d83e49f954 100644 if (attr->ipv4) { memset(&ipv4, 0, sizeof(ipv4)); memset(&ipv4_mask, 0, sizeof(ipv4_mask)); -@@ -703,8 +772,8 @@ flow_dv_convert_action_modify_ttl +@@ -703,8 +773,8 @@ flow_dv_convert_action_modify_ttl item.spec = &ipv4; item.mask = &ipv4_mask; field = modify_ipv4; @@ -24367,7 +51613,7 @@ index 73aaea4536..d83e49f954 100644 memset(&ipv6, 0, sizeof(ipv6)); memset(&ipv6_mask, 0, sizeof(ipv6_mask)); ipv6.hdr.hop_limits = conf->ttl_value; -@@ -729,6 +798,10 @@ flow_dv_convert_action_modify_ttl +@@ -729,6 +799,10 @@ flow_dv_convert_action_modify_ttl * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. @@ -24378,7 +51624,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to the error structure. * -@@ -739,8 +812,8 @@ static int +@@ -739,8 +813,8 @@ static int flow_dv_convert_action_modify_dec_ttl (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_item *items, @@ -24389,7 +51635,7 @@ index 73aaea4536..d83e49f954 100644 { struct rte_flow_item item; struct rte_flow_item_ipv4 ipv4; -@@ -750,7 +823,7 @@ flow_dv_convert_action_modify_dec_ttl +@@ -750,7 +824,7 @@ flow_dv_convert_action_modify_dec_ttl struct field_modify_info *field; if (!attr->valid) @@ -24398,7 +51644,7 @@ index 73aaea4536..d83e49f954 100644 if (attr->ipv4) { memset(&ipv4, 0, sizeof(ipv4)); memset(&ipv4_mask, 0, sizeof(ipv4_mask)); -@@ -760,8 +833,8 @@ flow_dv_convert_action_modify_dec_ttl +@@ -760,8 +834,8 @@ flow_dv_convert_action_modify_dec_ttl item.spec = &ipv4; item.mask = &ipv4_mask; field = modify_ipv4; @@ -24409,7 +51655,7 @@ index 73aaea4536..d83e49f954 100644 memset(&ipv6, 0, sizeof(ipv6)); memset(&ipv6_mask, 0, sizeof(ipv6_mask)); ipv6.hdr.hop_limits = 0xFF; -@@ -902,22 +975,20 @@ flow_dv_convert_action_set_reg +@@ -902,22 +976,20 @@ flow_dv_convert_action_set_reg struct mlx5_modification_cmd *actions = resource->actions; uint32_t i = resource->actions_num; @@ -24419,9 +51665,10 @@ index 73aaea4536..d83e49f954 100644 RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many items to modify"); assert(conf->id != REG_NONE); - assert(conf->id < RTE_DIM(reg_to_field)); +- assert(conf->id < RTE_DIM(reg_to_field)); - actions[i].action_type = MLX5_MODIFICATION_TYPE_SET; - actions[i].field = reg_to_field[conf->id]; ++ assert(conf->id < (enum modify_reg)RTE_DIM(reg_to_field)); + actions[i] = (struct mlx5_modification_cmd) { + .action_type = MLX5_MODIFICATION_TYPE_SET, + .field = reg_to_field[conf->id], @@ -24437,19 +51684,24 @@ index 73aaea4536..d83e49f954 100644 return 0; } -@@ -1078,7 +1149,7 @@ flow_dv_convert_action_mark(struct rte_eth_dev *dev, - {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */ - {0, 0, 0}, +@@ -1075,10 +1147,9 @@ flow_dv_convert_action_mark(struct rte_eth_dev *dev, + .mask = &mask, + }; + struct field_modify_info reg_c_x[] = { +- {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. 
*/ +- {0, 0, 0}, ++ [1] = {0, 0, 0}, }; - enum modify_reg reg; + int reg; if (!mask) return rte_flow_error_set(error, EINVAL, -@@ -1088,6 +1159,14 @@ flow_dv_convert_action_mark(struct rte_eth_dev *dev, +@@ -1088,7 +1159,15 @@ flow_dv_convert_action_mark(struct rte_eth_dev *dev, if (reg < 0) return reg; assert(reg > 0); +- reg_c_x[0].id = reg_to_field[reg]; + if (reg == REG_C_0) { + uint32_t msk_c0 = priv->sh->dv_regc0_mask; + uint32_t shl_c0 = rte_bsf32(msk_c0); @@ -24458,9 +51710,10 @@ index 73aaea4536..d83e49f954 100644 + mask = rte_cpu_to_be_32(mask) & msk_c0; + mask = rte_cpu_to_be_32(mask << shl_c0); + } - reg_c_x[0].id = reg_to_field[reg]; ++ reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]}; return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, MLX5_MODIFICATION_TYPE_SET, error); + } @@ -1112,7 +1191,7 @@ flow_dv_get_metadata_reg(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) @@ -24511,7 +51764,17 @@ index 73aaea4536..d83e49f954 100644 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { if (!mlx5_flow_ext_mreg_supported(dev)) return rte_flow_error_set(error, ENOTSUP, -@@ -1318,6 +1398,11 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, +@@ -1315,9 +1395,21 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, + "isn't supported"); + if (reg != REG_A) + nic_mask.data = priv->sh->dv_meta_mask; ++ } else if (attr->transfer) { ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ITEM, item, ++ "extended metadata feature " ++ "should be enabled when " ++ "meta item is requested " ++ "with e-switch mode "); } if (!mask) mask = &rte_flow_item_meta_mask; @@ -24523,7 +51786,7 @@ index 73aaea4536..d83e49f954 100644 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_meta), -@@ -1366,6 +1451,11 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, +@@ -1366,6 +1458,11 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, "data cannot be empty"); if (!mask) mask = &rte_flow_item_tag_mask; @@ -24535,7 +51798,7 @@ index 73aaea4536..d83e49f954 100644 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_tag), -@@ -1465,6 +1555,79 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev, +@@ -1465,6 +1562,79 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev, return 0; } @@ -24615,7 +51878,7 @@ index 73aaea4536..d83e49f954 100644 /** * Validate the pop VLAN action. * -@@ -1492,7 +1655,7 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, +@@ -1492,7 +1662,7 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) { @@ -24624,7 +51887,7 @@ index 73aaea4536..d83e49f954 100644 (void)action; (void)attr; -@@ -1501,17 +1664,16 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, +@@ -1501,19 +1671,28 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "pop vlan action is not supported"); @@ -24648,9 +51911,22 @@ index 73aaea4536..d83e49f954 100644 + RTE_FLOW_ERROR_TYPE_ACTION, action, + "no support for multiple VLAN " "actions"); - if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) +- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ++ /* Pop VLAN with preceding Decap requires inner header with VLAN. 
*/ ++ if ((action_flags & MLX5_FLOW_ACTION_DECAP) && ++ !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, ++ "cannot pop vlan after decap without " ++ "match on inner vlan in the flow"); ++ /* Pop VLAN without preceding Decap requires outer header with VLAN. */ ++ if (!(action_flags & MLX5_FLOW_ACTION_DECAP) && ++ !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) return rte_flow_error_set(error, ENOTSUP, -@@ -1524,20 +1686,21 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +@@ -1524,20 +1703,21 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, port_id should " "be after pop VLAN action"); @@ -24677,7 +51953,7 @@ index 73aaea4536..d83e49f954 100644 * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. -@@ -1554,19 +1717,26 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, +@@ -1554,19 +1734,26 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, if (items == NULL) return; @@ -24709,7 +51985,7 @@ index 73aaea4536..d83e49f954 100644 vlan->vlan_tci |= rte_be_to_cpu_16(vlan_v->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE); -@@ -1587,10 +1757,14 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, +@@ -1587,10 +1774,14 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, /** * Validate the push VLAN action. * @@ -24725,7 +52001,7 @@ index 73aaea4536..d83e49f954 100644 * @param[in] attr * Pointer to flow attributes * @param[out] error -@@ -1600,38 +1774,68 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, +@@ -1600,38 +1791,57 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int @@ -24741,12 +52017,6 @@ index 73aaea4536..d83e49f954 100644 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; + const struct mlx5_priv *priv = dev->data->dev_private; -+ if (!attr->transfer && attr->ingress) -+ return rte_flow_error_set(error, ENOTSUP, -+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, -+ NULL, -+ "push VLAN action not supported for " -+ "ingress"); if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) return rte_flow_error_set(error, EINVAL, @@ -24754,11 +52024,10 @@ index 73aaea4536..d83e49f954 100644 "invalid vlan ethertype"); - if (action_flags & - (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN)) -+ if (action_flags & MLX5_FLOW_VLAN_ACTIONS) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, action, - "no support for multiple VLAN " - "actions"); +- return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION, action, +- "no support for multiple VLAN " +- "actions"); - if (!mlx5_flow_find_action - (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) && - !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) @@ -24806,7 +52075,7 @@ index 73aaea4536..d83e49f954 100644 (void)attr; return 0; } -@@ -1643,8 +1847,6 @@ flow_dv_validate_action_push_vlan(uint64_t action_flags, +@@ -1643,8 +1853,6 @@ flow_dv_validate_action_push_vlan(uint64_t action_flags, * Holds the actions detected until now. * @param[in] actions * Pointer to the list of actions remaining in the flow rule. @@ -24815,7 +52084,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to error structure. 
* -@@ -1686,10 +1888,10 @@ flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags, +@@ -1686,10 +1894,10 @@ flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags, * * @param[in] item_flags * Holds the items detected in this rule. @@ -24828,7 +52097,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to error structure. * -@@ -1705,37 +1907,21 @@ flow_dv_validate_action_set_vlan_vid(uint64_t item_flags, +@@ -1705,37 +1913,21 @@ flow_dv_validate_action_set_vlan_vid(uint64_t item_flags, const struct rte_flow_action *action = actions; const struct rte_flow_action_of_set_vlan_vid *conf = action->conf; @@ -24873,7 +52142,7 @@ index 73aaea4536..d83e49f954 100644 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, -@@ -1788,10 +1974,6 @@ flow_dv_validate_action_flag(struct rte_eth_dev *dev, +@@ -1788,10 +1980,6 @@ flow_dv_validate_action_flag(struct rte_eth_dev *dev, if (ret < 0) return ret; assert(ret > 0); @@ -24884,7 +52153,7 @@ index 73aaea4536..d83e49f954 100644 if (action_flags & MLX5_FLOW_ACTION_MARK) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -1861,10 +2043,6 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev, +@@ -1861,10 +2049,6 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &mark->id, "mark id exceeds the limit"); @@ -24895,7 +52164,7 @@ index 73aaea4536..d83e49f954 100644 if (action_flags & MLX5_FLOW_ACTION_FLAG) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -1883,7 +2061,7 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev, +@@ -1883,7 +2067,7 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev, * @param[in] dev * Pointer to the rte_eth_dev structure. * @param[in] action @@ -24904,7 +52173,7 @@ index 73aaea4536..d83e49f954 100644 * @param[in] action_flags * Holds the actions detected until now. * @param[in] attr -@@ -1903,7 +2081,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, +@@ -1903,7 +2087,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, { const struct rte_flow_action_set_meta *conf; uint32_t nic_mask = UINT32_MAX; @@ -24913,7 +52182,7 @@ index 73aaea4536..d83e49f954 100644 if (!mlx5_flow_ext_mreg_supported(dev)) return rte_flow_error_set(error, ENOTSUP, -@@ -1931,10 +2109,6 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, +@@ -1931,10 +2115,6 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "meta data must be within reg C0"); @@ -24924,7 +52193,7 @@ index 73aaea4536..d83e49f954 100644 return 0; } -@@ -1944,7 +2118,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, +@@ -1944,7 +2124,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, * @param[in] dev * Pointer to the rte_eth_dev structure. * @param[in] action @@ -24933,7 +52202,7 @@ index 73aaea4536..d83e49f954 100644 * @param[in] action_flags * Holds the actions detected until now. * @param[in] attr -@@ -1998,7 +2172,7 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, +@@ -1998,7 +2178,7 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, * Validate count action. * * @param[in] dev @@ -24942,7 +52211,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to error structure. 
* -@@ -2027,12 +2201,14 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, +@@ -2027,12 +2207,14 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, /** * Validate the L2 encap action. * @@ -24959,7 +52228,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to error structure. * -@@ -2040,36 +2216,36 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, +@@ -2040,38 +2222,42 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int @@ -25009,8 +52278,14 @@ index 73aaea4536..d83e49f954 100644 + * Pointer to the rte_eth_dev structure. * @param[in] action_flags * Holds the actions detected until now. ++ * @param[in] action ++ * Pointer to the action structure. ++ * @param[in] item_flags ++ * Holds the items detected. * @param[in] attr -@@ -2081,19 +2257,20 @@ flow_dv_validate_action_l2_encap(uint64_t action_flags, + * Pointer to flow attributes + * @param[out] error +@@ -2081,19 +2267,22 @@ flow_dv_validate_action_l2_encap(uint64_t action_flags, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int @@ -25019,6 +52294,8 @@ index 73aaea4536..d83e49f954 100644 - struct rte_flow_error *error) +flow_dv_validate_action_decap(struct rte_eth_dev *dev, + uint64_t action_flags, ++ const struct rte_flow_action *action, ++ const uint64_t item_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) { @@ -25042,22 +52319,40 @@ index 73aaea4536..d83e49f954 100644 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -2105,69 +2282,31 @@ flow_dv_validate_action_l2_decap(uint64_t action_flags, +@@ -2105,69 +2294,40 @@ flow_dv_validate_action_l2_decap(uint64_t action_flags, NULL, "decap action not supported for " "egress"); -- return 0; --} -- --/** ++ if (!attr->transfer && priv->representor) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "decap action for VF representor " ++ "not supported on NIC table"); ++ if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP && ++ !(item_flags & MLX5_FLOW_LAYER_VXLAN)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "VXLAN item should be present for VXLAN decap"); + return 0; + } + ++const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; ++ + /** - * Validate the raw encap action. -- * ++ * Validate the raw encap and decap actions. + * - * @param[in] action_flags - * Holds the actions detected until now. - * @param[in] action -- * Pointer to the encap action. -- * @param[in] attr -- * Pointer to flow attributes ++ * @param[in] dev ++ * Pointer to the rte_eth_dev structure. ++ * @param[in] decap ++ * Pointer to the decap action. ++ * @param[in] encap + * Pointer to the encap action. + * @param[in] attr + * Pointer to flow attributes - * @param[out] error - * Pointer to error structure. 
- * @@ -25088,8 +52383,7 @@ index 73aaea4536..d83e49f954 100644 - /* encap without preceding decap is not supported for ingress */ - if (!attr->transfer && attr->ingress && - !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP)) -+ if (!attr->transfer && priv->representor) - return rte_flow_error_set(error, ENOTSUP, +- return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "encap action not supported for " @@ -25098,37 +52392,28 @@ index 73aaea4536..d83e49f954 100644 - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, action, - "raw encap data cannot be empty"); -+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "decap action for VF representor " -+ "not supported on NIC table"); - return 0; - } - -+const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; -+ - /** +- return 0; +-} +- +-/** - * Validate the raw decap action. -+ * Validate the raw encap and decap actions. - * +- * - * @param[in] action_flags -- * Holds the actions detected until now. -- * @param[in] action -+ * @param[in] dev -+ * Pointer to the rte_eth_dev structure. -+ * @param[in] decap -+ * Pointer to the decap action. -+ * @param[in] encap - * Pointer to the encap action. - * @param[in] attr - * Pointer to flow attributes + * @param[in/out] action_flags -+ * Holds the actions detected until now. + * Holds the actions detected until now. + * @param[out] actions_n + * pointer to the number of actions counter. + * @param[in] action +- * Pointer to the encap action. +- * @param[in] attr +- * Pointer to flow attributes ++ * Pointer to the action structure. ++ * @param[in] item_flags ++ * Holds the items detected. * @param[out] error * Pointer to error structure. * -@@ -2175,41 +2314,72 @@ flow_dv_validate_action_raw_encap(uint64_t action_flags, +@@ -2175,41 +2335,73 @@ flow_dv_validate_action_raw_encap(uint64_t action_flags, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int @@ -25143,7 +52428,8 @@ index 73aaea4536..d83e49f954 100644 + const struct rte_flow_action_raw_decap *decap, + const struct rte_flow_action_raw_encap *encap, + const struct rte_flow_attr *attr, uint64_t *action_flags, -+ int *actions_n, struct rte_flow_error *error) ++ int *actions_n, const struct rte_flow_action *action, ++ uint64_t item_flags, struct rte_flow_error *error) +{ + const struct mlx5_priv *priv = dev->data->dev_private; + int ret; @@ -25204,8 +52490,8 @@ index 73aaea4536..d83e49f954 100644 + "encap combination"); + } + if (decap) { -+ ret = flow_dv_validate_action_decap(dev, *action_flags, attr, -+ error); ++ ret = flow_dv_validate_action_decap(dev, *action_flags, action, ++ item_flags, attr, error); + if (ret < 0) + return ret; + *action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -25233,7 +52519,7 @@ index 73aaea4536..d83e49f954 100644 } return 0; } -@@ -2248,7 +2418,6 @@ flow_dv_encap_decap_resource_register +@@ -2248,7 +2440,6 @@ flow_dv_encap_decap_resource_register domain = sh->rx_domain; else domain = sh->tx_domain; @@ -25241,7 +52527,7 @@ index 73aaea4536..d83e49f954 100644 /* Lookup a matching resource from cache. 
*/ LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) { if (resource->reformat_type == cache_resource->reformat_type && -@@ -2334,6 +2503,8 @@ flow_dv_jump_tbl_resource_register +@@ -2334,6 +2525,8 @@ flow_dv_jump_tbl_resource_register DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++", (void *)&tbl_data->jump, cnt); } else { @@ -25250,7 +52536,80 @@ index 73aaea4536..d83e49f954 100644 assert(tbl_data->jump.action); DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++", (void *)&tbl_data->jump, cnt); -@@ -2799,8 +2970,6 @@ flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, +@@ -2484,7 +2677,7 @@ flow_dv_push_vlan_action_resource_register + return 0; + } + /** +- * Get the size of specific rte_flow_item_type ++ * Get the size of specific rte_flow_item_type hdr size + * + * @param[in] item_type + * Tested rte_flow_item_type. +@@ -2493,43 +2686,39 @@ flow_dv_push_vlan_action_resource_register + * sizeof struct item_type, 0 if void or irrelevant. + */ + static size_t +-flow_dv_get_item_len(const enum rte_flow_item_type item_type) ++flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type) + { + size_t retval; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: +- retval = sizeof(struct rte_flow_item_eth); ++ retval = sizeof(struct rte_ether_hdr); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: +- retval = sizeof(struct rte_flow_item_vlan); ++ retval = sizeof(struct rte_vlan_hdr); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: +- retval = sizeof(struct rte_flow_item_ipv4); ++ retval = sizeof(struct rte_ipv4_hdr); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: +- retval = sizeof(struct rte_flow_item_ipv6); ++ retval = sizeof(struct rte_ipv6_hdr); + break; + case RTE_FLOW_ITEM_TYPE_UDP: +- retval = sizeof(struct rte_flow_item_udp); ++ retval = sizeof(struct rte_udp_hdr); + break; + case RTE_FLOW_ITEM_TYPE_TCP: +- retval = sizeof(struct rte_flow_item_tcp); ++ retval = sizeof(struct rte_tcp_hdr); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: +- retval = sizeof(struct rte_flow_item_vxlan); ++ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: ++ retval = sizeof(struct rte_vxlan_hdr); + break; + case RTE_FLOW_ITEM_TYPE_GRE: +- retval = sizeof(struct rte_flow_item_gre); +- break; + case RTE_FLOW_ITEM_TYPE_NVGRE: +- retval = sizeof(struct rte_flow_item_nvgre); +- break; +- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: +- retval = sizeof(struct rte_flow_item_vxlan_gpe); ++ retval = sizeof(struct rte_gre_hdr); + break; + case RTE_FLOW_ITEM_TYPE_MPLS: +- retval = sizeof(struct rte_flow_item_mpls); ++ retval = sizeof(struct rte_mpls_hdr); + break; + case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. 
*/ + default: +@@ -2582,7 +2771,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "invalid empty data"); + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { +- len = flow_dv_get_item_len(items->type); ++ len = flow_dv_get_item_hdr_len(items->type); + if (len + temp_size > MLX5_ENCAP_MAX_LEN) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, +@@ -2799,8 +2988,6 @@ flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, (const struct rte_flow_action_raw_encap *)action->conf; res.size = raw_encap_data->size; memcpy(res.buf, raw_encap_data->data, res.size); @@ -25259,7 +52618,7 @@ index 73aaea4536..d83e49f954 100644 } else { if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) encap_data = -@@ -2814,6 +2983,8 @@ flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, +@@ -2814,6 +3001,8 @@ flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, &res.size, error)) return -rte_errno; } @@ -25268,7 +52627,7 @@ index 73aaea4536..d83e49f954 100644 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -2907,12 +3078,12 @@ flow_dv_create_action_raw_encap(struct rte_eth_dev *dev, +@@ -2907,12 +3096,12 @@ flow_dv_create_action_raw_encap(struct rte_eth_dev *dev, * * @param[in] dev * Pointer to rte_eth_dev structure. @@ -25285,7 +52644,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to the error structure. * -@@ -2962,7 +3133,7 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags, +@@ -2962,7 +3151,7 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "action configuration not set"); @@ -25294,7 +52653,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have encap action before" -@@ -3026,10 +3197,14 @@ flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, +@@ -3026,10 +3215,14 @@ flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; @@ -25310,7 +52669,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -3060,10 +3235,14 @@ flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, +@@ -3060,10 +3253,14 @@ flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; @@ -25326,7 +52685,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -3094,10 +3273,14 @@ flow_dv_validate_action_modify_tp(const uint64_t action_flags, +@@ -3094,10 +3291,14 @@ flow_dv_validate_action_modify_tp(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; @@ -25342,7 +52701,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no transport layer " -@@ -3129,10 +3312,14 @@ flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags, +@@ -3129,10 +3330,14 @@ flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; @@ -25358,7 +52717,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no TCP item in" -@@ -3174,10 +3361,14 @@ flow_dv_validate_action_modify_tcp_ack(const uint64_t 
action_flags, +@@ -3174,10 +3379,14 @@ flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; @@ -25374,7 +52733,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no TCP item in" -@@ -3218,10 +3409,14 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags, +@@ -3218,10 +3427,14 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; @@ -25390,7 +52749,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -3273,7 +3468,7 @@ flow_dv_validate_action_jump(const struct rte_flow_action *action, +@@ -3273,7 +3486,7 @@ flow_dv_validate_action_jump(const struct rte_flow_action *action, target_group = ((const struct rte_flow_action_jump *)action->conf)->group; ret = mlx5_flow_group_to_table(attributes, external, target_group, @@ -25399,7 +52758,7 @@ index 73aaea4536..d83e49f954 100644 if (ret) return ret; if (attributes->group == target_group) -@@ -3359,21 +3554,24 @@ flow_dv_validate_action_port_id(struct rte_eth_dev *dev, +@@ -3359,21 +3572,24 @@ flow_dv_validate_action_port_id(struct rte_eth_dev *dev, * * @param dev * Pointer to rte_eth_dev structure. @@ -25432,7 +52791,7 @@ index 73aaea4536..d83e49f954 100644 } /** -@@ -3402,7 +3600,12 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, +@@ -3402,7 +3618,12 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_meter *am = action->conf; @@ -25446,7 +52805,7 @@ index 73aaea4536..d83e49f954 100644 if (action_flags & MLX5_FLOW_ACTION_METER) return rte_flow_error_set(error, ENOTSUP, -@@ -3417,6 +3620,7 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, +@@ -3417,6 +3638,7 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "meter action not supported"); @@ -25454,7 +52813,7 @@ index 73aaea4536..d83e49f954 100644 if (!fm) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -3458,8 +3662,12 @@ flow_dv_modify_hdr_resource_register +@@ -3458,8 +3680,12 @@ flow_dv_modify_hdr_resource_register struct mlx5_ibv_shared *sh = priv->sh; struct mlx5_flow_dv_modify_hdr_resource *cache_resource; struct mlx5dv_dr_domain *ns; @@ -25468,7 +52827,7 @@ index 73aaea4536..d83e49f954 100644 return rte_flow_error_set(error, EOVERFLOW, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many modify header items"); -@@ -3469,17 +3677,15 @@ flow_dv_modify_hdr_resource_register +@@ -3469,17 +3695,15 @@ flow_dv_modify_hdr_resource_register ns = sh->tx_domain; else ns = sh->rx_domain; @@ -25488,7 +52847,7 @@ index 73aaea4536..d83e49f954 100644 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); -@@ -3489,18 +3695,18 @@ flow_dv_modify_hdr_resource_register +@@ -3489,18 +3713,18 @@ flow_dv_modify_hdr_resource_register } } /* Register new modify-header resource. */ @@ -25512,7 +52871,7 @@ index 73aaea4536..d83e49f954 100644 (uint64_t *)cache_resource->actions); if (!cache_resource->verbs_action) { rte_free(cache_resource); -@@ -3846,11 +4052,13 @@ _flow_dv_query_count(struct rte_eth_dev *dev, +@@ -3846,11 +4070,13 @@ _flow_dv_query_count(struct rte_eth_dev *dev, * The devX counter handle. * @param[in] batch * Whether the pool is for counter that was allocated by batch command. 
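
The hunks below add flow_dv_counter_restore() and a retry loop in flow_dv_counter_pool_prepare(): a dcs object can only anchor a batch counter query when its ID is aligned to MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT, so counters allocated while min_dcs is misaligned are marked skipped and revived once a usable min_dcs appears. A minimal standalone sketch of that alignment and offset arithmetic follows; the EX_* values stand in for the PMD's MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT and MLX5_COUNTERS_PER_POOL and are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the real constants live in the mlx5 PMD headers. */
#define EX_CNT_BATCH_QUERY_ID_ALIGNMENT 4
#define EX_COUNTERS_PER_POOL 512

/* A dcs ID may serve as min_dcs only when it is 4-aligned. */
static int ex_dcs_id_usable(uint32_t id)
{
	return (id & (EX_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) == 0;
}

int main(void)
{
	const uint32_t ids[] = { 512, 513, 514, 516 };
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("dcs id %u -> pool offset %u, %s as min_dcs\n",
		       ids[i], ids[i] % EX_COUNTERS_PER_POOL,
		       ex_dcs_id_usable(ids[i]) ? "usable" : "skipped");
	return 0;
}
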
@@ -25528,7 +52887,7 @@ index 73aaea4536..d83e49f954 100644 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, uint32_t batch) { -@@ -3884,12 +4092,12 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, +@@ -3884,12 +4110,69 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, */ rte_atomic64_set(&pool->query_gen, 0x2); TAILQ_INIT(&pool->counters); @@ -25540,10 +52899,67 @@ index 73aaea4536..d83e49f954 100644 rte_atomic16_add(&cont->n_valid, 1); - return pool; + return cont; ++} ++/** ++ * Restore skipped counters in the pool. ++ * ++ * As counter pool query requires the first counter dcs ++ * ID start with 4 alinged, if the pool counters with ++ * min_dcs ID are not aligned with 4, the counters will ++ * be skipped. ++ * Once other min_dcs ID less than these skipped counter ++ * dcs ID appears, the skipped counters will be safe to ++ * use. ++ * Should be called when min_dcs is updated. ++ * ++ * @param[in] pool ++ * Current counter pool. ++ * @param[in] last_min_dcs ++ * Last min_dcs. ++ */ ++static void ++flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool, ++ struct mlx5_devx_obj *last_min_dcs) ++{ ++ struct mlx5_flow_counter *cnt; ++ uint32_t offset, new_offset; ++ uint32_t skip_cnt = 0; ++ uint32_t i; ++ ++ if (!pool->skip_cnt) ++ return; ++ /* ++ * If last min_dcs is not valid. The skipped counter may even after ++ * last min_dcs, set the offset to the whole pool. ++ */ ++ if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) ++ offset = MLX5_COUNTERS_PER_POOL; ++ else ++ offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL; ++ new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL; ++ /* ++ * Check the counters from 1 to the last_min_dcs range. Counters ++ * before new min_dcs indicates pool still has skipped counters. ++ * Counters be skipped after new min_dcs will be ready to use. ++ * Offset 0 counter must be empty or min_dcs, start from 1. ++ */ ++ for (i = 1; i < offset; i++) { ++ cnt = &pool->counters_raw[i]; ++ if (cnt->skipped) { ++ if (i > new_offset) { ++ cnt->skipped = 0; ++ TAILQ_INSERT_TAIL(&pool->counters, cnt, next); ++ } else { ++ skip_cnt++; ++ } ++ } ++ } ++ if (!skip_cnt) ++ pool->skip_cnt = 0; } /** -@@ -3903,33 +4111,35 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, +@@ -3903,42 +4186,77 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, * Whether the pool is for counter that was allocated by batch command. * * @return @@ -25561,11 +52977,13 @@ index 73aaea4536..d83e49f954 100644 + struct mlx5_pools_container *cont; struct mlx5_flow_counter_pool *pool; struct mlx5_devx_obj *dcs = NULL; ++ struct mlx5_devx_obj *last_min_dcs; struct mlx5_flow_counter *cnt; uint32_t i; + cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0); if (!batch) { ++retry: /* bulk_bitmap must be 0 for single counter allocation. */ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); if (!dcs) @@ -25581,20 +52999,54 @@ index 73aaea4536..d83e49f954 100644 mlx5_devx_cmd_destroy(dcs); return NULL; } +- } else if (dcs->id < pool->min_dcs->id) { + pool = TAILQ_FIRST(&cont->pool_list); - } else if (dcs->id < pool->min_dcs->id) { ++ } else if (((dcs->id < pool->min_dcs->id) || ++ pool->min_dcs->id & ++ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) && ++ !(dcs->id & ++ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) { ++ /* ++ * Update the pool min_dcs only if current dcs is ++ * valid and exist min_dcs is not valid or greater ++ * than new dcs. 
++ */ ++ last_min_dcs = pool->min_dcs; rte_atomic64_set(&pool->a64_dcs, (int64_t)(uintptr_t)dcs); -@@ -3938,7 +4148,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, - TAILQ_INSERT_HEAD(&pool->counters, cnt, next); ++ /* ++ * Restore any skipped counters if the new min_dcs ++ * ID is smaller or min_dcs is not valid. ++ */ ++ if (dcs->id < last_min_dcs->id || ++ last_min_dcs->id & ++ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) ++ flow_dv_counter_restore(pool, last_min_dcs); + } + cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL]; +- TAILQ_INSERT_HEAD(&pool->counters, cnt, next); cnt->dcs = dcs; ++ /* ++ * If min_dcs is not valid, it means the new allocated dcs ++ * also fail to become the valid min_dcs, just skip it. ++ * Or if min_dcs is valid, and new dcs ID is smaller than ++ * min_dcs, but not become the min_dcs, also skip it. ++ */ ++ if (pool->min_dcs->id & ++ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) || ++ dcs->id < pool->min_dcs->id) { ++ cnt->skipped = 1; ++ pool->skip_cnt = 1; ++ goto retry; ++ } ++ TAILQ_INSERT_HEAD(&pool->counters, cnt, next); *cnt_free = cnt; - return pool; + return cont; } /* bulk_bitmap is in 128 counters units. */ if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) -@@ -3947,18 +4157,19 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, +@@ -3947,18 +4265,19 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, rte_errno = ENODATA; return NULL; } @@ -25617,7 +53069,7 @@ index 73aaea4536..d83e49f954 100644 } /** -@@ -4059,9 +4270,10 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, +@@ -4059,9 +4378,10 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, cnt_free = NULL; } if (!cnt_free) { @@ -25630,7 +53082,7 @@ index 73aaea4536..d83e49f954 100644 } cnt_free->batch = batch; /* Create a DV counter action only in the first time usage. */ -@@ -4146,7 +4358,9 @@ flow_dv_counter_release(struct rte_eth_dev *dev, +@@ -4146,7 +4466,9 @@ flow_dv_counter_release(struct rte_eth_dev *dev, * Pointer to error structure. * * @return @@ -25641,7 +53093,7 @@ index 73aaea4536..d83e49f954 100644 */ static int flow_dv_validate_attributes(struct rte_eth_dev *dev, -@@ -4156,6 +4370,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, +@@ -4156,6 +4478,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; uint32_t priority_max = priv->config.flow_prio - 1; @@ -25649,7 +53101,7 @@ index 73aaea4536..d83e49f954 100644 #ifndef HAVE_MLX5DV_DR if (attributes->group) -@@ -4164,14 +4379,15 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, +@@ -4164,14 +4487,15 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, NULL, "groups are not supported"); #else @@ -25668,7 +53120,7 @@ index 73aaea4536..d83e49f954 100644 #endif if (attributes->priority != MLX5_FLOW_PRIO_RSVD && attributes->priority >= priority_max) -@@ -4201,7 +4417,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, +@@ -4201,7 +4525,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ATTR, NULL, "must specify exactly one of " "ingress or egress"); @@ -25677,7 +53129,7 @@ index 73aaea4536..d83e49f954 100644 } /** -@@ -4217,6 +4433,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, +@@ -4217,6 +4541,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, * Pointer to the list of actions. * @param[in] external * This flow rule is created by request external to PMD. 
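
The flow_dv_validate() changes below start charging each modify-header action a per-field cost accumulated in rw_act_num and checked against the device limit. A sketch of that accounting follows; MLX5_ACT_NUM_MDF_IPV4 is defined as 1 earlier in this patch, but the other costs and the budget here are assumptions for illustration, not the PMD's real figures.

#include <stdio.h>

/* Assumed per-action field costs; only the IPv4 value (1) is taken from
 * the patch, the rest are placeholders. */
enum ex_mdf_cost {
	EX_MDF_IPV4 = 1,
	EX_MDF_MAC  = 2,
	EX_MDF_TTL  = 1,
};

/* Assumed modify-header field budget for the sketch. */
#define EX_MAX_MODIFY_NUM 16

int main(void)
{
	unsigned int rw_act_num = 0;

	rw_act_num += EX_MDF_MAC;  /* e.g. SET_MAC_SRC */
	rw_act_num += EX_MDF_IPV4; /* e.g. SET_IPV4_DST */
	rw_act_num += EX_MDF_TTL;  /* e.g. DEC_TTL */
	printf("modify-header fields used: %u of %u -> %s\n",
	       rw_act_num, EX_MAX_MODIFY_NUM,
	       rw_act_num <= EX_MAX_MODIFY_NUM ? "ok" : "too many");
	return 0;
}
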
@@ -25686,7 +53138,7 @@ index 73aaea4536..d83e49f954 100644 * @param[out] error * Pointer to the error structure. * -@@ -4227,7 +4445,7 @@ static int +@@ -4227,7 +4553,7 @@ static int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], @@ -25695,7 +53147,7 @@ index 73aaea4536..d83e49f954 100644 { int ret; uint64_t action_flags = 0; -@@ -4236,7 +4454,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4236,7 +4562,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, uint8_t next_protocol = 0xff; uint16_t ether_type = 0; int actions_n = 0; @@ -25707,7 +53159,7 @@ index 73aaea4536..d83e49f954 100644 struct rte_flow_item_tcp nic_tcp_mask = { .hdr = { .tcp_flags = 0xFF, -@@ -4246,12 +4468,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4246,12 +4576,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, }; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; @@ -25725,7 +53177,7 @@ index 73aaea4536..d83e49f954 100644 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int type = items->type; -@@ -4286,8 +4513,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4286,8 +4621,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } break; case RTE_FLOW_ITEM_TYPE_VLAN: @@ -25736,7 +53188,7 @@ index 73aaea4536..d83e49f954 100644 if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : -@@ -4303,6 +4530,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4303,6 +4638,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } else { ether_type = 0; } @@ -25746,7 +53198,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ITEM_TYPE_IPV4: mlx5_flow_tunnel_ip_check(items, next_protocol, -@@ -4343,6 +4573,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4343,6 +4681,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, if (items->mask != NULL && ((const struct rte_flow_item_ipv6 *) items->mask)->hdr.proto) { @@ -25756,7 +53208,7 @@ index 73aaea4536..d83e49f954 100644 next_protocol = ((const struct rte_flow_item_ipv6 *) items->spec)->hdr.proto; -@@ -4418,7 +4651,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4418,7 +4759,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret < 0) return ret; @@ -25765,7 +53217,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ITEM_TYPE_MPLS: ret = mlx5_flow_validate_item_mpls(dev, items, -@@ -4457,6 +4690,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4457,6 +4798,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret < 0) return ret; @@ -25773,7 +53225,7 @@ index 73aaea4536..d83e49f954 100644 last_item = MLX5_FLOW_LAYER_ICMP6; break; case RTE_FLOW_ITEM_TYPE_TAG: -@@ -4512,6 +4746,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4512,6 +4854,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, action_flags |= MLX5_FLOW_ACTION_FLAG; ++actions_n; } @@ -25781,7 +53233,7 @@ index 73aaea4536..d83e49f954 100644 
break; case RTE_FLOW_ACTION_TYPE_MARK: ret = flow_dv_validate_action_mark(dev, actions, -@@ -4530,6 +4765,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4530,6 +4873,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, action_flags |= MLX5_FLOW_ACTION_MARK; ++actions_n; } @@ -25789,7 +53241,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_SET_META: ret = flow_dv_validate_action_set_meta(dev, actions, -@@ -4541,6 +4777,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4541,6 +4885,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) ++actions_n; action_flags |= MLX5_FLOW_ACTION_SET_META; @@ -25797,7 +53249,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_SET_TAG: ret = flow_dv_validate_action_set_tag(dev, actions, -@@ -4552,6 +4789,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4552,6 +4897,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) ++actions_n; action_flags |= MLX5_FLOW_ACTION_SET_TAG; @@ -25805,7 +53257,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_DROP: ret = mlx5_flow_validate_action_drop(action_flags, -@@ -4567,16 +4805,21 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4567,16 +4913,21 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, attr, error); if (ret < 0) return ret; @@ -25827,7 +53279,7 @@ index 73aaea4536..d83e49f954 100644 action_flags |= MLX5_FLOW_ACTION_RSS; ++actions_n; break; -@@ -4598,8 +4841,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4598,8 +4949,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ++actions_n; break; case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: @@ -25839,7 +53291,7 @@ index 73aaea4536..d83e49f954 100644 actions, attr, error); if (ret < 0) -@@ -4623,49 +4867,52 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4623,49 +4975,53 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; /* Count VID with push_vlan command. */ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; @@ -25866,6 +53318,7 @@ index 73aaea4536..d83e49f954 100644 - ret = flow_dv_validate_action_l2_decap(action_flags, - attr, error); + ret = flow_dv_validate_action_decap(dev, action_flags, ++ actions, item_flags, + attr, error); if (ret < 0) return ret; @@ -25882,7 +53335,7 @@ index 73aaea4536..d83e49f954 100644 - error); + ret = flow_dv_validate_action_raw_encap_decap + (dev, NULL, actions->conf, attr, &action_flags, -+ &actions_n, error); ++ &actions_n, actions, item_flags, error); if (ret < 0) return ret; - action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP; @@ -25905,7 +53358,7 @@ index 73aaea4536..d83e49f954 100644 + (dev, + decap ? 
decap : &empty_decap, encap, + attr, &action_flags, &actions_n, -+ error); ++ actions, item_flags, error); if (ret < 0) return ret; - action_flags |= MLX5_FLOW_ACTION_RAW_DECAP; @@ -25913,7 +53366,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: -@@ -4682,8 +4929,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4682,8 +5038,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? MLX5_FLOW_ACTION_SET_MAC_SRC : MLX5_FLOW_ACTION_SET_MAC_DST; @@ -25930,7 +53383,7 @@ index 73aaea4536..d83e49f954 100644 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: ret = flow_dv_validate_action_modify_ipv4(action_flags, -@@ -4699,6 +4953,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4699,6 +5062,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? MLX5_FLOW_ACTION_SET_IPV4_SRC : MLX5_FLOW_ACTION_SET_IPV4_DST; @@ -25938,7 +53391,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: -@@ -4708,6 +4963,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4708,6 +5072,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret < 0) return ret; @@ -25951,7 +53404,7 @@ index 73aaea4536..d83e49f954 100644 /* Count all modify-header actions as one action. */ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) ++actions_n; -@@ -4715,6 +4976,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4715,6 +5085,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? MLX5_FLOW_ACTION_SET_IPV6_SRC : MLX5_FLOW_ACTION_SET_IPV6_DST; @@ -25959,7 +53412,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: case RTE_FLOW_ACTION_TYPE_SET_TP_DST: -@@ -4731,6 +4993,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4731,6 +5102,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? MLX5_FLOW_ACTION_SET_TP_SRC : MLX5_FLOW_ACTION_SET_TP_DST; @@ -25967,7 +53420,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_DEC_TTL: case RTE_FLOW_ACTION_TYPE_SET_TTL: -@@ -4747,6 +5010,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4747,6 +5119,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ACTION_TYPE_SET_TTL ? MLX5_FLOW_ACTION_SET_TTL : MLX5_FLOW_ACTION_DEC_TTL; @@ -25975,7 +53428,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_JUMP: ret = flow_dv_validate_action_jump(actions, -@@ -4774,6 +5038,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4774,6 +5147,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 
MLX5_FLOW_ACTION_INC_TCP_SEQ : MLX5_FLOW_ACTION_DEC_TCP_SEQ; @@ -25983,7 +53436,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: -@@ -4791,10 +5056,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4791,10 +5165,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? MLX5_FLOW_ACTION_INC_TCP_ACK : MLX5_FLOW_ACTION_DEC_TCP_ACK; @@ -25998,7 +53451,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_METER: ret = mlx5_flow_validate_action_meter(dev, -@@ -4805,6 +5073,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4805,6 +5182,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; action_flags |= MLX5_FLOW_ACTION_METER; ++actions_n; @@ -26007,7 +53460,7 @@ index 73aaea4536..d83e49f954 100644 break; default: return rte_flow_error_set(error, ENOTSUP, -@@ -4813,13 +5083,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4813,13 +5192,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, "action not supported"); } } @@ -26033,25 +53486,42 @@ index 73aaea4536..d83e49f954 100644 /* Eswitch has few restrictions on using items and actions */ if (attr->transfer) { if (!mlx5_flow_ext_mreg_supported(dev) && -@@ -4856,6 +5131,37 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4856,6 +5240,54 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, actions, "no fate action is found"); } -+ /* Continue validation for Xcap actions.*/ -+ if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF || -+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { ++ /* Continue validation for Xcap and VLAN actions.*/ ++ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS | ++ MLX5_FLOW_VLAN_ACTIONS)) && ++ (queue_index == 0xFFFF || ++ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == + MLX5_FLOW_XCAP_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap and decap " + "combination aren't supported"); -+ if (!attr->transfer && attr->ingress && (action_flags & -+ MLX5_FLOW_ACTION_ENCAP)) -+ return rte_flow_error_set(error, ENOTSUP, -+ RTE_FLOW_ERROR_TYPE_ACTION, -+ NULL, "encap is not supported" -+ " for ingress traffic"); ++ if (!attr->transfer && attr->ingress) { ++ if (action_flags & MLX5_FLOW_ACTION_ENCAP) ++ return rte_flow_error_set ++ (error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, "encap is not supported" ++ " for ingress traffic"); ++ else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) ++ return rte_flow_error_set ++ (error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, "push VLAN action not " ++ "supported for ingress"); ++ else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) == ++ MLX5_FLOW_VLAN_ACTIONS) ++ return rte_flow_error_set ++ (error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, "no support for " ++ "multiple VLAN actions"); ++ } + } + /* Hairpin flow will add one more TAG action. */ + if (hairpin > 0) @@ -26071,7 +53541,7 @@ index 73aaea4536..d83e49f954 100644 return 0; } -@@ -4984,6 +5290,23 @@ flow_dv_translate_item_eth(void *matcher, void *key, +@@ -4984,6 +5416,23 @@ flow_dv_translate_item_eth(void *matcher, void *key, /* The value must be in the range of the mask. 
*/ for (i = 0; i < sizeof(eth_m->dst); ++i) l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; @@ -26095,7 +53565,7 @@ index 73aaea4536..d83e49f954 100644 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, rte_be_to_cpu_16(eth_m->type)); l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype); -@@ -5017,10 +5340,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, +@@ -5017,10 +5466,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, uint16_t tci_m; uint16_t tci_v; @@ -26106,7 +53576,7 @@ index 73aaea4536..d83e49f954 100644 if (inner) { headers_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); -@@ -5033,13 +5352,22 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, +@@ -5033,13 +5478,22 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * This is workaround, masks are not supported, * and pre-validated. */ @@ -26133,7 +53603,7 @@ index 73aaea4536..d83e49f954 100644 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v); MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12); -@@ -5061,6 +5389,8 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, +@@ -5061,6 +5515,8 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * Flow matcher value. * @param[in] item * Flow pattern to translate. @@ -26142,7 +53612,7 @@ index 73aaea4536..d83e49f954 100644 * @param[in] inner * Item is inner pattern. * @param[in] group -@@ -5069,6 +5399,7 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, +@@ -5069,6 +5525,7 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, static void flow_dv_translate_item_ipv4(void *matcher, void *key, const struct rte_flow_item *item, @@ -26150,7 +53620,7 @@ index 73aaea4536..d83e49f954 100644 int inner, uint32_t group) { const struct rte_flow_item_ipv4 *ipv4_m = item->mask; -@@ -5101,6 +5432,13 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, +@@ -5101,6 +5558,13 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, else MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4); @@ -26164,7 +53634,7 @@ index 73aaea4536..d83e49f954 100644 if (!ipv4_v) return; if (!ipv4_m) -@@ -5139,6 +5477,8 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, +@@ -5139,6 +5603,8 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. @@ -26173,7 +53643,7 @@ index 73aaea4536..d83e49f954 100644 * @param[in] inner * Item is inner pattern. 
* @param[in] group -@@ -5147,6 +5487,7 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, +@@ -5147,6 +5613,7 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, static void flow_dv_translate_item_ipv6(void *matcher, void *key, const struct rte_flow_item *item, @@ -26181,7 +53651,7 @@ index 73aaea4536..d83e49f954 100644 int inner, uint32_t group) { const struct rte_flow_item_ipv6 *ipv6_m = item->mask; -@@ -5189,6 +5530,13 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, +@@ -5189,6 +5656,13 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, else MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6); @@ -26195,7 +53665,7 @@ index 73aaea4536..d83e49f954 100644 if (!ipv6_v) return; if (!ipv6_m) -@@ -5354,13 +5702,13 @@ flow_dv_translate_item_gre_key(void *matcher, void *key, +@@ -5354,13 +5828,13 @@ flow_dv_translate_item_gre_key(void *matcher, void *key, void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); @@ -26212,7 +53682,27 @@ index 73aaea4536..d83e49f954 100644 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h, rte_be_to_cpu_32(*key_m) >> 8); MLX5_SET(fte_match_set_misc, misc_v, gre_key_h, -@@ -5558,6 +5906,76 @@ flow_dv_translate_item_vxlan(void *matcher, void *key, +@@ -5468,8 +5942,8 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, + const struct rte_flow_item_nvgre *nvgre_v = item->spec; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); +- const char *tni_flow_id_m = (const char *)nvgre_m->tni; +- const char *tni_flow_id_v = (const char *)nvgre_v->tni; ++ const char *tni_flow_id_m; ++ const char *tni_flow_id_v; + char *gre_key_m; + char *gre_key_v; + int size; +@@ -5494,6 +5968,8 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, + return; + if (!nvgre_m) + nvgre_m = &rte_flow_item_nvgre_mask; ++ tni_flow_id_m = (const char *)nvgre_m->tni; ++ tni_flow_id_v = (const char *)nvgre_v->tni; + size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); + gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h); + gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h); +@@ -5558,6 +6034,76 @@ flow_dv_translate_item_vxlan(void *matcher, void *key, vni_v[i] = vni_m[i] & vxlan_v->vni[i]; } @@ -26289,7 +53779,7 @@ index 73aaea4536..d83e49f954 100644 /** * Add Geneve item to matcher and to the value. * -@@ -5742,6 +6160,7 @@ flow_dv_match_meta_reg(void *matcher, void *key, +@@ -5742,6 +6288,7 @@ flow_dv_match_meta_reg(void *matcher, void *key, MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2); void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); @@ -26297,7 +53787,7 @@ index 73aaea4536..d83e49f954 100644 data &= mask; switch (reg_type) { -@@ -5754,8 +6173,18 @@ flow_dv_match_meta_reg(void *matcher, void *key, +@@ -5754,8 +6301,18 @@ flow_dv_match_meta_reg(void *matcher, void *key, MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data); break; case REG_C_0: @@ -26318,7 +53808,7 @@ index 73aaea4536..d83e49f954 100644 break; case REG_C_1: MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask); -@@ -5825,6 +6254,15 @@ flow_dv_translate_item_mark(struct rte_eth_dev *dev, +@@ -5825,6 +6382,15 @@ flow_dv_translate_item_mark(struct rte_eth_dev *dev, /* Get the metadata register index for the mark. 
*/ reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL); assert(reg > 0); @@ -26334,7 +53824,7 @@ index 73aaea4536..d83e49f954 100644 flow_dv_match_meta_reg(matcher, key, reg, value, mask); } } -@@ -5857,7 +6295,7 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev, +@@ -5857,7 +6423,7 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev, meta_m = &rte_flow_item_meta_mask; meta_v = (const void *)item->spec; if (meta_v) { @@ -26343,7 +53833,7 @@ index 73aaea4536..d83e49f954 100644 uint32_t value = meta_v->data; uint32_t mask = meta_m->data; -@@ -5875,8 +6313,12 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev, +@@ -5875,8 +6441,12 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; uint32_t msk_c0 = priv->sh->dv_regc0_mask; uint32_t shl_c0 = rte_bsf32(msk_c0); @@ -26357,7 +53847,7 @@ index 73aaea4536..d83e49f954 100644 value <<= shl_c0; mask <<= shl_c0; assert(msk_c0); -@@ -5906,6 +6348,8 @@ flow_dv_translate_item_meta_vport(void *matcher, void *key, +@@ -5906,6 +6476,8 @@ flow_dv_translate_item_meta_vport(void *matcher, void *key, /** * Add tag item to matcher * @@ -26366,7 +53856,7 @@ index 73aaea4536..d83e49f954 100644 * @param[in, out] matcher * Flow matcher. * @param[in, out] key -@@ -5914,15 +6358,27 @@ flow_dv_translate_item_meta_vport(void *matcher, void *key, +@@ -5914,15 +6486,27 @@ flow_dv_translate_item_meta_vport(void *matcher, void *key, * Flow pattern to translate. */ static void @@ -26397,7 +53887,7 @@ index 73aaea4536..d83e49f954 100644 } /** -@@ -6056,6 +6512,12 @@ flow_dv_translate_item_icmp6(void *matcher, void *key, +@@ -6056,6 +6640,12 @@ flow_dv_translate_item_icmp6(void *matcher, void *key, return; if (!icmp6_m) icmp6_m = &rte_flow_item_icmp6_mask; @@ -26410,7 +53900,7 @@ index 73aaea4536..d83e49f954 100644 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type); MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type, icmp6_v->type & icmp6_m->type); -@@ -6103,6 +6565,12 @@ flow_dv_translate_item_icmp(void *matcher, void *key, +@@ -6103,6 +6693,12 @@ flow_dv_translate_item_icmp(void *matcher, void *key, return; if (!icmp_m) icmp_m = &rte_flow_item_icmp_mask; @@ -26423,7 +53913,7 @@ index 73aaea4536..d83e49f954 100644 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type, icmp_m->hdr.icmp_type); MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type, -@@ -6618,10 +7086,13 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6618,10 +7214,13 @@ __flow_dv_translate(struct rte_eth_dev *dev, }; int actions_n = 0; bool actions_end = false; @@ -26441,7 +53931,7 @@ index 73aaea4536..d83e49f954 100644 union flow_dv_attr flow_attr = { .attr = 0 }; uint32_t tag_be; union mlx5_flow_tbl_key tbl_key; -@@ -6633,15 +7104,19 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6633,15 +7232,19 @@ __flow_dv_translate(struct rte_eth_dev *dev, uint32_t table; int ret = 0; @@ -26463,7 +53953,7 @@ index 73aaea4536..d83e49f954 100644 for (; !actions_end ; actions++) { const struct rte_flow_action_queue *queue; const struct rte_flow_action_rss *rss; -@@ -6679,7 +7154,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6679,7 +7282,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, }; if (flow_dv_convert_action_mark(dev, &mark, @@ -26472,7 +53962,7 @@ index 73aaea4536..d83e49f954 100644 error)) return -rte_errno; action_flags |= MLX5_FLOW_ACTION_MARK_EXT; -@@ -6701,7 +7176,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6701,7 +7304,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, actions->conf; if 
(flow_dv_convert_action_mark(dev, mark, @@ -26481,7 +53971,7 @@ index 73aaea4536..d83e49f954 100644 error)) return -rte_errno; action_flags |= MLX5_FLOW_ACTION_MARK_EXT; -@@ -6722,7 +7197,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6722,7 +7325,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_SET_META: if (flow_dv_convert_action_set_meta @@ -26490,7 +53980,7 @@ index 73aaea4536..d83e49f954 100644 (const struct rte_flow_action_set_meta *) actions->conf, error)) return -rte_errno; -@@ -6730,7 +7205,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6730,7 +7333,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_SET_TAG: if (flow_dv_convert_action_set_tag @@ -26499,7 +53989,7 @@ index 73aaea4536..d83e49f954 100644 (const struct rte_flow_action_set_tag *) actions->conf, error)) return -rte_errno; -@@ -6798,7 +7273,9 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6798,7 +7401,9 @@ __flow_dv_translate(struct rte_eth_dev *dev, action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; break; case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: @@ -26510,7 +54000,7 @@ index 73aaea4536..d83e49f954 100644 vlan.eth_proto = rte_be_to_cpu_16 ((((const struct rte_flow_action_of_push_vlan *) actions->conf)->ethertype)); -@@ -6830,7 +7307,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6830,7 +7435,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, mlx5_update_vlan_vid_pcp(actions, &vlan); /* If no VLAN push - this is a modify header action */ if (flow_dv_convert_action_modify_vlan_vid @@ -26519,7 +54009,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; break; -@@ -6843,10 +7320,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6843,10 +7448,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, return -rte_errno; dev_flow->dv.actions[actions_n++] = dev_flow->dv.encap_decap->verbs_action; @@ -26531,7 +54021,7 @@ index 73aaea4536..d83e49f954 100644 break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: -@@ -6856,14 +7330,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6856,14 +7458,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, return -rte_errno; dev_flow->dv.actions[actions_n++] = dev_flow->dv.encap_decap->verbs_action; @@ -26548,7 +54038,7 @@ index 73aaea4536..d83e49f954 100644 if (flow_dv_create_action_raw_encap (dev, actions, dev_flow, attr, error)) return -rte_errno; -@@ -6878,15 +7349,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6878,15 +7477,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, dev_flow->dv.actions[actions_n++] = dev_flow->dv.encap_decap->verbs_action; } @@ -26567,7 +54057,7 @@ index 73aaea4536..d83e49f954 100644 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { if (flow_dv_create_action_l2_decap (dev, dev_flow, attr->transfer, error)) -@@ -6895,13 +7362,14 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6895,13 +7490,14 @@ __flow_dv_translate(struct rte_eth_dev *dev, dev_flow->dv.encap_decap->verbs_action; } /* If decap is followed by encap, handle it at encap. 
*/ @@ -26585,7 +54075,7 @@ index 73aaea4536..d83e49f954 100644 if (ret) return ret; tbl = flow_dv_tbl_resource_get(dev, table, -@@ -6929,7 +7397,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6929,7 +7525,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: if (flow_dv_convert_action_modify_mac @@ -26594,7 +54084,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? -@@ -6939,7 +7407,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6939,7 +7535,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: if (flow_dv_convert_action_modify_ipv4 @@ -26603,7 +54093,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? -@@ -6949,7 +7417,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6949,7 +7545,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: if (flow_dv_convert_action_modify_ipv6 @@ -26612,7 +54102,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? -@@ -6959,8 +7427,9 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6959,8 +7555,9 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: case RTE_FLOW_ACTION_TYPE_SET_TP_DST: if (flow_dv_convert_action_modify_tp @@ -26624,7 +54114,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? -@@ -6969,21 +7438,24 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6969,21 +7566,24 @@ __flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_DEC_TTL: if (flow_dv_convert_action_modify_dec_ttl @@ -26653,7 +54143,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? -@@ -6994,7 +7466,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -6994,7 +7594,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: if (flow_dv_convert_action_modify_tcp_ack @@ -26662,7 +54152,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 
-@@ -7003,13 +7475,13 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7003,13 +7603,13 @@ __flow_dv_translate(struct rte_eth_dev *dev, break; case MLX5_RTE_FLOW_ACTION_TYPE_TAG: if (flow_dv_convert_action_set_reg @@ -26678,7 +54168,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; action_flags |= MLX5_FLOW_ACTION_SET_TAG; break; -@@ -7034,10 +7506,10 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7034,10 +7634,10 @@ __flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_END: actions_end = true; @@ -26691,7 +54181,7 @@ index 73aaea4536..d83e49f954 100644 return -rte_errno; dev_flow->dv.actions[modify_action_position] = dev_flow->dv.modify_hdr->verbs_action; -@@ -7046,7 +7518,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7046,7 +7646,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, default: break; } @@ -26700,7 +54190,7 @@ index 73aaea4536..d83e49f954 100644 modify_action_position == UINT32_MAX) modify_action_position = actions_n++; } -@@ -7083,7 +7555,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7083,7 +7683,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); flow_dv_translate_item_ipv4(match_mask, match_value, @@ -26709,7 +54199,7 @@ index 73aaea4536..d83e49f954 100644 dev_flow->group); matcher.priority = MLX5_PRIORITY_MAP_L3; dev_flow->hash_fields |= -@@ -7111,7 +7583,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7111,7 +7711,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); flow_dv_translate_item_ipv6(match_mask, match_value, @@ -26718,7 +54208,7 @@ index 73aaea4536..d83e49f954 100644 dev_flow->group); matcher.priority = MLX5_PRIORITY_MAP_L3; dev_flow->hash_fields |= -@@ -7162,6 +7634,8 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7162,6 +7762,8 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_GRE: flow_dv_translate_item_gre(match_mask, match_value, items, tunnel); @@ -26727,7 +54217,7 @@ index 73aaea4536..d83e49f954 100644 last_item = MLX5_FLOW_LAYER_GRE; break; case RTE_FLOW_ITEM_TYPE_GRE_KEY: -@@ -7172,26 +7646,37 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7172,26 +7774,37 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_NVGRE: flow_dv_translate_item_nvgre(match_mask, match_value, items, tunnel); @@ -26767,7 +54257,7 @@ index 73aaea4536..d83e49f954 100644 last_item = MLX5_FLOW_LAYER_MPLS; break; case RTE_FLOW_ITEM_TYPE_MARK: -@@ -7220,7 +7705,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7220,7 +7833,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, last_item = MLX5_FLOW_ITEM_TAG; break; case MLX5_RTE_FLOW_ITEM_TYPE_TAG: @@ -26776,7 +54266,7 @@ index 73aaea4536..d83e49f954 100644 match_value, items); last_item = MLX5_FLOW_ITEM_TAG; break; -@@ -7236,13 +7721,13 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7236,13 +7849,13 @@ __flow_dv_translate(struct rte_eth_dev *dev, item_flags |= last_item; } /* @@ -26793,7 +54283,7 @@ index 73aaea4536..d83e49f954 100644 (priv->representor || priv->master)) { if (flow_dv_translate_item_port_id(dev, match_mask, match_value, NULL)) -@@ -7250,7 +7735,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, +@@ -7250,7 +7863,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, } assert(!flow_dv_check_valid_spec(matcher.mask.buf, dev_flow->dv.value.buf)); @@ -26806,7 +54296,7 @@ index 73aaea4536..d83e49f954 100644 /* Register matcher. 
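Right after this point the translated matcher is registered through a small cache: rte_raw_cksum() over the match-mask buffer yields a cheap lookup key, so every flow sharing the same mask layout reuses a single device matcher object instead of allocating a duplicate. A sketch of that deduplication under hypothetical types (the real cache additionally keys on table, priority and egress/transfer attributes):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct matcher_entry {
	uint32_t crc;		/* checksum of the mask buffer */
	size_t mask_size;
	uint8_t mask[256];
	int refcnt;		/* one reference per flow using this mask */
};

/* Return the cached entry for this mask, or NULL so the caller
 * creates (and caches) a new device matcher. */
static struct matcher_entry *
matcher_lookup(struct matcher_entry *cache, size_t n, uint32_t crc,
	       const uint8_t *mask, size_t size)
{
	for (size_t i = 0; i < n; i++) {
		if (cache[i].crc == crc && cache[i].mask_size == size &&
		    memcmp(cache[i].mask, mask, size) == 0)
			return &cache[i];
	}
	return NULL;
}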
*/ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, matcher.mask.size); -@@ -7779,8 +8268,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, +@@ -7779,8 +8396,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, claim_zero(mlx5_glue->dv_destroy_flow_matcher (mtd->egress.any_matcher)); if (mtd->egress.tbl) @@ -26818,7 +54308,7 @@ index 73aaea4536..d83e49f954 100644 if (mtd->ingress.color_matcher) claim_zero(mlx5_glue->dv_destroy_flow_matcher (mtd->ingress.color_matcher)); -@@ -7788,8 +8278,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, +@@ -7788,8 +8406,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, claim_zero(mlx5_glue->dv_destroy_flow_matcher (mtd->ingress.any_matcher)); if (mtd->ingress.tbl) @@ -26830,7 +54320,7 @@ index 73aaea4536..d83e49f954 100644 if (mtd->transfer.color_matcher) claim_zero(mlx5_glue->dv_destroy_flow_matcher (mtd->transfer.color_matcher)); -@@ -7797,8 +8288,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, +@@ -7797,8 +8416,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, claim_zero(mlx5_glue->dv_destroy_flow_matcher (mtd->transfer.any_matcher)); if (mtd->transfer.tbl) @@ -26842,7 +54332,7 @@ index 73aaea4536..d83e49f954 100644 if (mtd->drop_actn) claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn)); rte_free(mtd); -@@ -7846,31 +8338,16 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, +@@ -7846,31 +8466,16 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, .match_mask = (void *)&mask, }; void *actions[METER_ACTIONS]; @@ -26877,7 +54367,7 @@ index 73aaea4536..d83e49f954 100644 /* Create the meter table with METER level. */ dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER, egress, transfer, &error); -@@ -7878,6 +8355,14 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, +@@ -7878,6 +8483,14 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, DRV_LOG(ERR, "Failed to create meter policer table."); return -1; } @@ -26892,7 +54382,7 @@ index 73aaea4536..d83e49f954 100644 /* Create matchers, Any and Color. */ dv_attr.priority = 3; dv_attr.match_criteria_enable = 0; -@@ -7893,7 +8378,7 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, +@@ -7893,7 +8506,7 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, dv_attr.match_criteria_enable = 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx, @@ -26901,7 +54391,7 @@ index 73aaea4536..d83e49f954 100644 dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj); -@@ -8048,8 +8533,6 @@ flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused, +@@ -8048,8 +8661,6 @@ flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused, * Pointer to flow meter structure. * @param[in] mtb * Pointer to DV meter table set. @@ -26910,7 +54400,7 @@ index 73aaea4536..d83e49f954 100644 * @param[in] mtr_reg_c * Color match REG_C. * -@@ -8059,7 +8542,6 @@ flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused, +@@ -8059,7 +8670,6 @@ flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused, static int flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, struct mlx5_meter_domain_info *dtb, @@ -26918,7 +54408,7 @@ index 73aaea4536..d83e49f954 100644 uint8_t mtr_reg_c) { struct mlx5_flow_dv_match_params matcher = { -@@ -8073,12 +8555,10 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, +@@ -8073,12 +8683,10 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, int i; /* Create jump action. 
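Note the direction of the meter-table cleanup rewritten above: the egress, ingress and transfer tables are no longer destroyed with mlx5_glue->dv_destroy_flow_tbl() but handed back through flow_dv_tbl_resource_release(), because the tables are shared, reference-counted resources that other flows may still be using. The pattern, sketched with hypothetical types:

struct tbl_resource {
	void *obj;	/* device flow-table object */
	int refcnt;
};

/* Drop one reference; only the last user destroys the device object.
 * Returns 0 when the table was actually freed, 1 while still shared. */
static int
tbl_resource_release(struct tbl_resource *tbl)
{
	if (--tbl->refcnt)
		return 1;
	/* last reference, e.g.: claim_zero(destroy_flow_tbl(tbl->obj)); */
	tbl->obj = NULL;
	return 0;
}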
*/ @@ -26932,7 +54422,7 @@ index 73aaea4536..d83e49f954 100644 if (!dtb->jump_actn) { DRV_LOG(ERR, "Failed to create policer jump action."); goto error; -@@ -8087,7 +8567,7 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, +@@ -8087,7 +8695,7 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, int j = 0; flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c, @@ -26941,7 +54431,7 @@ index 73aaea4536..d83e49f954 100644 if (mtb->count_actns[i]) actions[j++] = mtb->count_actns[i]; if (fm->params.action[i] == MTR_POLICER_ACTION_DROP) -@@ -8133,7 +8613,6 @@ flow_dv_create_policer_rules(struct rte_eth_dev *dev, +@@ -8133,7 +8741,6 @@ flow_dv_create_policer_rules(struct rte_eth_dev *dev, if (attr->egress) { ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress, @@ -26949,7 +54439,7 @@ index 73aaea4536..d83e49f954 100644 priv->mtr_color_reg); if (ret) { DRV_LOG(ERR, "Failed to create egress policer."); -@@ -8142,7 +8621,6 @@ flow_dv_create_policer_rules(struct rte_eth_dev *dev, +@@ -8142,7 +8749,6 @@ flow_dv_create_policer_rules(struct rte_eth_dev *dev, } if (attr->ingress) { ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress, @@ -26957,7 +54447,7 @@ index 73aaea4536..d83e49f954 100644 priv->mtr_color_reg); if (ret) { DRV_LOG(ERR, "Failed to create ingress policer."); -@@ -8151,7 +8629,6 @@ flow_dv_create_policer_rules(struct rte_eth_dev *dev, +@@ -8151,7 +8757,6 @@ flow_dv_create_policer_rules(struct rte_eth_dev *dev, } if (attr->transfer) { ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer, @@ -26965,8 +54455,116 @@ index 73aaea4536..d83e49f954 100644 priv->mtr_color_reg); if (ret) { DRV_LOG(ERR, "Failed to create transfer policer."); +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +index c4d28b282e..62e3a35902 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +@@ -301,7 +301,7 @@ mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + memset(cap, 0, sizeof(*cap)); + cap->n_max = 1 << qattr->log_max_flow_meter; + cap->n_shared_max = cap->n_max; +@@ -347,7 +347,7 @@ mlx5_flow_meter_profile_add(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Check input params. */ + ret = mlx5_flow_meter_profile_validate(dev, meter_profile_id, + profile, error); +@@ -400,19 +400,19 @@ mlx5_flow_meter_profile_delete(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Meter profile must exist. */ + fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id); + if (fmp == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + &meter_profile_id, +- "Meter profile id invalid."); ++ "Meter profile id is invalid."); + /* Check profile is unused. */ + if (fmp->ref_cnt) + return -rte_mtr_error_set(error, EBUSY, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, +- NULL, "Meter profile in use."); ++ NULL, "Meter profile is in use."); + /* Remove from list. 
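The policer setup above installs one rule per meter color: each rule matches the color register against rte_col_2_mlx5_col(i) with a full mask (UINT32_MAX), attaches that color's counter when available, and ends in either a drop or a jump to the suffix table. A condensed sketch of the per-color assembly, with hypothetical names:

enum { MTR_COLORS = 3 };	/* green, yellow, red */

struct color_rule {
	uint32_t reg_val;	/* device encoding of the color */
	void *actions[3];	/* optional counter, then the fate action */
};

static void
build_color_rules(struct color_rule rule[MTR_COLORS],
		  uint32_t (*col2hw)(int c), void **counter,
		  const int drop_on[MTR_COLORS], void *drop, void *jump)
{
	for (int i = 0; i < MTR_COLORS; i++) {
		int j = 0;

		rule[i].reg_val = col2hw(i);
		if (counter[i])
			rule[i].actions[j++] = counter[i];
		rule[i].actions[j++] = drop_on[i] ? drop : jump;
		rule[i].actions[j] = NULL;
	}
}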
*/ + TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next); + rte_free(fmp); +@@ -633,7 +633,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Validate the parameters. */ + ret = mlx5_flow_meter_validate(priv, meter_id, params, error); + if (ret) +@@ -718,7 +718,7 @@ mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) +@@ -823,7 +823,7 @@ mlx5_flow_meter_enable(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) +@@ -864,7 +864,7 @@ mlx5_flow_meter_disable(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) +@@ -912,7 +912,7 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Meter profile must exist. */ + fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id); + if (fmp == NULL) +@@ -975,7 +975,7 @@ mlx5_flow_meter_stats_update(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) +@@ -1032,7 +1032,7 @@ mlx5_flow_meter_stats_read(struct rte_eth_dev *dev, + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, +- "Meter is not support"); ++ "Meter is not supported"); + /* Meter object must exist. 
*/ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c -index c787c9838d..7ac6a25e43 100644 +index c787c9838d..a670c5f3c5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c @@ -493,14 +493,12 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow, @@ -27038,7 +54636,43 @@ index c787c9838d..7ac6a25e43 100644 struct rte_flow_error *error) { int ret; -@@ -1255,6 +1277,18 @@ flow_verbs_validate(struct rte_eth_dev *dev, +@@ -1039,6 +1061,7 @@ flow_verbs_validate(struct rte_eth_dev *dev, + uint64_t last_item = 0; + uint8_t next_protocol = 0xff; + uint16_t ether_type = 0; ++ bool is_empty_vlan = false; + + if (items == NULL) + return -1; +@@ -1066,6 +1089,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, + ether_type &= + ((const struct rte_flow_item_eth *) + items->mask)->type; ++ if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN)) ++ is_empty_vlan = true; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; +@@ -1091,6 +1116,7 @@ flow_verbs_validate(struct rte_eth_dev *dev, + } else { + ether_type = 0; + } ++ is_empty_vlan = false; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = mlx5_flow_validate_item_ipv4(items, item_flags, +@@ -1195,6 +1221,10 @@ flow_verbs_validate(struct rte_eth_dev *dev, + } + item_flags |= last_item; + } ++ if (is_empty_vlan) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "VLAN matching without vid specification is not supported"); + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: +@@ -1255,6 +1285,18 @@ flow_verbs_validate(struct rte_eth_dev *dev, "action not supported"); } } @@ -27057,11 +54691,81 @@ index c787c9838d..7ac6a25e43 100644 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, +@@ -1444,6 +1486,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, + uint64_t priority = attr->priority; + uint32_t subpriority = 0; + struct mlx5_priv *priv = dev->data->dev_private; ++ struct rte_flow *flow = dev_flow->flow; + + if (priority == MLX5_FLOW_PRIO_RSVD) + priority = priv->config.flow_prio - 1; +@@ -1562,25 +1605,33 @@ flow_verbs_translate(struct rte_eth_dev *dev, + case RTE_FLOW_ITEM_TYPE_VXLAN: + flow_verbs_translate_item_vxlan(dev_flow, items, + item_flags); +- subpriority = MLX5_PRIORITY_MAP_L2; ++ subpriority = flow->rss.level >= 2 ? ++ MLX5_PRIORITY_MAP_L2 : ++ MLX5_PRIORITY_MAP_L4; + item_flags |= MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + flow_verbs_translate_item_vxlan_gpe(dev_flow, items, + item_flags); +- subpriority = MLX5_PRIORITY_MAP_L2; ++ subpriority = flow->rss.level >= 2 ? ++ MLX5_PRIORITY_MAP_L2 : ++ MLX5_PRIORITY_MAP_L4; + item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + flow_verbs_translate_item_gre(dev_flow, items, + item_flags); +- subpriority = MLX5_PRIORITY_MAP_L2; ++ subpriority = flow->rss.level >= 2 ? ++ MLX5_PRIORITY_MAP_L2 : ++ MLX5_PRIORITY_MAP_L4; + item_flags |= MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + flow_verbs_translate_item_mpls(dev_flow, items, + item_flags); +- subpriority = MLX5_PRIORITY_MAP_L2; ++ subpriority = flow->rss.level >= 2 ? 
++ MLX5_PRIORITY_MAP_L2 : ++ MLX5_PRIORITY_MAP_L4; + item_flags |= MLX5_FLOW_LAYER_MPLS; + break; + default: diff --git a/dpdk/drivers/net/mlx5/mlx5_glue.c b/dpdk/drivers/net/mlx5/mlx5_glue.c -index 0917bf28d6..44f63116a8 100644 +index 0917bf28d6..65b63bd607 100644 --- a/dpdk/drivers/net/mlx5/mlx5_glue.c +++ b/dpdk/drivers/net/mlx5/mlx5_glue.c -@@ -1008,7 +1008,7 @@ mlx5_glue_devx_qp_query(struct ibv_qp *qp, +@@ -754,7 +754,7 @@ mlx5_glue_dv_create_flow_action_tag(uint32_t tag) + #ifdef HAVE_IBV_FLOW_DV_SUPPORT + #ifdef HAVE_MLX5DV_DR + return mlx5dv_dr_action_create_tag(tag); +-#else ++#else /* HAVE_MLX5DV_DR */ + struct mlx5dv_flow_action_attr *action; + action = malloc(sizeof(*action)); + if (!action) +@@ -762,11 +762,12 @@ mlx5_glue_dv_create_flow_action_tag(uint32_t tag) + action->type = MLX5DV_FLOW_ACTION_TAG; + action->tag_value = tag; + return action; +-#endif +-#endif ++#endif /* HAVE_MLX5DV_DR */ ++#else /* HAVE_IBV_FLOW_DV_SUPPORT */ + (void)tag; + errno = ENOTSUP; + return NULL; ++#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ + } + + static void * +@@ -1008,7 +1009,7 @@ mlx5_glue_devx_qp_query(struct ibv_qp *qp, const void *in, size_t inlen, void *out, size_t outlen) { @@ -27071,9 +54775,23 @@ index 0917bf28d6..44f63116a8 100644 #else (void)qp; diff --git a/dpdk/drivers/net/mlx5/mlx5_glue.h b/dpdk/drivers/net/mlx5/mlx5_glue.h -index 6442f1eba8..4e6465523a 100644 +index 6442f1eba8..9895e55974 100644 --- a/dpdk/drivers/net/mlx5/mlx5_glue.h +++ b/dpdk/drivers/net/mlx5/mlx5_glue.h +@@ -167,11 +167,11 @@ struct mlx5_glue { + void *(*dr_create_flow_action_dest_flow_tbl)(void *tbl); + void *(*dr_create_flow_action_dest_port)(void *domain, + uint32_t port); +- void *(*dr_create_flow_action_drop)(); ++ void *(*dr_create_flow_action_drop)(void); + void *(*dr_create_flow_action_push_vlan) + (struct mlx5dv_dr_domain *domain, + rte_be32_t vlan_tag); +- void *(*dr_create_flow_action_pop_vlan)(); ++ void *(*dr_create_flow_action_pop_vlan)(void); + void *(*dr_create_flow_tbl)(void *domain, uint32_t level); + int (*dr_destroy_flow_tbl)(void *tbl); + void *(*dr_create_domain)(struct ibv_context *ctx, @@ -258,6 +258,6 @@ struct mlx5_glue { struct mlx5dv_devx_port *mlx5_devx_port); }; @@ -27082,6 +54800,105 @@ index 6442f1eba8..4e6465523a 100644 +extern const struct mlx5_glue *mlx5_glue; #endif /* MLX5_GLUE_H_ */ +diff --git a/dpdk/drivers/net/mlx5/mlx5_mac.c b/dpdk/drivers/net/mlx5/mlx5_mac.c +index 7bdaa2a392..177871b211 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_mac.c ++++ b/dpdk/drivers/net/mlx5/mlx5_mac.c +@@ -11,7 +11,6 @@ + #include + #include + #include +-#include + + /* Verbs header. */ + /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +@@ -200,8 +199,11 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) + uint16_t port_id; + struct mlx5_priv *priv = dev->data->dev_private; + +- /* Configuring the VF instead of its representor. */ +- if (priv->representor) { ++ /* ++ * Configuring the VF instead of its representor, ++ * need to skip the special case of HPF on Bluefield. 
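The mlx5_glue.h change from () to (void) above is more than style. In C, unlike C++, an empty parameter list in a declaration leaves the arguments unspecified, so a function pointer declared with () can be called with any argument list and the compiler cannot object; (void) declares a true zero-argument function and restores type checking. A short illustration of the difference:

/* Hypothetical illustration of the pitfall the hunk fixes. */
struct glue_ops {
	void *(*drop_loose)();		/* unspecified args: no checking in C */
	void *(*drop_strict)(void);	/* zero args: mismatches are errors */
};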
++ */ ++ if (priv->representor && priv->representor_id >= 0) { + DRV_LOG(DEBUG, "VF represented by port %u setting primary MAC address", + dev->data->port_id); + RTE_ETH_FOREACH_DEV_SIBLING(port_id, dev->data->port_id) { +diff --git a/dpdk/drivers/net/mlx5/mlx5_mp.c b/dpdk/drivers/net/mlx5/mlx5_mp.c +index 2a031e2610..e889247871 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_mp.c ++++ b/dpdk/drivers/net/mlx5/mlx5_mp.c +@@ -119,6 +119,8 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + const struct mlx5_mp_param *param = + (const struct mlx5_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; ++ struct mlx5_proc_priv *ppriv; ++ struct mlx5_priv *priv; + int ret; + + assert(rte_eal_process_type() == RTE_PROC_SECONDARY); +@@ -128,12 +130,27 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + return -rte_errno; + } + dev = &rte_eth_devices[param->port_id]; ++ priv = dev->data->dev_private; + switch (param->type) { + case MLX5_MP_REQ_START_RXTX: + DRV_LOG(INFO, "port %u starting datapath", dev->data->port_id); + rte_mb(); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); ++ ppriv = (struct mlx5_proc_priv *)dev->process_private; ++ /* If Tx queue number changes, re-initialize UAR. */ ++ if (ppriv->uar_table_sz != priv->txqs_n) { ++ mlx5_tx_uar_uninit_secondary(dev); ++ mlx5_proc_priv_uninit(dev); ++ ret = mlx5_proc_priv_init(dev); ++ if (ret) ++ return -rte_errno; ++ ret = mlx5_tx_uar_init_secondary(dev, mp_msg->fds[0]); ++ if (ret) { ++ mlx5_proc_priv_uninit(dev); ++ return -rte_errno; ++ } ++ } + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); +@@ -172,6 +189,7 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type) + struct rte_mp_reply mp_rep; + struct mlx5_mp_param *res; + struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0}; ++ struct mlx5_priv *priv = dev->data->dev_private; + int ret; + int i; + +@@ -184,6 +202,10 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type) + return; + } + mp_init_msg(dev, &mp_req, type); ++ if (type == MLX5_MP_REQ_START_RXTX) { ++ mp_req.num_fds = 1; ++ mp_req.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd; ++ } + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + if (rte_errno != ENOTSUP) +diff --git a/dpdk/drivers/net/mlx5/mlx5_mr.c b/dpdk/drivers/net/mlx5/mlx5_mr.c +index 0d549b68e6..54a11bd5d1 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_mr.c ++++ b/dpdk/drivers/net/mlx5/mlx5_mr.c +@@ -1408,7 +1408,7 @@ mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, + return -1; + } + LIST_REMOVE(mr, mr); +- LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr); ++ mr_free(mr); + DEBUG("port %u remove MR(%p) from list", dev->data->port_id, + (void *)mr); + mr_rebuild_dev_cache(sh); diff --git a/dpdk/drivers/net/mlx5/mlx5_nl.c b/dpdk/drivers/net/mlx5/mlx5_nl.c index e7ba03471d..64580b9e6a 100644 --- a/dpdk/drivers/net/mlx5/mlx5_nl.c @@ -27157,7 +54974,7 @@ index e7ba03471d..64580b9e6a 100644 } diff --git a/dpdk/drivers/net/mlx5/mlx5_prm.h b/dpdk/drivers/net/mlx5/mlx5_prm.h -index a805363757..4c86719769 100644 +index a805363757..1d13bbb009 100644 --- a/dpdk/drivers/net/mlx5/mlx5_prm.h +++ b/dpdk/drivers/net/mlx5/mlx5_prm.h @@ -18,6 +18,8 @@ @@ -27187,7 +55004,26 @@ index a805363757..4c86719769 100644 /* Completion mode. 
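One more piece of context for the multi-process hunks above: an rte_mp_msg can carry file descriptors across the primary/secondary channel, and the START_RXTX request now ships the verbs command fd (mp_req.fds[0]) so the secondary can rebuild its UAR table when the number of Tx queues changed. A minimal, simplified sketch of attaching one fd to a message (error handling elided):

#include <string.h>
#include <rte_eal.h>
#include <rte_string_fns.h>

static int
mp_send_one_fd(const char *action_name, int fd)
{
	struct rte_mp_msg msg;

	memset(&msg, 0, sizeof(msg));
	rte_strlcpy(msg.name, action_name, sizeof(msg.name));
	msg.num_fds = 1;	/* up to RTE_MP_MAX_FD_NUM per message */
	msg.fds[0] = fd;	/* the kernel duplicates it for the peer */
	return rte_mp_sendmsg(&msg);
}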
*/ enum mlx5_completion_mode { -@@ -1196,7 +1198,9 @@ struct mlx5_ifc_qos_cap_bits { +@@ -513,7 +515,7 @@ typedef uint8_t u8; + + #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) + #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) +-#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \ ++#define __mlx5_bit_off(typ, fld) ((unsigned int)(uintptr_t) \ + (&(__mlx5_nullp(typ)->fld))) + #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \ + (__mlx5_bit_off(typ, fld) & 0x1f)) +@@ -723,6 +725,9 @@ enum { + MLX5_MKC_ACCESS_MODE_MTT = 0x1, + }; + ++/* The counter batch query requires ID align with 4. */ ++#define MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT 4 ++ + /* Flow counters. */ + struct mlx5_ifc_alloc_flow_counter_out_bits { + u8 status[0x8]; +@@ -1196,7 +1201,9 @@ struct mlx5_ifc_qos_cap_bits { u8 reserved_at_8[0x8]; u8 log_max_flow_meter[0x8]; u8 flow_meter_reg_id[0x8]; @@ -27198,7 +55034,7 @@ index a805363757..4c86719769 100644 u8 packet_pacing_max_rate[0x20]; u8 packet_pacing_min_rate[0x20]; u8 reserved_at_80[0x10]; -@@ -1816,6 +1820,9 @@ enum { +@@ -1816,6 +1823,9 @@ enum { #define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF) #define MLX5_SRTCM_EBS_MAX 0 @@ -27208,8 +55044,25 @@ index a805363757..4c86719769 100644 /** * Convert a user mark to flow mark. * +diff --git a/dpdk/drivers/net/mlx5/mlx5_rss.c b/dpdk/drivers/net/mlx5/mlx5_rss.c +index 102826452d..170005a7af 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rss.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rss.c +@@ -221,9 +221,11 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + assert(reta_conf[idx].reta[pos] < priv->rxqs_n); + (*priv->reta_idx)[i] = reta_conf[idx].reta[pos]; + } ++ ++ priv->skip_default_rss_reta = 1; ++ + if (dev->data->dev_started) { + mlx5_dev_stop(dev); +- priv->skip_default_rss_reta = 1; + return mlx5_dev_start(dev); + } + return 0; diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c -index 986ec016df..2b6ab21b90 100644 +index 986ec016df..e3f41d121d 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxq.c +++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c @@ -36,6 +36,7 @@ @@ -27220,15 +55073,103 @@ index 986ec016df..2b6ab21b90 100644 /* Default RSS hash key also used for ConnectX-3. */ uint8_t rss_hash_default_key[] = { -@@ -1260,6 +1261,7 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) +@@ -104,7 +105,7 @@ inline int + mlx5_mprq_enabled(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; +- uint16_t i; ++ uint32_t i; + uint16_t n = 0; + uint16_t n_ibv = 0; + +@@ -445,19 +446,19 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) + * 0 on success, a negative errno value otherwise and rte_errno is set. 
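The __mlx5_bit_off() cast fixed above deserves a note. The PRM structs in this header are never instantiated: every u8 array element stands for one bit, so the address of a field inside a null-based pointer is numerically the field's bit offset. That address must round-trip through uintptr_t; the old cast through unsigned long truncates on targets where long is narrower than a pointer (64-bit Windows, for instance). A self-contained sketch with hypothetical names:

#include <stdint.h>

struct prm_example {			/* array lengths are BIT counts */
	uint8_t version[0x4];		/* bits 0..3  */
	uint8_t reserved_at_4[0x4];	/* bits 4..7  */
	uint8_t key[0x20];		/* bits 8..39 */
};

#define prm_nullp(typ)	((struct typ *)0)
#define prm_bit_off(typ, fld) \
	((unsigned int)(uintptr_t)(&(prm_nullp(typ)->fld)))

/* prm_bit_off(prm_example, key) evaluates to 8. */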
+ */ + static int +-mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc) ++mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc) + { + struct mlx5_priv *priv = dev->data->dev_private; + +- if (!rte_is_power_of_2(desc)) { +- desc = 1 << log2above(desc); ++ if (!rte_is_power_of_2(*desc)) { ++ *desc = 1 << log2above(*desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Rx queue %u" + " to the next power of two (%d)", +- dev->data->port_id, idx, desc); ++ dev->data->port_id, idx, *desc); + } + DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors", +- dev->data->port_id, idx, desc); ++ dev->data->port_id, idx, *desc); + if (idx >= priv->rxqs_n) { + DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->rxqs_n); +@@ -503,7 +504,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + int res; + +- res = mlx5_rx_queue_pre_setup(dev, idx, desc); ++ res = mlx5_rx_queue_pre_setup(dev, idx, &desc); + if (res) + return res; + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); +@@ -544,7 +545,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + int res; + +- res = mlx5_rx_queue_pre_setup(dev, idx, desc); ++ res = mlx5_rx_queue_pre_setup(dev, idx, &desc); + if (res) + return res; + if (hairpin_conf->peer_count != 1 || +@@ -722,6 +723,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) + unsigned int count = 0; + struct rte_intr_handle *intr_handle = dev->intr_handle; + ++ /* Representor shares dev->intr_handle with PF. */ ++ if (priv->representor) ++ return 0; + if (!dev->data->dev_conf.intr_conf.rxq) + return 0; + mlx5_rx_intr_vec_disable(dev); +@@ -799,6 +803,9 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) + unsigned int rxqs_n = priv->rxqs_n; + unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + ++ /* Representor shares dev->intr_handle with PF. */ ++ if (priv->representor) ++ return; + if (!dev->data->dev_conf.intr_conf.rxq) + return; + if (!intr_handle->intr_vec) +@@ -1152,7 +1159,7 @@ static void + mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl, + struct mlx5_devx_wq_attr *wq_attr) + { +- wq_attr->end_padding_mode = priv->config.cqe_pad ? ++ wq_attr->end_padding_mode = priv->config.hw_padding ? 
+ MLX5_WQ_END_PAD_MODE_ALIGN : + MLX5_WQ_END_PAD_MODE_NONE; + wq_attr->pd = priv->sh->pdn; +@@ -1259,7 +1266,7 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); struct mlx5_devx_create_rq_attr attr = { 0 }; struct mlx5_rxq_obj *tmpl = NULL; - int ret = 0; +- int ret = 0; + uint32_t max_wq_data; assert(rxq_data); assert(!rxq_ctrl->obj); -@@ -1275,11 +1277,15 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) +@@ -1270,24 +1277,29 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) + "port %u Rx queue %u cannot allocate verbs resources", + dev->data->port_id, rxq_data->idx); + rte_errno = ENOMEM; +- goto error; ++ return NULL; + } tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN; tmpl->rxq_ctrl = rxq_ctrl; attr.hairpin = 1; @@ -27248,6 +55189,29 @@ index 986ec016df..2b6ab21b90 100644 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr, rxq_ctrl->socket); if (!tmpl->rq) { + DRV_LOG(ERR, + "port %u Rx hairpin queue %u can't create rq object", + dev->data->port_id, idx); ++ rte_free(tmpl); + rte_errno = errno; +- goto error; ++ return NULL; + } + DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); +@@ -1295,12 +1307,6 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) + LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return tmpl; +-error: +- ret = rte_errno; /* Save rte_errno before cleanup. */ +- if (tmpl->rq) +- mlx5_devx_cmd_destroy(tmpl->rq); +- rte_errno = ret; /* Restore rte_errno. */ +- return NULL; + } + + /** @@ -1762,9 +1768,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; @@ -27360,7 +55324,23 @@ index 986ec016df..2b6ab21b90 100644 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { -@@ -2465,13 +2482,42 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, +@@ -1958,13 +1975,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + tmpl->rxq.elts = + (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); + #ifndef RTE_ARCH_64 +- tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq; ++ tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq; + #endif + tmpl->rxq.idx = idx; + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); + return tmpl; + error: ++ mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh); + rte_free(tmpl); + return NULL; + } +@@ -2465,13 +2483,42 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, memset(&tir_attr, 0, sizeof(tir_attr)); tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT; tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ; @@ -27407,10 +55387,63 @@ index 986ec016df..2b6ab21b90 100644 if (dev->data->dev_conf.lpbk_mode) tir_attr.self_lb_block = diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx.c b/dpdk/drivers/net/mlx5/mlx5_rxtx.c -index acf0fd794b..488a87f593 100644 +index acf0fd794b..ac5c1868a0 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx.c +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx.c -@@ -654,10 +654,10 @@ check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) +@@ -469,13 +469,15 @@ rx_queue_count(struct mlx5_rxq_data *rxq) + struct rxq_zip *zip = &rxq->zip; + volatile struct mlx5_cqe *cqe; + const unsigned int cqe_n = (1 << rxq->cqe_n); ++ const unsigned int sges_n = (1 << rxq->sges_n); ++ const unsigned int elts_n = (1 << rxq->elts_n); ++ const 
unsigned int strd_n = (1 << rxq->strd_num_n); + const unsigned int cqe_cnt = cqe_n - 1; +- unsigned int cq_ci; +- unsigned int used; ++ unsigned int cq_ci, used; + + /* if we are processing a compressed cqe */ + if (zip->ai) { +- used = zip->cqe_cnt - zip->ca; ++ used = zip->cqe_cnt - zip->ai; + cq_ci = zip->cq_ci; + } else { + used = 0; +@@ -495,7 +497,7 @@ rx_queue_count(struct mlx5_rxq_data *rxq) + used += n; + cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; + } +- used = RTE_MIN(used, (1U << rxq->elts_n) - 1); ++ used = RTE_MIN(used * sges_n, elts_n * strd_n); + return used; + } + +@@ -518,11 +520,12 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv); + +- if (dev->rx_pkt_burst != mlx5_rx_burst) { ++ if (dev->rx_pkt_burst == NULL || ++ dev->rx_pkt_burst == removed_rx_burst) { + rte_errno = ENOTSUP; + return -rte_errno; + } +- if (offset >= (1 << rxq->elts_n)) { ++ if (offset >= (1 << rxq->cqe_n)) { + rte_errno = EINVAL; + return -rte_errno; + } +@@ -550,7 +553,8 @@ mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq; + +- if (dev->rx_pkt_burst != mlx5_rx_burst) { ++ if (dev->rx_pkt_burst == NULL || ++ dev->rx_pkt_burst == removed_rx_burst) { + rte_errno = ENOTSUP; + return -rte_errno; + } +@@ -654,10 +658,10 @@ check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) * Pointer to the error CQE. * * @return @@ -27424,7 +55457,7 @@ index acf0fd794b..488a87f593 100644 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq, volatile struct mlx5_err_cqe *err_cqe) { -@@ -701,18 +701,14 @@ mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq, +@@ -701,18 +705,14 @@ mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq, */ txq->stats.oerrors += ((txq->wqe_ci & wqe_m) - new_wqe_pi) & wqe_m; @@ -27449,7 +55482,58 @@ index acf0fd794b..488a87f593 100644 } /** -@@ -1253,9 +1249,10 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, +@@ -1132,6 +1132,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + } else { + int ret; + int8_t op_own; ++ uint32_t cq_ci; + + ret = check_cqe(cqe, cqe_n, rxq->cq_ci); + if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { +@@ -1145,14 +1146,19 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + return 0; + } + } +- ++rxq->cq_ci; ++ /* ++ * Introduce the local variable to have queue cq_ci ++ * index in queue structure always consistent with ++ * actual CQE boundary (not pointing to the middle ++ * of compressed CQE session). ++ */ ++ cq_ci = rxq->cq_ci + 1; + op_own = cqe->op_own; + if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) { + volatile struct mlx5_mini_cqe8 (*mc)[8] = + (volatile struct mlx5_mini_cqe8 (*)[8]) + (uintptr_t)(&(*rxq->cqes) +- [rxq->cq_ci & +- cqe_cnt].pkt_info); ++ [cq_ci & cqe_cnt].pkt_info); + + /* Fix endianness. */ + zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); +@@ -1165,10 +1171,9 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + * 7 CQEs after the initial CQE instead of 8 + * for subsequent ones. + */ +- zip->ca = rxq->cq_ci; ++ zip->ca = cq_ci; + zip->na = zip->ca + 7; + /* Compute the next non compressed CQE. */ +- --rxq->cq_ci; + zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; + /* Get packet size to return. 
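The rx_queue_count() rework above fixes two counting errors at once: inside a compressed-CQE session the consumed entries are tracked by zip->ai (the old code subtracted zip->ca, the session start, and over-reported), and the total has to be scaled by the segments each CQE can cover before being capped at the ring capacity. The arithmetic, condensed under a hypothetical struct:

struct zip_state { unsigned int ai, cq_ci, cqe_cnt; };

static unsigned int
rxq_fill_level(const struct zip_state *zip, unsigned int walked,
	       unsigned int sges_n, unsigned int capacity)
{
	/* CQEs still pending from the current compressed session. */
	unsigned int used = zip->ai ? zip->cqe_cnt - zip->ai : 0;

	used = (used + walked) * sges_n;	/* CQEs -> segments */
	return used < capacity ? used : capacity;
}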
*/ + len = rte_be_to_cpu_32((*mc)[0].byte_cnt); +@@ -1183,6 +1188,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + ++idx; + } + } else { ++ rxq->cq_ci = cq_ci; + len = rte_be_to_cpu_32(cqe->byte_cnt); + } + } +@@ -1253,9 +1259,10 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, pkt->hash.fdir.hi = mlx5_flow_mark_get(mark); } } @@ -27463,7 +55547,16 @@ index acf0fd794b..488a87f593 100644 } if (rxq->csum) pkt->ol_flags |= rxq_cq_to_ol_flags(cqe); -@@ -1574,21 +1571,20 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1442,7 +1449,7 @@ mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp, + if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK) + tcp->tcp_flags |= RTE_TCP_PSH_FLAG; + tcp->cksum = 0; +- csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4); ++ csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4); + csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); + csum = (~csum) & 0xffff; + if (csum == 0) +@@ -1574,21 +1581,20 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int i = 0; uint32_t rq_ci = rxq->rq_ci; uint16_t consumed_strd = rxq->consumed_strd; @@ -27487,7 +55580,7 @@ index acf0fd794b..488a87f593 100644 if (consumed_strd == strd_n) { /* Replace WQE only if the buffer is still in use. */ -@@ -1634,18 +1630,6 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1634,18 +1640,6 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } assert(strd_idx < strd_n); assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask)); @@ -27506,7 +55599,7 @@ index acf0fd794b..488a87f593 100644 pkt = rte_pktmbuf_alloc(rxq->mp); if (unlikely(pkt == NULL)) { ++rxq->stats.rx_nombuf; -@@ -1657,23 +1641,57 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1657,23 +1651,57 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) len -= RTE_ETHER_CRC_LEN; offset = strd_idx * strd_sz + strd_shift; addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset); @@ -27572,7 +55665,7 @@ index acf0fd794b..488a87f593 100644 } else { rte_iova_t buf_iova; struct rte_mbuf_ext_shared_info *shinfo; -@@ -1684,7 +1702,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1684,7 +1712,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) rte_atomic16_add_return(&buf->refcnt, 1); assert((uint16_t)rte_atomic16_read(&buf->refcnt) <= strd_n + 1); @@ -27581,7 +55674,7 @@ index acf0fd794b..488a87f593 100644 /* * MLX5 device doesn't use iova but it is necessary in a * case where the Rx packet is transmitted via a -@@ -1703,43 +1721,42 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1703,43 +1731,42 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova, buf_len, shinfo); /* Set mbuf head-room. */ @@ -27646,7 +55739,7 @@ index acf0fd794b..488a87f593 100644 } PKT_LEN(pkt) = len; PORT(pkt) = rxq->port_id; -@@ -1751,6 +1768,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1751,6 +1778,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) *(pkts++) = pkt; ++i; } @@ -27654,7 +55747,7 @@ index acf0fd794b..488a87f593 100644 /* Update the consumer indexes. 
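The MPRQ receive path reorganized above chooses between two ways of handing a stride to the application: packets up to mprq_max_memcpy_len are copied into a private mbuf so the stride can be recycled immediately, while larger ones are attached zero-copy as external buffers whose shared-info refcount keeps the whole striding-RQ buffer alive until every borrower frees its mbuf. The decision itself is tiny (hypothetical names; the second operand guards against running out of replacement buffers while strides are on loan):

#include <stdint.h>

static inline int
mprq_should_copy(uint32_t len, uint32_t max_memcpy_len, int have_repl_buf)
{
	return len <= max_memcpy_len || !have_repl_buf;
}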
*/ rxq->consumed_strd = consumed_strd; rte_cio_wmb(); -@@ -2034,8 +2052,6 @@ mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, +@@ -2034,8 +2062,6 @@ mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, * Pointer to TX queue structure. * @param valid CQE pointer * if not NULL update txq->wqe_pi and flush the buffers @@ -27663,7 +55756,7 @@ index acf0fd794b..488a87f593 100644 * @param olx * Configured Tx offloads mask. It is fully defined at * compile time and may be used for optimization. -@@ -2043,25 +2059,17 @@ mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, +@@ -2043,25 +2069,17 @@ mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, static __rte_always_inline void mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq, volatile struct mlx5_cqe *last_cqe, @@ -27696,7 +55789,7 @@ index acf0fd794b..488a87f593 100644 } } -@@ -2085,6 +2093,7 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, +@@ -2085,6 +2103,7 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, { unsigned int count = MLX5_TX_COMP_MAX_CQE; volatile struct mlx5_cqe *last_cqe = NULL; @@ -27704,7 +55797,7 @@ index acf0fd794b..488a87f593 100644 int ret; static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value"); -@@ -2109,31 +2118,49 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, +@@ -2109,31 +2128,49 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, rte_wmb(); ret = mlx5_tx_error_cqe_handle (txq, (volatile struct mlx5_err_cqe *)cqe); @@ -27772,7 +55865,7 @@ index acf0fd794b..488a87f593 100644 } /** -@@ -2145,9 +2172,6 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, +@@ -2145,9 +2182,6 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, * Pointer to TX queue structure. * @param loc * Pointer to burst routine local context. @@ -27782,7 +55875,7 @@ index acf0fd794b..488a87f593 100644 * @param olx * Configured Tx offloads mask. It is fully defined at * compile time and may be used for optimization. -@@ -2155,13 +2179,12 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, +@@ -2155,13 +2189,12 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, static __rte_always_inline void mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, struct mlx5_txq_local *restrict loc, @@ -27797,7 +55890,7 @@ index acf0fd794b..488a87f593 100644 0 : loc->pkts_sent - loc->pkts_copy; head += part; if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH || -@@ -2175,15 +2198,15 @@ mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, +@@ -2175,15 +2208,15 @@ mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, /* Request unconditional completion on last WQE. */ last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET); @@ -27822,7 +55915,7 @@ index acf0fd794b..488a87f593 100644 } } -@@ -2818,8 +2841,14 @@ mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, +@@ -2818,8 +2851,14 @@ mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, unsigned int part; uint8_t *pdst; @@ -27839,7 +55932,7 @@ index acf0fd794b..488a87f593 100644 /* * The WQEBB space availability is checked by caller. * Here we should be aware of WQE ring buffer wraparound only. -@@ -2831,7 +2860,8 @@ mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, +@@ -2831,7 +2870,8 @@ mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, len -= part; if (likely(!len)) { pdst += part; @@ -27849,7 +55942,7 @@ index acf0fd794b..488a87f593 100644 /* Note: no final wraparound check here. 
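The completion logic consolidated above also encodes the batching policy: a Tx CQE is not requested per packet but once every MLX5_TX_COMP_THRESH packets (or when WQE resources run low), with intermediate WQEs flagged MLX5_COMP_ONLY_FIRST_ERR, so a single completion acknowledges and frees a whole batch of mbufs. The threshold test relies on free-running 16-bit counters, sketched here:

#include <stdint.h>

#define TX_COMP_THRESH 32	/* illustrative value */

/* 'head' and 'elts_comp' are free-running ring counters; uint16_t
 * subtraction wraps correctly, so no special case at the ring end. */
static inline int
tx_completion_due(uint16_t head, uint16_t elts_comp)
{
	return (uint16_t)(head - elts_comp) >= TX_COMP_THRESH;
}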
*/ return (struct mlx5_wqe_dseg *)pdst; } -@@ -2879,9 +2909,16 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, +@@ -2879,9 +2919,16 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, static_assert(MLX5_DSEG_MIN_INLINE_SIZE == (2 * RTE_ETHER_ADDR_LEN), "invalid Data Segment data size"); @@ -27869,7 +55962,7 @@ index acf0fd794b..488a87f593 100644 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE); buf += MLX5_DSEG_MIN_INLINE_SIZE; pdst += MLX5_DSEG_MIN_INLINE_SIZE; -@@ -2904,7 +2941,8 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, +@@ -2904,7 +2951,8 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, len -= part; if (likely(!len)) { pdst += part; @@ -27879,7 +55972,7 @@ index acf0fd794b..488a87f593 100644 /* Note: no final wraparound check here. */ return (struct mlx5_wqe_dseg *)pdst; } -@@ -3120,8 +3158,6 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq, +@@ -3120,8 +3168,6 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq, wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); txq->wqe_ci += (ds + 3) / 4; loc->wqe_free -= (ds + 3) / 4; @@ -27888,7 +55981,7 @@ index acf0fd794b..488a87f593 100644 return MLX5_TXCMP_CODE_MULTI; } -@@ -3230,8 +3266,6 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq, +@@ -3230,8 +3276,6 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq, } while (true); txq->wqe_ci += (ds + 3) / 4; loc->wqe_free -= (ds + 3) / 4; @@ -27897,7 +55990,7 @@ index acf0fd794b..488a87f593 100644 return MLX5_TXCMP_CODE_MULTI; } -@@ -3388,8 +3422,6 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, +@@ -3388,8 +3432,6 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); txq->wqe_ci += (ds + 3) / 4; loc->wqe_free -= (ds + 3) / 4; @@ -27906,7 +55999,7 @@ index acf0fd794b..488a87f593 100644 return MLX5_TXCMP_CODE_MULTI; } -@@ -3599,8 +3631,6 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq, +@@ -3599,8 +3641,6 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq, --loc->elts_free; ++loc->pkts_sent; --pkts_n; @@ -27915,7 +56008,7 @@ index acf0fd794b..488a87f593 100644 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) return MLX5_TXCMP_CODE_EXIT; loc->mbuf = *pkts++; -@@ -3750,7 +3780,7 @@ mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, +@@ -3750,7 +3790,7 @@ mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, struct mlx5_txq_local *restrict loc, unsigned int ds, unsigned int slen, @@ -27924,7 +56017,7 @@ index acf0fd794b..488a87f593 100644 { assert(!MLX5_TXOFF_CONFIG(INLINE)); #ifdef MLX5_PMD_SOFT_COUNTERS -@@ -3765,8 +3795,6 @@ mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, +@@ -3765,8 +3805,6 @@ mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); txq->wqe_ci += (ds + 3) / 4; loc->wqe_free -= (ds + 3) / 4; @@ -27933,7 +56026,7 @@ index acf0fd794b..488a87f593 100644 } /* -@@ -3797,20 +3825,36 @@ mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq, +@@ -3797,20 +3835,36 @@ mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq, unsigned int slen, unsigned int olx __rte_unused) { @@ -27974,7 +56067,7 @@ index acf0fd794b..488a87f593 100644 } /** -@@ -4011,8 +4055,6 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq, +@@ -4011,8 +4065,6 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq, txq->wqe_ci += (2 + part + 3) / 4; loc->wqe_free -= (2 + part + 3) / 4; pkts_n -= part; @@ -27983,7 +56076,7 @@ 
index acf0fd794b..488a87f593 100644 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) return MLX5_TXCMP_CODE_EXIT; loc->mbuf = *pkts++; -@@ -4088,6 +4130,15 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, +@@ -4088,6 +4140,15 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, loc->wqe_free) * MLX5_WQE_SIZE - MLX5_WQE_CSEG_SIZE - MLX5_WQE_ESEG_SIZE; @@ -27999,7 +56092,7 @@ index acf0fd794b..488a87f593 100644 /* Build WQE till we have space, packets and resources. */ part = room; for (;;) { -@@ -4117,8 +4168,28 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, +@@ -4117,8 +4178,28 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, /* Inline or not inline - that's the Question. */ if (dlen > txq->inlen_empw) goto pointer_empw; @@ -28029,7 +56122,7 @@ index acf0fd794b..488a87f593 100644 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { /* -@@ -4143,7 +4214,8 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, +@@ -4143,7 +4224,8 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, dseg = mlx5_tx_dseg_empw(txq, loc, dseg, dptr, dlen, olx); } @@ -28039,7 +56132,7 @@ index acf0fd794b..488a87f593 100644 assert(room >= tlen); room -= tlen; /* -@@ -4153,6 +4225,14 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, +@@ -4153,6 +4235,14 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, rte_pktmbuf_free_seg(loc->mbuf); goto next_mbuf; pointer_empw: @@ -28054,7 +56147,7 @@ index acf0fd794b..488a87f593 100644 /* * Not inlinable VLAN packets are * proceeded outside of this routine. -@@ -4496,8 +4576,6 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, +@@ -4496,8 +4586,6 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, } ++loc->pkts_sent; --pkts_n; @@ -28063,7 +56156,7 @@ index acf0fd794b..488a87f593 100644 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) return MLX5_TXCMP_CODE_EXIT; loc->mbuf = *pkts++; -@@ -4596,7 +4674,7 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, +@@ -4596,7 +4684,7 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, /* * Calculate the number of available resources - elts and WQEs. * There are two possible different scenarios: @@ -28072,7 +56165,7 @@ index acf0fd794b..488a87f593 100644 * four packets, in this case elts become scarce resource * - data inlining into WQEs, one packet may require multiple * WQEBBs, the WQEs become the limiting factor. -@@ -4776,6 +4854,8 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, +@@ -4776,6 +4864,8 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, /* Take a shortcut if nothing is sent. */ if (unlikely(loc.pkts_sent == loc.pkts_loop)) goto burst_exit; @@ -28081,7 +56174,7 @@ index acf0fd794b..488a87f593 100644 /* * Ring QP doorbell immediately after WQE building completion * to improve latencies. The pure software related data treatment -@@ -4977,7 +5057,7 @@ MLX5_TXOFF_DECL(iv, +@@ -4977,7 +5067,7 @@ MLX5_TXOFF_DECL(iv, /* * Generate routines with Legacy Multi-Packet Write support. 
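A recurring bit of arithmetic in the Tx hunks above is worth spelling out: WQEs are measured in 16-byte data segments (ds), but the rings advance in 64-byte WQE basic blocks, so every hunk that finishes a WQE bumps wqe_ci and shrinks wqe_free by a ceiling division:

/* Four 16-byte segments per 64-byte WQEBB; round up. */
static inline unsigned int
wqebb_count(unsigned int ds)
{
	return (ds + 3) / 4;
}

For example, a WQE carrying 5 segments occupies wqebb_count(5) == 2 basic blocks.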
@@ -28090,7 +56183,7 @@ index acf0fd794b..488a87f593 100644 * offload limitations, not supported: * - ACL/Flows (metadata are becoming meaningless) * - WQE Inline headers -@@ -4995,6 +5075,10 @@ MLX5_TXOFF_DECL(mci_mpw, +@@ -4995,6 +5085,10 @@ MLX5_TXOFF_DECL(mci_mpw, MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW) @@ -28101,7 +56194,7 @@ index acf0fd794b..488a87f593 100644 MLX5_TXOFF_DECL(i_mpw, MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW) -@@ -5151,6 +5235,10 @@ MLX5_TXOFF_INFO(mci_mpw, +@@ -5151,6 +5245,10 @@ MLX5_TXOFF_INFO(mci_mpw, MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW) @@ -28112,8 +56205,18 @@ index acf0fd794b..488a87f593 100644 MLX5_TXOFF_INFO(i_mpw, MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW) +@@ -5277,6 +5375,9 @@ mlx5_select_tx_function(struct rte_eth_dev *dev) + /* Does not meet requested offloads at all. */ + continue; + } ++ if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW) ++ /* Do not enable legacy MPW if not configured. */ ++ continue; + if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW) + /* Do not enable eMPW if not configured. */ + continue; diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/dpdk/drivers/net/mlx5/mlx5_rxtx.h -index e927343f7d..a50f057c1e 100644 +index e927343f7d..daa67e2f5c 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx.h @@ -114,9 +114,9 @@ struct mlx5_rxq_data { @@ -28160,7 +56263,15 @@ index e927343f7d..a50f057c1e 100644 volatile struct mlx5_cqe *cqes; /* Completion queue. */ volatile uint32_t *qp_db; /* Work queue doorbell. */ volatile uint32_t *cq_db; /* Completion queue doorbell. */ -@@ -440,6 +445,7 @@ int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx); +@@ -424,6 +429,7 @@ int mlx5_tx_hairpin_queue_setup + const struct rte_eth_hairpin_conf *hairpin_conf); + void mlx5_tx_queue_release(void *dpdk_txq); + int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd); ++void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev); + struct mlx5_txq_obj *mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + enum mlx5_txq_obj_type type); + struct mlx5_txq_obj *mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx); +@@ -440,6 +446,7 @@ int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx); int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx); int mlx5_txq_verify(struct rte_eth_dev *dev); void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl); @@ -28168,7 +56279,7 @@ index e927343f7d..a50f057c1e 100644 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev); /* mlx5_rxtx.c */ -@@ -451,9 +457,6 @@ extern uint8_t mlx5_swp_types_table[]; +@@ -451,9 +458,6 @@ extern uint8_t mlx5_swp_types_table[]; void mlx5_set_ptype_table(void); void mlx5_set_cksum_table(void); void mlx5_set_swp_types_table(void); @@ -28178,8 +56289,47 @@ index e927343f7d..a50f057c1e 100644 uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq); __rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec); +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c +index d85f90874d..0c705d1f7f 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c +@@ -103,13 +103,20 @@ uint16_t + mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + { + struct mlx5_rxq_data *rxq = dpdk_rxq; +- uint16_t nb_rx; ++ uint16_t nb_rx = 0; ++ uint16_t tn 
= 0; + uint64_t err = 0; ++ bool no_cq = false; + +- nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err); +- if (unlikely(err | rxq->err_state)) +- nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx); +- return nb_rx; ++ do { ++ nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, &err, &no_cq); ++ if (unlikely(err | rxq->err_state)) ++ nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx); ++ tn += nb_rx; ++ if (unlikely(no_cq)) ++ break; ++ } while (tn != pkts_n); ++ return tn; + } + + /** +@@ -149,7 +156,7 @@ int __attribute__((cold)) + mlx5_check_vec_rx_support(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; +- uint16_t i; ++ uint32_t i; + + if (!priv->config.rx_vec_en) + return -ENOTSUP; diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h -index 8e79883dfe..feb17fe1ce 100644 +index 8e79883dfe..c167672f52 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h @@ -11,7 +11,7 @@ @@ -28217,7 +56367,36 @@ index 8e79883dfe..feb17fe1ce 100644 pos += MLX5_VPMD_DESCS_PER_LOOP; /* Move to next CQE and invalidate consumed CQEs. */ -@@ -1010,9 +1029,9 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, +@@ -545,13 +564,15 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, + * @param[out] err + * Pointer to a flag. Set non-zero value if pkts array has at least one error + * packet to handle. ++ * @param[out] no_cq ++ * Pointer to a boolean. Set true if no new CQE seen. + * + * @return + * Number of packets received including errors (<= pkts_n). + */ + static inline uint16_t + rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, +- uint64_t *err) ++ uint64_t *err, bool *no_cq) + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +@@ -644,8 +665,10 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + /* Not to cross queue end. */ + pkts_n = RTE_MIN(pkts_n, q_n - elts_idx); + pkts_n = RTE_MIN(pkts_n, q_n - cq_idx); +- if (!pkts_n) ++ if (!pkts_n) { ++ *no_cq = !rcvd_pkt; + return rcvd_pkt; ++ } + /* At this point, there shouldn't be any remaining packets. */ + assert(rxq->decompressed == 0); + +@@ -1010,9 +1033,9 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, pkts[pos + 3]->timestamp = rte_be_to_cpu_64(cq[pos + p3].timestamp); } @@ -28230,8 +56409,28 @@ index 8e79883dfe..feb17fe1ce 100644 uint32_t metadata; /* This code is subject for futher optimization. */ +@@ -1060,8 +1083,10 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + break; + } + /* If no new CQE seen, return without updating cq_db. */ +- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) ++ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) { ++ *no_cq = true; + return rcvd_pkt; ++ } + /* Update the consumer indexes for non-compressed CQEs. 
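 *
 * (Editor's note, not upstream text: a hedged summary of the change
 * running through this file and mlx5_rxtx_vec.c above. rxq_burst_v()
 * gained a *no_cq out-parameter that reports it stopped because no
 * fresh CQE was found, and the mlx5_rx_burst_vec() wrapper drains the
 * queue in a loop along these lines:
 *
 * do {
 * nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, &err, &no_cq);
 * if (unlikely(err | rxq->err_state))
 * nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
 * tn += nb_rx;
 * } while (!no_cq && tn != pkts_n);
 *
 * so a single burst call can span several vector passes instead of
 * returning early with a partial result.)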
*/ + assert(nocmp_n <= pkts_n); + rxq->cq_ci += nocmp_n; +@@ -1089,6 +1114,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + } + rte_compiler_barrier(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); ++ *no_cq = !rcvd_pkt; + return rcvd_pkt; + } + diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h -index 86785c7496..f92ece4299 100644 +index 86785c7496..607659a629 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -205,6 +205,25 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, @@ -28260,12 +56459,41 @@ index 86785c7496..f92ece4299 100644 pos += MLX5_VPMD_DESCS_PER_LOOP; /* Move to next CQE and invalidate consumed CQEs. */ if (!(pos & 0x7) && pos < mcqe_n) { -@@ -687,28 +706,30 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, +@@ -358,13 +377,15 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, + * @param[out] err + * Pointer to a flag. Set non-zero value if pkts array has at least one error + * packet to handle. ++ * @param[out] no_cq ++ * Pointer to a boolean. Set true if no new CQE seen. + * + * @return + * Number of packets received including errors (<= pkts_n). + */ + static inline uint16_t + rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, +- uint64_t *err) ++ uint64_t *err, bool *no_cq) + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +@@ -465,8 +486,10 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + /* Not to cross queue end. */ + pkts_n = RTE_MIN(pkts_n, q_n - elts_idx); + pkts_n = RTE_MIN(pkts_n, q_n - cq_idx); +- if (!pkts_n) ++ if (!pkts_n) { ++ *no_cq = !rcvd_pkt; + return rcvd_pkt; ++ } + /* At this point, there shouldn't be any remained packets. */ + assert(rxq->decompressed == 0); + /* +@@ -687,28 +710,30 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, container_of(p3, struct mlx5_cqe, pkt_info)->timestamp); } - if (rte_flow_dynf_metadata_avail()) { -+ if (!!rxq->flow_meta_mask) { ++ if (rxq->dynf_meta) { /* This code is subject for futher optimization. */ - *RTE_FLOW_DYNF_METADATA(elts[pos]) = + int32_t offs = rxq->flow_meta_offset; @@ -28274,15 +56502,15 @@ index 86785c7496..f92ece4299 100644 container_of(p0, struct mlx5_cqe, pkt_info)->flow_table_metadata; - *RTE_FLOW_DYNF_METADATA(elts[pos + 1]) = -+ *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = ++ *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) = container_of(p1, struct mlx5_cqe, pkt_info)->flow_table_metadata; - *RTE_FLOW_DYNF_METADATA(elts[pos + 2]) = -+ *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = ++ *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) = container_of(p2, struct mlx5_cqe, pkt_info)->flow_table_metadata; - *RTE_FLOW_DYNF_METADATA(elts[pos + 3]) = -+ *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = ++ *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) = container_of(p3, struct mlx5_cqe, pkt_info)->flow_table_metadata; - if (*RTE_FLOW_DYNF_METADATA(elts[pos])) @@ -28304,8 +56532,28 @@ index 86785c7496..f92ece4299 100644 } #ifdef MLX5_PMD_SOFT_COUNTERS /* Add up received bytes count. */ +@@ -723,8 +748,10 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + break; + } + /* If no new CQE seen, return without updating cq_db. 
*/ +- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) ++ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) { ++ *no_cq = true; + return rcvd_pkt; ++ } + /* Update the consumer indexes for non-compressed CQEs. */ + assert(nocmp_n <= pkts_n); + rxq->cq_ci += nocmp_n; +@@ -752,6 +779,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + } + rte_cio_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); ++ *no_cq = !rcvd_pkt; + return rcvd_pkt; + } + diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h -index 35b7761007..bb59163a26 100644 +index 35b7761007..9935299d59 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h @@ -118,7 +118,6 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, @@ -28342,7 +56590,36 @@ index 35b7761007..bb59163a26 100644 pos += MLX5_VPMD_DESCS_PER_LOOP; /* Move to next CQE and invalidate consumed CQEs. */ if (!(pos & 0x7) && pos < mcqe_n) { -@@ -640,24 +658,26 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, +@@ -365,13 +383,15 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4], + * @param[out] err + * Pointer to a flag. Set non-zero value if pkts array has at least one error + * packet to handle. ++ * @param[out] no_cq ++ * Pointer to a boolean. Set true if no new CQE seen. + * + * @return + * Number of packets received including errors (<= pkts_n). + */ + static inline uint16_t + rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, +- uint64_t *err) ++ uint64_t *err, bool *no_cq) + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +@@ -453,8 +473,10 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + /* Not to cross queue end. */ + pkts_n = RTE_MIN(pkts_n, q_n - elts_idx); + pkts_n = RTE_MIN(pkts_n, q_n - cq_idx); +- if (!pkts_n) ++ if (!pkts_n) { ++ *no_cq = !rcvd_pkt; + return rcvd_pkt; ++ } + /* At this point, there shouldn't be any remained packets. */ + assert(rxq->decompressed == 0); + /* +@@ -640,24 +662,26 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, pkts[pos + 3]->timestamp = rte_be_to_cpu_64(cq[pos + p3].timestamp); } @@ -28382,8 +56659,28 @@ index 35b7761007..bb59163a26 100644 } #ifdef MLX5_PMD_SOFT_COUNTERS /* Add up received bytes count. */ +@@ -674,8 +698,10 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + break; + } + /* If no new CQE seen, return without updating cq_db. */ +- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) ++ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) { ++ *no_cq = true; + return rcvd_pkt; ++ } + /* Update the consumer indexes for non-compressed CQEs. 
*/ + assert(nocmp_n <= pkts_n); + rxq->cq_ci += nocmp_n; +@@ -703,6 +729,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + } + rte_compiler_barrier(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); ++ *no_cq = !rcvd_pkt; + return rcvd_pkt; + } + diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c -index 205e4fec78..636fc80c7c 100644 +index 205e4fec78..ccdcdcfff6 100644 --- a/dpdk/drivers/net/mlx5/mlx5_stats.c +++ b/dpdk/drivers/net/mlx5/mlx5_stats.c @@ -3,11 +3,13 @@ @@ -28400,7 +56697,7 @@ index 205e4fec78..636fc80c7c 100644 #include #include -@@ -136,26 +138,30 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { +@@ -136,26 +138,40 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); @@ -28413,9 +56710,9 @@ index 205e4fec78..636fc80c7c 100644 + if (priv->sh) { MKSTR(path, "%s/ports/%d/hw_counters/%s", - priv->sh->ibdev_path, - priv->ibv_port, - ctr_name); +- priv->sh->ibdev_path, +- priv->ibv_port, +- ctr_name); - - file = fopen(path, "rb"); - if (file) { @@ -28424,7 +56721,20 @@ index 205e4fec78..636fc80c7c 100644 - fclose(file); - if (n == 1) - return; ++ priv->sh->ibdev_path, ++ priv->ibv_port, ++ ctr_name); + fd = open(path, O_RDONLY); ++ /* ++ * in switchdev the file location is not per port ++ * but rather in /hw_counters/. ++ */ ++ if (fd == -1) { ++ MKSTR(path1, "%s/hw_counters/%s", ++ priv->sh->ibdev_path, ++ ctr_name); ++ fd = open(path1, O_RDONLY); ++ } + if (fd != -1) { + char buf[21] = {'\0'}; + ssize_t n = read(fd, buf, sizeof(buf)); @@ -28441,7 +56751,7 @@ index 205e4fec78..636fc80c7c 100644 } /** -@@ -194,8 +200,14 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) +@@ -194,8 +210,14 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) } for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { if (xstats_ctrl->info[i].ib) { @@ -28458,7 +56768,7 @@ index 205e4fec78..636fc80c7c 100644 } else { stats[i] = (uint64_t) et_stats->data[xstats_ctrl->dev_table_idx[i]]; -@@ -301,6 +313,7 @@ mlx5_stats_init(struct rte_eth_dev *dev) +@@ -301,6 +323,7 @@ mlx5_stats_init(struct rte_eth_dev *dev) unsigned int idx = xstats_ctrl->mlx5_stats_n++; xstats_ctrl->info[idx] = mlx5_counters_init[i]; @@ -28466,7 +56776,7 @@ index 205e4fec78..636fc80c7c 100644 } } assert(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS); -@@ -311,6 +324,7 @@ mlx5_stats_init(struct rte_eth_dev *dev) +@@ -311,6 +334,7 @@ mlx5_stats_init(struct rte_eth_dev *dev) DRV_LOG(ERR, "port %u cannot read device counters: %s", dev->data->port_id, strerror(rte_errno)); mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); @@ -28474,7 +56784,7 @@ index 205e4fec78..636fc80c7c 100644 free: rte_free(strings); } -@@ -353,7 +367,23 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, +@@ -353,7 +377,23 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, return ret; for (i = 0; i != mlx5_stats_n; ++i) { stats[i].id = i; @@ -28499,7 +56809,7 @@ index 205e4fec78..636fc80c7c 100644 } } return mlx5_stats_n; -@@ -375,9 +405,12 @@ int +@@ -375,9 +415,12 @@ int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct mlx5_priv *priv = dev->data->dev_private; @@ -28512,7 +56822,7 @@ index 205e4fec78..636fc80c7c 100644 memset(&tmp, 0, sizeof(tmp)); /* Add software counters. 
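 *
 * (Editor's note, not upstream text, and an assumption drawn from the
 * imissed_base and hw_stats fields this patch introduces: the kernel
 * owns the "out_of_buffer" counter and the PMD cannot clear it, so the
 * init and reset paths snapshot it via
 *
 * mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
 *
 * and later reads report the delta against that base, which yields a
 * software-resettable view of a hardware-owned counter.)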
*/ -@@ -420,8 +453,18 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -420,8 +463,18 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) #endif tmp.oerrors += txq->stats.oerrors; } @@ -28533,7 +56843,7 @@ index 205e4fec78..636fc80c7c 100644 #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: retrieve and add hardware counters. */ #endif -@@ -458,6 +501,7 @@ mlx5_stats_reset(struct rte_eth_dev *dev) +@@ -458,6 +511,7 @@ mlx5_stats_reset(struct rte_eth_dev *dev) sizeof(struct mlx5_txq_stats)); } mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); @@ -28541,20 +56851,47 @@ index 205e4fec78..636fc80c7c 100644 #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: reset hardware counters. */ #endif -@@ -500,8 +544,10 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) +@@ -482,8 +536,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + unsigned int i; +- unsigned int n = xstats_ctrl->mlx5_stats_n; +- uint64_t counters[n]; ++ uint64_t *counters; + int ret; + + stats_n = mlx5_ethtool_get_stats_n(dev); +@@ -494,15 +547,26 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) + } + if (xstats_ctrl->stats_n != stats_n) + mlx5_stats_init(dev); ++ counters = malloc(sizeof(*counters) * xstats_ctrl->mlx5_stats_n); ++ if (!counters) { ++ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats " ++ "counters", ++ dev->data->port_id); ++ rte_errno = ENOMEM; ++ return -rte_errno; ++ } + ret = mlx5_read_dev_counters(dev, counters); + if (ret) { + DRV_LOG(ERR, "port %u cannot read device counters: %s", dev->data->port_id, strerror(rte_errno)); ++ free(counters); return ret; } - for (i = 0; i != n; ++i) -+ for (i = 0; i != n; ++i) { ++ for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { xstats_ctrl->base[i] = counters[i]; +- + xstats_ctrl->hw_stats[i] = 0; + } - ++ free(counters); return 0; } + diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c -index cafab25c67..6fc4190f4e 100644 +index cafab25c67..04b06e11d7 100644 --- a/dpdk/drivers/net/mlx5/mlx5_trigger.c +++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c @@ -106,9 +106,12 @@ mlx5_rxq_start(struct rte_eth_dev *dev) @@ -28599,7 +56936,37 @@ index cafab25c67..6fc4190f4e 100644 ret = mlx5_flow_start(dev, &priv->flows); if (ret) { DRV_LOG(DEBUG, "port %u failed to set flows", -@@ -420,9 +427,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) +@@ -320,7 +327,18 @@ mlx5_dev_start(struct rte_eth_dev *dev) + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + /* Enable datapath on secondary process. */ + mlx5_mp_req_start_rxtx(dev); +- mlx5_dev_interrupt_handler_install(dev); ++ if (priv->sh->intr_handle.fd >= 0) { ++ priv->sh->port[priv->ibv_port - 1].ih_port_id = ++ (uint32_t)dev->data->port_id; ++ } else { ++ DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.", ++ dev->data->port_id); ++ dev->data->dev_conf.intr_conf.lsc = 0; ++ dev->data->dev_conf.intr_conf.rmv = 0; ++ } ++ if (priv->sh->intr_handle_devx.fd >= 0) ++ priv->sh->port[priv->ibv_port - 1].devx_ih_port_id = ++ (uint32_t)dev->data->port_id; + return 0; + error: + ret = rte_errno; /* Save rte_errno before cleanup. 
*/ +@@ -359,7 +377,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev) + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_rx_intr_vec_disable(dev); +- mlx5_dev_interrupt_handler_uninstall(dev); ++ priv->sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS; ++ priv->sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS; + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); + } +@@ -420,9 +439,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) } mlx5_txq_release(dev, i); } @@ -28618,7 +56985,7 @@ index cafab25c67..6fc4190f4e 100644 return 0; if (dev->data->promiscuous) { diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c -index bac4f71c24..c7751e83c0 100644 +index bac4f71c24..9c929a57ea 100644 --- a/dpdk/drivers/net/mlx5/mlx5_txq.c +++ b/dpdk/drivers/net/mlx5/mlx5_txq.c @@ -62,7 +62,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl) @@ -28630,6 +56997,61 @@ index bac4f71c24..c7751e83c0 100644 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) { const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n; +@@ -147,27 +147,27 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev) + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ + static int +-mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc) ++mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc) + { + struct mlx5_priv *priv = dev->data->dev_private; + +- if (desc <= MLX5_TX_COMP_THRESH) { ++ if (*desc <= MLX5_TX_COMP_THRESH) { + DRV_LOG(WARNING, + "port %u number of descriptors requested for Tx queue" + " %u must be higher than MLX5_TX_COMP_THRESH, using %u" +- " instead of %u", +- dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc); +- desc = MLX5_TX_COMP_THRESH + 1; ++ " instead of %u", dev->data->port_id, idx, ++ MLX5_TX_COMP_THRESH + 1, *desc); ++ *desc = MLX5_TX_COMP_THRESH + 1; + } +- if (!rte_is_power_of_2(desc)) { +- desc = 1 << log2above(desc); ++ if (!rte_is_power_of_2(*desc)) { ++ *desc = 1 << log2above(*desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Tx queue" + " %u to the next power of two (%d)", +- dev->data->port_id, idx, desc); ++ dev->data->port_id, idx, *desc); + } + DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors", +- dev->data->port_id, idx, desc); ++ dev->data->port_id, idx, *desc); + if (idx >= priv->txqs_n) { + DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->txqs_n); +@@ -210,7 +210,7 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + container_of(txq, struct mlx5_txq_ctrl, txq); + int res; + +- res = mlx5_tx_queue_pre_setup(dev, idx, desc); ++ res = mlx5_tx_queue_pre_setup(dev, idx, &desc); + if (res) + return res; + txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); +@@ -251,7 +251,7 @@ mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + container_of(txq, struct mlx5_txq_ctrl, txq); + int res; + +- res = mlx5_tx_queue_pre_setup(dev, idx, desc); ++ res = mlx5_tx_queue_pre_setup(dev, idx, &desc); + if (res) + return res; + if (hairpin_conf->peer_count != 1 || @@ -272,7 +272,6 @@ mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx, DRV_LOG(DEBUG, "port %u adding Tx queue %u to list", dev->data->port_id, idx); @@ -28658,15 +57080,68 @@ index bac4f71c24..c7751e83c0 100644 txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC; txq_ctrl->txq.db_nc = 0; -@@ -492,6 +491,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) +@@ -353,7 
+352,7 @@ txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl) + /* Assign an UAR lock according to UAR page number */ + lock_idx = (txq_ctrl->uar_mmap_offset / page_size) & + MLX5_UAR_PAGE_NUM_MASK; +- txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx]; ++ txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx]; + #endif + } + +@@ -425,6 +424,35 @@ txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl) + munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size); + } + ++/** ++ * Deinitialize Tx UAR registers for secondary process. ++ * ++ * @param dev ++ * Pointer to Ethernet device. ++ */ ++void ++mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev) ++{ ++ struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *) ++ dev->process_private; ++ const size_t page_size = sysconf(_SC_PAGESIZE); ++ void *addr; ++ unsigned int i; ++ ++ if (page_size == (size_t)-1) { ++ DRV_LOG(ERR, "Failed to get mem page size"); ++ return; ++ } ++ assert(rte_eal_process_type() == RTE_PROC_SECONDARY); ++ for (i = 0; i != ppriv->uar_table_sz; ++i) { ++ if (!ppriv->uar_table[i]) ++ continue; ++ addr = ppriv->uar_table[i]; ++ munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size); ++ ++ } ++} ++ + /** + * Initialize Tx UAR registers for secondary process. + * +@@ -491,7 +519,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) + container_of(txq_data, struct mlx5_txq_ctrl, txq); struct mlx5_devx_create_sq_attr attr = { 0 }; struct mlx5_txq_obj *tmpl = NULL; - int ret = 0; +- int ret = 0; + uint32_t max_wq_data; assert(txq_data); assert(!txq_ctrl->obj); -@@ -508,11 +508,15 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) +@@ -502,39 +530,36 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) + "port %u Tx queue %u cannot allocate memory resources", + dev->data->port_id, txq_data->idx); + rte_errno = ENOMEM; +- goto error; ++ return NULL; + } + tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN; tmpl->txq_ctrl = txq_ctrl; attr.hairpin = 1; attr.tis_lst_sz = 1; @@ -28686,7 +57161,49 @@ index bac4f71c24..c7751e83c0 100644 attr.tis_num = priv->sh->tis->id; tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr); if (!tmpl->sq) { -@@ -718,13 +722,22 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + DRV_LOG(ERR, + "port %u tx hairpin queue %u can't create sq object", + dev->data->port_id, idx); ++ rte_free(tmpl); + rte_errno = errno; +- goto error; ++ return NULL; + } + DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next); + return tmpl; +-error: +- ret = rte_errno; /* Save rte_errno before cleanup. */ +- if (tmpl->tis) +- mlx5_devx_cmd_destroy(tmpl->tis); +- if (tmpl->sq) +- mlx5_devx_cmd_destroy(tmpl->sq); +- rte_errno = ret; /* Restore rte_errno. 
*/ +- return NULL; + } + + /** +@@ -562,7 +587,6 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + struct mlx5_txq_obj *txq_obj = NULL; + union { + struct ibv_qp_init_attr_ex init; +- struct ibv_cq_init_attr_ex cq; + struct ibv_qp_attr mod; + } attr; + unsigned int cqe_n; +@@ -590,9 +614,6 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + return NULL; + } + memset(&tmpl, 0, sizeof(struct mlx5_txq_obj)); +- attr.cq = (struct ibv_cq_init_attr_ex){ +- .comp_mask = 0, +- }; + cqe_n = desc / MLX5_TX_COMP_THRESH + + 1 + MLX5_TX_COMP_THRESH_INLINE_DIV; + tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0); +@@ -718,13 +739,22 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, txq_data->cq_db = cq_info.dbrec; txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf; txq_data->cq_ci = 0; @@ -28711,16 +57228,16 @@ index bac4f71c24..c7751e83c0 100644 #ifdef HAVE_IBV_FLOW_DV_SUPPORT /* * If using DevX need to query and store TIS transport domain value. -@@ -773,6 +786,8 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, +@@ -773,6 +803,8 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, claim_zero(mlx5_glue->destroy_cq(tmpl.cq)); if (tmpl.qp) claim_zero(mlx5_glue->destroy_qp(tmpl.qp)); -+ if (txq_data && txq_data->fcqs) ++ if (txq_data->fcqs) + rte_free(txq_data->fcqs); if (txq_obj) rte_free(txq_obj); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; -@@ -827,6 +842,8 @@ mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj) +@@ -827,6 +859,8 @@ mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj) } else { claim_zero(mlx5_glue->destroy_qp(txq_obj->qp)); claim_zero(mlx5_glue->destroy_cq(txq_obj->cq)); @@ -28729,7 +57246,7 @@ index bac4f71c24..c7751e83c0 100644 } LIST_REMOVE(txq_obj, next); rte_free(txq_obj); -@@ -964,7 +981,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) +@@ -964,7 +998,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * If there is requested minimal amount of data to inline * we MUST enable inlining. This is a case for ConnectX-4 * which usually requires L2 inlined for correct operating @@ -28738,8 +57255,47 @@ index bac4f71c24..c7751e83c0 100644 * support E-Switch Flows. */ if (inlen_mode) { +@@ -1288,6 +1322,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); + return tmpl; + error: ++ mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh); + rte_free(tmpl); + return NULL; + } +diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c +index 5d86615ea0..c1c238941d 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_utils.c ++++ b/dpdk/drivers/net/mlx5/mlx5_utils.c +@@ -20,7 +20,7 @@ mlx5_hlist_create(const char *name, uint32_t size) + if (!rte_is_power_of_2(size)) { + act_size = rte_align32pow2(size); + DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will " +- "be aligned to 0x%" PRIX32 ".\n", size, act_size); ++ "be aligned to 0x%" PRIX32 ".", size, act_size); + } else { + act_size = size; + } +@@ -29,7 +29,7 @@ mlx5_hlist_create(const char *name, uint32_t size) + /* Using zmalloc, then no need to initialize the heads. */ + h = rte_zmalloc(name, alloc_size, RTE_CACHE_LINE_SIZE); + if (!h) { +- DRV_LOG(ERR, "No memory for hash list %s creation\n", ++ DRV_LOG(ERR, "No memory for hash list %s creation", + name ? 
name : "None"); + return NULL; + } +@@ -37,7 +37,7 @@ mlx5_hlist_create(const char *name, uint32_t size) + snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name); + h->table_sz = act_size; + h->mask = act_size - 1; +- DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.\n", ++ DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.", + h->name, act_size); + return h; + } diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.h b/dpdk/drivers/net/mlx5/mlx5_utils.h -index b4ed8c6dad..fdf1379866 100644 +index b4ed8c6dad..b23eec622d 100644 --- a/dpdk/drivers/net/mlx5/mlx5_utils.h +++ b/dpdk/drivers/net/mlx5/mlx5_utils.h @@ -15,16 +15,6 @@ @@ -28759,7 +57315,18 @@ index b4ed8c6dad..fdf1379866 100644 /* Bit-field manipulation. */ #define BITFIELD_DECLARE(bf, type, size) \ type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \ -@@ -146,9 +136,10 @@ extern int mlx5_logtype; +@@ -125,10 +115,6 @@ extern int mlx5_logtype; + + #endif /* NDEBUG */ + +-#define INFO(...) DRV_LOG(INFO, __VA_ARGS__) +-#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__) +-#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__) +- + /* Convenience macros for accessing mbuf fields. */ + #define NEXT(m) ((m)->next) + #define DATA_LEN(m) ((m)->data_len) +@@ -146,9 +132,10 @@ extern int mlx5_logtype; /* Allocate a buffer on the stack and fill it with a printf format string. */ #define MKSTR(name, ...) \ @@ -28785,6 +57352,199 @@ index 865ad61aed..4aea876488 100644 return ret; } +diff --git a/dpdk/drivers/net/mvneta/mvneta_rxtx.c b/dpdk/drivers/net/mvneta/mvneta_rxtx.c +index 10b6f57584..dfa7ecc090 100644 +--- a/dpdk/drivers/net/mvneta/mvneta_rxtx.c ++++ b/dpdk/drivers/net/mvneta/mvneta_rxtx.c +@@ -872,7 +872,17 @@ mvneta_rx_queue_flush(struct mvneta_rxq *rxq) + int ret, i; + + descs = rte_malloc("rxdesc", MRVL_NETA_RXD_MAX * sizeof(*descs), 0); ++ if (descs == NULL) { ++ MVNETA_LOG(ERR, "Failed to allocate descs."); ++ return; ++ } ++ + bufs = rte_malloc("buffs", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0); ++ if (bufs == NULL) { ++ MVNETA_LOG(ERR, "Failed to allocate bufs."); ++ rte_free(descs); ++ return; ++ } + + do { + num = MRVL_NETA_RXD_MAX; +diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +index b98b1fd667..fe4cadda04 100644 +--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c ++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +@@ -443,8 +443,8 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + * when this feature has not been enabled/supported so far + * (TODO check scattered_rx flag here once scattered RX is supported). + */ +- if (mru + MRVL_PKT_OFFS > mbuf_data_size) { +- mru = mbuf_data_size - MRVL_PKT_OFFS; ++ if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) { ++ mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS; + mtu = MRVL_PP2_MRU_TO_MTU(mru); + MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted " + "by current mbuf size: %u. 
Set MTU to %u, MRU to %u", +@@ -673,18 +673,6 @@ mrvl_dev_start(struct rte_eth_dev *dev) + priv->uc_mc_flushed = 1; + } + +- if (!priv->vlan_flushed) { +- ret = pp2_ppio_flush_vlan(priv->ppio); +- if (ret) { +- MRVL_LOG(ERR, "Failed to flush vlan list"); +- /* +- * TODO +- * once pp2_ppio_flush_vlan() is supported jump to out +- * goto out; +- */ +- } +- priv->vlan_flushed = 1; +- } + ret = mrvl_mtu_set(dev, dev->data->mtu); + if (ret) + MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu); +@@ -816,7 +804,7 @@ mrvl_flush_bpool(struct rte_eth_dev *dev) + unsigned int core_id = rte_lcore_id(); + + if (core_id == LCORE_ID_ANY) +- core_id = 0; ++ core_id = rte_get_master_lcore(); + + hif = mrvl_get_hif(priv, core_id); + +@@ -1611,8 +1599,8 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + static int + mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) + { +- struct buff_release_entry entries[MRVL_PP2_RXD_MAX]; +- struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX]; ++ struct buff_release_entry entries[num]; ++ struct rte_mbuf *mbufs[num]; + int i, ret; + unsigned int core_id; + struct pp2_hif *hif; +@@ -1620,7 +1608,7 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) + + core_id = rte_lcore_id(); + if (core_id == LCORE_ID_ANY) +- core_id = 0; ++ core_id = rte_get_master_lcore(); + + hif = mrvl_get_hif(rxq->priv, core_id); + if (!hif) +@@ -1708,7 +1696,8 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + return -EFAULT; + } + +- frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS; ++ frame_size = buf_size - RTE_PKTMBUF_HEADROOM - ++ MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN; + if (frame_size < max_rx_pkt_len) { + MRVL_LOG(WARNING, + "Mbuf size must be increased to %u bytes to hold up " +@@ -1770,7 +1759,7 @@ mrvl_rx_queue_release(void *rxq) + unsigned int core_id = rte_lcore_id(); + + if (core_id == LCORE_ID_ANY) +- core_id = 0; ++ core_id = rte_get_master_lcore(); + + if (!q) + return; +@@ -2168,7 +2157,6 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, + *l4_offset = *l3_offset + MRVL_ARP_LENGTH; + break; + default: +- MRVL_LOG(DEBUG, "Failed to recognise l3 packet type"); + break; + } + +@@ -2180,7 +2168,6 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, + packet_type |= RTE_PTYPE_L4_UDP; + break; + default: +- MRVL_LOG(DEBUG, "Failed to recognise l4 packet type"); + break; + } + +@@ -2250,10 +2237,9 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, + q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); +- if (unlikely(ret < 0)) { +- MRVL_LOG(ERR, "Failed to receive packets"); ++ if (unlikely(ret < 0)) + return 0; +- } ++ + mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; + + for (i = 0; i < nb_pkts; i++) { +@@ -2316,21 +2302,13 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + if (unlikely(num <= q->priv->bpool_min_size || + (!rx_done && num < q->priv->bpool_init_size))) { +- ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); +- if (ret) +- MRVL_LOG(ERR, "Failed to fill bpool"); ++ mrvl_fill_bpool(q, MRVL_BURST_SIZE); + } else if (unlikely(num > q->priv->bpool_max_size)) { + int i; + int pkt_to_remove = num - q->priv->bpool_init_size; + struct rte_mbuf *mbuf; + struct pp2_buff_inf buff; + +- MRVL_LOG(DEBUG, +- "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)", +- bpool->pp2_id, q->priv->ppio->port_id, +- bpool->id, pkt_to_remove, num, 
+- q->priv->bpool_init_size); +- + for (i = 0; i < pkt_to_remove; i++) { + ret = pp2_bpool_get_buff(hif, bpool, &buff); + if (ret) +@@ -2523,12 +2501,8 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + sq, q->queue_id, 0); + + sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; +- if (unlikely(nb_pkts > sq_free_size)) { +- MRVL_LOG(DEBUG, +- "No room in shadow queue for %d packets! %d packets will be sent.", +- nb_pkts, sq_free_size); ++ if (unlikely(nb_pkts > sq_free_size)) + nb_pkts = sq_free_size; +- } + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf = tx_pkts[i]; +@@ -2645,10 +2619,6 @@ mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, + */ + if (unlikely(total_descs > sq_free_size)) { + total_descs -= nb_segs; +- RTE_LOG(DEBUG, PMD, +- "No room in shadow queue for %d packets! " +- "%d packets will be sent.\n", +- nb_pkts, i); + break; + } + +diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.h b/dpdk/drivers/net/mvpp2/mrvl_ethdev.h +index db6632f5b6..eee5182ce8 100644 +--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.h ++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.h +@@ -186,7 +186,6 @@ struct mrvl_priv { + uint8_t bpool_bit; + uint8_t rss_hf_tcp; + uint8_t uc_mc_flushed; +- uint8_t vlan_flushed; + uint8_t isolated; + uint8_t multiseg; + diff --git a/dpdk/drivers/net/mvpp2/mrvl_flow.c b/dpdk/drivers/net/mvpp2/mrvl_flow.c index 381b54e291..ea43255284 100644 --- a/dpdk/drivers/net/mvpp2/mrvl_flow.c @@ -28806,8 +57566,38 @@ index 381b54e291..ea43255284 100644 key->key_size += 2; key->num_fields += 1; } +diff --git a/dpdk/drivers/net/mvpp2/mrvl_mtr.c b/dpdk/drivers/net/mvpp2/mrvl_mtr.c +index 39272acea4..2fa5cb43ad 100644 +--- a/dpdk/drivers/net/mvpp2/mrvl_mtr.c ++++ b/dpdk/drivers/net/mvpp2/mrvl_mtr.c +@@ -329,6 +329,12 @@ mrvl_create(struct rte_eth_dev *dev, uint32_t mtr_id, + struct mrvl_mtr_profile *profile; + struct mrvl_mtr *mtr; + ++ profile = mrvl_mtr_profile_from_id(priv, params->meter_profile_id); ++ if (!profile) ++ return -rte_mtr_error_set(error, EINVAL, ++ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, ++ NULL, "Profile id does not exist\n"); ++ + mtr = mrvl_mtr_from_id(priv, mtr_id); + if (mtr) + return -rte_mtr_error_set(error, EEXIST, +@@ -341,12 +347,6 @@ mrvl_create(struct rte_eth_dev *dev, uint32_t mtr_id, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + +- profile = mrvl_mtr_profile_from_id(priv, params->meter_profile_id); +- if (!profile) +- return -rte_mtr_error_set(error, EINVAL, +- RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, +- NULL, "Profile id does not exist\n"); +- + mtr->shared = shared; + mtr->mtr_id = mtr_id; + mtr->plcr_bit = MRVL_PLCR_BIT_INVALID; diff --git a/dpdk/drivers/net/netvsc/hn_ethdev.c b/dpdk/drivers/net/netvsc/hn_ethdev.c -index 164e9ad174..6950682a94 100644 +index 164e9ad174..988f3cf1cb 100644 --- a/dpdk/drivers/net/netvsc/hn_ethdev.c +++ b/dpdk/drivers/net/netvsc/hn_ethdev.c @@ -42,7 +42,8 @@ @@ -28838,7 +57628,25 @@ index 164e9ad174..6950682a94 100644 /* free ether device */ rte_eth_dev_release_port(eth_dev); -@@ -256,15 +255,19 @@ static int hn_dev_info_get(struct rte_eth_dev *dev, +@@ -202,7 +201,7 @@ static int hn_parse_args(const struct rte_eth_dev *dev) + */ + int + hn_dev_link_update(struct rte_eth_dev *dev, +- int wait_to_complete) ++ int wait_to_complete __rte_unused) + { + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_link link, old; +@@ -216,8 +215,6 @@ hn_dev_link_update(struct rte_eth_dev *dev, + + hn_rndis_get_linkspeed(hv); + +- hn_vf_link_update(dev, wait_to_complete); +- + link = (struct 
rte_eth_link) { + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_autoneg = ETH_LINK_SPEED_FIXED, +@@ -256,15 +253,19 @@ static int hn_dev_info_get(struct rte_eth_dev *dev, dev_info->max_rx_queues = hv->max_queues; dev_info->max_tx_queues = hv->max_queues; @@ -28847,11 +57655,11 @@ index 164e9ad174..6950682a94 100644 - return rc; + dev_info->tx_desc_lim.nb_min = 1; + dev_info->tx_desc_lim.nb_max = 4096; -+ -+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) -+ return 0; - rc = hn_vf_info_get(hv, dev_info); ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + /* fills in rx and tx offload capability */ + rc = hn_rndis_get_offload(hv, dev_info); if (rc != 0) @@ -28863,7 +57671,7 @@ index 164e9ad174..6950682a94 100644 } static int hn_rss_reta_update(struct rte_eth_dev *dev, -@@ -291,6 +294,13 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev, +@@ -291,6 +292,13 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev, hv->rss_ind[i] = reta_conf[idx].reta[shift]; } @@ -28877,7 +57685,7 @@ index 164e9ad174..6950682a94 100644 err = hn_rndis_conf_rss(hv, 0); if (err) { PMD_DRV_LOG(NOTICE, -@@ -366,14 +376,15 @@ static int hn_rss_hash_update(struct rte_eth_dev *dev, +@@ -366,14 +374,15 @@ static int hn_rss_hash_update(struct rte_eth_dev *dev, hn_rss_hash_init(hv, rss_conf); @@ -28899,7 +57707,7 @@ index 164e9ad174..6950682a94 100644 return hn_vf_rss_hash_update(dev, rss_conf); } -@@ -565,7 +576,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev) +@@ -565,7 +574,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev) dev->data->nb_tx_queues); for (i = 0; i < NDIS_HASH_INDCNT; i++) @@ -28908,7 +57716,7 @@ index 164e9ad174..6950682a94 100644 hn_rss_hash_init(hv, rss_conf); -@@ -578,12 +589,21 @@ static int hn_dev_configure(struct rte_eth_dev *dev) +@@ -578,12 +587,21 @@ static int hn_dev_configure(struct rte_eth_dev *dev) return err; } @@ -28932,7 +57740,7 @@ index 164e9ad174..6950682a94 100644 } return hn_vf_configure(dev, dev_conf); -@@ -807,6 +827,10 @@ hn_dev_start(struct rte_eth_dev *dev) +@@ -807,6 +825,10 @@ hn_dev_start(struct rte_eth_dev *dev) if (error) hn_rndis_set_rxfilter(hv, 0); @@ -28943,7 +57751,7 @@ index 164e9ad174..6950682a94 100644 return error; } -@@ -921,8 +945,14 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) +@@ -921,8 +943,14 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -28960,7 +57768,16 @@ index 164e9ad174..6950682a94 100644 hv->vmbus = vmbus; hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP]; -@@ -962,11 +992,11 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) +@@ -930,7 +958,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) + hv->port_id = eth_dev->data->port_id; + hv->latency = HN_CHAN_LATENCY_NS; + hv->max_queues = 1; +- rte_spinlock_init(&hv->vf_lock); ++ rte_rwlock_init(&hv->vf_lock); + hv->vf_port = HN_INVALID_PORT; + + err = hn_parse_args(eth_dev); +@@ -962,11 +990,11 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) if (err) goto failed; @@ -28974,7 +57791,7 @@ index 164e9ad174..6950682a94 100644 if (err) goto failed; -@@ -998,7 +1028,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) +@@ -998,7 +1026,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) failed: PMD_INIT_LOG(NOTICE, "device init failed"); @@ -28983,7 +57800,7 @@ index 164e9ad174..6950682a94 100644 hn_detach(hv); return err; } -@@ -1022,7 +1052,7 @@ eth_hn_dev_uninit(struct rte_eth_dev *eth_dev) +@@ -1022,7 +1050,7 @@ eth_hn_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = NULL; hn_detach(hv); @@ 
-28993,7 +57810,7 @@ index 164e9ad174..6950682a94 100644 rte_free(hv->primary); ret = rte_eth_dev_owner_delete(hv->owner.id); diff --git a/dpdk/drivers/net/netvsc/hn_nvs.c b/dpdk/drivers/net/netvsc/hn_nvs.c -index 6b518685ab..477202b2a0 100644 +index 6b518685ab..03b6cc1551 100644 --- a/dpdk/drivers/net/netvsc/hn_nvs.c +++ b/dpdk/drivers/net/netvsc/hn_nvs.c @@ -54,7 +54,7 @@ static int hn_nvs_req_send(struct hn_data *hv, @@ -29022,7 +57839,7 @@ index 6b518685ab..477202b2a0 100644 if (ret == -EAGAIN) { rte_delay_us(HN_CHAN_INTERVAL_US); goto retry; -@@ -88,7 +89,20 @@ hn_nvs_execute(struct hn_data *hv, +@@ -88,7 +89,24 @@ hn_nvs_execute(struct hn_data *hv, return ret; } @@ -29034,16 +57851,20 @@ index 6b518685ab..477202b2a0 100644 hdr = (struct hn_nvs_hdr *)buffer; + + /* Silently drop received packets while waiting for response */ -+ if (hdr->type == NVS_TYPE_RNDIS) { ++ switch (hdr->type) { ++ case NVS_TYPE_RNDIS: + hn_nvs_ack_rxbuf(chan, xactid); -+ --hv->rxbuf_outstanding; ++ /* fallthrough */ ++ ++ case NVS_TYPE_TXTBL_NOTE: ++ PMD_DRV_LOG(DEBUG, "discard packet type 0x%x", hdr->type); + goto retry; + } + if (hdr->type != type) { PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x", hdr->type, type); -@@ -108,6 +122,29 @@ hn_nvs_execute(struct hn_data *hv, +@@ -108,6 +126,29 @@ hn_nvs_execute(struct hn_data *hv, return 0; } @@ -29073,6 +57894,33 @@ index 6b518685ab..477202b2a0 100644 static int hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver) { +@@ -187,9 +228,15 @@ hn_nvs_conn_rxbuf(struct hn_data *hv) + resp.nvs_sect[0].slotcnt); + hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt; + +- hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt, +- sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE); +- if (!hv->rxbuf_info) { ++ /* ++ * Pimary queue's rxbuf_info is not allocated at creation time. ++ * Now we can allocate it after we figure out the slotcnt. ++ */ ++ hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO", ++ hv->rxbuf_section_cnt, ++ sizeof(*hv->primary->rxbuf_info), ++ RTE_CACHE_LINE_SIZE); ++ if (!hv->primary->rxbuf_info) { + PMD_DRV_LOG(ERR, + "could not allocate rxbuf info"); + return -ENOMEM; +@@ -219,7 +266,6 @@ hn_nvs_disconn_rxbuf(struct hn_data *hv) + error); + } + +- rte_free(hv->rxbuf_info); + /* + * Linger long enough for NVS to disconnect RXBUF. 
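 *
 * (Editor's note, not upstream text: the receive buffer is shared with
 * the host, so after the disconnect message is sent the host may still
 * hold references to it for in-flight packets; the brief delay here is
 * a hedge against unmapping memory the hypervisor could still touch.)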
+ */ diff --git a/dpdk/drivers/net/netvsc/hn_nvs.h b/dpdk/drivers/net/netvsc/hn_nvs.h index 2563fd8d86..015839e364 100644 --- a/dpdk/drivers/net/netvsc/hn_nvs.h @@ -29086,8 +57934,88 @@ index 2563fd8d86..015839e364 100644 */ #define NVS_STATUS_OK 1 #define NVS_STATUS_FAILED 2 +diff --git a/dpdk/drivers/net/netvsc/hn_rndis.c b/dpdk/drivers/net/netvsc/hn_rndis.c +index 2b4714042e..6a976ce5e8 100644 +--- a/dpdk/drivers/net/netvsc/hn_rndis.c ++++ b/dpdk/drivers/net/netvsc/hn_rndis.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -33,6 +34,9 @@ + #include "hn_rndis.h" + #include "ndis.h" + ++#define RNDIS_TIMEOUT_SEC 5 ++#define RNDIS_DELAY_MS 10 ++ + #define HN_RNDIS_XFER_SIZE 0x4000 + + #define HN_NDIS_TXCSUM_CAP_IP4 \ +@@ -272,7 +276,7 @@ static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan, + sg.len = reqlen; + + if (sg.ofs + reqlen > PAGE_SIZE) { +- PMD_DRV_LOG(ERR, "RNDIS request crosses page bounary"); ++ PMD_DRV_LOG(ERR, "RNDIS request crosses page boundary"); + return -EINVAL; + } + +@@ -348,7 +352,7 @@ void hn_rndis_receive_response(struct hn_data *hv, + rte_smp_wmb(); + + if (rte_atomic32_cmpset(&hv->rndis_pending, hdr->rid, 0) == 0) { +- PMD_DRV_LOG(ERR, ++ PMD_DRV_LOG(NOTICE, + "received id %#x pending id %#x", + hdr->rid, (uint32_t)hv->rndis_pending); + } +@@ -371,6 +375,11 @@ static int hn_rndis_exec1(struct hn_data *hv, + return -EIO; + } + ++ if (rid == 0) { ++ PMD_DRV_LOG(ERR, "Invalid request id"); ++ return -EINVAL; ++ } ++ + if (comp != NULL && + rte_atomic32_cmpset(&hv->rndis_pending, 0, rid) == 0) { + PMD_DRV_LOG(ERR, +@@ -385,9 +394,26 @@ static int hn_rndis_exec1(struct hn_data *hv, + } + + if (comp) { ++ time_t start = time(NULL); ++ + /* Poll primary channel until response received */ +- while (hv->rndis_pending == rid) ++ while (hv->rndis_pending == rid) { ++ if (hv->closed) ++ return -ENETDOWN; ++ ++ if (time(NULL) - start > RNDIS_TIMEOUT_SEC) { ++ PMD_DRV_LOG(ERR, ++ "RNDIS response timed out"); ++ ++ rte_atomic32_cmpset(&hv->rndis_pending, rid, 0); ++ return -ETIMEDOUT; ++ } ++ ++ if (rte_vmbus_chan_rx_empty(hv->primary->chan)) ++ rte_delay_ms(RNDIS_DELAY_MS); ++ + hn_process_events(hv, 0, 1); ++ } + + memcpy(comp, hv->rndis_resp, comp_len); + } diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c -index 7212780c15..19f00a0528 100644 +index 7212780c15..af702dafe6 100644 --- a/dpdk/drivers/net/netvsc/hn_rxtx.c +++ b/dpdk/drivers/net/netvsc/hn_rxtx.c @@ -18,6 +18,7 @@ @@ -29098,6 +58026,15 @@ index 7212780c15..19f00a0528 100644 #include #include #include +@@ -41,7 +42,7 @@ + #define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */ + #define HN_TXCOPY_THRESHOLD 512 + +-#define HN_RXCOPY_THRESHOLD 256 ++#define HN_RXCOPY_THRESHOLD UINT_MAX + #define HN_RXQ_EVENT_DEFAULT 2048 + + struct hn_rxinfo { @@ -83,7 +84,7 @@ struct hn_txdesc { struct rte_mbuf *m; @@ -29122,7 +58059,7 @@ index 7212780c15..19f00a0528 100644 static void hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m) -@@ -150,63 +153,77 @@ hn_rndis_pktmsg_offset(uint32_t ofs) +@@ -150,63 +153,79 @@ hn_rndis_pktmsg_offset(uint32_t ofs) static void hn_txd_init(struct rte_mempool *mp __rte_unused, void *opaque, void *obj, unsigned int idx) { @@ -29143,8 +58080,8 @@ index 7212780c15..19f00a0528 100644 - txd->rndis_pkt = pkt; + txd->queue_id = txq->queue_id; + txd->chim_index = NVS_CHIM_IDX_INVALID; -+ txd->rndis_pkt = (struct rndis_packet_msg *)(char *)txq->tx_rndis -+ + idx * HN_RNDIS_PKT_ALIGNED; ++ 
txd->rndis_pkt = (struct rndis_packet_msg *)((char *)txq->tx_rndis ++ + idx * HN_RNDIS_PKT_ALIGNED); } -/* @@ -29217,11 +58154,13 @@ index 7212780c15..19f00a0528 100644 +static uint32_t hn_chim_alloc(struct hn_data *hv) +{ + uint32_t index = NVS_CHIM_IDX_INVALID; -+ uint64_t slab; ++ uint64_t slab = 0; + + rte_spinlock_lock(&hv->chim_lock); -+ if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) ++ if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) { ++ index += rte_bsf64(slab); + rte_bitmap_clear(hv->chim_bmap, index); ++ } + rte_spinlock_unlock(&hv->chim_lock); + + return index; @@ -29238,7 +58177,7 @@ index 7212780c15..19f00a0528 100644 } } -@@ -220,15 +237,16 @@ static void hn_reset_txagg(struct hn_tx_queue *txq) +@@ -220,18 +239,33 @@ static void hn_reset_txagg(struct hn_tx_queue *txq) int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, @@ -29257,16 +58196,11 @@ index 7212780c15..19f00a0528 100644 PMD_INIT_FUNC_TRACE(); -@@ -244,14 +262,42 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, - - tx_free_thresh = tx_conf->tx_free_thresh; - if (tx_free_thresh == 0) -- tx_free_thresh = RTE_MIN(hv->chim_cnt / 4, ++ tx_free_thresh = tx_conf->tx_free_thresh; ++ if (tx_free_thresh == 0) + tx_free_thresh = RTE_MIN(nb_desc / 4, - DEFAULT_TX_FREE_THRESH); - -- if (tx_free_thresh >= hv->chim_cnt - 3) -- tx_free_thresh = hv->chim_cnt - 3; ++ DEFAULT_TX_FREE_THRESH); ++ + if (tx_free_thresh + 3 >= nb_desc) { + PMD_INIT_LOG(ERR, + "tx_free_thresh must be less than the number of TX entries minus 3(%u)." @@ -29275,19 +58209,38 @@ index 7212780c15..19f00a0528 100644 + tx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } ++ + txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) +@@ -241,16 +275,34 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + txq->chan = hv->channels[queue_idx]; + txq->port_id = dev->data->port_id; + txq->queue_id = queue_idx; ++ txq->free_thresh = tx_free_thresh; - txq->free_thresh = tx_free_thresh; - +- tx_free_thresh = tx_conf->tx_free_thresh; +- if (tx_free_thresh == 0) +- tx_free_thresh = RTE_MIN(hv->chim_cnt / 4, +- DEFAULT_TX_FREE_THRESH); + snprintf(name, sizeof(name), + "hn_txd_%u_%u", dev->data->port_id, queue_idx); -+ + +- if (tx_free_thresh >= hv->chim_cnt - 3) +- tx_free_thresh = hv->chim_cnt - 3; + PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu", + name, nb_desc, sizeof(struct hn_txdesc)); -+ -+ txq->tx_rndis = rte_calloc("hn_txq_rndis", nb_desc, -+ HN_RNDIS_PKT_ALIGNED, RTE_CACHE_LINE_SIZE); -+ if (txq->tx_rndis == NULL) + +- txq->free_thresh = tx_free_thresh; ++ txq->tx_rndis_mz = rte_memzone_reserve_aligned(name, ++ nb_desc * HN_RNDIS_PKT_ALIGNED, rte_socket_id(), ++ RTE_MEMZONE_IOVA_CONTIG, HN_RNDIS_PKT_ALIGNED); ++ if (!txq->tx_rndis_mz) { ++ err = -rte_errno; + goto error; ++ } ++ txq->tx_rndis = txq->tx_rndis_mz->addr; ++ txq->tx_rndis_iova = txq->tx_rndis_mz->iova; + + txq->txdesc_pool = rte_mempool_create(name, nb_desc, + sizeof(struct hn_txdesc), @@ -29299,11 +58252,10 @@ index 7212780c15..19f00a0528 100644 + "mempool %s create failed: %d", name, rte_errno); + goto error; + } -+ + txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size); txq->agg_pktmax = hv->rndis_agg_pkts; - txq->agg_align = hv->rndis_agg_align; -@@ -260,31 +306,57 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -260,31 +312,57 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc, socket_id, tx_conf); @@ -29320,7 +58272,7 @@ index 7212780c15..19f00a0528 100644 +error: 
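+ /*
+ * (Editor's note, not upstream text: this label unwinds the per-queue
+ * resources created above, checking each one so any failure point in
+ * the function can jump here. For reference, a hedged sketch of how
+ * the send path is expected to pair with the descriptor pool created
+ * above -- the names match this file, the flow is illustrative only:
+ *
+ * struct hn_txdesc *txd;
+ *
+ * if (rte_mempool_get(txq->txdesc_pool, (void **)&txd) == 0) {
+ * // hn_txd_init() bound rndis_pkt to this descriptor's slot of
+ * // the contiguous staging area when the pool was populated
+ * struct rndis_packet_msg *pkt = txd->rndis_pkt;
+ * // ... build the RNDIS header, then post the packet ...
+ * rte_mempool_put(txq->txdesc_pool, txd);
+ * }
+ */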
+ if (txq->txdesc_pool) + rte_mempool_free(txq->txdesc_pool); -+ rte_free(txq->tx_rndis); ++ rte_memzone_free(txq->tx_rndis_mz); + rte_free(txq); + return err; +} @@ -29367,11 +58319,11 @@ index 7212780c15..19f00a0528 100644 + if (txq->txdesc_pool) + rte_mempool_free(txq->txdesc_pool); -+ rte_free(txq->tx_rndis); ++ rte_memzone_free(txq->tx_rndis_mz); rte_free(txq); } -@@ -292,6 +364,7 @@ static void +@@ -292,6 +370,7 @@ static void hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id, unsigned long xactid, const struct hn_nvs_rndis_ack *ack) { @@ -29379,13 +58331,15 @@ index 7212780c15..19f00a0528 100644 struct hn_txdesc *txd = (struct hn_txdesc *)xactid; struct hn_tx_queue *txq; -@@ -312,9 +385,11 @@ hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id, +@@ -312,9 +391,13 @@ hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id, ++txq->stats.errors; } - rte_pktmbuf_free(txd->m); -+ if (txd->chim_index != NVS_CHIM_IDX_INVALID) ++ if (txd->chim_index != NVS_CHIM_IDX_INVALID) { + hn_chim_free(hv, txd->chim_index); ++ txd->chim_index = NVS_CHIM_IDX_INVALID; ++ } - rte_mempool_put(txq->hv->tx_pool, txd); + rte_pktmbuf_free(txd->m); @@ -29393,7 +58347,143 @@ index 7212780c15..19f00a0528 100644 } /* Handle transmit completion events */ -@@ -894,10 +969,6 @@ uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id, +@@ -413,35 +496,24 @@ hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen, + return 0; + } + +-/* +- * Ack the consumed RXBUF associated w/ this channel packet, +- * so that this RXBUF can be recycled by the hypervisor. +- */ +-static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb) +-{ +- struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo; +- struct hn_data *hv = rxb->hv; +- +- if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) { +- hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid); +- --hv->rxbuf_outstanding; +- } +-} +- + static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque) + { +- hn_rx_buf_release(opaque); ++ struct hn_rx_bufinfo *rxb = opaque; ++ struct hn_rx_queue *rxq = rxb->rxq; ++ ++ rte_atomic32_dec(&rxq->rxbuf_outstanding); ++ hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid); + } + +-static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq, ++static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq, + const struct vmbus_chanpkt_rxbuf *pkt) + { + struct hn_rx_bufinfo *rxb; + +- rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid; ++ rxb = rxq->rxbuf_info + pkt->hdr.xactid; + rxb->chan = rxq->chan; + rxb->xactid = pkt->hdr.xactid; +- rxb->hv = rxq->hv; ++ rxb->rxq = rxq; + + rxb->shinfo.free_cb = hn_rx_buf_free_cb; + rxb->shinfo.fcb_opaque = rxb; +@@ -470,7 +542,8 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + * some space available in receive area for later packets. 
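 *
 * (Editor's note, not upstream text: with HN_RXCOPY_THRESHOLD
 * redefined to UINT_MAX earlier in this patch, the test below,
 *
 * if (dlen >= HN_RXCOPY_THRESHOLD && ...)
 *
 * is effectively never true for a real packet length, so the
 * zero-copy attach-extbuf path is disabled and every packet is copied
 * into its own mbuf; the rxbuf_outstanding accounting is kept for the
 * case where the threshold is lowered again.)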
+ */ + if (dlen >= HN_RXCOPY_THRESHOLD && +- hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) { ++ (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) < ++ hv->rxbuf_section_cnt / 2) { + struct rte_mbuf_ext_shared_info *shinfo; + const void *rxbuf; + rte_iova_t iova; +@@ -484,8 +557,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf); + shinfo = &rxb->shinfo; + +- if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1) +- ++hv->rxbuf_outstanding; ++ /* shinfo is already set to 1 by the caller */ ++ if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2) ++ rte_atomic32_inc(&rxq->rxbuf_outstanding); + + rte_pktmbuf_attach_extbuf(m, data, iova, + dlen + headroom, shinfo); +@@ -558,7 +632,8 @@ static void hn_rndis_rx_data(struct hn_rx_queue *rxq, + struct hn_rx_bufinfo *rxb, + void *data, uint32_t dlen) + { +- unsigned int data_off, data_len, pktinfo_off, pktinfo_len; ++ unsigned int data_off, data_len; ++ unsigned int pktinfo_off, pktinfo_len; + const struct rndis_packet_msg *pkt = data; + struct hn_rxinfo info = { + .vlan_info = HN_NDIS_VLAN_INFO_INVALID, +@@ -603,7 +678,8 @@ static void hn_rndis_rx_data(struct hn_rx_queue *rxq, + goto error; + } + +- if (unlikely(data_off + data_len > pkt->len)) ++ /* overflow check */ ++ if (data_len > data_len + data_off || data_len + data_off > pkt->len) + goto error; + + if (unlikely(data_len < RTE_ETHER_HDR_LEN)) +@@ -725,7 +801,8 @@ hn_nvs_handle_rxbuf(struct rte_eth_dev *dev, + } + + /* Send ACK now if external mbuf not used */ +- hn_rx_buf_release(rxb); ++ if (rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0) ++ hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid); + } + + /* +@@ -781,6 +858,23 @@ struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv, + return NULL; + } + ++ /* setup rxbuf_info for non-primary queue */ ++ if (queue_id) { ++ rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO", ++ hv->rxbuf_section_cnt, ++ sizeof(*rxq->rxbuf_info), ++ RTE_CACHE_LINE_SIZE); ++ ++ if (!rxq->rxbuf_info) { ++ PMD_DRV_LOG(ERR, ++ "Could not allocate rxbuf info for queue %d\n", ++ queue_id); ++ rte_free(rxq->event_buf); ++ rte_free(rxq); ++ return NULL; ++ } ++ } ++ + return rxq; + } + +@@ -835,6 +929,7 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + + fail: + rte_ring_free(rxq->rx_ring); ++ rte_free(rxq->rxbuf_info); + rte_free(rxq->event_buf); + rte_free(rxq); + return error; +@@ -857,6 +952,7 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary) + if (keep_primary && rxq == rxq->hv->primary) + return; + ++ rte_free(rxq->rxbuf_info); + rte_free(rxq->event_buf); + rte_free(rxq); + } +@@ -894,10 +990,6 @@ uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id, rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id]; @@ -29404,7 +58494,7 @@ index 7212780c15..19f00a0528 100644 /* * Since channel is shared between Rx and TX queue need to have a lock * since DPDK does not force same CPU to be used for Rx/Tx. 
-@@ -961,9 +1032,6 @@ uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id, +@@ -961,9 +1053,6 @@ uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id, if (tx_limit && tx_done >= tx_limit) break; @@ -29414,7 +58504,7 @@ index 7212780c15..19f00a0528 100644 } if (bytes_read > 0) -@@ -1036,28 +1104,15 @@ static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig) +@@ -1036,28 +1125,15 @@ static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig) return ret; } @@ -29451,7 +58541,7 @@ index 7212780c15..19f00a0528 100644 { struct hn_txdesc *agg_txd = txq->agg_txd; struct rndis_packet_msg *pkt; -@@ -1085,7 +1140,7 @@ hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize) +@@ -1085,7 +1161,7 @@ hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize) } chim = (uint8_t *)pkt + pkt->len; @@ -29460,7 +58550,7 @@ index 7212780c15..19f00a0528 100644 txq->agg_pktleft--; txq->agg_szleft -= pktsize; if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) { -@@ -1095,18 +1150,21 @@ hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize) +@@ -1095,18 +1171,21 @@ hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize) */ txq->agg_pktleft = 0; } @@ -29492,7 +58582,21 @@ index 7212780c15..19f00a0528 100644 txq->agg_prevpkt = chim; return chim; -@@ -1314,7 +1372,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -1282,11 +1361,8 @@ static int hn_xmit_sg(struct hn_tx_queue *txq, + hn_rndis_dump(txd->rndis_pkt); + + /* pass IOVA of rndis header in first segment */ +- addr = rte_malloc_virt2iova(txd->rndis_pkt); +- if (unlikely(addr == RTE_BAD_IOVA)) { +- PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova"); +- return -EINVAL; +- } ++ addr = txq->tx_rndis_iova + ++ ((char *)txd->rndis_pkt - (char *)txq->tx_rndis); + + sg[0].page = addr / PAGE_SIZE; + sg[0].ofs = addr & PAGE_MASK; +@@ -1314,28 +1390,38 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) struct hn_data *hv = txq->hv; struct rte_eth_dev *vf_dev; bool need_sig = false; @@ -29501,9 +58605,21 @@ index 7212780c15..19f00a0528 100644 int ret; if (unlikely(hv->closed)) -@@ -1329,13 +1387,19 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts); + return 0; + + /* Transmit over VF if present and up */ ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + + if (vf_dev && vf_dev->data->dev_started) { + void *sub_q = vf_dev->data->tx_queues[queue_id]; + +- return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts); ++ nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts); ++ rte_rwlock_read_unlock(&hv->vf_lock); ++ return nb_tx; } ++ rte_rwlock_read_unlock(&hv->vf_lock); - if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh) + avail = rte_mempool_avail_count(txq->txdesc_pool); @@ -29522,7 +58638,7 @@ index 7212780c15..19f00a0528 100644 /* For small packets aggregate them in chimney buffer */ if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) { -@@ -1346,7 +1410,8 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -1346,7 +1432,8 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) goto fail; } @@ -29532,7 +58648,7 @@ index 7212780c15..19f00a0528 100644 if (unlikely(!pkt)) break; -@@ -1360,21 +1425,13 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -1360,21 +1447,13 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf 
**tx_pkts, uint16_t nb_pkts) hn_flush_txagg(txq, &need_sig)) goto fail; } else { @@ -29558,7 +58674,7 @@ index 7212780c15..19f00a0528 100644 ++txd->packets; hn_encap(pkt, queue_id, m); -@@ -1383,7 +1440,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -1383,7 +1462,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) if (unlikely(ret != 0)) { PMD_TX_LOG(NOTICE, "sg send failed: %d", ret); ++txq->stats.errors; @@ -29567,20 +58683,68 @@ index 7212780c15..19f00a0528 100644 goto fail; } } +@@ -1442,10 +1521,12 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + (void **)rx_pkts, nb_pkts, NULL); + + /* If VF is available, check that as well */ ++ rte_rwlock_read_lock(&hv->vf_lock); + if (vf_dev && vf_dev->data->dev_started) + nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq, + rx_pkts + nb_rcv, nb_pkts - nb_rcv); + ++ rte_rwlock_read_unlock(&hv->vf_lock); + return nb_rcv; + } + diff --git a/dpdk/drivers/net/netvsc/hn_var.h b/dpdk/drivers/net/netvsc/hn_var.h -index 05bc492511..b4c6171737 100644 +index 05bc492511..9814113a0c 100644 --- a/dpdk/drivers/net/netvsc/hn_var.h +++ b/dpdk/drivers/net/netvsc/hn_var.h -@@ -52,6 +52,8 @@ struct hn_tx_queue { +@@ -52,6 +52,10 @@ struct hn_tx_queue { uint16_t port_id; uint16_t queue_id; uint32_t free_thresh; + struct rte_mempool *txdesc_pool; ++ const struct rte_memzone *tx_rndis_mz; + void *tx_rndis; ++ rte_iova_t tx_rndis_iova; /* Applied packet transmission aggregation limits. */ uint32_t agg_szmax; -@@ -115,8 +117,10 @@ struct hn_data { +@@ -80,13 +84,15 @@ struct hn_rx_queue { + struct hn_stats stats; + + void *event_buf; ++ struct hn_rx_bufinfo *rxbuf_info; ++ rte_atomic32_t rxbuf_outstanding; + }; + + + /* multi-packet data from host */ + struct hn_rx_bufinfo { + struct vmbus_channel *chan; +- struct hn_data *hv; ++ struct hn_rx_queue *rxq; + uint64_t xactid; + struct rte_mbuf_ext_shared_info shinfo; + } __rte_cache_aligned; +@@ -96,7 +102,7 @@ struct hn_rx_bufinfo { + struct hn_data { + struct rte_vmbus_device *vmbus; + struct hn_rx_queue *primary; +- rte_spinlock_t vf_lock; ++ rte_rwlock_t vf_lock; + uint16_t port_id; + uint16_t vf_port; + +@@ -108,15 +114,15 @@ struct hn_data { + uint32_t link_speed; + + struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */ +- struct hn_rx_bufinfo *rxbuf_info; + uint32_t rxbuf_section_cnt; /* # of Rx sections */ +- volatile uint32_t rxbuf_outstanding; + uint16_t max_queues; /* Max available queues */ uint16_t num_queues; uint64_t rss_offloads; @@ -29592,16 +58756,18 @@ index 05bc492511..b4c6171737 100644 uint32_t chim_szmax; /* Max size per buffer */ uint32_t chim_cnt; /* Max packets per buffer */ -@@ -135,8 +139,6 @@ struct hn_data { +@@ -135,10 +141,7 @@ struct hn_data { uint8_t rss_key[40]; uint16_t rss_ind[128]; - struct rte_ether_addr mac_addr; - struct rte_eth_dev_owner owner; - struct rte_intr_handle vf_intr; +- struct rte_intr_handle vf_intr; -@@ -157,8 +159,8 @@ uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + struct vmbus_channel *channels[HN_MAX_CHANNELS]; + }; +@@ -157,8 +160,8 @@ uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); @@ -29612,14 +58778,117 @@ index 05bc492511..b4c6171737 100644 int hn_dev_link_update(struct rte_eth_dev *dev, int wait); int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, +@@ -186,15 +189,15 @@ hn_vf_attached(const 
struct hn_data *hv) + return hv->vf_port != HN_INVALID_PORT; + } + +-/* Get VF device for existing netvsc device */ ++/* ++ * Get VF device for existing netvsc device ++ * Assumes vf_lock is held. ++ */ + static inline struct rte_eth_dev * + hn_get_vf_dev(const struct hn_data *hv) + { + uint16_t vf_port = hv->vf_port; + +- /* make sure vf_port is loaded */ +- rte_smp_rmb(); +- + if (vf_port == HN_INVALID_PORT) + return NULL; + else +@@ -220,8 +223,6 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +-int hn_vf_link_update(struct rte_eth_dev *dev, +- int wait_to_complete); + int hn_vf_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, diff --git a/dpdk/drivers/net/netvsc/hn_vf.c b/dpdk/drivers/net/netvsc/hn_vf.c -index 7a3734cadf..1261b2e2ef 100644 +index 7a3734cadf..f5f15c0462 100644 --- a/dpdk/drivers/net/netvsc/hn_vf.c +++ b/dpdk/drivers/net/netvsc/hn_vf.c -@@ -167,6 +167,17 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev, - hn_vf_remove(hv); +@@ -82,8 +82,6 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id) + + PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id); + hv->vf_port = port_id; +- rte_smp_wmb(); +- + return 0; + } + +@@ -98,19 +96,9 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv) + return port; + } + +- rte_spinlock_lock(&hv->vf_lock); + err = hn_vf_attach(hv, port); +- +- if (err == 0) { +- dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; +- hv->vf_intr = (struct rte_intr_handle) { +- .fd = -1, +- .type = RTE_INTR_HANDLE_EXT, +- }; +- dev->intr_handle = &hv->vf_intr; ++ if (err == 0) + hn_nvs_set_datapath(hv, NVS_DATAPATH_VF); +- } +- rte_spinlock_unlock(&hv->vf_lock); + + return err; + } +@@ -119,22 +107,18 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv) + static void hn_vf_remove(struct hn_data *hv) + { + +- rte_spinlock_lock(&hv->vf_lock); +- + if (!hn_vf_attached(hv)) { + PMD_DRV_LOG(ERR, "VF path not active"); + } else { + /* Stop incoming packets from arriving on VF */ + hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC); + +- /* Stop transmission over VF */ +- hv->vf_port = HN_INVALID_PORT; +- rte_smp_wmb(); +- + /* Give back ownership */ + rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id); ++ ++ /* Stop transmission over VF */ ++ hv->vf_port = HN_INVALID_PORT; + } +- rte_spinlock_unlock(&hv->vf_lock); } + /* Handle VF association message from host */ +@@ -156,15 +140,27 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev, + vf_assoc->allocated ? "add to" : "remove from", + dev->data->port_id); + ++ rte_rwlock_write_lock(&hv->vf_lock); + hv->vf_present = vf_assoc->allocated; + +- if (dev->state != RTE_ETH_DEV_ATTACHED) +- return; ++ if (dev->state == RTE_ETH_DEV_ATTACHED) { ++ if (vf_assoc->allocated) ++ hn_vf_add(dev, hv); ++ else ++ hn_vf_remove(hv); ++ } ++ rte_rwlock_write_unlock(&hv->vf_lock); ++} + +- if (vf_assoc->allocated) +- hn_vf_add(dev, hv); +- else +- hn_vf_remove(hv); +static void +hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim, + const struct rte_eth_desc_lim *vf_lim) @@ -29629,12 +58898,10 @@ index 7a3734cadf..1261b2e2ef 100644 + lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align); + lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max); + lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max); -+} -+ + } + /* - * Merge the info from the VF and synthetic path. 
- * use the default config of the VF -@@ -196,11 +207,13 @@ static int hn_vf_info_merge(struct rte_eth_dev *vf_dev, +@@ -196,11 +192,13 @@ static int hn_vf_info_merge(struct rte_eth_dev *vf_dev, info->max_tx_queues); info->tx_offload_capa &= vf_info.tx_offload_capa; info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa; @@ -29648,11 +58915,491 @@ index 7a3734cadf..1261b2e2ef 100644 return 0; } +@@ -210,85 +208,11 @@ int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info) + struct rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = hn_vf_info_merge(vf_dev, info); +- rte_spinlock_unlock(&hv->vf_lock); +- return ret; +-} +- +-int hn_vf_link_update(struct rte_eth_dev *dev, +- int wait_to_complete) +-{ +- struct hn_data *hv = dev->data->dev_private; +- struct rte_eth_dev *vf_dev; +- int ret = 0; +- +- rte_spinlock_lock(&hv->vf_lock); +- vf_dev = hn_get_vf_dev(hv); +- if (vf_dev && vf_dev->dev_ops->link_update) +- ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete); +- rte_spinlock_unlock(&hv->vf_lock); +- +- return ret; +-} +- +-/* called when VF has link state interrupts enabled */ +-static int hn_vf_lsc_event(uint16_t port_id __rte_unused, +- enum rte_eth_event_type event, +- void *cb_arg, void *out __rte_unused) +-{ +- struct rte_eth_dev *dev = cb_arg; +- +- if (event != RTE_ETH_EVENT_INTR_LSC) +- return 0; +- +- /* if link state has changed pass on */ +- if (hn_dev_link_update(dev, 0) == 0) +- return 0; /* no change */ +- +- return _rte_eth_dev_callback_process(dev, +- RTE_ETH_EVENT_INTR_LSC, +- NULL); +-} +- +-static int _hn_vf_configure(struct rte_eth_dev *dev, +- uint16_t vf_port, +- const struct rte_eth_conf *dev_conf) +-{ +- struct rte_eth_conf vf_conf = *dev_conf; +- struct rte_eth_dev *vf_dev; +- int ret; +- +- vf_dev = &rte_eth_devices[vf_port]; +- if (dev_conf->intr_conf.lsc && +- (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { +- PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u", +- vf_port); +- vf_conf.intr_conf.lsc = 1; +- } else { +- PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u", +- vf_port); +- vf_conf.intr_conf.lsc = 0; +- } +- +- ret = rte_eth_dev_configure(vf_port, +- dev->data->nb_rx_queues, +- dev->data->nb_tx_queues, +- &vf_conf); +- if (ret) { +- PMD_DRV_LOG(ERR, +- "VF configuration failed: %d", ret); +- } else if (vf_conf.intr_conf.lsc) { +- ret = rte_eth_dev_callback_register(vf_port, +- RTE_ETH_DEV_INTR_LSC, +- hn_vf_lsc_event, dev); +- if (ret) +- PMD_DRV_LOG(ERR, +- "Failed to register LSC callback for VF %u", +- vf_port); +- } ++ rte_rwlock_read_unlock(&hv->vf_lock); + return ret; + } + +@@ -300,12 +224,23 @@ int hn_vf_configure(struct rte_eth_dev *dev, + const struct rte_eth_conf *dev_conf) + { + struct hn_data *hv = dev->data->dev_private; ++ struct rte_eth_conf vf_conf = *dev_conf; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); +- if (hv->vf_port != HN_INVALID_PORT) +- ret = _hn_vf_configure(dev, hv->vf_port, dev_conf); +- rte_spinlock_unlock(&hv->vf_lock); ++ /* link state interrupt does not matter here. 
*/ ++ vf_conf.intr_conf.lsc = 0; ++ ++ rte_rwlock_read_lock(&hv->vf_lock); ++ if (hv->vf_port != HN_INVALID_PORT) { ++ ret = rte_eth_dev_configure(hv->vf_port, ++ dev->data->nb_rx_queues, ++ dev->data->nb_tx_queues, ++ &vf_conf); ++ if (ret != 0) ++ PMD_DRV_LOG(ERR, ++ "VF configuration failed: %d", ret); ++ } ++ rte_rwlock_read_unlock(&hv->vf_lock); + return ret; + } + +@@ -315,11 +250,11 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev) + struct rte_eth_dev *vf_dev; + const uint32_t *ptypes = NULL; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get) + ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + + return ptypes; + } +@@ -330,11 +265,11 @@ int hn_vf_start(struct rte_eth_dev *dev) + struct rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_dev_start(vf_dev->data->port_id); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + return ret; + } + +@@ -343,11 +278,11 @@ void hn_vf_stop(struct rte_eth_dev *dev) + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + rte_eth_dev_stop(vf_dev->data->port_id); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + } + + /* If VF is present, then cascade configuration down */ +@@ -355,11 +290,11 @@ void hn_vf_stop(struct rte_eth_dev *dev) + { \ + struct hn_data *hv = (dev)->data->dev_private; \ + struct rte_eth_dev *vf_dev; \ +- rte_spinlock_lock(&hv->vf_lock); \ ++ rte_rwlock_read_lock(&hv->vf_lock); \ + vf_dev = hn_get_vf_dev(hv); \ + if (vf_dev) \ + func(vf_dev->data->port_id); \ +- rte_spinlock_unlock(&hv->vf_lock); \ ++ rte_rwlock_read_unlock(&hv->vf_lock); \ + } + + /* If VF is present, then cascade configuration down */ +@@ -368,11 +303,11 @@ void hn_vf_stop(struct rte_eth_dev *dev) + struct hn_data *hv = (dev)->data->dev_private; \ + struct rte_eth_dev *vf_dev; \ + int ret = 0; \ +- rte_spinlock_lock(&hv->vf_lock); \ ++ rte_rwlock_read_lock(&hv->vf_lock); \ + vf_dev = hn_get_vf_dev(hv); \ + if (vf_dev) \ + ret = func(vf_dev->data->port_id); \ +- rte_spinlock_unlock(&hv->vf_lock); \ ++ rte_rwlock_read_unlock(&hv->vf_lock); \ + return ret; \ + } + +@@ -386,13 +321,13 @@ void hn_vf_close(struct rte_eth_dev *dev) + struct hn_data *hv = dev->data->dev_private; + uint16_t vf_port; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_port = hv->vf_port; + if (vf_port != HN_INVALID_PORT) + rte_eth_dev_close(vf_port); + + hv->vf_port = HN_INVALID_PORT; +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + } + + int hn_vf_stats_reset(struct rte_eth_dev *dev) +@@ -428,12 +363,12 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev, + struct rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id, + mc_addr_set, nb_mc_addr); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + return ret; + } + +@@ -446,13 +381,13 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev, + struct 
rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_tx_queue_setup(vf_dev->data->port_id, + queue_idx, nb_desc, + socket_id, tx_conf); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + return ret; + } + +@@ -460,7 +395,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id) + { + struct rte_eth_dev *vf_dev; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->tx_queue_release) { + void *subq = vf_dev->data->tx_queues[queue_id]; +@@ -468,7 +403,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id) + (*vf_dev->dev_ops->tx_queue_release)(subq); + } + +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + } + + int hn_vf_rx_queue_setup(struct rte_eth_dev *dev, +@@ -481,13 +416,13 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev, + struct rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_rx_queue_setup(vf_dev->data->port_id, + queue_idx, nb_desc, + socket_id, rx_conf, mp); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + return ret; + } + +@@ -495,14 +430,14 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id) + { + struct rte_eth_dev *vf_dev; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->rx_queue_release) { + void *subq = vf_dev->data->rx_queues[queue_id]; + + (*vf_dev->dev_ops->rx_queue_release)(subq); + } +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + } + + int hn_vf_stats_get(struct rte_eth_dev *dev, +@@ -512,11 +447,11 @@ int hn_vf_stats_get(struct rte_eth_dev *dev, + struct rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_stats_get(vf_dev->data->port_id, stats); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + return ret; + } + +@@ -528,12 +463,12 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_dev *vf_dev; + int i, count = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + count = rte_eth_xstats_get_names(vf_dev->data->port_id, + names, n); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + + /* add vf_ prefix to xstat names */ + if (names) { +@@ -557,12 +492,12 @@ int hn_vf_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_dev *vf_dev; + int i, count = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + count = rte_eth_xstats_get(vf_dev->data->port_id, + xstats + offset, n - offset); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + + /* Offset id's for VF stats */ + if (count > 0) { +@@ -579,13 +514,13 @@ int hn_vf_xstats_reset(struct rte_eth_dev *dev) + struct rte_eth_dev *vf_dev; + int ret; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_xstats_reset(vf_dev->data->port_id); + else + ret = -EINVAL; +- 
rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; + } +@@ -597,11 +532,11 @@ int hn_vf_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->rss_hash_update) + ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; + } +@@ -614,12 +549,12 @@ int hn_vf_reta_hash_update(struct rte_eth_dev *dev, + struct rte_eth_dev *vf_dev; + int ret = 0; + +- rte_spinlock_lock(&hv->vf_lock); ++ rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->reta_update) + ret = vf_dev->dev_ops->reta_update(vf_dev, + reta_conf, reta_size); +- rte_spinlock_unlock(&hv->vf_lock); ++ rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; + } +diff --git a/dpdk/drivers/net/nfb/meson.build b/dpdk/drivers/net/nfb/meson.build +index d53e8eca7d..995c44c61c 100644 +--- a/dpdk/drivers/net/nfb/meson.build ++++ b/dpdk/drivers/net/nfb/meson.build +@@ -3,7 +3,7 @@ + # Copyright(c) 2019 Netcope Technologies, a.s. + # All rights reserved. + +-dep = dependency('netcope-common', required: false) ++dep = dependency('netcope-common', required: false, method: 'pkg-config') + reason = 'missing dependency, "libnfb"' + build = dep.found() + ext_deps += dep diff --git a/dpdk/drivers/net/nfp/nfp_net.c b/dpdk/drivers/net/nfp/nfp_net.c -index 3aafa7f80f..b6ff5ecd7d 100644 +index 3aafa7f80f..2aa3b2a103 100644 --- a/dpdk/drivers/net/nfp/nfp_net.c +++ b/dpdk/drivers/net/nfp/nfp_net.c -@@ -3014,7 +3014,7 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) +@@ -1250,6 +1250,20 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, + }; + ++ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { ++ .nb_max = NFP_NET_MAX_RX_DESC, ++ .nb_min = NFP_NET_MIN_RX_DESC, ++ .nb_align = NFP_ALIGN_RING_DESC, ++ }; ++ ++ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { ++ .nb_max = NFP_NET_MAX_TX_DESC, ++ .nb_min = NFP_NET_MIN_TX_DESC, ++ .nb_align = NFP_ALIGN_RING_DESC, ++ .nb_seg_max = NFP_TX_MAX_SEG, ++ .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG, ++ }; ++ + dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP | +@@ -1487,7 +1501,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + } + + /* switch to jumbo mode if needed */ +- if ((uint32_t)mtu > RTE_ETHER_MAX_LEN) ++ if ((uint32_t)mtu > RTE_ETHER_MTU) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +@@ -1513,15 +1527,17 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, + const struct rte_memzone *tz; + struct nfp_net_rxq *rxq; + struct nfp_net_hw *hw; ++ uint32_t rx_desc_sz; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Validating number of descriptors */ +- if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 || +- (nb_desc > NFP_NET_MAX_RX_DESC) || +- (nb_desc < NFP_NET_MIN_RX_DESC)) { ++ rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc); ++ if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 || ++ nb_desc > NFP_NET_MAX_RX_DESC || ++ nb_desc < NFP_NET_MIN_RX_DESC) { + PMD_DRV_LOG(ERR, "Wrong nb_desc value"); + return -EINVAL; + } +@@ -1660,15 +1676,17 @@ 
nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + struct nfp_net_txq *txq; + uint16_t tx_free_thresh; + struct nfp_net_hw *hw; ++ uint32_t tx_desc_sz; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Validating number of descriptors */ +- if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 || +- (nb_desc > NFP_NET_MAX_TX_DESC) || +- (nb_desc < NFP_NET_MIN_TX_DESC)) { ++ tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc); ++ if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 || ++ nb_desc > NFP_NET_MAX_TX_DESC || ++ nb_desc < NFP_NET_MIN_TX_DESC) { + PMD_DRV_LOG(ERR, "Wrong nb_desc value"); + return -EINVAL; + } +@@ -2353,11 +2371,6 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + new_ctrl = 0; + +- if ((mask & ETH_VLAN_FILTER_OFFLOAD) || +- (mask & ETH_VLAN_EXTEND_OFFLOAD)) +- PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or" +- " ETH_VLAN_EXTEND_OFFLOAD"); +- + /* Enable vlan strip if it is not configured yet */ + if ((mask & ETH_VLAN_STRIP_OFFLOAD) && + !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) +@@ -2626,6 +2639,9 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP; + ++ /* Propagate current RSS hash functions to caller */ ++ rss_conf->rss_hf = rss_hf; ++ + /* Reading the key size */ + rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ); + +@@ -3014,7 +3030,7 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) size_t count, curlen, totlen = 0; int err = 0; @@ -29661,7 +59408,7 @@ index 3aafa7f80f..b6ff5ecd7d 100644 sizeof(off_t), sizeof(size_t)); /* Reading the count param */ -@@ -3033,9 +3033,9 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) +@@ -3033,9 +3049,9 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) cpp_id = (offset >> 40) << 8; nfp_offset = offset & ((1ull << 40) - 1); @@ -29673,7 +59420,7 @@ index 3aafa7f80f..b6ff5ecd7d 100644 cpp_id, nfp_offset); /* Adjust length if not aligned */ -@@ -3067,12 +3067,12 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) +@@ -3067,12 +3083,12 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) if (len > sizeof(tmpbuf)) len = sizeof(tmpbuf); @@ -29688,7 +59435,7 @@ index 3aafa7f80f..b6ff5ecd7d 100644 __func__, err, count); nfp_cpp_area_release(area); nfp_cpp_area_free(area); -@@ -3116,7 +3116,7 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) +@@ -3116,7 +3132,7 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) size_t count, curlen, totlen = 0; int err = 0; @@ -29697,7 +59444,7 @@ index 3aafa7f80f..b6ff5ecd7d 100644 sizeof(off_t), sizeof(size_t)); /* Reading the count param */ -@@ -3135,9 +3135,9 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) +@@ -3135,9 +3151,9 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) cpp_id = (offset >> 40) << 8; nfp_offset = offset & ((1ull << 40) - 1); @@ -29709,7 +59456,7 @@ index 3aafa7f80f..b6ff5ecd7d 100644 cpp_id, nfp_offset); /* Adjust length if not aligned */ -@@ -3174,13 +3174,13 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) +@@ -3174,13 +3190,13 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) nfp_cpp_area_free(area); return -EIO; } @@ -29725,7 +59472,7 @@ index 3aafa7f80f..b6ff5ecd7d 100644 __func__, err, count); nfp_cpp_area_release(area); nfp_cpp_area_free(area); -@@ -3451,9 +3451,10 @@ 
nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, +@@ -3451,9 +3467,10 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, probe_failed: rte_free(port_name); /* free ports private data if primary process */ @@ -29738,6 +59485,125 @@ index 3aafa7f80f..b6ff5ecd7d 100644 rte_eth_dev_release_port(eth_dev); return retval; +diff --git a/dpdk/drivers/net/nfp/nfp_net_pmd.h b/dpdk/drivers/net/nfp/nfp_net_pmd.h +index cc1055c49a..466a11aca2 100644 +--- a/dpdk/drivers/net/nfp/nfp_net_pmd.h ++++ b/dpdk/drivers/net/nfp/nfp_net_pmd.h +@@ -33,6 +33,12 @@ struct nfp_net_adapter; + #define NFP_NET_MAX_RX_DESC (32 * 1024) + #define NFP_NET_MIN_RX_DESC 64 + ++/* Descriptor alignment */ ++#define NFP_ALIGN_RING_DESC 128 ++ ++#define NFP_TX_MAX_SEG UINT8_MAX ++#define NFP_TX_MAX_MTU_SEG 8 ++ + /* Bar allocation */ + #define NFP_NET_CRTL_BAR 0 + #define NFP_NET_TX_BAR 2 +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +index 1427954c17..08d656da14 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +@@ -170,7 +170,7 @@ void *nfp_cpp_priv(struct nfp_cpp *cpp); + */ + void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); + +-uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp); ++uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model); + + /* + * NFP CPP core interface for CPP clients. +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c +index dec4a8b6d1..6d629430d4 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c +@@ -22,8 +22,9 @@ + + #define NFP_PL_DEVICE_ID 0x00000004 + #define NFP_PL_DEVICE_ID_MASK 0xff +- +-#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 ++#define NFP_PL_DEVICE_PART_MASK 0xffff0000 ++#define NFP_PL_DEVICE_MODEL_MASK (NFP_PL_DEVICE_PART_MASK | \ ++ NFP_PL_DEVICE_ID_MASK) + + void + nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv) +@@ -46,13 +47,18 @@ nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model) + uint32_t + nfp_cpp_model(struct nfp_cpp *cpp) + { ++ int err; ++ uint32_t model; ++ + if (!cpp) + return NFP_CPP_MODEL_INVALID; + +- if (cpp->model == 0) +- cpp->model = __nfp_cpp_model_autodetect(cpp); ++ err = __nfp_cpp_model_autodetect(cpp, &model); + +- return cpp->model; ++ if (err < 0) ++ return err; ++ ++ return model; + } + + void +@@ -389,9 +395,6 @@ nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr) + uint32_t xpb; + int island; + +- if (!NFP_CPP_MODEL_IS_6000(cpp->model)) +- return 0; +- + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + + /* +@@ -796,29 +799,21 @@ nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + * as those are model-specific + */ + uint32_t +-__nfp_cpp_model_autodetect(struct nfp_cpp *cpp) ++__nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model) + { +- uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); +- uint32_t model = 0; +- +- if (nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model)) +- return 0; +- +- if (NFP_CPP_MODEL_IS_6000(model)) { +- uint32_t tmp; +- +- nfp_cpp_model_set(cpp, model); ++ uint32_t reg; ++ int err; + +- /* The PL's PluDeviceID revision code is authoratative */ +- model &= ~0xff; +- if (nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + +- NFP_PL_DEVICE_ID, &tmp)) +- return 0; ++ err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, ++ ®); ++ if (err < 0) ++ return err; + +- model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10; +- } 
++ *model = reg & NFP_PL_DEVICE_MODEL_MASK; ++ if (*model & NFP_PL_DEVICE_ID_MASK) ++ *model -= 0x10; + +- return model; ++ return 0; + } + + /* diff --git a/dpdk/drivers/net/null/rte_eth_null.c b/dpdk/drivers/net/null/rte_eth_null.c index 025b73acb3..beedd5f4b2 100644 --- a/dpdk/drivers/net/null/rte_eth_null.c @@ -29810,6 +59676,46 @@ index a06a2c89c9..e1060fc4ec 100644 endforeach c_args = cflags +diff --git a/dpdk/drivers/net/octeontx/base/octeontx_io.h b/dpdk/drivers/net/octeontx/base/octeontx_io.h +index 04b9ce1910..d0b9cfbc67 100644 +--- a/dpdk/drivers/net/octeontx/base/octeontx_io.h ++++ b/dpdk/drivers/net/octeontx/base/octeontx_io.h +@@ -52,6 +52,11 @@ do { \ + #endif + + #if defined(RTE_ARCH_ARM64) ++#if defined(__ARM_FEATURE_SVE) ++#define __LSE_PREAMBLE " .cpu generic+lse+sve\n" ++#else ++#define __LSE_PREAMBLE " .cpu generic+lse\n" ++#endif + /** + * Perform an atomic fetch-and-add operation. + */ +@@ -61,7 +66,7 @@ octeontx_reg_ldadd_u64(void *addr, int64_t off) + uint64_t old_val; + + __asm__ volatile( +- " .cpu generic+lse\n" ++ __LSE_PREAMBLE + " ldadd %1, %0, [%2]\n" + : "=r" (old_val) : "r" (off), "r" (addr) : "memory"); + +@@ -98,12 +103,13 @@ octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[], + + /* LDEOR initiates atomic transfer to I/O device */ + __asm__ volatile( +- " .cpu generic+lse\n" ++ __LSE_PREAMBLE + " ldeor xzr, %0, [%1]\n" + : "=r" (result) : "r" (ioreg_va) : "memory"); + } while (!result); + } + ++#undef __LSE_PREAMBLE + #else + + static inline uint64_t diff --git a/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/dpdk/drivers/net/octeontx/octeontx_ethdev.c index 679803dd4c..e85acdde0a 100644 --- a/dpdk/drivers/net/octeontx/octeontx_ethdev.c @@ -29845,7 +59751,7 @@ index 679803dd4c..e85acdde0a 100644 if (nic) octeontx_port_close(nic); diff --git a/dpdk/drivers/net/octeontx2/otx2_ethdev.c b/dpdk/drivers/net/octeontx2/otx2_ethdev.c -index ed329273dc..102d06b39b 100644 +index ed329273dc..c952373be4 100644 --- a/dpdk/drivers/net/octeontx2/otx2_ethdev.c +++ b/dpdk/drivers/net/octeontx2/otx2_ethdev.c @@ -18,7 +18,8 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev) @@ -29879,7 +59785,51 @@ index ed329273dc..102d06b39b 100644 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */ aq->rq.ena = 1; aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */ -@@ -1114,10 +1112,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) +@@ -570,6 +568,9 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq, + } + } + ++ /* Setup scatter mode if needed by jumbo */ ++ otx2_nix_enable_mseg_on_jumbo(rxq); ++ + return 0; + + free_rxq: +@@ -697,6 +698,33 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev) + return flags; + } + ++void ++otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq) ++{ ++ struct rte_pktmbuf_pool_private *mbp_priv; ++ struct rte_eth_dev *eth_dev; ++ struct otx2_eth_dev *dev; ++ uint32_t buffsz; ++ ++ eth_dev = rxq->eth_dev; ++ dev = otx2_eth_pmd_priv(eth_dev); ++ ++ /* Get rx buffer size */ ++ mbp_priv = rte_mempool_get_priv(rxq->pool); ++ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; ++ ++ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) { ++ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; ++ dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; ++ ++ /* Setting up the rx[tx]_offload_flags due to change ++ * in rx[tx]_offloads. 
++ */ ++ dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev); ++ dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev); ++ } ++} ++ + static int + nix_sq_init(struct otx2_eth_txq *txq) + { +@@ -1114,10 +1142,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues; for (i = 0; i < nb_txq; i++) { if (txq[i] == NULL) { @@ -29894,7 +59844,7 @@ index ed329273dc..102d06b39b 100644 otx2_nix_tx_queue_release(txq[i]); eth_dev->data->tx_queues[i] = NULL; } -@@ -1125,10 +1125,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) +@@ -1125,10 +1155,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues; for (i = 0; i < nb_rxq; i++) { if (rxq[i] == NULL) { @@ -29909,7 +59859,20 @@ index ed329273dc..102d06b39b 100644 otx2_nix_rx_queue_release(rxq[i]); eth_dev->data->rx_queues[i] = NULL; } -@@ -1183,6 +1185,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) +@@ -1138,10 +1170,8 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) + return 0; + + fail: +- if (tx_qconf) +- free(tx_qconf); +- if (rx_qconf) +- free(rx_qconf); ++ free(tx_qconf); ++ free(rx_qconf); + + return -ENOMEM; + } +@@ -1183,6 +1213,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) * queues are already setup in port_configure(). */ for (i = 0; i < nb_txq; i++) { @@ -29918,7 +59881,7 @@ index ed329273dc..102d06b39b 100644 rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, tx_qconf[i].socket_id, &tx_qconf[i].conf.tx); -@@ -1198,6 +1202,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) +@@ -1198,6 +1230,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) free(tx_qconf); tx_qconf = NULL; for (i = 0; i < nb_rxq; i++) { @@ -29927,7 +59890,7 @@ index ed329273dc..102d06b39b 100644 rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, rx_qconf[i].socket_id, &rx_qconf[i].conf.rx, -@@ -1641,6 +1647,15 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) +@@ -1641,6 +1675,15 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) goto fail_offloads; } @@ -29943,7 +59906,7 @@ index ed329273dc..102d06b39b 100644 rc = nix_lf_switch_header_type_enable(dev); if (rc) { otx2_err("Failed to enable switch type nix_lf rc=%d", rc); -@@ -1714,6 +1729,12 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) +@@ -1714,6 +1757,12 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) goto cq_fini; } @@ -29957,10 +59920,19 @@ index ed329273dc..102d06b39b 100644 if (rc < 0) { otx2_err("Failed to install mc address list rc=%d", rc); diff --git a/dpdk/drivers/net/octeontx2/otx2_ethdev.h b/dpdk/drivers/net/octeontx2/otx2_ethdev.h -index 987e7607c4..864356e36c 100644 +index 987e7607c4..6855200fb2 100644 --- a/dpdk/drivers/net/octeontx2/otx2_ethdev.h +++ b/dpdk/drivers/net/octeontx2/otx2_ethdev.h -@@ -192,6 +192,7 @@ struct otx2_eth_qconf { +@@ -50,6 +50,8 @@ + /* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */ + #define NIX_L2_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8) ++#define NIX_L2_MAX_LEN \ ++ (RTE_ETHER_MTU + NIX_L2_OVERHEAD) + + /* HW config of frame size doesn't include FCS */ + #define NIX_MAX_HW_FRS 9212 +@@ -192,6 +194,7 @@ struct otx2_eth_qconf { void *mempool; uint32_t socket_id; uint16_t nb_desc; @@ -29968,7 +59940,24 @@ index 987e7607c4..864356e36c 100644 }; struct otx2_fc_info { -@@ -438,6 +439,8 @@ int oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev); +@@ -266,6 +269,7 @@ struct otx2_eth_dev { + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + uint8_t 
mkex_pfl_name[MKEX_NAME_LEN]; + uint8_t max_mac_entries; ++ bool dmac_filter_enable; + uint8_t lf_tx_stats; + uint8_t lf_rx_stats; + uint16_t flags; +@@ -422,6 +426,8 @@ int otx2_nix_set_mc_addr_list(struct rte_eth_dev *eth_dev, + /* MTU */ + int otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); + int otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev); ++void otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq); ++ + + /* Link */ + void otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set); +@@ -438,6 +444,8 @@ int oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev); void otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev); void oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev); void oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev); @@ -29977,7 +59966,7 @@ index 987e7607c4..864356e36c 100644 int otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id); -@@ -504,6 +507,8 @@ int otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev, +@@ -504,6 +512,8 @@ int otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr); /* Flow Control */ @@ -30066,6 +60055,68 @@ index 2256e40b6f..b121488faf 100644 + else + otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C); +} +diff --git a/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c b/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c +index 8f1635dbab..beb4f58148 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c ++++ b/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c +@@ -58,7 +58,7 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + if (rc) + return rc; + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > NIX_L2_MAX_LEN) + dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +@@ -72,22 +72,15 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + int + otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev) + { +- struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; +- struct rte_pktmbuf_pool_private *mbp_priv; + struct otx2_eth_rxq *rxq; +- uint32_t buffsz; + uint16_t mtu; + int rc; + +- /* Get rx buffer size */ + rxq = data->rx_queues[0]; +- mbp_priv = rte_mempool_get_priv(rxq->pool); +- buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + + /* Setup scatter mode if needed by jumbo */ +- if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) +- dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; ++ otx2_nix_enable_mseg_on_jumbo(rxq); + + /* Setup MTU based on max_rx_pkt_len */ + mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD; +@@ -148,8 +141,10 @@ otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev) + int + otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev) + { +- otx2_nix_promisc_config(eth_dev, 0); ++ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); ++ otx2_nix_promisc_config(eth_dev, dev->dmac_filter_enable); + nix_cgx_promisc_config(eth_dev, 0); ++ dev->dmac_filter_enable = false; + + return 0; + } +diff --git a/dpdk/drivers/net/octeontx2/otx2_flow.c b/dpdk/drivers/net/octeontx2/otx2_flow.c +index f1fb9f9884..c9886c9705 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_flow.c ++++ b/dpdk/drivers/net/octeontx2/otx2_flow.c +@@ -269,6 +269,8 @@ flow_program_rss_action(struct rte_eth_dev *eth_dev, + if (rc) + return rc; + ++ flow->npc_action &= (~(0xfULL)); ++ flow->npc_action |= NIX_RX_ACTIONOP_RSS; + flow->npc_action |= + ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) << + NIX_RSS_ACT_ALG_OFFSET) | diff --git 
a/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c b/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c index c6d7b1971a..76bf481001 100644 --- a/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c @@ -30126,6 +60177,35 @@ index c6d7b1971a..76bf481001 100644 +exit: + return rc; +} +diff --git a/dpdk/drivers/net/octeontx2/otx2_flow_parse.c b/dpdk/drivers/net/octeontx2/otx2_flow_parse.c +index 2d9a5857c0..7a7cb29fbd 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_flow_parse.c ++++ b/dpdk/drivers/net/octeontx2/otx2_flow_parse.c +@@ -1034,7 +1034,10 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev, + + set_pf_func: + /* Ideally AF must ensure that correct pf_func is set */ +- flow->npc_action |= (uint64_t)pf_func << 4; ++ if (attr->egress) ++ flow->npc_action |= (uint64_t)pf_func << 48; ++ else ++ flow->npc_action |= (uint64_t)pf_func << 4; + + return 0; + +diff --git a/dpdk/drivers/net/octeontx2/otx2_flow_utils.c b/dpdk/drivers/net/octeontx2/otx2_flow_utils.c +index 14625c9ad1..1adeff2563 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_flow_utils.c ++++ b/dpdk/drivers/net/octeontx2/otx2_flow_utils.c +@@ -940,7 +940,7 @@ otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, struct otx2_mbox *mbox, + req->entry_data.kw[0] |= flow_info->channel; + req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1); + } else { +- uint16_t pf_func = (flow->npc_action >> 4) & 0xffff; ++ uint16_t pf_func = (flow->npc_action >> 48) & 0xffff; + + pf_func = htons(pf_func); + req->entry_data.kw[0] |= ((uint64_t)pf_func << 32); diff --git a/dpdk/drivers/net/octeontx2/otx2_link.c b/dpdk/drivers/net/octeontx2/otx2_link.c index f5679b06e7..4128f56d90 100644 --- a/dpdk/drivers/net/octeontx2/otx2_link.c @@ -30225,6 +60305,19 @@ index bcf2ff4e8f..5685571166 100644 } int +diff --git a/dpdk/drivers/net/octeontx2/otx2_mac.c b/dpdk/drivers/net/octeontx2/otx2_mac.c +index 262d185e54..49a700ca1d 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_mac.c ++++ b/dpdk/drivers/net/octeontx2/otx2_mac.c +@@ -76,6 +76,8 @@ otx2_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr, + + /* Enable promiscuous mode at NIX level */ + otx2_nix_promisc_config(eth_dev, 1); ++ dev->dmac_filter_enable = true; ++ eth_dev->data->promiscuous = 0; + + done: + return rc; diff --git a/dpdk/drivers/net/octeontx2/otx2_ptp.c b/dpdk/drivers/net/octeontx2/otx2_ptp.c index f34b9339c4..ae5a2b7cd1 100644 --- a/dpdk/drivers/net/octeontx2/otx2_ptp.c @@ -30254,8 +60347,177 @@ index bc7b64387a..d80579725a 100644 return 0; /* Update default RSS key and cfg */ +diff --git a/dpdk/drivers/net/octeontx2/otx2_rx.c b/dpdk/drivers/net/octeontx2/otx2_rx.c +index 48565db030..23d5c30b59 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_rx.c ++++ b/dpdk/drivers/net/octeontx2/otx2_rx.c +@@ -273,6 +273,12 @@ nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts, + vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2); + vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3); + ++ /* Update that no more segments */ ++ mbuf0->next = NULL; ++ mbuf1->next = NULL; ++ mbuf2->next = NULL; ++ mbuf3->next = NULL; ++ + /* Store the mbufs to rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01); + vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23); +diff --git a/dpdk/drivers/net/octeontx2/otx2_rx.h b/dpdk/drivers/net/octeontx2/otx2_rx.h +index 351ad0fcb4..1863bfde72 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_rx.h ++++ b/dpdk/drivers/net/octeontx2/otx2_rx.h +@@ -188,6 +188,7 @@ nix_cqe_xtract_mseg(const struct nix_rx_parse_s *rx, + iova_list = (const rte_iova_t *)(iova_list + 1); + } + } ++ 
mbuf->next = NULL; + } + + static __rte_always_inline void +@@ -235,10 +236,12 @@ otx2_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, + *(uint64_t *)(&mbuf->rearm_data) = val; + mbuf->pkt_len = len; + +- if (flag & NIX_RX_MULTI_SEG_F) ++ if (flag & NIX_RX_MULTI_SEG_F) { + nix_cqe_xtract_mseg(rx, mbuf, val); +- else ++ } else { + mbuf->data_len = len; ++ mbuf->next = NULL; ++ } + } + + #define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F +diff --git a/dpdk/drivers/net/octeontx2/otx2_vlan.c b/dpdk/drivers/net/octeontx2/otx2_vlan.c +index 322a565b3e..7357b06695 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_vlan.c ++++ b/dpdk/drivers/net/octeontx2/otx2_vlan.c +@@ -717,11 +717,6 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) + + rxmode = ð_dev->data->dev_conf.rxmode; + +- if (mask & ETH_VLAN_EXTEND_MASK) { +- otx2_err("Extend offload not supported"); +- return -ENOTSUP; +- } +- + if (mask & ETH_VLAN_STRIP_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; +diff --git a/dpdk/drivers/net/pcap/rte_eth_pcap.c b/dpdk/drivers/net/pcap/rte_eth_pcap.c +index aa7ef6fdbc..f4afe67116 100644 +--- a/dpdk/drivers/net/pcap/rte_eth_pcap.c ++++ b/dpdk/drivers/net/pcap/rte_eth_pcap.c +@@ -377,7 +377,7 @@ eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + return 0; + + for (i = 0; i < nb_pkts; i++) { +- tx_bytes += bufs[i]->data_len; ++ tx_bytes += bufs[i]->pkt_len; + rte_pktmbuf_free(bufs[i]); + } + +@@ -723,6 +723,17 @@ eth_stats_reset(struct rte_eth_dev *dev) + return 0; + } + ++static inline void ++infinite_rx_ring_free(struct rte_ring *pkts) ++{ ++ struct rte_mbuf *bufs; ++ ++ while (!rte_ring_dequeue(pkts, (void **)&bufs)) ++ rte_pktmbuf_free(bufs); ++ ++ rte_ring_free(pkts); ++} ++ + static void + eth_dev_close(struct rte_eth_dev *dev) + { +@@ -733,13 +744,15 @@ eth_dev_close(struct rte_eth_dev *dev) + if (internals->infinite_rx) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct pcap_rx_queue *pcap_q = &internals->rx_queue[i]; +- struct rte_mbuf *pcap_buf; + +- while (!rte_ring_dequeue(pcap_q->pkts, +- (void **)&pcap_buf)) +- rte_pktmbuf_free(pcap_buf); ++ /* ++ * 'pcap_q->pkts' can be NULL if 'eth_dev_close()' ++ * called before 'eth_rx_queue_setup()' has been called ++ */ ++ if (pcap_q->pkts == NULL) ++ continue; + +- rte_ring_free(pcap_q->pkts); ++ infinite_rx_ring_free(pcap_q->pkts); + } + } + +@@ -803,21 +816,25 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, + while (eth_pcap_rx(pcap_q, bufs, 1)) { + /* Check for multiseg mbufs. */ + if (bufs[0]->nb_segs != 1) { +- rte_pktmbuf_free(*bufs); +- +- while (!rte_ring_dequeue(pcap_q->pkts, +- (void **)bufs)) +- rte_pktmbuf_free(*bufs); +- +- rte_ring_free(pcap_q->pkts); +- PMD_LOG(ERR, "Multiseg mbufs are not supported in infinite_rx " +- "mode."); ++ infinite_rx_ring_free(pcap_q->pkts); ++ PMD_LOG(ERR, ++ "Multiseg mbufs are not supported in infinite_rx mode."); + return -EINVAL; + } + + rte_ring_enqueue_bulk(pcap_q->pkts, + (void * const *)bufs, 1, NULL); + } ++ ++ if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) { ++ infinite_rx_ring_free(pcap_q->pkts); ++ PMD_LOG(ERR, ++ "Not enough mbufs to accommodate packets in pcap file. " ++ "At least %" PRIu64 " mbufs per queue is required.", ++ pcap_pkt_count); ++ return -EINVAL; ++ } ++ + /* + * Reset the stats for this queue since eth_pcap_rx calls above + * didn't result in the application receiving packets. 
+@@ -1291,9 +1308,8 @@ eth_from_pcaps(struct rte_vdev_device *vdev, + + /* phy_mac arg is applied only only if "iface" devarg is provided */ + if (rx_queues->phy_mac) { +- int ret = eth_pcap_update_mac(rx_queues->queue[0].name, +- eth_dev, vdev->device.numa_node); +- if (ret == 0) ++ if (eth_pcap_update_mac(rx_queues->queue[0].name, ++ eth_dev, vdev->device.numa_node) == 0) + internals->phy_mac = 1; + } + } +@@ -1398,7 +1414,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev) + devargs_all.is_rx_pcap = + rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; + devargs_all.is_rx_iface = +- rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) ? 1 : 0; ++ (rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) + ++ rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_IN_ARG)) ? 1 : 0; + pcaps.num_of_queue = 0; + + devargs_all.is_tx_pcap = diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c -index 9403478198..b1de866d34 100644 +index 9403478198..5a231918a1 100644 --- a/dpdk/drivers/net/pfe/pfe_ethdev.c +++ b/dpdk/drivers/net/pfe/pfe_ethdev.c @@ -13,7 +13,7 @@ @@ -30275,7 +60537,17 @@ index 9403478198..b1de866d34 100644 rte_eth_dev_release_port(dev); pfe->nb_devs--; } -@@ -990,7 +989,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev) +@@ -430,9 +429,6 @@ static int + pfe_eth_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) + { +- struct pfe_eth_priv_s *internals = dev->data->dev_private; +- +- dev_info->if_index = internals->id; + dev_info->max_mac_addrs = PFE_MAX_MACS; + dev_info->max_rx_queues = dev->data->nb_rx_queues; + dev_info->max_tx_queues = dev->data->nb_tx_queues; +@@ -990,7 +986,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev) if (rc < 0) return -EINVAL; @@ -30284,7 +60556,7 @@ index 9403478198..b1de866d34 100644 name, init_params.gem_id); if (g_pfe) { -@@ -1118,7 +1117,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev) +@@ -1118,7 +1114,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev) else gem_id = init_params.gem_id; @@ -30293,6 +60565,21 @@ index 9403478198..b1de866d34 100644 name, gem_id, init_params.gem_id); rc = pfe_eth_init(vdev, g_pfe, gem_id); +diff --git a/dpdk/drivers/net/qede/base/bcm_osal.h b/dpdk/drivers/net/qede/base/bcm_osal.h +index 0f09557cf0..d9f507f233 100644 +--- a/dpdk/drivers/net/qede/base/bcm_osal.h ++++ b/dpdk/drivers/net/qede/base/bcm_osal.h +@@ -81,9 +81,8 @@ typedef int bool; + + #define DELAY(x) rte_delay_us(x) + #define usec_delay(x) DELAY(x) +-#define msec_delay(x) DELAY(1000 * (x)) + #define OSAL_UDELAY(time) usec_delay(time) +-#define OSAL_MSLEEP(time) msec_delay(time) ++#define OSAL_MSLEEP(time) rte_delay_us_sleep(1000 * (time)) + + /* Memory allocations and deallocations */ + diff --git a/dpdk/drivers/net/qede/base/ecore_dev.c b/dpdk/drivers/net/qede/base/ecore_dev.c index 9d1db14590..86ecfb2690 100644 --- a/dpdk/drivers/net/qede/base/ecore_dev.c @@ -30401,7 +60688,7 @@ index c998dbf8d5..5450018121 100644 struct ecore_mcp_link_params; diff --git a/dpdk/drivers/net/qede/base/ecore_sriov.c b/dpdk/drivers/net/qede/base/ecore_sriov.c -index deee04ac4b..e60257e190 100644 +index deee04ac4b..6633d6b42e 100644 --- a/dpdk/drivers/net/qede/base/ecore_sriov.c +++ b/dpdk/drivers/net/qede/base/ecore_sriov.c @@ -61,6 +61,39 @@ const char *qede_ecore_channel_tlvs_string[] = { @@ -30444,6 +60731,15 @@ index deee04ac4b..e60257e190 100644 "CHANNEL_TLV_MAX" }; +@@ -4042,7 +4075,7 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); + if (rc) { + /* TODO - again, a mess... 
*/ +- DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", ++ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n", + vfid); + return rc; + } diff --git a/dpdk/drivers/net/qede/base/ecore_vf.c b/dpdk/drivers/net/qede/base/ecore_vf.c index 24846cfb51..0e5b7d5eb3 100644 --- a/dpdk/drivers/net/qede/base/ecore_vf.c @@ -30705,10 +61001,44 @@ index 98b9723dd4..6667c2d7ab 100644 #define FW_MSG_CODE_ERR_RESOURCE_ALREADY_ALLOCATED 0x008c0000 #define FW_MSG_CODE_ERR_RESOURCE_NOT_ALLOCATED 0x008d0000 diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c -index 19d2e96191..2a1c82ac9a 100644 +index 19d2e96191..6f2f0051f6 100644 --- a/dpdk/drivers/net/qede/qede_ethdev.c +++ b/dpdk/drivers/net/qede/qede_ethdev.c -@@ -1064,7 +1064,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) +@@ -551,17 +551,16 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, + ECORE_ACCEPT_BCAST; + + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { +- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; ++ flags.rx_accept_filter |= (ECORE_ACCEPT_UCAST_UNMATCHED | ++ ECORE_ACCEPT_MCAST_UNMATCHED); + if (IS_VF(edev)) { +- flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; +- DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); ++ flags.tx_accept_filter |= ++ (ECORE_ACCEPT_UCAST_UNMATCHED | ++ ECORE_ACCEPT_MCAST_UNMATCHED); ++ DP_INFO(edev, "Enabling Tx unmatched flags for VF\n"); + } + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { + flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; +- } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | +- QED_FILTER_RX_MODE_TYPE_PROMISC)) { +- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | +- ECORE_ACCEPT_MCAST_UNMATCHED; + } + + return ecore_filter_accept_cmd(edev, 0, flags, false, false, +@@ -962,9 +961,6 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) + } + } + +- if (mask & ETH_VLAN_EXTEND_MASK) +- DP_ERR(edev, "Extend VLAN not supported\n"); +- + qdev->vlan_offload_mask = mask; + + DP_INFO(edev, "VLAN offload mask %d\n", mask); +@@ -1064,7 +1060,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) qede_reset_queue_stats(qdev, true); /* Newer SR-IOV PF driver expects RX/TX queues to be started before @@ -30717,7 +61047,7 @@ index 19d2e96191..2a1c82ac9a 100644 * Also, we would like to retain similar behavior in PF case, so we * don't do PF/VF specific check here. 
*/ -@@ -1076,6 +1076,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) +@@ -1076,6 +1072,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) if (qede_activate_vport(eth_dev, true)) goto err; @@ -30727,7 +61057,7 @@ index 19d2e96191..2a1c82ac9a 100644 /* Update link status */ qede_link_update(eth_dev, 0); -@@ -1097,6 +1100,12 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) +@@ -1097,6 +1096,12 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(edev); @@ -30740,7 +61070,7 @@ index 19d2e96191..2a1c82ac9a 100644 /* Disable vport */ if (qede_activate_vport(eth_dev, false)) return; -@@ -1182,6 +1191,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) +@@ -1182,6 +1187,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; @@ -30749,7 +61079,7 @@ index 19d2e96191..2a1c82ac9a 100644 int ret; PMD_INIT_FUNC_TRACE(edev); -@@ -1214,12 +1225,17 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) +@@ -1214,12 +1221,17 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) if (qede_check_fdir_support(eth_dev)) return -ENOTSUP; @@ -30773,7 +61103,27 @@ index 19d2e96191..2a1c82ac9a 100644 /* If jumbo enabled adjust MTU */ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) -@@ -1472,7 +1488,8 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) +@@ -1404,16 +1416,13 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) + + static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev) + { +- struct qede_dev *qdev = eth_dev->data->dev_private; +- struct ecore_dev *edev = &qdev->edev; +- enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; + enum _ecore_status_t ecore_status; ++ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); ++ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); ++ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; + + PMD_INIT_FUNC_TRACE(edev); + +- if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) +- type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; +- + ecore_status = qed_configure_filter_rx_mode(eth_dev, type); + + return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; +@@ -1472,7 +1481,8 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) if (eth_dev->data->dev_started) qede_dev_stop(eth_dev); @@ -30783,7 +61133,7 @@ index 19d2e96191..2a1c82ac9a 100644 qdev->vport_started = false; qede_fdir_dealloc_resc(eth_dev); qede_dealloc_fp_resc(eth_dev); -@@ -1480,8 +1497,6 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) +@@ -1480,8 +1490,6 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) eth_dev->data->nb_rx_queues = 0; eth_dev->data->nb_tx_queues = 0; @@ -30792,7 +61142,26 @@ index 19d2e96191..2a1c82ac9a 100644 qdev->ops->common->slowpath_stop(edev); qdev->ops->common->remove(edev); rte_intr_disable(&pci_dev->intr_handle); -@@ -2604,9 +2619,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) +@@ -1789,8 +1797,7 @@ static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) + enum _ecore_status_t ecore_status; + + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) +- type |= QED_FILTER_RX_MODE_TYPE_PROMISC; +- ++ type = QED_FILTER_RX_MODE_TYPE_PROMISC; + ecore_status = qed_configure_filter_rx_mode(eth_dev, type); + + return ecore_status >= ECORE_SUCCESS ? 
0 : -EAGAIN; +@@ -2275,7 +2282,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + fp->rxq->rx_buf_size = rc; + } + } +- if (max_rx_pkt_len > RTE_ETHER_MAX_LEN) ++ if (frame_size > QEDE_ETH_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +@@ -2604,9 +2611,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops; @@ -30803,7 +61172,7 @@ index 19d2e96191..2a1c82ac9a 100644 adapter->num_rx_queues = 0; SLIST_INIT(&adapter->arfs_info.arfs_list_head); diff --git a/dpdk/drivers/net/qede/qede_main.c b/dpdk/drivers/net/qede/qede_main.c -index 4eb79d0fbb..8580cbcd7f 100644 +index 4eb79d0fbb..67392d6aa4 100644 --- a/dpdk/drivers/net/qede/qede_main.c +++ b/dpdk/drivers/net/qede/qede_main.c @@ -56,6 +56,10 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev, @@ -30817,8 +61186,26 @@ index 4eb79d0fbb..8580cbcd7f 100644 hw_prepare_params.personality = ECORE_PCI_ETH; hw_prepare_params.drv_resc_alloc = false; hw_prepare_params.chk_reg_fifo = false; +@@ -571,13 +575,12 @@ qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link) + hwfn = &edev->hwfns[0]; + if (IS_PF(edev)) { + ptt = ecore_ptt_acquire(hwfn); +- if (!ptt) +- DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n"); +- ++ if (ptt) { + qed_fill_link(hwfn, ptt, if_link); +- +- if (ptt) + ecore_ptt_release(hwfn, ptt); ++ } else { ++ DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n"); ++ } + } else { + qed_fill_link(hwfn, NULL, if_link); + } diff --git a/dpdk/drivers/net/qede/qede_rxtx.c b/dpdk/drivers/net/qede/qede_rxtx.c -index a28dd0a07f..3c55c0efdf 100644 +index a28dd0a07f..64e6de4743 100644 --- a/dpdk/drivers/net/qede/qede_rxtx.c +++ b/dpdk/drivers/net/qede/qede_rxtx.c @@ -593,12 +593,14 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info, @@ -30837,11 +61224,54 @@ index a28dd0a07f..3c55c0efdf 100644 if (IS_VF(edev)) ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs); else +@@ -645,8 +647,6 @@ int qede_alloc_fp_resc(struct qede_dev *qdev) + + for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) { + fp = &qdev->fp_array[sb_idx]; +- if (!fp) +- continue; + fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info), + RTE_CACHE_LINE_SIZE); + if (!fp->sb_info) { +@@ -676,11 +676,9 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev) + + for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) { + fp = &qdev->fp_array[sb_idx]; +- if (!fp) +- continue; +- DP_INFO(edev, "Free sb_info index 0x%x\n", +- fp->sb_info->igu_sb_id); + if (fp->sb_info) { ++ DP_INFO(edev, "Free sb_info index 0x%x\n", ++ fp->sb_info->igu_sb_id); + OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt, + fp->sb_info->sb_phys, + sizeof(struct status_block)); +diff --git a/dpdk/drivers/net/qede/qede_rxtx.h b/dpdk/drivers/net/qede/qede_rxtx.h +index 75cc930fd5..8dc669963b 100644 +--- a/dpdk/drivers/net/qede/qede_rxtx.h ++++ b/dpdk/drivers/net/qede/qede_rxtx.h +@@ -71,6 +71,7 @@ + + (QEDE_LLC_SNAP_HDR_LEN) + 2) + + #define QEDE_MAX_ETHER_HDR_LEN (RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD) ++#define QEDE_ETH_MAX_LEN (RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN) + + #define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\ + ETH_RSS_NONFRAG_IPV4_TCP |\ diff --git a/dpdk/drivers/net/ring/rte_eth_ring.c b/dpdk/drivers/net/ring/rte_eth_ring.c -index 41acbc513d..f0fafa0c0d 100644 +index 41acbc513d..cae959b76c 100644 --- 
a/dpdk/drivers/net/ring/rte_eth_ring.c +++ b/dpdk/drivers/net/ring/rte_eth_ring.c -@@ -246,6 +246,7 @@ static const struct eth_dev_ops ops = { +@@ -16,6 +16,7 @@ + #define ETH_RING_ACTION_CREATE "CREATE" + #define ETH_RING_ACTION_ATTACH "ATTACH" + #define ETH_RING_INTERNAL_ARG "internal" ++#define ETH_RING_INTERNAL_ARG_MAX_LEN 19 /* "0x..16chars..\0" */ + + static const char *valid_arguments[] = { + ETH_RING_NUMA_NODE_ACTION_ARG, +@@ -246,6 +247,7 @@ static const struct eth_dev_ops ops = { static int do_eth_dev_ring_create(const char *name, @@ -30849,7 +61279,7 @@ index 41acbc513d..f0fafa0c0d 100644 struct rte_ring * const rx_queues[], const unsigned int nb_rx_queues, struct rte_ring *const tx_queues[], -@@ -291,12 +292,15 @@ do_eth_dev_ring_create(const char *name, +@@ -291,12 +293,15 @@ do_eth_dev_ring_create(const char *name, } /* now put it all together @@ -30865,7 +61295,7 @@ index 41acbc513d..f0fafa0c0d 100644 data = eth_dev->data; data->rx_queues = rx_queues_local; data->tx_queues = tx_queues_local; -@@ -408,7 +412,9 @@ rte_eth_from_ring(struct rte_ring *r) +@@ -408,7 +413,9 @@ rte_eth_from_ring(struct rte_ring *r) } static int @@ -30876,7 +61306,7 @@ index 41acbc513d..f0fafa0c0d 100644 enum dev_action action, struct rte_eth_dev **eth_dev) { /* rx and tx are so-called from point of view of first port. -@@ -438,7 +444,7 @@ eth_dev_ring_create(const char *name, const unsigned int numa_node, +@@ -438,7 +445,7 @@ eth_dev_ring_create(const char *name, const unsigned int numa_node, return -1; } @@ -30885,7 +61315,30 @@ index 41acbc513d..f0fafa0c0d 100644 numa_node, action, eth_dev) < 0) return -1; -@@ -560,12 +566,12 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) +@@ -533,8 +540,21 @@ parse_internal_args(const char *key __rte_unused, const char *value, + { + struct ring_internal_args **internal_args = data; + void *args; ++ int ret, n; + +- sscanf(value, "%p", &args); ++ /* make sure 'value' is valid pointer length */ ++ if (strnlen(value, ETH_RING_INTERNAL_ARG_MAX_LEN) >= ++ ETH_RING_INTERNAL_ARG_MAX_LEN) { ++ PMD_LOG(ERR, "Error parsing internal args, argument is too long"); ++ return -1; ++ } ++ ++ ret = sscanf(value, "%p%n", &args, &n); ++ if (ret == 0 || (size_t)n != strlen(value)) { ++ PMD_LOG(ERR, "Error parsing internal args"); ++ ++ return -1; ++ } + + *internal_args = args; + +@@ -560,12 +580,12 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) PMD_LOG(INFO, "Initializing pmd_ring for %s", name); if (params == NULL || params[0] == '\0') { @@ -30900,11 +61353,13 @@ index 41acbc513d..f0fafa0c0d 100644 DEV_ATTACH, ð_dev); } } else { -@@ -574,19 +580,16 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) +@@ -573,20 +593,17 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) + if (!kvlist) { PMD_LOG(INFO, - "Ignoring unsupported parameters when creatingrings-backed ethernet device"); +- "Ignoring unsupported parameters when creatingrings-backed ethernet device"); - ret = eth_dev_ring_create(name, rte_socket_id(), ++ "Ignoring unsupported parameters when creating rings-backed ethernet device"); + ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE, ð_dev); if (ret == -1) { @@ -30922,7 +61377,7 @@ index 41acbc513d..f0fafa0c0d 100644 return ret; } -@@ -597,7 +600,7 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) +@@ -597,7 +614,7 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) if (ret < 0) goto out_free; @@ -30931,7 +61386,7 @@ index 41acbc513d..f0fafa0c0d 100644 internal_args->rx_queues, internal_args->nb_rx_queues, internal_args->tx_queues, -@@ -627,6 +630,7 @@ 
rte_pmd_ring_probe(struct rte_vdev_device *dev) +@@ -627,6 +644,7 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) for (info->count = 0; info->count < info->total; info->count++) { ret = eth_dev_ring_create(info->list[info->count].name, @@ -30939,7 +61394,7 @@ index 41acbc513d..f0fafa0c0d 100644 info->list[info->count].node, info->list[info->count].action, ð_dev); -@@ -635,7 +639,7 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) +@@ -635,7 +653,7 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) PMD_LOG(INFO, "Attach to pmd_ring for %s", name); @@ -30948,7 +61403,7 @@ index 41acbc513d..f0fafa0c0d 100644 info->list[info->count].node, DEV_ATTACH, ð_dev); -@@ -644,9 +648,6 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) +@@ -644,9 +662,6 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) } } @@ -32082,8 +62537,38 @@ index 791105a5a0..ecf703b03d 100644 __checkReturn efx_rc_t efx_proxy_auth_init( +diff --git a/dpdk/drivers/net/sfc/base/efx_tunnel.c b/dpdk/drivers/net/sfc/base/efx_tunnel.c +index edb6be028f..c92c02cfa2 100644 +--- a/dpdk/drivers/net/sfc/base/efx_tunnel.c ++++ b/dpdk/drivers/net/sfc/base/efx_tunnel.c +@@ -421,7 +421,7 @@ ef10_tunnel_reconfigure( + { + efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; + efx_rc_t rc; +- boolean_t resetting; ++ boolean_t resetting = B_FALSE; + efsys_lock_state_t state; + efx_tunnel_cfg_t etc; + +@@ -446,8 +446,14 @@ ef10_tunnel_reconfigure( + */ + rc = efx_mcdi_set_tunnel_encap_udp_ports(enp, &etc, B_FALSE, + &resetting); +- if (rc != 0) +- goto fail2; ++ if (rc != 0) { ++ /* ++ * Do not fail if the access is denied when no ++ * tunnel encap UDP ports are configured. ++ */ ++ if (rc != EACCES || etc.etc_udp_entries_num != 0) ++ goto fail2; ++ } + + /* + * Although the caller should be able to handle MC reboot, diff --git a/dpdk/drivers/net/sfc/sfc.c b/dpdk/drivers/net/sfc/sfc.c -index 141c767f09..3f5cd7758b 100644 +index 141c767f09..da67acaa87 100644 --- a/dpdk/drivers/net/sfc/sfc.c +++ b/dpdk/drivers/net/sfc/sfc.c @@ -30,7 +30,7 @@ sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id, @@ -32105,8 +62590,59 @@ index 141c767f09..3f5cd7758b 100644 return rc; } +@@ -684,6 +684,7 @@ sfc_rss_attach(struct sfc_adapter *sa) + efx_intr_fini(sa->nic); + + rte_memcpy(rss->key, default_rss_key, sizeof(rss->key)); ++ rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT; + + return 0; + +diff --git a/dpdk/drivers/net/sfc/sfc.h b/dpdk/drivers/net/sfc/sfc.h +index cc52228771..bce6beefaa 100644 +--- a/dpdk/drivers/net/sfc/sfc.h ++++ b/dpdk/drivers/net/sfc/sfc.h +@@ -172,6 +172,8 @@ struct sfc_rss { + efx_rx_hash_type_t hash_types; + unsigned int tbl[EFX_RSS_TBL_SIZE]; + uint8_t key[EFX_RSS_KEY_SIZE]; ++ ++ uint32_t dummy_rss_context; + }; + + /* Adapter private data shared by primary and secondary processes */ +diff --git a/dpdk/drivers/net/sfc/sfc_ef10_tx.c b/dpdk/drivers/net/sfc/sfc_ef10_tx.c +index 43e3447805..b57c9afb71 100644 +--- a/dpdk/drivers/net/sfc/sfc_ef10_tx.c ++++ b/dpdk/drivers/net/sfc/sfc_ef10_tx.c +@@ -477,6 +477,25 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg, + needed_desc--; + } + ++ /* ++ * 8000-series EF10 hardware requires that innermost IP length ++ * be greater than or equal to the value which each segment is ++ * supposed to have; otherwise, TCP checksum will be incorrect. ++ * ++ * The same concern applies to outer UDP datagram length field. 
++ */ ++ switch (m_seg->ol_flags & PKT_TX_TUNNEL_MASK) { ++ case PKT_TX_TUNNEL_VXLAN: ++ /* FALLTHROUGH */ ++ case PKT_TX_TUNNEL_GENEVE: ++ sfc_tso_outer_udp_fix_len(first_m_seg, hdr_addr); ++ break; ++ default: ++ break; ++ } ++ ++ sfc_tso_innermost_ip_fix_len(first_m_seg, hdr_addr, iph_off); ++ + /* + * Tx prepare has debug-only checks that offload flags are correctly + * filled in in TSO mbuf. Use zero IPID if there is no IPv4 flag. diff --git a/dpdk/drivers/net/sfc/sfc_ethdev.c b/dpdk/drivers/net/sfc/sfc_ethdev.c -index 454b8956a2..f8867b0ec0 100644 +index 454b8956a2..cc7eefb322 100644 --- a/dpdk/drivers/net/sfc/sfc_ethdev.c +++ b/dpdk/drivers/net/sfc/sfc_ethdev.c @@ -405,25 +405,37 @@ sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, @@ -32151,7 +62687,52 @@ index 454b8956a2..f8867b0ec0 100644 } static int -@@ -1520,7 +1532,7 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, +@@ -596,10 +608,19 @@ sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]; + stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS]; + stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS]; ++ ++ /* CRC is included in these stats, but shouldn't be */ ++ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN; ++ stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN; + } else { + stats->opackets = mac_stats[EFX_MAC_TX_PKTS]; + stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS]; + stats->obytes = mac_stats[EFX_MAC_TX_OCTETS]; ++ ++ /* CRC is included in these stats, but shouldn't be */ ++ stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN; ++ stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN; ++ + /* + * Take into account stats which are whenever supported + * on EF10. If some stat is not supported by current +@@ -972,7 +993,7 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + * The driver does not use it, but other PMDs update jumbo frame + * flag and max_rx_pkt_len when MTU is set. + */ +- if (mtu > RTE_ETHER_MAX_LEN) { ++ if (mtu > RTE_ETHER_MTU) { + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + } +@@ -1503,8 +1524,15 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + unsigned int efx_hash_types; ++ uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context}; ++ unsigned int n_contexts; ++ unsigned int mode_i = 0; ++ unsigned int key_i = 0; ++ unsigned int i = 0; + int rc = 0; + ++ n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 
1 : 2; ++ + if (sfc_sa2shared(sa)->isolated) + return -ENOTSUP; + +@@ -1520,7 +1548,7 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, if ((rss_conf->rss_key != NULL) && (rss_conf->rss_key_len != sizeof(rss->key))) { @@ -32160,8 +62741,68 @@ index 454b8956a2..f8867b0ec0 100644 sizeof(rss->key)); return -EINVAL; } +@@ -1531,19 +1559,24 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, + if (rc != 0) + goto fail_rx_hf_rte_to_efx; + +- rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, +- rss->hash_alg, efx_hash_types, B_TRUE); +- if (rc != 0) +- goto fail_scale_mode_set; ++ for (mode_i = 0; mode_i < n_contexts; mode_i++) { ++ rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i], ++ rss->hash_alg, efx_hash_types, ++ B_TRUE); ++ if (rc != 0) ++ goto fail_scale_mode_set; ++ } + + if (rss_conf->rss_key != NULL) { + if (sa->state == SFC_ADAPTER_STARTED) { +- rc = efx_rx_scale_key_set(sa->nic, +- EFX_RSS_CONTEXT_DEFAULT, +- rss_conf->rss_key, +- sizeof(rss->key)); +- if (rc != 0) +- goto fail_scale_key_set; ++ for (key_i = 0; key_i < n_contexts; key_i++) { ++ rc = efx_rx_scale_key_set(sa->nic, ++ contexts[key_i], ++ rss_conf->rss_key, ++ sizeof(rss->key)); ++ if (rc != 0) ++ goto fail_scale_key_set; ++ } + } + + rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key)); +@@ -1556,12 +1589,20 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, + return 0; + + fail_scale_key_set: +- if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, +- EFX_RX_HASHALG_TOEPLITZ, +- rss->hash_types, B_TRUE) != 0) +- sfc_err(sa, "failed to restore RSS mode"); ++ for (i = 0; i < key_i; i++) { ++ if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key, ++ sizeof(rss->key)) != 0) ++ sfc_err(sa, "failed to restore RSS key"); ++ } + + fail_scale_mode_set: ++ for (i = 0; i < mode_i; i++) { ++ if (efx_rx_scale_mode_set(sa->nic, contexts[i], ++ EFX_RX_HASHALG_TOEPLITZ, ++ rss->hash_types, B_TRUE) != 0) ++ sfc_err(sa, "failed to restore RSS mode"); ++ } ++ + fail_rx_hf_rte_to_efx: + sfc_adapter_unlock(sa); + return -rc; diff --git a/dpdk/drivers/net/sfc/sfc_flow.c b/dpdk/drivers/net/sfc/sfc_flow.c -index 8d636f6923..023e55d951 100644 +index 8d636f6923..91aa2a687a 100644 --- a/dpdk/drivers/net/sfc/sfc_flow.c +++ b/dpdk/drivers/net/sfc/sfc_flow.c @@ -1132,6 +1132,7 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr, @@ -32172,8 +62813,172 @@ index 8d636f6923..023e55d951 100644 return 0; } +@@ -1239,6 +1240,7 @@ sfc_flow_parse_queue(struct sfc_adapter *sa, + struct rte_flow *flow) + { + struct sfc_rxq *rxq; ++ struct sfc_rxq_info *rxq_info; + + if (queue->index >= sfc_sa2shared(sa)->rxq_count) + return -EINVAL; +@@ -1246,6 +1248,10 @@ sfc_flow_parse_queue(struct sfc_adapter *sa, + rxq = &sa->rxq_ctrl[queue->index]; + flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index; + ++ rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index]; ++ flow->spec.rss_hash_required = !!(rxq_info->rxq_flags & ++ SFC_RXQ_FLAG_RSS_HASH); ++ + return 0; + } + +@@ -1404,13 +1410,34 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, + struct sfc_rss *rss = &sas->rss; + struct sfc_flow_rss *flow_rss = &flow->rss_conf; + uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; ++ boolean_t create_context; + unsigned int i; + int rc = 0; + +- if (flow->rss) { +- unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max - +- flow_rss->rxq_hw_index_min + 1, +- EFX_MAXRSS); ++ create_context = flow->rss || (flow->spec.rss_hash_required && ++ rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT); ++ ++ if (create_context) { ++ unsigned int 
rss_spread; ++ unsigned int rss_hash_types; ++ uint8_t *rss_key; ++ ++ if (flow->rss) { ++ rss_spread = MIN(flow_rss->rxq_hw_index_max - ++ flow_rss->rxq_hw_index_min + 1, ++ EFX_MAXRSS); ++ rss_hash_types = flow_rss->rss_hash_types; ++ rss_key = flow_rss->rss_key; ++ } else { ++ /* ++ * Initialize dummy RSS context parameters to have ++ * valid RSS hash. Use default RSS hash function and ++ * key. ++ */ ++ rss_spread = 1; ++ rss_hash_types = rss->hash_types; ++ rss_key = rss->key; ++ } + + rc = efx_rx_scale_context_alloc(sa->nic, + EFX_RX_SCALE_EXCLUSIVE, +@@ -1421,16 +1448,19 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, + + rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, + rss->hash_alg, +- flow_rss->rss_hash_types, B_TRUE); ++ rss_hash_types, B_TRUE); + if (rc != 0) + goto fail_scale_mode_set; + + rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, +- flow_rss->rss_key, +- sizeof(rss->key)); ++ rss_key, sizeof(rss->key)); + if (rc != 0) + goto fail_scale_key_set; ++ } else { ++ efs_rss_context = rss->dummy_rss_context; ++ } + ++ if (flow->rss || flow->spec.rss_hash_required) { + /* + * At this point, fully elaborated filter specifications + * have been produced from the template. To make sure that +@@ -1441,8 +1471,9 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, + efx_filter_spec_t *spec = &flow->spec.filters[i]; + + spec->efs_rss_context = efs_rss_context; +- spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; + spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; ++ if (flow->rss) ++ spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; + } + } + +@@ -1450,7 +1481,12 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, + if (rc != 0) + goto fail_filter_insert; + +- if (flow->rss) { ++ if (create_context) { ++ unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0}; ++ unsigned int *tbl; ++ ++ tbl = flow->rss ? 
flow_rss->rss_tbl : dummy_tbl; ++ + /* + * Scale table is set after filter insertion because + * the table entries are relative to the base RxQ ID +@@ -1460,10 +1496,13 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, + * the table entries, and the operation will succeed + */ + rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, +- flow_rss->rss_tbl, +- RTE_DIM(flow_rss->rss_tbl)); ++ tbl, RTE_DIM(flow_rss->rss_tbl)); + if (rc != 0) + goto fail_scale_tbl_set; ++ ++ /* Remember created dummy RSS context */ ++ if (!flow->rss) ++ rss->dummy_rss_context = efs_rss_context; + } + + return 0; +@@ -1474,7 +1513,7 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, + fail_filter_insert: + fail_scale_key_set: + fail_scale_mode_set: +- if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) ++ if (create_context) + efx_rx_scale_context_free(sa->nic, efs_rss_context); + + fail_scale_context_alloc: +@@ -2473,12 +2512,19 @@ sfc_flow_fini(struct sfc_adapter *sa) + void + sfc_flow_stop(struct sfc_adapter *sa) + { ++ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); ++ struct sfc_rss *rss = &sas->rss; + struct rte_flow *flow; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) + sfc_flow_filter_remove(sa, flow); ++ ++ if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) { ++ efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context); ++ rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT; ++ } + } + + int +diff --git a/dpdk/drivers/net/sfc/sfc_flow.h b/dpdk/drivers/net/sfc/sfc_flow.h +index 71ec18cb95..f59db0a468 100644 +--- a/dpdk/drivers/net/sfc/sfc_flow.h ++++ b/dpdk/drivers/net/sfc/sfc_flow.h +@@ -43,6 +43,8 @@ struct sfc_flow_spec { + efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX]; + /* number of complete specifications */ + unsigned int count; ++ /* RSS hash toggle */ ++ boolean_t rss_hash_required; + }; + + /* PMD-specific definition of the opaque type from rte_flow.h */ diff --git a/dpdk/drivers/net/sfc/sfc_rx.c b/dpdk/drivers/net/sfc/sfc_rx.c -index 74218296cd..891709fd04 100644 +index 74218296cd..9a1c368328 100644 --- a/dpdk/drivers/net/sfc/sfc_rx.c +++ b/dpdk/drivers/net/sfc/sfc_rx.c @@ -719,6 +719,7 @@ sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq) @@ -32206,7 +63011,33 @@ index 74218296cd..891709fd04 100644 fail_rx_qcreate: fail_bad_contig_block_size: -@@ -1403,7 +1407,7 @@ sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa, +@@ -1135,6 +1139,13 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, + rxq_info->refill_threshold = + RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK); + rxq_info->refill_mb_pool = mb_pool; ++ ++ if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 && ++ (offloads & DEV_RX_OFFLOAD_RSS_HASH)) ++ rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH; ++ else ++ rxq_info->rxq_flags = 0; ++ + rxq->buf_size = buf_size; + + rc = sfc_dma_alloc(sa, "rxq", sw_index, +@@ -1150,10 +1161,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, + info.buf_size = buf_size; + info.batch_max = encp->enc_rx_batch_max; + info.prefix_size = encp->enc_rx_prefix_size; +- +- if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0) +- info.flags |= SFC_RXQ_FLAG_RSS_HASH; +- ++ info.flags = rxq_info->rxq_flags; + info.rxq_entries = rxq_info->entries; + info.rxq_hw_ring = rxq->mem.esm_base; + info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index); +@@ -1403,7 +1411,7 @@ sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa, if (conf->rss_key != NULL) { if (conf->rss_key_len != 
sizeof(rss->key)) { @@ -32215,6 +63046,315 @@ index 74218296cd..891709fd04 100644 sizeof(rss->key)); return EINVAL; } +@@ -1557,10 +1565,6 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode) + rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + } + +- if ((offloads_supported & DEV_RX_OFFLOAD_RSS_HASH) && +- (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)) +- rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; +- + return rc; + } + +diff --git a/dpdk/drivers/net/sfc/sfc_rx.h b/dpdk/drivers/net/sfc/sfc_rx.h +index 42b16e2ee6..4c31cfbaa2 100644 +--- a/dpdk/drivers/net/sfc/sfc_rx.h ++++ b/dpdk/drivers/net/sfc/sfc_rx.h +@@ -115,6 +115,7 @@ struct sfc_rxq_info { + boolean_t deferred_started; + unsigned int refill_threshold; + struct rte_mempool *refill_mb_pool; ++ unsigned int rxq_flags; + }; + + struct sfc_rxq_info *sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq); +diff --git a/dpdk/drivers/net/sfc/sfc_tso.c b/dpdk/drivers/net/sfc/sfc_tso.c +index 2e34fc0450..b1949004bb 100644 +--- a/dpdk/drivers/net/sfc/sfc_tso.c ++++ b/dpdk/drivers/net/sfc/sfc_tso.c +@@ -140,6 +140,13 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, + tsoh = rte_pktmbuf_mtod(m, uint8_t *); + } + ++ /* ++ * 8000-series EF10 hardware requires that innermost IP length ++ * be greater than or equal to the value which each segment is ++ * supposed to have; otherwise, TCP checksum will be incorrect. ++ */ ++ sfc_tso_innermost_ip_fix_len(m, tsoh, nh_off); ++ + /* + * Handle IP header. Tx prepare has debug-only checks that offload flags + * are correctly filled in in TSO mbuf. Use zero IPID if there is no +diff --git a/dpdk/drivers/net/sfc/sfc_tso.h b/dpdk/drivers/net/sfc/sfc_tso.h +index ef257519ac..3d1c3e46c3 100644 +--- a/dpdk/drivers/net/sfc/sfc_tso.h ++++ b/dpdk/drivers/net/sfc/sfc_tso.h +@@ -38,6 +38,36 @@ sfc_tso_ip4_get_ipid(const uint8_t *pkt_hdrp, size_t ip_hdr_off) + return rte_be_to_cpu_16(ipid); + } + ++static inline void ++sfc_tso_outer_udp_fix_len(const struct rte_mbuf *m, uint8_t *tsoh) ++{ ++ rte_be16_t len = rte_cpu_to_be_16(m->l2_len + m->l3_len + m->l4_len + ++ m->tso_segsz); ++ ++ rte_memcpy(tsoh + m->outer_l2_len + m->outer_l3_len + ++ offsetof(struct rte_udp_hdr, dgram_len), ++ &len, sizeof(len)); ++} ++ ++static inline void ++sfc_tso_innermost_ip_fix_len(const struct rte_mbuf *m, uint8_t *tsoh, ++ size_t iph_ofst) ++{ ++ size_t ip_payload_len = m->l4_len + m->tso_segsz; ++ size_t field_ofst; ++ rte_be16_t len; ++ ++ if (m->ol_flags & PKT_TX_IPV4) { ++ field_ofst = offsetof(struct rte_ipv4_hdr, total_length); ++ len = rte_cpu_to_be_16(m->l3_len + ip_payload_len); ++ } else { ++ field_ofst = offsetof(struct rte_ipv6_hdr, payload_len); ++ len = rte_cpu_to_be_16(ip_payload_len); ++ } ++ ++ rte_memcpy(tsoh + iph_ofst + field_ofst, &len, sizeof(len)); ++} ++ + unsigned int sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len, + struct rte_mbuf **in_seg, size_t *in_off); + +diff --git a/dpdk/drivers/net/softnic/parser.c b/dpdk/drivers/net/softnic/parser.c +index dc15ec8aa2..ebcb10268a 100644 +--- a/dpdk/drivers/net/softnic/parser.c ++++ b/dpdk/drivers/net/softnic/parser.c +@@ -4,24 +4,6 @@ + * All rights reserved. + */ + +-/* For inet_pton4() and inet_pton6() functions: +- * +- * Copyright (c) 1996 by Internet Software Consortium. +- * +- * Permission to use, copy, modify, and distribute this software for any +- * purpose with or without fee is hereby granted, provided that the above +- * copyright notice and this permission notice appear in all copies. 
+- * +- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS +- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES +- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE +- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +- * SOFTWARE. +- */ +- + #include + #include + #include +@@ -33,6 +15,8 @@ + #include + #include + #include ++#include ++#include + + #include + +@@ -364,170 +348,6 @@ softnic_parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels) + return 0; + } + +-#define INADDRSZ 4 +-#define IN6ADDRSZ 16 +- +-/* int +- * inet_pton4(src, dst) +- * like inet_aton() but without all the hexadecimal and shorthand. +- * return: +- * 1 if `src' is a valid dotted quad, else 0. +- * notice: +- * does not touch `dst' unless it's returning 1. +- * author: +- * Paul Vixie, 1996. +- */ +-static int +-inet_pton4(const char *src, unsigned char *dst) +-{ +- static const char digits[] = "0123456789"; +- int saw_digit, octets, ch; +- unsigned char tmp[INADDRSZ], *tp; +- +- saw_digit = 0; +- octets = 0; +- *(tp = tmp) = 0; +- while ((ch = *src++) != '\0') { +- const char *pch; +- +- pch = strchr(digits, ch); +- if (pch != NULL) { +- unsigned int new = *tp * 10 + (pch - digits); +- +- if (new > 255) +- return 0; +- if (!saw_digit) { +- if (++octets > 4) +- return 0; +- saw_digit = 1; +- } +- *tp = (unsigned char)new; +- } else if (ch == '.' && saw_digit) { +- if (octets == 4) +- return 0; +- *++tp = 0; +- saw_digit = 0; +- } else +- return 0; +- } +- if (octets < 4) +- return 0; +- +- memcpy(dst, tmp, INADDRSZ); +- return 1; +-} +- +-/* int +- * inet_pton6(src, dst) +- * convert presentation level address to network order binary form. +- * return: +- * 1 if `src' is a valid [RFC1884 2.2] address, else 0. +- * notice: +- * (1) does not touch `dst' unless it's returning 1. +- * (2) :: in a full address is silently ignored. +- * credit: +- * inspired by Mark Andrews. +- * author: +- * Paul Vixie, 1996. +- */ +-static int +-inet_pton6(const char *src, unsigned char *dst) +-{ +- static const char xdigits_l[] = "0123456789abcdef", +- xdigits_u[] = "0123456789ABCDEF"; +- unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; +- const char *xdigits = 0, *curtok = 0; +- int ch = 0, saw_xdigit = 0, count_xdigit = 0; +- unsigned int val = 0; +- unsigned int dbloct_count = 0; +- +- memset((tp = tmp), '\0', IN6ADDRSZ); +- endp = tp + IN6ADDRSZ; +- colonp = NULL; +- /* Leading :: requires some special handling. 
*/ +- if (*src == ':') +- if (*++src != ':') +- return 0; +- curtok = src; +- saw_xdigit = count_xdigit = 0; +- val = 0; +- +- while ((ch = *src++) != '\0') { +- const char *pch; +- +- pch = strchr((xdigits = xdigits_l), ch); +- if (pch == NULL) +- pch = strchr((xdigits = xdigits_u), ch); +- if (pch != NULL) { +- if (count_xdigit >= 4) +- return 0; +- val <<= 4; +- val |= (pch - xdigits); +- if (val > 0xffff) +- return 0; +- saw_xdigit = 1; +- count_xdigit++; +- continue; +- } +- if (ch == ':') { +- curtok = src; +- if (!saw_xdigit) { +- if (colonp) +- return 0; +- colonp = tp; +- continue; +- } else if (*src == '\0') { +- return 0; +- } +- if (tp + sizeof(int16_t) > endp) +- return 0; +- *tp++ = (unsigned char)((val >> 8) & 0xff); +- *tp++ = (unsigned char)(val & 0xff); +- saw_xdigit = 0; +- count_xdigit = 0; +- val = 0; +- dbloct_count++; +- continue; +- } +- if (ch == '.' && ((tp + INADDRSZ) <= endp) && +- inet_pton4(curtok, tp) > 0) { +- tp += INADDRSZ; +- saw_xdigit = 0; +- dbloct_count += 2; +- break; /* '\0' was seen by inet_pton4(). */ +- } +- return 0; +- } +- if (saw_xdigit) { +- if (tp + sizeof(int16_t) > endp) +- return 0; +- *tp++ = (unsigned char)((val >> 8) & 0xff); +- *tp++ = (unsigned char)(val & 0xff); +- dbloct_count++; +- } +- if (colonp != NULL) { +- /* if we already have 8 double octets, having a colon means error */ +- if (dbloct_count == 8) +- return 0; +- +- /* Since some memmove()'s erroneously fail to handle +- * overlapping regions, we'll do the shift by hand. +- */ +- const int n = tp - colonp; +- int i; +- +- for (i = 1; i <= n; i++) { +- endp[-i] = colonp[n - i]; +- colonp[n - i] = 0; +- } +- tp = endp; +- } +- if (tp != endp) +- return 0; +- memcpy(dst, tmp, IN6ADDRSZ); +- return 1; +-} +- + static struct rte_ether_addr * + my_ether_aton(const char *a) + { +@@ -577,7 +397,7 @@ softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4) + if (strlen(token) >= INET_ADDRSTRLEN) + return -EINVAL; + +- if (inet_pton4(token, (unsigned char *)ipv4) != 1) ++ if (inet_pton(AF_INET, token, ipv4) != 1) + return -EINVAL; + + return 0; +@@ -589,7 +409,7 @@ softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6) + if (strlen(token) >= INET6_ADDRSTRLEN) + return -EINVAL; + +- if (inet_pton6(token, (unsigned char *)ipv6) != 1) ++ if (inet_pton(AF_INET6, token, ipv6) != 1) + return -EINVAL; + + return 0; diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c b/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c index d610b1617e..dcfb5eb82c 100644 --- a/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c @@ -32383,8 +63523,21 @@ index d610b1617e..dcfb5eb82c 100644 /* Read response */ status = rsp->status; +diff --git a/dpdk/drivers/net/szedata2/meson.build b/dpdk/drivers/net/szedata2/meson.build +index b53fcbc591..77a5b0ed80 100644 +--- a/dpdk/drivers/net/szedata2/meson.build ++++ b/dpdk/drivers/net/szedata2/meson.build +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2018 Intel Corporation + +-dep = dependency('libsze2', required: false) ++dep = dependency('libsze2', required: false, method: 'pkg-config') + build = dep.found() + reason = 'missing dependency, "libsze2"' + ext_deps += dep diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c -index a13d8d50d7..7081ae23e9 100644 +index a13d8d50d7..cfbd579cd6 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c @@ -18,8 +18,8 @@ @@ -32577,7 +63730,17 @@ index a13d8d50d7..7081ae23e9 100644 } if 
(process_private->txq_fds[i] != -1) { close(process_private->txq_fds[i]); -@@ -1054,10 +1089,10 @@ tap_rx_queue_release(void *queue) +@@ -1035,6 +1070,9 @@ tap_dev_close(struct rte_eth_dev *dev) + &internals->remote_initial_flags); + } + ++ rte_mempool_free(internals->gso_ctx_mp); ++ internals->gso_ctx_mp = NULL; ++ + if (internals->ka_fd != -1) { + close(internals->ka_fd); + internals->ka_fd = -1; +@@ -1054,10 +1092,10 @@ tap_rx_queue_release(void *queue) if (!rxq) return; process_private = rte_eth_devices[rxq->in_port].process_private; @@ -32590,7 +63753,7 @@ index a13d8d50d7..7081ae23e9 100644 rte_free(rxq->iovecs); rxq->pool = NULL; rxq->iovecs = NULL; -@@ -1074,7 +1109,7 @@ tap_tx_queue_release(void *queue) +@@ -1074,7 +1112,7 @@ tap_tx_queue_release(void *queue) return; process_private = rte_eth_devices[txq->out_port].process_private; @@ -32599,18 +63762,65 @@ index a13d8d50d7..7081ae23e9 100644 close(process_private->txq_fds[txq->queue_id]); process_private->txq_fds[txq->queue_id] = -1; } -@@ -1301,7 +1336,9 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev) +@@ -1282,33 +1320,40 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev) + { + uint32_t gso_types; + char pool_name[64]; +- +- /* +- * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes +- * size per mbuf use this pool for both direct and indirect mbufs +- */ +- +- struct rte_mempool *mp; /* Mempool for GSO packets */ ++ struct pmd_internals *pmd = dev->data->dev_private; ++ int ret; + + /* initialize GSO context */ + gso_types = DEV_TX_OFFLOAD_TCP_TSO; +- snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name); +- mp = rte_mempool_lookup((const char *)pool_name); +- if (!mp) { +- mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM, +- TAP_GSO_MBUF_CACHE_SIZE, 0, ++ if (!pmd->gso_ctx_mp) { ++ /* ++ * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE ++ * bytes size per mbuf use this pool for both direct and ++ * indirect mbufs ++ */ ++ ret = snprintf(pool_name, sizeof(pool_name), "mp_%s", ++ dev->device->name); ++ if (ret < 0 || ret >= (int)sizeof(pool_name)) { ++ TAP_LOG(ERR, ++ "%s: failed to create mbuf pool name for device %s," ++ "device name too long or output error, ret: %d\n", ++ pmd->name, dev->device->name, ret); ++ return -ENAMETOOLONG; ++ } ++ pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name, ++ TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0, + RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE, SOCKET_ID_ANY); - if (!mp) { - struct pmd_internals *pmd = dev->data->dev_private; +- if (!mp) { +- struct pmd_internals *pmd = dev->data->dev_private; - RTE_LOG(DEBUG, PMD, "%s: failed to create mbuf pool for device %s\n", -+ ++ if (!pmd->gso_ctx_mp) { + TAP_LOG(ERR, + "%s: failed to create mbuf pool for device %s\n", pmd->name, dev->device->name); return -1; } -@@ -1465,7 +1502,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, + } + +- gso_ctx->direct_pool = mp; +- gso_ctx->indirect_pool = mp; ++ gso_ctx->direct_pool = pmd->gso_ctx_mp; ++ gso_ctx->indirect_pool = pmd->gso_ctx_mp; + gso_ctx->gso_types = gso_types; + gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */ + gso_ctx->flag = 0; +@@ -1465,7 +1510,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, return 0; error: @@ -32619,7 +63829,7 @@ index a13d8d50d7..7081ae23e9 100644 rxq->pool = NULL; rte_free(rxq->iovecs); rxq->iovecs = NULL; -@@ -1563,13 +1600,12 @@ static int +@@ -1563,13 +1608,12 @@ static int tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set) { struct pmd_internals 
*pmd = dev->data->dev_private; @@ -32635,7 +63845,7 @@ index a13d8d50d7..7081ae23e9 100644 } return 0; } -@@ -1580,9 +1616,26 @@ tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set) +@@ -1580,9 +1624,26 @@ tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set) return rte_intr_callback_register( &pmd->intr_handle, tap_dev_intr_handler, dev); } @@ -32664,7 +63874,7 @@ index a13d8d50d7..7081ae23e9 100644 } static int -@@ -1591,8 +1644,11 @@ tap_intr_handle_set(struct rte_eth_dev *dev, int set) +@@ -1591,8 +1652,11 @@ tap_intr_handle_set(struct rte_eth_dev *dev, int set) int err; err = tap_lsc_intr_handle_set(dev, set); @@ -32677,16 +63887,17 @@ index a13d8d50d7..7081ae23e9 100644 err = tap_rx_intr_vec_set(dev, set); if (err && set) tap_lsc_intr_handle_set(dev, 0); -@@ -1784,6 +1840,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, +@@ -1784,6 +1848,9 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, pmd->dev = dev; strlcpy(pmd->name, tap_name, sizeof(pmd->name)); pmd->type = type; + pmd->ka_fd = -1; + pmd->nlsk_fd = -1; ++ pmd->gso_ctx_mp = NULL; pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0); if (pmd->ioctl_sock == -1) { -@@ -1814,7 +1872,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, +@@ -1814,7 +1881,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, dev->intr_handle = &pmd->intr_handle; /* Presetup the fds to -1 as being not valid */ @@ -32694,7 +63905,7 @@ index a13d8d50d7..7081ae23e9 100644 for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { process_private->rxq_fds[i] = -1; process_private->txq_fds[i] = -1; -@@ -1954,7 +2011,11 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, +@@ -1954,7 +2020,11 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, tap_flow_implicit_flush(pmd, NULL); error_exit: @@ -32707,7 +63918,7 @@ index a13d8d50d7..7081ae23e9 100644 close(pmd->ioctl_sock); /* mac_addrs must not be freed alone because part of dev_private */ dev->data->mac_addrs = NULL; -@@ -2386,8 +2447,6 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) +@@ -2386,8 +2456,6 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) { struct rte_eth_dev *eth_dev = NULL; struct pmd_internals *internals; @@ -32716,7 +63927,7 @@ index a13d8d50d7..7081ae23e9 100644 /* find the ethdev entry */ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev)); -@@ -2400,28 +2459,12 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) +@@ -2400,28 +2468,12 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return rte_eth_dev_release_port(eth_dev); @@ -32747,7 +63958,7 @@ index a13d8d50d7..7081ae23e9 100644 close(internals->ioctl_sock); rte_free(eth_dev->process_private); if (tap_devices_count == 1) -@@ -2429,10 +2472,6 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) +@@ -2429,10 +2481,6 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) tap_devices_count--; rte_eth_dev_release_port(eth_dev); @@ -32758,6 +63969,18 @@ index a13d8d50d7..7081ae23e9 100644 return 0; } +diff --git a/dpdk/drivers/net/tap/rte_eth_tap.h b/dpdk/drivers/net/tap/rte_eth_tap.h +index 8d6d53dc0a..ba45de8409 100644 +--- a/dpdk/drivers/net/tap/rte_eth_tap.h ++++ b/dpdk/drivers/net/tap/rte_eth_tap.h +@@ -91,6 +91,7 @@ struct pmd_internals { + struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */ + struct rte_intr_handle intr_handle; /* LSC interrupt handle. 
*/ + int ka_fd; /* keep-alive file descriptor */ ++ struct rte_mempool *gso_ctx_mp; /* Mempool for GSO packets */ + }; + + struct pmd_process_private { diff --git a/dpdk/drivers/net/tap/tap_flow.c b/dpdk/drivers/net/tap/tap_flow.c index 9d90361d99..1538349e9c 100644 --- a/dpdk/drivers/net/tap/tap_flow.c @@ -32819,10 +64042,31 @@ index 7af0010e37..5cf4f173a0 100644 /* Use invalid intr_vec[] index to disable entry. */ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + +diff --git a/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h b/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h +index b12c8ec50a..adc8ec943d 100644 +--- a/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h ++++ b/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h +@@ -176,6 +176,7 @@ + #define NIC_HW_MAX_MTU (9190) + #define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD) + #define NIC_HW_MAX_SEGS (12) ++#define NIC_HW_L2_MAX_LEN (RTE_ETHER_MTU + NIC_HW_L2_OVERHEAD) + + /* Descriptor alignments */ + #define NICVF_RBDR_BASE_ALIGN_BYTES (128) /* 7 bits */ diff --git a/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/dpdk/drivers/net/thunderx/nicvf_ethdev.c -index 2cf0ffe13b..26191586f7 100644 +index 2cf0ffe13b..0fba26ac45 100644 --- a/dpdk/drivers/net/thunderx/nicvf_ethdev.c +++ b/dpdk/drivers/net/thunderx/nicvf_ethdev.c +@@ -191,7 +191,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS)) + return -EINVAL; + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > NIC_HW_L2_MAX_LEN) + rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -496,9 +496,10 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev, int ret, i, j; @@ -32861,11 +64105,89 @@ index 2cf0ffe13b..26191586f7 100644 return -EINVAL; } +@@ -652,6 +653,7 @@ nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic, + NICVF_RBDR_BASE_ALIGN_BYTES, nic->node); + if (rz == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring"); ++ rte_free(rbdr); + return -ENOMEM; + } + +diff --git a/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c b/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c +index be8f19c0c6..48081b9146 100644 +--- a/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c ++++ b/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c +@@ -673,6 +673,7 @@ vdev_netvsc_vdev_probe(struct rte_vdev_device *dev) + int ret; + + DRV_LOG(DEBUG, "invoked as \"%s\", using arguments \"%s\"", name, args); ++ rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL); + if (!kvargs) { + DRV_LOG(ERR, "cannot parse arguments list"); + goto error; +@@ -688,17 +689,13 @@ vdev_netvsc_vdev_probe(struct rte_vdev_device *dev) + !strcmp(pair->key, VDEV_NETVSC_ARG_MAC)) + ++specified; + } +- if (ignore) { +- if (kvargs) +- rte_kvargs_free(kvargs); +- return 0; +- } ++ if (ignore) ++ goto ignore; + if (specified > 1) { + DRV_LOG(ERR, "More than one way used to specify the netvsc" + " device."); + goto error; + } +- rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL); + /* Gather interfaces. 
*/ + ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 1, name, + kvargs, specified, &matched); +@@ -719,17 +716,19 @@ vdev_netvsc_vdev_probe(struct rte_vdev_device *dev) + } + DRV_LOG(WARNING, "non-netvsc device was probed as netvsc"); + } +- ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000, +- vdev_netvsc_alarm, NULL); +- if (ret < 0) { +- DRV_LOG(ERR, "unable to schedule alarm callback: %s", +- rte_strerror(-ret)); +- goto error; +- } + error: ++ ++vdev_netvsc_ctx_inst; ++ignore: + if (kvargs) + rte_kvargs_free(kvargs); +- ++vdev_netvsc_ctx_inst; ++ /* Reset alarm if there are device context created */ ++ if (vdev_netvsc_ctx_count) { ++ ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000, ++ vdev_netvsc_alarm, NULL); ++ if (ret < 0) ++ DRV_LOG(ERR, "unable to schedule alarm callback: %s", ++ rte_strerror(-ret)); ++ } + return 0; + } + diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c -index 46f01a7f46..85f91f0b9d 100644 +index 46f01a7f46..323efb3c07 100644 --- a/dpdk/drivers/net/vhost/rte_eth_vhost.c +++ b/dpdk/drivers/net/vhost/rte_eth_vhost.c -@@ -97,6 +97,8 @@ struct pmd_internal { +@@ -68,6 +68,9 @@ enum vhost_xstats_pkts { + VHOST_BROADCAST_PKT, + VHOST_MULTICAST_PKT, + VHOST_UNICAST_PKT, ++ VHOST_PKT, ++ VHOST_BYTE, ++ VHOST_MISSED_PKT, + VHOST_ERRORS_PKT, + VHOST_ERRORS_FRAGMENTED, + VHOST_ERRORS_JABBER, +@@ -97,6 +100,8 @@ struct pmd_internal { rte_atomic32_t dev_attached; char *dev_name; char *iface_name; @@ -32874,6 +64196,172 @@ index 46f01a7f46..85f91f0b9d 100644 uint16_t max_queues; int vid; rte_atomic32_t started; +@@ -141,11 +146,11 @@ struct vhost_xstats_name_off { + /* [rx]_is prepended to the name string here */ + static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = { + {"good_packets", +- offsetof(struct vhost_queue, stats.pkts)}, ++ offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])}, + {"total_bytes", +- offsetof(struct vhost_queue, stats.bytes)}, ++ offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])}, + {"missed_pkts", +- offsetof(struct vhost_queue, stats.missed_pkts)}, ++ offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])}, + {"broadcast_packets", + offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])}, + {"multicast_packets", +@@ -181,11 +186,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = { + /* [tx]_ is prepended to the name string here */ + static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = { + {"good_packets", +- offsetof(struct vhost_queue, stats.pkts)}, ++ offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])}, + {"total_bytes", +- offsetof(struct vhost_queue, stats.bytes)}, ++ offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])}, + {"missed_pkts", +- offsetof(struct vhost_queue, stats.missed_pkts)}, ++ offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])}, + {"broadcast_packets", + offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])}, + {"multicast_packets", +@@ -279,23 +284,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + if (n < nxstats) + return nxstats; + +- for (i = 0; i < dev->data->nb_rx_queues; i++) { +- vq = dev->data->rx_queues[i]; +- if (!vq) +- continue; +- vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts +- - (vq->stats.xstats[VHOST_BROADCAST_PKT] +- + vq->stats.xstats[VHOST_MULTICAST_PKT]); +- } +- for (i = 0; i < dev->data->nb_tx_queues; i++) { +- vq = dev->data->tx_queues[i]; +- if (!vq) +- continue; +- 
vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts +- + vq->stats.missed_pkts +- - (vq->stats.xstats[VHOST_BROADCAST_PKT] +- + vq->stats.xstats[VHOST_MULTICAST_PKT]); +- } + for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) { + xstats[count].value = 0; + for (i = 0; i < dev->data->nb_rx_queues; i++) { +@@ -326,7 +314,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + } + + static inline void +-vhost_count_multicast_broadcast(struct vhost_queue *vq, ++vhost_count_xcast_packets(struct vhost_queue *vq, + struct rte_mbuf *mbuf) + { + struct rte_ether_addr *ea = NULL; +@@ -338,20 +326,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq, + pstats->xstats[VHOST_BROADCAST_PKT]++; + else + pstats->xstats[VHOST_MULTICAST_PKT]++; ++ } else { ++ pstats->xstats[VHOST_UNICAST_PKT]++; + } + } + + static void +-vhost_update_packet_xstats(struct vhost_queue *vq, +- struct rte_mbuf **bufs, +- uint16_t count) ++vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs, ++ uint16_t count, uint64_t nb_bytes, ++ uint64_t nb_missed) + { + uint32_t pkt_len = 0; + uint64_t i = 0; + uint64_t index; + struct vhost_stats *pstats = &vq->stats; + ++ pstats->xstats[VHOST_BYTE] += nb_bytes; ++ pstats->xstats[VHOST_MISSED_PKT] += nb_missed; ++ pstats->xstats[VHOST_UNICAST_PKT] += nb_missed; ++ + for (i = 0; i < count ; i++) { ++ pstats->xstats[VHOST_PKT]++; + pkt_len = bufs[i]->pkt_len; + if (pkt_len == 64) { + pstats->xstats[VHOST_64_PKT]++; +@@ -367,7 +362,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq, + else if (pkt_len > 1522) + pstats->xstats[VHOST_1523_TO_MAX_PKT]++; + } +- vhost_count_multicast_broadcast(vq, bufs[i]); ++ vhost_count_xcast_packets(vq, bufs[i]); + } + } + +@@ -377,6 +372,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + struct vhost_queue *r = q; + uint16_t i, nb_rx = 0; + uint16_t nb_receive = nb_bufs; ++ uint64_t nb_bytes = 0; + + if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + return 0; +@@ -411,10 +407,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + if (r->internal->vlan_strip) + rte_vlan_strip(bufs[i]); + +- r->stats.bytes += bufs[i]->pkt_len; ++ nb_bytes += bufs[i]->pkt_len; + } + +- vhost_update_packet_xstats(r, bufs, nb_rx); ++ r->stats.bytes += nb_bytes; ++ vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0); + + out: + rte_atomic32_set(&r->while_queuing, 0); +@@ -428,6 +425,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + struct vhost_queue *r = q; + uint16_t i, nb_tx = 0; + uint16_t nb_send = 0; ++ uint64_t nb_bytes = 0; ++ uint64_t nb_missed = 0; + + if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + return 0; +@@ -468,20 +467,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + break; + } + ++ for (i = 0; likely(i < nb_tx); i++) ++ nb_bytes += bufs[i]->pkt_len; ++ ++ nb_missed = nb_bufs - nb_tx; ++ + r->stats.pkts += nb_tx; ++ r->stats.bytes += nb_bytes; + r->stats.missed_pkts += nb_bufs - nb_tx; + +- for (i = 0; likely(i < nb_tx); i++) +- r->stats.bytes += bufs[i]->pkt_len; +- +- vhost_update_packet_xstats(r, bufs, nb_tx); ++ vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed); + +- /* According to RFC2863 page42 section ifHCOutMulticastPkts and +- * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast" +- * are increased when packets are not transmitted successfully. 
++ /* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and ++ * ifHCOutBroadcastPkts counters are increased when packets are not ++ * transmitted successfully. + */ + for (i = nb_tx; i < nb_bufs; i++) +- vhost_count_multicast_broadcast(r, bufs[i]); ++ vhost_count_xcast_packets(r, bufs[i]); + + for (i = 0; likely(i < nb_tx); i++) + rte_pktmbuf_free(bufs[i]); @@ -491,17 +493,6 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) return nb_tx; } @@ -33149,7 +64637,7 @@ index 46f01a7f46..85f91f0b9d 100644 out_free: rte_kvargs_free(kvlist); diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c -index 044eb10a70..35203940a7 100644 +index 044eb10a70..8a107ebf9e 100644 --- a/dpdk/drivers/net/virtio/virtio_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_ethdev.c @@ -466,7 +466,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) @@ -33172,7 +64660,36 @@ index 044eb10a70..35203940a7 100644 * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information. */ if (!hw->virtio_user_dev) -@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) +@@ -610,10 +610,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + txr = hdr_mz->addr; + memset(txr, 0, vq_size * sizeof(*txr)); + for (i = 0; i < vq_size; i++) { +- struct vring_desc *start_dp = txr[i].tx_indir; +- + /* first indirect descriptor is always the tx header */ + if (!vtpci_packed_queue(hw)) { ++ struct vring_desc *start_dp = txr[i].tx_indir; + vring_desc_init_split(start_dp, + RTE_DIM(txr[i].tx_indir)); + start_dp->addr = txvq->virtio_net_hdr_mem +@@ -622,6 +621,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + tx_hdr); + start_dp->len = hw->vtnet_hdr_size; + start_dp->flags = VRING_DESC_F_NEXT; ++ } else { ++ struct vring_packed_desc *start_dp = ++ txr[i].tx_packed_indir; ++ vring_desc_init_indirect_packed(start_dp, ++ RTE_DIM(txr[i].tx_packed_indir)); ++ start_dp->addr = txvq->virtio_net_hdr_mem ++ + i * sizeof(*txr) ++ + offsetof(struct virtio_tx_region, ++ tx_hdr); ++ start_dp->len = hw->vtnet_hdr_size; + } + } + } +@@ -1913,6 +1922,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) goto err_vtpci_init; } @@ -33181,7 +64698,7 @@ index 044eb10a70..35203940a7 100644 /* reset device and negotiate default features */ ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES); if (ret < 0) -@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev) +@@ -2155,8 +2166,6 @@ virtio_dev_configure(struct rte_eth_dev *dev) return -EBUSY; } @@ -33191,7 +64708,7 @@ index 044eb10a70..35203940a7 100644 if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) { diff --git a/dpdk/drivers/net/virtio/virtio_rxtx.c b/dpdk/drivers/net/virtio/virtio_rxtx.c -index 752faa0f6e..060410577a 100644 +index 752faa0f6e..5211736d29 100644 --- a/dpdk/drivers/net/virtio/virtio_rxtx.c +++ b/dpdk/drivers/net/virtio/virtio_rxtx.c @@ -1085,7 +1085,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, @@ -33221,6 +64738,19 @@ index 752faa0f6e..060410577a 100644 rte_pktmbuf_free(m); } } +@@ -1184,9 +1184,10 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr) + */ + uint16_t csum = 0, off; + +- rte_raw_cksum_mbuf(m, hdr->csum_start, ++ if (rte_raw_cksum_mbuf(m, hdr->csum_start, + rte_pktmbuf_pkt_len(m) - hdr->csum_start, +- &csum); ++ &csum) < 0) ++ return -EINVAL; + if (likely(csum != 0xffff)) + csum = ~csum; + off = hdr->csum_offset + hdr->csum_start; diff --git 
a/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c b/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c index 47225f4121..003b6ec3f6 100644 --- a/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c @@ -33298,7 +64828,7 @@ index 5c81e8dd9f..2c805077af 100644 } diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c -index 76bf75423e..2fa4f0d661 100644 +index 76bf75423e..79b8446f8e 100644 --- a/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c +++ b/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c @@ -18,7 +18,7 @@ @@ -33322,7 +64852,7 @@ index 76bf75423e..2fa4f0d661 100644 - } + /* Check if our kernel supports TUNSETOFFLOAD */ + if (ioctl(fd, TUNSETOFFLOAD, 0) != 0 && errno == EINVAL) { -+ PMD_DRV_LOG(ERR, "Kernel does't support TUNSETOFFLOAD\n"); ++ PMD_DRV_LOG(ERR, "Kernel doesn't support TUNSETOFFLOAD\n"); + return -ENOTSUP; + } @@ -33365,7 +64895,19 @@ index 76bf75423e..2fa4f0d661 100644 /* TODO: * 1. verify we can get/set vnet_hdr_len, tap_probe_vnet_hdr_len -@@ -131,7 +140,9 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq, +@@ -119,7 +128,10 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq, + goto error; + } + +- fcntl(tapfd, F_SETFL, O_NONBLOCK); ++ if (fcntl(tapfd, F_SETFL, O_NONBLOCK) < 0) { ++ PMD_DRV_LOG(ERR, "fcntl tapfd failed: %s", strerror(errno)); ++ goto error; ++ } + + if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdr_size) < 0) { + PMD_DRV_LOG(ERR, "TUNSETVNETHDRSZ failed: %s", strerror(errno)); +@@ -131,7 +143,9 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq, goto error; } @@ -33422,10 +64964,27 @@ index a4b5c25cd3..d8e083ba8b 100644 } diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c -index ea016e85d8..1c6b26f8d3 100644 +index ea016e85d8..ad5d2f3bf8 100644 --- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c +++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c -@@ -537,7 +537,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev) +@@ -263,6 +263,7 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev) + } + kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (kickfd < 0) { ++ close(callfd); + PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno)); + break; + } +@@ -271,7 +272,7 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev) + } + + if (i < VIRTIO_MAX_VIRTQUEUES) { +- for (j = 0; j <= i; ++j) { ++ for (j = 0; j < i; ++j) { + close(dev->callfds[j]); + close(dev->kickfds[j]); + } +@@ -537,7 +538,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev) close(dev->kickfds[i]); } @@ -33435,7 +64994,7 @@ index ea016e85d8..1c6b26f8d3 100644 if (dev->is_server && dev->listenfd >= 0) { close(dev->listenfd); -@@ -545,8 +546,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev) +@@ -545,8 +547,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev) } if (dev->vhostfds) { @@ -33449,10 +65008,18 @@ index ea016e85d8..1c6b26f8d3 100644 free(dev->tapfds); } diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h -index ad86837717..3b6b6065a5 100644 +index ad86837717..8937124d92 100644 --- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h +++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h -@@ -49,6 +49,7 @@ struct virtio_user_dev { +@@ -41,6 +41,7 @@ struct virtio_user_dev { + uint64_t frontend_features; /* enabled frontend features */ + uint64_t 
unsupported_features; /* unsupported features mask */ + uint8_t status; ++ uint16_t net_status; + uint16_t port_id; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + char path[PATH_MAX]; +@@ -49,6 +50,7 @@ struct virtio_user_dev { struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES]; }; struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES]; @@ -33461,7 +65028,7 @@ index ad86837717..3b6b6065a5 100644 struct virtio_user_backend_ops *ops; pthread_mutex_t mutex; diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c -index 3fc1725736..e2cbd2478d 100644 +index 3fc1725736..4a35fa1c95 100644 --- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c @@ -13,6 +13,7 @@ @@ -33536,6 +65103,40 @@ index 3fc1725736..e2cbd2478d 100644 ret = virtio_user_start_device(dev); if (ret < 0) return -1; +@@ -140,7 +185,7 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset, + } + r = recv(dev->vhostfd, buf, 128, MSG_PEEK); + if (r == 0 || (r < 0 && errno != EAGAIN)) { +- dev->status &= (~VIRTIO_NET_S_LINK_UP); ++ dev->net_status &= (~VIRTIO_NET_S_LINK_UP); + PMD_DRV_LOG(ERR, "virtio-user port %u is down", + hw->port_id); + +@@ -152,7 +197,7 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset, + virtio_user_delayed_handler, + (void *)hw); + } else { +- dev->status |= VIRTIO_NET_S_LINK_UP; ++ dev->net_status |= VIRTIO_NET_S_LINK_UP; + } + if (fcntl(dev->vhostfd, F_SETFL, + flags & ~O_NONBLOCK) == -1) { +@@ -160,12 +205,12 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset, + return; + } + } else if (dev->is_server) { +- dev->status &= (~VIRTIO_NET_S_LINK_UP); ++ dev->net_status &= (~VIRTIO_NET_S_LINK_UP); + if (virtio_user_server_reconnect(dev) >= 0) +- dev->status |= VIRTIO_NET_S_LINK_UP; ++ dev->net_status |= VIRTIO_NET_S_LINK_UP; + } + +- *(uint16_t *)dst = dev->status; ++ *(uint16_t *)dst = dev->net_status; + } + + if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs)) @@ -433,12 +478,17 @@ static int get_integer_arg(const char *key __rte_unused, const char *value, void *extra_args) @@ -33658,10 +65259,41 @@ index 5ff1e3587e..02c8b9fc54 100644 + return 0; +} diff --git a/dpdk/drivers/net/virtio/virtqueue.h b/dpdk/drivers/net/virtio/virtqueue.h -index 8d7f197b13..58ad7309ae 100644 +index 8d7f197b13..e901249601 100644 --- a/dpdk/drivers/net/virtio/virtqueue.h +++ b/dpdk/drivers/net/virtio/virtqueue.h -@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq); +@@ -324,8 +324,11 @@ struct virtio_net_hdr_mrg_rxbuf { + #define VIRTIO_MAX_TX_INDIRECT 8 + struct virtio_tx_region { + struct virtio_net_hdr_mrg_rxbuf tx_hdr; +- struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT] +- __attribute__((__aligned__(16))); ++ union { ++ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]; ++ struct vring_packed_desc ++ tx_packed_indir[VIRTIO_MAX_TX_INDIRECT]; ++ } __attribute__((__aligned__(16))); + }; + + static inline int +@@ -363,6 +366,16 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n) + dp[i].next = VQ_RING_DESC_CHAIN_END; + } + ++static inline void ++vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n) ++{ ++ int i; ++ for (i = 0; i < n; i++) { ++ dp[i].id = (uint16_t)i; ++ dp[i].flags = VRING_DESC_F_WRITE; ++ } ++} ++ + /** + * Tell the backend not to interrupt us. Implementation for packed virtqueues. 
+ */ +@@ -443,6 +456,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq); /* Flush the elements in the used ring. */ void virtqueue_rxvq_flush(struct virtqueue *vq); @@ -33739,19 +65371,309 @@ index 7794d74214..73e270f30f 100644 rss_hf = port_rss_conf->rss_hf & (VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL); +diff --git a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c +index c905954004..b31d7aacb3 100644 +--- a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c ++++ b/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c +@@ -455,9 +455,10 @@ rte_qdma_reset(void) + /* In case there are pending jobs on any VQ, return -EBUSY */ + for (i = 0; i < qdma_dev.max_vqs; i++) { + if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues != +- qdma_vqs[i].num_dequeues)) ++ qdma_vqs[i].num_dequeues)) { + DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i); + return -EBUSY; ++ } + } + + /* Reset HW queues */ +diff --git a/dpdk/drivers/raw/ifpga/base/ifpga_fme.c b/dpdk/drivers/raw/ifpga/base/ifpga_fme.c +index c31a94cf80..9057087b55 100644 +--- a/dpdk/drivers/raw/ifpga/base/ifpga_fme.c ++++ b/dpdk/drivers/raw/ifpga/base/ifpga_fme.c +@@ -979,28 +979,32 @@ struct ifpga_feature_ops fme_spi_master_ops = { + static int nios_spi_wait_init_done(struct altera_spi_device *dev) + { + u32 val = 0; +- unsigned long timeout = msecs_to_timer_cycles(10000); ++ unsigned long timeout = rte_get_timer_cycles() + ++ msecs_to_timer_cycles(10000); + unsigned long ticks; + int major_version; ++ int fecmode = FEC_MODE_NO; + + if (spi_reg_read(dev, NIOS_VERSION, &val)) + return -EIO; + +- major_version = (val >> NIOS_VERSION_MAJOR_SHIFT) & +- NIOS_VERSION_MAJOR; +- dev_debug(dev, "A10 NIOS FW version %d\n", major_version); ++ major_version = ++ (val & NIOS_VERSION_MAJOR) >> NIOS_VERSION_MAJOR_SHIFT; ++ dev_info(dev, "A10 NIOS FW version %d\n", major_version); + + if (major_version >= 3) { + /* read NIOS_INIT to check if PKVL INIT done or not */ + if (spi_reg_read(dev, NIOS_INIT, &val)) + return -EIO; + ++ dev_debug(dev, "read NIOS_INIT: 0x%x\n", val); ++ + /* check if PKVLs are initialized already */ + if (val & NIOS_INIT_DONE || val & NIOS_INIT_START) + goto nios_init_done; + + /* start to config the default FEC mode */ +- val = NIOS_INIT_START; ++ val = fecmode | NIOS_INIT_START; + + if (spi_reg_write(dev, NIOS_INIT, val)) + return -EIO; +@@ -1010,14 +1014,23 @@ static int nios_spi_wait_init_done(struct altera_spi_device *dev) + do { + if (spi_reg_read(dev, NIOS_INIT, &val)) + return -EIO; +- if (val) ++ if (val & NIOS_INIT_DONE) + break; + + ticks = rte_get_timer_cycles(); + if (time_after(ticks, timeout)) + return -ETIMEDOUT; + msleep(100); +- } while (!val); ++ } while (1); ++ ++ /* get the fecmode */ ++ if (spi_reg_read(dev, NIOS_INIT, &val)) ++ return -EIO; ++ dev_debug(dev, "read NIOS_INIT: 0x%x\n", val); ++ fecmode = (val & REQ_FEC_MODE) >> REQ_FEC_MODE_SHIFT; ++ dev_info(dev, "fecmode: 0x%x, %s\n", fecmode, ++ (fecmode == FEC_MODE_KR) ? "kr" : ++ ((fecmode == FEC_MODE_RS) ? 
"rs" : "no")); + + return 0; + } +diff --git a/dpdk/drivers/raw/ifpga/base/opae_spi.h b/dpdk/drivers/raw/ifpga/base/opae_spi.h +index d20a4c3edd..73a2276739 100644 +--- a/dpdk/drivers/raw/ifpga/base/opae_spi.h ++++ b/dpdk/drivers/raw/ifpga/base/opae_spi.h +@@ -153,6 +153,7 @@ int spi_reg_read(struct altera_spi_device *dev, u32 reg, u32 *val); + + #define NIOS_INIT 0x1000 + #define REQ_FEC_MODE GENMASK(23, 8) ++#define REQ_FEC_MODE_SHIFT 8 + #define FEC_MODE_NO 0x0 + #define FEC_MODE_KR 0x5555 + #define FEC_MODE_RS 0xaaaa +diff --git a/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c b/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c +index 013efee3e6..d13d2fbc83 100644 +--- a/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c ++++ b/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c +@@ -166,7 +166,7 @@ static int byte_to_core_convert(struct spi_transaction_dev *dev, + current_byte = send_data[i]; + switch (current_byte) { + case SPI_BYTE_IDLE: +- *p++ = SPI_BYTE_IDLE; ++ *p++ = SPI_BYTE_ESC; + *p++ = xor_20(current_byte); + break; + case SPI_BYTE_ESC: +diff --git a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c +index b8701e155b..0c5392d082 100644 +--- a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c ++++ b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c +@@ -237,8 +237,9 @@ static int ifpga_rawdev_fill_info(struct ifpga_rawdev *ifpga_dev, + memset(link, 0, sizeof(link)); + memset(link1, 0, sizeof(link1)); + ret = readlink(path, link, (sizeof(link)-1)); +- if (ret == -1) ++ if ((ret < 0) || ((unsigned int)ret > (sizeof(link)-1))) + return -1; ++ link[ret] = 0; /* terminate string with null character */ + strlcpy(link1, link, sizeof(link1)); + memset(ifpga_dev->parent_bdf, 0, 16); + point = strlen(link); +@@ -779,7 +780,7 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id, + int file_fd; + int ret = 0; + ssize_t buffer_size; +- void *buffer; ++ void *buffer, *buf_to_free; + u64 pr_error; + + if (!file_name) +@@ -811,6 +812,7 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id, + ret = -ENOMEM; + goto close_fd; + } ++ buf_to_free = buffer; + + /*read the raw data*/ + if (buffer_size != read(file_fd, (void *)buffer, buffer_size)) { +@@ -828,8 +830,8 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id, + } + + free_buffer: +- if (buffer) +- rte_free(buffer); ++ if (buf_to_free) ++ rte_free(buf_to_free); + close_fd: + close(file_fd); + file_fd = 0; +@@ -1336,17 +1338,18 @@ int + ifpga_unregister_msix_irq(enum ifpga_irq_type type, + int vec_start, rte_intr_callback_fn handler, void *arg) + { +- struct rte_intr_handle intr_handle; ++ struct rte_intr_handle *intr_handle; + + if (type == IFPGA_FME_IRQ) +- intr_handle = ifpga_irq_handle[0]; ++ intr_handle = &ifpga_irq_handle[0]; + else if (type == IFPGA_AFU_IRQ) +- intr_handle = ifpga_irq_handle[vec_start + 1]; ++ intr_handle = &ifpga_irq_handle[vec_start + 1]; ++ else ++ return 0; + +- rte_intr_efd_disable(&intr_handle); ++ rte_intr_efd_disable(intr_handle); + +- return rte_intr_callback_unregister(&intr_handle, +- handler, arg); ++ return rte_intr_callback_unregister(intr_handle, handler, arg); + } + + int +@@ -1356,7 +1359,7 @@ ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id, + void *arg) + { + int ret; +- struct rte_intr_handle intr_handle; ++ struct rte_intr_handle *intr_handle; + struct opae_adapter *adapter; + struct opae_manager *mgr; + struct opae_accelerator *acc; +@@ -1370,26 +1373,29 @@ ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id, + return -ENODEV; + + if (type == 
IFPGA_FME_IRQ) { +- intr_handle = ifpga_irq_handle[0]; ++ intr_handle = &ifpga_irq_handle[0]; + count = 1; +- } else if (type == IFPGA_AFU_IRQ) +- intr_handle = ifpga_irq_handle[vec_start + 1]; ++ } else if (type == IFPGA_AFU_IRQ) { ++ intr_handle = &ifpga_irq_handle[vec_start + 1]; ++ } else { ++ return -EINVAL; ++ } + +- intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX; ++ intr_handle->type = RTE_INTR_HANDLE_VFIO_MSIX; + +- ret = rte_intr_efd_enable(&intr_handle, count); ++ ret = rte_intr_efd_enable(intr_handle, count); + if (ret) + return -ENODEV; + +- intr_handle.fd = intr_handle.efds[0]; ++ intr_handle->fd = intr_handle->efds[0]; + + IFPGA_RAWDEV_PMD_DEBUG("register %s irq, vfio_fd=%d, fd=%d\n", +- name, intr_handle.vfio_dev_fd, +- intr_handle.fd); ++ name, intr_handle->vfio_dev_fd, ++ intr_handle->fd); + + if (type == IFPGA_FME_IRQ) { + struct fpga_fme_err_irq_set err_irq_set; +- err_irq_set.evtfd = intr_handle.efds[0]; ++ err_irq_set.evtfd = intr_handle->efds[0]; + + ret = opae_manager_ifpga_set_err_irq(mgr, &err_irq_set); + if (ret) +@@ -1399,13 +1405,14 @@ ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id, + if (!acc) + return -EINVAL; + +- ret = opae_acc_set_irq(acc, vec_start, count, intr_handle.efds); ++ ret = opae_acc_set_irq(acc, vec_start, count, ++ intr_handle->efds); + if (ret) + return -EINVAL; + } + + /* register interrupt handler using DPDK API */ +- ret = rte_intr_callback_register(&intr_handle, ++ ret = rte_intr_callback_register(intr_handle, + handler, (void *)arg); + if (ret) + return -EINVAL; +@@ -1558,7 +1565,7 @@ ifpga_rawdev_destroy(struct rte_pci_device *pci_dev) + return -ENODEV; + + if (ifpga_unregister_msix_irq(IFPGA_FME_IRQ, 0, +- fme_interrupt_handler, mgr)) ++ fme_interrupt_handler, mgr) < 0) + return -EINVAL; + + opae_adapter_data_free(adapter->data); diff --git a/dpdk/drivers/raw/ifpga/meson.build b/dpdk/drivers/raw/ifpga/meson.build -index 206136ff48..d4027068d6 100644 +index 206136ff48..c267373e5e 100644 --- a/dpdk/drivers/raw/ifpga/meson.build +++ b/dpdk/drivers/raw/ifpga/meson.build -@@ -15,7 +15,7 @@ if build +@@ -1,11 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2018 Intel Corporation + +-dep = dependency('libfdt', required: false) +-if not dep.found() +- dep = cc.find_library('libfdt', required: false) +-endif +-if not dep.found() ++if has_libfdt == 0 + build = false + reason = 'missing dependency, "libfdt"' + endif +@@ -15,8 +11,7 @@ if build objs = [base_objs] deps += ['ethdev', 'rawdev', 'pci', 'bus_pci', 'kvargs', - 'bus_vdev', 'bus_ifpga', 'net', 'i40e', 'ipn3ke'] +- ext_deps += dep + 'bus_vdev', 'bus_ifpga', 'net', 'pmd_i40e', 'pmd_ipn3ke'] - ext_deps += dep sources = files('ifpga_rawdev.c') + +diff --git a/dpdk/drivers/raw/ioat/ioat_rawdev.c b/dpdk/drivers/raw/ioat/ioat_rawdev.c +index af8414b34c..a9b762330a 100644 +--- a/dpdk/drivers/raw/ioat/ioat_rawdev.c ++++ b/dpdk/drivers/raw/ioat/ioat_rawdev.c +@@ -198,6 +198,12 @@ ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids) + return 0; + } + ++static int ++ioat_dev_close(struct rte_rawdev *dev __rte_unused) ++{ ++ return 0; ++} ++ + extern int ioat_rawdev_test(uint16_t dev_id); + + static int +@@ -207,6 +213,7 @@ ioat_rawdev_create(const char *name, struct rte_pci_device *dev) + .dev_configure = ioat_dev_configure, + .dev_start = ioat_dev_start, + .dev_stop = ioat_dev_stop, ++ .dev_close = ioat_dev_close, + .dev_info_get = ioat_dev_info_get, + .xstats_get = ioat_xstats_get, + .xstats_get_names = ioat_xstats_get_names, diff --git 
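The ifpga_rawdev_fill_info() change a few hunks back adds the two checks readlink() always needs: it returns the byte count without writing a terminating NUL, and a result that fills the whole buffer indicates possible truncation. A self-contained sketch of the same pattern:

#include <stdio.h>
#include <unistd.h>

/* readlink() does not NUL-terminate and reports truncation only by
 * filling the buffer, so reserve a byte and terminate by hand. */
static int
safe_readlink(const char *path, char *buf, size_t size)
{
	ssize_t ret;

	if (size == 0)
		return -1;
	ret = readlink(path, buf, size - 1);
	if (ret < 0 || (size_t)ret > size - 1)
		return -1;
	buf[ret] = '\0';
	return 0;
}

int main(void)
{
	char target[256];

	if (safe_readlink("/proc/self/exe", target, sizeof(target)) == 0)
		printf("%s\n", target);
	return 0;
}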
a/dpdk/drivers/raw/ntb/ntb.c b/dpdk/drivers/raw/ntb/ntb.c index ad7f6abfd3..dd0b72f8c5 100644 --- a/dpdk/drivers/raw/ntb/ntb.c @@ -33776,6 +65698,105 @@ index ad7f6abfd3..dd0b72f8c5 100644 /* update queue stats */ off = NTB_XSTATS_NUM * ((size_t)context + 1); +diff --git a/dpdk/drivers/raw/skeleton/skeleton_rawdev.c b/dpdk/drivers/raw/skeleton/skeleton_rawdev.c +index 586183a5b8..1daf0fecd2 100644 +--- a/dpdk/drivers/raw/skeleton/skeleton_rawdev.c ++++ b/dpdk/drivers/raw/skeleton/skeleton_rawdev.c +@@ -189,9 +189,11 @@ static int skeleton_rawdev_close(struct rte_rawdev *dev) + } + break; + case SKELETON_FW_READY: ++ SKELETON_PMD_DEBUG("Device already in stopped state"); ++ break; + case SKELETON_FW_ERROR: + default: +- SKELETON_PMD_DEBUG("Device already in stopped state"); ++ SKELETON_PMD_DEBUG("Device in impossible state"); + ret = -EINVAL; + break; + } +diff --git a/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c b/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c +index 9ecfdee818..1190e28bb7 100644 +--- a/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c ++++ b/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c +@@ -42,6 +42,12 @@ static int + testsuite_setup(void) + { + uint8_t count; ++ ++ total = 0; ++ passed = 0; ++ failed = 0; ++ unsupported = 0; ++ + count = rte_rawdev_count(); + if (!count) { + SKELDEV_TEST_INFO("\tNo existing rawdev; " +diff --git a/dpdk/examples/bbdev_app/Makefile b/dpdk/examples/bbdev_app/Makefile +index ead3f016b8..3c8eb75a4e 100644 +--- a/dpdk/examples/bbdev_app/Makefile ++++ b/dpdk/examples/bbdev_app/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + +diff --git a/dpdk/examples/bbdev_app/main.c b/dpdk/examples/bbdev_app/main.c +index fb38dc3a72..68a46050c0 100644 +--- a/dpdk/examples/bbdev_app/main.c ++++ b/dpdk/examples/bbdev_app/main.c +@@ -659,6 +659,8 @@ print_stats(struct stats_lcore_params *stats_lcore) + print_lcore_stats(stats_lcore->lconf[l_id].lcore_stats, l_id); + } + ++ fflush(stdout); ++ + free(xstats); + free(xstats_names); + } +diff --git a/dpdk/examples/bond/Makefile b/dpdk/examples/bond/Makefile +index 2030ca410a..4e4289e151 100644 +--- a/dpdk/examples/bond/Makefile ++++ b/dpdk/examples/bond/Makefile +@@ -24,7 +24,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + +diff --git a/dpdk/examples/cmdline/Makefile b/dpdk/examples/cmdline/Makefile +index 0b6b54540a..9418b50b87 100644 +--- a/dpdk/examples/cmdline/Makefile ++++ b/dpdk/examples/cmdline/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ 
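The bbdev_app print_stats() hunk above adds fflush(stdout), and the same one-liner recurs in ioat, kni, the l2fwd variants and l2fwd-jobstats later in this patch: when stdout is redirected to a file or pipe it becomes fully buffered, so periodic statistics may not appear until the program exits. The pattern in isolation:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		printf("== iteration %d: packets rx/tx ==\n", i);
		fflush(stdout);  /* push the report out even when stdout is a pipe */
		sleep(1);
	}
	return 0;
}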
$(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/distributor/Makefile b/dpdk/examples/distributor/Makefile +index 4192d8a4ae..5253780793a 100644 +--- a/dpdk/examples/distributor/Makefile ++++ b/dpdk/examples/distributor/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/ethtool/lib/rte_ethtool.c b/dpdk/examples/ethtool/lib/rte_ethtool.c index 667d7eaf27..db8150efd5 100644 --- a/dpdk/examples/ethtool/lib/rte_ethtool.c @@ -33791,8 +65812,21 @@ index 667d7eaf27..db8150efd5 100644 return 0; } +diff --git a/dpdk/examples/eventdev_pipeline/Makefile b/dpdk/examples/eventdev_pipeline/Makefile +index 96cd244378..95a8d0884a 100644 +--- a/dpdk/examples/eventdev_pipeline/Makefile ++++ b/dpdk/examples/eventdev_pipeline/Makefile +@@ -24,7 +24,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/eventdev_pipeline/main.c b/dpdk/examples/eventdev_pipeline/main.c -index d3ff1bbe4f..21958269f7 100644 +index d3ff1bbe4f..a3eeb50a75 100644 --- a/dpdk/examples/eventdev_pipeline/main.c +++ b/dpdk/examples/eventdev_pipeline/main.c @@ -10,6 +10,8 @@ @@ -33804,10 +65838,98 @@ index d3ff1bbe4f..21958269f7 100644 struct config_data cdata = { .num_packets = (1L << 25), /* do ~32M packets */ .num_fids = 512, -@@ -299,12 +301,6 @@ signal_handler(int signum) +@@ -20,6 +22,32 @@ struct config_data cdata = { + .worker_cq_depth = 16 + }; - rte_eal_mp_wait_lcore(); ++static void ++dump_core_info(unsigned int lcore_id, struct worker_data *data, ++ unsigned int worker_idx) ++{ ++ if (fdata->rx_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing NIC Rx\n", ++ __func__, lcore_id); ++ ++ if (fdata->tx_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing NIC Tx\n", ++ __func__, lcore_id); ++ ++ if (fdata->sched_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing scheduler\n", ++ __func__, lcore_id); ++ ++ if (fdata->worker_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing worker, using eventdev port %u\n", ++ __func__, lcore_id, ++ data[worker_idx].port_id); ++} ++ + static bool + core_in_use(unsigned int lcore_id) { + return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] || +@@ -79,7 +107,7 @@ parse_coremask(const char *coremask) + val = xdigit2val(c); + for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) { + if ((1 << j) & val) { +- mask |= (1UL << idx); ++ mask |= (1ULL << idx); + count++; + } + } +@@ -230,15 +258,22 @@ parse_app_args(int argc, char **argv) + usage(); + + for (i = 0; i < MAX_NUM_CORE; i++) { +- fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i)); +- fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i)); +- fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i)); +- fdata->worker_core[i] = 
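The parse_coremask() hunk above swaps 1UL for 1ULL: on 32-bit targets unsigned long is 32 bits wide, so 1UL << idx is undefined behavior once idx reaches 32 and the upper half of a 64-core mask is silently lost. A minimal demonstration of the safe form:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0;
	int idx;

	for (idx = 0; idx < 64; idx++)
		if (idx % 3 == 0)
			mask |= 1ULL << idx;  /* 1ULL is at least 64 bits on
					       * every ABI; 1UL may be 32 */

	printf("core 63 set: %d\n", (int)((mask >> 63) & 1));
	return 0;
}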
!!(worker_lcore_mask & (1UL << i)); ++ fdata->rx_core[i] = !!(rx_lcore_mask & (1ULL << i)); ++ fdata->tx_core[i] = !!(tx_lcore_mask & (1ULL << i)); ++ fdata->sched_core[i] = !!(sched_lcore_mask & (1ULL << i)); ++ fdata->worker_core[i] = !!(worker_lcore_mask & (1ULL << i)); + + if (fdata->worker_core[i]) + cdata.num_workers++; +- if (core_in_use(i)) ++ if (core_in_use(i)) { ++ if (!rte_lcore_is_enabled(i)) { ++ printf("lcore %d is not enabled in lcore list\n", ++ i); ++ rte_exit(EXIT_FAILURE, ++ "check lcore params failed\n"); ++ } + cdata.active_cores++; ++ } + } + } +@@ -278,7 +313,6 @@ static void + signal_handler(int signum) + { + static uint8_t once; +- uint16_t portid; + + if (fdata->done) + rte_exit(1, "Exiting on signal %d\n", signum); +@@ -289,22 +323,6 @@ signal_handler(int signum) + rte_event_dev_dump(0, stdout); + once = 1; + fdata->done = 1; +- rte_smp_wmb(); +- +- RTE_ETH_FOREACH_DEV(portid) { +- rte_event_eth_rx_adapter_stop(portid); +- rte_event_eth_tx_adapter_stop(portid); +- rte_eth_dev_stop(portid); +- } +- +- rte_eal_mp_wait_lcore(); +- - RTE_ETH_FOREACH_DEV(portid) { - rte_eth_dev_close(portid); - } @@ -33817,11 +65939,57 @@ index d3ff1bbe4f..21958269f7 100644 } if (signum == SIGTSTP) rte_event_dev_dump(0, stdout); -@@ -467,5 +463,14 @@ main(int argc, char **argv) +@@ -409,25 +427,7 @@ main(int argc, char **argv) + !fdata->sched_core[lcore_id]) + continue; + +- if (fdata->rx_core[lcore_id]) +- printf( +- "[%s()] lcore %d executing NIC Rx\n", +- __func__, lcore_id); +- +- if (fdata->tx_core[lcore_id]) +- printf( +- "[%s()] lcore %d executing NIC Tx\n", +- __func__, lcore_id); +- +- if (fdata->sched_core[lcore_id]) +- printf("[%s()] lcore %d executing scheduler\n", +- __func__, lcore_id); +- +- if (fdata->worker_core[lcore_id]) +- printf( +- "[%s()] lcore %d executing worker, using eventdev port %u\n", +- __func__, lcore_id, +- worker_data[worker_idx].port_id); ++ dump_core_info(lcore_id, worker_data, worker_idx); + + err = rte_eal_remote_launch(fdata->cap.worker, + &worker_data[worker_idx], lcore_id); +@@ -442,8 +442,13 @@ main(int argc, char **argv) + + lcore_id = rte_lcore_id(); + +- if (core_in_use(lcore_id)) +- fdata->cap.worker(&worker_data[worker_idx++]); ++ if (core_in_use(lcore_id)) { ++ dump_core_info(lcore_id, worker_data, worker_idx); ++ fdata->cap.worker(&worker_data[worker_idx]); ++ ++ if (fdata->worker_core[lcore_id]) ++ worker_idx++; ++ } + + rte_eal_mp_wait_lcore(); + +@@ -467,5 +472,17 @@ main(int argc, char **argv) } + RTE_ETH_FOREACH_DEV(portid) { ++ rte_event_eth_rx_adapter_stop(portid); ++ rte_event_eth_tx_adapter_stop(portid); ++ rte_eth_dev_stop(portid); + rte_eth_dev_close(portid); + } + @@ -33833,9 +66001,24 @@ index d3ff1bbe4f..21958269f7 100644 return 0; } diff --git a/dpdk/examples/eventdev_pipeline/pipeline_common.h b/dpdk/examples/eventdev_pipeline/pipeline_common.h -index 8e30393d09..c7245f7f0f 100644 +index 8e30393d09..6a4287602e 100644 --- a/dpdk/examples/eventdev_pipeline/pipeline_common.h +++ b/dpdk/examples/eventdev_pipeline/pipeline_common.h +@@ -51,10 +51,10 @@ struct fastpath_data { + bool rx_single; + bool tx_single; + bool sched_single; +- unsigned int rx_core[MAX_NUM_CORE]; +- unsigned int tx_core[MAX_NUM_CORE]; +- unsigned int sched_core[MAX_NUM_CORE]; +- unsigned int worker_core[MAX_NUM_CORE]; ++ uint64_t rx_core[MAX_NUM_CORE]; ++ uint64_t tx_core[MAX_NUM_CORE]; ++ uint64_t sched_core[MAX_NUM_CORE]; ++ uint64_t worker_core[MAX_NUM_CORE]; + struct setup_data cap; + } __rte_cache_aligned; + @@ -93,8 +93,8 @@ struct port_link { 
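The signal_handler() rework above is the standard async-signal-safety fix: calls such as rte_eth_dev_stop(), rte_eth_dev_close() and rte_eal_mp_wait_lcore() are not safe in signal context, so the handler now only records the shutdown request and the main thread performs port teardown after the worker loop returns. A sketch of that shape:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t done;

static void on_signal(int signum)
{
	(void)signum;
	done = 1;  /* only touch a sig_atomic_t flag inside the handler */
}

int main(void)
{
	signal(SIGINT, on_signal);
	while (!done)
		pause();  /* the packet-processing loop stands here */
	/* Heavyweight teardown (stop/close ports, wait for lcores) runs
	 * here in normal context, never inside on_signal(). */
	printf("clean shutdown\n");
	return 0;
}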
uint8_t priority; }; @@ -33847,11 +66030,52 @@ index 8e30393d09..c7245f7f0f 100644 static __rte_always_inline void exchange_mac(struct rte_mbuf *m) +diff --git a/dpdk/examples/fips_validation/Makefile b/dpdk/examples/fips_validation/Makefile +index 1385e8cc8c..5bcf1872c4 100644 +--- a/dpdk/examples/fips_validation/Makefile ++++ b/dpdk/examples/fips_validation/Makefile +@@ -31,7 +31,9 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) ++ ++CFLAGS += -DALLOW_EXPERIMENTAL_API + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/fips_validation/fips_validation.c b/dpdk/examples/fips_validation/fips_validation.c -index 07ffa62e9e..b79a095aca 100644 +index 07ffa62e9e..701d87dc4d 100644 --- a/dpdk/examples/fips_validation/fips_validation.c +++ b/dpdk/examples/fips_validation/fips_validation.c -@@ -144,6 +144,24 @@ fips_test_parse_header(void) +@@ -92,6 +92,15 @@ fips_test_fetch_one_block(void) + return -ENOMEM; + } + ++static void ++fips_test_parse_version(void) ++{ ++ int len = strlen(info.vec[0]); ++ char *ptr = info.vec[0]; ++ ++ info.version = strtof(ptr + len - 4, NULL); ++} ++ + static int + fips_test_parse_header(void) + { +@@ -106,6 +115,9 @@ fips_test_parse_header(void) + if (ret < 0) + return ret; + ++ if (info.nb_vec_lines) ++ fips_test_parse_version(); ++ + for (i = 0; i < info.nb_vec_lines; i++) { + if (!algo_parsed) { + if (strstr(info.vec[i], "AESVS")) { +@@ -144,6 +156,24 @@ fips_test_parse_header(void) ret = parse_test_tdes_init(); if (ret < 0) return 0; @@ -33876,6 +66100,140 @@ index 07ffa62e9e..b79a095aca 100644 } else if (strstr(info.vec[i], "SHA-")) { algo_parsed = 1; info.algo = FIPS_TEST_ALGO_SHA; +@@ -257,7 +287,11 @@ fips_test_init(const char *req_file_path, const char *rsp_file_path, + + fips_test_clear(); + +- strcpy(info.file_name, req_file_path); ++ if (rte_strscpy(info.file_name, req_file_path, ++ sizeof(info.file_name)) < 0) { ++ RTE_LOG(ERR, USER1, "Path %s too long\n", req_file_path); ++ return -EINVAL; ++ } + info.algo = FIPS_TEST_ALGO_MAX; + if (parse_file_type(req_file_path) < 0) { + RTE_LOG(ERR, USER1, "File %s type not supported\n", +@@ -283,7 +317,11 @@ fips_test_init(const char *req_file_path, const char *rsp_file_path, + return -ENOMEM; + } + +- strlcpy(info.device_name, device_name, sizeof(info.device_name)); ++ if (rte_strscpy(info.device_name, device_name, ++ sizeof(info.device_name)) < 0) { ++ RTE_LOG(ERR, USER1, "Device name %s too long\n", device_name); ++ return -EINVAL; ++ } + + if (fips_test_parse_header() < 0) { + RTE_LOG(ERR, USER1, "Failed parsing header\n"); +@@ -316,11 +354,15 @@ int + fips_test_parse_one_case(void) + { + uint32_t i, j = 0; +- uint32_t is_interim = 0; ++ uint32_t is_interim; ++ uint32_t interim_cnt = 0; + int ret; + ++ info.vec_start_off = 0; ++ + if (info.interim_callbacks) { + for (i = 0; i < info.nb_vec_lines; i++) { ++ is_interim = 0; + for (j = 0; info.interim_callbacks[j].key != NULL; j++) + if (strstr(info.vec[i], + info.interim_callbacks[j].key)) { +@@ -333,17 +375,31 @@ fips_test_parse_one_case(void) + if (ret < 0) + return ret; + } ++ ++ if (is_interim) ++ interim_cnt += 1; + } + } + +- if (is_interim) { +- for (i = 0; i < info.nb_vec_lines; 
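fips_test_init() above replaces strcpy()/strlcpy() with rte_strscpy() (declared in rte_string_fns.h), which returns a negative errno value when the source string does not fit, turning silent truncation into a detectable error. A small sketch of the check; set_path() is illustrative, not a DPDK API:

#include <stdio.h>
#include <rte_string_fns.h>

/* Propagate rte_strscpy()'s error instead of truncating silently. */
static int
set_path(char *dst, size_t dstsize, const char *src)
{
	if (rte_strscpy(dst, src, dstsize) < 0) {
		fprintf(stderr, "path '%s' too long\n", src);
		return -1;
	}
	return 0;
}

int main(void)
{
	char buf[8];

	printf("%d\n", set_path(buf, sizeof(buf), "short"));         /* 0 */
	printf("%d\n", set_path(buf, sizeof(buf), "much-too-long")); /* -1 */
	return 0;
}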
i++) +- fprintf(info.fp_wr, "%s\n", info.vec[i]); +- fprintf(info.fp_wr, "\n"); +- return 1; ++ if (interim_cnt) { ++ if (info.version == 21.4f) { ++ for (i = 0; i < interim_cnt; i++) ++ fprintf(info.fp_wr, "%s\n", info.vec[i]); ++ fprintf(info.fp_wr, "\n"); ++ ++ if (info.nb_vec_lines == interim_cnt) ++ return 1; ++ } else { ++ for (i = 0; i < info.nb_vec_lines; i++) ++ fprintf(info.fp_wr, "%s\n", info.vec[i]); ++ fprintf(info.fp_wr, "\n"); ++ return 1; ++ } + } + +- for (i = 0; i < info.nb_vec_lines; i++) { ++ info.vec_start_off = interim_cnt; ++ ++ for (i = info.vec_start_off; i < info.nb_vec_lines; i++) { + for (j = 0; info.callbacks[j].key != NULL; j++) + if (strstr(info.vec[i], info.callbacks[j].key)) { + ret = info.callbacks[j].cb( +@@ -363,7 +419,7 @@ fips_test_write_one_case(void) + { + uint32_t i; + +- for (i = 0; i < info.nb_vec_lines; i++) ++ for (i = info.vec_start_off; i < info.nb_vec_lines; i++) + fprintf(info.fp_wr, "%s\n", info.vec[i]); + } + +@@ -607,9 +663,16 @@ update_info_vec(uint32_t count) + + cb = &info.writeback_callbacks[0]; + +- snprintf(info.vec[0], strlen(info.vec[0]) + 4, "%s%u", cb->key, count); ++ if ((info.version == 21.4f) && (!(strstr(info.vec[0], cb->key)))) { ++ fprintf(info.fp_wr, "%s%u\n", cb->key, count); ++ i = 0; ++ } else { ++ snprintf(info.vec[0], strlen(info.vec[0]) + 4, "%s%u", cb->key, ++ count); ++ i = 1; ++ } + +- for (i = 1; i < info.nb_vec_lines; i++) { ++ for (; i < info.nb_vec_lines; i++) { + for (j = 1; info.writeback_callbacks[j].key != NULL; j++) { + cb = &info.writeback_callbacks[j]; + if (strstr(info.vec[i], cb->key)) { +diff --git a/dpdk/examples/fips_validation/fips_validation.h b/dpdk/examples/fips_validation/fips_validation.h +index d487fb0058..295624b1b5 100644 +--- a/dpdk/examples/fips_validation/fips_validation.h ++++ b/dpdk/examples/fips_validation/fips_validation.h +@@ -14,6 +14,7 @@ + #define MAX_NB_TESTS 10240 + #define MAX_BUF_SIZE 2048 + #define MAX_STRING_SIZE 64 ++#define MAX_FILE_NAME_SIZE 256 + #define MAX_DIGEST_SIZE 64 + + #define POSITIVE_TEST 0 +@@ -160,9 +161,11 @@ struct fips_test_interim_info { + enum fips_test_algorithms algo; + char *one_line_text; + char *vec[MAX_LINE_PER_VECTOR]; ++ uint32_t vec_start_off; + uint32_t nb_vec_lines; + char device_name[MAX_STRING_SIZE]; +- char file_name[MAX_STRING_SIZE]; ++ char file_name[MAX_FILE_NAME_SIZE]; ++ float version; + + union { + struct aesavs_interim_data aes_data; diff --git a/dpdk/examples/fips_validation/fips_validation_gcm.c b/dpdk/examples/fips_validation/fips_validation_gcm.c index ea48ddf707..47576e9a38 100644 --- a/dpdk/examples/fips_validation/fips_validation_gcm.c @@ -33891,11 +66249,171 @@ index ea48ddf707..47576e9a38 100644 {AADLEN_STR, parser_read_uint32_bit_val, &vec.aead.aad}, {TAGLEN_STR, parser_read_uint32_bit_val, &vec.aead.digest}, +diff --git a/dpdk/examples/fips_validation/fips_validation_tdes.c b/dpdk/examples/fips_validation/fips_validation_tdes.c +index 5b6737643a..f4c738c789 100644 +--- a/dpdk/examples/fips_validation/fips_validation_tdes.c ++++ b/dpdk/examples/fips_validation/fips_validation_tdes.c +@@ -59,9 +59,7 @@ static int + parse_tdes_uint8_hex_str(const char *key, char *src, struct fips_val *val); + + static int +-parse_tdes_interim(const char *key, +- __attribute__((__unused__)) char *text, +- struct fips_val *val); ++parse_tdes_interim(const char *key, char *text, struct fips_val *val); + + struct fips_test_callback tdes_tests_vectors[] = { + {KEYS_STR, parse_tdes_uint8_hex_str, &vec.cipher_auth.key}, +@@ -77,6 +75,7 @@ struct 
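fips_test_parse_version() in the previous file hunk derives a float version from the request-file header by reading the last four characters of the first vector line, and the interim/writeback logic above then branches on version 21.4. A sketch of that trailing-version parse, under the same fixed-width assumption the hunk makes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumes the CAVS header line ends in a fixed-width version such as
 * "21.4", mirroring fips_test_parse_version() above. */
static float
parse_trailing_version(const char *line)
{
	size_t len = strlen(line);

	if (len < 4)
		return 0.0f;
	return strtof(line + len - 4, NULL);
}

int main(void)
{
	printf("%.1f\n", parse_trailing_version("# CAVS 21.4"));
	return 0;
}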
fips_test_callback tdes_tests_vectors[] = { + struct fips_test_callback tdes_tests_interim_vectors[] = { + {ENC_STR, parse_tdes_interim, NULL}, + {DEC_STR, parse_tdes_interim, NULL}, ++ {NK_STR, parse_tdes_interim, NULL}, + {NULL, NULL, NULL} /**< end pointer */ + }; + +@@ -94,21 +93,23 @@ struct fips_test_callback tdes_writeback_callbacks[] = { + }; + + static int +-parse_tdes_interim(const char *key, +- __attribute__((__unused__)) char *text, ++parse_tdes_interim(const char *key, char *text, + __attribute__((__unused__)) struct fips_val *val) + { + if (strstr(key, ENC_STR)) + info.op = FIPS_TEST_ENC_AUTH_GEN; + else if (strstr(key, DEC_STR)) + info.op = FIPS_TEST_DEC_AUTH_VERIF; +- else if (strstr(NK_STR, "NumKeys = 1")) +- info.interim_info.tdes_data.nb_keys = 1; +- else if (strstr(NK_STR, "NumKeys = 2")) +- info.interim_info.tdes_data.nb_keys = 2; +- else if (strstr(NK_STR, "NumKeys = 3")) +- info.interim_info.tdes_data.nb_keys = 3; +- else ++ else if (strstr(key, NK_STR)) { ++ if (strcmp(text, "NumKeys = 1") == 0) ++ info.interim_info.tdes_data.nb_keys = 1; ++ else if (strcmp(text, "NumKeys = 2") == 0) ++ info.interim_info.tdes_data.nb_keys = 2; ++ else if (strcmp(text, "NumKeys = 3") == 0) ++ info.interim_info.tdes_data.nb_keys = 3; ++ else ++ return -EINVAL; ++ } else + return -EINVAL; + + return 0; +diff --git a/dpdk/examples/fips_validation/main.c b/dpdk/examples/fips_validation/main.c +index 9a2c8da619..eadfdb4bac 100644 +--- a/dpdk/examples/fips_validation/main.c ++++ b/dpdk/examples/fips_validation/main.c +@@ -1030,6 +1030,11 @@ fips_mct_tdes_test(void) + int test_mode = info.interim_info.tdes_data.test_mode; + + for (i = 0; i < TDES_EXTERN_ITER; i++) { ++ if ((i == 0) && (info.version == 21.4f)) { ++ if (!(strstr(info.vec[0], "COUNT"))) ++ fprintf(info.fp_wr, "%s%u\n", "COUNT = ", 0); ++ } ++ + if (i != 0) + update_info_vec(i); + +diff --git a/dpdk/examples/flow_classify/Makefile b/dpdk/examples/flow_classify/Makefile +index 6864941b3e..161d576b60 100644 +--- a/dpdk/examples/flow_classify/Makefile ++++ b/dpdk/examples/flow_classify/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + +diff --git a/dpdk/examples/flow_filtering/Makefile b/dpdk/examples/flow_filtering/Makefile +index 6c51c0b7a0..0ce20d3485 100644 +--- a/dpdk/examples/flow_filtering/Makefile ++++ b/dpdk/examples/flow_filtering/Makefile +@@ -20,7 +20,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/helloworld/Makefile b/dpdk/examples/helloworld/Makefile +index 16d82b02f0..9a07f89efc 100644 +--- a/dpdk/examples/helloworld/Makefile ++++ b/dpdk/examples/helloworld/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell 
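The parse_tdes_interim() fix above removes a dead comparison: the old code called strstr(NK_STR, "NumKeys = 1"), searching one string constant inside another, so the parsed input line was never consulted and nb_keys was never set. The corrected shape, dispatching on the registered key and then matching the live text:

#include <stdio.h>
#include <string.h>

static int nb_keys;

/* Dispatch on the key, then inspect the actual input line; never
 * compare two constants against each other. */
static int
parse_numkeys(const char *key, const char *text)
{
	if (strstr(key, "NumKeys") == NULL)
		return -1;
	if (strcmp(text, "NumKeys = 1") == 0)
		nb_keys = 1;
	else if (strcmp(text, "NumKeys = 2") == 0)
		nb_keys = 2;
	else if (strcmp(text, "NumKeys = 3") == 0)
		nb_keys = 3;
	else
		return -1;
	return 0;
}

int main(void)
{
	parse_numkeys("NumKeys", "NumKeys = 2");
	printf("nb_keys=%d\n", nb_keys);
	return 0;
}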
$(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/ioat/Makefile b/dpdk/examples/ioat/Makefile +index ef63f5d689..dd4930136e 100644 +--- a/dpdk/examples/ioat/Makefile ++++ b/dpdk/examples/ioat/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/ioat/ioatfwd.c b/dpdk/examples/ioat/ioatfwd.c -index e9117718fe..53de231795 100644 +index e9117718fe..7971442b5d 100644 --- a/dpdk/examples/ioat/ioatfwd.c +++ b/dpdk/examples/ioat/ioatfwd.c -@@ -460,7 +460,7 @@ ioat_tx_port(struct rxtx_port_config *tx_config) +@@ -168,7 +168,7 @@ print_stats(char *prgname) + struct rte_rawdev_xstats_name *names_xstats; + uint64_t *xstats; + unsigned int *ids_xstats, nb_xstats; +- char status_string[120]; /* to print at the top of the output */ ++ char status_string[255]; /* to print at the top of the output */ + int status_strlen; + int ret; + +@@ -194,7 +194,7 @@ print_stats(char *prgname) + "Rx Queues = %d, ", nb_queues); + status_strlen += snprintf(status_string + status_strlen, + sizeof(status_string) - status_strlen, +- "Ring Size = %d\n", ring_size); ++ "Ring Size = %d", ring_size); + + /* Allocate memory for xstats names and values */ + ret = rte_rawdev_xstats_names_get( +@@ -251,7 +251,7 @@ print_stats(char *prgname) + + memset(&delta_ts, 0, sizeof(struct total_statistics)); + +- printf("%s", status_string); ++ printf("%s\n", status_string); + + for (i = 0; i < cfg.nb_ports; i++) { + port_id = cfg.ports[i].rxtx_port; +@@ -294,6 +294,8 @@ print_stats(char *prgname) + printf("\n"); + print_total_stats(&delta_ts); + ++ fflush(stdout); ++ + ts.total_packets_tx += delta_ts.total_packets_tx; + ts.total_packets_rx += delta_ts.total_packets_rx; + ts.total_packets_dropped += delta_ts.total_packets_dropped; +@@ -460,7 +462,7 @@ ioat_tx_port(struct rxtx_port_config *tx_config) MAX_PKT_BURST, NULL); } @@ -33904,7 +66422,7 @@ index e9117718fe..53de231795 100644 return; if (copy_mode == COPY_MODE_IOAT_NUM) -@@ -697,7 +697,7 @@ check_link_status(uint32_t port_mask) +@@ -697,7 +699,7 @@ check_link_status(uint32_t port_mask) { uint16_t portid; struct rte_eth_link link; @@ -33913,7 +66431,7 @@ index e9117718fe..53de231795 100644 printf("\nChecking link status\n"); RTE_ETH_FOREACH_DEV(portid) { -@@ -705,7 +705,12 @@ check_link_status(uint32_t port_mask) +@@ -705,7 +707,12 @@ check_link_status(uint32_t port_mask) continue; memset(&link, 0, sizeof(link)); @@ -33927,7 +66445,7 @@ index e9117718fe..53de231795 100644 /* Print link status */ if (link.link_status) { -@@ -713,12 +718,12 @@ check_link_status(uint32_t port_mask) +@@ -713,12 +720,12 @@ check_link_status(uint32_t port_mask) "Port %d Link Up. Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
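print_stats() in ioatfwd.c above builds its header with chained status_strlen += snprintf(...) calls; the patch enlarges the buffer and moves the newline so the pieces stay in range. snprintf() returns the length that would have been written, so a naive chained offset can run past the buffer once truncation starts; a clamped append helper avoids that (buf_appendf() is illustrative, not from the patch):

#include <stdio.h>

/* Append formatted text at *off, clamping so a truncated write can
 * never push the offset past the end of the buffer. */
static void
buf_appendf(char *buf, size_t size, int *off, const char *fmt,
	    const char *val)
{
	int n;

	if ((size_t)*off >= size)
		return;
	n = snprintf(buf + *off, size - *off, fmt, val);
	if (n < 0)
		return;
	*off += (n < (int)(size - *off)) ? n : (int)(size - *off) - 1;
}

int main(void)
{
	char status[32];
	int off = 0;

	buf_appendf(status, sizeof(status), &off, "Worker=%s, ", "copy");
	buf_appendf(status, sizeof(status), &off, "Ring=%s", "2048");
	printf("%s (len=%d)\n", status, off);
	return 0;
}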
@@ -33943,7 +66461,7 @@ index e9117718fe..53de231795 100644 } static void -@@ -824,7 +829,11 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues) +@@ -824,7 +831,11 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues) /* Init port */ printf("Initializing port %u... ", portid); fflush(stdout); @@ -33956,6 +66474,19 @@ index e9117718fe..53de231795 100644 local_port_conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads; if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) +diff --git a/dpdk/examples/ip_fragmentation/Makefile b/dpdk/examples/ip_fragmentation/Makefile +index ede0c4f02b..d200cc627d 100644 +--- a/dpdk/examples/ip_fragmentation/Makefile ++++ b/dpdk/examples/ip_fragmentation/Makefile +@@ -23,7 +23,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/ip_fragmentation/main.c b/dpdk/examples/ip_fragmentation/main.c index 104612339c..90e4d1ea4a 100644 --- a/dpdk/examples/ip_fragmentation/main.c @@ -33978,6 +66509,251 @@ index 104612339c..90e4d1ea4a 100644 static int check_ptype(int portid) { +diff --git a/dpdk/examples/ip_pipeline/Makefile b/dpdk/examples/ip_pipeline/Makefile +index 3a0193818f..b8e086267b 100644 +--- a/dpdk/examples/ip_pipeline/Makefile ++++ b/dpdk/examples/ip_pipeline/Makefile +@@ -35,9 +35,9 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + +-CFLAGS += -I. ++CFLAGS += -I. -DALLOW_EXPERIMENTAL_API -D_GNU_SOURCE + + OBJS := $(patsubst %.c,build/%.o,$(SRCS-y)) + +diff --git a/dpdk/examples/ip_pipeline/parser.c b/dpdk/examples/ip_pipeline/parser.c +index 3fffeb5867..f043d6bf4f 100644 +--- a/dpdk/examples/ip_pipeline/parser.c ++++ b/dpdk/examples/ip_pipeline/parser.c +@@ -4,25 +4,6 @@ + * All rights reserved. + */ + +-/* +- * For inet_pton4() and inet_pton6() functions: +- * +- * Copyright (c) 1996 by Internet Software Consortium. +- * +- * Permission to use, copy, modify, and distribute this software for any +- * purpose with or without fee is hereby granted, provided that the above +- * copyright notice and this permission notice appear in all copies. +- * +- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS +- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES +- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE +- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +- * SOFTWARE. 
+- */ +- + #include + #include + #include +@@ -34,6 +15,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -348,171 +331,6 @@ parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels) + return 0; + } + +-#define INADDRSZ 4 +-#define IN6ADDRSZ 16 +- +-/* int +- * inet_pton4(src, dst) +- * like inet_aton() but without all the hexadecimal and shorthand. +- * return: +- * 1 if `src' is a valid dotted quad, else 0. +- * notice: +- * does not touch `dst' unless it's returning 1. +- * author: +- * Paul Vixie, 1996. +- */ +-static int +-inet_pton4(const char *src, unsigned char *dst) +-{ +- static const char digits[] = "0123456789"; +- int saw_digit, octets, ch; +- unsigned char tmp[INADDRSZ], *tp; +- +- saw_digit = 0; +- octets = 0; +- *(tp = tmp) = 0; +- while ((ch = *src++) != '\0') { +- const char *pch; +- +- pch = strchr(digits, ch); +- if (pch != NULL) { +- unsigned int new = *tp * 10 + (pch - digits); +- +- if (new > 255) +- return 0; +- if (!saw_digit) { +- if (++octets > 4) +- return 0; +- saw_digit = 1; +- } +- *tp = (unsigned char)new; +- } else if (ch == '.' && saw_digit) { +- if (octets == 4) +- return 0; +- *++tp = 0; +- saw_digit = 0; +- } else +- return 0; +- } +- if (octets < 4) +- return 0; +- +- memcpy(dst, tmp, INADDRSZ); +- return 1; +-} +- +-/* int +- * inet_pton6(src, dst) +- * convert presentation level address to network order binary form. +- * return: +- * 1 if `src' is a valid [RFC1884 2.2] address, else 0. +- * notice: +- * (1) does not touch `dst' unless it's returning 1. +- * (2) :: in a full address is silently ignored. +- * credit: +- * inspired by Mark Andrews. +- * author: +- * Paul Vixie, 1996. +- */ +-static int +-inet_pton6(const char *src, unsigned char *dst) +-{ +- static const char xdigits_l[] = "0123456789abcdef", +- xdigits_u[] = "0123456789ABCDEF"; +- unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; +- const char *xdigits = 0, *curtok = 0; +- int ch = 0, saw_xdigit = 0, count_xdigit = 0; +- unsigned int val = 0; +- unsigned dbloct_count = 0; +- +- memset((tp = tmp), '\0', IN6ADDRSZ); +- endp = tp + IN6ADDRSZ; +- colonp = NULL; +- /* Leading :: requires some special handling. */ +- if (*src == ':') +- if (*++src != ':') +- return 0; +- curtok = src; +- saw_xdigit = count_xdigit = 0; +- val = 0; +- +- while ((ch = *src++) != '\0') { +- const char *pch; +- +- pch = strchr((xdigits = xdigits_l), ch); +- if (pch == NULL) +- pch = strchr((xdigits = xdigits_u), ch); +- if (pch != NULL) { +- if (count_xdigit >= 4) +- return 0; +- val <<= 4; +- val |= (pch - xdigits); +- if (val > 0xffff) +- return 0; +- saw_xdigit = 1; +- count_xdigit++; +- continue; +- } +- if (ch == ':') { +- curtok = src; +- if (!saw_xdigit) { +- if (colonp) +- return 0; +- colonp = tp; +- continue; +- } else if (*src == '\0') { +- return 0; +- } +- if (tp + sizeof(int16_t) > endp) +- return 0; +- *tp++ = (unsigned char) ((val >> 8) & 0xff); +- *tp++ = (unsigned char) (val & 0xff); +- saw_xdigit = 0; +- count_xdigit = 0; +- val = 0; +- dbloct_count++; +- continue; +- } +- if (ch == '.' && ((tp + INADDRSZ) <= endp) && +- inet_pton4(curtok, tp) > 0) { +- tp += INADDRSZ; +- saw_xdigit = 0; +- dbloct_count += 2; +- break; /* '\0' was seen by inet_pton4(). 
*/ +- } +- return 0; +- } +- if (saw_xdigit) { +- if (tp + sizeof(int16_t) > endp) +- return 0; +- *tp++ = (unsigned char) ((val >> 8) & 0xff); +- *tp++ = (unsigned char) (val & 0xff); +- dbloct_count++; +- } +- if (colonp != NULL) { +- /* if we already have 8 double octets, having a colon means error */ +- if (dbloct_count == 8) +- return 0; +- +- /* +- * Since some memmove()'s erroneously fail to handle +- * overlapping regions, we'll do the shift by hand. +- */ +- const int n = tp - colonp; +- int i; +- +- for (i = 1; i <= n; i++) { +- endp[-i] = colonp[n - i]; +- colonp[n - i] = 0; +- } +- tp = endp; +- } +- if (tp != endp) +- return 0; +- memcpy(dst, tmp, IN6ADDRSZ); +- return 1; +-} +- + static struct rte_ether_addr * + my_ether_aton(const char *a) + { +@@ -562,7 +380,7 @@ parse_ipv4_addr(const char *token, struct in_addr *ipv4) + if (strlen(token) >= INET_ADDRSTRLEN) + return -EINVAL; + +- if (inet_pton4(token, (unsigned char *)ipv4) != 1) ++ if (inet_pton(AF_INET, token, ipv4) != 1) + return -EINVAL; + + return 0; +@@ -574,7 +392,7 @@ parse_ipv6_addr(const char *token, struct in6_addr *ipv6) + if (strlen(token) >= INET6_ADDRSTRLEN) + return -EINVAL; + +- if (inet_pton6(token, (unsigned char *)ipv6) != 1) ++ if (inet_pton(AF_INET6, token, ipv6) != 1) + return -EINVAL; + + return 0; diff --git a/dpdk/examples/ip_pipeline/thread.c b/dpdk/examples/ip_pipeline/thread.c index 272fbbeed1..adb83167cd 100644 --- a/dpdk/examples/ip_pipeline/thread.c @@ -34159,6 +66935,19 @@ index 272fbbeed1..adb83167cd 100644 /* Read response */ status = rsp->status; +diff --git a/dpdk/examples/ip_reassembly/Makefile b/dpdk/examples/ip_reassembly/Makefile +index 3f2888b338..c7424da2b7 100644 +--- a/dpdk/examples/ip_reassembly/Makefile ++++ b/dpdk/examples/ip_reassembly/Makefile +@@ -23,7 +23,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/ip_reassembly/main.c b/dpdk/examples/ip_reassembly/main.c index d59e6d02ff..29b34d0710 100644 --- a/dpdk/examples/ip_reassembly/main.c @@ -34172,6 +66961,19 @@ index d59e6d02ff..29b34d0710 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/ipsec-secgw/Makefile b/dpdk/examples/ipsec-secgw/Makefile +index a4977f61f8..38c456daa2 100644 +--- a/dpdk/examples/ipsec-secgw/Makefile ++++ b/dpdk/examples/ipsec-secgw/Makefile +@@ -33,7 +33,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c index 3b5aaf6832..1493be9025 100644 --- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -34197,6 +66999,211 @@ index 2eb5c8b345..37f406d46c 100644 for (i = 0, n = 0; i != num; i++) { if (sa != sa_ptr[i]) { +diff --git a/dpdk/examples/ipsec-secgw/parser.c b/dpdk/examples/ipsec-secgw/parser.c +index fc8c238fe5..dfabac2dbf 
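Both parser.c rewrites in this patch (ip_pipeline above, ipsec-secgw below) drop roughly 170 lines of vendored inet_pton4()/inet_pton6() in favor of the standard inet_pton(3), which performs the same presentation-to-network conversion. The standard calls in isolation:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char out[INET6_ADDRSTRLEN];

	/* inet_pton() returns 1 on success, 0 on malformed input,
	 * -1 on an unknown address family. */
	if (inet_pton(AF_INET, "192.0.2.1", &v4) != 1)
		return 1;
	if (inet_pton(AF_INET6, "2001:db8::1", &v6) != 1)
		return 1;

	printf("%s\n", inet_ntop(AF_INET6, &v6, out, sizeof(out)));
	return 0;
}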
100644 +--- a/dpdk/examples/ipsec-secgw/parser.c ++++ b/dpdk/examples/ipsec-secgw/parser.c +@@ -1,6 +1,9 @@ + /* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ ++#include ++#include ++ + #include + #include + #include +@@ -39,172 +42,6 @@ parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) + return 0; + } + +-#define INADDRSZ 4 +-#define IN6ADDRSZ 16 +- +-/* int +- * inet_pton4(src, dst) +- * like inet_aton() but without all the hexadecimal and shorthand. +- * return: +- * 1 if `src' is a valid dotted quad, else 0. +- * notice: +- * does not touch `dst' unless it's returning 1. +- * author: +- * Paul Vixie, 1996. +- */ +-static int +-inet_pton4(const char *src, unsigned char *dst) +-{ +- static const char digits[] = "0123456789"; +- int saw_digit, octets, ch; +- unsigned char tmp[INADDRSZ], *tp; +- +- saw_digit = 0; +- octets = 0; +- *(tp = tmp) = 0; +- while ((ch = *src++) != '\0') { +- const char *pch; +- +- pch = strchr(digits, ch); +- if (pch != NULL) { +- unsigned int new = *tp * 10 + (pch - digits); +- +- if (new > 255) +- return 0; +- if (!saw_digit) { +- if (++octets > 4) +- return 0; +- saw_digit = 1; +- } +- *tp = (unsigned char)new; +- } else if (ch == '.' && saw_digit) { +- if (octets == 4) +- return 0; +- *++tp = 0; +- saw_digit = 0; +- } else +- return 0; +- } +- if (octets < 4) +- return 0; +- +- memcpy(dst, tmp, INADDRSZ); +- return 1; +-} +- +-/* int +- * inet_pton6(src, dst) +- * convert presentation level address to network order binary form. +- * return: +- * 1 if `src' is a valid [RFC1884 2.2] address, else 0. +- * notice: +- * (1) does not touch `dst' unless it's returning 1. +- * (2) :: in a full address is silently ignored. +- * credit: +- * inspired by Mark Andrews. +- * author: +- * Paul Vixie, 1996. +- */ +-static int +-inet_pton6(const char *src, unsigned char *dst) +-{ +- static const char xdigits_l[] = "0123456789abcdef", +- xdigits_u[] = "0123456789ABCDEF"; +- unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; +- const char *xdigits = 0, *curtok = 0; +- int ch = 0, saw_xdigit = 0, count_xdigit = 0; +- unsigned int val = 0; +- unsigned dbloct_count = 0; +- +- memset((tp = tmp), '\0', IN6ADDRSZ); +- endp = tp + IN6ADDRSZ; +- colonp = NULL; +- /* Leading :: requires some special handling. */ +- if (*src == ':') +- if (*++src != ':') +- return 0; +- curtok = src; +- saw_xdigit = count_xdigit = 0; +- val = 0; +- +- while ((ch = *src++) != '\0') { +- const char *pch; +- +- pch = strchr((xdigits = xdigits_l), ch); +- if (pch == NULL) +- pch = strchr((xdigits = xdigits_u), ch); +- if (pch != NULL) { +- if (count_xdigit >= 4) +- return 0; +- val <<= 4; +- val |= (pch - xdigits); +- if (val > 0xffff) +- return 0; +- saw_xdigit = 1; +- count_xdigit++; +- continue; +- } +- if (ch == ':') { +- curtok = src; +- if (!saw_xdigit) { +- if (colonp) +- return 0; +- colonp = tp; +- continue; +- } else if (*src == '\0') { +- return 0; +- } +- if (tp + sizeof(int16_t) > endp) +- return 0; +- *tp++ = (unsigned char) ((val >> 8) & 0xff); +- *tp++ = (unsigned char) (val & 0xff); +- saw_xdigit = 0; +- count_xdigit = 0; +- val = 0; +- dbloct_count++; +- continue; +- } +- if (ch == '.' && ((tp + INADDRSZ) <= endp) && +- inet_pton4(curtok, tp) > 0) { +- tp += INADDRSZ; +- saw_xdigit = 0; +- dbloct_count += 2; +- break; /* '\0' was seen by inet_pton4(). 
*/ +- } +- return 0; +- } +- if (saw_xdigit) { +- if (tp + sizeof(int16_t) > endp) +- return 0; +- *tp++ = (unsigned char) ((val >> 8) & 0xff); +- *tp++ = (unsigned char) (val & 0xff); +- dbloct_count++; +- } +- if (colonp != NULL) { +- /* if we already have 8 double octets, having a colon +- * means error */ +- if (dbloct_count == 8) +- return 0; +- +- /* +- * Since some memmove()'s erroneously fail to handle +- * overlapping regions, we'll do the shift by hand. +- */ +- const int n = tp - colonp; +- int i; +- +- for (i = 1; i <= n; i++) { +- endp[-i] = colonp[n - i]; +- colonp[n - i] = 0; +- } +- tp = endp; +- } +- if (tp != endp) +- return 0; +- memcpy(dst, tmp, IN6ADDRSZ); +- return 1; +-} +- + int + parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask) + { +@@ -229,7 +66,7 @@ parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask) + if (strlen(ip_str) >= INET_ADDRSTRLEN) + return -EINVAL; + +- if (inet_pton4(ip_str, (unsigned char *)ipv4) != 1) ++ if (inet_pton(AF_INET, ip_str, ipv4) != 1) + return -EINVAL; + + return 0; +@@ -260,7 +97,7 @@ parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask) + if (strlen(ip_str) >= INET6_ADDRSTRLEN) + return -EINVAL; + +- if (inet_pton6(ip_str, (unsigned char *)ipv6) != 1) ++ if (inet_pton(AF_INET6, ip_str, ipv6) != 1) + return -EINVAL; + + return 0; diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c index 7f046e3ed7..fcc6695388 100644 --- a/dpdk/examples/ipsec-secgw/sa.c @@ -34283,6 +67290,19 @@ index 7f046e3ed7..fcc6695388 100644 } return 0; +diff --git a/dpdk/examples/ipv4_multicast/Makefile b/dpdk/examples/ipv4_multicast/Makefile +index 92d3db0f4d..5e34bbba00 100644 +--- a/dpdk/examples/ipv4_multicast/Makefile ++++ b/dpdk/examples/ipv4_multicast/Makefile +@@ -23,7 +23,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/ipv4_multicast/main.c b/dpdk/examples/ipv4_multicast/main.c index 63333b5b69..09d9270aff 100644 --- a/dpdk/examples/ipv4_multicast/main.c @@ -34296,11 +67316,35 @@ index 63333b5b69..09d9270aff 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/kni/Makefile b/dpdk/examples/kni/Makefile +index c7ca96d8a0..10b42891da 100644 +--- a/dpdk/examples/kni/Makefile ++++ b/dpdk/examples/kni/Makefile +@@ -23,7 +23,9 @@ PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + CFLAGS += -DALLOW_EXPERIMENTAL_API + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) ++ ++LDFLAGS += -pthread + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/kni/main.c b/dpdk/examples/kni/main.c -index 5f713e6b22..d48a59fcb1 100644 +index 5f713e6b22..7edc73ab91 100644 --- a/dpdk/examples/kni/main.c +++ b/dpdk/examples/kni/main.c -@@ -679,7 +679,7 @@ check_all_ports_link_status(uint32_t port_mask) +@@ -158,6 +158,8 @@ 
print_stats(void) + kni_stats[i].tx_dropped); + } + printf("====== ============== ============ ============ ============ ============\n"); ++ ++ fflush(stdout); + } + + /* Custom handling of signals to handle stats and kni processing */ +@@ -679,7 +681,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up - speed %uMbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? @@ -34309,7 +67353,7 @@ index 5f713e6b22..d48a59fcb1 100644 else printf("Port %d Link Down\n", portid); continue; -@@ -764,15 +764,16 @@ monitor_all_ports_link_status(void *arg) +@@ -764,15 +766,16 @@ monitor_all_ports_link_status(void *arg) return NULL; } @@ -34328,7 +67372,7 @@ index 5f713e6b22..d48a59fcb1 100644 if (!rte_eth_dev_is_valid_port(port_id)) { RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id); -@@ -800,7 +801,7 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) +@@ -800,7 +803,7 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) return ret; } @@ -34337,7 +67381,7 @@ index 5f713e6b22..d48a59fcb1 100644 if (ret < 0) rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors " "for port%u (%d)\n", (unsigned int)port_id, -@@ -825,6 +826,16 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) +@@ -825,6 +828,16 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) return ret; } @@ -34354,7 +67398,7 @@ index 5f713e6b22..d48a59fcb1 100644 /* Restart specific port */ ret = rte_eth_dev_start(port_id); if (ret < 0) { -@@ -835,6 +846,19 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) +@@ -835,6 +848,19 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) return 0; } @@ -34374,11 +67418,52 @@ index 5f713e6b22..d48a59fcb1 100644 /* Callback for request of configuring network interface up/down */ static int kni_config_network_interface(uint16_t port_id, uint8_t if_up) +diff --git a/dpdk/examples/l2fwd-cat/Makefile b/dpdk/examples/l2fwd-cat/Makefile +index b0e53c37e8..e8fdc46d74 100644 +--- a/dpdk/examples/l2fwd-cat/Makefile ++++ b/dpdk/examples/l2fwd-cat/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + LDFLAGS += -lpqos + +diff --git a/dpdk/examples/l2fwd-crypto/Makefile b/dpdk/examples/l2fwd-crypto/Makefile +index 2f1405a72b..cafe778fc6 100644 +--- a/dpdk/examples/l2fwd-crypto/Makefile ++++ b/dpdk/examples/l2fwd-crypto/Makefile +@@ -22,7 +22,13 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) ++ ++CFLAGS += -DALLOW_EXPERIMENTAL_API ++CONFIG_DEFINES = $(shell $(CC) $(CFLAGS) -dM -E - < /dev/null) ++ifneq ($(findstring RTE_CRYPTO_SCHEDULER,$(CONFIG_DEFINES)),) ++LDFLAGS_SHARED += -lrte_crypto_scheduler ++endif + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/l2fwd-crypto/main.c b/dpdk/examples/l2fwd-crypto/main.c -index 61d78295d4..fcb55c370a 100644 +index 61d78295d4..827da9b3e3 100644 --- a/dpdk/examples/l2fwd-crypto/main.c +++ 
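kni_change_mtu() above restarts the port with new settings; the surrounding code sizes its rings through rte_eth_dev_adjust_nb_rx_tx_desc(), which clamps the requested descriptor counts to the device's limits and hands the adjusted values back through the pointers. A sketch of the call; port 0 and the 1024/512 counts are arbitrary here:

#include <stdio.h>
#include <rte_ethdev.h>

static int
setup_rings(uint16_t port_id)
{
	uint16_t nb_rxd = 1024, nb_txd = 512;
	int ret;

	/* The PMD may round these to its own min/max/alignment rules;
	 * use the adjusted values for the queue setup that follows. */
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret < 0)
		return ret;

	printf("using %u rx and %u tx descriptors\n", nb_rxd, nb_txd);
	/* rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() would follow */
	return 0;
}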
b/dpdk/examples/l2fwd-crypto/main.c -@@ -1756,7 +1756,7 @@ check_all_ports_link_status(uint32_t port_mask) +@@ -334,6 +334,8 @@ print_stats(void) + total_packets_dropped, + total_packets_errors); + printf("\n====================================================\n"); ++ ++ fflush(stdout); + } + + static int +@@ -1756,7 +1758,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up. Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? @@ -34387,6 +67472,33 @@ index 61d78295d4..fcb55c370a 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/l2fwd-crypto/meson.build b/dpdk/examples/l2fwd-crypto/meson.build +index 6c852ad199..c08d8469b4 100644 +--- a/dpdk/examples/l2fwd-crypto/meson.build ++++ b/dpdk/examples/l2fwd-crypto/meson.build +@@ -7,6 +7,9 @@ + # DPDK instance, use 'make' + + deps += 'cryptodev' ++if dpdk_conf.has('RTE_LIBRTE_CRYPTO_SCHEDULER_PMD') ++ deps += 'pmd_crypto_scheduler' ++endif + allow_experimental_apis = true + sources = files( + 'main.c' +diff --git a/dpdk/examples/l2fwd-event/Makefile b/dpdk/examples/l2fwd-event/Makefile +index 4cdae36f17..bf0ee890d5 100644 +--- a/dpdk/examples/l2fwd-event/Makefile ++++ b/dpdk/examples/l2fwd-event/Makefile +@@ -28,7 +28,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/l2fwd-event/l2fwd_common.c b/dpdk/examples/l2fwd-event/l2fwd_common.c index 181301fe6b..ab341e55b2 100644 --- a/dpdk/examples/l2fwd-event/l2fwd_common.c @@ -34545,7 +67657,7 @@ index a3a3835582..2033c65e54 100644 rsrc->rx_queue_per_lcore) { rx_lcore_id++; diff --git a/dpdk/examples/l2fwd-event/main.c b/dpdk/examples/l2fwd-event/main.c -index 89a6bb9a44..384b71238f 100644 +index 89a6bb9a44..7969a1c313 100644 --- a/dpdk/examples/l2fwd-event/main.c +++ b/dpdk/examples/l2fwd-event/main.c @@ -263,7 +263,7 @@ check_all_ports_link_status(struct l2fwd_resources *rsrc, @@ -34557,11 +67669,43 @@ index 89a6bb9a44..384b71238f 100644 else printf("Port %d Link Down\n", port_id); continue; +@@ -385,6 +385,8 @@ print_stats(struct l2fwd_resources *rsrc) + total_packets_rx, + total_packets_dropped); + printf("\n====================================================\n"); ++ ++ fflush(stdout); + } + + static void +diff --git a/dpdk/examples/l2fwd-jobstats/Makefile b/dpdk/examples/l2fwd-jobstats/Makefile +index 73c91faa8d..c26803909f 100644 +--- a/dpdk/examples/l2fwd-jobstats/Makefile ++++ b/dpdk/examples/l2fwd-jobstats/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/l2fwd-jobstats/main.c b/dpdk/examples/l2fwd-jobstats/main.c -index f975aa12d0..e0255080e2 100644 +index f975aa12d0..c2180c2d1c 100644 --- 
a/dpdk/examples/l2fwd-jobstats/main.c +++ b/dpdk/examples/l2fwd-jobstats/main.c -@@ -710,7 +710,7 @@ check_all_ports_link_status(uint32_t port_mask) +@@ -329,6 +329,9 @@ show_stats_cb(__rte_unused void *param) + } + + printf("\n====================================================\n"); ++ ++ fflush(stdout); ++ + rte_eal_alarm_set(timer_period * US_PER_S, show_stats_cb, NULL); + } + +@@ -710,7 +713,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up. Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? @@ -34570,8 +67714,21 @@ index f975aa12d0..e0255080e2 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/l2fwd-keepalive/Makefile b/dpdk/examples/l2fwd-keepalive/Makefile +index 94d1e58bb5..ea3a9cbbf3 100644 +--- a/dpdk/examples/l2fwd-keepalive/Makefile ++++ b/dpdk/examples/l2fwd-keepalive/Makefile +@@ -24,7 +24,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/l2fwd-keepalive/main.c b/dpdk/examples/l2fwd-keepalive/main.c -index b36834974e..3d59e2ca90 100644 +index b36834974e..ba2745c915 100644 --- a/dpdk/examples/l2fwd-keepalive/main.c +++ b/dpdk/examples/l2fwd-keepalive/main.c @@ -44,7 +44,7 @@ @@ -34583,7 +67740,16 @@ index b36834974e..3d59e2ca90 100644 #define MAX_PKT_BURST 32 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ -@@ -475,7 +475,7 @@ check_all_ports_link_status(uint32_t port_mask) +@@ -160,6 +160,8 @@ print_stats(__attribute__((unused)) struct rte_timer *ptr_timer, + total_packets_rx, + total_packets_dropped); + printf("\n====================================================\n"); ++ ++ fflush(stdout); + } + + static void +@@ -475,7 +477,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up. Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
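l2fwd-jobstats drives its statistics display from an EAL alarm that re-arms itself inside the callback; the hunk above only adds the fflush(), but the re-arming pattern is the part worth seeing whole. A fragment rather than a full program, since arming it requires a running EAL:

#include <stdio.h>
#include <rte_alarm.h>

#define US_PER_S 1000000

/* rte_eal_alarm_set() is one-shot by design: re-arm on each firing
 * to get periodic behavior. */
static void
show_stats_cb(void *arg)
{
	(void)arg;
	printf("...periodic stats would print here...\n");
	fflush(stdout);
	rte_eal_alarm_set(US_PER_S, show_stats_cb, NULL);
}

/* after rte_eal_init(): rte_eal_alarm_set(US_PER_S, show_stats_cb, NULL); */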
@@ -34592,7 +67758,7 @@ index b36834974e..3d59e2ca90 100644 else printf("Port %d Link Down\n", portid); continue; -@@ -536,6 +536,7 @@ main(int argc, char **argv) +@@ -536,6 +538,7 @@ main(int argc, char **argv) uint16_t portid, last_port; unsigned lcore_id, rx_lcore_id; unsigned nb_ports_in_mask = 0; @@ -34600,7 +67766,7 @@ index b36834974e..3d59e2ca90 100644 struct sigaction signal_handler; struct rte_keepalive_shm *ka_shm; -@@ -561,16 +562,19 @@ main(int argc, char **argv) +@@ -561,16 +564,19 @@ main(int argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n"); @@ -34626,11 +67792,52 @@ index b36834974e..3d59e2ca90 100644 /* reset l2fwd_dst_ports */ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) l2fwd_dst_ports[portid] = 0; +diff --git a/dpdk/examples/l2fwd-keepalive/meson.build b/dpdk/examples/l2fwd-keepalive/meson.build +index 6f7b007e1e..aecc8d9fc9 100644 +--- a/dpdk/examples/l2fwd-keepalive/meson.build ++++ b/dpdk/examples/l2fwd-keepalive/meson.build +@@ -6,7 +6,13 @@ + # To build this example as a standalone application with an already-installed + # DPDK instance, use 'make' + +-ext_deps += cc.find_library('rt') ++librt = cc.find_library('rt', required: false) ++if not librt.found() ++ build = false ++ subdir_done() ++endif ++ ++ext_deps += librt + deps += 'timer' + sources = files( + 'main.c', 'shm.c' +diff --git a/dpdk/examples/l2fwd/Makefile b/dpdk/examples/l2fwd/Makefile +index 8b7b26cb90..15105ac57e 100644 +--- a/dpdk/examples/l2fwd/Makefile ++++ b/dpdk/examples/l2fwd/Makefile +@@ -24,7 +24,7 @@ CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + # Add flag to allow experimental API as l2fwd uses rte_ethdev_set_ptype API + CFLAGS += -DALLOW_EXPERIMENTAL_API + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/l2fwd/main.c b/dpdk/examples/l2fwd/main.c -index 09257aab1c..fcef232731 100644 +index 09257aab1c..e3767a3159 100644 --- a/dpdk/examples/l2fwd/main.c +++ b/dpdk/examples/l2fwd/main.c -@@ -478,7 +478,7 @@ check_all_ports_link_status(uint32_t port_mask) +@@ -146,6 +146,8 @@ print_stats(void) + total_packets_rx, + total_packets_dropped); + printf("\n====================================================\n"); ++ ++ fflush(stdout); + } + + static void +@@ -478,7 +480,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up. Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
@@ -34639,6 +67846,19 @@ index 09257aab1c..fcef232731 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/l3fwd-acl/Makefile b/dpdk/examples/l3fwd-acl/Makefile +index d9909584b1..156dc19606 100644 +--- a/dpdk/examples/l3fwd-acl/Makefile ++++ b/dpdk/examples/l3fwd-acl/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/l3fwd-acl/main.c b/dpdk/examples/l3fwd-acl/main.c index fa92a28297..cfbeee962b 100644 --- a/dpdk/examples/l3fwd-acl/main.c @@ -34652,8 +67872,21 @@ index fa92a28297..cfbeee962b 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/l3fwd-power/Makefile b/dpdk/examples/l3fwd-power/Makefile +index 729d49639b..74441f98cf 100644 +--- a/dpdk/examples/l3fwd-power/Makefile ++++ b/dpdk/examples/l3fwd-power/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + diff --git a/dpdk/examples/l3fwd-power/main.c b/dpdk/examples/l3fwd-power/main.c -index d049d8a5dc..aa6ff2627b 100644 +index d049d8a5dc..7fe5cbf577 100644 --- a/dpdk/examples/l3fwd-power/main.c +++ b/dpdk/examples/l3fwd-power/main.c @@ -880,9 +880,6 @@ sleep_until_rx_interrupt(int num) @@ -34711,10 +67944,55 @@ index d049d8a5dc..aa6ff2627b 100644 else printf("Port %d Link Down\n", (uint8_t)portid); +@@ -2444,9 +2445,7 @@ main(int argc, char **argv) + if (add_cb_parse_ptype(portid, queueid) < 0) + rte_exit(EXIT_FAILURE, + "Fail to add ptype cb\n"); +- } else if (!check_ptype(portid)) +- rte_exit(EXIT_FAILURE, +- "PMD can not provide needed ptypes\n"); ++ } + } + } + +@@ -2477,6 +2476,11 @@ main(int argc, char **argv) + } + /* initialize spinlock for each port */ + rte_spinlock_init(&(locks[portid])); ++ ++ if (!parse_ptype) ++ if (!check_ptype(portid)) ++ rte_exit(EXIT_FAILURE, ++ "PMD can not provide needed ptypes\n"); + } + + check_all_ports_link_status(enabled_port_mask); +diff --git a/dpdk/examples/l3fwd/Makefile b/dpdk/examples/l3fwd/Makefile +index b2dbf26075..38a370c2cf 100644 +--- a/dpdk/examples/l3fwd/Makefile ++++ b/dpdk/examples/l3fwd/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c -index 4dea12a653..3a8ec5a7f2 100644 +index 4dea12a653..b98040df45 100644 --- a/dpdk/examples/l3fwd/main.c +++ b/dpdk/examples/l3fwd/main.c +@@ -53,7 +53,7 @@ + #define RTE_TEST_RX_DESC_DEFAULT 
1024 + #define RTE_TEST_TX_DESC_DEFAULT 1024 + +-#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS ++#define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE + #define MAX_RX_QUEUE_PER_PORT 128 + + #define MAX_LCORE_PARAMS 1024 @@ -747,7 +747,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up. Speed %u Mbps -%s\n", portid, link.link_speed, @@ -34724,11 +68002,33 @@ index 4dea12a653..3a8ec5a7f2 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/link_status_interrupt/Makefile b/dpdk/examples/link_status_interrupt/Makefile +index 4f02a89013..879ee7384d 100644 +--- a/dpdk/examples/link_status_interrupt/Makefile ++++ b/dpdk/examples/link_status_interrupt/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/link_status_interrupt/main.c b/dpdk/examples/link_status_interrupt/main.c -index a924aa2313..72f86e502f 100644 +index a924aa2313..6b8c153b58 100644 --- a/dpdk/examples/link_status_interrupt/main.c +++ b/dpdk/examples/link_status_interrupt/main.c -@@ -500,7 +500,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask) +@@ -162,6 +162,8 @@ print_stats(void) + total_packets_rx, + total_packets_dropped); + printf("\n====================================================\n"); ++ ++ fflush(stdout); + } + + static void +@@ -500,7 +502,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask) "Port%d Link Up. Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? @@ -34737,6 +68037,66 @@ index a924aa2313..72f86e502f 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/meson.build b/dpdk/examples/meson.build +index 1f2b6f5168..9e081f3fb7 100644 +--- a/dpdk/examples/meson.build ++++ b/dpdk/examples/meson.build +@@ -1,9 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2017-2019 Intel Corporation + +-driver_libs = [] ++link_whole_libs = [] + if get_option('default_library') == 'static' +- driver_libs = dpdk_drivers ++ link_whole_libs = dpdk_static_libraries + dpdk_drivers + endif + + execinfo = cc.find_library('execinfo', required: false) +@@ -46,13 +46,6 @@ all_examples = [ + 'vm_power_manager/guest_cli', + 'vmdq', 'vmdq_dcb', + ] +-# install all example code on install - irrespective of whether the example in +-# question is to be built as part of this build or not. 
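The recurring change across the example apps above is a one-line addition of fflush(stdout) after each periodic statistics dump. A minimal stand-alone sketch of the pattern (the counters and output format are illustrative, not taken from the patch):

#include <stdio.h>

/* Stand-in counters for the per-port totals the example apps track. */
static unsigned long total_packets_tx;
static unsigned long total_packets_rx;
static unsigned long total_packets_dropped;

static void
print_stats(void)
{
	printf("\nPackets sent: %20lu", total_packets_tx);
	printf("\nPackets received: %16lu", total_packets_rx);
	printf("\nPackets dropped: %17lu", total_packets_dropped);
	printf("\n====================================================\n");

	/*
	 * stdout is fully buffered when redirected to a file or pipe, so
	 * without an explicit flush the periodic dump only appears once
	 * the buffer fills; flushing makes each dump visible immediately.
	 */
	fflush(stdout);
}

int
main(void)
{
	print_stats();
	return 0;
}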
+-foreach ex:all_examples +- install_subdir(ex, +- install_dir: get_option('datadir') + '/dpdk/examples', +- exclude_files: 'meson.build') +-endforeach + + if get_option('examples') == '' + subdir_done() +@@ -69,6 +62,10 @@ default_cflags = machine_args + if cc.has_argument('-Wno-format-truncation') + default_cflags += '-Wno-format-truncation' + endif ++default_ldflags = dpdk_extra_ldflags ++if get_option('default_library') == 'static' and not is_windows ++ default_ldflags += ['-Wl,--export-dynamic'] ++endif + + foreach example: examples + name = example.split('/')[-1] +@@ -76,6 +73,7 @@ foreach example: examples + sources = [] + allow_experimental_apis = false + cflags = default_cflags ++ ldflags = default_ldflags + + ext_deps = [execinfo] + includes = [include_directories(example)] +@@ -99,8 +97,8 @@ foreach example: examples + endif + executable('dpdk-' + name, sources, + include_directories: includes, +- link_whole: driver_libs, +- link_args: dpdk_extra_ldflags, ++ link_whole: link_whole_libs, ++ link_args: ldflags, + c_args: cflags, + dependencies: dep_objs) + elif not allow_skips diff --git a/dpdk/examples/multi_process/client_server_mp/mp_server/init.c b/dpdk/examples/multi_process/client_server_mp/mp_server/init.c index ad9f46f0aa..c2ec07ac65 100644 --- a/dpdk/examples/multi_process/client_server_mp/mp_server/init.c @@ -34750,6 +68110,57 @@ index ad9f46f0aa..c2ec07ac65 100644 else printf("Port %d Link Down\n", (uint8_t)ports->id[portid]); +diff --git a/dpdk/examples/multi_process/client_server_mp/mp_server/main.c b/dpdk/examples/multi_process/client_server_mp/mp_server/main.c +index 802e29d10d..1084b303ff 100644 +--- a/dpdk/examples/multi_process/client_server_mp/mp_server/main.c ++++ b/dpdk/examples/multi_process/client_server_mp/mp_server/main.c +@@ -59,12 +59,17 @@ static struct client_rx_buf *cl_rx_buf; + static const char * + get_printable_mac_addr(uint16_t port) + { +- static const char err_address[] = "00:00:00:00:00:00"; +- static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)]; ++ static const struct rte_ether_addr null_mac; /* static defaults to 0 */ ++ static char err_address[32]; ++ static char addresses[RTE_MAX_ETHPORTS][32]; + int ret; + +- if (unlikely(port >= RTE_MAX_ETHPORTS)) ++ if (unlikely(port >= RTE_MAX_ETHPORTS)) { ++ if (err_address[0] == '\0') ++ rte_ether_format_addr(err_address, ++ sizeof(err_address), &null_mac); + return err_address; ++ } + if (unlikely(addresses[port][0]=='\0')){ + struct rte_ether_addr mac; + ret = rte_eth_macaddr_get(port, &mac); +@@ -73,10 +78,8 @@ get_printable_mac_addr(uint16_t port) + port, rte_strerror(-ret)); + return err_address; + } +- snprintf(addresses[port], sizeof(addresses[port]), +- "%02x:%02x:%02x:%02x:%02x:%02x\n", +- mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2], +- mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]); ++ rte_ether_format_addr(addresses[port], ++ sizeof(addresses[port]), &mac); + } + return addresses[port]; + } +diff --git a/dpdk/examples/multi_process/client_server_mp/shared/common.h b/dpdk/examples/multi_process/client_server_mp/shared/common.h +index 6dd43fcac2..76beca0101 100644 +--- a/dpdk/examples/multi_process/client_server_mp/shared/common.h ++++ b/dpdk/examples/multi_process/client_server_mp/shared/common.h +@@ -43,7 +43,7 @@ struct port_info { + * Given the rx queue name template above, get the queue name + */ + static inline const char * +-get_rx_queue_name(unsigned id) ++get_rx_queue_name(uint8_t id) + { + /* buffer for return value. 
Size calculated by %u being replaced + * by maximum 3 digits (plus an extra byte for safety) */ diff --git a/dpdk/examples/multi_process/symmetric_mp/main.c b/dpdk/examples/multi_process/symmetric_mp/main.c index 7f491452a7..c5cd8825e5 100644 --- a/dpdk/examples/multi_process/symmetric_mp/main.c @@ -34763,6 +68174,28 @@ index 7f491452a7..c5cd8825e5 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/ntb/Makefile b/dpdk/examples/ntb/Makefile +index baeba11e85..f41ccfac27 100644 +--- a/dpdk/examples/ntb/Makefile ++++ b/dpdk/examples/ntb/Makefile +@@ -26,7 +26,7 @@ LDFLAGS += -pthread + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +@@ -40,7 +40,7 @@ build: + .PHONY: clean + clean: + rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared +- rmdir --ignore-fail-on-non-empty build ++ test -d build && rmdir -p build || true + + else # Build using legacy build system + diff --git a/dpdk/examples/ntb/ntb_fwd.c b/dpdk/examples/ntb/ntb_fwd.c index c914256dd4..17eedcf0b8 100644 --- a/dpdk/examples/ntb/ntb_fwd.c @@ -34787,11 +68220,76 @@ index c914256dd4..17eedcf0b8 100644 memset(&mbp_priv, 0, sizeof(mbp_priv)); mbp_priv.mbuf_data_room_size = mbuf_seg_size; mbp_priv.mbuf_priv_size = 0; +diff --git a/dpdk/examples/packet_ordering/Makefile b/dpdk/examples/packet_ordering/Makefile +index 261b7f06a8..1e50389421 100644 +--- a/dpdk/examples/packet_ordering/Makefile ++++ b/dpdk/examples/packet_ordering/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c +index b397b318e6..edaf810d94 100644 +--- a/dpdk/examples/packet_ordering/main.c ++++ b/dpdk/examples/packet_ordering/main.c +@@ -675,7 +675,7 @@ main(int argc, char **argv) + /* Initialize EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) +- return -1; ++ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); + + argc -= ret; + argv += ret; +@@ -683,7 +683,7 @@ main(int argc, char **argv) + /* Parse the application specific arguments */ + ret = parse_args(argc, argv); + if (ret < 0) +- return -1; ++ rte_exit(EXIT_FAILURE, "Invalid packet_ordering arguments\n"); + + /* Check if we have enought cores */ + if (rte_lcore_count() < 3) diff --git a/dpdk/examples/performance-thread/l3fwd-thread/main.c b/dpdk/examples/performance-thread/l3fwd-thread/main.c -index ad540fd842..f58a70b77f 100644 +index ad540fd842..96fbdcdde2 100644 --- a/dpdk/examples/performance-thread/l3fwd-thread/main.c +++ b/dpdk/examples/performance-thread/l3fwd-thread/main.c -@@ -3457,7 +3457,7 @@ check_all_ports_link_status(uint32_t port_mask) +@@ -2,6 +2,10 @@ + * Copyright(c) 2010-2016 Intel Corporation + */ + ++#ifndef _GNU_SOURCE ++#define _GNU_SOURCE ++#endif ++ 
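/*
 * Sketch (not part of the patch): the guard added above has to appear
 * before the first libc header, because feature-test macros such as
 * _GNU_SOURCE only take effect if they are defined before any system
 * header is included -- and the build may already pass -D_GNU_SOURCE on
 * the command line, which is why the #define is wrapped in #ifndef
 * rather than defined unconditionally. A self-contained illustration,
 * assuming glibc (the helper name is hypothetical):
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE	/* must precede <sched.h> for cpu_set_t/CPU_SET() */
#endif
#include <sched.h>

static int
pin_calling_thread_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* pid 0 means "the calling thread" for sched_setaffinity(). */
	return sched_setaffinity(0, sizeof(set), &set);
}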
+ #include + #include + #include +@@ -12,6 +16,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -599,8 +604,8 @@ struct thread_rx_conf rx_thread[MAX_RX_THREAD]; + struct thread_tx_conf { + struct thread_conf conf; + +- uint16_t tx_queue_id[RTE_MAX_LCORE]; +- struct mbuf_table tx_mbufs[RTE_MAX_LCORE]; ++ uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; ++ struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; + + struct rte_ring *ring; + struct lthread_cond **ready; +@@ -3457,7 +3462,7 @@ check_all_ports_link_status(uint32_t port_mask) "Port%d Link Up. Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? @@ -34800,6 +68298,58 @@ index ad540fd842..f58a70b77f 100644 else printf("Port %d Link Down\n", portid); continue; +diff --git a/dpdk/examples/ptpclient/Makefile b/dpdk/examples/ptpclient/Makefile +index 82d72b3e31..9e47a60117 100644 +--- a/dpdk/examples/ptpclient/Makefile ++++ b/dpdk/examples/ptpclient/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/qos_meter/Makefile b/dpdk/examples/qos_meter/Makefile +index 7c2bf88a90..f733facaeb 100644 +--- a/dpdk/examples/qos_meter/Makefile ++++ b/dpdk/examples/qos_meter/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + +diff --git a/dpdk/examples/qos_sched/Makefile b/dpdk/examples/qos_sched/Makefile +index 525061ca07..736ac00a36 100644 +--- a/dpdk/examples/qos_sched/Makefile ++++ b/dpdk/examples/qos_sched/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/qos_sched/args.c b/dpdk/examples/qos_sched/args.c +index 7431b29816..2727fd4f27 100644 +--- a/dpdk/examples/qos_sched/args.c ++++ b/dpdk/examples/qos_sched/args.c +@@ -39,7 +39,7 @@ static const char usage[] = + " multiple pfc can be configured in command line \n" + " \n" + "Application optional parameters: \n" +- " --i : run in interactive mode (default value is %u) \n" ++ " -i : run in interactive mode (default value is %u) \n" + " --mst I : master core index (default value is %u) \n" + " --rsz \"A, B, C\" : Ring sizes \n" + " A = Size (in number of buffer descriptors) of each of the NIC RX \n" diff --git a/dpdk/examples/qos_sched/cfg_file.c b/dpdk/examples/qos_sched/cfg_file.c index 5714c3f36d..f078e4f7de 100644 --- a/dpdk/examples/qos_sched/cfg_file.c @@ -34842,6 +68392,21 @@ index baa2b3eadc..23bc418d97 
100644 extern struct rte_sched_port_params port_params; extern struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS]; +diff --git a/dpdk/examples/rxtx_callbacks/Makefile b/dpdk/examples/rxtx_callbacks/Makefile +index 584b9fafb0..eecdcada3e 100644 +--- a/dpdk/examples/rxtx_callbacks/Makefile ++++ b/dpdk/examples/rxtx_callbacks/Makefile +@@ -22,7 +22,9 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) ++ ++CFLAGS += -DALLOW_EXPERIMENTAL_API + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/server_node_efd/server/init.c b/dpdk/examples/server_node_efd/server/init.c index 00e2e40599..378a74fa5c 100644 --- a/dpdk/examples/server_node_efd/server/init.c @@ -34855,6 +68420,58 @@ index 00e2e40599..378a74fa5c 100644 else printf("Port %d Link Down\n", info->id[portid]); +diff --git a/dpdk/examples/service_cores/Makefile b/dpdk/examples/service_cores/Makefile +index c47055813e..b8669fdf7e 100644 +--- a/dpdk/examples/service_cores/Makefile ++++ b/dpdk/examples/service_cores/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/skeleton/Makefile b/dpdk/examples/skeleton/Makefile +index 2c29004d79..5059f3123f 100644 +--- a/dpdk/examples/skeleton/Makefile ++++ b/dpdk/examples/skeleton/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/tep_termination/Makefile b/dpdk/examples/tep_termination/Makefile +index 645112498d..548ca3cee3 100644 +--- a/dpdk/examples/tep_termination/Makefile ++++ b/dpdk/examples/tep_termination/Makefile +@@ -24,7 +24,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -Wno-deprecated-declarations + +diff --git a/dpdk/examples/tep_termination/main.c b/dpdk/examples/tep_termination/main.c +index ab956ad7ce..b9fffca020 100644 +--- a/dpdk/examples/tep_termination/main.c ++++ b/dpdk/examples/tep_termination/main.c +@@ -1110,6 +1110,8 @@ print_stats(__rte_unused void *arg) + dev_ll = dev_ll->next; + } + printf("\n================================================\n"); ++ ++ fflush(stdout); + } + + return NULL; diff 
--git a/dpdk/examples/tep_termination/vxlan_setup.c b/dpdk/examples/tep_termination/vxlan_setup.c index eca119a728..4b44ccc143 100644 --- a/dpdk/examples/tep_termination/vxlan_setup.c @@ -34868,11 +68485,89 @@ index eca119a728..4b44ccc143 100644 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) RTE_LOG(WARNING, PORT, "hardware TSO offload is not supported\n"); +diff --git a/dpdk/examples/timer/Makefile b/dpdk/examples/timer/Makefile +index bf86339ab7..b34c8baa6b 100644 +--- a/dpdk/examples/timer/Makefile ++++ b/dpdk/examples/timer/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/vdpa/Makefile b/dpdk/examples/vdpa/Makefile +index 6a25497cd8..bc0b6793e6 100644 +--- a/dpdk/examples/vdpa/Makefile ++++ b/dpdk/examples/vdpa/Makefile +@@ -23,7 +23,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/vhost/Makefile b/dpdk/examples/vhost/Makefile +index f2b1615418..ef6f3550f3 100644 +--- a/dpdk/examples/vhost/Makefile ++++ b/dpdk/examples/vhost/Makefile +@@ -24,7 +24,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + +diff --git a/dpdk/examples/vhost/main.c b/dpdk/examples/vhost/main.c +index ab649bf147..312829e8b9 100644 +--- a/dpdk/examples/vhost/main.c ++++ b/dpdk/examples/vhost/main.c +@@ -1334,6 +1334,8 @@ print_stats(__rte_unused void *arg) + } + + printf("===================================================\n"); ++ ++ fflush(stdout); + } + + return NULL; +diff --git a/dpdk/examples/vhost_blk/Makefile b/dpdk/examples/vhost_blk/Makefile +index 39244320d8..3952791784 100644 +--- a/dpdk/examples/vhost_blk/Makefile ++++ b/dpdk/examples/vhost_blk/Makefile +@@ -25,7 +25,7 @@ LDFLAGS += -pthread + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + CFLAGS += -DALLOW_EXPERIMENTAL_API + diff --git a/dpdk/examples/vhost_blk/vhost_blk.c b/dpdk/examples/vhost_blk/vhost_blk.c -index 3182a488bb..b757c9228b 100644 +index 3182a488bb..d0c30a9c6f 100644 --- a/dpdk/examples/vhost_blk/vhost_blk.c +++ b/dpdk/examples/vhost_blk/vhost_blk.c -@@ -31,6 +31,8 @@ +@@ -2,6 +2,12 @@ + * Copyright(c) 2010-2019 Intel Corporation + */ + ++#ifndef _GNU_SOURCE ++#define 
_GNU_SOURCE ++#endif ++#include ++#include ++ + #include + #include + #include +@@ -31,6 +37,8 @@ (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) @@ -34881,7 +68576,7 @@ index 3182a488bb..b757c9228b 100644 /* Path to folder where character device will be created. Can be set by user. */ static char dev_pathname[PATH_MAX] = ""; static sem_t exit_sem; -@@ -856,6 +858,7 @@ new_device(int vid) +@@ -856,6 +864,7 @@ new_device(int vid) ctrlr->bdev->vid, i, &blk_vq->last_avail_idx, &blk_vq->last_used_idx); @@ -34889,7 +68584,7 @@ index 3182a488bb..b757c9228b 100644 blk_vq->avail_wrap_counter = blk_vq->last_avail_idx & (1 << 15); -@@ -993,11 +996,7 @@ vhost_blk_ctrlr_construct(const char *ctrlr_name) +@@ -993,11 +1002,7 @@ vhost_blk_ctrlr_construct(const char *ctrlr_name) } snprintf(dev_pathname, sizeof(dev_pathname), "%s/%s", path, ctrlr_name); @@ -34902,7 +68597,7 @@ index 3182a488bb..b757c9228b 100644 if (rte_vhost_driver_register(dev_pathname, 0) != 0) { fprintf(stderr, "socket %s already exists\n", dev_pathname); -@@ -1040,8 +1039,7 @@ signal_handler(__rte_unused int signum) +@@ -1040,8 +1045,7 @@ signal_handler(__rte_unused int signum) { struct vhost_blk_ctrlr *ctrlr; @@ -34912,6 +68607,19 @@ index 3182a488bb..b757c9228b 100644 if (g_should_stop != -1) { g_should_stop = 1; +@@ -1084,7 +1088,11 @@ int main(int argc, char *argv[]) + return -1; + } + +- rte_vhost_driver_start(dev_pathname); ++ ret = rte_vhost_driver_start(dev_pathname); ++ if (ret < 0) { ++ fprintf(stderr, "Failed to start vhost driver.\n"); ++ return -1; ++ } + + /* loop for exit the application */ + while (1) diff --git a/dpdk/examples/vhost_blk/vhost_blk.h b/dpdk/examples/vhost_blk/vhost_blk.h index 933e2b7c57..17258d284b 100644 --- a/dpdk/examples/vhost_blk/vhost_blk.h @@ -34927,8 +68635,43 @@ index 933e2b7c57..17258d284b 100644 int vhost_bdev_process_blk_commands(struct vhost_block_dev *bdev, struct vhost_blk_task *task); +diff --git a/dpdk/examples/vhost_crypto/Makefile b/dpdk/examples/vhost_crypto/Makefile +index ae8cb81f87..28e3e4de74 100644 +--- a/dpdk/examples/vhost_crypto/Makefile ++++ b/dpdk/examples/vhost_crypto/Makefile +@@ -23,7 +23,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) +diff --git a/dpdk/examples/vhost_crypto/main.c b/dpdk/examples/vhost_crypto/main.c +index 1d7ba94196..405d16966d 100644 +--- a/dpdk/examples/vhost_crypto/main.c ++++ b/dpdk/examples/vhost_crypto/main.c +@@ -195,7 +195,7 @@ vhost_crypto_usage(const char *prgname) + { + printf("%s [EAL options] --\n" + " --%s ,SOCKET-FILE-PATH\n" +- " --%s (lcore,cdev_id,queue_id)[,(lcore,cdev_id,queue_id)]" ++ " --%s (lcore,cdev_id,queue_id)[,(lcore,cdev_id,queue_id)]\n" + " --%s: zero copy\n" + " --%s: guest polling\n", + prgname, SOCKET_FILE_KEYWORD, CONFIG_KEYWORD, +@@ -544,7 +544,7 @@ main(int argc, char *argv[]) + snprintf(name, 127, "COPPOOL_%u", lo->lcore_id); + info->cop_pool = rte_crypto_op_pool_create(name, + RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MEMPOOL_OBJS, +- NB_CACHE_OBJS, 0, ++ NB_CACHE_OBJS, VHOST_CRYPTO_MAX_IV_LEN, + rte_lcore_to_socket_id(lo->lcore_id)); + + if (!info->cop_pool) { diff --git 
a/dpdk/examples/vm_power_manager/channel_manager.c b/dpdk/examples/vm_power_manager/channel_manager.c -index 4ac21f02c1..74a2a677e8 100644 +index 4ac21f02c1..0a28cb643b 100644 --- a/dpdk/examples/vm_power_manager/channel_manager.c +++ b/dpdk/examples/vm_power_manager/channel_manager.c @@ -4,7 +4,6 @@ @@ -34939,7 +68682,14 @@ index 4ac21f02c1..74a2a677e8 100644 #include #include #include -@@ -35,6 +34,8 @@ +@@ -28,13 +27,14 @@ + #include + + #include "channel_manager.h" +-#include "channel_commands.h" + #include "channel_monitor.h" + #include "power_manager.h" + #define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1 @@ -34948,6 +68698,24 @@ index 4ac21f02c1..74a2a677e8 100644 /* Global pointer to libvirt connection */ static virConnectPtr global_vir_conn_ptr; +@@ -466,9 +466,15 @@ add_all_channels(const char *vm_name) + continue; + } + +- snprintf(chan_info->channel_path, ++ if ((size_t)snprintf(chan_info->channel_path, + sizeof(chan_info->channel_path), "%s%s", +- CHANNEL_MGR_SOCKET_PATH, dir->d_name); ++ CHANNEL_MGR_SOCKET_PATH, dir->d_name) ++ >= sizeof(chan_info->channel_path)) { ++ RTE_LOG(ERR, CHANNEL_MANAGER, "Pathname too long for channel '%s%s'\n", ++ CHANNEL_MGR_SOCKET_PATH, dir->d_name); ++ rte_free(chan_info); ++ continue; ++ } + + if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) { + rte_free(chan_info); diff --git a/dpdk/examples/vm_power_manager/channel_manager.h b/dpdk/examples/vm_power_manager/channel_manager.h index 8284be0a18..e55376fcdb 100644 --- a/dpdk/examples/vm_power_manager/channel_manager.h @@ -34983,20 +68751,850 @@ index 8284be0a18..e55376fcdb 100644 enum channel_status { CHANNEL_MGR_CHANNEL_DISCONNECTED = 0, CHANNEL_MGR_CHANNEL_CONNECTED, diff --git a/dpdk/examples/vm_power_manager/channel_monitor.c b/dpdk/examples/vm_power_manager/channel_monitor.c -index 090c2a98b0..1d00a6cf6c 100644 +index 090c2a98b0..fe5a183fd7 100644 --- a/dpdk/examples/vm_power_manager/channel_monitor.c +++ b/dpdk/examples/vm_power_manager/channel_monitor.c -@@ -868,7 +868,7 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) +@@ -35,7 +35,6 @@ + + #include + #include "channel_monitor.h" +-#include "channel_commands.h" + #include "channel_manager.h" + #include "power_manager.h" + #include "oob_monitor.h" +@@ -108,7 +107,7 @@ str_to_ether_addr(const char *a, struct rte_ether_addr *ether_addr) + } + + static int +-set_policy_mac(struct channel_packet *pkt, int idx, char *mac) ++set_policy_mac(struct rte_power_channel_packet *pkt, int idx, char *mac) + { + union PFID pfid; + int ret; +@@ -165,7 +164,7 @@ get_resource_id_from_vmname(const char *vm_name) + } + + static int +-parse_json_to_pkt(json_t *element, struct channel_packet *pkt, ++parse_json_to_pkt(json_t *element, struct rte_power_channel_packet *pkt, + const char *vm_name) + { + const char *key; +@@ -173,14 +172,14 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + int ret; + int resource_id; + +- memset(pkt, 0, sizeof(struct channel_packet)); ++ memset(pkt, 0, sizeof(*pkt)); + + pkt->nb_mac_to_monitor = 0; + pkt->t_boost_status.tbEnabled = false; +- pkt->workload = LOW; +- pkt->policy_to_use = TIME; +- pkt->command = PKT_POLICY; +- pkt->core_type = CORE_TYPE_PHYSICAL; ++ pkt->workload = RTE_POWER_WL_LOW; ++ pkt->policy_to_use = RTE_POWER_POLICY_TIME; ++ pkt->command = RTE_POWER_PKT_POLICY; ++ pkt->core_type = RTE_POWER_CORE_TYPE_PHYSICAL; + + if (vm_name == NULL) { + RTE_LOG(ERR, CHANNEL_MONITOR, +@@ -203,11 +202,11 @@ parse_json_to_pkt(json_t *element, struct channel_packet 
*pkt, + char command[32]; + strlcpy(command, json_string_value(value), 32); + if (!strcmp(command, "power")) { +- pkt->command = CPU_POWER; ++ pkt->command = RTE_POWER_CPU_POWER; + } else if (!strcmp(command, "create")) { +- pkt->command = PKT_POLICY; ++ pkt->command = RTE_POWER_PKT_POLICY; + } else if (!strcmp(command, "destroy")) { +- pkt->command = PKT_POLICY_REMOVE; ++ pkt->command = RTE_POWER_PKT_POLICY_REMOVE; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Invalid command received in JSON\n"); +@@ -217,13 +216,17 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + char command[32]; + strlcpy(command, json_string_value(value), 32); + if (!strcmp(command, "TIME")) { +- pkt->policy_to_use = TIME; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_TIME; + } else if (!strcmp(command, "TRAFFIC")) { +- pkt->policy_to_use = TRAFFIC; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_TRAFFIC; + } else if (!strcmp(command, "WORKLOAD")) { +- pkt->policy_to_use = WORKLOAD; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_WORKLOAD; + } else if (!strcmp(command, "BRANCH_RATIO")) { +- pkt->policy_to_use = BRANCH_RATIO; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_BRANCH_RATIO; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Wrong policy_type received in JSON\n"); +@@ -233,11 +236,11 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + char command[32]; + strlcpy(command, json_string_value(value), 32); + if (!strcmp(command, "HIGH")) { +- pkt->workload = HIGH; ++ pkt->workload = RTE_POWER_WL_HIGH; + } else if (!strcmp(command, "MEDIUM")) { +- pkt->workload = MEDIUM; ++ pkt->workload = RTE_POWER_WL_MEDIUM; + } else if (!strcmp(command, "LOW")) { +- pkt->workload = LOW; ++ pkt->workload = RTE_POWER_WL_LOW; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Wrong workload received in JSON\n"); +@@ -283,17 +286,17 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + char unit[32]; + strlcpy(unit, json_string_value(value), 32); + if (!strcmp(unit, "SCALE_UP")) { +- pkt->unit = CPU_POWER_SCALE_UP; ++ pkt->unit = RTE_POWER_SCALE_UP; + } else if (!strcmp(unit, "SCALE_DOWN")) { +- pkt->unit = CPU_POWER_SCALE_DOWN; ++ pkt->unit = RTE_POWER_SCALE_DOWN; + } else if (!strcmp(unit, "SCALE_MAX")) { +- pkt->unit = CPU_POWER_SCALE_MAX; ++ pkt->unit = RTE_POWER_SCALE_MAX; + } else if (!strcmp(unit, "SCALE_MIN")) { +- pkt->unit = CPU_POWER_SCALE_MIN; ++ pkt->unit = RTE_POWER_SCALE_MIN; + } else if (!strcmp(unit, "ENABLE_TURBO")) { +- pkt->unit = CPU_POWER_ENABLE_TURBO; ++ pkt->unit = RTE_POWER_ENABLE_TURBO; + } else if (!strcmp(unit, "DISABLE_TURBO")) { +- pkt->unit = CPU_POWER_DISABLE_TURBO; ++ pkt->unit = RTE_POWER_DISABLE_TURBO; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Invalid command received in JSON\n"); +@@ -312,7 +315,7 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + vm_name); + return -1; + } +- strlcpy(pkt->vm_name, vm_name, VM_MAX_NAME_SZ); ++ strlcpy(pkt->vm_name, vm_name, RTE_POWER_VM_MAX_NAME_SZ); + pkt->resource_id = resource_id; + } + return 0; +@@ -367,7 +370,7 @@ pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count) + { + int ret = 0; + +- if (pol->pkt.policy_to_use == BRANCH_RATIO) { ++ if (pol->pkt.policy_to_use == RTE_POWER_POLICY_BRANCH_RATIO) { + ci->cd[pcpu].oob_enabled = 1; + ret = add_core_to_monitor(pcpu); + if (ret == 0) +@@ -407,7 +410,7 @@ get_pcpu_to_control(struct policy *pol) + * differenciate between them when adding them to the branch monitor. + * Virtual cores need to be converted to physical cores. 
+ */ +- if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) { ++ if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) { + /* + * If the cores in the policy are virtual, we need to map them + * to physical core. We look up the vm info and use that for +@@ -463,7 +466,7 @@ get_pfid(struct policy *pol) + } + + static int +-update_policy(struct channel_packet *pkt) ++update_policy(struct rte_power_channel_packet *pkt) + { + + unsigned int updated = 0; +@@ -479,7 +482,8 @@ update_policy(struct channel_packet *pkt) + policies[i].pkt = *pkt; + get_pcpu_to_control(&policies[i]); + /* Check Eth dev only for Traffic policy */ +- if (policies[i].pkt.policy_to_use == TRAFFIC) { ++ if (policies[i].pkt.policy_to_use == ++ RTE_POWER_POLICY_TRAFFIC) { + if (get_pfid(&policies[i]) < 0) { + updated = 1; + break; +@@ -496,7 +500,8 @@ update_policy(struct channel_packet *pkt) + policies[i].pkt = *pkt; + get_pcpu_to_control(&policies[i]); + /* Check Eth dev only for Traffic policy */ +- if (policies[i].pkt.policy_to_use == TRAFFIC) { ++ if (policies[i].pkt.policy_to_use == ++ RTE_POWER_POLICY_TRAFFIC) { + if (get_pfid(&policies[i]) < 0) { + updated = 1; + break; +@@ -512,7 +517,7 @@ update_policy(struct channel_packet *pkt) + } + + static int +-remove_policy(struct channel_packet *pkt __rte_unused) ++remove_policy(struct rte_power_channel_packet *pkt __rte_unused) + { + unsigned int i; + +@@ -615,7 +620,7 @@ apply_time_profile(struct policy *pol) + /* Format the date and time, down to a single second. */ + strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm); + +- for (x = 0; x < HOURS; x++) { ++ for (x = 0; x < RTE_POWER_HOURS_PER_DAY; x++) { + + if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { +@@ -648,19 +653,19 @@ apply_workload_profile(struct policy *pol) + + int count; + +- if (pol->pkt.workload == HIGH) { ++ if (pol->pkt.workload == RTE_POWER_WL_HIGH) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { + if (pol->core_share[count].status != 1) + power_manager_scale_core_max( + pol->core_share[count].pcpu); + } +- } else if (pol->pkt.workload == MEDIUM) { ++ } else if (pol->pkt.workload == RTE_POWER_WL_MEDIUM) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { + if (pol->core_share[count].status != 1) + power_manager_scale_core_med( + pol->core_share[count].pcpu); + } +- } else if (pol->pkt.workload == LOW) { ++ } else if (pol->pkt.workload == RTE_POWER_WL_LOW) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { + if (pol->core_share[count].status != 1) + power_manager_scale_core_min( +@@ -673,14 +678,14 @@ static void + apply_policy(struct policy *pol) + { + +- struct channel_packet *pkt = &pol->pkt; ++ struct rte_power_channel_packet *pkt = &pol->pkt; + + /*Check policy to use*/ +- if (pkt->policy_to_use == TRAFFIC) ++ if (pkt->policy_to_use == RTE_POWER_POLICY_TRAFFIC) + apply_traffic_profile(pol); +- else if (pkt->policy_to_use == TIME) ++ else if (pkt->policy_to_use == RTE_POWER_POLICY_TIME) + apply_time_profile(pol); +- else if (pkt->policy_to_use == WORKLOAD) ++ else if (pkt->policy_to_use == RTE_POWER_POLICY_WORKLOAD) + apply_workload_profile(pol); + } + +@@ -715,24 +720,24 @@ write_binary_packet(void *buffer, + } + + static int +-send_freq(struct channel_packet *pkt, ++send_freq(struct rte_power_channel_packet *pkt, + struct channel_info *chan_info, + bool freq_list) + { + unsigned int vcore_id = pkt->resource_id; +- struct channel_packet_freq_list channel_pkt_freq_list; ++ struct 
rte_power_channel_packet_freq_list channel_pkt_freq_list; + struct vm_info info; + + if (get_info_vm(pkt->vm_name, &info) != 0) + return -1; + +- if (!freq_list && vcore_id >= MAX_VCPU_PER_VM) ++ if (!freq_list && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM) + return -1; + + if (!info.allow_query) + return -1; + +- channel_pkt_freq_list.command = CPU_POWER_FREQ_LIST; ++ channel_pkt_freq_list.command = RTE_POWER_FREQ_LIST; + channel_pkt_freq_list.num_vcpu = info.num_vcpus; + + if (freq_list) { +@@ -751,12 +756,12 @@ send_freq(struct channel_packet *pkt, + } + + static int +-send_capabilities(struct channel_packet *pkt, ++send_capabilities(struct rte_power_channel_packet *pkt, + struct channel_info *chan_info, + bool list_requested) + { + unsigned int vcore_id = pkt->resource_id; +- struct channel_packet_caps_list channel_pkt_caps_list; ++ struct rte_power_channel_packet_caps_list channel_pkt_caps_list; + struct vm_info info; + struct rte_power_core_capabilities caps; + int ret; +@@ -764,13 +769,13 @@ send_capabilities(struct channel_packet *pkt, + if (get_info_vm(pkt->vm_name, &info) != 0) + return -1; + +- if (!list_requested && vcore_id >= MAX_VCPU_PER_VM) ++ if (!list_requested && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM) + return -1; + + if (!info.allow_query) + return -1; + +- channel_pkt_caps_list.command = CPU_POWER_CAPS_LIST; ++ channel_pkt_caps_list.command = RTE_POWER_CAPS_LIST; + channel_pkt_caps_list.num_vcpu = info.num_vcpus; + + if (list_requested) { +@@ -805,18 +810,19 @@ send_capabilities(struct channel_packet *pkt, + } + + static int +-send_ack_for_received_cmd(struct channel_packet *pkt, ++send_ack_for_received_cmd(struct rte_power_channel_packet *pkt, + struct channel_info *chan_info, + uint32_t command) + { + pkt->command = command; + return write_binary_packet(pkt, +- sizeof(struct channel_packet), ++ sizeof(*pkt), + chan_info); + } + + static int +-process_request(struct channel_packet *pkt, struct channel_info *chan_info) ++process_request(struct rte_power_channel_packet *pkt, ++ struct channel_info *chan_info) + { + int ret; + +@@ -827,10 +833,10 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + CHANNEL_MGR_CHANNEL_PROCESSING) == 0) + return -1; + +- if (pkt->command == CPU_POWER) { ++ if (pkt->command == RTE_POWER_CPU_POWER) { + unsigned int core_num; + +- if (pkt->core_type == CORE_TYPE_VIRTUAL) ++ if (pkt->core_type == RTE_POWER_CORE_TYPE_VIRTUAL) + core_num = get_pcpu(chan_info, pkt->resource_id); + else + core_num = pkt->resource_id; +@@ -842,22 +848,22 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + bool valid_unit = true; + + switch (pkt->unit) { +- case(CPU_POWER_SCALE_MIN): ++ case(RTE_POWER_SCALE_MIN): + scale_res = power_manager_scale_core_min(core_num); + break; +- case(CPU_POWER_SCALE_MAX): ++ case(RTE_POWER_SCALE_MAX): + scale_res = power_manager_scale_core_max(core_num); + break; +- case(CPU_POWER_SCALE_DOWN): ++ case(RTE_POWER_SCALE_DOWN): + scale_res = power_manager_scale_core_down(core_num); + break; +- case(CPU_POWER_SCALE_UP): ++ case(RTE_POWER_SCALE_UP): + scale_res = power_manager_scale_core_up(core_num); + break; +- case(CPU_POWER_ENABLE_TURBO): ++ case(RTE_POWER_ENABLE_TURBO): + scale_res = power_manager_enable_turbo_core(core_num); + break; +- case(CPU_POWER_DISABLE_TURBO): ++ case(RTE_POWER_DISABLE_TURBO): + scale_res = power_manager_disable_turbo_core(core_num); + break; + default: +@@ -868,9 +874,9 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) if 
(valid_unit) { ret = send_ack_for_received_cmd(pkt, chan_info, - scale_res > 0 ? +- CPU_POWER_CMD_ACK : +- CPU_POWER_CMD_NACK); + scale_res >= 0 ? - CPU_POWER_CMD_ACK : - CPU_POWER_CMD_NACK); ++ RTE_POWER_CMD_ACK : ++ RTE_POWER_CMD_NACK); if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n"); + } else +@@ -878,19 +884,19 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + + } + +- if (pkt->command == PKT_POLICY) { ++ if (pkt->command == RTE_POWER_PKT_POLICY) { + RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n", + pkt->vm_name); + int ret = send_ack_for_received_cmd(pkt, + chan_info, +- CPU_POWER_CMD_ACK); ++ RTE_POWER_CMD_ACK); + if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n"); + update_policy(pkt); + policy_is_set = 1; + } + +- if (pkt->command == PKT_POLICY_REMOVE) { ++ if (pkt->command == RTE_POWER_PKT_POLICY_REMOVE) { + ret = remove_policy(pkt); + if (ret == 0) + RTE_LOG(INFO, CHANNEL_MONITOR, +@@ -900,26 +906,26 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + "Policy %s does not exist\n", pkt->vm_name); + } + +- if (pkt->command == CPU_POWER_QUERY_FREQ_LIST || +- pkt->command == CPU_POWER_QUERY_FREQ) { ++ if (pkt->command == RTE_POWER_QUERY_FREQ_LIST || ++ pkt->command == RTE_POWER_QUERY_FREQ) { + + RTE_LOG(INFO, CHANNEL_MONITOR, + "Frequency for %s requested.\n", pkt->vm_name); + int ret = send_freq(pkt, + chan_info, +- pkt->command == CPU_POWER_QUERY_FREQ_LIST); ++ pkt->command == RTE_POWER_QUERY_FREQ_LIST); + if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during frequency sending.\n"); + } + +- if (pkt->command == CPU_POWER_QUERY_CAPS_LIST || +- pkt->command == CPU_POWER_QUERY_CAPS) { ++ if (pkt->command == RTE_POWER_QUERY_CAPS_LIST || ++ pkt->command == RTE_POWER_QUERY_CAPS) { + + RTE_LOG(INFO, CHANNEL_MONITOR, + "Capabilities for %s requested.\n", pkt->vm_name); + int ret = send_capabilities(pkt, + chan_info, +- pkt->command == CPU_POWER_QUERY_CAPS_LIST); ++ pkt->command == RTE_POWER_QUERY_CAPS_LIST); + if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending capabilities.\n"); + } +@@ -988,7 +994,7 @@ channel_monitor_init(void) + static void + read_binary_packet(struct channel_info *chan_info) + { +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + void *buffer = &pkt; + int buffer_len = sizeof(pkt); + int n_bytes, err = 0; +@@ -1019,7 +1025,7 @@ read_binary_packet(struct channel_info *chan_info) + static void + read_json_packet(struct channel_info *chan_info) + { +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + int n_bytes, ret; + json_t *root; + json_error_t error; +@@ -1063,7 +1069,7 @@ read_json_packet(struct channel_info *chan_info) + /* + * Because our data is now in the json + * object, we can overwrite the pkt +- * with a channel_packet struct, using ++ * with a rte_power_channel_packet struct, using + * parse_json_to_pkt() + */ + ret = parse_json_to_pkt(root, &pkt, resource_name); +diff --git a/dpdk/examples/vm_power_manager/channel_monitor.h b/dpdk/examples/vm_power_manager/channel_monitor.h +index 7362a80d26..2b38c554b5 100644 +--- a/dpdk/examples/vm_power_manager/channel_monitor.h ++++ b/dpdk/examples/vm_power_manager/channel_monitor.h +@@ -5,8 +5,9 @@ + #ifndef CHANNEL_MONITOR_H_ + #define CHANNEL_MONITOR_H_ + ++#include ++ + #include "channel_manager.h" +-#include "channel_commands.h" + + struct core_share { + unsigned int pcpu; +@@ -18,11 +19,11 @@ struct core_share 
{ + }; + + struct policy { +- struct channel_packet pkt; +- uint32_t pfid[MAX_VFS]; +- uint32_t port[MAX_VFS]; ++ struct rte_power_channel_packet pkt; ++ uint32_t pfid[RTE_POWER_MAX_VFS]; ++ uint32_t port[RTE_POWER_MAX_VFS]; + unsigned int enabled; +- struct core_share core_share[MAX_VCPU_PER_VM]; ++ struct core_share core_share[RTE_POWER_MAX_VCPU_PER_VM]; + }; + + #ifdef __cplusplus +diff --git a/dpdk/examples/vm_power_manager/guest_cli/main.c b/dpdk/examples/vm_power_manager/guest_cli/main.c +index f63b3c988a..4e17f7fb90 100644 +--- a/dpdk/examples/vm_power_manager/guest_cli/main.c ++++ b/dpdk/examples/vm_power_manager/guest_cli/main.c +@@ -48,10 +48,10 @@ parse_args(int argc, char **argv) + { "policy", required_argument, 0, 'o'}, + {NULL, 0, 0, 0} + }; +- struct channel_packet *policy; ++ struct rte_power_channel_packet *policy; + unsigned short int hours[MAX_HOURS]; +- unsigned short int cores[MAX_VCPU_PER_VM]; +- unsigned short int ports[MAX_VCPU_PER_VM]; ++ unsigned short int cores[RTE_POWER_MAX_VCPU_PER_VM]; ++ unsigned short int ports[RTE_POWER_MAX_VCPU_PER_VM]; + int i, cnt, idx; + + policy = get_policy(); +@@ -69,7 +69,8 @@ parse_args(int argc, char **argv) + switch (opt) { + /* portmask */ + case 'n': +- strlcpy(policy->vm_name, optarg, VM_MAX_NAME_SZ); ++ strlcpy(policy->vm_name, optarg, ++ RTE_POWER_VM_MAX_NAME_SZ); + printf("Setting VM Name to [%s]\n", policy->vm_name); + break; + case 'b': +@@ -97,14 +98,15 @@ parse_args(int argc, char **argv) + } + break; + case 'l': +- cnt = parse_set(optarg, cores, MAX_VCPU_PER_VM); ++ cnt = parse_set(optarg, cores, ++ RTE_POWER_MAX_VCPU_PER_VM); + if (cnt < 0) { + printf("Invalid value passed to vcpu-list - [%s]\n", + optarg); + break; + } + idx = 0; +- for (i = 0; i < MAX_VCPU_PER_VM; i++) { ++ for (i = 0; i < RTE_POWER_MAX_VCPU_PER_VM; i++) { + if (cores[i]) { + printf("***Using core %d\n", i); + policy->vcpu_to_control[idx++] = i; +@@ -114,14 +116,15 @@ parse_args(int argc, char **argv) + printf("Total cores: %d\n", idx); + break; + case 'p': +- cnt = parse_set(optarg, ports, MAX_VCPU_PER_VM); ++ cnt = parse_set(optarg, ports, ++ RTE_POWER_MAX_VCPU_PER_VM); + if (cnt < 0) { + printf("Invalid value passed to port-list - [%s]\n", + optarg); + break; + } + idx = 0; +- for (i = 0; i < MAX_VCPU_PER_VM; i++) { ++ for (i = 0; i < RTE_POWER_MAX_VCPU_PER_VM; i++) { + if (ports[i]) { + printf("***Using port %d\n", i); + if (set_policy_mac(i, idx++) != 0) { +@@ -135,13 +138,17 @@ parse_args(int argc, char **argv) + break; + case 'o': + if (!strcmp(optarg, "TRAFFIC")) +- policy->policy_to_use = TRAFFIC; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_TRAFFIC; + else if (!strcmp(optarg, "TIME")) +- policy->policy_to_use = TIME; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_TIME; + else if (!strcmp(optarg, "WORKLOAD")) +- policy->policy_to_use = WORKLOAD; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_WORKLOAD; + else if (!strcmp(optarg, "BRANCH_RATIO")) +- policy->policy_to_use = BRANCH_RATIO; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_BRANCH_RATIO; + else { + printf("Invalid policy specified: %s\n", + optarg); +diff --git a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c +index 96c1a1ff69..1618030627 100644 +--- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c ++++ b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c +@@ -19,7 +19,6 @@ + #include + + #include +-#include + + #include "vm_power_cli_guest.h" + +@@ -38,9 +37,9 @@ union PFID { + 
uint64_t pfid; + }; + +-static struct channel_packet policy; ++static struct rte_power_channel_packet policy; + +-struct channel_packet * ++struct rte_power_channel_packet * + get_policy(void) + { + return &policy; +@@ -49,7 +48,7 @@ get_policy(void) + int + set_policy_mac(int port, int idx) + { +- struct channel_packet *policy; ++ struct rte_power_channel_packet *policy; + union PFID pfid; + int ret; + +@@ -73,7 +72,7 @@ set_policy_mac(int port, int idx) + } + + int +-set_policy_defaults(struct channel_packet *pkt) ++set_policy_defaults(struct rte_power_channel_packet *pkt) + { + int ret; + +@@ -103,10 +102,10 @@ set_policy_defaults(struct channel_packet *pkt) + pkt->timer_policy.hours_to_use_traffic_profile[0] = 8; + pkt->timer_policy.hours_to_use_traffic_profile[1] = 10; + +- pkt->core_type = CORE_TYPE_VIRTUAL; +- pkt->workload = LOW; +- pkt->policy_to_use = TIME; +- pkt->command = PKT_POLICY; ++ pkt->core_type = RTE_POWER_CORE_TYPE_VIRTUAL; ++ pkt->workload = RTE_POWER_WL_LOW; ++ pkt->policy_to_use = RTE_POWER_POLICY_TIME; ++ pkt->command = RTE_POWER_PKT_POLICY; + strlcpy(pkt->vm_name, "ubuntu2", sizeof(pkt->vm_name)); + + return 0; +@@ -145,7 +144,7 @@ struct cmd_freq_list_result { + }; + + static int +-query_data(struct channel_packet *pkt, unsigned int lcore_id) ++query_data(struct rte_power_channel_packet *pkt, unsigned int lcore_id) + { + int ret; + ret = rte_power_guest_channel_send_msg(pkt, lcore_id); +@@ -157,19 +156,19 @@ query_data(struct channel_packet *pkt, unsigned int lcore_id) + } + + static int +-receive_freq_list(struct channel_packet_freq_list *pkt_freq_list, ++receive_freq_list(struct rte_power_channel_packet_freq_list *pkt_freq_list, + unsigned int lcore_id) + { + int ret; + + ret = rte_power_guest_channel_receive_msg(pkt_freq_list, +- sizeof(struct channel_packet_freq_list), ++ sizeof(*pkt_freq_list), + lcore_id); + if (ret < 0) { + RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n"); + return -1; + } +- if (pkt_freq_list->command != CPU_POWER_FREQ_LIST) { ++ if (pkt_freq_list->command != RTE_POWER_FREQ_LIST) { + RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n"); + return -1; + } +@@ -183,14 +182,14 @@ cmd_query_freq_list_parsed(void *parsed_result, + { + struct cmd_freq_list_result *res = parsed_result; + unsigned int lcore_id; +- struct channel_packet_freq_list pkt_freq_list; +- struct channel_packet pkt; ++ struct rte_power_channel_packet_freq_list pkt_freq_list; ++ struct rte_power_channel_packet pkt; + bool query_list = false; + int ret; + char *ep; + +- memset(&pkt, 0, sizeof(struct channel_packet)); +- memset(&pkt_freq_list, 0, sizeof(struct channel_packet_freq_list)); ++ memset(&pkt, 0, sizeof(pkt)); ++ memset(&pkt_freq_list, 0, sizeof(pkt_freq_list)); + + if (!strcmp(res->cpu_num, "all")) { + +@@ -203,18 +202,18 @@ cmd_query_freq_list_parsed(void *parsed_result, + return; + } + +- pkt.command = CPU_POWER_QUERY_FREQ_LIST; ++ pkt.command = RTE_POWER_QUERY_FREQ_LIST; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + query_list = true; + } else { + errno = 0; + lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10); +- if (errno != 0 || lcore_id >= MAX_VCPU_PER_VM || ++ if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM || + ep == res->cpu_num) { + cmdline_printf(cl, "Invalid parameter provided.\n"); + return; + } +- pkt.command = CPU_POWER_QUERY_FREQ; ++ pkt.command = RTE_POWER_QUERY_FREQ; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + pkt.resource_id = lcore_id; + } +@@ -267,19 +266,19 @@ struct 
cmd_query_caps_result { + }; + + static int +-receive_capabilities(struct channel_packet_caps_list *pkt_caps_list, ++receive_capabilities(struct rte_power_channel_packet_caps_list *pkt_caps_list, + unsigned int lcore_id) + { + int ret; + + ret = rte_power_guest_channel_receive_msg(pkt_caps_list, +- sizeof(struct channel_packet_caps_list), ++ sizeof(*pkt_caps_list), + lcore_id); + if (ret < 0) { + RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n"); + return -1; + } +- if (pkt_caps_list->command != CPU_POWER_CAPS_LIST) { ++ if (pkt_caps_list->command != RTE_POWER_CAPS_LIST) { + RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n"); + return -1; + } +@@ -293,14 +292,14 @@ cmd_query_caps_list_parsed(void *parsed_result, + { + struct cmd_query_caps_result *res = parsed_result; + unsigned int lcore_id; +- struct channel_packet_caps_list pkt_caps_list; +- struct channel_packet pkt; ++ struct rte_power_channel_packet_caps_list pkt_caps_list; ++ struct rte_power_channel_packet pkt; + bool query_list = false; + int ret; + char *ep; + +- memset(&pkt, 0, sizeof(struct channel_packet)); +- memset(&pkt_caps_list, 0, sizeof(struct channel_packet_caps_list)); ++ memset(&pkt, 0, sizeof(pkt)); ++ memset(&pkt_caps_list, 0, sizeof(pkt_caps_list)); + + if (!strcmp(res->cpu_num, "all")) { + +@@ -313,18 +312,18 @@ cmd_query_caps_list_parsed(void *parsed_result, + return; + } + +- pkt.command = CPU_POWER_QUERY_CAPS_LIST; ++ pkt.command = RTE_POWER_QUERY_CAPS_LIST; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + query_list = true; + } else { + errno = 0; + lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10); +- if (errno != 0 || lcore_id >= MAX_VCPU_PER_VM || ++ if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM || + ep == res->cpu_num) { + cmdline_printf(cl, "Invalid parameter provided.\n"); + return; + } +- pkt.command = CPU_POWER_QUERY_CAPS; ++ pkt.command = RTE_POWER_QUERY_CAPS; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + pkt.resource_id = lcore_id; + } +@@ -344,13 +343,15 @@ cmd_query_caps_list_parsed(void *parsed_result, + unsigned int i; + for (i = 0; i < pkt_caps_list.num_vcpu; ++i) + cmdline_printf(cl, "Capabilities of [%d] vcore are:" +- " turbo possibility: %ld, is priority core: %ld.\n", ++ " turbo possibility: %" PRId64 ", " ++ "is priority core: %" PRId64 ".\n", + i, + pkt_caps_list.turbo[i], + pkt_caps_list.priority[i]); + } else { + cmdline_printf(cl, "Capabilities of [%d] vcore are:" +- " turbo possibility: %ld, is priority core: %ld.\n", ++ " turbo possibility: %" PRId64 ", " ++ "is priority core: %" PRId64 ".\n", + lcore_id, + pkt_caps_list.turbo[lcore_id], + pkt_caps_list.priority[lcore_id]); +@@ -378,7 +379,7 @@ cmdline_parse_inst_t cmd_query_caps_list = { + static int + check_response_cmd(unsigned int lcore_id, int *result) + { +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + int ret; + + ret = rte_power_guest_channel_receive_msg(&pkt, sizeof pkt, lcore_id); +@@ -386,10 +387,10 @@ check_response_cmd(unsigned int lcore_id, int *result) + return -1; + + switch (pkt.command) { +- case(CPU_POWER_CMD_ACK): ++ case(RTE_POWER_CMD_ACK): + *result = 1; + break; +- case(CPU_POWER_CMD_NACK): ++ case(RTE_POWER_CMD_NACK): + *result = 0; + break; + default: +@@ -471,7 +472,7 @@ struct cmd_send_policy_result { + }; + + static inline int +-send_policy(struct channel_packet *pkt, struct cmdline *cl) ++send_policy(struct rte_power_channel_packet *pkt, struct cmdline *cl) + { + int ret; + +diff --git 
a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h +index 0c2cc1374d..50c435544e 100644 +--- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h ++++ b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h +@@ -9,13 +9,11 @@ + extern "C" { + #endif + +-#include "channel_commands.h" +- +-struct channel_packet *get_policy(void); ++struct rte_power_channel_packet *get_policy(void); + + int set_policy_mac(int port, int idx); + +-int set_policy_defaults(struct channel_packet *pkt); ++int set_policy_defaults(struct rte_power_channel_packet *pkt); + + void run_cli(__attribute__((unused)) void *arg); + diff --git a/dpdk/examples/vm_power_manager/main.c b/dpdk/examples/vm_power_manager/main.c -index d39f044c1e..0409a832b5 100644 +index d39f044c1e..2316aace5a 100644 --- a/dpdk/examples/vm_power_manager/main.c +++ b/dpdk/examples/vm_power_manager/main.c @@ -272,7 +272,7 @@ check_all_ports_link_status(uint32_t port_mask) @@ -35008,6 +69606,28 @@ index d39f044c1e..0409a832b5 100644 else printf("Port %d Link Down\n", (uint16_t)portid); +@@ -395,7 +395,7 @@ main(int argc, char **argv) + "Cannot init port %"PRIu8 "\n", + portid); + +- for (w = 0; w < MAX_VFS; w++) { ++ for (w = 0; w < RTE_POWER_MAX_VFS; w++) { + eth.addr_bytes[5] = w + 0xf0; + + ret = -ENOTSUP; +diff --git a/dpdk/examples/vm_power_manager/meson.build b/dpdk/examples/vm_power_manager/meson.build +index 20a4a05b3b..8dd5bd2eb6 100644 +--- a/dpdk/examples/vm_power_manager/meson.build ++++ b/dpdk/examples/vm_power_manager/meson.build +@@ -40,7 +40,7 @@ opt_dep = cc.find_library('virt', required : false) + build = opt_dep.found() + ext_deps += opt_dep + +-opt_dep = dependency('jansson', required : false) ++opt_dep = dependency('jansson', required : false, method: 'pkg-config') + if opt_dep.found() + ext_deps += opt_dep + cflags += '-DUSE_JANSSON' diff --git a/dpdk/examples/vm_power_manager/power_manager.c b/dpdk/examples/vm_power_manager/power_manager.c index 7b4f4b3c4d..cd51d4741f 100644 --- a/dpdk/examples/vm_power_manager/power_manager.c @@ -35020,6 +69640,31 @@ index 7b4f4b3c4d..cd51d4741f 100644 #include #include #include +diff --git a/dpdk/examples/vm_power_manager/vm_power_cli.c b/dpdk/examples/vm_power_manager/vm_power_cli.c +index 5f64b83fb0..8eb87217e9 100644 +--- a/dpdk/examples/vm_power_manager/vm_power_cli.c ++++ b/dpdk/examples/vm_power_manager/vm_power_cli.c +@@ -21,7 +21,6 @@ + #include "channel_manager.h" + #include "channel_monitor.h" + #include "power_manager.h" +-#include "channel_commands.h" + + struct cmd_quit_result { + cmdline_fixed_string_t quit; +diff --git a/dpdk/examples/vmdq/Makefile b/dpdk/examples/vmdq/Makefile +index 0767c715a1..7e59e4d658 100644 +--- a/dpdk/examples/vmdq/Makefile ++++ b/dpdk/examples/vmdq/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/examples/vmdq/main.c b/dpdk/examples/vmdq/main.c index 6e6fc91ec0..b082bc8c1c 100644 --- a/dpdk/examples/vmdq/main.c @@ -35129,6 +69774,19 @@ index 6e6fc91ec0..b082bc8c1c 100644 printf("%lu ", rxPackets[q]); } 
printf("\nFinished handling signal %d\n", signum); +diff --git a/dpdk/examples/vmdq_dcb/Makefile b/dpdk/examples/vmdq_dcb/Makefile +index 2a9b04143f..2302577d00 100644 +--- a/dpdk/examples/vmdq_dcb/Makefile ++++ b/dpdk/examples/vmdq_dcb/Makefile +@@ -22,7 +22,7 @@ PKGCONF ?= pkg-config + PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) + CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) + LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +-LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) + + build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) diff --git a/dpdk/kernel/freebsd/contigmem/contigmem.c b/dpdk/kernel/freebsd/contigmem/contigmem.c index 64e0a7fecd..abb76f241e 100644 --- a/dpdk/kernel/freebsd/contigmem/contigmem.c @@ -35147,19 +69805,25 @@ index 64e0a7fecd..abb76f241e 100644 mtx_destroy(&contigmem_buffers[i].mtx); } diff --git a/dpdk/kernel/linux/kni/compat.h b/dpdk/kernel/linux/kni/compat.h -index 7109474ec5..9ee45dbf6f 100644 +index 7109474ec5..5f65640d5e 100644 --- a/dpdk/kernel/linux/kni/compat.h +++ b/dpdk/kernel/linux/kni/compat.h -@@ -130,3 +130,7 @@ +@@ -130,3 +130,13 @@ #if KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE #define HAVE_IOVA_TO_KVA_MAPPING_SUPPORT #endif + -+#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE ++#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE || \ ++ (defined(RHEL_RELEASE_CODE) && \ ++ RHEL_RELEASE_VERSION(8, 3) <= RHEL_RELEASE_CODE) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif ++ ++#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE ++#define HAVE_TSK_IN_GUP ++#endif diff --git a/dpdk/kernel/linux/kni/kni_dev.h b/dpdk/kernel/linux/kni/kni_dev.h -index 5e75c6371f..ca5f92a47b 100644 +index 5e75c6371f..c15da311ba 100644 --- a/dpdk/kernel/linux/kni/kni_dev.h +++ b/dpdk/kernel/linux/kni/kni_dev.h @@ -32,7 +32,7 @@ @@ -35171,6 +69835,20 @@ index 5e75c6371f..ca5f92a47b 100644 /** * A structure describing the private information for a kni device. 
+@@ -101,8 +101,13 @@ static inline phys_addr_t iova_to_phys(struct task_struct *tsk, + offset = iova & (PAGE_SIZE - 1); + + /* Read one page struct info */ ++#ifdef HAVE_TSK_IN_GUP + ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, + FOLL_TOUCH, &page, NULL, NULL); ++#else ++ ret = get_user_pages_remote(tsk->mm, iova, 1, ++ FOLL_TOUCH, &page, NULL, NULL); ++#endif + if (ret < 0) + return 0; + diff --git a/dpdk/kernel/linux/kni/kni_misc.c b/dpdk/kernel/linux/kni/kni_misc.c index cda71bde08..2b464c4381 100644 --- a/dpdk/kernel/linux/kni/kni_misc.c @@ -35447,11 +70125,41 @@ index b06bbe9207..d1f920b09c 100644 return root; } +diff --git a/dpdk/lib/librte_acl/rte_acl.c b/dpdk/lib/librte_acl/rte_acl.c +index 777ec4d340..715b023592 100644 +--- a/dpdk/lib/librte_acl/rte_acl.c ++++ b/dpdk/lib/librte_acl/rte_acl.c +@@ -16,7 +16,6 @@ static struct rte_tailq_elem rte_acl_tailq = { + }; + EAL_REGISTER_TAILQ(rte_acl_tailq) + +-#ifndef RTE_ARCH_X86 + #ifndef CC_AVX2_SUPPORT + /* + * If the compiler doesn't support AVX2 instructions, +@@ -33,6 +32,7 @@ rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx, + } + #endif + ++#ifndef RTE_ARCH_X86 + int + rte_acl_classify_sse(__rte_unused const struct rte_acl_ctx *ctx, + __rte_unused const uint8_t **data, diff --git a/dpdk/lib/librte_bbdev/rte_bbdev.h b/dpdk/lib/librte_bbdev/rte_bbdev.h -index 591fb7914a..1f58a0762f 100644 +index 591fb7914a..6fcf448262 100644 --- a/dpdk/lib/librte_bbdev/rte_bbdev.h +++ b/dpdk/lib/librte_bbdev/rte_bbdev.h -@@ -440,21 +440,21 @@ TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback); +@@ -11,7 +11,8 @@ + * Wireless base band device abstraction APIs. + * + * @warning +- * @b EXPERIMENTAL: this API may change without prior notice ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. + * + * This API allows an application to discover, configure and use a device to + * process operations. An asynchronous API (enqueue, followed by later dequeue) +@@ -440,21 +441,21 @@ TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback); * these fields, but should only write to the *_ops fields. */ struct __rte_cache_aligned rte_bbdev { @@ -35572,11 +70280,39 @@ index 24ddcee7af..237e3361d7 100644 rte_bbdev_queue_stop_t queue_stop; /** Enable queue interrupt. 
Optional */ +diff --git a/dpdk/lib/librte_bpf/bpf_validate.c b/dpdk/lib/librte_bpf/bpf_validate.c +index 6bd6f78e9b..80d21fabbe 100644 +--- a/dpdk/lib/librte_bpf/bpf_validate.c ++++ b/dpdk/lib/librte_bpf/bpf_validate.c +@@ -226,7 +226,7 @@ eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk) + struct bpf_reg_val rv; + + rv.u.min = (rd->u.min + rs->u.min) & msk; +- rv.u.max = (rd->u.min + rs->u.max) & msk; ++ rv.u.max = (rd->u.max + rs->u.max) & msk; + rv.s.min = (rd->s.min + rs->s.min) & msk; + rv.s.max = (rd->s.max + rs->s.max) & msk; + +@@ -254,10 +254,10 @@ eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk) + { + struct bpf_reg_val rv; + +- rv.u.min = (rd->u.min - rs->u.min) & msk; +- rv.u.max = (rd->u.min - rs->u.max) & msk; +- rv.s.min = (rd->s.min - rs->s.min) & msk; +- rv.s.max = (rd->s.max - rs->s.max) & msk; ++ rv.u.min = (rd->u.min - rs->u.max) & msk; ++ rv.u.max = (rd->u.max - rs->u.min) & msk; ++ rv.s.min = (rd->s.min - rs->s.max) & msk; ++ rv.s.max = (rd->s.max - rs->s.min) & msk; + + /* + * if at least one of the operands is not constant, diff --git a/dpdk/lib/librte_bpf/meson.build b/dpdk/lib/librte_bpf/meson.build -index 13fc02db38..52cfaf9ac2 100644 +index 13fc02db38..76e00e9fd4 100644 --- a/dpdk/lib/librte_bpf/meson.build +++ b/dpdk/lib/librte_bpf/meson.build -@@ -14,7 +14,7 @@ elif dpdk_conf.has('RTE_ARCH_ARM64') +@@ -14,13 +14,13 @@ elif dpdk_conf.has('RTE_ARCH_ARM64') sources += files('bpf_jit_arm64.c') endif @@ -35585,6 +70321,46 @@ index 13fc02db38..52cfaf9ac2 100644 'rte_bpf.h', 'rte_bpf_ethdev.h') + deps += ['mbuf', 'net', 'ethdev'] + +-dep = dependency('libelf', required: false) ++dep = dependency('libelf', required: false, method: 'pkg-config') + if dep.found() + dpdk_conf.set('RTE_LIBRTE_BPF_ELF', 1) + sources += files('bpf_load_elf.c') +diff --git a/dpdk/lib/librte_bpf/rte_bpf.h b/dpdk/lib/librte_bpf/rte_bpf.h +index cbf1cddaca..e2d419b4ef 100644 +--- a/dpdk/lib/librte_bpf/rte_bpf.h ++++ b/dpdk/lib/librte_bpf/rte_bpf.h +@@ -7,9 +7,13 @@ + + /** + * @file rte_bpf.h +- * @b EXPERIMENTAL: this API may change without prior notice + * + * RTE BPF support. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. ++ * + * librte_bpf provides a framework to load and execute eBPF bytecode + * inside user-space dpdk based applications. 
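[Editor's note] The eval_add()/eval_sub() hunks above correct the BPF verifier's value-range tracking: for an addition, the upper bound must combine both operands' maxima, and for a subtraction the extremes cross (min minus max, max minus min). A small standalone sketch of the corrected interval rules (struct and function names are illustrative, not the library's):

#include <stdint.h>

struct urange { uint64_t min, max; };

/* Interval propagation for add, masked to the operand width. */
static struct urange
range_add(struct urange a, struct urange b, uint64_t msk)
{
	struct urange r = {
		.min = (a.min + b.min) & msk,
		.max = (a.max + b.max) & msk, /* bug was a.min + b.max */
	};
	return r;
}

/* For subtraction the bounds cross: the smallest result comes from
 * subtracting the largest possible rhs, and vice versa.
 */
static struct urange
range_sub(struct urange a, struct urange b, uint64_t msk)
{
	struct urange r = {
		.min = (a.min - b.max) & msk,
		.max = (a.max - b.min) & msk,
	};
	return r;
}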
+ * It supports basic set of features from eBPF spec +diff --git a/dpdk/lib/librte_cfgfile/rte_cfgfile.c b/dpdk/lib/librte_cfgfile/rte_cfgfile.c +index 9049fd9c23..0c419d6adc 100644 +--- a/dpdk/lib/librte_cfgfile/rte_cfgfile.c ++++ b/dpdk/lib/librte_cfgfile/rte_cfgfile.c +@@ -191,7 +191,8 @@ rte_cfgfile_load_with_params(const char *filename, int flags, + } + /* skip parsing if comment character found */ + pos = memchr(buffer, params->comment_character, len); +- if (pos != NULL && (*(pos-1) != '\\')) { ++ if (pos != NULL && ++ (pos == buffer || *(pos-1) != '\\')) { + *pos = '\0'; + len = pos - buffer; + } diff --git a/dpdk/lib/librte_cfgfile/rte_cfgfile_version.map b/dpdk/lib/librte_cfgfile/rte_cfgfile_version.map index 906eee96bf..22c999fe16 100644 --- a/dpdk/lib/librte_cfgfile/rte_cfgfile_version.map @@ -35597,6 +70373,23 @@ index 906eee96bf..22c999fe16 100644 rte_cfgfile_sections; rte_cfgfile_set_entry; +diff --git a/dpdk/lib/librte_compressdev/rte_compressdev.h b/dpdk/lib/librte_compressdev/rte_compressdev.h +index 8052efe675..2840c27c6c 100644 +--- a/dpdk/lib/librte_compressdev/rte_compressdev.h ++++ b/dpdk/lib/librte_compressdev/rte_compressdev.h +@@ -8,7 +8,11 @@ + /** + * @file rte_compressdev.h + * +- * RTE Compression Device APIs ++ * RTE Compression Device APIs. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. + * + * Defines comp device APIs for the provisioning of compression operations. + */ diff --git a/dpdk/lib/librte_cryptodev/rte_crypto_sym.h b/dpdk/lib/librte_cryptodev/rte_crypto_sym.h index ffa038dc40..4e05c7c6ac 100644 --- a/dpdk/lib/librte_cryptodev/rte_crypto_sym.h @@ -35727,6 +70520,34 @@ index 89aa2ed3e2..ed9de3eb92 100644 if (dev == NULL) return NULL; +diff --git a/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h b/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h +index fba14f2fa0..c4935b3307 100644 +--- a/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h ++++ b/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h +@@ -41,7 +41,8 @@ extern "C" { + static const char * const cryptodev_pmd_valid_params[] = { + RTE_CRYPTODEV_PMD_NAME_ARG, + RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG, +- RTE_CRYPTODEV_PMD_SOCKET_ID_ARG ++ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG, ++ NULL + }; + + /** +diff --git a/dpdk/lib/librte_distributor/distributor_private.h b/dpdk/lib/librte_distributor/distributor_private.h +index 489aef2acb..689fe3e183 100644 +--- a/dpdk/lib/librte_distributor/distributor_private.h ++++ b/dpdk/lib/librte_distributor/distributor_private.h +@@ -155,6 +155,9 @@ struct rte_distributor { + enum rte_distributor_match_function dist_match_fn; + + struct rte_distributor_single *d_single; ++ ++ uint8_t active[RTE_DISTRIB_MAX_WORKERS]; ++ uint8_t activesum; + }; + + void diff --git a/dpdk/lib/librte_distributor/meson.build b/dpdk/lib/librte_distributor/meson.build index 50b91887b5..266af64348 100644 --- a/dpdk/lib/librte_distributor/meson.build @@ -35740,7 +70561,7 @@ index 50b91887b5..266af64348 100644 # for clang 32-bit compiles we need libatomic for 64-bit atomic ops if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false diff --git a/dpdk/lib/librte_distributor/rte_distributor.c b/dpdk/lib/librte_distributor/rte_distributor.c -index 6c5b0c86e8..1c047f065a 100644 +index 6c5b0c86e8..ef34facba6 100644 --- a/dpdk/lib/librte_distributor/rte_distributor.c +++ b/dpdk/lib/librte_distributor/rte_distributor.c @@ -8,7 +8,6 @@ @@ -35751,8 +70572,501 @@ index 6c5b0c86e8..1c047f065a 100644 #include #include #include +@@ -43,7 +42,7 @@ 
rte_distributor_request_pkt(struct rte_distributor *d, + + if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) { + rte_distributor_request_pkt_single(d->d_single, +- worker_id, oldpkt[0]); ++ worker_id, count ? oldpkt[0] : NULL); + return; + } + +@@ -52,7 +51,7 @@ rte_distributor_request_pkt(struct rte_distributor *d, + * Sync with worker on GET_BUF flag. + */ + while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE) +- & RTE_DISTRIB_GET_BUF)) { ++ & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) { + rte_pause(); + uint64_t t = rte_rdtsc()+100; + +@@ -68,11 +67,11 @@ rte_distributor_request_pkt(struct rte_distributor *d, + for (i = count; i < RTE_DIST_BURST_SIZE; i++) + buf->retptr64[i] = 0; + +- /* Set Return bit for each packet returned */ ++ /* Set VALID_BUF bit for each packet returned */ + for (i = count; i-- > 0; ) + buf->retptr64[i] = + (((int64_t)(uintptr_t)(oldpkt[i])) << +- RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF; ++ RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF; + + /* + * Finally, set the GET_BUF to signal to distributor that cache +@@ -98,11 +97,13 @@ rte_distributor_poll_pkt(struct rte_distributor *d, + return (pkts[0]) ? 1 : 0; + } + +- /* If bit is set, return ++ /* If any of below bits is set, return. ++ * GET_BUF is set when distributor hasn't sent any packets yet ++ * RETURN_BUF is set when distributor must retrieve in-flight packets + * Sync with distributor to acquire bufptrs + */ + if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE) +- & RTE_DISTRIB_GET_BUF) ++ & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) + return -1; + + /* since bufptr64 is signed, this should be an arithmetic shift */ +@@ -114,7 +115,7 @@ rte_distributor_poll_pkt(struct rte_distributor *d, + } + + /* +- * so now we've got the contents of the cacheline into an array of ++ * so now we've got the contents of the cacheline into an array of + * mbuf pointers, so toggle the bit so scheduler can start working + * on the next cacheline while we're working. + * Sync with distributor on GET_BUF flag. Release bufptrs. +@@ -135,7 +136,7 @@ rte_distributor_get_pkt(struct rte_distributor *d, + if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) { + if (return_count <= 1) { + pkts[0] = rte_distributor_get_pkt_single(d->d_single, +- worker_id, oldpkt[0]); ++ worker_id, return_count ? oldpkt[0] : NULL); + return (pkts[0]) ? 1 : 0; + } else + return -EINVAL; +@@ -166,25 +167,48 @@ rte_distributor_return_pkt(struct rte_distributor *d, + if (num == 1) + return rte_distributor_return_pkt_single(d->d_single, + worker_id, oldpkt[0]); ++ else if (num == 0) ++ return rte_distributor_return_pkt_single(d->d_single, ++ worker_id, NULL); + else + return -EINVAL; + } + ++ /* Spin while handshake bits are set (scheduler clears it). ++ * Sync with worker on GET_BUF flag. ++ */ ++ while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED) ++ & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) { ++ rte_pause(); ++ uint64_t t = rte_rdtsc()+100; ++ ++ while (rte_rdtsc() < t) ++ rte_pause(); ++ } ++ + /* Sync with distributor to acquire retptrs */ + __atomic_thread_fence(__ATOMIC_ACQUIRE); + for (i = 0; i < RTE_DIST_BURST_SIZE; i++) + /* Switch off the return bit first */ +- buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF; ++ buf->retptr64[i] = 0; + + for (i = num; i-- > 0; ) + buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) << +- RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF; ++ RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF; + +- /* set the GET_BUF but even if we got no returns. 
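[Editor's note] The return-path rework in these distributor hunks hinges on handshake bits exchanged through the first retptr64/bufptr64 slot: one side spins until the flag bits are clear, then publishes mbuf pointers shifted past the flag bits with a release store, and the other side picks them up with an acquire load. A reduced, illustrative sketch of that flag-in-pointer mailbox pattern (flag values and function names are not the library's):

#include <stdint.h>

#define FLAG_BITS 2
#define GET_BUF   (1ULL << 0)

/* Producer: wait until the consumer released the slot, then publish one
 * pointer together with the handshake flag. The release store makes the
 * payload visible before the flag is observed set.
 */
static void
publish(uint64_t *slot, void *pkt)
{
	while (__atomic_load_n(slot, __ATOMIC_ACQUIRE) & GET_BUF)
		; /* spin: previous publication not yet consumed */

	__atomic_store_n(slot,
		(((uint64_t)(uintptr_t)pkt) << FLAG_BITS) | GET_BUF,
		__ATOMIC_RELEASE);
}

/* Consumer: the acquire load pairs with the producer's release store. */
static void *
consume(uint64_t *slot)
{
	uint64_t v = __atomic_load_n(slot, __ATOMIC_ACQUIRE);

	if (!(v & GET_BUF))
		return NULL;
	__atomic_store_n(slot, 0, __ATOMIC_RELEASE); /* hand slot back */
	return (void *)(uintptr_t)(v >> FLAG_BITS);
}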
+- * Sync with distributor on GET_BUF flag. Release retptrs. ++ /* Use RETURN_BUF on bufptr64 to notify distributor that ++ * we won't read any mbufs from there even if GET_BUF is set. ++ * This allows distributor to retrieve in-flight already sent packets. ++ */ ++ __atomic_or_fetch(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF, ++ __ATOMIC_ACQ_REL); ++ ++ /* set the RETURN_BUF on retptr64 even if we got no returns. ++ * Sync with distributor on RETURN_BUF flag. Release retptrs. ++ * Notify distributor that we don't request more packets any more. + */ + __atomic_store_n(&(buf->retptr64[0]), +- buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE); ++ buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, __ATOMIC_RELEASE); + + return 0; + } +@@ -235,13 +259,13 @@ find_match_scalar(struct rte_distributor *d, + + for (j = 0; j < RTE_DIST_BURST_SIZE ; j++) + for (w = 0; w < RTE_DIST_BURST_SIZE; w++) +- if (d->in_flight_tags[i][j] == data_ptr[w]) { ++ if (d->in_flight_tags[i][w] == data_ptr[j]) { + output_ptr[j] = i+1; + break; + } + for (j = 0; j < RTE_DIST_BURST_SIZE; j++) + for (w = 0; w < RTE_DIST_BURST_SIZE; w++) +- if (bl->tags[j] == data_ptr[w]) { ++ if (bl->tags[w] == data_ptr[j]) { + output_ptr[j] = i+1; + break; + } +@@ -254,6 +278,59 @@ find_match_scalar(struct rte_distributor *d, + */ + } + ++/* ++ * When worker called rte_distributor_return_pkt() ++ * and passed RTE_DISTRIB_RETURN_BUF handshake through retptr64, ++ * distributor must retrieve both inflight and backlog packets assigned ++ * to the worker and reprocess them to another worker. ++ */ ++static void ++handle_worker_shutdown(struct rte_distributor *d, unsigned int wkr) ++{ ++ struct rte_distributor_buffer *buf = &(d->bufs[wkr]); ++ /* double BURST size for storing both inflights and backlog */ ++ struct rte_mbuf *pkts[RTE_DIST_BURST_SIZE * 2]; ++ unsigned int pkts_count = 0; ++ unsigned int i; ++ ++ /* If GET_BUF is cleared there are in-flight packets sent ++ * to worker which does not require new packets. ++ * They must be retrieved and assigned to another worker. ++ */ ++ if (!(__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE) ++ & RTE_DISTRIB_GET_BUF)) ++ for (i = 0; i < RTE_DIST_BURST_SIZE; i++) ++ if (buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF) ++ pkts[pkts_count++] = (void *)((uintptr_t) ++ (buf->bufptr64[i] ++ >> RTE_DISTRIB_FLAG_BITS)); ++ ++ /* Make following operations on handshake flags on bufptr64: ++ * - set GET_BUF to indicate that distributor can overwrite buffer ++ * with new packets if worker will make a new request. ++ * - clear RETURN_BUF to unlock reads on worker side. ++ */ ++ __atomic_store_n(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF, ++ __ATOMIC_RELEASE); ++ ++ /* Collect backlog packets from worker */ ++ for (i = 0; i < d->backlog[wkr].count; i++) ++ pkts[pkts_count++] = (void *)((uintptr_t) ++ (d->backlog[wkr].pkts[i] >> RTE_DISTRIB_FLAG_BITS)); ++ ++ d->backlog[wkr].count = 0; ++ ++ /* Clear both inflight and backlog tags */ ++ for (i = 0; i < RTE_DIST_BURST_SIZE; i++) { ++ d->in_flight_tags[wkr][i] = 0; ++ d->backlog[wkr].tags[i] = 0; ++ } ++ ++ /* Recursive call */ ++ if (pkts_count > 0) ++ rte_distributor_process(d, pkts, pkts_count); ++} ++ + + /* + * When the handshake bits indicate that there are packets coming +@@ -272,19 +349,33 @@ handle_returns(struct rte_distributor *d, unsigned int wkr) + + /* Sync on GET_BUF flag. Acquire retptrs. 
*/ + if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE) +- & RTE_DISTRIB_GET_BUF) { ++ & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) { + for (i = 0; i < RTE_DIST_BURST_SIZE; i++) { +- if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) { ++ if (buf->retptr64[i] & RTE_DISTRIB_VALID_BUF) { + oldbuf = ((uintptr_t)(buf->retptr64[i] >> + RTE_DISTRIB_FLAG_BITS)); + /* store returns in a circular buffer */ + store_return(oldbuf, d, &ret_start, &ret_count); + count++; +- buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF; ++ buf->retptr64[i] &= ~RTE_DISTRIB_VALID_BUF; + } + } + d->returns.start = ret_start; + d->returns.count = ret_count; ++ ++ /* If worker requested packets with GET_BUF, set it to active ++ * otherwise (RETURN_BUF), set it to not active. ++ */ ++ d->activesum -= d->active[wkr]; ++ d->active[wkr] = !!(buf->retptr64[0] & RTE_DISTRIB_GET_BUF); ++ d->activesum += d->active[wkr]; ++ ++ /* If worker returned packets without requesting new ones, ++ * handle all in-flights and backlog packets assigned to it. ++ */ ++ if (unlikely(buf->retptr64[0] & RTE_DISTRIB_RETURN_BUF)) ++ handle_worker_shutdown(d, wkr); ++ + /* Clear for the worker to populate with more returns. + * Sync with distributor on GET_BUF flag. Release retptrs. + */ +@@ -308,12 +399,18 @@ release(struct rte_distributor *d, unsigned int wkr) + struct rte_distributor_buffer *buf = &(d->bufs[wkr]); + unsigned int i; + ++ handle_returns(d, wkr); ++ if (unlikely(!d->active[wkr])) ++ return 0; ++ + /* Sync with worker on GET_BUF flag */ + while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE) +- & RTE_DISTRIB_GET_BUF)) ++ & RTE_DISTRIB_GET_BUF)) { ++ handle_returns(d, wkr); ++ if (unlikely(!d->active[wkr])) ++ return 0; + rte_pause(); +- +- handle_returns(d, wkr); ++ } + + buf->count = 0; + +@@ -351,7 +448,7 @@ rte_distributor_process(struct rte_distributor *d, + int64_t next_value = 0; + uint16_t new_tag = 0; + uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned; +- unsigned int i, j, w, wid; ++ unsigned int i, j, w, wid, matching_required; + + if (d->alg_type == RTE_DIST_ALG_SINGLE) { + /* Call the old API */ +@@ -359,12 +456,16 @@ rte_distributor_process(struct rte_distributor *d, + mbufs, num_mbufs); + } + ++ for (wid = 0 ; wid < d->num_workers; wid++) ++ handle_returns(d, wid); ++ + if (unlikely(num_mbufs == 0)) { + /* Flush out all non-full cache-lines to workers. */ + for (wid = 0 ; wid < d->num_workers; wid++) { + /* Sync with worker on GET_BUF flag. */ + if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]), + __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) { ++ d->bufs[wid].count = 0; + release(d, wid); + handle_returns(d, wid); + } +@@ -372,15 +473,13 @@ rte_distributor_process(struct rte_distributor *d, + return 0; + } + ++ if (unlikely(!d->activesum)) ++ return 0; ++ + while (next_idx < num_mbufs) { + uint16_t matches[RTE_DIST_BURST_SIZE]; + unsigned int pkts; + +- /* Sync with worker on GET_BUF flag. 
*/ +- if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), +- __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) +- d->bufs[wkr].count = 0; +- + if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE) + pkts = num_mbufs - next_idx; + else +@@ -396,22 +495,30 @@ rte_distributor_process(struct rte_distributor *d, + for (; i < RTE_DIST_BURST_SIZE; i++) + flows[i] = 0; + +- switch (d->dist_match_fn) { +- case RTE_DIST_MATCH_VECTOR: +- find_match_vec(d, &flows[0], &matches[0]); +- break; +- default: +- find_match_scalar(d, &flows[0], &matches[0]); +- } ++ matching_required = 1; + ++ for (j = 0; j < pkts; j++) { ++ if (unlikely(!d->activesum)) ++ return next_idx; ++ ++ if (unlikely(matching_required)) { ++ switch (d->dist_match_fn) { ++ case RTE_DIST_MATCH_VECTOR: ++ find_match_vec(d, &flows[0], ++ &matches[0]); ++ break; ++ default: ++ find_match_scalar(d, &flows[0], ++ &matches[0]); ++ } ++ matching_required = 0; ++ } + /* + * Matches array now contain the intended worker ID (+1) of + * the incoming packets. Any zeroes need to be assigned + * workers. + */ + +- for (j = 0; j < pkts; j++) { +- + next_mb = mbufs[next_idx++]; + next_value = (((int64_t)(uintptr_t)next_mb) << + RTE_DISTRIB_FLAG_BITS); +@@ -431,12 +538,18 @@ rte_distributor_process(struct rte_distributor *d, + */ + /* matches[j] = 0; */ + +- if (matches[j]) { ++ if (matches[j] && d->active[matches[j]-1]) { + struct rte_distributor_backlog *bl = + &d->backlog[matches[j]-1]; + if (unlikely(bl->count == + RTE_DIST_BURST_SIZE)) { + release(d, matches[j]-1); ++ if (!d->active[matches[j]-1]) { ++ j--; ++ next_idx--; ++ matching_required = 1; ++ continue; ++ } + } + + /* Add to worker that already has flow */ +@@ -446,11 +559,21 @@ rte_distributor_process(struct rte_distributor *d, + bl->pkts[idx] = next_value; + + } else { +- struct rte_distributor_backlog *bl = +- &d->backlog[wkr]; ++ struct rte_distributor_backlog *bl; ++ ++ while (unlikely(!d->active[wkr])) ++ wkr = (wkr + 1) % d->num_workers; ++ bl = &d->backlog[wkr]; ++ + if (unlikely(bl->count == + RTE_DIST_BURST_SIZE)) { + release(d, wkr); ++ if (!d->active[wkr]) { ++ j--; ++ next_idx--; ++ matching_required = 1; ++ continue; ++ } + } + + /* Add to current worker worker */ +@@ -469,17 +592,17 @@ rte_distributor_process(struct rte_distributor *d, + matches[w] = wkr+1; + } + } +- wkr++; +- if (wkr >= d->num_workers) +- wkr = 0; ++ wkr = (wkr + 1) % d->num_workers; + } + + /* Flush out all non-full cache-lines to workers. */ + for (wid = 0 ; wid < d->num_workers; wid++) + /* Sync with worker on GET_BUF flag. */ + if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]), +- __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) ++ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) { ++ d->bufs[wid].count = 0; + release(d, wid); ++ } + + return num_mbufs; + } +@@ -522,7 +645,7 @@ total_outstanding(const struct rte_distributor *d) + unsigned int wkr, total_outstanding = 0; + + for (wkr = 0; wkr < d->num_workers; wkr++) +- total_outstanding += d->backlog[wkr].count; ++ total_outstanding += d->backlog[wkr].count + d->bufs[wkr].count; + + return total_outstanding; + } +@@ -579,6 +702,8 @@ rte_distributor_clear_returns(struct rte_distributor *d) + /* Sync with worker. Release retptrs. 
*/ + __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0, + __ATOMIC_RELEASE); ++ ++ d->returns.start = d->returns.count = 0; + } + + /* creates a distributor instance */ +@@ -647,6 +772,9 @@ rte_distributor_create(const char *name, + for (i = 0 ; i < num_workers ; i++) + d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE]; + ++ memset(d->active, 0, sizeof(d->active)); ++ d->activesum = 0; ++ + dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head, + rte_dist_burst_list); + +diff --git a/dpdk/lib/librte_distributor/rte_distributor.h b/dpdk/lib/librte_distributor/rte_distributor.h +index 327c0c4ab2..a073e64612 100644 +--- a/dpdk/lib/librte_distributor/rte_distributor.h ++++ b/dpdk/lib/librte_distributor/rte_distributor.h +@@ -155,7 +155,7 @@ rte_distributor_clear_returns(struct rte_distributor *d); + * @param pkts + * The mbufs pointer array to be filled in (up to 8 packets) + * @param oldpkt +- * The previous packet, if any, being processed by the worker ++ * The previous packets, if any, being processed by the worker + * @param retcount + * The number of packets being returned + * +@@ -187,15 +187,15 @@ rte_distributor_return_pkt(struct rte_distributor *d, + + /** + * API called by a worker to request a new packet to process. +- * Any previous packet given to the worker is assumed to have completed ++ * Any previous packets given to the worker are assumed to have completed + * processing, and may be optionally returned to the distributor via + * the oldpkt parameter. +- * Unlike rte_distributor_get_pkt_burst(), this function does not wait for a +- * new packet to be provided by the distributor. ++ * Unlike rte_distributor_get_pkt(), this function does not wait for ++ * new packets to be provided by the distributor. + * +- * NOTE: after calling this function, rte_distributor_poll_pkt_burst() should +- * be used to poll for the packet requested. The rte_distributor_get_pkt_burst() +- * API should *not* be used to try and retrieve the new packet. ++ * NOTE: after calling this function, rte_distributor_poll_pkt() should ++ * be used to poll for the packets requested. The rte_distributor_get_pkt() ++ * API should *not* be used to try and retrieve the new packets. + * + * @param d + * The distributor instance to be used +@@ -213,9 +213,9 @@ rte_distributor_request_pkt(struct rte_distributor *d, + unsigned int count); + + /** +- * API called by a worker to check for a new packet that was previously ++ * API called by a worker to check for new packets that were previously + * requested by a call to rte_distributor_request_pkt(). It does not wait +- * for the new packet to be available, but returns NULL if the request has ++ * for the new packets to be available, but returns if the request has + * not yet been fulfilled by the distributor. + * + * @param d +@@ -227,8 +227,9 @@ rte_distributor_request_pkt(struct rte_distributor *d, + * The array of mbufs being given to the worker + * + * @return +- * The number of packets being given to the worker thread, zero if no +- * packet is yet available. 
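[Editor's note] With the API semantics clarified in these header hunks (the burst API returns -1 when no packets are available yet; the legacy single API returns 0), a worker loop built on this release's distributor looks roughly like the following. This is a hedged sketch against the DPDK 19.11 burst API, not a drop-in example:

#include <rte_distributor.h>
#include <rte_mbuf.h>

/* Illustrative worker body: hand back the previous burst while
 * requesting a new one, then process whatever arrived.
 */
static int
worker_loop(struct rte_distributor *d, unsigned int worker_id,
	    volatile int *quit)
{
	struct rte_mbuf *pkts[8]; /* "up to 8 packets" per the API docs */
	int nb = 0;

	while (!*quit) {
		/* Returns the previous 'nb' packets and blocks until the
		 * distributor supplies a new burst.
		 */
		nb = rte_distributor_get_pkt(d, worker_id, pkts, pkts, nb);
		/* ... process pkts[0..nb-1] ... */
	}
	/* On shutdown, return in-flight packets without requesting more,
	 * so the distributor can reassign them (the RETURN_BUF path).
	 */
	rte_distributor_return_pkt(d, worker_id, pkts, nb);
	return 0;
}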
++ * The number of packets being given to the worker thread, ++ * -1 if no packets are yet available (burst API - RTE_DIST_ALG_BURST) ++ * 0 if no packets are yet available (legacy single API - RTE_DIST_ALG_SINGLE) + */ + int + rte_distributor_poll_pkt(struct rte_distributor *d, diff --git a/dpdk/lib/librte_distributor/rte_distributor_single.c b/dpdk/lib/librte_distributor/rte_distributor_single.c -index 91d8824c64..abaf7730c3 100644 +index 91d8824c64..f4725b1d0b 100644 --- a/dpdk/lib/librte_distributor/rte_distributor_single.c +++ b/dpdk/lib/librte_distributor/rte_distributor_single.c @@ -9,7 +9,6 @@ @@ -35763,10 +71077,85 @@ index 91d8824c64..abaf7730c3 100644 #include #include #include +@@ -75,6 +74,10 @@ rte_distributor_return_pkt_single(struct rte_distributor_single *d, + union rte_distributor_buffer_single *buf = &d->bufs[worker_id]; + uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS) + | RTE_DISTRIB_RETURN_BUF; ++ while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED) ++ & RTE_DISTRIB_FLAGS_MASK)) ++ rte_pause(); ++ + /* Sync with distributor on RETURN_BUF flag. */ + __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE); + return 0; +diff --git a/dpdk/lib/librte_eal/common/arch/arm/rte_cycles.c b/dpdk/lib/librte_eal/common/arch/arm/rte_cycles.c +index 3500d523ef..5bd29b24b1 100644 +--- a/dpdk/lib/librte_eal/common/arch/arm/rte_cycles.c ++++ b/dpdk/lib/librte_eal/common/arch/arm/rte_cycles.c +@@ -3,14 +3,35 @@ + */ + + #include "eal_private.h" ++#include "rte_cycles.h" + + uint64_t + get_tsc_freq_arch(void) + { + #if defined RTE_ARCH_ARM64 && !defined RTE_ARM_EAL_RDTSC_USE_PMU +- uint64_t freq; +- asm volatile("mrs %0, cntfrq_el0" : "=r" (freq)); +- return freq; ++ return __rte_arm64_cntfrq(); ++#elif defined RTE_ARCH_ARM64 && defined RTE_ARM_EAL_RDTSC_USE_PMU ++#define CYC_PER_1MHZ 1E6 ++ /* Use the generic counter ticks to calculate the PMU ++ * cycle frequency. ++ */ ++ uint64_t ticks; ++ uint64_t start_ticks, cur_ticks; ++ uint64_t start_pmu_cycles, end_pmu_cycles; ++ ++ /* Number of ticks for 1/10 second */ ++ ticks = __rte_arm64_cntfrq() / 10; ++ ++ start_ticks = __rte_arm64_cntvct_precise(); ++ start_pmu_cycles = rte_rdtsc_precise(); ++ do { ++ cur_ticks = __rte_arm64_cntvct(); ++ } while ((cur_ticks - start_ticks) < ticks); ++ end_pmu_cycles = rte_rdtsc_precise(); ++ ++ /* Adjust the cycles to next 1Mhz */ ++ return RTE_ALIGN_MUL_CEIL(end_pmu_cycles - start_pmu_cycles, ++ CYC_PER_1MHZ) * 10; + #else + return 0; + #endif +diff --git a/dpdk/lib/librte_eal/common/eal_common_dev.c b/dpdk/lib/librte_eal/common/eal_common_dev.c +index 9e4f09d83e..363a2ca95e 100644 +--- a/dpdk/lib/librte_eal/common/eal_common_dev.c ++++ b/dpdk/lib/librte_eal/common/eal_common_dev.c +@@ -526,6 +526,7 @@ rte_dev_event_callback_unregister(const char *device_name, + */ + if (event_cb->active == 0) { + TAILQ_REMOVE(&dev_event_cbs, event_cb, next); ++ free(event_cb->dev_name); + free(event_cb); + ret++; + } else { diff --git a/dpdk/lib/librte_eal/common/eal_common_fbarray.c b/dpdk/lib/librte_eal/common/eal_common_fbarray.c -index 1312f936b8..4f8f1af73c 100644 +index 1312f936b8..de7e772042 100644 --- a/dpdk/lib/librte_eal/common/eal_common_fbarray.c +++ b/dpdk/lib/librte_eal/common/eal_common_fbarray.c +@@ -114,7 +114,7 @@ overlap(const struct mem_area *ma, const void *start, size_t len) + if (start >= ma_start && start < ma_end) + return 1; + /* end overlap? 
*/ +- if (end >= ma_start && end < ma_end) ++ if (end > ma_start && end < ma_end) + return 1; + return 0; + } @@ -1337,7 +1337,7 @@ fbarray_find_biggest(struct rte_fbarray *arr, unsigned int start, bool used, */ @@ -35776,6 +71165,38 @@ index 1312f936b8..4f8f1af73c 100644 * read-locking the fbarray, so read lock here is OK. */ rte_rwlock_read_lock(&arr->rwlock); +diff --git a/dpdk/lib/librte_eal/common/eal_common_lcore.c b/dpdk/lib/librte_eal/common/eal_common_lcore.c +index 39efadef1a..3ecca3869b 100644 +--- a/dpdk/lib/librte_eal/common/eal_common_lcore.c ++++ b/dpdk/lib/librte_eal/common/eal_common_lcore.c +@@ -31,8 +31,12 @@ int rte_lcore_index(int lcore_id) + if (unlikely(lcore_id >= RTE_MAX_LCORE)) + return -1; + +- if (lcore_id < 0) ++ if (lcore_id < 0) { ++ if (rte_lcore_id() == LCORE_ID_ANY) ++ return -1; ++ + lcore_id = (int)rte_lcore_id(); ++ } + + return lcore_config[lcore_id].core_index; + } +@@ -42,8 +46,12 @@ int rte_lcore_to_cpu_id(int lcore_id) + if (unlikely(lcore_id >= RTE_MAX_LCORE)) + return -1; + +- if (lcore_id < 0) ++ if (lcore_id < 0) { ++ if (rte_lcore_id() == LCORE_ID_ANY) ++ return -1; ++ + lcore_id = (int)rte_lcore_id(); ++ } + + return lcore_config[lcore_id].core_id; + } diff --git a/dpdk/lib/librte_eal/common/eal_common_log.c b/dpdk/lib/librte_eal/common/eal_common_log.c index c0efd5214f..975aea90db 100644 --- a/dpdk/lib/librte_eal/common/eal_common_log.c @@ -35790,10 +71211,25 @@ index c0efd5214f..975aea90db 100644 } else { if (regexec(&opt_ll->re_match, name, 0, NULL, 0) == 0) diff --git a/dpdk/lib/librte_eal/common/eal_common_memory.c b/dpdk/lib/librte_eal/common/eal_common_memory.c -index 4a9cc1f19a..cc7d54e0c7 100644 +index 4a9cc1f19a..4c897a13f1 100644 --- a/dpdk/lib/librte_eal/common/eal_common_memory.c +++ b/dpdk/lib/librte_eal/common/eal_common_memory.c -@@ -97,7 +97,7 @@ eal_get_virtual_area(void *requested_addr, size_t *size, +@@ -40,6 +40,14 @@ + static void *next_baseaddr; + static uint64_t system_page_sz; + ++#ifdef RTE_EXEC_ENV_LINUX ++#define RTE_DONTDUMP MADV_DONTDUMP ++#elif defined RTE_EXEC_ENV_FREEBSD ++#define RTE_DONTDUMP MADV_NOCORE ++#else ++#error "madvise doesn't support this OS" ++#endif ++ + #define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5 + void * + eal_get_virtual_area(void *requested_addr, size_t *size, +@@ -97,7 +105,7 @@ eal_get_virtual_area(void *requested_addr, size_t *size, return NULL; } @@ -35802,6 +71238,20 @@ index 4a9cc1f19a..cc7d54e0c7 100644 mmap_flags, -1, 0); if (mapped_addr == MAP_FAILED && allow_shrink) *size -= page_sz; +@@ -179,6 +187,13 @@ eal_get_virtual_area(void *requested_addr, size_t *size, + munmap(aligned_end, after_len); + } + ++ if (!unmap) { ++ /* Exclude these pages from a core dump. 
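[Editor's note] The overlap() change above is the classic half-open interval rule: with [start, start+len) ranges, a segment whose end equals another's start does not overlap it, so the end-overlap test must use '>' rather than '>='. Equivalently, the whole predicate reduces to two comparisons, as in this minimal sketch (illustrative names):

#include <stdbool.h>
#include <stddef.h>

/* True if [s1, s1+l1) and [s2, s2+l2) share at least one byte.
 * Half-open ends: touching ranges (end == start) do not overlap.
 */
static bool
ranges_overlap(const char *s1, size_t l1, const char *s2, size_t l2)
{
	const char *e1 = s1 + l1;
	const char *e2 = s2 + l2;

	return s1 < e2 && s2 < e1;
}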
*/ ++ if (madvise(aligned_addr, *size, RTE_DONTDUMP) != 0) ++ RTE_LOG(DEBUG, EAL, "madvise failed: %s\n", ++ strerror(errno)); ++ } ++ + return aligned_addr; + } + diff --git a/dpdk/lib/librte_eal/common/eal_common_options.c b/dpdk/lib/librte_eal/common/eal_common_options.c index a7f9c5f9bd..f791e9671d 100644 --- a/dpdk/lib/librte_eal/common/eal_common_options.c @@ -35815,6 +71265,137 @@ index a7f9c5f9bd..f791e9671d 100644 goto fail; } if (rte_log_save_regexp(regex, priority) < 0) +diff --git a/dpdk/lib/librte_eal/common/eal_common_proc.c b/dpdk/lib/librte_eal/common/eal_common_proc.c +index 935e8fefeb..13ae2b915e 100644 +--- a/dpdk/lib/librte_eal/common/eal_common_proc.c ++++ b/dpdk/lib/librte_eal/common/eal_common_proc.c +@@ -416,7 +416,7 @@ process_async_request(struct pending_request *sr, const struct timespec *now) + /* did we timeout? */ + timeout = timespec_cmp(¶m->end, now) <= 0; + +- /* if we received a response, adjust relevant data and copy mesasge. */ ++ /* if we received a response, adjust relevant data and copy message. */ + if (sr->reply_received == 1 && sr->reply) { + struct rte_mp_msg *msg, *user_msgs, *tmp; + +@@ -621,7 +621,7 @@ rte_mp_channel_init(void) + + if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle", + NULL, mp_handle, NULL) < 0) { +- RTE_LOG(ERR, EAL, "failed to create mp thead: %s\n", ++ RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n", + strerror(errno)); + close(mp_fd); + close(dir_fd); +diff --git a/dpdk/lib/librte_eal/common/include/arch/arm/meson.build b/dpdk/lib/librte_eal/common/include/arch/arm/meson.build +index 77893fa359..faa3d84f67 100644 +--- a/dpdk/lib/librte_eal/common/include/arch/arm/meson.build ++++ b/dpdk/lib/librte_eal/common/include/arch/arm/meson.build +@@ -14,6 +14,7 @@ install_headers( + 'rte_cycles.h', + 'rte_io_64.h', + 'rte_io.h', ++ 'rte_mcslock.h', + 'rte_memcpy_32.h', + 'rte_memcpy_64.h', + 'rte_memcpy.h', +@@ -25,5 +26,6 @@ install_headers( + 'rte_prefetch.h', + 'rte_rwlock.h', + 'rte_spinlock.h', ++ 'rte_ticketlock.h', + 'rte_vect.h', + subdir: get_option('include_subdir_arch')) +diff --git a/dpdk/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h b/dpdk/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h +index 859ae129d8..3384934afa 100644 +--- a/dpdk/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h ++++ b/dpdk/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h +@@ -47,19 +47,26 @@ extern "C" { + /*------------------------ 128 bit atomic operations -------------------------*/ + + #if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS) ++#if defined(RTE_CC_CLANG) ++#define __LSE_PREAMBLE ".arch armv8-a+lse\n" ++#else ++#define __LSE_PREAMBLE "" ++#endif ++ + #define __ATOMIC128_CAS_OP(cas_op_name, op_string) \ +-static __rte_noinline rte_int128_t \ +-cas_op_name(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated) \ ++static __rte_noinline void \ ++cas_op_name(rte_int128_t *dst, rte_int128_t *old, rte_int128_t updated) \ + { \ + /* caspX instructions register pair must start from even-numbered + * register at operand 1. + * So, specify registers for local variables here. 
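[Editor's note] The RTE_DONTDUMP/madvise() addition in the eal_common_memory.c hunk above (MADV_DONTDUMP on Linux, MADV_NOCORE on FreeBSD) excludes reserved-but-unused virtual address space from core dumps, which keeps dumps of processes with large VA reservations manageable. A hedged, Linux-only sketch of the same call (reserve_nodump() is illustrative):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

/* Reserve an anonymous region and exclude it from core dumps.
 * FreeBSD would use MADV_NOCORE in place of MADV_DONTDUMP.
 */
static void *
reserve_nodump(size_t len)
{
	void *va = mmap(NULL, len, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (va == MAP_FAILED)
		return NULL;

	/* As in the patch, a failure here is only worth a log line. */
	if (madvise(va, len, MADV_DONTDUMP) != 0)
		fprintf(stderr, "madvise failed: %s\n", strerror(errno));
	return va;
}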
+ */ \ +- register uint64_t x0 __asm("x0") = (uint64_t)old.val[0]; \ +- register uint64_t x1 __asm("x1") = (uint64_t)old.val[1]; \ ++ register uint64_t x0 __asm("x0") = (uint64_t)old->val[0]; \ ++ register uint64_t x1 __asm("x1") = (uint64_t)old->val[1]; \ + register uint64_t x2 __asm("x2") = (uint64_t)updated.val[0]; \ + register uint64_t x3 __asm("x3") = (uint64_t)updated.val[1]; \ + asm volatile( \ ++ __LSE_PREAMBLE \ + op_string " %[old0], %[old1], %[upd0], %[upd1], [%[dst]]" \ + : [old0] "+r" (x0), \ + [old1] "+r" (x1) \ +@@ -67,9 +74,8 @@ cas_op_name(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated) \ + [upd1] "r" (x3), \ + [dst] "r" (dst) \ + : "memory"); \ +- old.val[0] = x0; \ +- old.val[1] = x1; \ +- return old; \ ++ old->val[0] = x0; \ ++ old->val[1] = x1; \ + } + + __ATOMIC128_CAS_OP(__cas_128_relaxed, "casp") +@@ -77,6 +83,7 @@ __ATOMIC128_CAS_OP(__cas_128_acquire, "caspa") + __ATOMIC128_CAS_OP(__cas_128_release, "caspl") + __ATOMIC128_CAS_OP(__cas_128_acq_rel, "caspal") + ++#undef __LSE_PREAMBLE + #undef __ATOMIC128_CAS_OP + + #endif +@@ -106,13 +113,14 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, + + #if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS) + if (success == __ATOMIC_RELAXED) +- old = __cas_128_relaxed(dst, expected, desired); ++ __cas_128_relaxed(dst, exp, desired); + else if (success == __ATOMIC_ACQUIRE) +- old = __cas_128_acquire(dst, expected, desired); ++ __cas_128_acquire(dst, exp, desired); + else if (success == __ATOMIC_RELEASE) +- old = __cas_128_release(dst, expected, desired); ++ __cas_128_release(dst, exp, desired); + else +- old = __cas_128_acq_rel(dst, expected, desired); ++ __cas_128_acq_rel(dst, exp, desired); ++ old = *exp; + #else + #define __HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE) + #define __HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \ +@@ -176,12 +184,12 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, + #undef __STORE_128 + + } while (unlikely(ret)); +-#endif + +- /* Unconditionally updating expected removes an 'if' statement. +- * expected should already be in register if not in the cache. ++ /* Unconditionally updating the value of exp removes an 'if' statement. ++ * The value of exp should already be in register if not in the cache. 
+ */ + *exp = old; ++#endif + + return (old.int128 == expected.int128); + } diff --git a/dpdk/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h b/dpdk/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h index 859b09748c..f79718ce8c 100644 --- a/dpdk/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h @@ -35829,10 +71410,86 @@ index 859b09748c..f79718ce8c 100644 static inline uint64_t __rte_rdtsc_pmccntr(void) diff --git a/dpdk/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h b/dpdk/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h -index 68e7c73384..da557b6a10 100644 +index 68e7c73384..e41f9dbd62 100644 --- a/dpdk/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h +++ b/dpdk/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h -@@ -62,7 +62,7 @@ rte_rdtsc(void) +@@ -1,5 +1,6 @@ + /* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 Cavium, Inc ++ * Copyright(c) 2020 Arm Limited + */ + + #ifndef _RTE_CYCLES_ARM64_H_ +@@ -11,6 +12,33 @@ extern "C" { + + #include "generic/rte_cycles.h" + ++/** Read generic counter frequency */ ++static __rte_always_inline uint64_t ++__rte_arm64_cntfrq(void) ++{ ++ uint64_t freq; ++ ++ asm volatile("mrs %0, cntfrq_el0" : "=r" (freq)); ++ return freq; ++} ++ ++/** Read generic counter */ ++static __rte_always_inline uint64_t ++__rte_arm64_cntvct(void) ++{ ++ uint64_t tsc; ++ ++ asm volatile("mrs %0, cntvct_el0" : "=r" (tsc)); ++ return tsc; ++} ++ ++static __rte_always_inline uint64_t ++__rte_arm64_cntvct_precise(void) ++{ ++ asm volatile("isb" : : : "memory"); ++ return __rte_arm64_cntvct(); ++} ++ + /** + * Read the time base register. + * +@@ -25,10 +53,7 @@ extern "C" { + static inline uint64_t + rte_rdtsc(void) + { +- uint64_t tsc; +- +- asm volatile("mrs %0, cntvct_el0" : "=r" (tsc)); +- return tsc; ++ return __rte_arm64_cntvct(); + } + #else + /** +@@ -49,20 +74,28 @@ rte_rdtsc(void) + * asm volatile("msr pmcr_el0, %0" : : "r" (val)); + * + */ +-static inline uint64_t +-rte_rdtsc(void) ++ ++/** Read PMU cycle counter */ ++static __rte_always_inline uint64_t ++__rte_arm64_pmccntr(void) + { + uint64_t tsc; + + asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc)); + return tsc; + } ++ ++static inline uint64_t ++rte_rdtsc(void) ++{ ++ return __rte_arm64_pmccntr(); ++} + #endif + static inline uint64_t rte_rdtsc_precise(void) { @@ -35841,11 +71498,28 @@ index 68e7c73384..da557b6a10 100644 return rte_rdtsc(); } +diff --git a/dpdk/lib/librte_eal/common/include/arch/arm/rte_vect.h b/dpdk/lib/librte_eal/common/include/arch/arm/rte_vect.h +index 2a18a68546..71e56ec63e 100644 +--- a/dpdk/lib/librte_eal/common/include/arch/arm/rte_vect.h ++++ b/dpdk/lib/librte_eal/common/include/arch/arm/rte_vect.h +@@ -62,7 +62,11 @@ vaddvq_u16(uint16x8_t a) + + #endif + +-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70000) ++#if defined(RTE_ARCH_ARM) || \ ++(defined(RTE_ARCH_ARM64) && RTE_TOOLCHAIN_GCC && (GCC_VERSION < 70000)) ++/* NEON intrinsic vcopyq_laneq_u32() is not supported in ARMv7-A(AArch32) ++ * On AArch64, this intrinsic is supported since GCC version 7. 
++ */ + static inline uint32x4_t + vcopyq_laneq_u32(uint32x4_t a, const int lane_a, + uint32x4_t b, const int lane_b) diff --git a/dpdk/lib/librte_eal/common/include/arch/ppc_64/meson.build b/dpdk/lib/librte_eal/common/include/arch/ppc_64/meson.build -index 00f9611768..7949c86258 100644 +index 00f9611768..5d3ebafa26 100644 --- a/dpdk/lib/librte_eal/common/include/arch/ppc_64/meson.build +++ b/dpdk/lib/librte_eal/common/include/arch/ppc_64/meson.build -@@ -2,6 +2,7 @@ +@@ -2,15 +2,18 @@ # Copyright(c) 2018 Luca Boccassi install_headers( @@ -35853,6 +71527,17 @@ index 00f9611768..7949c86258 100644 'rte_atomic.h', 'rte_byteorder.h', 'rte_cpuflags.h', + 'rte_cycles.h', + 'rte_io.h', ++ 'rte_mcslock.h', + 'rte_memcpy.h', + 'rte_pause.h', + 'rte_prefetch.h', + 'rte_rwlock.h', + 'rte_spinlock.h', ++ 'rte_ticketlock.h', + 'rte_vect.h', + subdir: get_option('include_subdir_arch')) diff --git a/dpdk/lib/librte_eal/common/include/arch/ppc_64/rte_altivec.h b/dpdk/lib/librte_eal/common/include/arch/ppc_64/rte_altivec.h new file mode 100644 index 0000000000..1551a94544 @@ -35935,6 +71620,24 @@ index 068c805b22..4caafd9d2b 100644 #include "generic/rte_vect.h" #ifdef __cplusplus +diff --git a/dpdk/lib/librte_eal/common/include/arch/x86/meson.build b/dpdk/lib/librte_eal/common/include/arch/x86/meson.build +index bc8ffea1ee..806cfad042 100644 +--- a/dpdk/lib/librte_eal/common/include/arch/x86/meson.build ++++ b/dpdk/lib/librte_eal/common/include/arch/x86/meson.build +@@ -11,11 +11,13 @@ install_headers( + 'rte_cpuflags.h', + 'rte_cycles.h', + 'rte_io.h', ++ 'rte_mcslock.h', + 'rte_memcpy.h', + 'rte_prefetch.h', + 'rte_pause.h', + 'rte_rtm.h', + 'rte_rwlock.h', + 'rte_spinlock.h', ++ 'rte_ticketlock.h', + 'rte_vect.h', + subdir: get_option('include_subdir_arch')) diff --git a/dpdk/lib/librte_eal/common/include/arch/x86/rte_atomic.h b/dpdk/lib/librte_eal/common/include/arch/x86/rte_atomic.h index 148398f50a..b9dcd30aba 100644 --- a/dpdk/lib/librte_eal/common/include/arch/x86/rte_atomic.h @@ -35949,7 +71652,7 @@ index 148398f50a..b9dcd30aba 100644 * Basic idea is to use lock prefixed add with some dummy memory location * as the destination. From their experiments 128B(2 cache lines) below diff --git a/dpdk/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/dpdk/lib/librte_eal/common/include/arch/x86/rte_memcpy.h -index ba44c4a328..9c67232df9 100644 +index ba44c4a328..d01832fa15 100644 --- a/dpdk/lib/librte_eal/common/include/arch/x86/rte_memcpy.h +++ b/dpdk/lib/librte_eal/common/include/arch/x86/rte_memcpy.h @@ -22,6 +22,11 @@ @@ -35964,6 +71667,15 @@ index ba44c4a328..9c67232df9 100644 /** * Copy bytes from one location to another. The locations must not overlap. * +@@ -40,7 +45,7 @@ extern "C" { + static __rte_always_inline void * + rte_memcpy(void *dst, const void *src, size_t n); + +-#ifdef RTE_MACHINE_CPUFLAG_AVX512F ++#if defined RTE_MACHINE_CPUFLAG_AVX512F && defined RTE_MEMCPY_AVX512 + + #define ALIGNMENT_MASK 0x3F + @@ -869,6 +874,10 @@ rte_memcpy(void *dst, const void *src, size_t n) return rte_memcpy_generic(dst, src, n); } @@ -35992,10 +71704,80 @@ index 38e8cfd32b..9ca960932f 100644 #else #error Unsupported endianness. 
#endif +diff --git a/dpdk/lib/librte_eal/common/include/generic/rte_mcslock.h b/dpdk/lib/librte_eal/common/include/generic/rte_mcslock.h +index 2bef28351c..896dfbf10f 100644 +--- a/dpdk/lib/librte_eal/common/include/generic/rte_mcslock.h ++++ b/dpdk/lib/librte_eal/common/include/generic/rte_mcslock.h +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + /** + * The rte_mcslock_t type. +@@ -68,7 +69,14 @@ rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me) + */ + return; + } +- __atomic_store_n(&prev->next, me, __ATOMIC_RELAXED); ++ /* The store to me->next above should also complete before the node is ++ * visible to predecessor thread releasing the lock. Hence, the store ++ * prev->next also requires release semantics. Note that, for example, ++ * on ARM, the release semantics in the exchange operation is not ++ * strong as a release fence and is not sufficient to enforce the ++ * desired order here. ++ */ ++ __atomic_store_n(&prev->next, me, __ATOMIC_RELEASE); + + /* The while-load of me->locked should not move above the previous + * store to prev->next. Otherwise it will cause a deadlock. Need a +diff --git a/dpdk/lib/librte_eal/common/include/generic/rte_memcpy.h b/dpdk/lib/librte_eal/common/include/generic/rte_memcpy.h +index 701e550c31..e7f0f8eaa9 100644 +--- a/dpdk/lib/librte_eal/common/include/generic/rte_memcpy.h ++++ b/dpdk/lib/librte_eal/common/include/generic/rte_memcpy.h +@@ -95,6 +95,10 @@ rte_mov256(uint8_t *dst, const uint8_t *src); + * @note This is implemented as a macro, so it's address should not be taken + * and care is needed as parameter expressions may be evaluated multiple times. + * ++ * @note For x86 platforms to enable the AVX-512 memcpy implementation, set ++ * -DRTE_MEMCPY_AVX512 macro in CFLAGS, or define the RTE_MEMCPY_AVX512 macro ++ * explicitly in the source file before including the rte_memcpy header file. ++ * + * @param dst + * Pointer to the destination of the data. + * @param src diff --git a/dpdk/lib/librte_eal/common/include/rte_common.h b/dpdk/lib/librte_eal/common/include/rte_common.h -index 459d082d14..41e2778ec1 100644 +index 459d082d14..fe7539af26 100644 --- a/dpdk/lib/librte_eal/common/include/rte_common.h +++ b/dpdk/lib/librte_eal/common/include/rte_common.h +@@ -245,7 +245,7 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) + * than the first parameter. + */ + #define RTE_ALIGN_MUL_CEIL(v, mul) \ +- (((v + (typeof(v))(mul) - 1) / ((typeof(v))(mul))) * (typeof(v))(mul)) ++ ((((v) + (typeof(v))(mul) - 1) / ((typeof(v))(mul))) * (typeof(v))(mul)) + + /** + * Macro to align a value to the multiple of given value. The resultant +@@ -253,7 +253,7 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) + * than the first parameter. + */ + #define RTE_ALIGN_MUL_FLOOR(v, mul) \ +- ((v / ((typeof(v))(mul))) * (typeof(v))(mul)) ++ (((v) / ((typeof(v))(mul))) * (typeof(v))(mul)) + + /** + * Macro to align value to the nearest multiple of the given value. +@@ -264,7 +264,7 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) + ({ \ + typeof(v) ceil = RTE_ALIGN_MUL_CEIL(v, mul); \ + typeof(v) floor = RTE_ALIGN_MUL_FLOOR(v, mul); \ +- (ceil - v) > (v - floor) ? floor : ceil; \ ++ (ceil - (v)) > ((v) - floor) ? floor : ceil; \ + }) + + /** @@ -347,7 +347,7 @@ typedef uint64_t rte_iova_t; * The combined value. */ @@ -36034,8 +71816,37 @@ index 459d082d14..41e2778ec1 100644 * @param v * The input parameter. 
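[Editor's note] The RTE_ALIGN_MUL_* fixes above apply the standard macro-hygiene rule: every use of a macro parameter must be parenthesized, or an expression argument binds to the operators inside the expansion. A self-contained demonstration of the failure mode, with the typeof() casts of the real macros omitted for brevity:

#include <stdio.h>

/* Buggy: 'v' unparenthesized, so v = a + b expands to a + (b / 8) * 8 */
#define ALIGN_MUL_FLOOR_BAD(v, mul)  ((v / (mul)) * (mul))
/* Fixed, as in the patch */
#define ALIGN_MUL_FLOOR(v, mul)      (((v) / (mul)) * (mul))

int
main(void)
{
	unsigned int a = 3, b = 10;

	/* 13 floored to a multiple of 8 should be 8. */
	printf("bad:   %u\n", ALIGN_MUL_FLOOR_BAD(a + b, 8)); /* prints 32 */
	printf("fixed: %u\n", ALIGN_MUL_FLOOR(a + b, 8));     /* prints 8 */
	return 0;
}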
* @return +diff --git a/dpdk/lib/librte_eal/common/include/rte_eal.h b/dpdk/lib/librte_eal/common/include/rte_eal.h +index 2f9ed298de..58f41d329f 100644 +--- a/dpdk/lib/librte_eal/common/include/rte_eal.h ++++ b/dpdk/lib/librte_eal/common/include/rte_eal.h +@@ -138,8 +138,9 @@ int rte_eal_init(int argc, char **argv); + * be made. It is expected that common usage of this function is to call it + * just before terminating the process. + * +- * @return 0 Successfully released all internal EAL resources +- * @return -EFAULT There was an error in releasing all resources. ++ * @return ++ * - 0 Successfully released all internal EAL resources. ++ * - -EFAULT There was an error in releasing all resources. + */ + int rte_eal_cleanup(void); + +diff --git a/dpdk/lib/librte_eal/common/include/rte_reciprocal.h b/dpdk/lib/librte_eal/common/include/rte_reciprocal.h +index 63e16fde0a..735adb029b 100644 +--- a/dpdk/lib/librte_eal/common/include/rte_reciprocal.h ++++ b/dpdk/lib/librte_eal/common/include/rte_reciprocal.h +@@ -27,6 +27,8 @@ + + #include + ++#include ++ + struct rte_reciprocal { + uint32_t m; + uint8_t sh1, sh2; diff --git a/dpdk/lib/librte_eal/common/include/rte_service.h b/dpdk/lib/librte_eal/common/include/rte_service.h -index d8701dd4cf..3a1c735c58 100644 +index d8701dd4cf..e2d0a6dd32 100644 --- a/dpdk/lib/librte_eal/common/include/rte_service.h +++ b/dpdk/lib/librte_eal/common/include/rte_service.h @@ -104,12 +104,16 @@ int32_t rte_service_probe_capability(uint32_t id, uint32_t capability); @@ -36057,11 +71868,34 @@ index d8701dd4cf..3a1c735c58 100644 * @param service_id the service to apply the lcore to * @param lcore The lcore that will be mapped to service * @param enable Zero to unmap or disable the core, non-zero to enable +@@ -300,6 +304,10 @@ int32_t rte_service_lcore_count(void); + * from duty, just unmaps all services / cores, and stops() the service cores. + * The runstate of services is not modified. + * ++ * The cores that are stopped with this call, are in FINISHED state and ++ * the application must take care of bringing them back to a launchable state: ++ * e.g. call *rte_eal_lcore_wait* on the lcore_id. ++ * + * @retval 0 Success + */ + int32_t rte_service_lcore_reset_all(void); diff --git a/dpdk/lib/librte_eal/common/include/rte_service_component.h b/dpdk/lib/librte_eal/common/include/rte_service_component.h -index 16eab79eea..b75aba11b9 100644 +index 16eab79eea..9e66ee7e29 100644 --- a/dpdk/lib/librte_eal/common/include/rte_service_component.h +++ b/dpdk/lib/librte_eal/common/include/rte_service_component.h -@@ -43,7 +43,7 @@ struct rte_service_spec { +@@ -9,6 +9,11 @@ + * Include this file if you are writing a component that requires CPU cycles to + * operate, and you wish to run the component using service cores + */ ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ + #include + #include + +@@ -43,7 +48,7 @@ struct rte_service_spec { /** * Register a new service. * @@ -36070,7 +71904,7 @@ index 16eab79eea..b75aba11b9 100644 * achieve its purpose. * * For example the eventdev SW PMD requires CPU cycles to perform its -@@ -56,6 +56,10 @@ struct rte_service_spec { +@@ -56,6 +61,10 @@ struct rte_service_spec { * *rte_service_component_runstate_set*, which indicates that the service * component is ready to be executed. * @@ -36081,8 +71915,30 @@ index 16eab79eea..b75aba11b9 100644 * @param spec The specification of the service to register * @param[out] service_id A pointer to a uint32_t, which will be filled in * during registration of the service. 
It is set to the integers +@@ -126,4 +135,8 @@ int32_t rte_service_init(void); + */ + void rte_service_finalize(void); + ++#ifdef __cplusplus ++} ++#endif ++ + #endif /* _SERVICE_PRIVATE_H_ */ +diff --git a/dpdk/lib/librte_eal/common/include/rte_uuid.h b/dpdk/lib/librte_eal/common/include/rte_uuid.h +index 044afbdfab..8b42e070af 100644 +--- a/dpdk/lib/librte_eal/common/include/rte_uuid.h ++++ b/dpdk/lib/librte_eal/common/include/rte_uuid.h +@@ -15,6 +15,8 @@ extern "C" { + #endif + + #include ++#include ++#include + + /** + * Struct describing a Universal Unique Identifier diff --git a/dpdk/lib/librte_eal/common/malloc_elem.c b/dpdk/lib/librte_eal/common/malloc_elem.c -index 885d00424b..51cdfc5d59 100644 +index 885d00424b..a0f2d22774 100644 --- a/dpdk/lib/librte_eal/common/malloc_elem.c +++ b/dpdk/lib/librte_eal/common/malloc_elem.c @@ -171,7 +171,7 @@ malloc_elem_insert(struct malloc_elem *elem) @@ -36094,6 +71950,25 @@ index 885d00424b..51cdfc5d59 100644 uint64_t dist_from_start, dist_from_end; dist_from_end = RTE_PTR_DIFF(heap->last, elem); +@@ -382,14 +382,14 @@ malloc_elem_free_list_index(size_t size) + return 0; + + /* Find next power of 2 >= size. */ +- log2 = sizeof(size) * 8 - __builtin_clzl(size-1); ++ log2 = sizeof(size) * 8 - __builtin_clzl(size - 1); + + /* Compute freelist index, based on log2(size). */ + index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) / +- MALLOC_LOG2_INCREMENT; ++ MALLOC_LOG2_INCREMENT; + +- return index <= RTE_HEAP_NUM_FREELISTS-1? +- index: RTE_HEAP_NUM_FREELISTS-1; ++ return index <= RTE_HEAP_NUM_FREELISTS - 1 ? ++ index : RTE_HEAP_NUM_FREELISTS - 1; + } + + /* diff --git a/dpdk/lib/librte_eal/common/malloc_heap.c b/dpdk/lib/librte_eal/common/malloc_heap.c index 842eb9de75..bd5065698d 100644 --- a/dpdk/lib/librte_eal/common/malloc_heap.c @@ -36122,7 +71997,7 @@ index 57ec8fb2b3..b7a089ac4f 100644 RTE_INIT(rte_rand_init) diff --git a/dpdk/lib/librte_eal/common/rte_service.c b/dpdk/lib/librte_eal/common/rte_service.c -index 79235c03f8..d5dd32d8d9 100644 +index 79235c03f8..8fcccac85c 100644 --- a/dpdk/lib/librte_eal/common/rte_service.c +++ b/dpdk/lib/librte_eal/common/rte_service.c @@ -50,6 +50,10 @@ struct rte_service_spec_impl { @@ -36212,7 +72087,7 @@ index 79235c03f8..d5dd32d8d9 100644 return 0; } -@@ -383,7 +393,7 @@ rte_service_may_be_active(uint32_t id) +@@ -383,11 +393,11 @@ rte_service_may_be_active(uint32_t id) int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE); int i; @@ -36221,6 +72096,11 @@ index 79235c03f8..d5dd32d8d9 100644 return -EINVAL; for (i = 0; i < lcore_count; i++) { +- if (lcore_states[i].service_active_on_lcore[id]) ++ if (lcore_states[ids[i]].service_active_on_lcore[id]) + return 1; + } + @@ -397,49 +407,39 @@ rte_service_may_be_active(uint32_t id) int32_t rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe) @@ -36284,7 +72164,16 @@ index 79235c03f8..d5dd32d8d9 100644 } cs->loops++; -@@ -693,9 +693,9 @@ rte_service_lcore_start(uint32_t lcore) +@@ -447,8 +447,6 @@ rte_service_runner_func(void *arg) + rte_smp_rmb(); + } + +- lcore_config[lcore].state = WAIT; +- + return 0; + } + +@@ -693,9 +691,9 @@ rte_service_lcore_start(uint32_t lcore) /* set core to run state first, and then launch otherwise it will * return immediately as runstate keeps it in the service poll loop */ @@ -36296,7 +72185,7 @@ index 79235c03f8..d5dd32d8d9 100644 /* returns -EBUSY if the core is already launched, 0 on success */ return ret; } -@@ -774,13 +774,9 @@ rte_service_lcore_attr_get(uint32_t lcore, 
uint32_t attr_id, +@@ -774,13 +772,9 @@ rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id, } static void @@ -36311,7 +72200,7 @@ index 79235c03f8..d5dd32d8d9 100644 int calls = 1; if (s->calls != 0) calls = s->calls; -@@ -807,7 +803,7 @@ rte_service_attr_reset_all(uint32_t id) +@@ -807,7 +801,7 @@ rte_service_attr_reset_all(uint32_t id) SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL); int reset = 1; @@ -36320,7 +72209,7 @@ index 79235c03f8..d5dd32d8d9 100644 return 0; } -@@ -851,21 +847,13 @@ rte_service_dump(FILE *f, uint32_t id) +@@ -851,21 +845,13 @@ rte_service_dump(FILE *f, uint32_t id) uint32_t i; int print_one = (id != UINT32_MAX); @@ -36343,7 +72232,7 @@ index 79235c03f8..d5dd32d8d9 100644 return 0; } -@@ -875,7 +863,7 @@ rte_service_dump(FILE *f, uint32_t id) +@@ -875,7 +861,7 @@ rte_service_dump(FILE *f, uint32_t id) if (!service_valid(i)) continue; uint32_t reset = 0; @@ -36352,6 +72241,19 @@ index 79235c03f8..d5dd32d8d9 100644 } fprintf(f, "Service Cores Summary\n"); +diff --git a/dpdk/lib/librte_eal/freebsd/eal/eal.c b/dpdk/lib/librte_eal/freebsd/eal/eal.c +index 6ae37e7e69..2a995ee3f4 100644 +--- a/dpdk/lib/librte_eal/freebsd/eal/eal.c ++++ b/dpdk/lib/librte_eal/freebsd/eal/eal.c +@@ -949,7 +949,7 @@ rte_eal_init(int argc, char **argv) + * place, so no cleanup needed. + */ + if (!internal_config.no_shconf && eal_clean_runtime_dir() < 0) { +- rte_eal_init_alert("Cannot clear runtime directory\n"); ++ rte_eal_init_alert("Cannot clear runtime directory"); + return -1; + } + diff --git a/dpdk/lib/librte_eal/freebsd/eal/eal_interrupts.c b/dpdk/lib/librte_eal/freebsd/eal/eal_interrupts.c index f6831b7902..3fee762be9 100644 --- a/dpdk/lib/librte_eal/freebsd/eal/eal_interrupts.c @@ -36470,9 +72372,18 @@ index f6831b7902..3fee762be9 100644 TAILQ_REMOVE(&intr_sources, src, next); free(src); diff --git a/dpdk/lib/librte_eal/freebsd/eal/eal_memory.c b/dpdk/lib/librte_eal/freebsd/eal/eal_memory.c -index a97d8f0f0c..5bc2da160c 100644 +index a97d8f0f0c..fb1549fbe6 100644 --- a/dpdk/lib/librte_eal/freebsd/eal/eal_memory.c +++ b/dpdk/lib/librte_eal/freebsd/eal/eal_memory.c +@@ -193,7 +193,7 @@ rte_eal_hugepage_init(void) + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n", + RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE), +- RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE)); ++ RTE_STR(CONFIG_RTE_MAX_MEM_MB_PER_TYPE)); + return -1; + } + arr = &msl->memseg_arr; @@ -449,7 +449,7 @@ memseg_primary_init(void) * * we need (N*2)-1 segments because we cannot guarantee that @@ -36483,7 +72394,7 @@ index a97d8f0f0c..5bc2da160c 100644 */ avail_segs = (hpi->num_pages[0] * 2) - 1; diff --git a/dpdk/lib/librte_eal/linux/eal/eal.c b/dpdk/lib/librte_eal/linux/eal/eal.c -index c4233ec3c8..e6d4cc7178 100644 +index c4233ec3c8..8f1bcab390 100644 --- a/dpdk/lib/librte_eal/linux/eal/eal.c +++ b/dpdk/lib/librte_eal/linux/eal/eal.c @@ -25,6 +25,7 @@ @@ -36503,10 +72414,79 @@ index c4233ec3c8..e6d4cc7178 100644 #endif } else if (is_iommu_enabled()) { /* we have an IOMMU, pick IOVA as VA mode */ +@@ -1287,7 +1288,7 @@ rte_eal_init(int argc, char **argv) + * place, so no cleanup needed. 
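
The rte_service_may_be_active() fix a few hunks up deserves a distilled example: rte_service_lcore_list() returns lcore ids, so any per-lcore state array must be indexed through ids[i], never the loop counter. A sketch, with states standing in for lcore_states:

#include <stdint.h>
#include <rte_lcore.h>
#include <rte_service.h>

struct lstate { int active; };
static struct lstate states[RTE_MAX_LCORE];	/* indexed by lcore id */

static int
any_service_lcore_active(void)
{
	uint32_t ids[RTE_MAX_LCORE];
	int32_t i, n = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	for (i = 0; i < n; i++)
		if (states[ids[i]].active)	/* ids[i], not i */
			return 1;
	return 0;
}
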
+ */ + if (!internal_config.no_shconf && eal_clean_runtime_dir() < 0) { +- rte_eal_init_alert("Cannot clear runtime directory\n"); ++ rte_eal_init_alert("Cannot clear runtime directory"); + return -1; + } + +diff --git a/dpdk/lib/librte_eal/linux/eal/eal_dev.c b/dpdk/lib/librte_eal/linux/eal/eal_dev.c +index 83c9cd6607..803979ff46 100644 +--- a/dpdk/lib/librte_eal/linux/eal/eal_dev.c ++++ b/dpdk/lib/librte_eal/linux/eal/eal_dev.c +@@ -189,7 +189,7 @@ dev_uev_parse(const char *buf, struct rte_dev_event *event, int length) + else if (!strncmp(subsystem, "vfio", 4)) + event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_VFIO; + else +- return -1; ++ goto err; + + /* parse the action type */ + if (!strncmp(action, "add", 3)) +@@ -197,8 +197,11 @@ dev_uev_parse(const char *buf, struct rte_dev_event *event, int length) + else if (!strncmp(action, "remove", 6)) + event->type = RTE_DEV_EVENT_REMOVE; + else +- return -1; ++ goto err; + return 0; ++err: ++ free(event->devname); ++ return -1; + } + + static void +@@ -234,8 +237,7 @@ dev_uev_handler(__rte_unused void *param) + + ret = dev_uev_parse(buf, &uevent, EAL_UEV_MSG_LEN); + if (ret < 0) { +- RTE_LOG(DEBUG, EAL, "It is not an valid event " +- "that need to be handle.\n"); ++ RTE_LOG(DEBUG, EAL, "Ignoring uevent '%s'\n", buf); + return; + } + +@@ -277,12 +279,14 @@ dev_uev_handler(__rte_unused void *param) + rte_spinlock_unlock(&failure_handle_lock); + } + rte_dev_event_callback_process(uevent.devname, uevent.type); ++ free(uevent.devname); + } + + return; + + failure_handle_err: + rte_spinlock_unlock(&failure_handle_lock); ++ free(uevent.devname); + } + + int diff --git a/dpdk/lib/librte_eal/linux/eal/eal_interrupts.c b/dpdk/lib/librte_eal/linux/eal/eal_interrupts.c -index 1955324d30..14ebb108ce 100644 +index 1955324d30..e570c0684e 100644 --- a/dpdk/lib/librte_eal/linux/eal/eal_interrupts.c +++ b/dpdk/lib/librte_eal/linux/eal/eal_interrupts.c +@@ -984,7 +984,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds) + } + + /* notify the pipe fd waited by epoll_wait to rebuild the wait list */ +- if (rv >= 0 && write(intr_pipe.writefd, "1", 1) < 0) { ++ if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) { + rte_spinlock_unlock(&intr_lock); + return -EPIPE; + } @@ -1045,8 +1045,6 @@ eal_intr_handle_interrupts(int pfd, unsigned totalfds) static __attribute__((noreturn)) void * eal_intr_thread_main(__rte_unused void *arg) @@ -36528,11 +72508,79 @@ index 1955324d30..14ebb108ce 100644 ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP; ev.data.fd = src->intr_handle.fd; +@@ -1204,7 +1205,7 @@ eal_epoll_process_event(struct epoll_event *evs, unsigned int n, + events[count].status = RTE_EPOLL_VALID; + events[count].fd = rev->fd; + events[count].epfd = rev->epfd; +- events[count].epdata.event = rev->epdata.event; ++ events[count].epdata.event = evs[i].events; + events[count].epdata.data = rev->epdata.data; + if (rev->epdata.cb_fun) + rev->epdata.cb_fun(rev->fd, diff --git a/dpdk/lib/librte_eal/linux/eal/eal_memalloc.c b/dpdk/lib/librte_eal/linux/eal/eal_memalloc.c -index af6d0d023a..678094acf9 100644 +index af6d0d023a..22f1ff68eb 100644 --- a/dpdk/lib/librte_eal/linux/eal/eal_memalloc.c +++ b/dpdk/lib/librte_eal/linux/eal/eal_memalloc.c -@@ -680,7 +680,7 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi, +@@ -325,6 +325,21 @@ get_seg_fd(char *path, int buflen, struct hugepage_info *hi, + fd = fd_list[list_idx].fds[seg_idx]; + + if (fd < 0) { ++ /* A primary process is the only one creating these ++ * files. 
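
The dev_uev_parse() change above is the classic single-exit cleanup idiom: once event->devname is allocated, every later failure takes one error label that frees it. A reduced sketch, with parse_rest() as a hypothetical stand-in for the remaining parsing steps:

#include <stdlib.h>
#include <string.h>

struct uevent { char *devname; int type; };

int parse_rest(const char *buf, struct uevent *ev);	/* hypothetical */

static int
uev_parse(const char *buf, struct uevent *ev)
{
	ev->devname = strdup(buf);
	if (ev->devname == NULL)
		return -1;
	if (parse_rest(buf, ev) != 0)
		goto err;		/* all later failures funnel here */
	return 0;
err:
	free(ev->devname);
	ev->devname = NULL;		/* leave no dangling pointer behind */
	return -1;
}
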
If there is a leftover that was not cleaned ++ * by clear_hugedir(), we must *now* make sure to drop ++ * the file or we will remap old stuff while the rest ++ * of the code is built on the assumption that a new ++ * page is clean. ++ */ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY && ++ unlink(path) == -1 && ++ errno != ENOENT) { ++ RTE_LOG(DEBUG, EAL, "%s(): could not remove '%s': %s\n", ++ __func__, path, strerror(errno)); ++ return -1; ++ } ++ + fd = open(path, O_CREAT | O_RDWR, 0600); + if (fd < 0) { + RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", +@@ -599,17 +614,25 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id, + } + + #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES +- ret = get_mempolicy(&cur_socket_id, NULL, 0, addr, +- MPOL_F_NODE | MPOL_F_ADDR); +- if (ret < 0) { +- RTE_LOG(DEBUG, EAL, "%s(): get_mempolicy: %s\n", +- __func__, strerror(errno)); +- goto mapped; +- } else if (cur_socket_id != socket_id) { +- RTE_LOG(DEBUG, EAL, +- "%s(): allocation happened on wrong socket (wanted %d, got %d)\n", +- __func__, socket_id, cur_socket_id); +- goto mapped; ++ /* ++ * If the kernel has been built without NUMA support, get_mempolicy() ++ * will return an error. If check_numa() returns false, memory ++ * allocation is not NUMA aware and the socket_id should not be ++ * checked. ++ */ ++ if (check_numa()) { ++ ret = get_mempolicy(&cur_socket_id, NULL, 0, addr, ++ MPOL_F_NODE | MPOL_F_ADDR); ++ if (ret < 0) { ++ RTE_LOG(DEBUG, EAL, "%s(): get_mempolicy: %s\n", ++ __func__, strerror(errno)); ++ goto mapped; ++ } else if (cur_socket_id != socket_id) { ++ RTE_LOG(DEBUG, EAL, ++ "%s(): allocation happened on wrong socket (wanted %d, got %d)\n", ++ __func__, socket_id, cur_socket_id); ++ goto mapped; ++ } + } + #else + if (rte_socket_count() > 1) +@@ -680,13 +703,16 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi, /* erase page data */ memset(ms->addr, 0, ms->len); @@ -36541,10 +72589,28 @@ index af6d0d023a..678094acf9 100644 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) { RTE_LOG(DEBUG, EAL, "couldn't unmap page\n"); + return -1; + } + ++ if (madvise(ms->addr, ms->len, MADV_DONTDUMP) != 0) ++ RTE_LOG(DEBUG, EAL, "madvise failed: %s\n", strerror(errno)); ++ + exit_early = false; + + /* if we're using anonymous hugepages, nothing to be done */ diff --git a/dpdk/lib/librte_eal/linux/eal/eal_memory.c b/dpdk/lib/librte_eal/linux/eal/eal_memory.c -index 43e4ffc757..7a9c97ff88 100644 +index 43e4ffc757..c93d9bb2b8 100644 --- a/dpdk/lib/librte_eal/linux/eal/eal_memory.c +++ b/dpdk/lib/librte_eal/linux/eal/eal_memory.c +@@ -703,7 +703,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end) + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, "Could not find space for memseg. 
Please increase %s and/or %s in configuration.\n", + RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE), +- RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE)); ++ RTE_STR(CONFIG_RTE_MAX_MEM_MB_PER_TYPE)); + return -1; + } + @@ -1340,6 +1340,8 @@ eal_legacy_hugepage_init(void) /* hugetlbfs can be disabled */ @@ -36649,19 +72715,55 @@ index 43e4ffc757..7a9c97ff88 100644 munmap(hp, size); if (fd_hugepage >= 0) diff --git a/dpdk/lib/librte_eal/linux/eal/eal_vfio.c b/dpdk/lib/librte_eal/linux/eal/eal_vfio.c -index 95f615c2e3..62ffe13e0e 100644 +index 95f615c2e3..1be02e7f13 100644 --- a/dpdk/lib/librte_eal/linux/eal/eal_vfio.c +++ b/dpdk/lib/librte_eal/linux/eal/eal_vfio.c -@@ -379,7 +379,7 @@ vfio_get_group_fd(struct vfio_config *vfio_cfg, +@@ -293,7 +293,7 @@ vfio_open_group_fd(int iommu_group_num) + strerror(errno)); + return -1; + } +- return 0; ++ return -ENOENT; + } + /* noiommu group found */ + } +@@ -318,12 +318,12 @@ vfio_open_group_fd(int iommu_group_num) + vfio_group_fd = mp_rep->fds[0]; + } else if (p->result == SOCKET_NO_FD) { + RTE_LOG(ERR, EAL, " bad VFIO group fd\n"); +- vfio_group_fd = 0; ++ vfio_group_fd = -ENOENT; + } } + free(mp_reply.msgs); +- if (vfio_group_fd < 0) ++ if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT) + RTE_LOG(ERR, EAL, " cannot request group fd\n"); + return vfio_group_fd; + } +@@ -381,7 +381,7 @@ vfio_get_group_fd(struct vfio_config *vfio_cfg, vfio_group_fd = vfio_open_group_fd(iommu_group_num); -- if (vfio_group_fd < 0) { -+ if (vfio_group_fd <= 0) { + if (vfio_group_fd < 0) { RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num); - return -1; +- return -1; ++ return vfio_group_fd; } -@@ -532,6 +532,17 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len, + + cur_grp->group_num = iommu_group_num; +@@ -514,9 +514,11 @@ static void + vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len, + void *arg __rte_unused) + { ++ rte_iova_t iova_start, iova_expected; + struct rte_memseg_list *msl; + struct rte_memseg *ms; + size_t cur_len = 0; ++ uint64_t va_start; + + msl = rte_mem_virt2memseg_list(addr); + +@@ -532,25 +534,88 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len, return; } @@ -36678,11 +72780,72 @@ index 95f615c2e3..62ffe13e0e 100644 +#endif /* memsegs are contiguous in memory */ ms = rte_mem_virt2memseg(addr, msl); ++ ++ /* ++ * This memory is not guaranteed to be contiguous, but it still could ++ * be, or it could have some small contiguous chunks. Since the number ++ * of VFIO mappings is limited, and VFIO appears to not concatenate ++ * adjacent mappings, we have to do this ourselves. ++ * ++ * So, find contiguous chunks, then map them. 
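
Distilled, the loop this comment describes looks as follows: extend the current run while each segment's IOVA matches the expected address, and flush one DMA mapping per run. Here dma_map() stands in for vfio_dma_mem_map():

#include <stdint.h>

struct seg { uint64_t va, iova, len; };

static void
map_contiguous_runs(const struct seg *s, int n,
		void (*dma_map)(uint64_t va, uint64_t iova, uint64_t len))
{
	uint64_t va0, iova0, expect;
	int i;

	if (n <= 0)
		return;
	va0 = s[0].va;
	iova0 = expect = s[0].iova;
	for (i = 0; i < n; i++) {
		if (s[i].iova != expect) {	/* contiguous run ended */
			dma_map(va0, iova0, expect - iova0);
			va0 = s[i].va;
			iova0 = s[i].iova;
		}
		expect = s[i].iova + s[i].len;
	}
	dma_map(va0, iova0, expect - iova0);	/* flush the final run */
}
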
++ */ ++ va_start = ms->addr_64; ++ iova_start = iova_expected = ms->iova; while (cur_len < len) { -@@ -551,6 +562,17 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len, - cur_len += ms->len; - ++ms; - } ++ bool new_contig_area = ms->iova != iova_expected; ++ bool last_seg = (len - cur_len) == ms->len; ++ bool skip_last = false; ++ ++ /* only do mappings when current contiguous area ends */ ++ if (new_contig_area) { ++ if (type == RTE_MEM_EVENT_ALLOC) ++ vfio_dma_mem_map(default_vfio_cfg, va_start, ++ iova_start, ++ iova_expected - iova_start, 1); ++ else ++ vfio_dma_mem_map(default_vfio_cfg, va_start, ++ iova_start, ++ iova_expected - iova_start, 0); ++ va_start = ms->addr_64; ++ iova_start = ms->iova; ++ } + /* some memory segments may have invalid IOVA */ + if (ms->iova == RTE_BAD_IOVA) { + RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n", + ms->addr); +- goto next; ++ skip_last = true; + } +- if (type == RTE_MEM_EVENT_ALLOC) +- vfio_dma_mem_map(default_vfio_cfg, ms->addr_64, +- ms->iova, ms->len, 1); +- else +- vfio_dma_mem_map(default_vfio_cfg, ms->addr_64, +- ms->iova, ms->len, 0); +-next: ++ iova_expected = ms->iova + ms->len; ++ cur_len += ms->len; ++ ++ms; ++ ++ /* ++ * don't count previous segment, and don't attempt to ++ * dereference a potentially invalid pointer. ++ */ ++ if (skip_last && !last_seg) { ++ iova_expected = iova_start = ms->iova; ++ va_start = ms->addr_64; ++ } else if (!skip_last && last_seg) { ++ /* this is the last segment and we're not skipping */ ++ if (type == RTE_MEM_EVENT_ALLOC) ++ vfio_dma_mem_map(default_vfio_cfg, va_start, ++ iova_start, ++ iova_expected - iova_start, 1); ++ else ++ vfio_dma_mem_map(default_vfio_cfg, va_start, ++ iova_start, ++ iova_expected - iova_start, 0); ++ } ++ } +#ifdef RTE_ARCH_PPC_64 + cur_len = 0; + ms = rte_mem_virt2memseg(addr, msl); @@ -36690,14 +72853,55 @@ index 95f615c2e3..62ffe13e0e 100644 + int idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + + rte_fbarray_set_used(&msl->memseg_arr, idx); -+ cur_len += ms->len; -+ ++ms; -+ } + cur_len += ms->len; + ++ms; + } +#endif } static int -@@ -1027,6 +1049,7 @@ vfio_get_default_container_fd(void) +@@ -663,11 +728,14 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr, + + /* get the actual group fd */ + vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num); +- if (vfio_group_fd < 0) ++ if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT) + return -1; + +- /* if group_fd == 0, that means the device isn't managed by VFIO */ +- if (vfio_group_fd == 0) { ++ /* ++ * if vfio_group_fd == -ENOENT, that means the device ++ * isn't managed by VFIO ++ */ ++ if (vfio_group_fd == -ENOENT) { + RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", + dev_addr); + return 1; +@@ -864,9 +932,6 @@ int + rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, + int vfio_dev_fd) + { +- struct vfio_group_status group_status = { +- .argsz = sizeof(group_status) +- }; + struct vfio_config *vfio_cfg; + int vfio_group_fd; + int iommu_group_num; +@@ -890,10 +955,10 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, + + /* get the actual group fd */ + vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num); +- if (vfio_group_fd <= 0) { ++ if (vfio_group_fd < 0) { + RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n", + dev_addr); +- ret = -1; ++ ret = vfio_group_fd; + goto out; + } + +@@ -1027,6 +1092,7 @@ vfio_get_default_container_fd(void) struct rte_mp_reply mp_reply = {0}; struct 
timespec ts = {.tv_sec = 5, .tv_nsec = 0}; struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param; @@ -36705,7 +72909,7 @@ index 95f615c2e3..62ffe13e0e 100644 if (default_vfio_cfg->vfio_enabled) return default_vfio_cfg->vfio_container_fd; -@@ -1049,8 +1072,9 @@ vfio_get_default_container_fd(void) +@@ -1049,8 +1115,9 @@ vfio_get_default_container_fd(void) mp_rep = &mp_reply.msgs[0]; p = (struct vfio_mp_param *)mp_rep->param; if (p->result == SOCKET_OK && mp_rep->num_fds == 1) { @@ -36716,7 +72920,7 @@ index 95f615c2e3..62ffe13e0e 100644 } } -@@ -1416,16 +1440,11 @@ vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, +@@ -1416,16 +1483,11 @@ vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, return 0; } @@ -36734,7 +72938,7 @@ index 95f615c2e3..62ffe13e0e 100644 /* skip external memory that isn't a heap */ if (msl->external && !msl->heap) -@@ -1435,10 +1454,7 @@ vfio_spapr_map_walk(const struct rte_memseg_list *msl, +@@ -1435,10 +1497,7 @@ vfio_spapr_map_walk(const struct rte_memseg_list *msl, if (ms->iova == RTE_BAD_IOVA) return 0; @@ -36746,7 +72950,7 @@ index 95f615c2e3..62ffe13e0e 100644 ms->len, 1); } -@@ -1446,7 +1462,7 @@ static int +@@ -1446,7 +1505,7 @@ static int vfio_spapr_unmap_walk(const struct rte_memseg_list *msl, const struct rte_memseg *ms, void *arg) { @@ -36755,7 +72959,7 @@ index 95f615c2e3..62ffe13e0e 100644 /* skip external memory that isn't a heap */ if (msl->external && !msl->heap) -@@ -1456,17 +1472,13 @@ vfio_spapr_unmap_walk(const struct rte_memseg_list *msl, +@@ -1456,17 +1515,13 @@ vfio_spapr_unmap_walk(const struct rte_memseg_list *msl, if (ms->iova == RTE_BAD_IOVA) return 0; @@ -36774,7 +72978,7 @@ index 95f615c2e3..62ffe13e0e 100644 }; static int -@@ -1484,10 +1496,6 @@ vfio_spapr_window_size_walk(const struct rte_memseg_list *msl, +@@ -1484,10 +1539,6 @@ vfio_spapr_window_size_walk(const struct rte_memseg_list *msl, if (ms->iova == RTE_BAD_IOVA) return 0; @@ -36785,7 +72989,7 @@ index 95f615c2e3..62ffe13e0e 100644 if (max > param->window_size) { param->hugepage_sz = ms->hugepage_sz; param->window_size = max; -@@ -1531,20 +1539,11 @@ vfio_spapr_create_new_dma_window(int vfio_container_fd, +@@ -1531,20 +1582,11 @@ vfio_spapr_create_new_dma_window(int vfio_container_fd, /* try possible page_shift and levels for workaround */ uint32_t levels; @@ -36811,7 +73015,7 @@ index 95f615c2e3..62ffe13e0e 100644 } #endif if (ret) { -@@ -1585,7 +1584,6 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, +@@ -1585,7 +1627,6 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, /* check if window size needs to be adjusted */ memset(¶m, 0, sizeof(param)); @@ -36819,7 +73023,7 @@ index 95f615c2e3..62ffe13e0e 100644 /* we're inside a callback so use thread-unsafe version */ if (rte_memseg_walk_thread_unsafe(vfio_spapr_window_size_walk, -@@ -1610,14 +1608,9 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, +@@ -1610,14 +1651,9 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, if (do_map) { /* re-create window and remap the entire memory */ if (iova + len > create.window_size) { @@ -36835,7 +73039,7 @@ index 95f615c2e3..62ffe13e0e 100644 RTE_LOG(ERR, EAL, "Could not release DMA maps\n"); ret = -1; goto out; -@@ -1644,7 +1637,7 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, +@@ -1644,7 +1680,7 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, /* we're 
inside a callback, so use thread-unsafe version */ if (rte_memseg_walk_thread_unsafe(vfio_spapr_map_walk, @@ -36844,7 +73048,7 @@ index 95f615c2e3..62ffe13e0e 100644 RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n"); ret = -1; goto out; -@@ -1691,7 +1684,6 @@ vfio_spapr_dma_map(int vfio_container_fd) +@@ -1691,7 +1727,6 @@ vfio_spapr_dma_map(int vfio_container_fd) struct spapr_walk_param param; memset(¶m, 0, sizeof(param)); @@ -36852,6 +73056,33 @@ index 95f615c2e3..62ffe13e0e 100644 /* create DMA window from 0 to max(phys_addr + len) */ rte_memseg_walk(vfio_spapr_window_size_walk, ¶m); +diff --git a/dpdk/lib/librte_eal/linux/eal/eal_vfio_mp_sync.c b/dpdk/lib/librte_eal/linux/eal/eal_vfio_mp_sync.c +index 5f2a5fc1d9..6254696ae5 100644 +--- a/dpdk/lib/librte_eal/linux/eal/eal_vfio_mp_sync.c ++++ b/dpdk/lib/librte_eal/linux/eal/eal_vfio_mp_sync.c +@@ -44,9 +44,9 @@ vfio_mp_primary(const struct rte_mp_msg *msg, const void *peer) + r->req = SOCKET_REQ_GROUP; + r->group_num = m->group_num; + fd = rte_vfio_get_group_fd(m->group_num); +- if (fd < 0) ++ if (fd < 0 && fd != -ENOENT) + r->result = SOCKET_ERR; +- else if (fd == 0) ++ else if (fd == -ENOENT) + /* if VFIO group exists but isn't bound to VFIO driver */ + r->result = SOCKET_NO_FD; + else { +diff --git a/dpdk/lib/librte_eal/rte_eal_exports.def b/dpdk/lib/librte_eal/rte_eal_exports.def +index 12a6c79d6a..c1bdee1c40 100644 +--- a/dpdk/lib/librte_eal/rte_eal_exports.def ++++ b/dpdk/lib/librte_eal/rte_eal_exports.def +@@ -1,6 +1,5 @@ + EXPORTS + __rte_panic +- rte_eal_get_configuration + rte_eal_init + rte_eal_mp_remote_launch + rte_eal_mp_wait_lcore diff --git a/dpdk/lib/librte_eal/windows/eal/include/sched.h b/dpdk/lib/librte_eal/windows/eal/include/sched.h index 257060594c..29868c93d1 100644 --- a/dpdk/lib/librte_eal/windows/eal/include/sched.h @@ -36885,6 +73116,18 @@ index 257060594c..29868c93d1 100644 (s)->_bits[_i] = 0LL; \ } while (0) +diff --git a/dpdk/lib/librte_efd/rte_efd.c b/dpdk/lib/librte_efd/rte_efd.c +index 4deeb17924..3fd1f1c97b 100644 +--- a/dpdk/lib/librte_efd/rte_efd.c ++++ b/dpdk/lib/librte_efd/rte_efd.c +@@ -707,6 +707,7 @@ rte_efd_create(const char *name, uint32_t max_num_rules, uint32_t key_len, + + error_unlock_exit: + rte_mcfg_tailq_write_unlock(); ++ rte_free(te); + rte_efd_free(table); + + return NULL; diff --git a/dpdk/lib/librte_ethdev/ethdev_profile.h b/dpdk/lib/librte_ethdev/ethdev_profile.h index 65031e6f3f..e5ee4df824 100644 --- a/dpdk/lib/librte_ethdev/ethdev_profile.h @@ -36903,11 +73146,61 @@ index 65031e6f3f..e5ee4df824 100644 +#endif /* RTE_ETHDEV_PROFILE_WITH_VTUNE */ + #endif +diff --git a/dpdk/lib/librte_ethdev/rte_eth_ctrl.h b/dpdk/lib/librte_ethdev/rte_eth_ctrl.h +index 1416c371fb..906cd4f458 100644 +--- a/dpdk/lib/librte_ethdev/rte_eth_ctrl.h ++++ b/dpdk/lib/librte_ethdev/rte_eth_ctrl.h +@@ -9,6 +9,7 @@ + #include + #include + #include "rte_flow.h" ++#include "rte_ethdev.h" + + /** + * @deprecated Please use rte_flow API instead of this legacy one. 
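
The VFIO changes above retire the old "0 means not managed" convention in favour of an explicit -ENOENT, so callers now see three distinct outcomes. A sketch of the intended caller logic, with use_group_fd() as a hypothetical consumer:

#include <errno.h>
#include <rte_vfio.h>

int use_group_fd(int fd);	/* hypothetical consumer */

static int
probe_group(int iommu_group_num)
{
	int fd = rte_vfio_get_group_fd(iommu_group_num);

	if (fd == -ENOENT)
		return 1;	/* group exists but not bound to VFIO: skip */
	if (fd < 0)
		return fd;	/* genuine failure: propagate */
	return use_group_fd(fd);
}
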
diff --git a/dpdk/lib/librte_ethdev/rte_ethdev.c b/dpdk/lib/librte_ethdev/rte_ethdev.c -index 6e9cb243ea..c3657509c5 100644 +index 6e9cb243ea..fb0912a4a8 100644 --- a/dpdk/lib/librte_ethdev/rte_ethdev.c +++ b/dpdk/lib/librte_ethdev/rte_ethdev.c -@@ -1166,14 +1166,14 @@ check_lro_pkt_size(uint16_t port_id, uint32_t config_size, +@@ -279,7 +279,7 @@ rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) + + error: + if (ret == -ENOTSUP) +- RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n", ++ RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n", + iter->bus->name); + free(devargs.args); + free(bus_str); +@@ -408,7 +408,9 @@ is_allocated(const struct rte_eth_dev *ethdev) + static struct rte_eth_dev * + _rte_eth_dev_allocated(const char *name) + { +- unsigned i; ++ uint16_t i; ++ ++ RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX); + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (rte_eth_devices[i].data != NULL && +@@ -437,7 +439,7 @@ rte_eth_dev_allocated(const char *name) + static uint16_t + rte_eth_dev_find_free_port(void) + { +- unsigned i; ++ uint16_t i; + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + /* Using shared name field to find a free port. */ +@@ -800,7 +802,7 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) + int + rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) + { +- uint32_t pid; ++ uint16_t pid; + + if (name == NULL) { + RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n"); +@@ -1166,14 +1168,14 @@ check_lro_pkt_size(uint16_t port_id, uint32_t config_size, /* * Validate offloads that are requested through rte_eth_dev_configure against @@ -36924,7 +73217,7 @@ index 6e9cb243ea..c3657509c5 100644 * @param offload_type * The offload type i.e. Rx/Tx string. * @param offload_name -@@ -1202,7 +1202,7 @@ validate_offloads(uint16_t port_id, uint64_t req_offloads, +@@ -1202,7 +1204,7 @@ validate_offloads(uint16_t port_id, uint64_t req_offloads, ret = -EINVAL; } @@ -36933,7 +73226,78 @@ index 6e9cb243ea..c3657509c5 100644 if (offload & set_offloads) { RTE_ETHDEV_LOG(DEBUG, "Port %u %s offload %s is not requested but enabled\n", -@@ -2968,6 +2968,7 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) +@@ -1222,8 +1224,10 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct rte_eth_conf orig_conf; ++ uint16_t overhead_len; + int diag; + int ret; ++ uint16_t old_mtu; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + +@@ -1249,10 +1253,20 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + memcpy(&dev->data->dev_conf, dev_conf, + sizeof(dev->data->dev_conf)); + ++ /* Backup mtu for rollback */ ++ old_mtu = dev->data->mtu; ++ + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + goto rollback; + ++ /* Get the real Ethernet overhead length */ ++ if (dev_info.max_mtu != UINT16_MAX && ++ dev_info.max_rx_pktlen > dev_info.max_mtu) ++ overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu; ++ else ++ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; ++ + /* If number of queues specified by application for both Rx and Tx is + * zero, use driver preferred values. This cannot be done individually + * as it is valid for either Tx or Rx (but not both) to be zero. 
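
The overhead computation introduced above reads well in isolation: if the driver reports a meaningful max_mtu, the L2 overhead is whatever separates it from max_rx_pktlen; otherwise fall back to Ethernet header plus CRC. With 1518/1500 that yields 18 bytes, so a max_rx_pkt_len of 9018 scales back to an MTU of 9000:

#include <stdint.h>
#include <rte_ether.h>

static uint16_t
eth_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		return max_rx_pktlen - max_mtu;	/* driver-specific framing */
	return RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;	/* 14 + 4 */
}

/* and the scaled MTU: mtu = max_rx_pkt_len - eth_overhead_len(...) */
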
+@@ -1339,12 +1353,17 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + ret = -EINVAL; + goto rollback; + } ++ ++ /* Scale the MTU size to adapt max_rx_pkt_len */ ++ dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - ++ overhead_len; + } else { +- if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN || +- dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN) ++ uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len; ++ if (pktlen < RTE_ETHER_MIN_MTU + overhead_len || ++ pktlen > RTE_ETHER_MTU + overhead_len) + /* Use default value */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = +- RTE_ETHER_MAX_LEN; ++ RTE_ETHER_MTU + overhead_len; + } + + /* +@@ -1478,6 +1497,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + rte_eth_dev_tx_queue_config(dev, 0); + rollback: + memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); ++ if (old_mtu != dev->data->mtu) ++ dev->data->mtu = old_mtu; + + return ret; + } +@@ -1814,7 +1835,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, + } + mbp_buf_size = rte_pktmbuf_data_room_size(mp); + +- if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) { ++ if (mbp_buf_size < dev_info.min_rx_bufsize + RTE_PKTMBUF_HEADROOM) { + RTE_ETHDEV_LOG(ERR, + "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n", + mp->name, (int)mbp_buf_size, +@@ -2968,6 +2989,7 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) * return status and does not know if get is successful or not. */ memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); @@ -36941,12 +73305,19 @@ index 6e9cb243ea..c3657509c5 100644 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; -@@ -3253,53 +3254,53 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) +@@ -3248,58 +3270,60 @@ rte_eth_dev_set_vlan_ether_type(uint16_t port_id, + int + rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) + { ++ struct rte_eth_dev_info dev_info; + struct rte_eth_dev *dev; + int ret = 0; int mask = 0; int cur, org = 0; uint64_t orig_offloads; - uint64_t *dev_offloads; + uint64_t dev_offloads; ++ uint64_t new_offloads; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -37010,9 +73381,26 @@ index 6e9cb243ea..c3657509c5 100644 mask |= ETH_QINQ_STRIP_MASK; } -@@ -3308,10 +3309,11 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) +@@ -3307,11 +3331,28 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) + if (mask == 0) return ret; ++ ret = rte_eth_dev_info_get(port_id, &dev_info); ++ if (ret != 0) ++ return ret; ++ ++ /* Rx VLAN offloading must be within its device capabilities */ ++ if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { ++ new_offloads = dev_offloads & ~orig_offloads; ++ RTE_ETHDEV_LOG(ERR, ++ "Ethdev port_id=%u requested new added VLAN offloads " ++ "0x%" PRIx64 " must be within Rx offloads capabilities " ++ "0x%" PRIx64 " in %s()\n", ++ port_id, new_offloads, dev_info.rx_offload_capa, ++ __func__); ++ return -EINVAL; ++ } ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); + dev->data->dev_conf.rxmode.offloads = dev_offloads; ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); @@ -37023,7 +73411,34 @@ index 6e9cb243ea..c3657509c5 100644 } return eth_err(port_id, ret); -@@ -4039,7 +4041,7 @@ rte_eth_dev_callback_unregister(uint16_t port_id, +@@ -3934,7 +3975,7 @@ 
rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id) + + RTE_INIT(eth_dev_init_cb_lists) + { +- int i; ++ uint16_t i; + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) + TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); +@@ -3947,7 +3988,7 @@ rte_eth_dev_callback_register(uint16_t port_id, + { + struct rte_eth_dev *dev; + struct rte_eth_dev_callback *user_cb; +- uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ ++ uint16_t next_port; + uint16_t last_port; + + if (!cb_fn) +@@ -4010,7 +4051,7 @@ rte_eth_dev_callback_unregister(uint16_t port_id, + int ret; + struct rte_eth_dev *dev; + struct rte_eth_dev_callback *cb, *next; +- uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ ++ uint16_t next_port; + uint16_t last_port; + + if (!cb_fn) +@@ -4039,7 +4080,7 @@ rte_eth_dev_callback_unregister(uint16_t port_id, next = TAILQ_NEXT(cb, next); if (cb->cb_fn != cb_fn || cb->event != event || @@ -37032,7 +73447,70 @@ index 6e9cb243ea..c3657509c5 100644 continue; /* -@@ -4452,7 +4454,7 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, +@@ -4219,7 +4260,8 @@ rte_eth_dev_create(struct rte_device *device, const char *name, + device->numa_node); + + if (!ethdev->data->dev_private) { +- RTE_LOG(ERR, EAL, "failed to allocate private data"); ++ RTE_ETHDEV_LOG(ERR, ++ "failed to allocate private data\n"); + retval = -ENOMEM; + goto probe_failed; + } +@@ -4227,8 +4269,8 @@ rte_eth_dev_create(struct rte_device *device, const char *name, + } else { + ethdev = rte_eth_dev_attach_secondary(name); + if (!ethdev) { +- RTE_LOG(ERR, EAL, "secondary process attach failed, " +- "ethdev doesn't exist"); ++ RTE_ETHDEV_LOG(ERR, ++ "secondary process attach failed, ethdev doesn't exist\n"); + return -ENODEV; + } + } +@@ -4238,15 +4280,15 @@ rte_eth_dev_create(struct rte_device *device, const char *name, + if (ethdev_bus_specific_init) { + retval = ethdev_bus_specific_init(ethdev, bus_init_params); + if (retval) { +- RTE_LOG(ERR, EAL, +- "ethdev bus specific initialisation failed"); ++ RTE_ETHDEV_LOG(ERR, ++ "ethdev bus specific initialisation failed\n"); + goto probe_failed; + } + } + + retval = ethdev_init(ethdev, init_params); + if (retval) { +- RTE_LOG(ERR, EAL, "ethdev initialisation failed"); ++ RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); + goto probe_failed; + } + +@@ -4414,12 +4456,20 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, + rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; + + if (!tail) { +- rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb; ++ /* Stores to cb->fn and cb->param should complete before ++ * cb is visible to data plane. ++ */ ++ __atomic_store_n( ++ &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], ++ cb, __ATOMIC_RELEASE); + + } else { + while (tail->next) + tail = tail->next; +- tail->next = cb; ++ /* Stores to cb->fn and cb->param should complete before ++ * cb is visible to data plane. 
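
This comment is the producer half of a single-writer lock-free list: initialise the node completely, then make it reachable with a RELEASE store. Reduced to essentials (writers remain serialized by the ethdev spinlock, only readers are lock-free):

#include <stddef.h>
#include <stdint.h>

struct rxcb {
	uint16_t (*fn)(uint16_t nb, void *param);
	void *param;
	struct rxcb *next;
};

static void
publish_cb(struct rxcb **head, struct rxcb *cb)
{
	cb->next = NULL;	/* node fully initialised before... */
	if (*head == NULL) {
		__atomic_store_n(head, cb, __ATOMIC_RELEASE);	/* ...publish */
	} else {
		struct rxcb *tail = *head;

		while (tail->next != NULL)
			tail = tail->next;
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
}
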
++ */ ++ __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); + } + rte_spinlock_unlock(&rte_eth_rx_cb_lock); + +@@ -4452,7 +4502,7 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, cb->param = user_param; rte_spinlock_lock(&rte_eth_rx_cb_lock); @@ -37041,7 +73519,53 @@ index 6e9cb243ea..c3657509c5 100644 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; rte_smp_wmb(); rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb; -@@ -5064,8 +5066,7 @@ rte_eth_switch_domain_alloc(uint16_t *domain_id) +@@ -4500,12 +4550,20 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, + rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; + + if (!tail) { +- rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb; ++ /* Stores to cb->fn and cb->param should complete before ++ * cb is visible to data plane. ++ */ ++ __atomic_store_n( ++ &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], ++ cb, __ATOMIC_RELEASE); + + } else { + while (tail->next) + tail = tail->next; +- tail->next = cb; ++ /* Stores to cb->fn and cb->param should complete before ++ * cb is visible to data plane. ++ */ ++ __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); + } + rte_spinlock_unlock(&rte_eth_tx_cb_lock); + +@@ -4536,7 +4594,7 @@ rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, + cb = *prev_cb; + if (cb == user_cb) { + /* Remove the user cb from the callback list. */ +- *prev_cb = cb->next; ++ __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); + ret = 0; + break; + } +@@ -4570,7 +4628,7 @@ rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, + cb = *prev_cb; + if (cb == user_cb) { + /* Remove the user cb from the callback list. */ +- *prev_cb = cb->next; ++ __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); + ret = 0; + break; + } +@@ -5060,12 +5118,11 @@ static struct rte_eth_dev_switch { + int + rte_eth_switch_domain_alloc(uint16_t *domain_id) + { +- unsigned int i; ++ uint16_t i; *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; @@ -37052,10 +73576,37 @@ index 6e9cb243ea..c3657509c5 100644 RTE_ETH_SWITCH_DOMAIN_UNUSED) { rte_eth_switch_domains[i].state = diff --git a/dpdk/lib/librte_ethdev/rte_ethdev.h b/dpdk/lib/librte_ethdev/rte_ethdev.h -index 18a9defc24..d1a593ad11 100644 +index 18a9defc24..017459e595 100644 --- a/dpdk/lib/librte_ethdev/rte_ethdev.h +++ b/dpdk/lib/librte_ethdev/rte_ethdev.h -@@ -1196,7 +1196,7 @@ struct rte_eth_dev_portconf { +@@ -1160,17 +1160,20 @@ struct rte_eth_conf { + #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000 + /** Device supports outer UDP checksum */ + #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000 +- +-#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001 +-/**< Device supports Rx queue setup after device started*/ +-#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002 +-/**< Device supports Tx queue setup after device started*/ +- + /* + * If new Tx offload capabilities are defined, they also must be + * mentioned in rte_tx_offload_names in rte_ethdev.c file. + */ + ++/**@{@name Device capabilities ++ * Non-offload capabilities reported in rte_eth_dev_info.dev_capa. ++ */ ++/** Device supports Rx queue setup after device started. */ ++#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001 ++/** Device supports Tx queue setup after device started. */ ++#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002 ++/**@}*/ ++ + /* + * Fallback default preferred Rx/Tx port parameters. 
+ * These are used if an application requests default parameters +@@ -1196,7 +1199,7 @@ struct rte_eth_dev_portconf { * Default values for switch domain id when ethdev does not support switch * domain definitions. */ @@ -37064,11 +73615,136 @@ index 18a9defc24..d1a593ad11 100644 /** * Ethernet device associated switch information +@@ -3612,7 +3615,8 @@ struct rte_eth_rxtx_callback; + * The callback function + * @param user_param + * A generic pointer parameter which will be passed to each invocation of the +- * callback function on this port and queue. ++ * callback function on this port and queue. Inter-thread synchronization ++ * of any user data changes is the responsibility of the user. + * + * @return + * NULL on error. +@@ -3641,7 +3645,8 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, + * The callback function + * @param user_param + * A generic pointer parameter which will be passed to each invocation of the +- * callback function on this port and queue. ++ * callback function on this port and queue. Inter-thread synchronization ++ * of any user data changes is the responsibility of the user. + * + * @return + * NULL on error. +@@ -3669,7 +3674,8 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, + * The callback function + * @param user_param + * A generic pointer parameter which will be passed to each invocation of the +- * callback function on this port and queue. ++ * callback function on this port and queue. Inter-thread synchronization ++ * of any user data changes is the responsibility of the user. + * + * @return + * NULL on error. +@@ -3694,7 +3700,9 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, + * on that queue. + * + * - After a short delay - where the delay is sufficient to allow any +- * in-flight callbacks to complete. ++ * in-flight callbacks to complete. Alternately, the RCU mechanism can be ++ * used to detect when data plane threads have ceased referencing the ++ * callback memory. + * + * @param port_id + * The port identifier of the Ethernet device. +@@ -3727,7 +3735,9 @@ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, + * on that queue. + * + * - After a short delay - where the delay is sufficient to allow any +- * in-flight callbacks to complete. ++ * in-flight callbacks to complete. Alternately, the RCU mechanism can be ++ * used to detect when data plane threads have ceased referencing the ++ * callback memory. + * + * @param port_id + * The port identifier of the Ethernet device. +@@ -4388,10 +4398,18 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, + rx_pkts, nb_pkts); + + #ifdef RTE_ETHDEV_RXTX_CALLBACKS +- if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) { +- struct rte_eth_rxtx_callback *cb = +- dev->post_rx_burst_cbs[queue_id]; ++ struct rte_eth_rxtx_callback *cb; ++ ++ /* __ATOMIC_RELEASE memory order was used when the ++ * call back was inserted into the list. ++ * Since there is a clear dependency between loading ++ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is ++ * not required. 
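
And the consumer half that this comment argues for: one RELAXED load of the list head suffices because every later access is data-dependent on the loaded pointer, pairing with the RELEASE store on insertion. A sketch, restating the node type from the producer sketch:

#include <stddef.h>
#include <stdint.h>

struct rxcb {
	uint16_t (*fn)(uint16_t nb, void *param);
	void *param;
	struct rxcb *next;
};

static inline uint16_t
run_rx_cbs(struct rxcb *const *head, uint16_t nb)
{
	const struct rxcb *cb = __atomic_load_n(head, __ATOMIC_RELAXED);

	while (cb != NULL) {
		/* fn, param and next are data-dependent on the load above */
		nb = cb->fn(nb, cb->param);
		cb = cb->next;
	}
	return nb;
}
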
++ */ ++ cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id], ++ __ATOMIC_RELAXED); + ++ if (unlikely(cb != NULL)) { + do { + nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx, + nb_pkts, cb->param); +@@ -4652,7 +4670,16 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, + #endif + + #ifdef RTE_ETHDEV_RXTX_CALLBACKS +- struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id]; ++ struct rte_eth_rxtx_callback *cb; ++ ++ /* __ATOMIC_RELEASE memory order was used when the ++ * call back was inserted into the list. ++ * Since there is a clear dependency between loading ++ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is ++ * not required. ++ */ ++ cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id], ++ __ATOMIC_RELAXED); + + if (unlikely(cb != NULL)) { + do { diff --git a/dpdk/lib/librte_ethdev/rte_ethdev_pci.h b/dpdk/lib/librte_ethdev/rte_ethdev_pci.h -index ccdbb46ec0..cca94ec864 100644 +index ccdbb46ec0..d44a8e2a39 100644 --- a/dpdk/lib/librte_ethdev/rte_ethdev_pci.h +++ b/dpdk/lib/librte_ethdev/rte_ethdev_pci.h -@@ -42,6 +42,8 @@ +@@ -3,32 +3,6 @@ + * + * Copyright(c) 2017 Brocade Communications Systems, Inc. + * Author: Jan Blunck +- * +- * Redistribution and use in source and binary forms, with or without +- * modification, are permitted provided that the following conditions +- * are met: +- * +- * * Redistributions of source code must retain the above copyright +- * notice, this list of conditions and the following disclaimer. +- * * Redistributions in binary form must reproduce the above copyright +- * notice, this list of conditions and the following disclaimer in +- * the documentation and/or other materials provided with the +- * distribution. +- * * Neither the name of the copyright holder nor the names of its +- * contributors may be used to endorse or promote products derived +- * from this software without specific prior written permission. +- * +- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + #ifndef _RTE_ETHDEV_PCI_H_ +@@ -42,6 +16,8 @@ /** * Copy pci device info to the Ethernet device data. @@ -37077,7 +73753,7 @@ index ccdbb46ec0..cca94ec864 100644 * * @param eth_dev * The *eth_dev* pointer is the address of the *rte_eth_dev* structure. 
-@@ -60,14 +62,16 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, +@@ -60,14 +36,16 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, eth_dev->intr_handle = &pci_dev->intr_handle; @@ -37102,8 +73778,45 @@ index ccdbb46ec0..cca94ec864 100644 } static inline int +diff --git a/dpdk/lib/librte_ethdev/rte_ethdev_vdev.h b/dpdk/lib/librte_ethdev/rte_ethdev_vdev.h +index 259feda3f7..0abce0d21c 100644 +--- a/dpdk/lib/librte_ethdev/rte_ethdev_vdev.h ++++ b/dpdk/lib/librte_ethdev/rte_ethdev_vdev.h +@@ -3,32 +3,6 @@ + * + * Copyright(c) 2017 Brocade Communications Systems, Inc. + * Author: Jan Blunck +- * +- * Redistribution and use in source and binary forms, with or without +- * modification, are permitted provided that the following conditions +- * are met: +- * +- * * Redistributions of source code must retain the above copyright +- * notice, this list of conditions and the following disclaimer. +- * * Redistributions in binary form must reproduce the above copyright +- * notice, this list of conditions and the following disclaimer in +- * the documentation and/or other materials provided with the +- * distribution. +- * * Neither the name of the copyright holder nor the names of its +- * contributors may be used to endorse or promote products derived +- * from this software without specific prior written permission. +- * +- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + #ifndef _RTE_ETHDEV_VDEV_H_ diff --git a/dpdk/lib/librte_ethdev/rte_flow.c b/dpdk/lib/librte_ethdev/rte_flow.c -index 87a3e8c4c6..391165646a 100644 +index 87a3e8c4c6..9964a241cb 100644 --- a/dpdk/lib/librte_ethdev/rte_flow.c +++ b/dpdk/lib/librte_ethdev/rte_flow.c @@ -19,7 +19,7 @@ @@ -37115,6 +73828,68 @@ index 87a3e8c4c6..391165646a 100644 /* Mbuf dynamic field flag bit number for metadata. 
*/ uint64_t rte_flow_dynf_metadata_mask; +@@ -241,6 +241,8 @@ rte_flow_expand_rss_item_complete(const struct rte_flow_item *item) + ret = RTE_FLOW_ITEM_TYPE_IPV6; + else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) + ret = RTE_FLOW_ITEM_TYPE_VLAN; ++ else ++ ret = RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + if (item->mask) +@@ -258,6 +260,8 @@ rte_flow_expand_rss_item_complete(const struct rte_flow_item *item) + ret = RTE_FLOW_ITEM_TYPE_IPV6; + else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) + ret = RTE_FLOW_ITEM_TYPE_VLAN; ++ else ++ ret = RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + if (item->mask) +@@ -278,6 +282,8 @@ rte_flow_expand_rss_item_complete(const struct rte_flow_item *item) + ret = RTE_FLOW_ITEM_TYPE_IPV4; + else if (ip_next_proto == IPPROTO_IPV6) + ret = RTE_FLOW_ITEM_TYPE_IPV6; ++ else ++ ret = RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + if (item->mask) +@@ -298,6 +304,8 @@ rte_flow_expand_rss_item_complete(const struct rte_flow_item *item) + ret = RTE_FLOW_ITEM_TYPE_IPV4; + else if (ip_next_proto == IPPROTO_IPV6) + ret = RTE_FLOW_ITEM_TYPE_IPV6; ++ else ++ ret = RTE_FLOW_ITEM_TYPE_END; + break; + default: + ret = RTE_FLOW_ITEM_TYPE_VOID; +@@ -613,7 +621,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, + }), + size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size); + off = sizeof(*dst.rss); +- if (src.rss->key_len) { ++ if (src.rss->key_len && src.rss->key) { + off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key)); + tmp = sizeof(*src.rss->key) * src.rss->key_len; + if (size >= off + tmp) +@@ -1104,10 +1112,14 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size, + memset(flow_items, 0, sizeof(flow_items)); + user_pattern_size -= sizeof(*item); + /* +- * Check if the last valid item has spec set +- * and need complete pattern. ++ * Check if the last valid item has spec set, need complete pattern, ++ * and the pattern can be used for expansion. + */ + missed_item.type = rte_flow_expand_rss_item_complete(last_item); ++ if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) { ++ /* Item type END indicates expansion is not required. */ ++ return lsize; ++ } + if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) { + next = NULL; + missed = 1; diff --git a/dpdk/lib/librte_ethdev/rte_flow.h b/dpdk/lib/librte_ethdev/rte_flow.h index 452d359a16..693824da8a 100644 --- a/dpdk/lib/librte_ethdev/rte_flow.h @@ -37137,6 +73912,235 @@ index 452d359a16..693824da8a 100644 /* Mbuf dynamic field flag mask for metadata. 
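
A short sketch of how the two symbols are consumed, which also motivates the type changes above (an int32_t offset for RTE_MBUF_DYNFIELD, a uint64_t mask matching ol_flags):

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <rte_flow.h>

static int
mbuf_set_flow_metadata(struct rte_mbuf *m, uint32_t meta)
{
	if (rte_flow_dynf_metadata_mask == 0)
		return -1;	/* dynamic field was never registered */
	/* the offset locates the 32-bit metadata slot inside the mbuf */
	*RTE_MBUF_DYNFIELD(m, rte_flow_dynf_metadata_offs, uint32_t *) = meta;
	/* the mask is the matching ol_flags bit marking the field valid */
	m->ol_flags |= rte_flow_dynf_metadata_mask;
	return 0;
}
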
*/ extern uint64_t rte_flow_dynf_metadata_mask; +diff --git a/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c b/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c +index 22d9108168..db9af8c200 100644 +--- a/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c ++++ b/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c +@@ -240,6 +240,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id, + if (ret < 0) { + RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!", + dev_id, dev_info.driver_name); ++ rte_free(adapter); + return ret; + } + +diff --git a/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.c +index d02ef57f4e..8a1573e75f 100644 +--- a/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.c ++++ b/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.c +@@ -733,6 +733,8 @@ txa_service_queue_add(uint8_t id, + + qdone = rte_zmalloc(txa->mem_name, + nb_queues * sizeof(*qdone), 0); ++ if (qdone == NULL) ++ return -ENOMEM; + j = 0; + for (i = 0; i < nb_queues; i++) { + if (txa_service_is_queue_added(txa, eth_dev, i)) +diff --git a/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c b/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c +index 161e21a685..36c13fe3b5 100644 +--- a/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c ++++ b/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c +@@ -550,7 +550,7 @@ struct swtim { + uint32_t timer_data_id; + /* Track which cores have actually armed a timer */ + struct { +- rte_atomic16_t v; ++ uint16_t v; + } __rte_cache_aligned in_use[RTE_MAX_LCORE]; + /* Track which cores' timer lists should be polled */ + unsigned int poll_lcores[RTE_MAX_LCORE]; +@@ -579,6 +579,7 @@ swtim_callback(struct rte_timer *tim) + uint16_t nb_evs_invalid = 0; + uint64_t opaque; + int ret; ++ int n_lcores; + + opaque = evtim->impl_opaque[1]; + adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque; +@@ -601,8 +602,13 @@ swtim_callback(struct rte_timer *tim) + "with immediate expiry value"); + } + +- if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore].v))) +- sw->poll_lcores[sw->n_poll_lcores++] = lcore; ++ if (unlikely(sw->in_use[lcore].v == 0)) { ++ sw->in_use[lcore].v = 1; ++ n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1, ++ __ATOMIC_RELAXED); ++ __atomic_store_n(&sw->poll_lcores[n_lcores], lcore, ++ __ATOMIC_RELAXED); ++ } + } else { + EVTIM_BUF_LOG_DBG("buffered an event timer expiry event"); + +@@ -619,7 +625,8 @@ swtim_callback(struct rte_timer *tim) + sw->expired_timers[sw->n_expired_timers++] = tim; + sw->stats.evtim_exp_count++; + +- evtim->state = RTE_EVENT_TIMER_NOT_ARMED; ++ __atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED, ++ __ATOMIC_RELEASE); + } + + if (event_buffer_batch_ready(&sw->buffer)) { +@@ -825,7 +832,7 @@ swtim_init(struct rte_event_timer_adapter *adapter) + + /* Initialize the variables that track in-use timer lists */ + for (i = 0; i < RTE_MAX_LCORE; i++) +- rte_atomic16_init(&sw->in_use[i].v); ++ sw->in_use[i].v = 0; + + /* Initialize the timer subsystem and allocate timer data instance */ + ret = rte_timer_subsystem_init(); +@@ -1007,6 +1014,10 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter, + uint32_t lcore_id = rte_lcore_id(); + struct rte_timer *tim, *tims[nb_evtims]; + uint64_t cycles; ++ int n_lcores; ++ /* Timer list for this lcore is not in use. */ ++ uint16_t exp_state = 0; ++ enum rte_event_timer_state n_state; + + #ifdef RTE_LIBRTE_EVENTDEV_DEBUG + /* Check that the service is running. 
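
The state stores above follow one discipline worth spelling out: adapter-private data (impl_opaque) is written first, then the state is published with RELEASE, and readers perform the mirror-image ACQUIRE load before touching that data. A reduced model:

#include <stdint.h>

enum { NOT_ARMED, ARMED };
struct tim { uint64_t opaque; int state; };	/* reduced event timer */

static void
tim_arm(struct tim *t, uint64_t opaque)
{
	t->opaque = opaque;	/* payload first... */
	__atomic_store_n(&t->state, ARMED, __ATOMIC_RELEASE);
}

static int
tim_read(struct tim *t, uint64_t *out)
{
	/* ...so a reader that sees ARMED also sees the payload */
	if (__atomic_load_n(&t->state, __ATOMIC_ACQUIRE) != ARMED)
		return -1;
	*out = t->opaque;
	return 0;
}
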
*/ +@@ -1025,12 +1036,18 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter, + /* If this is the first time we're arming an event timer on this lcore, + * mark this lcore as "in use"; this will cause the service + * function to process the timer list that corresponds to this lcore. ++ * The atomic compare-and-swap operation can prevent the race condition ++ * on in_use flag between multiple non-EAL threads. + */ +- if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore_id].v))) { ++ if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v, ++ &exp_state, 1, 0, ++ __ATOMIC_RELAXED, __ATOMIC_RELAXED))) { + EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll", + lcore_id); +- sw->poll_lcores[sw->n_poll_lcores] = lcore_id; +- ++sw->n_poll_lcores; ++ n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1, ++ __ATOMIC_RELAXED); ++ __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id, ++ __ATOMIC_RELAXED); + } + + ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims, +@@ -1041,30 +1058,36 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter, + } + + for (i = 0; i < nb_evtims; i++) { +- /* Don't modify the event timer state in these cases */ +- if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) { ++ n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE); ++ if (n_state == RTE_EVENT_TIMER_ARMED) { + rte_errno = EALREADY; + break; +- } else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED || +- evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) { ++ } else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED || ++ n_state == RTE_EVENT_TIMER_CANCELED)) { + rte_errno = EINVAL; + break; + } + + ret = check_timeout(evtims[i], adapter); + if (unlikely(ret == -1)) { +- evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE; ++ __atomic_store_n(&evtims[i]->state, ++ RTE_EVENT_TIMER_ERROR_TOOLATE, ++ __ATOMIC_RELAXED); + rte_errno = EINVAL; + break; + } else if (unlikely(ret == -2)) { +- evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY; ++ __atomic_store_n(&evtims[i]->state, ++ RTE_EVENT_TIMER_ERROR_TOOEARLY, ++ __ATOMIC_RELAXED); + rte_errno = EINVAL; + break; + } + + if (unlikely(check_destination_event_queue(evtims[i], + adapter) < 0)) { +- evtims[i]->state = RTE_EVENT_TIMER_ERROR; ++ __atomic_store_n(&evtims[i]->state, ++ RTE_EVENT_TIMER_ERROR, ++ __ATOMIC_RELAXED); + rte_errno = EINVAL; + break; + } +@@ -1080,13 +1103,18 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter, + SINGLE, lcore_id, NULL, evtims[i]); + if (ret < 0) { + /* tim was in RUNNING or CONFIG state */ +- evtims[i]->state = RTE_EVENT_TIMER_ERROR; ++ __atomic_store_n(&evtims[i]->state, ++ RTE_EVENT_TIMER_ERROR, ++ __ATOMIC_RELEASE); + break; + } + +- rte_smp_wmb(); + EVTIM_LOG_DBG("armed an event timer"); +- evtims[i]->state = RTE_EVENT_TIMER_ARMED; ++ /* RELEASE ordering guarantees the adapter specific value ++ * changes observed before the update of state. ++ */ ++ __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED, ++ __ATOMIC_RELEASE); + } + + if (i < nb_evtims) +@@ -1113,6 +1141,7 @@ swtim_cancel_burst(const struct rte_event_timer_adapter *adapter, + struct rte_timer *timp; + uint64_t opaque; + struct swtim *sw = swtim_pmd_priv(adapter); ++ enum rte_event_timer_state n_state; + + #ifdef RTE_LIBRTE_EVENTDEV_DEBUG + /* Check that the service is running. 
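
The arm path above combines two reusable lock-free steps: a compare-and-swap claims the per-lcore flag exactly once even under racing non-EAL threads, and a relaxed fetch-add then reserves a unique slot in the poll list. In isolation:

#include <stdint.h>

static void
claim_poll_slot(uint16_t *in_use, uint32_t *n_poll, uint32_t *poll,
		uint32_t lcore)
{
	uint16_t expected = 0;

	/* only the first caller per lcore wins the 0 -> 1 transition */
	if (__atomic_compare_exchange_n(in_use, &expected, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
		uint32_t slot = __atomic_fetch_add(n_poll, 1,
				__ATOMIC_RELAXED);
		__atomic_store_n(&poll[slot], lcore, __ATOMIC_RELAXED);
	}
}
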
*/ +@@ -1124,16 +1153,18 @@ swtim_cancel_burst(const struct rte_event_timer_adapter *adapter, + + for (i = 0; i < nb_evtims; i++) { + /* Don't modify the event timer state in these cases */ +- if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) { ++ /* ACQUIRE ordering guarantees the access of implementation ++ * specific opaque data under the correct state. ++ */ ++ n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE); ++ if (n_state == RTE_EVENT_TIMER_CANCELED) { + rte_errno = EALREADY; + break; +- } else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) { ++ } else if (n_state != RTE_EVENT_TIMER_ARMED) { + rte_errno = EINVAL; + break; + } + +- rte_smp_rmb(); +- + opaque = evtims[i]->impl_opaque[0]; + timp = (struct rte_timer *)(uintptr_t)opaque; + RTE_ASSERT(timp != NULL); +@@ -1147,11 +1178,12 @@ swtim_cancel_burst(const struct rte_event_timer_adapter *adapter, + + rte_mempool_put(sw->tim_pool, (void **)timp); + +- evtims[i]->state = RTE_EVENT_TIMER_CANCELED; +- evtims[i]->impl_opaque[0] = 0; +- evtims[i]->impl_opaque[1] = 0; +- +- rte_smp_wmb(); ++ /* The RELEASE ordering here pairs with atomic ordering ++ * to make sure the state update data observed between ++ * threads. ++ */ ++ __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED, ++ __ATOMIC_RELEASE); + } + + return i; diff --git a/dpdk/lib/librte_eventdev/rte_eventdev.c b/dpdk/lib/librte_eventdev/rte_eventdev.c index b987e07454..9aca7fbd52 100644 --- a/dpdk/lib/librte_eventdev/rte_eventdev.c @@ -37164,6 +74168,20 @@ index b987e07454..9aca7fbd52 100644 eventdev_globals.nb_devs++; } +diff --git a/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h b/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h +index d118b9e5ba..c5d41ddad8 100644 +--- a/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h ++++ b/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h +@@ -155,9 +155,6 @@ rte_event_pmd_is_valid_dev(uint8_t dev_id) + * Event device pointer + * @param dev_info + * Event device information structure +- * +- * @return +- * Returns 0 on success + */ + typedef void (*eventdev_info_get_t)(struct rte_eventdev *dev, + struct rte_event_dev_info *dev_info); diff --git a/dpdk/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/dpdk/lib/librte_eventdev/rte_eventdev_pmd_pci.h index 8fb61386fd..443cd38c23 100644 --- a/dpdk/lib/librte_eventdev/rte_eventdev_pmd_pci.h @@ -37184,11 +74202,26 @@ index 8fb61386fd..443cd38c23 100644 /* Invoke PMD device un-init function */ if (devuninit) diff --git a/dpdk/lib/librte_fib/rte_fib.h b/dpdk/lib/librte_fib/rte_fib.h -index d06c5ef55a..af3bbf07ee 100644 +index d06c5ef55a..04c490ed6e 100644 --- a/dpdk/lib/librte_fib/rte_fib.h +++ b/dpdk/lib/librte_fib/rte_fib.h -@@ -14,6 +14,10 @@ +@@ -8,12 +8,25 @@ + + /** + * @file ++ * ++ * RTE FIB library. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. 
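
Since the API is experimental, a usage sketch helps fix ideas; the calls follow the declarations in this header (rte_fib_create, rte_fib_add, rte_fib_lookup_bulk), but the rte_fib_conf field names below should be checked against the header before relying on them:

#include <rte_fib.h>
#include <rte_ip.h>

static uint64_t
fib_demo(void)
{
	struct rte_fib_conf conf = {
		.type = RTE_FIB_DIR24_8,
		.default_nh = 0,
		.max_routes = 1 << 16,
		.dir24_8 = { .nh_sz = RTE_FIB_DIR24_8_4B, .num_tbl8 = 256 },
	};
	struct rte_fib *fib = rte_fib_create("demo", 0 /* socket */, &conf);
	uint32_t ip = RTE_IPV4(192, 168, 1, 0);
	uint64_t nh = 0;

	if (fib == NULL)
		return 0;
	rte_fib_add(fib, ip, 24, 42);		/* 192.168.1.0/24 -> nh 42 */
	rte_fib_lookup_bulk(fib, &ip, &nh, 1);	/* nh becomes 42 */
	rte_fib_free(fib);
	return nh;
}
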
++ * + * FIB (Forwarding information base) implementation + * for IPv4 Longest Prefix Match + */ ++#include ++ #include +#ifdef __cplusplus @@ -37198,7 +74231,7 @@ index d06c5ef55a..af3bbf07ee 100644 struct rte_fib; struct rte_rib; -@@ -185,4 +189,8 @@ __rte_experimental +@@ -185,4 +198,8 @@ __rte_experimental struct rte_rib * rte_fib_get_rib(struct rte_fib *fib); @@ -37208,11 +74241,26 @@ index d06c5ef55a..af3bbf07ee 100644 + #endif /* _RTE_FIB_H_ */ diff --git a/dpdk/lib/librte_fib/rte_fib6.h b/dpdk/lib/librte_fib/rte_fib6.h -index 4268704038..66c71c84c9 100644 +index 4268704038..9228d87149 100644 --- a/dpdk/lib/librte_fib/rte_fib6.h +++ b/dpdk/lib/librte_fib/rte_fib6.h -@@ -14,6 +14,10 @@ +@@ -8,12 +8,25 @@ + + /** + * @file ++ * ++ * RTE FIB6 library. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. ++ * + * FIB (Forwarding information base) implementation + * for IPv6 Longest Prefix Match + */ ++#include ++ #include +#ifdef __cplusplus @@ -37222,7 +74270,7 @@ index 4268704038..66c71c84c9 100644 #define RTE_FIB6_IPV6_ADDR_SIZE 16 /** Maximum depth value possible for IPv6 FIB. */ #define RTE_FIB6_MAXDEPTH 128 -@@ -190,4 +194,8 @@ __rte_experimental +@@ -190,4 +203,8 @@ __rte_experimental struct rte_rib6 * rte_fib6_get_rib(struct rte_fib6 *fib); @@ -37299,6 +74347,73 @@ index 124aa8b98b..2ae2add4f3 100644 next_hop << 1, dp->nh_sz, *ip_part); } tbl8_recycle(dp, &val, tbl8_idx); +diff --git a/dpdk/lib/librte_flow_classify/rte_flow_classify.h b/dpdk/lib/librte_flow_classify/rte_flow_classify.h +index 74d1ecaf50..82ea92b6a6 100644 +--- a/dpdk/lib/librte_flow_classify/rte_flow_classify.h ++++ b/dpdk/lib/librte_flow_classify/rte_flow_classify.h +@@ -8,9 +8,11 @@ + /** + * @file + * +- * RTE Flow Classify Library ++ * RTE Flow Classify Library. + * +- * @b EXPERIMENTAL: this API may change without prior notice ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. + * + * This library provides flow record information with some measured properties. + * +diff --git a/dpdk/lib/librte_gro/rte_gro.c b/dpdk/lib/librte_gro/rte_gro.c +index 6618f4d32d..24e413d8ea 100644 +--- a/dpdk/lib/librte_gro/rte_gro.c ++++ b/dpdk/lib/librte_gro/rte_gro.c +@@ -27,18 +27,21 @@ static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = { + NULL}; + + #define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \ +- ((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP)) ++ ((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \ ++ (RTE_ETH_IS_TUNNEL_PKT(ptype) == 0)) + + #define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \ + ((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \ + ((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \ + RTE_PTYPE_TUNNEL_VXLAN) && \ +- ((ptype & RTE_PTYPE_INNER_L4_TCP) == \ +- RTE_PTYPE_INNER_L4_TCP) && \ +- (((ptype & RTE_PTYPE_INNER_L3_MASK) & \ +- (RTE_PTYPE_INNER_L3_IPV4 | \ +- RTE_PTYPE_INNER_L3_IPV4_EXT | \ +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0)) ++ ((ptype & RTE_PTYPE_INNER_L4_TCP) == \ ++ RTE_PTYPE_INNER_L4_TCP) && \ ++ (((ptype & RTE_PTYPE_INNER_L3_MASK) == \ ++ RTE_PTYPE_INNER_L3_IPV4) || \ ++ ((ptype & RTE_PTYPE_INNER_L3_MASK) == \ ++ RTE_PTYPE_INNER_L3_IPV4_EXT) || \ ++ ((ptype & RTE_PTYPE_INNER_L3_MASK) == \ ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN))) + + /* + * GRO context structure. 
It keeps the table structures, which are +diff --git a/dpdk/lib/librte_gso/gso_udp4.c b/dpdk/lib/librte_gso/gso_udp4.c +index 21fea09273..6fa68f243a 100644 +--- a/dpdk/lib/librte_gso/gso_udp4.c ++++ b/dpdk/lib/librte_gso/gso_udp4.c +@@ -69,7 +69,10 @@ gso_udp4_segment(struct rte_mbuf *pkt, + return 1; + } + +- pyld_unit_size = gso_size - hdr_offset; ++ /* pyld_unit_size must be a multiple of 8 because frag_off ++ * uses 8 bytes as unit. ++ */ ++ pyld_unit_size = (gso_size - hdr_offset) & ~7U; + + /* Segment the payload */ + ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool, diff --git a/dpdk/lib/librte_hash/meson.build b/dpdk/lib/librte_hash/meson.build index 5d02b3084f..bce11ad9e0 100644 --- a/dpdk/lib/librte_hash/meson.build @@ -37315,6 +74430,49 @@ index 5d02b3084f..bce11ad9e0 100644 'rte_fbk_hash.h', 'rte_hash_crc.h', 'rte_hash.h', +diff --git a/dpdk/lib/librte_hash/rte_cuckoo_hash.c b/dpdk/lib/librte_hash/rte_cuckoo_hash.c +index 87a4c01f2f..51ebae9cd9 100644 +--- a/dpdk/lib/librte_hash/rte_cuckoo_hash.c ++++ b/dpdk/lib/librte_hash/rte_cuckoo_hash.c +@@ -144,6 +144,7 @@ rte_hash_create(const struct rte_hash_parameters *params) + unsigned int no_free_on_del = 0; + uint32_t *ext_bkt_to_free = NULL; + uint32_t *tbl_chng_cnt = NULL; ++ struct lcore_cache *local_free_slots = NULL; + unsigned int readwrite_concur_lf_support = 0; + + rte_hash_function default_hash_func = (rte_hash_function)rte_jhash; +@@ -369,9 +370,13 @@ rte_hash_create(const struct rte_hash_parameters *params) + #endif + + if (use_local_cache) { +- h->local_free_slots = rte_zmalloc_socket(NULL, ++ local_free_slots = rte_zmalloc_socket(NULL, + sizeof(struct lcore_cache) * RTE_MAX_LCORE, + RTE_CACHE_LINE_SIZE, params->socket_id); ++ if (local_free_slots == NULL) { ++ RTE_LOG(ERR, HASH, "local free slots memory allocation failed\n"); ++ goto err_unlock; ++ } + } + + /* Default hash function */ +@@ -402,6 +407,7 @@ rte_hash_create(const struct rte_hash_parameters *params) + *h->tbl_chng_cnt = 0; + h->hw_trans_mem_support = hw_trans_mem_support; + h->use_local_cache = use_local_cache; ++ h->local_free_slots = local_free_slots; + h->readwrite_concur_support = readwrite_concur_support; + h->ext_table_support = ext_table_support; + h->writer_takes_lock = writer_takes_lock; +@@ -447,6 +453,7 @@ rte_hash_create(const struct rte_hash_parameters *params) + rte_ring_free(r); + rte_ring_free(r_ext); + rte_free(te); ++ rte_free(local_free_slots); + rte_free(h); + rte_free(buckets); + rte_free(buckets_ext); diff --git a/dpdk/lib/librte_hash/rte_hash.h b/dpdk/lib/librte_hash/rte_hash.h index 0d73370dc4..ab7be1d528 100644 --- a/dpdk/lib/librte_hash/rte_hash.h @@ -37328,6 +74486,89 @@ index 0d73370dc4..ab7be1d528 100644 */ #define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF 0x20 +diff --git a/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c b/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c +index 1dda8aca02..69666c8b82 100644 +--- a/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c ++++ b/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c +@@ -104,6 +104,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + const unaligned_uint64_t *psd; + uint16_t flag_offset, ip_ofs, ip_flag; + int32_t ip_len; ++ int32_t trim; + + flag_offset = rte_be_to_cpu_16(ip_hdr->fragment_offset); + ip_ofs = (uint16_t)(flag_offset & RTE_IPV4_HDR_OFFSET_MASK); +@@ -117,14 +118,15 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + + ip_ofs *= RTE_IPV4_HDR_OFFSET_UNITS; + ip_len = rte_be_to_cpu_16(ip_hdr->total_length) - mb->l3_len; ++ trim = 
mb->pkt_len - (ip_len + mb->l3_len + mb->l2_len); + + IP_FRAG_LOG(DEBUG, "%s:%d:\n" +- "mbuf: %p, tms: %" PRIu64 +- ", key: <%" PRIx64 ", %#x>, ofs: %u, len: %d, flags: %#x\n" ++ "mbuf: %p, tms: %" PRIu64 ", key: <%" PRIx64 ", %#x>" ++ "ofs: %u, len: %d, padding: %d, flags: %#x\n" + "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, " + "max_entries: %u, use_entries: %u\n\n", + __func__, __LINE__, +- mb, tms, key.src_dst[0], key.id, ip_ofs, ip_len, ip_flag, ++ mb, tms, key.src_dst[0], key.id, ip_ofs, ip_len, trim, ip_flag, + tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries, + tbl->use_entries); + +@@ -134,6 +136,9 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + return NULL; + } + ++ if (unlikely(trim > 0)) ++ rte_pktmbuf_trim(mb, trim); ++ + /* try to find/add entry into the fragment's table. */ + if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) { + IP_FRAG_MBUF2DR(dr, mb); +diff --git a/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c b/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c +index ad01055184..6bc0bf792a 100644 +--- a/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c ++++ b/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c +@@ -142,6 +142,7 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + struct ip_frag_key key; + uint16_t ip_ofs; + int32_t ip_len; ++ int32_t trim; + + rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16); + rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16); +@@ -158,16 +159,17 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + * this is what we remove from the payload len. + */ + ip_len = rte_be_to_cpu_16(ip_hdr->payload_len) - sizeof(*frag_hdr); ++ trim = mb->pkt_len - (ip_len + mb->l3_len + mb->l2_len); + + IP_FRAG_LOG(DEBUG, "%s:%d:\n" + "mbuf: %p, tms: %" PRIu64 + ", key: <" IPv6_KEY_BYTES_FMT ", %#x>, " +- "ofs: %u, len: %d, flags: %#x\n" ++ "ofs: %u, len: %d, padding: %d, flags: %#x\n" + "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, " + "max_entries: %u, use_entries: %u\n\n", + __func__, __LINE__, + mb, tms, IPv6_KEY_BYTES(key.src_dst), key.id, ip_ofs, ip_len, +- RTE_IPV6_GET_MF(frag_hdr->frag_data), ++ trim, RTE_IPV6_GET_MF(frag_hdr->frag_data), + tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries, + tbl->use_entries); + +@@ -177,6 +179,9 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + return NULL; + } + ++ if (unlikely(trim > 0)) ++ rte_pktmbuf_trim(mb, trim); ++ + /* try to find/add entry into the fragment's table. */ + fp = ip_frag_find(tbl, dr, &key, tms); + if (fp == NULL) { diff --git a/dpdk/lib/librte_ipsec/ipsec_sad.c b/dpdk/lib/librte_ipsec/ipsec_sad.c index db2c44c804..31b5956d89 100644 --- a/dpdk/lib/librte_ipsec/ipsec_sad.c @@ -37341,6 +74582,38 @@ index db2c44c804..31b5956d89 100644 if (key_type == RTE_IPSEC_SAD_SPI_DIP) sad->cnt_arr[ret].cnt_dip += notexist; else +diff --git a/dpdk/lib/librte_ipsec/rte_ipsec.h b/dpdk/lib/librte_ipsec/rte_ipsec.h +index f3b1f936b9..f2da7ace9c 100644 +--- a/dpdk/lib/librte_ipsec/rte_ipsec.h ++++ b/dpdk/lib/librte_ipsec/rte_ipsec.h +@@ -7,9 +7,13 @@ + + /** + * @file rte_ipsec.h +- * @b EXPERIMENTAL: this API may change without prior notice + * + * RTE IPsec support. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. ++ * + * librte_ipsec provides a framework for data-path IPsec protocol + * processing (ESP/AH). 
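/*
 * Illustrative sketch (not part of the patch): the reassembly fixes
 * above compute how many bytes of L2 padding trail the IP payload and
 * trim them before fragment processing.  The arithmetic is shown with
 * an invented helper and example values; the real code then calls
 * rte_pktmbuf_trim() when the result is positive.
 */
#include <assert.h>
#include <stdint.h>

static int32_t
frag_padding(uint32_t pkt_len, uint32_t l2_len, uint32_t l3_len,
	     uint32_t ip_payload_len)
{
	/* bytes present in the mbuf beyond what the IP header claims */
	return (int32_t)pkt_len -
		(int32_t)(ip_payload_len + l3_len + l2_len);
}

int
main(void)
{
	/* 14 (Ethernet) + 20 (IPv4) + 26 (payload) = 60: no padding */
	assert(frag_padding(60, 14, 20, 26) == 0);
	/* same fragment padded to 64 bytes on the wire: trim 4 bytes */
	assert(frag_padding(64, 14, 20, 26) == 4);
	return 0;
}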
+ */ +diff --git a/dpdk/lib/librte_ipsec/rte_ipsec_sad.h b/dpdk/lib/librte_ipsec/rte_ipsec_sad.h +index 8386f73df7..cf50a11cb4 100644 +--- a/dpdk/lib/librte_ipsec/rte_ipsec_sad.h ++++ b/dpdk/lib/librte_ipsec/rte_ipsec_sad.h +@@ -6,6 +6,8 @@ + #ifndef _RTE_IPSEC_SAD_H_ + #define _RTE_IPSEC_SAD_H_ + ++#include ++ + #include + + /** diff --git a/dpdk/lib/librte_ipsec/sa.h b/dpdk/lib/librte_ipsec/sa.h index 51e69ad05a..0cfe82f634 100644 --- a/dpdk/lib/librte_ipsec/sa.h @@ -37404,6 +74677,19 @@ index e388751e33..bcf82cc2d5 100644 KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail); return 0; +diff --git a/dpdk/lib/librte_kni/rte_kni.h b/dpdk/lib/librte_kni/rte_kni.h +index f1bb782c68..855facd1a3 100644 +--- a/dpdk/lib/librte_kni/rte_kni.h ++++ b/dpdk/lib/librte_kni/rte_kni.h +@@ -212,7 +212,7 @@ const char *rte_kni_get_name(const struct rte_kni *kni); + + /** + * Register KNI request handling for a specified port,and it can +- * be called by master process or slave process. ++ * be called by primary process or secondary process. + * + * @param kni + * pointer to struct rte_kni. diff --git a/dpdk/lib/librte_kvargs/rte_kvargs.c b/dpdk/lib/librte_kvargs/rte_kvargs.c index d39332999e..285081c86c 100644 --- a/dpdk/lib/librte_kvargs/rte_kvargs.c @@ -37531,26 +74817,340 @@ index c46e557e23..6e1b18d6fd 100644 /* minus top level */ depth -= 24; -- /* interate through levels (tbl8s) -+ /* iterate through levels (tbl8s) - * until we reach the last one - */ - while (depth > 8) { -diff --git a/dpdk/lib/librte_mbuf/rte_mbuf.h b/dpdk/lib/librte_mbuf/rte_mbuf.h -index 219b110b76..6d080527f6 100644 ---- a/dpdk/lib/librte_mbuf/rte_mbuf.h -+++ b/dpdk/lib/librte_mbuf/rte_mbuf.h -@@ -1535,7 +1535,7 @@ static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len) - static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m) - { - __rte_mbuf_sanity_check(m, 1); -- return !!(m->nb_segs == 1); -+ return m->nb_segs == 1; - } +- /* interate through levels (tbl8s) ++ /* iterate through levels (tbl8s) + * until we reach the last one + */ + while (depth > 8) { +diff --git a/dpdk/lib/librte_lpm/rte_lpm_altivec.h b/dpdk/lib/librte_lpm/rte_lpm_altivec.h +index 228c41b38e..4fbc1b595d 100644 +--- a/dpdk/lib/librte_lpm/rte_lpm_altivec.h ++++ b/dpdk/lib/librte_lpm/rte_lpm_altivec.h +@@ -88,28 +88,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], + if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[0] = i8.u32[0] + +- (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]]; + tbl[0] = *ptbl; + } + if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[1] = i8.u32[1] + +- (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]]; + tbl[1] = *ptbl; + } + if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[2] = i8.u32[2] + +- (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]]; + tbl[2] = *ptbl; + } + if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[3] = i8.u32[3] + +- (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[3] & 0x00FFFFFF) * 
RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]]; + tbl[3] = *ptbl; + } +diff --git a/dpdk/lib/librte_lpm/rte_lpm_neon.h b/dpdk/lib/librte_lpm/rte_lpm_neon.h +index 6c131d3125..4642a866f1 100644 +--- a/dpdk/lib/librte_lpm/rte_lpm_neon.h ++++ b/dpdk/lib/librte_lpm/rte_lpm_neon.h +@@ -81,28 +81,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], + if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[0] = i8.u32[0] + +- (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]]; + tbl[0] = *ptbl; + } + if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[1] = i8.u32[1] + +- (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]]; + tbl[1] = *ptbl; + } + if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[2] = i8.u32[2] + +- (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]]; + tbl[2] = *ptbl; + } + if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[3] = i8.u32[3] + +- (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]]; + tbl[3] = *ptbl; + } +diff --git a/dpdk/lib/librte_lpm/rte_lpm_sse.h b/dpdk/lib/librte_lpm/rte_lpm_sse.h +index 44770b6ff8..eaa863c522 100644 +--- a/dpdk/lib/librte_lpm/rte_lpm_sse.h ++++ b/dpdk/lib/librte_lpm/rte_lpm_sse.h +@@ -82,28 +82,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], + if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[0] = i8.u32[0] + +- (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]]; + tbl[0] = *ptbl; + } + if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[1] = i8.u32[1] + +- (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]]; + tbl[1] = *ptbl; + } + if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[2] = i8.u32[2] + +- (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]]; + tbl[2] = *ptbl; + } + if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[3] = i8.u32[3] + +- (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]]; + tbl[3] = *ptbl; + } +diff --git a/dpdk/lib/librte_mbuf/rte_mbuf.h b/dpdk/lib/librte_mbuf/rte_mbuf.h +index 219b110b76..6d080527f6 100644 +--- a/dpdk/lib/librte_mbuf/rte_mbuf.h ++++ b/dpdk/lib/librte_mbuf/rte_mbuf.h +@@ -1535,7 +1535,7 @@ static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len) + static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m) + { + 
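/*
 * Illustrative sketch (not part of the patch): the LPM vector-lookup
 * fixes above stop truncating the tbl8 group index to 8 bits.  The
 * index occupies the low 24 bits of the table entry, so groups >= 256
 * were previously mapped to the wrong slot.  The entry layout below is
 * invented; only the mask mirrors the patch.
 */
#include <assert.h>
#include <stdint.h>

#define TOY_TBL8_GROUP_NUM_ENTRIES 256
#define TOY_TBL8_INDEX_MASK        0x00FFFFFFu

static uint32_t
toy_tbl8_offset(uint32_t entry)
{
	/* NOT (uint8_t)entry: that would drop bits 8..23 of the index */
	return (entry & TOY_TBL8_INDEX_MASK) * TOY_TBL8_GROUP_NUM_ENTRIES;
}

int
main(void)
{
	/* group 0x1234 (>= 256) must not alias group 0x34 */
	assert(toy_tbl8_offset(0x00001234u) ==
	       0x1234u * TOY_TBL8_GROUP_NUM_ENTRIES);
	assert((uint8_t)0x00001234u * TOY_TBL8_GROUP_NUM_ENTRIES ==
	       0x34u * TOY_TBL8_GROUP_NUM_ENTRIES);
	return 0;
}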
__rte_mbuf_sanity_check(m, 1); +- return !!(m->nb_segs == 1); ++ return m->nb_segs == 1; + } + + /** +diff --git a/dpdk/lib/librte_mbuf/rte_mbuf_core.h b/dpdk/lib/librte_mbuf/rte_mbuf_core.h +index 9a8557d1cc..e7f38422e7 100644 +--- a/dpdk/lib/librte_mbuf/rte_mbuf_core.h ++++ b/dpdk/lib/librte_mbuf/rte_mbuf_core.h +@@ -15,7 +15,9 @@ + */ + + #include ++ + #include ++#include + #include + + #ifdef __cplusplus +diff --git a/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c b/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c +index d6931f8471..5762008b7e 100644 +--- a/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c ++++ b/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c +@@ -19,7 +19,6 @@ + #define RTE_MBUF_DYN_MZNAME "rte_mbuf_dyn" + + struct mbuf_dynfield_elt { +- TAILQ_ENTRY(mbuf_dynfield_elt) next; + struct rte_mbuf_dynfield params; + size_t offset; + }; +@@ -31,7 +30,6 @@ static struct rte_tailq_elem mbuf_dynfield_tailq = { + EAL_REGISTER_TAILQ(mbuf_dynfield_tailq); + + struct mbuf_dynflag_elt { +- TAILQ_ENTRY(mbuf_dynflag_elt) next; + struct rte_mbuf_dynflag params; + unsigned int bitnum; + }; +@@ -69,12 +67,16 @@ process_score(void) + shm->free_space[i] = 1; + } + +- for (off = 0; off < sizeof(struct rte_mbuf); off++) { ++ off = 0; ++ while (off < sizeof(struct rte_mbuf)) { + /* get the size of the free zone */ +- for (size = 0; shm->free_space[off + size]; size++) ++ for (size = 0; (off + size) < sizeof(struct rte_mbuf) && ++ shm->free_space[off + size]; size++) + ; +- if (size == 0) ++ if (size == 0) { ++ off++; + continue; ++ } + + /* get the alignment of biggest object that can fit in + * the zone at this offset. +@@ -85,8 +87,10 @@ process_score(void) + ; + + /* save it in free_space[] */ +- for (i = off; i < off + size; i++) ++ for (i = off; i < off + align; i++) + shm->free_space[i] = RTE_MAX(align, shm->free_space[i]); ++ ++ off += align; + } + } + +@@ -168,7 +172,7 @@ __mbuf_dynfield_lookup(const char *name) + break; + } + +- if (te == NULL) { ++ if (te == NULL || mbuf_dynfield == NULL) { + rte_errno = ENOENT; + return NULL; + } +@@ -181,19 +185,15 @@ rte_mbuf_dynfield_lookup(const char *name, struct rte_mbuf_dynfield *params) + { + struct mbuf_dynfield_elt *mbuf_dynfield; + +- if (shm == NULL) { +- rte_errno = ENOENT; +- return -1; +- } +- + rte_mcfg_tailq_read_lock(); +- mbuf_dynfield = __mbuf_dynfield_lookup(name); ++ if (shm == NULL && init_shared_mem() < 0) ++ mbuf_dynfield = NULL; ++ else ++ mbuf_dynfield = __mbuf_dynfield_lookup(name); + rte_mcfg_tailq_read_unlock(); + +- if (mbuf_dynfield == NULL) { +- rte_errno = ENOENT; ++ if (mbuf_dynfield == NULL) + return -1; +- } + + if (params != NULL) + memcpy(params, &mbuf_dynfield->params, sizeof(*params)); +@@ -279,12 +279,15 @@ __rte_mbuf_dynfield_register_offset(const struct rte_mbuf_dynfield *params, + mbuf_dynfield_tailq.head, mbuf_dynfield_list); + + te = rte_zmalloc("MBUF_DYNFIELD_TAILQ_ENTRY", sizeof(*te), 0); +- if (te == NULL) ++ if (te == NULL) { ++ rte_errno = ENOMEM; + return -1; ++ } + + mbuf_dynfield = rte_zmalloc("mbuf_dynfield", sizeof(*mbuf_dynfield), 0); + if (mbuf_dynfield == NULL) { + rte_free(te); ++ rte_errno = ENOMEM; + return -1; + } + +@@ -377,19 +380,15 @@ rte_mbuf_dynflag_lookup(const char *name, + { + struct mbuf_dynflag_elt *mbuf_dynflag; + +- if (shm == NULL) { +- rte_errno = ENOENT; +- return -1; +- } +- + rte_mcfg_tailq_read_lock(); +- mbuf_dynflag = __mbuf_dynflag_lookup(name); ++ if (shm == NULL && init_shared_mem() < 0) ++ mbuf_dynflag = NULL; ++ else ++ mbuf_dynflag = __mbuf_dynflag_lookup(name); + rte_mcfg_tailq_read_unlock(); + +- if 
(mbuf_dynflag == NULL) { +- rte_errno = ENOENT; ++ if (mbuf_dynflag == NULL) + return -1; +- } + + if (params != NULL) + memcpy(params, &mbuf_dynflag->params, sizeof(*params)); +@@ -457,12 +456,15 @@ __rte_mbuf_dynflag_register_bitnum(const struct rte_mbuf_dynflag *params, + mbuf_dynflag_tailq.head, mbuf_dynflag_list); + + te = rte_zmalloc("MBUF_DYNFLAG_TAILQ_ENTRY", sizeof(*te), 0); +- if (te == NULL) ++ if (te == NULL) { ++ rte_errno = ENOMEM; + return -1; ++ } + + mbuf_dynflag = rte_zmalloc("mbuf_dynflag", sizeof(*mbuf_dynflag), 0); + if (mbuf_dynflag == NULL) { + rte_free(te); ++ rte_errno = ENOMEM; + return -1; + } + +@@ -542,7 +544,7 @@ void rte_mbuf_dyn_dump(FILE *out) + dynflag->params.name, dynflag->bitnum, + dynflag->params.flags); + } +- fprintf(out, "Free space in mbuf (0 = free, value = zone alignment):\n"); ++ fprintf(out, "Free space in mbuf (0 = occupied, value = free zone alignment):\n"); + for (i = 0; i < sizeof(struct rte_mbuf); i++) { + if ((i % 8) == 0) + fprintf(out, " %4.4zx: ", i); +diff --git a/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h b/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h +index 96c363137e..2a87ef9217 100644 +--- a/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h ++++ b/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h +@@ -62,11 +62,20 @@ + * conventions than function names in dpdk: + * - "rte_mbuf_dynfield_" if defined in mbuf library + * - "rte__dynfield_" if defined in another library +- * - "rte_net__dynfield_" if defined in a in PMD ++ * - "rte_net__dynfield_" if defined in a PMD + * - any name that does not start with "rte_" in an application + */ ++#include ++#include + #include ++ ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ /** + * Maximum length of the dynamic field or flag string. + */ +@@ -250,4 +259,8 @@ void rte_mbuf_dyn_dump(FILE *out); + #define RTE_MBUF_DYNFIELD_METADATA_NAME "rte_flow_dynfield_metadata" + #define RTE_MBUF_DYNFLAG_METADATA_NAME "rte_flow_dynflag_metadata" + ++#ifdef __cplusplus ++} + #endif ++ ++#endif /* _RTE_MBUF_DYN_H_ */ diff --git a/dpdk/lib/librte_mempool/rte_mempool.c b/dpdk/lib/librte_mempool/rte_mempool.c -index 78d8eb941e..08906df9ee 100644 +index 78d8eb941e..89b6d39ac0 100644 --- a/dpdk/lib/librte_mempool/rte_mempool.c +++ b/dpdk/lib/librte_mempool/rte_mempool.c @@ -297,8 +297,8 @@ mempool_ops_alloc_once(struct rte_mempool *mp) @@ -37655,7 +75255,7 @@ index 78d8eb941e..08906df9ee 100644 - mz = rte_memzone_reserve_aligned(mz_name, 0, - mp->socket_id, mz_flags, align); - } -+ if (mz == NULL && rte_errno != ENOMEM) ++ if (mz != NULL || rte_errno != ENOMEM) + break; + + max_alloc_size = RTE_MIN(max_alloc_size, @@ -37701,6 +75301,15 @@ index 78d8eb941e..08906df9ee 100644 return mp->populated_size; +@@ -1141,7 +1160,7 @@ mempool_audit_cache(const struct rte_mempool *mp) + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + const struct rte_mempool_cache *cache; + cache = &mp->local_cache[lcore_id]; +- if (cache->len > cache->flushthresh) { ++ if (cache->len > RTE_DIM(cache->objs)) { + RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n", + lcore_id); + rte_panic("MEMPOOL: invalid cache len\n"); diff --git a/dpdk/lib/librte_mempool/rte_mempool.h b/dpdk/lib/librte_mempool/rte_mempool.h index f81152af96..4907c0808e 100644 --- a/dpdk/lib/librte_mempool/rte_mempool.h @@ -37757,6 +75366,119 @@ index d002dfc46f..d67ed2e2b9 100644 rte_mempool_in_use_count; rte_mempool_list_dump; rte_mempool_lookup; +diff --git a/dpdk/lib/librte_meter/rte_meter_version.map b/dpdk/lib/librte_meter/rte_meter_version.map +index 46410b0369..3fc7ddd199 100644 +--- 
a/dpdk/lib/librte_meter/rte_meter_version.map ++++ b/dpdk/lib/librte_meter/rte_meter_version.map +@@ -1,12 +1,8 @@ + DPDK_20.0 { + global: + +- rte_meter_srtcm_color_aware_check; +- rte_meter_srtcm_color_blind_check; + rte_meter_srtcm_config; + rte_meter_srtcm_profile_config; +- rte_meter_trtcm_color_aware_check; +- rte_meter_trtcm_color_blind_check; + rte_meter_trtcm_config; + rte_meter_trtcm_profile_config; + +@@ -16,8 +12,6 @@ DPDK_20.0 { + EXPERIMENTAL { + global: + +- rte_meter_trtcm_rfc4115_color_aware_check; +- rte_meter_trtcm_rfc4115_color_blind_check; + rte_meter_trtcm_rfc4115_config; + rte_meter_trtcm_rfc4115_profile_config; + }; +diff --git a/dpdk/lib/librte_net/rte_ip.h b/dpdk/lib/librte_net/rte_ip.h +index 1ceb7b7931..d34c0611f0 100644 +--- a/dpdk/lib/librte_net/rte_ip.h ++++ b/dpdk/lib/librte_net/rte_ip.h +@@ -139,8 +139,11 @@ __rte_raw_cksum(const void *buf, size_t len, uint32_t sum) + } + + /* if length is in odd bytes */ +- if (len == 1) +- sum += *((const uint8_t *)u16_buf); ++ if (len == 1) { ++ uint16_t left = 0; ++ *(uint8_t *)&left = *(const uint8_t *)u16_buf; ++ sum += left; ++ } + + return sum; + } +@@ -222,6 +225,9 @@ rte_raw_cksum_mbuf(const struct rte_mbuf *m, uint32_t off, uint32_t len, + break; + off -= seglen; + } ++ RTE_ASSERT(seg != NULL); ++ if (seg == NULL) ++ return -1; + seglen -= off; + buf = rte_pktmbuf_mtod_offset(seg, const char *, off); + if (seglen >= len) { +@@ -267,7 +273,7 @@ rte_ipv4_cksum(const struct rte_ipv4_hdr *ipv4_hdr) + { + uint16_t cksum; + cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct rte_ipv4_hdr)); +- return (cksum == 0xffff) ? cksum : (uint16_t)~cksum; ++ return (uint16_t)~cksum; + } + + /** +@@ -324,8 +330,7 @@ rte_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags) + * @param l4_hdr + * The pointer to the beginning of the L4 header. + * @return +- * The complemented checksum to set in the IP packet +- * or 0 on error ++ * The complemented checksum to set in the IP packet. + */ + static inline uint16_t + rte_ipv4_udptcp_cksum(const struct rte_ipv4_hdr *ipv4_hdr, const void *l4_hdr) +@@ -344,7 +349,12 @@ rte_ipv4_udptcp_cksum(const struct rte_ipv4_hdr *ipv4_hdr, const void *l4_hdr) + + cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff); + cksum = (~cksum) & 0xffff; +- if (cksum == 0) ++ /* ++ * Per RFC 768:If the computed checksum is zero for UDP, ++ * it is transmitted as all ones ++ * (the equivalent in one's complement arithmetic). ++ */ ++ if (cksum == 0 && ipv4_hdr->next_proto_id == IPPROTO_UDP) + cksum = 0xffff; + + return (uint16_t)cksum; +@@ -436,7 +446,12 @@ rte_ipv6_udptcp_cksum(const struct rte_ipv6_hdr *ipv6_hdr, const void *l4_hdr) + + cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff); + cksum = (~cksum) & 0xffff; +- if (cksum == 0) ++ /* ++ * Per RFC 768: If the computed checksum is zero for UDP, ++ * it is transmitted as all ones ++ * (the equivalent in one's complement arithmetic). ++ */ ++ if (cksum == 0 && ipv6_hdr->proto == IPPROTO_UDP) + cksum = 0xffff; + + return (uint16_t)cksum; +diff --git a/dpdk/lib/librte_net/rte_mpls.h b/dpdk/lib/librte_net/rte_mpls.h +index 32b6431219..14663f51b4 100644 +--- a/dpdk/lib/librte_net/rte_mpls.h ++++ b/dpdk/lib/librte_net/rte_mpls.h +@@ -21,6 +21,7 @@ extern "C" { + /** + * MPLS header. + */ ++__extension__ + struct rte_mpls_hdr { + uint16_t tag_msb; /**< Label(msb). 
*/ + #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN diff --git a/dpdk/lib/librte_pci/rte_pci.c b/dpdk/lib/librte_pci/rte_pci.c index a753cf3eca..5f7726fa89 100644 --- a/dpdk/lib/librte_pci/rte_pci.c @@ -37803,10 +75525,10 @@ index a753cf3eca..5f7726fa89 100644 in = get_u8_pciaddr_field(in, &dev_addr->bus, ':'); if (in == NULL) diff --git a/dpdk/lib/librte_pci/rte_pci.h b/dpdk/lib/librte_pci/rte_pci.h -index c87891405c..4087771c1e 100644 +index c87891405c..db83919653 100644 --- a/dpdk/lib/librte_pci/rte_pci.h +++ b/dpdk/lib/librte_pci/rte_pci.h -@@ -17,16 +17,10 @@ extern "C" { +@@ -17,18 +17,12 @@ extern "C" { #endif #include @@ -37821,8 +75543,1005 @@ index c87891405c..4087771c1e 100644 -#include - /** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */ - #define PCI_PRI_FMT "%.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8 +-#define PCI_PRI_FMT "%.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8 ++#define PCI_PRI_FMT "%.4" PRIx32 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8 #define PCI_PRI_STR_SIZE sizeof("XXXXXXXX:XX:XX.X") + + /** Short formatting string, without domain, for PCI device: Ex: 00:01.0 */ +diff --git a/dpdk/lib/librte_port/rte_port_source_sink.c b/dpdk/lib/librte_port/rte_port_source_sink.c +index 74b7385a28..68575c9833 100644 +--- a/dpdk/lib/librte_port/rte_port_source_sink.c ++++ b/dpdk/lib/librte_port/rte_port_source_sink.c +@@ -116,7 +116,7 @@ pcap_source_load(struct rte_port_source *port, + } + + for (i = 0; i < n_pkts; i++) { +- pkt = pcap_next(pcap_handle, &pcap_hdr); ++ pcap_next(pcap_handle, &pcap_hdr); + port->pkt_len[i] = RTE_MIN(max_len, pcap_hdr.len); + pkt_len_aligns[i] = RTE_CACHE_LINE_ROUNDUP( + port->pkt_len[i]); +diff --git a/dpdk/lib/librte_power/Makefile b/dpdk/lib/librte_power/Makefile +index 9a6db07e5f..16e384e446 100644 +--- a/dpdk/lib/librte_power/Makefile ++++ b/dpdk/lib/librte_power/Makefile +@@ -19,6 +19,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_POWER) += rte_power_empty_poll.c + SRCS-$(CONFIG_RTE_LIBRTE_POWER) += power_pstate_cpufreq.c + + # install this header file +-SYMLINK-$(CONFIG_RTE_LIBRTE_POWER)-include := rte_power.h rte_power_empty_poll.h ++SYMLINK-$(CONFIG_RTE_LIBRTE_POWER)-include := rte_power.h rte_power_empty_poll.h rte_power_guest_channel.h + + include $(RTE_SDK)/mk/rte.lib.mk +diff --git a/dpdk/lib/librte_power/channel_commands.h b/dpdk/lib/librte_power/channel_commands.h +deleted file mode 100644 +index adc8e5ca27..0000000000 +--- a/dpdk/lib/librte_power/channel_commands.h ++++ /dev/null +@@ -1,125 +0,0 @@ +-/* SPDX-License-Identifier: BSD-3-Clause +- * Copyright(c) 2010-2014 Intel Corporation +- */ +- +-#ifndef CHANNEL_COMMANDS_H_ +-#define CHANNEL_COMMANDS_H_ +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-#include +-#include +- +-/* --- Incoming messages --- */ +- +-/* Valid Commands */ +-#define CPU_POWER 1 +-#define CPU_POWER_CONNECT 2 +-#define PKT_POLICY 3 +-#define PKT_POLICY_REMOVE 4 +- +-/* CPU Power Command Scaling */ +-#define CPU_POWER_SCALE_UP 1 +-#define CPU_POWER_SCALE_DOWN 2 +-#define CPU_POWER_SCALE_MAX 3 +-#define CPU_POWER_SCALE_MIN 4 +-#define CPU_POWER_ENABLE_TURBO 5 +-#define CPU_POWER_DISABLE_TURBO 6 +- +-/* CPU Power Queries */ +-#define CPU_POWER_QUERY_FREQ_LIST 7 +-#define CPU_POWER_QUERY_FREQ 8 +-#define CPU_POWER_QUERY_CAPS_LIST 9 +-#define CPU_POWER_QUERY_CAPS 10 +- +-/* --- Outgoing messages --- */ +- +-/* Generic Power Command Response */ +-#define CPU_POWER_CMD_ACK 1 +-#define CPU_POWER_CMD_NACK 2 +- +-/* CPU Power Query Responses */ +-#define CPU_POWER_FREQ_LIST 3 +-#define CPU_POWER_CAPS_LIST 4 +- +-#define 
HOURS 24 +- +-#define MAX_VFS 10 +-#define VM_MAX_NAME_SZ 32 +- +-#define MAX_VCPU_PER_VM 8 +- +-struct t_boost_status { +- bool tbEnabled; +-}; +- +-struct timer_profile { +- int busy_hours[HOURS]; +- int quiet_hours[HOURS]; +- int hours_to_use_traffic_profile[HOURS]; +-}; +- +-enum workload {HIGH, MEDIUM, LOW}; +-enum policy_to_use { +- TRAFFIC, +- TIME, +- WORKLOAD, +- BRANCH_RATIO +-}; +- +-struct traffic { +- uint32_t min_packet_thresh; +- uint32_t avg_max_packet_thresh; +- uint32_t max_max_packet_thresh; +-}; +- +-#define CORE_TYPE_VIRTUAL 0 +-#define CORE_TYPE_PHYSICAL 1 +- +-struct channel_packet { +- uint64_t resource_id; /**< core_num, device */ +- uint32_t unit; /**< scale down/up/min/max */ +- uint32_t command; /**< Power, IO, etc */ +- char vm_name[VM_MAX_NAME_SZ]; +- +- uint64_t vfid[MAX_VFS]; +- int nb_mac_to_monitor; +- struct traffic traffic_policy; +- uint8_t vcpu_to_control[MAX_VCPU_PER_VM]; +- uint8_t num_vcpu; +- struct timer_profile timer_policy; +- bool core_type; +- enum workload workload; +- enum policy_to_use policy_to_use; +- struct t_boost_status t_boost_status; +-}; +- +-struct channel_packet_freq_list { +- uint64_t resource_id; /**< core_num, device */ +- uint32_t unit; /**< scale down/up/min/max */ +- uint32_t command; /**< Power, IO, etc */ +- char vm_name[VM_MAX_NAME_SZ]; +- +- uint32_t freq_list[MAX_VCPU_PER_VM]; +- uint8_t num_vcpu; +-}; +- +-struct channel_packet_caps_list { +- uint64_t resource_id; /**< core_num, device */ +- uint32_t unit; /**< scale down/up/min/max */ +- uint32_t command; /**< Power, IO, etc */ +- char vm_name[VM_MAX_NAME_SZ]; +- +- uint64_t turbo[MAX_VCPU_PER_VM]; +- uint64_t priority[MAX_VCPU_PER_VM]; +- uint8_t num_vcpu; +-}; +- +- +-#ifdef __cplusplus +-} +-#endif +- +-#endif /* CHANNEL_COMMANDS_H_ */ +diff --git a/dpdk/lib/librte_power/guest_channel.c b/dpdk/lib/librte_power/guest_channel.c +index b984d55bc8..4dadf5ef9f 100644 +--- a/dpdk/lib/librte_power/guest_channel.c ++++ b/dpdk/lib/librte_power/guest_channel.c +@@ -14,9 +14,9 @@ + + + #include ++#include + + #include "guest_channel.h" +-#include "channel_commands.h" + + #define RTE_LOGTYPE_GUEST_CHANNEL RTE_LOGTYPE_USER1 + +@@ -29,7 +29,7 @@ int + guest_channel_host_connect(const char *path, unsigned int lcore_id) + { + int flags, ret; +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + char fd_path[PATH_MAX]; + int fd = -1; + +@@ -74,7 +74,7 @@ guest_channel_host_connect(const char *path, unsigned int lcore_id) + /* Send a test packet, this command is ignored by the host, but a successful + * send indicates that the host endpoint is monitoring. 
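/*
 * Illustrative sketch (not part of the patch): guest_channel_send_msg()
 * above pushes a fixed-size packet over a virtio-serial fd.  A minimal
 * write loop that survives EINTR and short writes looks like this;
 * write_full() is an invented name, not a DPDK API.
 */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static int
write_full(int fd, const void *buf, size_t len)
{
	const char *p = buf;

	while (len > 0) {
		ssize_t n = write(fd, p, len);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* retry interrupted write */
			return -errno;
		}
		p += n;
		len -= (size_t)n;
	}
	return 0;
}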
+ */ +- pkt.command = CPU_POWER_CONNECT; ++ pkt.command = RTE_POWER_CPU_POWER_CONNECT; + global_fds[lcore_id] = fd; + ret = guest_channel_send_msg(&pkt, lcore_id); + if (ret != 0) { +@@ -93,7 +93,8 @@ guest_channel_host_connect(const char *path, unsigned int lcore_id) + } + + int +-guest_channel_send_msg(struct channel_packet *pkt, unsigned int lcore_id) ++guest_channel_send_msg(struct rte_power_channel_packet *pkt, ++ unsigned int lcore_id) + { + int ret, buffer_len = sizeof(*pkt); + void *buffer = pkt; +@@ -123,7 +124,7 @@ guest_channel_send_msg(struct channel_packet *pkt, unsigned int lcore_id) + return 0; + } + +-int rte_power_guest_channel_send_msg(struct channel_packet *pkt, ++int rte_power_guest_channel_send_msg(struct rte_power_channel_packet *pkt, + unsigned int lcore_id) + { + return guest_channel_send_msg(pkt, lcore_id); +diff --git a/dpdk/lib/librte_power/guest_channel.h b/dpdk/lib/librte_power/guest_channel.h +index 025961606c..b790f3661a 100644 +--- a/dpdk/lib/librte_power/guest_channel.h ++++ b/dpdk/lib/librte_power/guest_channel.h +@@ -8,8 +8,6 @@ + extern "C" { + #endif + +-#include +- + /** + * Connect to the Virtio-Serial VM end-point located in path. It is + * thread safe for unique lcore_ids. This function must be only called once from +@@ -51,31 +49,16 @@ void guest_channel_host_disconnect(unsigned int lcore_id); + * - Negative on channel not connected. + * - errno on write to channel error. + */ +-int guest_channel_send_msg(struct channel_packet *pkt, unsigned int lcore_id); +- +-/** +- * Send a message contained in pkt over the Virtio-Serial to the host endpoint. +- * +- * @param pkt +- * Pointer to a populated struct channel_packet +- * +- * @param lcore_id +- * lcore_id. +- * +- * @return +- * - 0 on success. +- * - Negative on error. +- */ +-int rte_power_guest_channel_send_msg(struct channel_packet *pkt, +- unsigned int lcore_id); ++int guest_channel_send_msg(struct rte_power_channel_packet *pkt, ++ unsigned int lcore_id); + + /** + * Read a message contained in pkt over the Virtio-Serial + * from the host endpoint. + * + * @param pkt +- * Pointer to channel_packet or +- * channel_packet_freq_list struct. ++ * Pointer to rte_power_channel_packet or ++ * rte_power_channel_packet_freq_list struct. + * + * @param pkt_len + * Size of expected data packet. +@@ -91,30 +74,6 @@ int power_guest_channel_read_msg(void *pkt, + size_t pkt_len, + unsigned int lcore_id); + +-/** +- * Receive a message contained in pkt over the Virtio-Serial +- * from the host endpoint. +- * +- * @param pkt +- * Pointer to channel_packet or +- * channel_packet_freq_list struct. +- * +- * @param pkt_len +- * Size of expected data packet. +- * +- * @param lcore_id +- * lcore_id. +- * +- * @return +- * - 0 on success. +- * - Negative on error. 
+- */ +-__rte_experimental +-int +-rte_power_guest_channel_receive_msg(void *pkt, +- size_t pkt_len, +- unsigned int lcore_id); +- + + #ifdef __cplusplus + } +diff --git a/dpdk/lib/librte_power/meson.build b/dpdk/lib/librte_power/meson.build +index cdf08f6df3..6281adfe17 100644 +--- a/dpdk/lib/librte_power/meson.build ++++ b/dpdk/lib/librte_power/meson.build +@@ -9,5 +9,6 @@ sources = files('rte_power.c', 'power_acpi_cpufreq.c', + 'power_kvm_vm.c', 'guest_channel.c', + 'rte_power_empty_poll.c', + 'power_pstate_cpufreq.c') +-headers = files('rte_power.h','rte_power_empty_poll.h') ++headers = files('rte_power.h','rte_power_empty_poll.h', ++ 'rte_power_guest_channel.h') + deps += ['timer'] +diff --git a/dpdk/lib/librte_power/power_kvm_vm.c b/dpdk/lib/librte_power/power_kvm_vm.c +index 277ebbeaeb..b34773d9c6 100644 +--- a/dpdk/lib/librte_power/power_kvm_vm.c ++++ b/dpdk/lib/librte_power/power_kvm_vm.c +@@ -6,14 +6,14 @@ + + #include + ++#include "rte_power_guest_channel.h" + #include "guest_channel.h" +-#include "channel_commands.h" + #include "power_kvm_vm.h" + #include "power_common.h" + + #define FD_PATH "/dev/virtio-ports/virtio.serial.port.poweragent" + +-static struct channel_packet pkt[RTE_MAX_LCORE]; ++static struct rte_power_channel_packet pkt[RTE_MAX_LCORE]; + + + int +@@ -24,7 +24,7 @@ power_kvm_vm_init(unsigned int lcore_id) + lcore_id, RTE_MAX_LCORE-1); + return -1; + } +- pkt[lcore_id].command = CPU_POWER; ++ pkt[lcore_id].command = RTE_POWER_CPU_POWER; + pkt[lcore_id].resource_id = lcore_id; + return guest_channel_host_connect(FD_PATH, lcore_id); + } +@@ -85,25 +85,25 @@ send_msg(unsigned int lcore_id, uint32_t scale_direction) + int + power_kvm_vm_freq_up(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_UP); ++ return send_msg(lcore_id, RTE_POWER_SCALE_UP); + } + + int + power_kvm_vm_freq_down(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_DOWN); ++ return send_msg(lcore_id, RTE_POWER_SCALE_DOWN); + } + + int + power_kvm_vm_freq_max(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_MAX); ++ return send_msg(lcore_id, RTE_POWER_SCALE_MAX); + } + + int + power_kvm_vm_freq_min(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_MIN); ++ return send_msg(lcore_id, RTE_POWER_SCALE_MIN); + } + + int +@@ -116,13 +116,13 @@ power_kvm_vm_turbo_status(__attribute__((unused)) unsigned int lcore_id) + int + power_kvm_vm_enable_turbo(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_ENABLE_TURBO); ++ return send_msg(lcore_id, RTE_POWER_ENABLE_TURBO); + } + + int + power_kvm_vm_disable_turbo(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_DISABLE_TURBO); ++ return send_msg(lcore_id, RTE_POWER_DISABLE_TURBO); + } + + struct rte_power_core_capabilities; +diff --git a/dpdk/lib/librte_power/power_pstate_cpufreq.c b/dpdk/lib/librte_power/power_pstate_cpufreq.c +index 2d8a9499dc..fa16b44146 100644 +--- a/dpdk/lib/librte_power/power_pstate_cpufreq.c ++++ b/dpdk/lib/librte_power/power_pstate_cpufreq.c +@@ -52,6 +52,9 @@ + } \ + } while (0) + ++/* macros used for rounding frequency to nearest 100000 */ ++#define FREQ_ROUNDING_DELTA 50000 ++#define ROUND_FREQ_TO_N_100000 100000 + + #define POWER_CONVERT_TO_DECIMAL 10 + #define BUS_FREQ 100000 +@@ -531,6 +534,57 @@ power_get_available_freqs(struct pstate_power_info *pi) + return ret; + } + ++static int ++power_get_cur_idx(struct pstate_power_info *pi) ++{ ++ FILE *f_cur; ++ int ret = -1; ++ char *p_cur; ++ char buf_cur[BUFSIZ]; ++ 
char fullpath_cur[PATH_MAX]; ++ char *s_cur; ++ uint32_t sys_cur_freq = 0; ++ unsigned int i; ++ ++ snprintf(fullpath_cur, sizeof(fullpath_cur), ++ POWER_SYSFILE_CUR_FREQ, ++ pi->lcore_id); ++ f_cur = fopen(fullpath_cur, "r"); ++ FOPEN_OR_ERR_RET(f_cur, ret); ++ ++ /* initialize the cur_idx to matching current frequency freq index */ ++ s_cur = fgets(buf_cur, sizeof(buf_cur), f_cur); ++ FOPS_OR_NULL_GOTO(s_cur, fail); ++ ++ p_cur = strchr(buf_cur, '\n'); ++ if (p_cur != NULL) ++ *p_cur = 0; ++ sys_cur_freq = strtoul(buf_cur, &p_cur, POWER_CONVERT_TO_DECIMAL); ++ ++ /* convert the frequency to nearest 100000 value ++ * Ex: if sys_cur_freq=1396789 then freq_conv=1400000 ++ * Ex: if sys_cur_freq=800030 then freq_conv=800000 ++ * Ex: if sys_cur_freq=800030 then freq_conv=800000 ++ */ ++ unsigned int freq_conv = 0; ++ freq_conv = (sys_cur_freq + FREQ_ROUNDING_DELTA) ++ / ROUND_FREQ_TO_N_100000; ++ freq_conv = freq_conv * ROUND_FREQ_TO_N_100000; ++ ++ for (i = 0; i < pi->nb_freqs; i++) { ++ if (freq_conv == pi->freqs[i]) { ++ pi->curr_idx = i; ++ break; ++ } ++ } ++ ++ fclose(f_cur); ++ return 0; ++fail: ++ fclose(f_cur); ++ return ret; ++} ++ + int + power_pstate_cpufreq_init(unsigned int lcore_id) + { +@@ -571,6 +625,11 @@ power_pstate_cpufreq_init(unsigned int lcore_id) + goto fail; + } + ++ if (power_get_cur_idx(pi) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get current frequency " ++ "index of lcore %u\n", lcore_id); ++ goto fail; ++ } + + /* Set freq to max by default */ + if (power_pstate_cpufreq_freq_max(lcore_id) < 0) { +diff --git a/dpdk/lib/librte_power/rte_power.h b/dpdk/lib/librte_power/rte_power.h +index 427058b811..04dc4cb1da 100644 +--- a/dpdk/lib/librte_power/rte_power.h ++++ b/dpdk/lib/librte_power/rte_power.h +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + #ifdef __cplusplus + extern "C" { +diff --git a/dpdk/lib/librte_power/rte_power_guest_channel.h b/dpdk/lib/librte_power/rte_power_guest_channel.h +new file mode 100644 +index 0000000000..ed4fbfdcd3 +--- /dev/null ++++ b/dpdk/lib/librte_power/rte_power_guest_channel.h +@@ -0,0 +1,176 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2010-2021 Intel Corporation ++ */ ++#ifndef RTE_POWER_GUEST_CHANNEL_H ++#define RTE_POWER_GUEST_CHANNEL_H ++ ++#include ++#include ++#include ++ ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define RTE_POWER_MAX_VFS 10 ++#define RTE_POWER_VM_MAX_NAME_SZ 32 ++#define RTE_POWER_MAX_VCPU_PER_VM 8 ++#define RTE_POWER_HOURS_PER_DAY 24 ++ ++/* Valid Commands */ ++#define RTE_POWER_CPU_POWER 1 ++#define RTE_POWER_CPU_POWER_CONNECT 2 ++#define RTE_POWER_PKT_POLICY 3 ++#define RTE_POWER_PKT_POLICY_REMOVE 4 ++ ++#define RTE_POWER_CORE_TYPE_VIRTUAL 0 ++#define RTE_POWER_CORE_TYPE_PHYSICAL 1 ++ ++/* CPU Power Command Scaling */ ++#define RTE_POWER_SCALE_UP 1 ++#define RTE_POWER_SCALE_DOWN 2 ++#define RTE_POWER_SCALE_MAX 3 ++#define RTE_POWER_SCALE_MIN 4 ++#define RTE_POWER_ENABLE_TURBO 5 ++#define RTE_POWER_DISABLE_TURBO 6 ++ ++/* CPU Power Queries */ ++#define RTE_POWER_QUERY_FREQ_LIST 7 ++#define RTE_POWER_QUERY_FREQ 8 ++#define RTE_POWER_QUERY_CAPS_LIST 9 ++#define RTE_POWER_QUERY_CAPS 10 ++ ++/* Generic Power Command Response */ ++#define RTE_POWER_CMD_ACK 1 ++#define RTE_POWER_CMD_NACK 2 ++ ++/* CPU Power Query Responses */ ++#define RTE_POWER_FREQ_LIST 3 ++#define RTE_POWER_CAPS_LIST 4 ++ ++struct rte_power_traffic_policy { ++ uint32_t min_packet_thresh; ++ uint32_t avg_max_packet_thresh; ++ uint32_t max_max_packet_thresh; ++}; ++ ++struct 
rte_power_timer_profile { ++ int busy_hours[RTE_POWER_HOURS_PER_DAY]; ++ int quiet_hours[RTE_POWER_HOURS_PER_DAY]; ++ int hours_to_use_traffic_profile[RTE_POWER_HOURS_PER_DAY]; ++}; ++ ++enum rte_power_workload_level { ++ RTE_POWER_WL_HIGH, ++ RTE_POWER_WL_MEDIUM, ++ RTE_POWER_WL_LOW ++}; ++ ++enum rte_power_policy { ++ RTE_POWER_POLICY_TRAFFIC, ++ RTE_POWER_POLICY_TIME, ++ RTE_POWER_POLICY_WORKLOAD, ++ RTE_POWER_POLICY_BRANCH_RATIO ++}; ++ ++struct rte_power_turbo_status { ++ bool tbEnabled; ++}; ++ ++struct rte_power_channel_packet { ++ uint64_t resource_id; /**< core_num, device */ ++ uint32_t unit; /**< scale down/up/min/max */ ++ uint32_t command; /**< Power, IO, etc */ ++ char vm_name[RTE_POWER_VM_MAX_NAME_SZ]; ++ ++ uint64_t vfid[RTE_POWER_MAX_VFS]; ++ int nb_mac_to_monitor; ++ struct rte_power_traffic_policy traffic_policy; ++ uint8_t vcpu_to_control[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint8_t num_vcpu; ++ struct rte_power_timer_profile timer_policy; ++ bool core_type; ++ enum rte_power_workload_level workload; ++ enum rte_power_policy policy_to_use; ++ struct rte_power_turbo_status t_boost_status; ++}; ++ ++struct rte_power_channel_packet_freq_list { ++ uint64_t resource_id; /**< core_num, device */ ++ uint32_t unit; /**< scale down/up/min/max */ ++ uint32_t command; /**< Power, IO, etc */ ++ char vm_name[RTE_POWER_VM_MAX_NAME_SZ]; ++ ++ uint32_t freq_list[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint8_t num_vcpu; ++}; ++ ++struct rte_power_channel_packet_caps_list { ++ uint64_t resource_id; /**< core_num, device */ ++ uint32_t unit; /**< scale down/up/min/max */ ++ uint32_t command; /**< Power, IO, etc */ ++ char vm_name[RTE_POWER_VM_MAX_NAME_SZ]; ++ ++ uint64_t turbo[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint64_t priority[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint8_t num_vcpu; ++}; ++ ++/** ++ * @internal ++ * ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Send a message contained in pkt over the Virtio-Serial to the host endpoint. ++ * ++ * @param pkt ++ * Pointer to a populated struct channel_packet. ++ * ++ * @param lcore_id ++ * Use channel specific to this lcore_id. ++ * ++ * @return ++ * - 0 on success. ++ * - Negative on error. ++ */ ++__rte_experimental ++int rte_power_guest_channel_send_msg(struct rte_power_channel_packet *pkt, ++ unsigned int lcore_id); ++ ++/** ++ * @internal ++ * ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Receive a message contained in pkt over the Virtio-Serial ++ * from the host endpoint. ++ * ++ * @param pkt ++ * Pointer to channel_packet or ++ * channel_packet_freq_list struct. ++ * ++ * @param pkt_len ++ * Size of expected data packet. ++ * ++ * @param lcore_id ++ * Use channel specific to this lcore_id. ++ * ++ * @return ++ * - 0 on success. ++ * - Negative on error. 
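/*
 * Illustrative sketch (not part of the patch): asking the host to scale
 * an lcore's frequency up through the renamed public API declared in
 * the new header above, mirroring what power_kvm_vm.c does.  The helper
 * name and minimal error handling are invented for the example.
 */
#include <string.h>
#include <rte_power_guest_channel.h>

static int
request_scale_up(unsigned int lcore_id)
{
	struct rte_power_channel_packet pkt;

	memset(&pkt, 0, sizeof(pkt));
	pkt.command = RTE_POWER_CPU_POWER;	/* CPU power command */
	pkt.unit = RTE_POWER_SCALE_UP;		/* scale direction */
	pkt.resource_id = lcore_id;

	return rte_power_guest_channel_send_msg(&pkt, lcore_id);
}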
++ */ ++__rte_experimental ++int rte_power_guest_channel_receive_msg(void *pkt, ++ size_t pkt_len, ++ unsigned int lcore_id); ++ ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* RTE_POWER_GUEST_CHANNEL_H_ */ +diff --git a/dpdk/lib/librte_power/rte_power_version.map b/dpdk/lib/librte_power/rte_power_version.map +index 55a168f56e..0fe85377ab 100644 +--- a/dpdk/lib/librte_power/rte_power_version.map ++++ b/dpdk/lib/librte_power/rte_power_version.map +@@ -33,4 +33,8 @@ EXPERIMENTAL { + rte_power_guest_channel_receive_msg; + rte_power_poll_stat_fetch; + rte_power_poll_stat_update; ++ ++ # added in 21.02 ++ rte_power_guest_channel_receive_msg; ++ rte_power_guest_channel_send_msg; + }; +diff --git a/dpdk/lib/librte_rawdev/rte_rawdev.c b/dpdk/lib/librte_rawdev/rte_rawdev.c +index b6f1e1c779..fe289cefdf 100644 +--- a/dpdk/lib/librte_rawdev/rte_rawdev.c ++++ b/dpdk/lib/librte_rawdev/rte_rawdev.c +@@ -89,15 +89,15 @@ rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info) + + rawdev = &rte_rawdevs[dev_id]; + +- RTE_FUNC_PTR_OR_ERR_RET(*rawdev->dev_ops->dev_info_get, -ENOTSUP); +- (*rawdev->dev_ops->dev_info_get)(rawdev, dev_info->dev_private); +- +- if (dev_info) { +- +- dev_info->driver_name = rawdev->driver_name; +- dev_info->device = rawdev->device; ++ if (dev_info->dev_private != NULL) { ++ RTE_FUNC_PTR_OR_ERR_RET(*rawdev->dev_ops->dev_info_get, -ENOTSUP); ++ (*rawdev->dev_ops->dev_info_get)(rawdev, dev_info->dev_private); + } + ++ dev_info->driver_name = rawdev->driver_name; ++ dev_info->device = rawdev->device; ++ dev_info->socket_id = rawdev->socket_id; ++ + return 0; + } + +diff --git a/dpdk/lib/librte_rawdev/rte_rawdev.h b/dpdk/lib/librte_rawdev/rte_rawdev.h +index ed011ca228..32f6b8bb03 100644 +--- a/dpdk/lib/librte_rawdev/rte_rawdev.h ++++ b/dpdk/lib/librte_rawdev/rte_rawdev.h +@@ -12,9 +12,6 @@ + * + * This API allow applications to configure and use generic devices having + * no specific type already available in DPDK. +- * +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice + */ + + #ifdef __cplusplus +@@ -77,7 +74,13 @@ struct rte_rawdev_info; + * + * @param[out] dev_info + * A pointer to a structure of type *rte_rawdev_info* to be filled with the +- * contextual information of the device. ++ * contextual information of the device. The dev_info->dev_private field ++ * should point to an appropriate buffer space for holding the device- ++ * specific info for that hardware. ++ * If the dev_private field is set to NULL, then the device-specific info ++ * function will not be called and only basic information about the device ++ * will be returned. This can be used to safely query the type of a rawdev ++ * instance without needing to know the size of the private data to return. + * + * @return + * - 0: Success, driver updates the contextual information of the raw device +diff --git a/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h b/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h +index cb3555ab50..4395a2182d 100644 +--- a/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h ++++ b/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h +@@ -11,9 +11,6 @@ + * @note + * Driver facing APIs for a raw device. These are not to be called directly by + * any application. 
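/*
 * Illustrative sketch (not part of the patch): with the rte_rawdev
 * change above, passing dev_private == NULL fills in only the generic
 * fields, so a rawdev can be probed without knowing the size of its
 * driver-specific info.  rawdev_driver_name() is an invented helper.
 */
#include <stddef.h>
#include <rte_rawdev.h>

static const char *
rawdev_driver_name(uint16_t dev_id)
{
	struct rte_rawdev_info info = { .dev_private = NULL };

	/* no driver-specific buffer is needed for the generic fields */
	if (rte_rawdev_info_get(dev_id, &info) < 0)
		return NULL;
	return info.driver_name;
}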
+- * +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice + */ + + #ifdef __cplusplus +diff --git a/dpdk/lib/librte_rawdev/rte_rawdev_version.map b/dpdk/lib/librte_rawdev/rte_rawdev_version.map +index d847c9e0d3..63b54f598b 100644 +--- a/dpdk/lib/librte_rawdev/rte_rawdev_version.map ++++ b/dpdk/lib/librte_rawdev/rte_rawdev_version.map +@@ -5,6 +5,7 @@ DPDK_20.0 { + rte_rawdev_configure; + rte_rawdev_count; + rte_rawdev_dequeue_buffers; ++ rte_rawdev_dump; + rte_rawdev_enqueue_buffers; + rte_rawdev_firmware_load; + rte_rawdev_firmware_status_get; +diff --git a/dpdk/lib/librte_rcu/rte_rcu_qsbr.c b/dpdk/lib/librte_rcu/rte_rcu_qsbr.c +index 2f3fad776e..58d58c5be5 100644 +--- a/dpdk/lib/librte_rcu/rte_rcu_qsbr.c ++++ b/dpdk/lib/librte_rcu/rte_rcu_qsbr.c +@@ -242,10 +242,10 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v) + + fprintf(f, "\n"); + +- fprintf(f, " Token = %"PRIu64"\n", ++ fprintf(f, " Token = %" PRIu64 "\n", + __atomic_load_n(&v->token, __ATOMIC_ACQUIRE)); + +- fprintf(f, " Least Acknowledged Token = %"PRIu64"\n", ++ fprintf(f, " Least Acknowledged Token = %" PRIu64 "\n", + __atomic_load_n(&v->acked_token, __ATOMIC_ACQUIRE)); + + fprintf(f, "Quiescent State Counts for readers:\n"); +@@ -255,7 +255,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v) + id = i << __RTE_QSBR_THRID_INDEX_SHIFT; + while (bmap) { + t = __builtin_ctzl(bmap); +- fprintf(f, "thread ID = %u, count = %"PRIu64", lock count = %u\n", ++ fprintf(f, "thread ID = %u, count = %" PRIu64 ", lock count = %u\n", + id + t, + __atomic_load_n( + &v->qsbr_cnt[id + t].cnt, +diff --git a/dpdk/lib/librte_rcu/rte_rcu_qsbr.h b/dpdk/lib/librte_rcu/rte_rcu_qsbr.h +index 0b5585925f..430cdfb58a 100644 +--- a/dpdk/lib/librte_rcu/rte_rcu_qsbr.h ++++ b/dpdk/lib/librte_rcu/rte_rcu_qsbr.h +@@ -7,7 +7,12 @@ + + /** + * @file +- * RTE Quiescent State Based Reclamation (QSBR) ++ * ++ * RTE Quiescent State Based Reclamation (QSBR). ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. 
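/*
 * Illustrative sketch (not part of the patch): the rte_rcu_qsbr format
 * changes above only add spaces around PRIu64/PRIx64.  A common reason
 * for this style is that "%"PRIu64 (no spaces) tokenizes as a C++11
 * user-defined literal and breaks compilation under C++; the spaced
 * form is valid in both C and C++.
 */
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint64_t token = 42;

	printf("Token = %" PRIu64 "\n", token);	/* portable spelling */
	return 0;
}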
+ * + * Quiescent State (QS) is any point in the thread execution + * where the thread does not hold a reference to a data structure +@@ -465,7 +470,7 @@ rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id) + __atomic_store_n(&v->qsbr_cnt[thread_id].cnt, + t, __ATOMIC_RELEASE); + +- __RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %"PRIu64", Thread ID = %d", ++ __RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %" PRIu64 ", Thread ID = %d", + __func__, t, thread_id); + } + +@@ -493,13 +498,13 @@ __rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait) + while (bmap) { + j = __builtin_ctzl(bmap); + __RTE_RCU_DP_LOG(DEBUG, +- "%s: check: token = %"PRIu64", wait = %d, Bit Map = 0x%"PRIx64", Thread ID = %d", ++ "%s: check: token = %" PRIu64 ", wait = %d, Bit Map = 0x%" PRIx64 ", Thread ID = %d", + __func__, t, wait, bmap, id + j); + c = __atomic_load_n( + &v->qsbr_cnt[id + j].cnt, + __ATOMIC_ACQUIRE); + __RTE_RCU_DP_LOG(DEBUG, +- "%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d", ++ "%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d", + __func__, t, wait, c, id+j); + + /* Counter is not checked for wrap-around condition +@@ -556,12 +561,12 @@ __rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait) + + for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) { + __RTE_RCU_DP_LOG(DEBUG, +- "%s: check: token = %"PRIu64", wait = %d, Thread ID = %d", ++ "%s: check: token = %" PRIu64 ", wait = %d, Thread ID = %d", + __func__, t, wait, i); + while (1) { + c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE); + __RTE_RCU_DP_LOG(DEBUG, +- "%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d", ++ "%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d", + __func__, t, wait, c, i); + + /* Counter is not checked for wrap-around condition +diff --git a/dpdk/lib/librte_rib/rte_rib.c b/dpdk/lib/librte_rib/rte_rib.c +index 55d612dc2e..07b3c068ed 100644 +--- a/dpdk/lib/librte_rib/rte_rib.c ++++ b/dpdk/lib/librte_rib/rte_rib.c +@@ -301,7 +301,7 @@ rte_rib_insert(struct rte_rib *rib, uint32_t ip, uint8_t depth) + /* closest node found, new_node should be inserted in the middle */ + common_depth = RTE_MIN(depth, (*tmp)->depth); + common_prefix = ip ^ (*tmp)->ip; +- d = __builtin_clz(common_prefix); ++ d = (common_prefix == 0) ? 32 : __builtin_clz(common_prefix); + + common_depth = RTE_MIN(d, common_depth); + common_prefix = ip & rte_rib_depth_to_mask(common_depth); +diff --git a/dpdk/lib/librte_rib/rte_rib.h b/dpdk/lib/librte_rib/rte_rib.h +index 6b70de980a..93cd93b4ae 100644 +--- a/dpdk/lib/librte_rib/rte_rib.h ++++ b/dpdk/lib/librte_rib/rte_rib.h +@@ -8,11 +8,25 @@ + + /** + * @file ++ * ++ * RTE RIB library. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. 
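/*
 * Illustrative sketch (not part of the patch): the rte_rib change above
 * guards __builtin_clz(0), which is undefined behaviour, by returning
 * the full word width when the two prefixes are identical.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int
safe_clz32(uint32_t v)
{
	return (v == 0) ? 32 : (unsigned int)__builtin_clz(v);
}

int
main(void)
{
	assert(safe_clz32(0) == 32);		/* identical prefixes */
	assert(safe_clz32(0x80000000u) == 0);	/* differ in the top bit */
	assert(safe_clz32(1) == 31);
	return 0;
}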
++ * + * Level compressed tree implementation for IPv4 Longest Prefix Match + */ + ++#include ++#include ++ + #include + ++#ifdef __cplusplus ++extern "C" { ++#endif ++ + /** + * rte_rib_get_nxt() flags + */ +@@ -274,4 +288,8 @@ __rte_experimental + void + rte_rib_free(struct rte_rib *rib); + ++#ifdef __cplusplus ++} ++#endif ++ + #endif /* _RTE_RIB_H_ */ +diff --git a/dpdk/lib/librte_rib/rte_rib6.h b/dpdk/lib/librte_rib/rte_rib6.h +index 871457138d..b80665bf44 100644 +--- a/dpdk/lib/librte_rib/rte_rib6.h ++++ b/dpdk/lib/librte_rib/rte_rib6.h +@@ -8,11 +8,23 @@ + + /** + * @file ++ * ++ * RTE rib6 library. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. ++ * + * Level compressed tree implementation for IPv6 Longest Prefix Match + */ + + #include + #include ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif + + #define RTE_RIB6_IPV6_ADDR_SIZE 16 + +@@ -331,4 +343,8 @@ __rte_experimental + void + rte_rib6_free(struct rte_rib6 *rib); + +-#endif /* _RTE_RIB_H_ */ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* _RTE_RIB6_H_ */ +diff --git a/dpdk/lib/librte_sched/rte_sched.c b/dpdk/lib/librte_sched/rte_sched.c +index c0983ddda4..0fa0741664 100644 +--- a/dpdk/lib/librte_sched/rte_sched.c ++++ b/dpdk/lib/librte_sched/rte_sched.c +@@ -222,6 +222,7 @@ struct rte_sched_port { + uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */ + uint64_t time; /* Current NIC TX time measured in bytes */ + struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */ ++ uint64_t cycles_per_byte; + + /* Grinders */ + struct rte_mbuf **pkts_out; +@@ -304,7 +305,7 @@ rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex) + + static int + pipe_profile_check(struct rte_sched_pipe_params *params, +- uint32_t rate, uint16_t *qsize) ++ uint64_t rate, uint16_t *qsize) + { + uint32_t i; + +@@ -624,7 +625,7 @@ rte_sched_pipe_profile_convert(struct rte_sched_subport *subport, + + static void + rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport, +- struct rte_sched_subport_params *params, uint32_t rate) ++ struct rte_sched_subport_params *params, uint64_t rate) + { + uint32_t i; + +@@ -852,6 +853,7 @@ rte_sched_port_config(struct rte_sched_port_params *params) + cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT) + / params->rate; + port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte); ++ port->cycles_per_byte = cycles_per_byte; + + /* Grinders */ + port->pkts_out = NULL; +@@ -888,7 +890,7 @@ rte_sched_subport_free(struct rte_sched_port *port, + } + } + +- rte_bitmap_free(subport->bmp); ++ rte_free(subport); + } + + void +@@ -2673,16 +2675,21 @@ static inline void + rte_sched_port_time_resync(struct rte_sched_port *port) + { + uint64_t cycles = rte_get_tsc_cycles(); +- uint64_t cycles_diff = cycles - port->time_cpu_cycles; ++ uint64_t cycles_diff; + uint64_t bytes_diff; + uint32_t i; + ++ if (cycles < port->time_cpu_cycles) ++ port->time_cpu_cycles = 0; ++ ++ cycles_diff = cycles - port->time_cpu_cycles; + /* Compute elapsed time in bytes */ + bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT, + port->inv_cycles_per_byte); + + /* Advance port time */ +- port->time_cpu_cycles = cycles; ++ port->time_cpu_cycles += ++ (bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT; + port->time_cpu_bytes += bytes_diff; + if (port->time < port->time_cpu_bytes) + port->time = port->time_cpu_bytes; diff --git a/dpdk/lib/librte_security/rte_security.c 
b/dpdk/lib/librte_security/rte_security.c index bc81ce15d1..dc9a3e89cd 100644 --- a/dpdk/lib/librte_security/rte_security.c @@ -38014,6 +76733,189 @@ index 546779df2b..b4b4eb2d85 100644 */ int rte_security_session_destroy(struct rte_security_ctx *instance, +diff --git a/dpdk/lib/librte_stack/rte_stack.h b/dpdk/lib/librte_stack/rte_stack.h +index 27ddb199e5..abf6420766 100644 +--- a/dpdk/lib/librte_stack/rte_stack.h ++++ b/dpdk/lib/librte_stack/rte_stack.h +@@ -4,9 +4,12 @@ + + /** + * @file rte_stack.h +- * @b EXPERIMENTAL: this API may change without prior notice + * +- * RTE Stack ++ * RTE Stack. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. + * + * librte_stack provides an API for configuration and use of a bounded stack of + * pointers. Push and pop operations are MT-safe, allowing concurrent access, +diff --git a/dpdk/lib/librte_stack/rte_stack_lf_c11.h b/dpdk/lib/librte_stack/rte_stack_lf_c11.h +index 999359f081..94129c3e8a 100644 +--- a/dpdk/lib/librte_stack/rte_stack_lf_c11.h ++++ b/dpdk/lib/librte_stack/rte_stack_lf_c11.h +@@ -139,8 +139,10 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list, + /* If NULL was encountered, the list was modified while + * traversing it. Retry. + */ +- if (i != num) ++ if (i != num) { ++ old_head = list->head; + continue; ++ } + + new_head.top = tmp; + new_head.cnt = old_head.cnt + 1; +diff --git a/dpdk/lib/librte_stack/rte_stack_lf_generic.h b/dpdk/lib/librte_stack/rte_stack_lf_generic.h +index 3abbb53428..4850a05ee7 100644 +--- a/dpdk/lib/librte_stack/rte_stack_lf_generic.h ++++ b/dpdk/lib/librte_stack/rte_stack_lf_generic.h +@@ -78,7 +78,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list, + struct rte_stack_lf_elem **last) + { + struct rte_stack_lf_head old_head; +- int success; ++ int success = 0; + + /* Reserve num elements, if available */ + while (1) { +diff --git a/dpdk/lib/librte_table/rte_table_hash_key16.c b/dpdk/lib/librte_table/rte_table_hash_key16.c +index 2cca1c924a..c4384b114d 100644 +--- a/dpdk/lib/librte_table/rte_table_hash_key16.c ++++ b/dpdk/lib/librte_table/rte_table_hash_key16.c +@@ -33,6 +33,7 @@ + + #endif + ++#ifdef RTE_ARCH_64 + struct rte_bucket_4_16 { + /* Cache line 0 */ + uint64_t signature[4 + 1]; +@@ -46,6 +47,22 @@ struct rte_bucket_4_16 { + /* Cache line 2 */ + uint8_t data[0]; + }; ++#else ++struct rte_bucket_4_16 { ++ /* Cache line 0 */ ++ uint64_t signature[4 + 1]; ++ uint64_t lru_list; ++ struct rte_bucket_4_16 *next; ++ uint32_t pad; ++ uint64_t next_valid; ++ ++ /* Cache line 1 */ ++ uint64_t key[4][2]; ++ ++ /* Cache line 2 */ ++ uint8_t data[0]; ++}; ++#endif + + struct rte_table_hash { + struct rte_table_stats stats; +diff --git a/dpdk/lib/librte_table/rte_table_hash_key32.c b/dpdk/lib/librte_table/rte_table_hash_key32.c +index a137c50284..3e0031fe1e 100644 +--- a/dpdk/lib/librte_table/rte_table_hash_key32.c ++++ b/dpdk/lib/librte_table/rte_table_hash_key32.c +@@ -33,6 +33,7 @@ + + #endif + ++#ifdef RTE_ARCH_64 + struct rte_bucket_4_32 { + /* Cache line 0 */ + uint64_t signature[4 + 1]; +@@ -46,6 +47,22 @@ struct rte_bucket_4_32 { + /* Cache line 3 */ + uint8_t data[0]; + }; ++#else ++struct rte_bucket_4_32 { ++ /* Cache line 0 */ ++ uint64_t signature[4 + 1]; ++ uint64_t lru_list; ++ struct rte_bucket_4_32 *next; ++ uint32_t pad; ++ uint64_t next_valid; ++ ++ /* Cache lines 1 and 2 */ ++ uint64_t key[4][4]; ++ ++ /* Cache line 3 */ ++ uint8_t data[0]; ++}; ++#endif + + struct rte_table_hash { + struct 
rte_table_stats stats; +diff --git a/dpdk/lib/librte_table/rte_table_hash_key8.c b/dpdk/lib/librte_table/rte_table_hash_key8.c +index 1811ad8d05..34e3ed1af9 100644 +--- a/dpdk/lib/librte_table/rte_table_hash_key8.c ++++ b/dpdk/lib/librte_table/rte_table_hash_key8.c +@@ -31,6 +31,7 @@ + + #endif + ++#ifdef RTE_ARCH_64 + struct rte_bucket_4_8 { + /* Cache line 0 */ + uint64_t signature; +@@ -43,6 +44,21 @@ struct rte_bucket_4_8 { + /* Cache line 1 */ + uint8_t data[0]; + }; ++#else ++struct rte_bucket_4_8 { ++ /* Cache line 0 */ ++ uint64_t signature; ++ uint64_t lru_list; ++ struct rte_bucket_4_8 *next; ++ uint32_t pad; ++ uint64_t next_valid; ++ ++ uint64_t key[4]; ++ ++ /* Cache line 1 */ ++ uint8_t data[0]; ++}; ++#endif + + struct rte_table_hash { + struct rte_table_stats stats; +diff --git a/dpdk/lib/librte_telemetry/meson.build b/dpdk/lib/librte_telemetry/meson.build +index 26a331140b..f65f1f4acf 100644 +--- a/dpdk/lib/librte_telemetry/meson.build ++++ b/dpdk/lib/librte_telemetry/meson.build +@@ -6,7 +6,7 @@ headers = files('rte_telemetry.h', 'rte_telemetry_internal.h', 'rte_telemetry_pa + deps += ['metrics', 'ethdev'] + cflags += '-DALLOW_EXPERIMENTAL_API' + +-jansson = dependency('jansson', required: false) ++jansson = dependency('jansson', required: false, method: 'pkg-config') + if jansson.found() + ext_deps += jansson + dpdk_app_link_libraries += ['telemetry'] +diff --git a/dpdk/lib/librte_telemetry/rte_telemetry.h b/dpdk/lib/librte_telemetry/rte_telemetry.h +index aedb318598..f1376ea35f 100644 +--- a/dpdk/lib/librte_telemetry/rte_telemetry.h ++++ b/dpdk/lib/librte_telemetry/rte_telemetry.h +@@ -9,7 +9,12 @@ + + /** + * @file +- * RTE Telemetry ++ * ++ * RTE Telemetry. ++ * ++ * @warning ++ * @b EXPERIMENTAL: ++ * All functions in this file may be changed or removed without prior notice. + * + * The telemetry library provides a method to retrieve statistics from + * DPDK by sending a JSON encoded message over a socket. DPDK will send diff --git a/dpdk/lib/librte_telemetry/rte_telemetry_parser.c b/dpdk/lib/librte_telemetry/rte_telemetry_parser.c index 9601323970..e8c269e85e 100644 --- a/dpdk/lib/librte_telemetry/rte_telemetry_parser.c @@ -38104,8 +77006,38 @@ index ca88454ff6..99862a3ba1 100644 } /* Initialize the timer handle tim for use */ +diff --git a/dpdk/lib/librte_timer/rte_timer.h b/dpdk/lib/librte_timer/rte_timer.h +index 9dc5fc3092..88cc267686 100644 +--- a/dpdk/lib/librte_timer/rte_timer.h ++++ b/dpdk/lib/librte_timer/rte_timer.h +@@ -274,6 +274,12 @@ int rte_timer_reset(struct rte_timer *tim, uint64_t ticks, + * The callback function of the timer. + * @param arg + * The user argument of the callback function. ++ * ++ * @note ++ * This API should not be called inside a timer's callback function to ++ * reset another timer; doing so could hang in certain scenarios. Instead, ++ * the rte_timer_reset() API can be called directly and its return code ++ * can be checked for success or failure. + */ + void + rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks, +@@ -313,6 +319,12 @@ int rte_timer_stop(struct rte_timer *tim); + * + * @param tim + * The timer handle. ++ * ++ * @note ++ * This API should not be called inside a timer's callback function to ++ * stop another timer; doing so could hang in certain scenarios. Instead, the ++ * rte_timer_stop() API can be called directly and its return code can ++ * be checked for success or failure. 
+ */ + void rte_timer_stop_sync(struct rte_timer *tim); + diff --git a/dpdk/lib/librte_vhost/iotlb.c b/dpdk/lib/librte_vhost/iotlb.c -index 4a1d8c1253..07443a94bc 100644 +index 4a1d8c1253..2e90a63bb1 100644 --- a/dpdk/lib/librte_vhost/iotlb.c +++ b/dpdk/lib/librte_vhost/iotlb.c @@ -308,8 +308,9 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index) @@ -38120,11 +77052,45 @@ index 4a1d8c1253..07443a94bc 100644 /* If already created, free it and recreate */ vq->iotlb_pool = rte_mempool_lookup(pool_name); +@@ -320,8 +321,7 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index) + IOTLB_CACHE_SIZE, sizeof(struct vhost_iotlb_entry), 0, + 0, 0, NULL, NULL, NULL, socket, + MEMPOOL_F_NO_CACHE_ALIGN | +- MEMPOOL_F_SP_PUT | +- MEMPOOL_F_SC_GET); ++ MEMPOOL_F_SP_PUT); + if (!vq->iotlb_pool) { + RTE_LOG(ERR, VHOST_CONFIG, + "Failed to create IOTLB cache pool (%s)\n", diff --git a/dpdk/lib/librte_vhost/rte_vhost.h b/dpdk/lib/librte_vhost/rte_vhost.h -index 7b5dc87c2e..532ee0dec7 100644 +index 7b5dc87c2e..53a30a0623 100644 --- a/dpdk/lib/librte_vhost/rte_vhost.h +++ b/dpdk/lib/librte_vhost/rte_vhost.h -@@ -68,6 +68,10 @@ extern "C" { +@@ -35,6 +35,23 @@ extern "C" { + /* support only linear buffers (no chained mbufs) */ + #define RTE_VHOST_USER_LINEARBUF_SUPPORT (1ULL << 6) + ++/* Features. */ ++#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE ++ #define VIRTIO_NET_F_GUEST_ANNOUNCE 21 ++#endif ++ ++#ifndef VIRTIO_NET_F_MQ ++ #define VIRTIO_NET_F_MQ 22 ++#endif ++ ++#ifndef VIRTIO_NET_F_MTU ++ #define VIRTIO_NET_F_MTU 3 ++#endif ++ ++#ifndef VIRTIO_F_ANY_LAYOUT ++ #define VIRTIO_F_ANY_LAYOUT 27 ++#endif ++ + /** Protocol features. */ + #ifndef VHOST_USER_PROTOCOL_F_MQ + #define VHOST_USER_PROTOCOL_F_MQ 0 +@@ -68,6 +85,10 @@ extern "C" { #define VHOST_USER_PROTOCOL_F_PAGEFAULT 8 #endif @@ -38135,7 +77101,7 @@ index 7b5dc87c2e..532ee0dec7 100644 #ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD #define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10 #endif -@@ -85,6 +89,7 @@ extern "C" { +@@ -85,6 +106,7 @@ extern "C" { #define VHOST_USER_F_PROTOCOL_FEATURES 30 #endif @@ -38143,7 +77109,7 @@ index 7b5dc87c2e..532ee0dec7 100644 /** * Information relating to memory regions including offsets to * addresses in QEMUs memory file. -@@ -253,7 +258,7 @@ struct vhost_device_ops { +@@ -253,7 +275,7 @@ struct vhost_device_ops { /** * This callback gets called each time a guest gets notified @@ -38152,8 +77118,25 @@ index 7b5dc87c2e..532ee0dec7 100644 * the eventfd_write(callfd), which can be used for counting these * "slow" syscalls. 
*/ +diff --git a/dpdk/lib/librte_vhost/rte_vhost_crypto.h b/dpdk/lib/librte_vhost/rte_vhost_crypto.h +index d29871c7ea..b54d61db69 100644 +--- a/dpdk/lib/librte_vhost/rte_vhost_crypto.h ++++ b/dpdk/lib/librte_vhost/rte_vhost_crypto.h +@@ -7,9 +7,12 @@ + + #define VHOST_CRYPTO_MBUF_POOL_SIZE (8192) + #define VHOST_CRYPTO_MAX_BURST_SIZE (64) ++#define VHOST_CRYPTO_MAX_DATA_SIZE (4096) + #define VHOST_CRYPTO_SESSION_MAP_ENTRIES (1024) /**< Max nb sessions */ + /** max nb virtual queues in a burst for finalizing*/ + #define VIRTIO_CRYPTO_MAX_NUM_BURST_VQS (64) ++#define VHOST_CRYPTO_MAX_IV_LEN (32) ++#define VHOST_CRYPTO_MAX_N_DESC (32) + + enum rte_vhost_crypto_zero_copy { + RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE = 0, diff --git a/dpdk/lib/librte_vhost/socket.c b/dpdk/lib/librte_vhost/socket.c -index ebb2ff6c28..2461549fea 100644 +index ebb2ff6c28..dc3ee1e99d 100644 --- a/dpdk/lib/librte_vhost/socket.c +++ b/dpdk/lib/librte_vhost/socket.c @@ -127,7 +127,8 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds, @@ -38196,20 +77179,18 @@ index ebb2ff6c28..2461549fea 100644 vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY; vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT; vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT; -@@ -924,6 +926,12 @@ rte_vhost_driver_register(const char *path, uint64_t flags) +@@ -924,6 +926,10 @@ rte_vhost_driver_register(const char *path, uint64_t flags) ret = -1; goto out_mutex; } -+ if ((flags & RTE_VHOST_USER_CLIENT) != 0) { -+ RTE_LOG(ERR, VHOST_CONFIG, -+ "error: zero copy is incompatible with vhost client mode\n"); -+ ret = -1; -+ goto out_mutex; -+ } ++ if ((flags & RTE_VHOST_USER_CLIENT) != 0) ++ RTE_LOG(WARNING, VHOST_CONFIG, ++ "zero copy may be incompatible with vhost client mode\n"); ++ vsocket->supported_features &= ~(1ULL << VIRTIO_F_IN_ORDER); vsocket->features &= ~(1ULL << VIRTIO_F_IN_ORDER); -@@ -1051,9 +1059,10 @@ rte_vhost_driver_unregister(const char *path) +@@ -1051,9 +1057,10 @@ rte_vhost_driver_unregister(const char *path) next = TAILQ_NEXT(conn, next); /* @@ -38223,7 +77204,7 @@ index ebb2ff6c28..2461549fea 100644 */ if (fdset_try_del(&vhost_user.fdset, conn->connfd) == -1) { -@@ -1074,8 +1083,17 @@ rte_vhost_driver_unregister(const char *path) +@@ -1074,8 +1081,17 @@ rte_vhost_driver_unregister(const char *path) pthread_mutex_unlock(&vsocket->conn_mutex); if (vsocket->is_server) { @@ -38244,10 +77225,18 @@ index ebb2ff6c28..2461549fea 100644 unlink(path); } else if (vsocket->reconnect) { diff --git a/dpdk/lib/librte_vhost/vhost.c b/dpdk/lib/librte_vhost/vhost.c -index 1cbe948f74..20fda61518 100644 +index 1cbe948f74..2d5bb2cfde 100644 --- a/dpdk/lib/librte_vhost/vhost.c +++ b/dpdk/lib/librte_vhost/vhost.c -@@ -350,6 +350,57 @@ free_device(struct virtio_net *dev) +@@ -26,6 +26,7 @@ + #include "vhost_user.h" + + struct virtio_net *vhost_devices[MAX_VHOST_DEVICE]; ++pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER; + + /* Called with iotlb_lock read-locked */ + uint64_t +@@ -350,6 +351,57 @@ free_device(struct virtio_net *dev) rte_free(dev); } @@ -38305,7 +77294,7 @@ index 1cbe948f74..20fda61518 100644 static int vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq) { -@@ -388,6 +439,7 @@ vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq) +@@ -388,6 +440,7 @@ vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq) return 0; } @@ -38313,7 +77302,7 @@ index 1cbe948f74..20fda61518 100644 static int 
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) { -@@ -434,6 +486,10 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq) +@@ -434,6 +487,10 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq) if (vring_translate_split(dev, vq) < 0) return -1; } @@ -38324,11 +77313,182 @@ index 1cbe948f74..20fda61518 100644 vq->access_ok = 1; return 0; +@@ -504,22 +561,29 @@ int + alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx) + { + struct vhost_virtqueue *vq; ++ uint32_t i; + +- vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0); +- if (vq == NULL) { +- RTE_LOG(ERR, VHOST_CONFIG, +- "Failed to allocate memory for vring:%u.\n", vring_idx); +- return -1; +- } ++ /* Also allocate holes, if any, up to requested vring index. */ ++ for (i = 0; i <= vring_idx; i++) { ++ if (dev->virtqueue[i]) ++ continue; + +- dev->virtqueue[vring_idx] = vq; +- init_vring_queue(dev, vring_idx); +- rte_spinlock_init(&vq->access_lock); +- vq->avail_wrap_counter = 1; +- vq->used_wrap_counter = 1; +- vq->signalled_used_valid = false; ++ vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0); ++ if (vq == NULL) { ++ RTE_LOG(ERR, VHOST_CONFIG, ++ "Failed to allocate memory for vring:%u.\n", i); ++ return -1; ++ } + +- dev->nr_vring += 1; ++ dev->virtqueue[i] = vq; ++ init_vring_queue(dev, i); ++ rte_spinlock_init(&vq->access_lock); ++ vq->avail_wrap_counter = 1; ++ vq->used_wrap_counter = 1; ++ vq->signalled_used_valid = false; ++ } ++ ++ dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1); + + return 0; + } +@@ -552,6 +616,7 @@ vhost_new_device(void) + struct virtio_net *dev; + int i; + ++ pthread_mutex_lock(&vhost_dev_lock); + for (i = 0; i < MAX_VHOST_DEVICE; i++) { + if (vhost_devices[i] == NULL) + break; +@@ -560,6 +625,7 @@ vhost_new_device(void) + if (i == MAX_VHOST_DEVICE) { + RTE_LOG(ERR, VHOST_CONFIG, + "Failed to find a free slot for new device.\n"); ++ pthread_mutex_unlock(&vhost_dev_lock); + return -1; + } + +@@ -567,10 +633,13 @@ vhost_new_device(void) + if (dev == NULL) { + RTE_LOG(ERR, VHOST_CONFIG, + "Failed to allocate memory for new dev.\n"); ++ pthread_mutex_unlock(&vhost_dev_lock); + return -1; + } + + vhost_devices[i] = dev; ++ pthread_mutex_unlock(&vhost_dev_lock); ++ + dev->vid = i; + dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET; + dev->slave_req_fd = -1; +@@ -1195,7 +1264,12 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id) + if (!dev) + return 0; + ++ if (queue_id >= VHOST_MAX_VRING) ++ return 0; ++ + vq = dev->virtqueue[queue_id]; ++ if (!vq) ++ return 0; + + rte_spinlock_lock(&vq->access_lock); + +@@ -1265,7 +1339,12 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable) + if (!dev) + return -1; + ++ if (queue_id >= VHOST_MAX_VRING) ++ return -1; ++ + vq = dev->virtqueue[queue_id]; ++ if (!vq) ++ return -1; + + rte_spinlock_lock(&vq->access_lock); + +@@ -1376,6 +1455,9 @@ int rte_vhost_get_vring_base(int vid, uint16_t queue_id, + if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL) + return -1; + ++ if (queue_id >= VHOST_MAX_VRING) ++ return -1; ++ + vq = dev->virtqueue[queue_id]; + if (!vq) + return -1; +@@ -1402,6 +1484,9 @@ int rte_vhost_set_vring_base(int vid, uint16_t queue_id, + if (!dev) + return -1; + ++ if (queue_id >= VHOST_MAX_VRING) ++ return -1; ++ + vq = dev->virtqueue[queue_id]; + if (!vq) + return -1; +@@ -1426,15 +1511,23 @@ rte_vhost_get_vring_base_from_inflight(int vid, + uint16_t *last_used_idx) + { + struct rte_vhost_inflight_info_packed *inflight_info; ++ 
struct vhost_virtqueue *vq; + struct virtio_net *dev = get_device(vid); + + if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL) + return -1; + ++ if (queue_id >= VHOST_MAX_VRING) ++ return -1; ++ ++ vq = dev->virtqueue[queue_id]; ++ if (!vq) ++ return -1; ++ + if (!vq_is_packed(dev)) + return -1; + +- inflight_info = dev->virtqueue[queue_id]->inflight_packed; ++ inflight_info = vq->inflight_packed; + if (!inflight_info) + return -1; + diff --git a/dpdk/lib/librte_vhost/vhost.h b/dpdk/lib/librte_vhost/vhost.h -index 9f11b28a31..844904ca3b 100644 +index 9f11b28a31..deeca18f8f 100644 --- a/dpdk/lib/librte_vhost/vhost.h +++ b/dpdk/lib/librte_vhost/vhost.h -@@ -462,14 +462,23 @@ static __rte_always_inline void +@@ -202,26 +202,9 @@ struct vhost_virtqueue { + TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list; + } __rte_cache_aligned; + +-/* Old kernels have no such macros defined */ +-#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE +- #define VIRTIO_NET_F_GUEST_ANNOUNCE 21 +-#endif +- +-#ifndef VIRTIO_NET_F_MQ +- #define VIRTIO_NET_F_MQ 22 +-#endif +- + #define VHOST_MAX_VRING 0x100 + #define VHOST_MAX_QUEUE_PAIRS 0x80 + +-#ifndef VIRTIO_NET_F_MTU +- #define VIRTIO_NET_F_MTU 3 +-#endif +- +-#ifndef VIRTIO_F_ANY_LAYOUT +- #define VIRTIO_F_ANY_LAYOUT 27 +-#endif +- + /* Declare IOMMU related bits for older kernels */ + #ifndef VIRTIO_F_IOMMU_PLATFORM + +@@ -462,14 +445,23 @@ static __rte_always_inline void vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t offset, uint64_t len) { @@ -38354,7 +77514,7 @@ index 9f11b28a31..844904ca3b 100644 } static __rte_always_inline void -@@ -528,7 +537,6 @@ vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -528,7 +520,6 @@ vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, #define PRINT_PACKET(device, addr, size, header) do {} while (0) #endif @@ -38362,7 +77522,7 @@ index 9f11b28a31..844904ca3b 100644 #define MAX_VHOST_DEVICE 1024 extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE]; -@@ -620,6 +628,8 @@ void *vhost_alloc_copy_ind_table(struct virtio_net *dev, +@@ -620,6 +611,8 @@ void *vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t desc_addr, uint64_t desc_len); int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq); @@ -38372,10 +77532,10 @@ index 9f11b28a31..844904ca3b 100644 static __rte_always_inline uint64_t diff --git a/dpdk/lib/librte_vhost/vhost_crypto.c b/dpdk/lib/librte_vhost/vhost_crypto.c -index 684fddc30b..0f9df4059d 100644 +index 684fddc30b..e08f9c6d75 100644 --- a/dpdk/lib/librte_vhost/vhost_crypto.c +++ b/dpdk/lib/librte_vhost/vhost_crypto.c -@@ -40,7 +40,8 @@ +@@ -40,11 +40,20 @@ (1 << VIRTIO_RING_F_EVENT_IDX) | \ (1 << VIRTIO_CRYPTO_SERVICE_CIPHER) | \ (1 << VIRTIO_CRYPTO_SERVICE_MAC) | \ @@ -38385,7 +77545,19 @@ index 684fddc30b..0f9df4059d 100644 #define IOVA_TO_VVA(t, r, a, l, p) \ ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p)) -@@ -237,6 +238,11 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform, + ++/* ++ * vhost_crypto_desc is used to copy original vring_desc to the local buffer ++ * before processing (except the next index). The copy result will be an ++ * array of vhost_crypto_desc elements that follows the sequence of original ++ * vring_desc.next is arranged. 
++ */ ++#define vhost_crypto_desc vring_desc ++ + static int + cipher_algo_transform(uint32_t virtio_cipher_algo, + enum rte_crypto_cipher_algorithm *algo) +@@ -237,6 +246,11 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform, if (unlikely(ret < 0)) return ret; @@ -38397,7 +77569,7 @@ index 684fddc30b..0f9df4059d 100644 xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; xform->cipher.key.length = param->cipher_key_len; if (xform->cipher.key.length > 0) -@@ -287,6 +293,12 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, +@@ -287,6 +301,12 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, &xform_cipher->cipher.algo); if (unlikely(ret < 0)) return ret; @@ -38410,7 +77582,7 @@ index 684fddc30b..0f9df4059d 100644 xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER; xform_cipher->cipher.key.length = param->cipher_key_len; xform_cipher->cipher.key.data = param->cipher_key_buf; -@@ -301,6 +313,12 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, +@@ -301,6 +321,12 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo); if (unlikely(ret < 0)) return ret; @@ -38423,7 +77595,810 @@ index 684fddc30b..0f9df4059d 100644 xform_auth->auth.digest_length = param->digest_len; xform_auth->auth.key.length = param->auth_key_len; xform_auth->auth.key.data = param->auth_key_buf; -@@ -1539,18 +1557,18 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, +@@ -461,82 +487,71 @@ vhost_crypto_msg_post_handler(int vid, void *msg) + return ret; + } + +-static __rte_always_inline struct vring_desc * +-find_write_desc(struct vring_desc *head, struct vring_desc *desc, +- uint32_t *nb_descs, uint32_t vq_size) ++static __rte_always_inline struct vhost_crypto_desc * ++find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc, ++ uint32_t max_n_descs) + { +- if (desc->flags & VRING_DESC_F_WRITE) +- return desc; +- +- while (desc->flags & VRING_DESC_F_NEXT) { +- if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) +- return NULL; +- (*nb_descs)--; ++ if (desc < head) ++ return NULL; + +- desc = &head[desc->next]; ++ while (desc - head < (int)max_n_descs) { + if (desc->flags & VRING_DESC_F_WRITE) + return desc; ++ desc++; + } + + return NULL; + } + +-static struct virtio_crypto_inhdr * +-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc, +- uint32_t *nb_descs, uint32_t vq_size) ++static __rte_always_inline struct virtio_crypto_inhdr * ++reach_inhdr(struct vhost_crypto_data_req *vc_req, ++ struct vhost_crypto_desc *head, ++ uint32_t max_n_descs) + { +- uint64_t dlen; + struct virtio_crypto_inhdr *inhdr; ++ struct vhost_crypto_desc *last = head + (max_n_descs - 1); ++ uint64_t dlen = last->len; + +- while (desc->flags & VRING_DESC_F_NEXT) { +- if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) +- return NULL; +- (*nb_descs)--; +- desc = &vc_req->head[desc->next]; +- } ++ if (unlikely(dlen != sizeof(*inhdr))) ++ return NULL; + +- dlen = desc->len; +- inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr, ++ inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr, + &dlen, VHOST_ACCESS_WO); +- if (unlikely(!inhdr || dlen != desc->len)) ++ if (unlikely(!inhdr || dlen != last->len)) + return NULL; + + return inhdr; + } + + static __rte_always_inline int +-move_desc(struct vring_desc *head, struct vring_desc **cur_desc, +- uint32_t size, uint32_t *nb_descs, uint32_t vq_size) ++move_desc(struct vhost_crypto_desc *head, ++ struct 
vhost_crypto_desc **cur_desc, ++ uint32_t size, uint32_t max_n_descs) + { +- struct vring_desc *desc = *cur_desc; ++ struct vhost_crypto_desc *desc = *cur_desc; + int left = size - desc->len; + +- while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) { +- (*nb_descs)--; +- if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) +- return -1; +- +- desc = &head[desc->next]; +- rte_prefetch0(&head[desc->next]); ++ while (desc->flags & VRING_DESC_F_NEXT && left > 0 && ++ desc >= head && ++ desc - head < (int)max_n_descs) { ++ desc++; + left -= desc->len; + } + + if (unlikely(left > 0)) + return -1; + +- if (unlikely(*nb_descs == 0)) ++ if (unlikely(head - desc == (int)max_n_descs)) + *cur_desc = NULL; +- else { +- if (unlikely(desc->next >= vq_size)) +- return -1; +- *cur_desc = &head[desc->next]; +- } ++ else ++ *cur_desc = desc + 1; + + return 0; + } + + static __rte_always_inline void * +-get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc, ++get_data_ptr(struct vhost_crypto_data_req *vc_req, ++ struct vhost_crypto_desc *cur_desc, + uint8_t perm) + { + void *data; +@@ -551,12 +566,13 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc, + return data; + } + +-static int ++static __rte_always_inline int + copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, +- struct vring_desc **cur_desc, uint32_t size, +- uint32_t *nb_descs, uint32_t vq_size) ++ struct vhost_crypto_desc *head, ++ struct vhost_crypto_desc **cur_desc, ++ uint32_t size, uint32_t max_n_descs) + { +- struct vring_desc *desc = *cur_desc; ++ struct vhost_crypto_desc *desc = *cur_desc; + uint64_t remain, addr, dlen, len; + uint32_t to_copy; + uint8_t *data = dst_data; +@@ -595,17 +611,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, + + left -= to_copy; + +- while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) { +- if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) { +- VC_LOG_ERR("Invalid descriptors"); +- return -1; +- } +- (*nb_descs)--; +- +- desc = &vc_req->head[desc->next]; +- rte_prefetch0(&vc_req->head[desc->next]); ++ while (desc >= head && desc - head < (int)max_n_descs && left) { ++ desc++; + to_copy = RTE_MIN(desc->len, (uint32_t)left); +- dlen = desc->len; ++ dlen = to_copy; + src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, + VHOST_ACCESS_RO); + if (unlikely(!src || !dlen)) { +@@ -644,13 +653,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, + return -1; + } + +- if (unlikely(*nb_descs == 0)) ++ if (unlikely(desc - head == (int)max_n_descs)) + *cur_desc = NULL; +- else { +- if (unlikely(desc->next >= vq_size)) +- return -1; +- *cur_desc = &vc_req->head[desc->next]; +- } ++ else ++ *cur_desc = desc + 1; + + return 0; + } +@@ -662,6 +668,7 @@ write_back_data(struct vhost_crypto_data_req *vc_req) + + while (wb_data) { + rte_memcpy(wb_data->dst, wb_data->src, wb_data->len); ++ memset(wb_data->src, 0, wb_data->len); + wb_last = wb_data; + wb_data = wb_data->next; + rte_mempool_put(vc_req->wb_pool, wb_last); +@@ -703,17 +710,18 @@ free_wb_data(struct vhost_crypto_writeback_data *wb_data, + * @return + * The pointer to the start of the write back data linked list. 
+ */ +-static struct vhost_crypto_writeback_data * ++static __rte_always_inline struct vhost_crypto_writeback_data * + prepare_write_back_data(struct vhost_crypto_data_req *vc_req, +- struct vring_desc **cur_desc, ++ struct vhost_crypto_desc *head_desc, ++ struct vhost_crypto_desc **cur_desc, + struct vhost_crypto_writeback_data **end_wb_data, + uint8_t *src, + uint32_t offset, + uint64_t write_back_len, +- uint32_t *nb_descs, uint32_t vq_size) ++ uint32_t max_n_descs) + { + struct vhost_crypto_writeback_data *wb_data, *head; +- struct vring_desc *desc = *cur_desc; ++ struct vhost_crypto_desc *desc = *cur_desc; + uint64_t dlen; + uint8_t *dst; + int ret; +@@ -730,14 +738,14 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req, + wb_data->src = src + offset; + dlen = desc->len; + dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, +- &dlen, VHOST_ACCESS_RW) + offset; ++ &dlen, VHOST_ACCESS_RW); + if (unlikely(!dst || dlen != desc->len)) { + VC_LOG_ERR("Failed to map descriptor"); + goto error_exit; + } + +- wb_data->dst = dst; +- wb_data->len = desc->len - offset; ++ wb_data->dst = dst + offset; ++ wb_data->len = RTE_MIN(dlen - offset, write_back_len); + write_back_len -= wb_data->len; + src += offset + wb_data->len; + offset = 0; +@@ -756,14 +764,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req, + } else + offset -= desc->len; + +- while (write_back_len) { +- if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) { +- VC_LOG_ERR("Invalid descriptors"); +- goto error_exit; +- } +- (*nb_descs)--; +- +- desc = &vc_req->head[desc->next]; ++ while (write_back_len && ++ desc >= head_desc && ++ desc - head_desc < (int)max_n_descs) { ++ desc++; + if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) { + VC_LOG_ERR("incorrect descriptor"); + goto error_exit; +@@ -782,7 +786,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req, + goto error_exit; + } + +- wb_data->src = src; ++ wb_data->src = src + offset; + wb_data->dst = dst; + wb_data->len = RTE_MIN(desc->len - offset, write_back_len); + write_back_len -= wb_data->len; +@@ -802,13 +806,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req, + wb_data->next = NULL; + } + +- if (unlikely(*nb_descs == 0)) ++ if (unlikely(desc - head_desc == (int)max_n_descs)) + *cur_desc = NULL; +- else { +- if (unlikely(desc->next >= vq_size)) +- goto error_exit; +- *cur_desc = &vc_req->head[desc->next]; +- } ++ else ++ *cur_desc = desc + 1; + + *end_wb_data = wb_data; + +@@ -821,31 +822,44 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req, + return NULL; + } + +-static uint8_t ++static __rte_always_inline uint8_t ++vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req) ++{ ++ if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) && ++ (req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) && ++ (req->para.dst_data_len >= req->para.src_data_len) && ++ (req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE))) ++ return VIRTIO_CRYPTO_OK; ++ return VIRTIO_CRYPTO_BADMSG; ++} ++ ++static __rte_always_inline uint8_t + prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + struct vhost_crypto_data_req *vc_req, + struct virtio_crypto_cipher_data_req *cipher, +- struct vring_desc *cur_desc, +- uint32_t *nb_descs, uint32_t vq_size) ++ struct vhost_crypto_desc *head, ++ uint32_t max_n_descs) + { +- struct vring_desc *desc = cur_desc; ++ struct vhost_crypto_desc *desc = head; + struct vhost_crypto_writeback_data *ewb = NULL; + struct rte_mbuf *m_src = op->sym->m_src, 
*m_dst = op->sym->m_dst; + uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); +- uint8_t ret = 0; ++ uint8_t ret = vhost_crypto_check_cipher_request(cipher); ++ ++ if (unlikely(ret != VIRTIO_CRYPTO_OK)) ++ goto error_exit; + + /* prepare */ + /* iv */ +- if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len, +- nb_descs, vq_size) < 0)) { ++ if (unlikely(copy_data(iv_data, vc_req, head, &desc, ++ cipher->para.iv_len, max_n_descs))) { + ret = VIRTIO_CRYPTO_BADMSG; + goto error_exit; + } + +- m_src->data_len = cipher->para.src_data_len; +- + switch (vcrypto->option) { + case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: ++ m_src->data_len = cipher->para.src_data_len; + m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, + cipher->para.src_data_len); + m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); +@@ -856,9 +870,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + goto error_exit; + } + +- if (unlikely(move_desc(vc_req->head, &desc, +- cipher->para.src_data_len, nb_descs, +- vq_size) < 0)) { ++ if (unlikely(move_desc(head, &desc, cipher->para.src_data_len, ++ max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; +@@ -867,16 +880,10 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + break; + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + vc_req->wb_pool = vcrypto->wb_pool; +- +- if (unlikely(cipher->para.src_data_len > +- RTE_MBUF_DEFAULT_BUF_SIZE)) { +- VC_LOG_ERR("Not enough space to do data copy"); +- ret = VIRTIO_CRYPTO_ERR; +- goto error_exit; +- } ++ m_src->data_len = cipher->para.src_data_len; + if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), +- vc_req, &desc, cipher->para.src_data_len, +- nb_descs, vq_size) < 0)) { ++ vc_req, head, &desc, cipher->para.src_data_len, ++ max_n_descs) < 0)) { + ret = VIRTIO_CRYPTO_BADMSG; + goto error_exit; + } +@@ -887,7 +894,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + } + + /* dst */ +- desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size); ++ desc = find_write_desc(head, desc, max_n_descs); + if (unlikely(!desc)) { + VC_LOG_ERR("Cannot find write location"); + ret = VIRTIO_CRYPTO_BADMSG; +@@ -905,9 +912,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + goto error_exit; + } + +- if (unlikely(move_desc(vc_req->head, &desc, +- cipher->para.dst_data_len, +- nb_descs, vq_size) < 0)) { ++ if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len, ++ max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; +@@ -916,9 +922,9 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + m_dst->data_len = cipher->para.dst_data_len; + break; + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: +- vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb, ++ vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb, + rte_pktmbuf_mtod(m_src, uint8_t *), 0, +- cipher->para.dst_data_len, nb_descs, vq_size); ++ cipher->para.dst_data_len, max_n_descs); + if (unlikely(vc_req->wb == NULL)) { + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; +@@ -956,33 +962,58 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + return ret; + } + +-static uint8_t ++static __rte_always_inline uint8_t ++vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req) ++{ ++ if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) && ++ 
(req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) && ++ (req->para.dst_data_len >= req->para.src_data_len) && ++ (req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) && ++ (req->para.cipher_start_src_offset < ++ VHOST_CRYPTO_MAX_DATA_SIZE) && ++ (req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) && ++ (req->para.hash_start_src_offset < ++ VHOST_CRYPTO_MAX_DATA_SIZE) && ++ (req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) && ++ (req->para.cipher_start_src_offset + req->para.len_to_cipher <= ++ req->para.src_data_len) && ++ (req->para.hash_start_src_offset + req->para.len_to_hash <= ++ req->para.src_data_len) && ++ (req->para.dst_data_len + req->para.hash_result_len <= ++ VHOST_CRYPTO_MAX_DATA_SIZE))) ++ return VIRTIO_CRYPTO_OK; ++ return VIRTIO_CRYPTO_BADMSG; ++} ++ ++static __rte_always_inline uint8_t + prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + struct vhost_crypto_data_req *vc_req, + struct virtio_crypto_alg_chain_data_req *chain, +- struct vring_desc *cur_desc, +- uint32_t *nb_descs, uint32_t vq_size) ++ struct vhost_crypto_desc *head, ++ uint32_t max_n_descs) + { +- struct vring_desc *desc = cur_desc, *digest_desc; ++ struct vhost_crypto_desc *desc = head, *digest_desc; + struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL; + struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst; + uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); + uint32_t digest_offset; + void *digest_addr; +- uint8_t ret = 0; ++ uint8_t ret = vhost_crypto_check_chain_request(chain); ++ ++ if (unlikely(ret != VIRTIO_CRYPTO_OK)) ++ goto error_exit; + + /* prepare */ + /* iv */ +- if (unlikely(copy_data(iv_data, vc_req, &desc, +- chain->para.iv_len, nb_descs, vq_size) < 0)) { ++ if (unlikely(copy_data(iv_data, vc_req, head, &desc, ++ chain->para.iv_len, max_n_descs) < 0)) { + ret = VIRTIO_CRYPTO_BADMSG; + goto error_exit; + } + +- m_src->data_len = chain->para.src_data_len; +- + switch (vcrypto->option) { + case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: ++ m_src->data_len = chain->para.src_data_len; + m_dst->data_len = chain->para.dst_data_len; + + m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, +@@ -994,9 +1025,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + goto error_exit; + } + +- if (unlikely(move_desc(vc_req->head, &desc, +- chain->para.src_data_len, +- nb_descs, vq_size) < 0)) { ++ if (unlikely(move_desc(head, &desc, chain->para.src_data_len, ++ max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; +@@ -1004,16 +1034,10 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + break; + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + vc_req->wb_pool = vcrypto->wb_pool; +- +- if (unlikely(chain->para.src_data_len > +- RTE_MBUF_DEFAULT_BUF_SIZE)) { +- VC_LOG_ERR("Not enough space to do data copy"); +- ret = VIRTIO_CRYPTO_ERR; +- goto error_exit; +- } ++ m_src->data_len = chain->para.src_data_len; + if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), +- vc_req, &desc, chain->para.src_data_len, +- nb_descs, vq_size) < 0)) { ++ vc_req, head, &desc, chain->para.src_data_len, ++ max_n_descs) < 0)) { + ret = VIRTIO_CRYPTO_BADMSG; + goto error_exit; + } +@@ -1025,7 +1049,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + } + + /* dst */ +- desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size); ++ desc = find_write_desc(head, desc, max_n_descs); + if (unlikely(!desc)) { + 
VC_LOG_ERR("Cannot find write location"); + ret = VIRTIO_CRYPTO_BADMSG; +@@ -1044,8 +1068,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + } + + if (unlikely(move_desc(vc_req->head, &desc, +- chain->para.dst_data_len, +- nb_descs, vq_size) < 0)) { ++ chain->para.dst_data_len, max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; +@@ -1061,9 +1084,9 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + goto error_exit; + } + +- if (unlikely(move_desc(vc_req->head, &desc, ++ if (unlikely(move_desc(head, &desc, + chain->para.hash_result_len, +- nb_descs, vq_size) < 0)) { ++ max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; +@@ -1071,34 +1094,34 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + + break; + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: +- vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb, ++ vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb, + rte_pktmbuf_mtod(m_src, uint8_t *), + chain->para.cipher_start_src_offset, + chain->para.dst_data_len - +- chain->para.cipher_start_src_offset, +- nb_descs, vq_size); ++ chain->para.cipher_start_src_offset, ++ max_n_descs); + if (unlikely(vc_req->wb == NULL)) { + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + ++ digest_desc = desc; + digest_offset = m_src->data_len; + digest_addr = rte_pktmbuf_mtod_offset(m_src, void *, + digest_offset); +- digest_desc = desc; + + /** create a wb_data for digest */ +- ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2, +- digest_addr, 0, chain->para.hash_result_len, +- nb_descs, vq_size); ++ ewb->next = prepare_write_back_data(vc_req, head, &desc, ++ &ewb2, digest_addr, 0, ++ chain->para.hash_result_len, max_n_descs); + if (unlikely(ewb->next == NULL)) { + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + +- if (unlikely(copy_data(digest_addr, vc_req, &digest_desc, ++ if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc, + chain->para.hash_result_len, +- nb_descs, vq_size) < 0)) { ++ max_n_descs) < 0)) { + ret = VIRTIO_CRYPTO_BADMSG; + goto error_exit; + } +@@ -1148,74 +1171,103 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, + static __rte_always_inline int + vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, + struct vhost_virtqueue *vq, struct rte_crypto_op *op, +- struct vring_desc *head, uint16_t desc_idx) ++ struct vring_desc *head, struct vhost_crypto_desc *descs, ++ uint16_t desc_idx) + { + struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src); + struct rte_cryptodev_sym_session *session; +- struct virtio_crypto_op_data_req *req, tmp_req; ++ struct virtio_crypto_op_data_req req; + struct virtio_crypto_inhdr *inhdr; +- struct vring_desc *desc = NULL; ++ struct vhost_crypto_desc *desc = descs; ++ struct vring_desc *src_desc; + uint64_t session_id; + uint64_t dlen; +- uint32_t nb_descs = vq->size; +- int err = 0; ++ uint32_t nb_descs = 0, max_n_descs, i; ++ int err; + + vc_req->desc_idx = desc_idx; + vc_req->dev = vcrypto->dev; + vc_req->vq = vq; + +- if (likely(head->flags & VRING_DESC_F_INDIRECT)) { +- dlen = head->len; +- nb_descs = dlen / sizeof(struct vring_desc); +- /* drop invalid descriptors */ +- if (unlikely(nb_descs > vq->size)) +- return -1; +- desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr, +- &dlen, VHOST_ACCESS_RO); +- if (unlikely(!desc || dlen != head->len)) +- return -1; +- desc_idx = 
0; +- head = desc; +- } else { +- desc = head; ++ if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) { ++ VC_LOG_ERR("Invalid descriptor"); ++ return -1; + } + +- vc_req->head = head; +- vc_req->zero_copy = vcrypto->option; ++ dlen = head->len; ++ src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr, ++ &dlen, VHOST_ACCESS_RO); ++ if (unlikely(!src_desc || dlen != head->len)) { ++ VC_LOG_ERR("Invalid descriptor"); ++ return -1; ++ } ++ head = src_desc; ++ ++ nb_descs = max_n_descs = dlen / sizeof(struct vring_desc); ++ if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) { ++ err = VIRTIO_CRYPTO_ERR; ++ VC_LOG_ERR("Cannot process num of descriptors %u", nb_descs); ++ if (nb_descs > 0) { ++ struct vring_desc *inhdr_desc = head; ++ while (inhdr_desc->flags & VRING_DESC_F_NEXT) { ++ if (inhdr_desc->next >= max_n_descs) ++ return -1; ++ inhdr_desc = &head[inhdr_desc->next]; ++ } ++ if (inhdr_desc->len != sizeof(*inhdr)) ++ return -1; ++ inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, ++ vc_req, inhdr_desc->addr, &dlen, ++ VHOST_ACCESS_WO); ++ if (unlikely(!inhdr || dlen != inhdr_desc->len)) ++ return -1; ++ inhdr->status = VIRTIO_CRYPTO_ERR; ++ return -1; ++ } ++ } + +- req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); +- if (unlikely(req == NULL)) { +- switch (vcrypto->option) { +- case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: ++ /* copy descriptors to local variable */ ++ for (i = 0; i < max_n_descs; i++) { ++ desc->addr = src_desc->addr; ++ desc->len = src_desc->len; ++ desc->flags = src_desc->flags; ++ desc++; ++ if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0)) ++ break; ++ if (unlikely(src_desc->next >= max_n_descs)) { + err = VIRTIO_CRYPTO_BADMSG; + VC_LOG_ERR("Invalid descriptor"); + goto error_exit; +- case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: +- req = &tmp_req; +- if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req), +- &nb_descs, vq->size) < 0)) { +- err = VIRTIO_CRYPTO_BADMSG; +- VC_LOG_ERR("Invalid descriptor"); +- goto error_exit; +- } +- break; +- default: +- err = VIRTIO_CRYPTO_ERR; +- VC_LOG_ERR("Invalid option"); +- goto error_exit; +- } +- } else { +- if (unlikely(move_desc(vc_req->head, &desc, +- sizeof(*req), &nb_descs, vq->size) < 0)) { +- VC_LOG_ERR("Incorrect descriptor"); +- goto error_exit; + } ++ src_desc = &head[src_desc->next]; ++ } ++ ++ vc_req->head = head; ++ vc_req->zero_copy = vcrypto->option; ++ ++ nb_descs = desc - descs; ++ desc = descs; ++ ++ if (unlikely(desc->len < sizeof(req))) { ++ err = VIRTIO_CRYPTO_BADMSG; ++ VC_LOG_ERR("Invalid descriptor"); ++ goto error_exit; + } + +- switch (req->header.opcode) { ++ if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req), ++ max_n_descs) < 0)) { ++ err = VIRTIO_CRYPTO_BADMSG; ++ VC_LOG_ERR("Invalid descriptor"); ++ goto error_exit; ++ } ++ ++ /* desc is advanced by 1 now */ ++ max_n_descs -= 1; ++ ++ switch (req.header.opcode) { + case VIRTIO_CRYPTO_CIPHER_ENCRYPT: + case VIRTIO_CRYPTO_CIPHER_DECRYPT: +- session_id = req->header.session_id; ++ session_id = req.header.session_id; + + /* one branch to avoid unnecessary table lookup */ + if (vcrypto->cache_session_id != session_id) { +@@ -1241,19 +1293,19 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, + goto error_exit; + } + +- switch (req->u.sym_req.op_type) { ++ switch (req.u.sym_req.op_type) { + case VIRTIO_CRYPTO_SYM_OP_NONE: + err = VIRTIO_CRYPTO_NOTSUPP; + break; + case VIRTIO_CRYPTO_SYM_OP_CIPHER: + err = prepare_sym_cipher_op(vcrypto, op, vc_req, +- &req->u.sym_req.u.cipher, desc, +- &nb_descs, 
vq->size); ++ &req.u.sym_req.u.cipher, desc, ++ max_n_descs); + break; + case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING: + err = prepare_sym_chain_op(vcrypto, op, vc_req, +- &req->u.sym_req.u.chain, desc, +- &nb_descs, vq->size); ++ &req.u.sym_req.u.chain, desc, ++ max_n_descs); + break; + } + if (unlikely(err != 0)) { +@@ -1262,8 +1314,9 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, + } + break; + default: ++ err = VIRTIO_CRYPTO_ERR; + VC_LOG_ERR("Unsupported symmetric crypto request type %u", +- req->header.opcode); ++ req.header.opcode); + goto error_exit; + } + +@@ -1271,7 +1324,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, + + error_exit: + +- inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size); ++ inhdr = reach_inhdr(vc_req, descs, max_n_descs); + if (likely(inhdr != NULL)) + inhdr->status = (uint8_t)err; + +@@ -1285,17 +1338,16 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, + struct rte_mbuf *m_src = op->sym->m_src; + struct rte_mbuf *m_dst = op->sym->m_dst; + struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src); +- uint16_t desc_idx; ++ struct vhost_virtqueue *vq = vc_req->vq; ++ uint16_t used_idx = vc_req->desc_idx, desc_idx; + + if (unlikely(!vc_req)) { + VC_LOG_ERR("Failed to retrieve vc_req"); + return NULL; + } + +- if (old_vq && (vc_req->vq != old_vq)) +- return vc_req->vq; +- +- desc_idx = vc_req->desc_idx; ++ if (old_vq && (vq != old_vq)) ++ return vq; + + if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) + vc_req->inhdr->status = VIRTIO_CRYPTO_ERR; +@@ -1304,8 +1356,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, + write_back_data(vc_req); + } + +- vc_req->vq->used->ring[desc_idx].id = desc_idx; +- vc_req->vq->used->ring[desc_idx].len = vc_req->len; ++ desc_idx = vq->avail->ring[used_idx]; ++ vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx]; ++ vq->used->ring[desc_idx].len = vc_req->len; + + rte_mempool_put(m_src->pool, (void *)m_src); + +@@ -1403,7 +1456,7 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, + vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name, + VHOST_CRYPTO_MBUF_POOL_SIZE, 512, + sizeof(struct vhost_crypto_data_req), +- RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM, ++ VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM, + rte_socket_id()); + if (!vcrypto->mbuf_pool) { + VC_LOG_ERR("Failed to creath mbuf pool"); +@@ -1529,6 +1582,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, + struct rte_crypto_op **ops, uint16_t nb_ops) + { + struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2]; ++ struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC]; + struct virtio_net *dev = get_device(vid); + struct vhost_crypto *vcrypto; + struct vhost_virtqueue *vq; +@@ -1539,18 +1593,18 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, if (unlikely(dev == NULL)) { VC_LOG_ERR("Invalid vid %i", vid); @@ -38445,7 +78420,7 @@ index 684fddc30b..0f9df4059d 100644 } vq = dev->virtqueue[qid]; -@@ -1572,7 +1590,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, +@@ -1572,7 +1626,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs, count * 2) < 0)) { VC_LOG_ERR("Insufficient memory"); @@ -38454,7 +78429,16 @@ index 684fddc30b..0f9df4059d 100644 } for (i = 0; i < count; i++) { -@@ -1602,7 +1620,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, +@@ -1587,7 +1641,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, + op->sym->m_dst->data_off = 0; + + if 
(unlikely(vhost_crypto_process_one_req(vcrypto, vq, +- op, head, desc_idx) < 0)) ++ op, head, descs, used_idx) < 0)) + break; + } + +@@ -1602,7 +1656,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs, count) < 0)) { VC_LOG_ERR("Insufficient memory"); @@ -38463,11 +78447,38 @@ index 684fddc30b..0f9df4059d 100644 } for (i = 0; i < count; i++) { +@@ -1616,7 +1670,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, + op->sym->m_src->data_off = 0; + + if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, +- op, head, desc_idx) < 0)) ++ op, head, descs, desc_idx) < 0)) + break; + } + diff --git a/dpdk/lib/librte_vhost/vhost_user.c b/dpdk/lib/librte_vhost/vhost_user.c -index 0cfb8b792b..31080be2bd 100644 +index 0cfb8b792b..dc1e312d84 100644 --- a/dpdk/lib/librte_vhost/vhost_user.c +++ b/dpdk/lib/librte_vhost/vhost_user.c -@@ -206,7 +206,7 @@ vhost_backend_cleanup(struct virtio_net *dev) +@@ -97,8 +97,15 @@ close_msg_fds(struct VhostUserMsg *msg) + { + int i; + +- for (i = 0; i < msg->fd_num; i++) +- close(msg->fds[i]); ++ for (i = 0; i < msg->fd_num; i++) { ++ int fd = msg->fds[i]; ++ ++ if (fd == -1) ++ continue; ++ ++ msg->fds[i] = -1; ++ close(fd); ++ } + } + + /* +@@ -206,7 +213,7 @@ vhost_backend_cleanup(struct virtio_net *dev) dev->inflight_info->addr = NULL; } @@ -38476,7 +78487,18 @@ index 0cfb8b792b..31080be2bd 100644 close(dev->inflight_info->fd); dev->inflight_info->fd = -1; } -@@ -656,13 +656,11 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -350,7 +357,9 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg, + + dev->features = features; + if (dev->features & +- ((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) { ++ ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | ++ (1ULL << VIRTIO_F_VERSION_1) | ++ (1ULL << VIRTIO_F_RING_PACKED))) { + dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf); + } else { + dev->vhost_hlen = sizeof(struct virtio_net_hdr); +@@ -656,13 +665,11 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, { if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) { uint64_t vva; @@ -38493,7 +78515,7 @@ index 0cfb8b792b..31080be2bd 100644 return vva; } -@@ -670,37 +668,16 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -670,37 +677,16 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, return qva_to_vva(dev, ra, size); } @@ -38537,7 +78559,7 @@ index 0cfb8b792b..31080be2bd 100644 } static struct virtio_net * -@@ -712,7 +689,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index) +@@ -712,7 +698,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index) if (addr->flags & (1 << VHOST_VRING_F_LOG)) { vq->log_guest_addr = @@ -38546,8 +78568,79 @@ index 0cfb8b792b..31080be2bd 100644 if (vq->log_guest_addr == 0) { RTE_LOG(DEBUG, VHOST_CONFIG, "(%d) failed to map log_guest_addr.\n", -@@ -1145,6 +1122,21 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, - goto err_mmap; +@@ -1053,7 +1039,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + uint64_t alignment; + uint32_t i; + int populate; +- int fd; + + if (validate_msg_fds(msg, memory->nregions) != 0) + return RTE_VHOST_MSG_RESULT_ERR; +@@ -1061,7 +1046,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) { + RTE_LOG(ERR, VHOST_CONFIG, + "too many memory 
regions (%u)\n", memory->nregions); +- return RTE_VHOST_MSG_RESULT_ERR; ++ goto close_msg_fds; + } + + if (dev->mem && !vhost_memory_changed(memory, dev->mem)) { +@@ -1094,7 +1079,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + "(%d) failed to allocate memory " + "for dev->guest_pages\n", + dev->vid); +- return RTE_VHOST_MSG_RESULT_ERR; ++ goto close_msg_fds; + } + } + +@@ -1104,18 +1089,23 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + RTE_LOG(ERR, VHOST_CONFIG, + "(%d) failed to allocate memory for dev->mem\n", + dev->vid); +- return RTE_VHOST_MSG_RESULT_ERR; ++ goto free_guest_pages; + } + dev->mem->nregions = memory->nregions; + + for (i = 0; i < memory->nregions; i++) { +- fd = msg->fds[i]; + reg = &dev->mem->regions[i]; + + reg->guest_phys_addr = memory->regions[i].guest_phys_addr; + reg->guest_user_addr = memory->regions[i].userspace_addr; + reg->size = memory->regions[i].memory_size; +- reg->fd = fd; ++ reg->fd = msg->fds[i]; ++ ++ /* ++ * Assign invalid file descriptor value to avoid double ++ * closing on error path. ++ */ ++ msg->fds[i] = -1; + + mmap_offset = memory->regions[i].mmap_offset; + +@@ -1125,7 +1115,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + "mmap_offset (%#"PRIx64") and memory_size " + "(%#"PRIx64") overflow\n", + mmap_offset, reg->size); +- goto err_mmap; ++ goto free_mem_table; + } + + mmap_size = reg->size + mmap_offset; +@@ -1138,22 +1128,37 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + * to avoid failure, make sure in caller to keep length + * aligned. + */ +- alignment = get_blk_size(fd); ++ alignment = get_blk_size(reg->fd); + if (alignment == (uint64_t)-1) { + RTE_LOG(ERR, VHOST_CONFIG, + "couldn't get hugepage size through fstat\n"); +- goto err_mmap; ++ goto free_mem_table; } mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment); + if (mmap_size == 0) { @@ -38563,12 +78656,97 @@ index 0cfb8b792b..31080be2bd 100644 + RTE_LOG(ERR, VHOST_CONFIG, "mmap size (0x%" PRIx64 ") " + "or alignment (0x%" PRIx64 ") is invalid\n", + reg->size + mmap_offset, alignment); -+ goto err_mmap; ++ goto free_mem_table; + } populate = (dev->dequeue_zero_copy) ? 
MAP_POPULATE : 0; mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, -@@ -1298,7 +1290,8 @@ vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) +- MAP_SHARED | populate, fd, 0); ++ MAP_SHARED | populate, reg->fd, 0); + + if (mmap_addr == MAP_FAILED) { + RTE_LOG(ERR, VHOST_CONFIG, + "mmap region %u failed.\n", i); +- goto err_mmap; ++ goto free_mem_table; + } + + reg->mmap_addr = mmap_addr; +@@ -1166,7 +1171,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + RTE_LOG(ERR, VHOST_CONFIG, + "adding guest pages to region %u failed.\n", + i); +- goto err_mmap; ++ goto free_mem_table; + } + + RTE_LOG(INFO, VHOST_CONFIG, +@@ -1209,17 +1214,17 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + if (read_vhost_message(main_fd, &ack_msg) <= 0) { + RTE_LOG(ERR, VHOST_CONFIG, + "Failed to read qemu ack on postcopy set-mem-table\n"); +- goto err_mmap; ++ goto free_mem_table; + } + + if (validate_msg_fds(&ack_msg, 0) != 0) +- goto err_mmap; ++ goto free_mem_table; + + if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) { + RTE_LOG(ERR, VHOST_CONFIG, + "Bad qemu ack on postcopy set-mem-table (%d)\n", + ack_msg.request.master); +- goto err_mmap; ++ goto free_mem_table; + } + + /* Now userfault register and we can use the memory */ +@@ -1243,7 +1248,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + "Failed to register ufd for region %d: (ufd = %d) %s\n", + i, dev->postcopy_ufd, + strerror(errno)); +- goto err_mmap; ++ goto free_mem_table; + } + RTE_LOG(INFO, VHOST_CONFIG, + "\t userfaultfd registered for range : " +@@ -1252,7 +1257,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + (uint64_t)reg_struct.range.start + + (uint64_t)reg_struct.range.len - 1); + #else +- goto err_mmap; ++ goto free_mem_table; + #endif + } + } +@@ -1271,7 +1276,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + dev = translate_ring_addresses(dev, i); + if (!dev) { + dev = *pdev; +- goto err_mmap; ++ goto free_mem_table; + } + + *pdev = dev; +@@ -1282,10 +1287,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, + + return RTE_VHOST_MSG_RESULT_OK; + +-err_mmap: ++free_mem_table: + free_mem_region(dev); + rte_free(dev->mem); + dev->mem = NULL; ++free_guest_pages: ++ rte_free(dev->guest_pages); ++ dev->guest_pages = NULL; ++close_msg_fds: ++ close_msg_fds(msg); + return RTE_VHOST_MSG_RESULT_ERR; + } + +@@ -1298,7 +1308,8 @@ vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) return false; if (vq_is_packed(dev)) @@ -38578,7 +78756,7 @@ index 0cfb8b792b..31080be2bd 100644 else rings_ok = vq->desc && vq->avail && vq->used; -@@ -1415,6 +1408,7 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev, +@@ -1415,6 +1426,7 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev, "failed to alloc dev inflight area\n"); return RTE_VHOST_MSG_RESULT_ERR; } @@ -38586,7 +78764,7 @@ index 0cfb8b792b..31080be2bd 100644 } num_queues = msg->payload.inflight.num_queues; -@@ -1440,6 +1434,16 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev, +@@ -1440,6 +1452,16 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev, } memset(addr, 0, mmap_size); @@ -38603,7 +78781,7 @@ index 0cfb8b792b..31080be2bd 100644 dev->inflight_info->addr = addr; dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size; dev->inflight_info->fd = msg->fds[0] = fd; -@@ -1522,10 +1526,13 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, 
VhostUserMsg *msg, +@@ -1522,10 +1544,13 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg, "failed to alloc dev inflight area\n"); return RTE_VHOST_MSG_RESULT_ERR; } @@ -38618,7 +78796,7 @@ index 0cfb8b792b..31080be2bd 100644 addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_offset); -@@ -1534,8 +1541,10 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg, +@@ -1534,8 +1559,10 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg, return RTE_VHOST_MSG_RESULT_ERR; } @@ -38630,7 +78808,7 @@ index 0cfb8b792b..31080be2bd 100644 dev->inflight_info->fd = fd; dev->inflight_info->addr = addr; -@@ -1629,8 +1638,11 @@ vhost_check_queue_inflights_split(struct virtio_net *dev, +@@ -1629,8 +1656,11 @@ vhost_check_queue_inflights_split(struct virtio_net *dev, (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) return RTE_VHOST_MSG_RESULT_OK; @@ -38643,7 +78821,7 @@ index 0cfb8b792b..31080be2bd 100644 if (!vq->inflight_split->version) { vq->inflight_split->version = INFLIGHT_VERSION; -@@ -1710,8 +1722,11 @@ vhost_check_queue_inflights_packed(struct virtio_net *dev, +@@ -1710,8 +1740,11 @@ vhost_check_queue_inflights_packed(struct virtio_net *dev, (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) return RTE_VHOST_MSG_RESULT_OK; @@ -38656,7 +78834,36 @@ index 0cfb8b792b..31080be2bd 100644 if (!vq->inflight_packed->version) { vq->inflight_packed->version = INFLIGHT_VERSION; -@@ -2060,10 +2075,10 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg, +@@ -1811,8 +1844,12 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg, + + /* Interpret ring addresses only when ring is started. */ + dev = translate_ring_addresses(dev, file.index); +- if (!dev) ++ if (!dev) { ++ if (file.fd != VIRTIO_INVALID_EVENTFD) ++ close(file.fd); ++ + return RTE_VHOST_MSG_RESULT_ERR; ++ } + + *pdev = dev; + +@@ -1857,6 +1894,7 @@ free_zmbufs(struct vhost_virtqueue *vq) + drain_zmbuf_list(vq); + + rte_free(vq->zmbufs); ++ vq->zmbufs = NULL; + } + + /* +@@ -2054,18 +2092,18 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg, + RTE_LOG(ERR, VHOST_CONFIG, + "invalid log base msg size: %"PRId32" != %d\n", + msg->size, (int)sizeof(VhostUserLog)); +- return RTE_VHOST_MSG_RESULT_ERR; ++ goto close_msg_fds; + } + size = msg->payload.log.mmap_size; off = msg->payload.log.mmap_offset; @@ -38668,9 +78875,23 @@ index 0cfb8b792b..31080be2bd 100644 - "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n", + "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n", off, size); - return RTE_VHOST_MSG_RESULT_ERR; +- return RTE_VHOST_MSG_RESULT_ERR; ++ goto close_msg_fds; } -@@ -2229,6 +2244,13 @@ is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) + + RTE_LOG(INFO, VHOST_CONFIG, +@@ -2102,6 +2140,10 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg, + msg->fd_num = 0; + + return RTE_VHOST_MSG_RESULT_REPLY; ++ ++close_msg_fds: ++ close_msg_fds(msg); ++ return RTE_VHOST_MSG_RESULT_ERR; + } + + static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused, +@@ -2229,6 +2271,13 @@ is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) return 1; @@ -38684,7 +78905,7 @@ index 0cfb8b792b..31080be2bd 100644 return 0; } -@@ -2254,6 +2276,13 @@ is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) +@@ -2254,6 +2303,13 @@ 
is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) return 1; @@ -38698,7 +78919,7 @@ index 0cfb8b792b..31080be2bd 100644 return 0; } -@@ -2440,8 +2469,13 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg) +@@ -2440,8 +2496,13 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg) ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE, msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num); @@ -38713,7 +78934,7 @@ index 0cfb8b792b..31080be2bd 100644 if (msg->size) { if (msg->size > sizeof(msg->payload)) { -@@ -2508,7 +2542,7 @@ static int +@@ -2508,7 +2569,7 @@ static int vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, struct VhostUserMsg *msg) { @@ -38722,7 +78943,16 @@ index 0cfb8b792b..31080be2bd 100644 switch (msg->request.master) { case VHOST_USER_SET_VRING_KICK: -@@ -2794,11 +2828,19 @@ static int process_slave_message_reply(struct virtio_net *dev, +@@ -2758,7 +2819,7 @@ vhost_user_msg_handler(int vid, int fd) + return -1; + } + +- if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) { ++ if (!(dev->flags & VIRTIO_DEV_READY) && virtio_is_ready(dev)) { + dev->flags |= VIRTIO_DEV_READY; + + if (!(dev->flags & VIRTIO_DEV_RUNNING)) { +@@ -2794,11 +2855,19 @@ static int process_slave_message_reply(struct virtio_net *dev, if ((msg->flags & VHOST_USER_NEED_REPLY) == 0) return 0; @@ -38744,7 +78974,7 @@ index 0cfb8b792b..31080be2bd 100644 RTE_LOG(ERR, VHOST_CONFIG, "Received unexpected msg type (%u), expected %u\n", diff --git a/dpdk/lib/librte_vhost/virtio_net.c b/dpdk/lib/librte_vhost/virtio_net.c -index 21c311732a..a6c106c13c 100644 +index 21c311732a..f397e9a13a 100644 --- a/dpdk/lib/librte_vhost/virtio_net.c +++ b/dpdk/lib/librte_vhost/virtio_net.c @@ -43,6 +43,36 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring) @@ -38868,7 +79098,51 @@ index 21c311732a..a6c106c13c 100644 if (unlikely(lens[i] != descs[avail_idx + i].len)) return -1; } -@@ -1688,6 +1676,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -1607,16 +1595,8 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size) + rte_iova_t iova; + void *buf; + +- /* Try to use pkt buffer to store shinfo to reduce the amount of memory +- * required, otherwise store shinfo in the new buffer. 
+- */ +- if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo)) +- shinfo = rte_pktmbuf_mtod(pkt, +- struct rte_mbuf_ext_shared_info *); +- else { +- total_len += sizeof(*shinfo) + sizeof(uintptr_t); +- total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t)); +- } ++ total_len += sizeof(*shinfo) + sizeof(uintptr_t); ++ total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t)); + + if (unlikely(total_len > UINT16_MAX)) + return -ENOSPC; +@@ -1627,18 +1607,12 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size) + return -ENOMEM; + + /* Initialize shinfo */ +- if (shinfo) { +- shinfo->free_cb = virtio_dev_extbuf_free; +- shinfo->fcb_opaque = buf; +- rte_mbuf_ext_refcnt_set(shinfo, 1); +- } else { +- shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len, +- virtio_dev_extbuf_free, buf); +- if (unlikely(shinfo == NULL)) { +- rte_free(buf); +- RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n"); +- return -1; +- } ++ shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len, ++ virtio_dev_extbuf_free, buf); ++ if (unlikely(shinfo == NULL)) { ++ rte_free(buf); ++ RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n"); ++ return -1; + } + + iova = rte_malloc_virt2iova(buf); +@@ -1688,6 +1662,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, { uint16_t i; uint16_t free_entries; @@ -38877,7 +79151,7 @@ index 21c311732a..a6c106c13c 100644 if (unlikely(dev->dequeue_zero_copy)) { struct zcopy_mbuf *zmbuf, *next; -@@ -1751,13 +1741,35 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -1751,13 +1727,35 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, update_shadow_used_ring_split(vq, head_idx, 0); pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len); @@ -38914,7 +79188,7 @@ index 21c311732a..a6c106c13c 100644 break; } -@@ -1767,6 +1779,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -1767,6 +1765,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, zmbuf = get_zmbuf(vq); if (!zmbuf) { rte_pktmbuf_free(pkts[i]); @@ -38923,7 +79197,7 @@ index 21c311732a..a6c106c13c 100644 break; } zmbuf->mbuf = pkts[i]; -@@ -1796,7 +1810,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -1796,7 +1796,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, } } @@ -38932,7 +79206,15 @@ index 21c311732a..a6c106c13c 100644 } static __rte_always_inline int -@@ -1841,6 +1855,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev, +@@ -1810,7 +1810,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev, + { + bool wrap = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; +- struct virtio_net_hdr *hdr; + uint64_t lens[PACKED_BATCH_SIZE]; + uint64_t buf_lens[PACKED_BATCH_SIZE]; + uint32_t buf_offset = dev->vhost_hlen; +@@ -1841,6 +1840,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev, } vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { @@ -38941,7 +79223,43 @@ index 21c311732a..a6c106c13c 100644 if (unlikely((lens[i] != descs[avail_idx + i].len))) return -1; } -@@ -1928,6 +1944,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev, +@@ -1865,13 +1866,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev, + ids[i] = descs[avail_idx + i].id; + } + +- if (virtio_net_with_host_offload(dev)) { +- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { +- hdr = (struct virtio_net_hdr *)(desc_addrs[i]); +- vhost_dequeue_offload(hdr, pkts[i]); +- } +- } +- + return 0; + + 
free_buf: +@@ -1889,6 +1883,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, + { + uint16_t avail_idx = vq->last_avail_idx; + uint32_t buf_offset = dev->vhost_hlen; ++ struct virtio_net_hdr *hdr; + uintptr_t desc_addrs[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; + uint16_t i; +@@ -1905,6 +1900,13 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, + (void *)(uintptr_t)(desc_addrs[i] + buf_offset), + pkts[i]->pkt_len); + ++ if (virtio_net_with_host_offload(dev)) { ++ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { ++ hdr = (struct virtio_net_hdr *)(desc_addrs[i]); ++ vhost_dequeue_offload(hdr, pkts[i]); ++ } ++ } ++ + if (virtio_net_is_inorder(dev)) + vhost_shadow_dequeue_batch_packed_inorder(vq, + ids[PACKED_BATCH_SIZE - 1]); +@@ -1928,6 +1930,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev, uint32_t buf_len; uint16_t nr_vec = 0; int err; @@ -38949,7 +79267,7 @@ index 21c311732a..a6c106c13c 100644 if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, desc_count, -@@ -1938,14 +1955,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev, +@@ -1938,14 +1941,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev, *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len); if (unlikely(*pkts == NULL)) { @@ -38976,7 +79294,7 @@ index 21c311732a..a6c106c13c 100644 rte_pktmbuf_free(*pkts); return -1; } -@@ -1960,21 +1987,24 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, +@@ -1960,21 +1973,24 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct rte_mbuf **pkts) { @@ -39012,7 +79330,7 @@ index 21c311732a..a6c106c13c 100644 } static __rte_always_inline int -@@ -2004,7 +2034,7 @@ virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev, +@@ -2004,7 +2020,7 @@ virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev, vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { zmbufs[i]->mbuf = pkts[i]; @@ -39021,7 +79339,7 @@ index 21c311732a..a6c106c13c 100644 zmbufs[i]->desc_count = 1; } -@@ -2045,7 +2075,7 @@ virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev, +@@ -2045,7 +2061,7 @@ virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev, return -1; } zmbuf->mbuf = *pkts; @@ -39030,7 +79348,7 @@ index 21c311732a..a6c106c13c 100644 zmbuf->desc_count = desc_count; rte_mbuf_refcnt_update(*pkts, 1); -@@ -2149,7 +2179,6 @@ virtio_dev_tx_packed(struct virtio_net *dev, +@@ -2149,7 +2165,6 @@ virtio_dev_tx_packed(struct virtio_net *dev, if (remained >= PACKED_BATCH_SIZE) { if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool, &pkts[pkt_idx])) { @@ -39038,7 +79356,7 @@ index 21c311732a..a6c106c13c 100644 pkt_idx += PACKED_BATCH_SIZE; remained -= PACKED_BATCH_SIZE; continue; -@@ -2159,15 +2188,18 @@ virtio_dev_tx_packed(struct virtio_net *dev, +@@ -2159,15 +2174,18 @@ virtio_dev_tx_packed(struct virtio_net *dev, if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool, &pkts[pkt_idx])) break; @@ -39060,19 +79378,32 @@ index 21c311732a..a6c106c13c 100644 } diff --git a/dpdk/lib/meson.build b/dpdk/lib/meson.build -index 6ceb5e756e..d5a507fb43 100644 +index 6ceb5e756e..b60396428c 100644 --- a/dpdk/lib/meson.build +++ b/dpdk/lib/meson.build -@@ -148,12 +148,16 @@ foreach l:libraries +@@ -127,7 +127,7 @@ foreach l:libraries + dependencies: static_deps, + include_directories: includes, + install: true) +- static_dep = declare_dependency(link_with: static_lib, ++ static_dep = declare_dependency( + include_directories: includes, + dependencies: static_deps) + +@@ -148,12 +148,18 @@ foreach l:libraries command: [map_to_def_cmd, '@INPUT@', '@OUTPUT@'], input: 
version_map, output: 'rte_@0@_exports.def'.format(name)) - lk_deps = [version_map, def_file] - if is_windows +- lk_args = ['-Wl,/def:' + def_file.full_path(), +- '-Wl,/implib:lib\\' + implib] + + if is_ms_linker - lk_args = ['-Wl,/def:' + def_file.full_path(), - '-Wl,/implib:lib\\' + implib] ++ lk_args = ['-Wl,/def:' + def_file.full_path()] ++ if meson.version().version_compare('<0.54.0') ++ lk_args += ['-Wl,/implib:lib\\' + implib] ++ endif else lk_args = ['-Wl,--version-script=' + version_map] + endif @@ -39082,6 +79413,119 @@ index 6ceb5e756e..d5a507fb43 100644 # on unix systems check the output of the # experimental syms script, using it as a # dependency of the .so build +diff --git a/dpdk/license/bsd-2-clause.txt b/dpdk/license/bsd-2-clause.txt +new file mode 100644 +index 0000000000..dfb3f1adea +--- /dev/null ++++ b/dpdk/license/bsd-2-clause.txt +@@ -0,0 +1,20 @@ ++Redistribution and use in source and binary forms, with or without ++modification, are permitted provided that the following conditions are met: ++ ++ 1. Redistributions of source code must retain the above copyright notice, ++ this list of conditions and the following disclaimer. ++ ++ 2. Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in the ++ documentation and/or other materials provided with the distribution. ++ ++THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE ++FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ++OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/dpdk/license/isc.txt b/dpdk/license/isc.txt +new file mode 100644 +index 0000000000..34a6a760d5 +--- /dev/null ++++ b/dpdk/license/isc.txt +@@ -0,0 +1,11 @@ ++Permission to use, copy, modify, and/or distribute this software for any ++purpose with or without fee is hereby granted, provided that the above ++copyright notice and this permission notice appear in all copies. ++ ++THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD ++TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND ++FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR ++CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, ++DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER ++TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE ++OF THIS SOFTWARE. 
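
A note on the vhost-user log-base hunk earlier in this patch: it tightens the validation of the guest-supplied mmap_offset/mmap_size pair (the new error message reads "log offset ... and log size ... overflow"), rejecting the pair before the kernel ever sees it. The usual way to write such a test without performing the addition that might wrap is sketched below; this is a minimal standalone illustration under our own names, not DPDK code:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /*
     * off + size wraps past UINT64_MAX exactly when off > UINT64_MAX - size,
     * which in unsigned arithmetic is the same as off >= -size.  size == 0
     * is handled separately, since -0 == 0 would make the test always fire.
     */
    static bool log_region_overflows(uint64_t off, uint64_t size)
    {
        return size != 0 && off >= (uint64_t)-size;
    }

    int main(void)
    {
        /* wraps: (2^64 - 4096) + 8192 does not fit in 64 bits */
        printf("%d\n", log_region_overflows(UINT64_MAX - 4095, 8192));
        /* fine: 4096 + 8192 fits comfortably */
        printf("%d\n", log_region_overflows(4096, 8192));
        return 0;
    }

The point of checking before adding is that a hostile master could otherwise pick values whose sum wraps to a small number and sails past a naive `off + size > limit` comparison.
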
+diff --git a/dpdk/license/mit.txt b/dpdk/license/mit.txt +new file mode 100644 +index 0000000000..c4037a4605 +--- /dev/null ++++ b/dpdk/license/mit.txt +@@ -0,0 +1,18 @@ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice (including the next ++paragraph) shall be included in all copies or substantial portions of the ++Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++SOFTWARE. +diff --git a/dpdk/meson.build b/dpdk/meson.build +index b7ae9c8d9a..00949de995 100644 +--- a/dpdk/meson.build ++++ b/dpdk/meson.build +@@ -48,6 +48,9 @@ subdir('doc') + # build any examples explicitly requested - useful for developers - and + # install any example code into the appropriate install path + subdir('examples') ++install_subdir('examples', ++ install_dir: get_option('datadir') + '/dpdk', ++ exclude_files: 'meson.build') + + # build kernel modules if enabled + if get_option('enable_kmods') +@@ -61,29 +64,8 @@ configure_file(output: build_cfg, + install_dir: join_paths(get_option('includedir'), + get_option('include_subdir_arch'))) + +-# for static builds, include the drivers as libs and we need to "whole-archive" +-# them. +-dpdk_drivers = ['-Wl,--whole-archive'] + dpdk_drivers + ['-Wl,--no-whole-archive'] +- +-pkg = import('pkgconfig') +-pkg_extra_cflags = ['-include', 'rte_config.h'] + machine_args +-if is_freebsd +- pkg_extra_cflags += ['-D__BSD_VISIBLE'] +-endif +-pkg.generate(name: meson.project_name(), +- filebase: 'lib' + meson.project_name().to_lower(), +- version: meson.project_version(), +- libraries: dpdk_libraries, +- libraries_private: dpdk_drivers + dpdk_static_libraries + +- ['-Wl,-Bdynamic'] + dpdk_extra_ldflags, +- requires: libbsd, # apps using rte_string_fns.h may need this if enabled +- # if libbsd is not enabled, then this is blank +- description: '''The Data Plane Development Kit (DPDK). +-Note that CFLAGS might contain an -march flag higher than typical baseline. +-This is required for a number of static inline functions in the public headers.''', +- subdirs: [get_option('include_subdir_arch'), '.'], +- extra_cflags: pkg_extra_cflags +-) ++# build pkg-config files for dpdk ++subdir('buildtools/pkg-config') + + # final output, list all the libs and drivers to be built + # this does not affect any part of the build, for information only. 
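
A recurring pattern in the is_vring_iotlb_split() and is_vring_iotlb_packed() hunks earlier in this patch is the half-open interval test visible in their context lines, `addr < end && (addr + len) > start`, applied to each ring area in turn. A minimal standalone sketch of that predicate (function and variable names are ours, not DPDK's):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Half-open ranges [a, a + a_len) and [b, b + b_len) intersect exactly
     * when each one starts before the other one ends.  Assumes the sums
     * cannot wrap; pair with an overflow check as sketched above.
     */
    static bool ranges_overlap(uint64_t a, uint64_t a_len,
                               uint64_t b, uint64_t b_len)
    {
        return a < b + b_len && b < a + a_len;
    }

    int main(void)
    {
        /* ring area [0x1000, 0x2000) vs invalidated [0x1800, 0x3000) */
        return ranges_overlap(0x1000, 0x1000, 0x1800, 0x1800) ? 0 : 1;
    }

Keeping every ring area behind the same predicate is what lets these hunks cover an additional address range by appending one more short block instead of restructuring the functions.
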
diff --git a/dpdk/meson_options.txt b/dpdk/meson_options.txt index bc369d06c9..0de16b4fdb 100644 --- a/dpdk/meson_options.txt @@ -39110,6 +79554,46 @@ index 0cf3791b4d..82fe098f7c 100644 PMDINFO_TO_O = if grep -q 'RTE_PMD_REGISTER_.*(.*)' $<; then \ echo "$(if $V,$(PMDINFO_GEN), PMDINFO $@.pmd.c)" && \ $(PMDINFO_GEN) && \ +diff --git a/dpdk/mk/machine/graviton2/rte.vars.mk b/dpdk/mk/machine/graviton2/rte.vars.mk +new file mode 100644 +index 0000000000..1796c9cf8f +--- /dev/null ++++ b/dpdk/mk/machine/graviton2/rte.vars.mk +@@ -0,0 +1,34 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) Amazon.com, Inc or its affiliates ++# ++ ++# ++# machine: ++# ++# - can define ARCH variable (overridden by cmdline value) ++# - can define CROSS variable (overridden by cmdline value) ++# - define MACHINE_CFLAGS variable (overridden by cmdline value) ++# - define MACHINE_LDFLAGS variable (overridden by cmdline value) ++# - define MACHINE_ASFLAGS variable (overridden by cmdline value) ++# - can define CPU_CFLAGS variable (overridden by cmdline value) that ++# overrides the one defined in arch. ++# - can define CPU_LDFLAGS variable (overridden by cmdline value) that ++# overrides the one defined in arch. ++# - can define CPU_ASFLAGS variable (overridden by cmdline value) that ++# overrides the one defined in arch. ++# - may override any previously defined variable ++# ++ ++# ARCH = ++# CROSS = ++# MACHINE_CFLAGS = ++# MACHINE_LDFLAGS = ++# MACHINE_ASFLAGS = ++# CPU_CFLAGS = ++# CPU_LDFLAGS = ++# CPU_ASFLAGS = ++ ++include $(RTE_SDK)/mk/rte.helper.mk ++ ++MACHINE_CFLAGS += $(call rte_cc_has_argument, -march=armv8.2-a+crypto) ++MACHINE_CFLAGS += $(call rte_cc_has_argument, -mcpu=neoverse-n1) diff --git a/dpdk/mk/rte.app.mk b/dpdk/mk/rte.app.mk index 05ea034b99..44dd684cb1 100644 --- a/dpdk/mk/rte.app.mk @@ -39143,10 +79627,72 @@ index 9fc704193b..b3473c06fd 100644 HOST_WERROR_FLAGS := $(WERROR_FLAGS) ifeq ($(shell test $(HOST_GCC_VERSION) -gt 70 && echo 1), 1) +diff --git a/dpdk/usertools/cpu_layout.py b/dpdk/usertools/cpu_layout.py +index 6f129b1db8..39b268752a 100755 +--- a/dpdk/usertools/cpu_layout.py ++++ b/dpdk/usertools/cpu_layout.py +@@ -22,8 +22,6 @@ + fd = open("{}/cpu{}/topology/core_id".format(base_path, cpu)) + except IOError: + continue +- except: +- break + core = int(fd.read()) + fd.close() + fd = open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu)) +@@ -66,7 +64,7 @@ + for c in cores: + output = "Core %s" % str(c).ljust(max_core_id_len) + for s in sockets: +- if (s,c) in core_map: ++ if (s, c) in core_map: + output += " " + str(core_map[(s, c)]).ljust(max_core_map_len) + else: + output += " " * (max_core_map_len + 1) +diff --git a/dpdk/usertools/dpdk-devbind.py b/dpdk/usertools/dpdk-devbind.py +index b1d1498768..44ea3dd30b 100755 +--- a/dpdk/usertools/dpdk-devbind.py ++++ b/dpdk/usertools/dpdk-devbind.py +@@ -7,6 +7,7 @@ + import sys + import os + import getopt ++import platform + import subprocess + from os.path import exists, abspath, dirname, basename + +@@ -172,7 +173,17 @@ def module_is_loaded(module): + + loaded_modules = sysfs_mods + +- return module in sysfs_mods ++ # add built-in modules as loaded ++ release = platform.release() ++ filename = os.path.join("/lib/modules/", release, "modules.builtin") ++ if os.path.exists(filename): ++ try: ++ with open(filename) as f: ++ loaded_modules += [os.path.splitext(os.path.basename(mod))[0] for mod in f] ++ except IOError: ++ print("Warning: cannot read list of built-in kernel modules") ++ ++ return module in 
loaded_modules
+
+
+ def check_modules():
diff --git a/dpdk/usertools/dpdk-pmdinfo.py b/dpdk/usertools/dpdk-pmdinfo.py
index 069a3bf124..27e1cad328 100755
--- a/dpdk/usertools/dpdk-pmdinfo.py
+++ b/dpdk/usertools/dpdk-pmdinfo.py
@@ -352,7 +352,7 @@ def display_pmd_info_strings(self, section_spec):
             mystring = force_unicode(data[dataptr:endptr])
             rc = mystring.find("PMD_INFO_STRING")
             if (rc != -1):
-                self.parse_pmd_info_string(mystring)
+                self.parse_pmd_info_string(mystring[rc:])
 
             dataptr = endptr
 
@@ -539,7 +539,7 @@ def scan_for_autoload_pmds(dpdk_path):
     return
diff --git a/SOURCES/ppc_64-power8-linuxapp-gcc-config b/SOURCES/ppc_64-power8-linuxapp-gcc-config
index 394713d..042c372 100644
--- a/SOURCES/ppc_64-power8-linuxapp-gcc-config
+++ b/SOURCES/ppc_64-power8-linuxapp-gcc-config
@@ -1,4 +1,4 @@
-# -*- cfg-sha: ed6bcdfa02f885357548558116ba4f4693048c72eb35043c2de856708c6f7257
+# -*- cfg-sha: f8eb57e7e75a69bb59051bd6f87b77c54bfda5320d8d3a2aaffa94b14d254b18
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright (C) IBM Corporation 2014.
 # SPDX-License-Identifier: BSD-3-Clause
@@ -10,7 +10,7 @@ CONFIG_RTE_VER_PREFIX="DPDK"
 # Version information completed when this file is processed for a build
 CONFIG_RTE_VER_YEAR=19
 CONFIG_RTE_VER_MONTH=11
-CONFIG_RTE_VER_MINOR=3
+CONFIG_RTE_VER_MINOR=7
 CONFIG_RTE_VER_SUFFIX=""
 CONFIG_RTE_VER_RELEASE=99
 # RTE_EXEC_ENV values are the directories in mk/exec-env/
@@ -195,7 +195,6 @@ CONFIG_RTE_LIBRTE_ICE_PMD=n
 CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n
 CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n
-CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n
 # Compile burst-oriented IAVF PMD driver
 CONFIG_RTE_LIBRTE_IAVF_PMD=n
@@ -326,7 +325,6 @@ CONFIG_RTE_LIBRTE_CRYPTODEV=n
 CONFIG_RTE_CRYPTO_MAX_DEVS=64
 # Compile PMD for ARMv8 Crypto device
 CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n
-CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n
 # Compile NXP CAAM JR crypto Driver
 CONFIG_RTE_LIBRTE_PMD_CAAM_JR=n
 CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE=n
@@ -590,3 +588,4 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 # Note: Power doesn't have this support
 # Note: Initially, all of PMD drivers compilation are turned off on Power
 # Will turn on them only after 
successful testing on Power +CONFIG_RTE_LIBRTE_PMD_XENVIRT=n diff --git a/SOURCES/x86_64-native-linuxapp-gcc-config b/SOURCES/x86_64-native-linuxapp-gcc-config index 30d033b..a19dba6 100644 --- a/SOURCES/x86_64-native-linuxapp-gcc-config +++ b/SOURCES/x86_64-native-linuxapp-gcc-config @@ -1,4 +1,4 @@ -# -*- cfg-sha: f4cf137e2d4d96b2fa1ea8a0f1029d8d6553993747fda3f9f37fd01138fae055 +# -*- cfg-sha: 133e4d11e86f77e37ed4efd835ccd4f8c81eb1e7ac828474be873cf0bc4126c6 # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2014 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause @@ -10,7 +10,7 @@ CONFIG_RTE_VER_PREFIX="DPDK" # Version information completed when this file is processed for a build CONFIG_RTE_VER_YEAR=19 CONFIG_RTE_VER_MONTH=11 -CONFIG_RTE_VER_MINOR=3 +CONFIG_RTE_VER_MINOR=7 CONFIG_RTE_VER_SUFFIX="" CONFIG_RTE_VER_RELEASE=99 # RTE_EXEC_ENV values are the directories in mk/exec-env/ @@ -195,7 +195,6 @@ CONFIG_RTE_LIBRTE_ICE_PMD=n CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n -CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n # Compile burst-oriented IAVF PMD driver CONFIG_RTE_LIBRTE_IAVF_PMD=n @@ -326,7 +325,6 @@ CONFIG_RTE_LIBRTE_CRYPTODEV=n CONFIG_RTE_CRYPTO_MAX_DEVS=64 # Compile PMD for ARMv8 Crypto device CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n -CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n # Compile NXP CAAM JR crypto Driver CONFIG_RTE_LIBRTE_PMD_CAAM_JR=n CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE=n @@ -588,3 +586,4 @@ CONFIG_RTE_ARCH_X86_64=y CONFIG_RTE_ARCH_X86=y CONFIG_RTE_ARCH_64=y CONFIG_RTE_TOOLCHAIN_GCC=y +CONFIG_RTE_LIBRTE_PMD_XENVIRT=n diff --git a/SPECS/openvswitch2.13.spec b/SPECS/openvswitch2.13.spec index d1756e8..2997a24 100644 --- a/SPECS/openvswitch2.13.spec +++ b/SPECS/openvswitch2.13.spec @@ -59,7 +59,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 2.13.0 -Release: 101%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist} +Release: 103%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -710,6 +710,14 @@ exit 0 %endif %changelog +* Tue Apr 06 2021 Timothy Redaelli - 2.13.0-103 +- Align DPDK config to 19.11.7 + [62cff1abf5fda881c4e5b130df38372701d31ba4] + +* Tue Apr 06 2021 Timothy Redaelli - 2.13.0-102 +- Merge tag 'c765f42e31c1baa8f4e7a9e01080f5474596ea98' into fast-datapath-rhel-8 + [4ef8ee2e1a5edb1c1406f275032bc3d06ba139ca] + * Fri Apr 02 2021 Open vSwitch CI - 2.13.0-101 - Merging upstream branch-2.13 [02b662f992b57ed2cc2274efb0033abae7bf2aa8]
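
One further theme worth calling out across the vhost-user hunks above: descriptors that arrive over the control socket are now closed on every early-return path (see the close(file.fd) added to vhost_user_set_vring_kick() and the close_msg_fds label added to vhost_user_set_log_base()). A standalone sketch of that goto-cleanup idiom, with entirely hypothetical names and a plain POSIX fd standing in for the vhost message descriptors:

    #include <stddef.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Take ownership of a peer-supplied fd.  Whatever happens, the fd must
     * not leak: every failure path funnels through a single close() call.
     */
    static int consume_peer_fd(int fd, size_t claimed_size, size_t max_size)
    {
        if (claimed_size == 0 || claimed_size > max_size) {
            fprintf(stderr, "rejecting fd: bad size %zu\n", claimed_size);
            goto err;
        }
        /* ... mmap() or otherwise adopt fd here; on success it is owned ... */
        return 0;

    err:
        close(fd);
        return -1;
    }

    int main(void)
    {
        int fds[2];
        if (pipe(fds) != 0)
            return 1;
        close(fds[1]);
        /* size 0 fails validation; the helper must close fds[0] for us */
        return consume_peer_fd(fds[0], 0, 4096) == -1 ? 0 : 1;
    }

Without this discipline, a misbehaving vhost-user master can exhaust the process's descriptor table simply by sending requests that fail validation.
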