diff --git a/SOURCES/openvswitch-2.17.0.patch b/SOURCES/openvswitch-2.17.0.patch index 4c77966..f25a6df 100644 --- a/SOURCES/openvswitch-2.17.0.patch +++ b/SOURCES/openvswitch-2.17.0.patch @@ -2524,7 +2524,7 @@ index c10c1a8ab5..b29cd91f56 100755 if [ "$BUILD_DOCS" = "true" ]; then diff --git a/dpdk/.github/workflows/build.yml b/dpdk/.github/workflows/build.yml -index 2e9c4be6d0..f1079d840e 100644 +index 2e9c4be6d0..12436403bb 100644 --- a/dpdk/.github/workflows/build.yml +++ b/dpdk/.github/workflows/build.yml @@ -23,68 +23,63 @@ jobs: @@ -2594,7 +2594,7 @@ index 2e9c4be6d0..f1079d840e 100644 steps: - name: Checkout sources - uses: actions/checkout@v2 -+ uses: actions/checkout@v3 ++ uses: actions/checkout@v4 - name: Generate cache keys id: get_ref_keys run: | @@ -2609,7 +2609,7 @@ index 2e9c4be6d0..f1079d840e 100644 + echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT - name: Retrieve ccache cache - uses: actions/cache@v2 -+ uses: actions/cache@v3 ++ uses: actions/cache@v4 with: path: ~/.ccache key: ${{ steps.get_ref_keys.outputs.ccache }}-${{ github.ref }} @@ -2618,14 +2618,14 @@ index 2e9c4be6d0..f1079d840e 100644 - name: Retrieve libabigail cache id: libabigail-cache - uses: actions/cache@v2 -+ uses: actions/cache@v3 ++ uses: actions/cache@v4 if: env.ABI_CHECKS == 'true' with: path: libabigail key: ${{ steps.get_ref_keys.outputs.libabigail }} - name: Retrieve ABI reference cache - uses: actions/cache@v2 -+ uses: actions/cache@v3 ++ uses: actions/cache@v4 if: env.ABI_CHECKS == 'true' with: path: reference @@ -2634,21 +2634,21 @@ index 2e9c4be6d0..f1079d840e 100644 - name: Upload logs on failure if: failure() - uses: actions/upload-artifact@v2 -+ uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 with: name: meson-logs-${{ join(matrix.config.*, '-') }} path: | diff --git a/dpdk/.mailmap b/dpdk/.mailmap new file mode 100644 -index 0000000000..fe94cbe1ff +index 0000000000..29ff02261c --- /dev/null +++ b/dpdk/.mailmap -@@ -0,0 +1,1614 @@ +@@ -0,0 +1,1636 @@ +Aakash Sasidharan +Aaro Koskinen +Aaron Campbell +Aaron Conole -+Abdullah Ömer Yamaç ++Abdullah Ömer Yamaç +Abdullah Sevincer +Abed Kamaluddin +Abhijit Sinha @@ -2673,9 +2673,11 @@ index 0000000000..fe94cbe1ff +Akash Saxena +Akeem G Abodunrin +Akhil Goyal ++Akshay Dorwat +Alain Leon +Alan Carew +Alan Dewar ++Alan Elder +Alan Liu +Alan Winkowski +Alejandro Lucero @@ -2856,6 +2858,7 @@ index 0000000000..fe94cbe1ff +Cheng Peng +Chengwen Feng +Chenmin Sun ++Chenming Chang +Chenxu Di +Cheryl Houser +Chinh T Cao @@ -2996,6 +2999,7 @@ index 0000000000..fe94cbe1ff +Elena Agostini +Eli Britstein +Elza Mathew ++Emi Aoki +Emma Finn +Emma Kenny +Emmanuel Roullit @@ -3029,9 +3033,11 @@ index 0000000000..fe94cbe1ff +Ferdinand Thiessen +Ferruh Yigit +Fidaullah Noonari ++Fidel Castro +Fiona Trahe +Flavia Musatescu +Flavio Leitner ++Flore Norceide +Forrest Shi +Francesco Mancino +Francesco Santoro @@ -3111,6 +3117,7 @@ index 0000000000..fe94cbe1ff +Hanumanth Pothula +Hao Chen +Hao Wu ++Haoqian He +Hari Kumar Vemula +Harini Ramakrishnan +Hariprasad Govindharajan @@ -3146,6 +3153,7 @@ index 0000000000..fe94cbe1ff +Hiroki Shirokura +Hiroshi Shimamoto +Hiroyuki Mikita ++Holly Nichols +Hongbo Zheng +Hongjun Ni +Hongzhi Guo @@ -3296,7 +3304,7 @@ index 0000000000..fe94cbe1ff +John Ousterhout +John W. 
Linville +Jonas Pfefferle -+Jonathan Erb ++(??)Jonathan Erb +Jonathan Tsai +Jon DeVree +Jon Loeliger @@ -3325,6 +3333,7 @@ index 0000000000..fe94cbe1ff +Jun Qiu +Jun W Zhou +Junxiao Shi ++Jun Wang +Jun Yang +Junyu Jiang +Juraj Linkeš @@ -3400,6 +3409,7 @@ index 0000000000..fe94cbe1ff +Levend Sayar +Lev Faerman +Lewei Yang ++Lewis Donzis +Leyi Rong +Liang Ma +Liang-Min Larry Wang @@ -3465,7 +3475,6 @@ index 0000000000..fe94cbe1ff +Manish Chopra +Manish Tomar +Mao Jiang -+Mao YingMing +Marcel Apfelbaum +Marcel Cornu +Marcelo Ricardo Leitner @@ -3482,6 +3491,7 @@ index 0000000000..fe94cbe1ff +Marcin Zapolski +Marco Varlese +Marc Sune ++Marek Mical +Maria Lingemark +Mario Carrillo +Mário Kuka @@ -3505,6 +3515,7 @@ index 0000000000..fe94cbe1ff +Martyna Szapar +Maryam Tahhan +Masoud Hasanifard ++Masoumeh Farhadi Nia +Matan Azrad +Matej Vido +Mateusz Kowalski @@ -3548,6 +3559,7 @@ index 0000000000..fe94cbe1ff +Michael Savisko +Michael Shamis +Michael S. Tsirkin ++Michael Theodore Stolarchuk +Michael Wildt +Michal Berger +Michal Jastrzebski @@ -3620,7 +3632,7 @@ index 0000000000..fe94cbe1ff +Netanel Belgazal +Netanel Gonen +Niall Power -+Nick Connolly ++Nick Connolly +Nick Nunley +Niclas Storm +Nicolas Chautru @@ -3731,6 +3743,7 @@ index 0000000000..fe94cbe1ff +Przemyslaw Patynowski +Przemyslaw Zegan +Pu Xu <583493798@qq.com> ++Qian Hao +Qian Xu +Qiao Liu +Qi Fu @@ -3745,6 +3758,7 @@ index 0000000000..fe94cbe1ff +Qun Wan +Radha Mohan Chintakuntla +Radoslaw Biernacki ++Radoslaw Tyl +Radu Bulie +Radu Nicolau +Rafael Ávila de Espíndola @@ -3875,6 +3889,7 @@ index 0000000000..fe94cbe1ff +Shannon Nelson +Shannon Zhao +Shaopeng He ++Shaowei Sun <1819846787@qq.com> +Sharmila Podury +Sharon Haroni +Shay Agroskin @@ -3901,6 +3916,7 @@ index 0000000000..fe94cbe1ff +Shuki Katzenelson +Shun Hao +Shu Shen ++Shuo Li +Shweta Choudaha +Shyam Kumar Shrivastav +Shy Shyman @@ -4030,6 +4046,7 @@ index 0000000000..fe94cbe1ff +Tomasz Zawadzki +Tom Barbette +Tom Crugnale ++Tom Jones +Tom Millington +Tom Rix +Tone Zhang @@ -4048,6 +4065,7 @@ index 0000000000..fe94cbe1ff +Vakul Garg +Vamsi Attunuru +Vanshika Shukla ++Varun Sethi +Vasily Philipov +Veerasenareddy Burru +Venkata Suresh Kumar P @@ -4076,6 +4094,8 @@ index 0000000000..fe94cbe1ff +Vincent Jardin +Vincent Li +Vincent S. 
Cojot ++Vinh Tran ++Vipin Padmam Ramesh +Vipin Varghese +Vipul Ashri +Visa Hankala @@ -4096,6 +4116,7 @@ index 0000000000..fe94cbe1ff +Wang Sheng-Hui +Wangyu (Eric) +Waterman Cao ++Wathsala Vithanage +Weichun Chen +Wei Dai +Weifeng Li @@ -4191,6 +4212,7 @@ index 0000000000..fe94cbe1ff +Yilun Xu +Yinan Wang +Ying A Wang ++Yingming Mao +Yingya Han +Yinjun Zhang +Yipeng Wang @@ -4280,14 +4302,14 @@ index 18d9edaf88..460c7fa96d 100644 Build System M: Bruce Richardson diff --git a/dpdk/VERSION b/dpdk/VERSION -index b570734337..071002af33 100644 +index b570734337..a371a94b55 100644 --- a/dpdk/VERSION +++ b/dpdk/VERSION @@ -1 +1 @@ -21.11.0 -+21.11.6 ++21.11.8 diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c -index c5fe440302..74dd72b3ac 100644 +index c5fe440302..1025598b6d 100644 --- a/dpdk/app/dumpcap/main.c +++ b/dpdk/app/dumpcap/main.c @@ -44,7 +44,6 @@ @@ -4306,7 +4328,19 @@ index c5fe440302..74dd72b3ac 100644 rte_strscpy(intf->name, name, sizeof(intf->name)); printf("Capturing on '%s'\n", name); -@@ -538,6 +538,7 @@ static void dpdk_init(void) +@@ -527,6 +527,11 @@ static void dpdk_init(void) + for (i = 1; i < RTE_DIM(args); i++) + eal_argv[i] = strdup(args[i]); + ++ for (i = 0; i < (unsigned int)eal_argc; i++) { ++ if (eal_argv[i] == NULL) ++ rte_panic("No memory\n"); ++ } ++ + if (rte_eal_init(eal_argc, eal_argv) < 0) + rte_exit(EXIT_FAILURE, "EAL init failed: is primary process running?\n"); + +@@ -538,6 +543,7 @@ static void dpdk_init(void) static struct rte_ring *create_ring(void) { struct rte_ring *ring; @@ -4314,7 +4348,7 @@ index c5fe440302..74dd72b3ac 100644 size_t size, log2; /* Find next power of 2 >= size. */ -@@ -551,31 +552,31 @@ static struct rte_ring *create_ring(void) +@@ -551,31 +557,31 @@ static struct rte_ring *create_ring(void) ring_size = size; } @@ -4359,7 +4393,7 @@ index c5fe440302..74dd72b3ac 100644 if (mp == NULL) rte_exit(EXIT_FAILURE, "Mempool (%s) creation failed: %s\n", pool_name, -@@ -636,6 +637,7 @@ static dumpcap_out_t create_output(void) +@@ -636,6 +642,7 @@ static dumpcap_out_t create_output(void) else { mode_t mode = group_read ? 
0640 : 0600; @@ -4367,7 +4401,7 @@ index c5fe440302..74dd72b3ac 100644 fd = open(output_name, O_WRONLY | O_CREAT, mode); if (fd < 0) rte_exit(EXIT_FAILURE, "Can not open \"%s\": %s\n", -@@ -679,8 +681,13 @@ static void enable_pdump(struct rte_ring *r, struct rte_mempool *mp) +@@ -679,8 +686,13 @@ static void enable_pdump(struct rte_ring *r, struct rte_mempool *mp) flags |= RTE_PDUMP_FLAG_PCAPNG; TAILQ_FOREACH(intf, &interfaces, next) { @@ -4383,9 +4417,15 @@ index c5fe440302..74dd72b3ac 100644 ret = rte_pdump_enable_bpf(intf->port, RTE_PDUMP_ALL_QUEUES, flags, snaplen, -@@ -778,8 +785,13 @@ int main(int argc, char **argv) +@@ -777,9 +789,19 @@ int main(int argc, char **argv) + { struct rte_ring *r; struct rte_mempool *mp; ++ struct sigaction action = { ++ .sa_flags = SA_RESTART, ++ .sa_handler = signal_handler, ++ }; ++ struct sigaction origaction; dumpcap_out_t out; + char *p; @@ -4398,7 +4438,23 @@ index c5fe440302..74dd72b3ac 100644 dpdk_init(); parse_opts(argc, argv); -@@ -837,7 +849,7 @@ int main(int argc, char **argv) +@@ -797,8 +819,13 @@ int main(int argc, char **argv) + start_time = create_timestamp(); + enable_pdump(r, mp); + +- signal(SIGINT, signal_handler); +- signal(SIGPIPE, SIG_IGN); ++ sigemptyset(&action.sa_mask); ++ sigaction(SIGTERM, &action, NULL); ++ sigaction(SIGINT, &action, NULL); ++ sigaction(SIGPIPE, &action, NULL); ++ sigaction(SIGHUP, NULL, &origaction); ++ if (origaction.sa_handler == SIG_DFL) ++ sigaction(SIGHUP, &action, NULL); + + enable_primary_monitor(); + +@@ -837,7 +864,7 @@ int main(int argc, char **argv) pcap_dump_close(out.dumper); cleanup_pdump_resources(); @@ -4408,10 +4464,36 @@ index c5fe440302..74dd72b3ac 100644 rte_mempool_free(mp); diff --git a/dpdk/app/pdump/main.c b/dpdk/app/pdump/main.c -index 46f9d25db0..101ac7db9a 100644 +index 46f9d25db0..0986034e44 100644 --- a/dpdk/app/pdump/main.c +++ b/dpdk/app/pdump/main.c -@@ -903,11 +903,21 @@ dump_packets_core(void *arg) +@@ -171,6 +171,9 @@ parse_device_id(const char *key __rte_unused, const char *value, + struct pdump_tuples *pt = extra_args; + + pt->device_id = strdup(value); ++ if (pt->device_id == NULL) ++ return -1; ++ + pt->dump_by_type = DEVICE_ID; + + return 0; +@@ -570,13 +573,9 @@ disable_primary_monitor(void) + } + + static void +-signal_handler(int sig_num) ++signal_handler(int sig_num __rte_unused) + { +- if (sig_num == SIGINT) { +- printf("\n\nSignal %d received, preparing to exit...\n", +- sig_num); +- quit_signal = 1; +- } ++ quit_signal = 1; + } + + static inline int +@@ -903,11 +902,21 @@ dump_packets_core(void *arg) return 0; } @@ -4434,7 +4516,7 @@ index 46f9d25db0..101ac7db9a 100644 if (!multiple_core_capture) { printf(" core (%u), capture for (%d) tuples\n", -@@ -933,12 +943,12 @@ dump_packets(void) +@@ -933,12 +942,12 @@ dump_packets(void) return; } @@ -4449,6 +4531,35 @@ index 46f9d25db0..101ac7db9a 100644 if (rte_eal_wait_lcore(lcore_id) < 0) rte_exit(EXIT_FAILURE, "failed to wait\n"); +@@ -963,6 +972,11 @@ enable_primary_monitor(void) + int + main(int argc, char **argv) + { ++ struct sigaction action = { ++ .sa_flags = SA_RESTART, ++ .sa_handler = signal_handler, ++ }; ++ struct sigaction origaction; + int diag; + int ret; + int i; +@@ -971,8 +985,14 @@ main(int argc, char **argv) + char mp_flag[] = "--proc-type=secondary"; + char *argp[argc + 2]; + +- /* catch ctrl-c so we can print on exit */ +- signal(SIGINT, signal_handler); ++ /* catch ctrl-c so we can cleanup on exit */ ++ sigemptyset(&action.sa_mask); ++ sigaction(SIGTERM, &action, NULL); ++ sigaction(SIGINT, 
&action, NULL); ++ sigaction(SIGPIPE, &action, NULL); ++ sigaction(SIGHUP, NULL, &origaction); ++ if (origaction.sa_handler == SIG_DFL) ++ sigaction(SIGHUP, &action, NULL); + + argp[0] = argv[0]; + argp[1] = n_flag; diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c index ce140aaf84..b52c3ffbc5 100644 --- a/dpdk/app/proc-info/main.c @@ -4680,7 +4791,7 @@ index ac06d7320a..0092293725 100644 TEST_ASSERT_SUCCESS(return_value = rte_bbdev_queue_info_get(dev_id, diff --git a/dpdk/app/test-bbdev/test_bbdev_perf.c b/dpdk/app/test-bbdev/test_bbdev_perf.c -index 0fa119a502..3f2bac6136 100644 +index 0fa119a502..c15ed34b46 100644 --- a/dpdk/app/test-bbdev/test_bbdev_perf.c +++ b/dpdk/app/test-bbdev/test_bbdev_perf.c @@ -70,13 +70,12 @@ @@ -4761,6 +4872,130 @@ index 0fa119a502..3f2bac6136 100644 "Length of data differ in original (%u) and filled (%u) op", total_data_size, pkt_len); +@@ -2816,15 +2819,6 @@ throughput_intr_lcore_ldpc_dec(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_ldpc_dec_ops( +- tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(num_to_enq != enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. It ensures that proper number of + * descriptors will be dequeued in callback +@@ -2834,6 +2828,15 @@ throughput_intr_lcore_ldpc_dec(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_ldpc_dec_ops( ++ tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(num_to_enq != enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ +@@ -2904,14 +2907,6 @@ throughput_intr_lcore_dec(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_dec_ops(tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(num_to_enq != enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. It ensures that proper number of + * descriptors will be dequeued in callback +@@ -2921,6 +2916,14 @@ throughput_intr_lcore_dec(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_dec_ops(tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(num_to_enq != enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ +@@ -2990,14 +2993,6 @@ throughput_intr_lcore_enc(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_enc_ops(tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(enq != num_to_enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. 
It ensures that proper number of + * descriptors will be dequeued in callback +@@ -3007,6 +3002,14 @@ throughput_intr_lcore_enc(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_enc_ops(tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(enq != num_to_enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ +@@ -3078,15 +3081,6 @@ throughput_intr_lcore_ldpc_enc(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_ldpc_enc_ops( +- tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(enq != num_to_enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. It ensures that proper number of + * descriptors will be dequeued in callback +@@ -3096,6 +3090,15 @@ throughput_intr_lcore_ldpc_enc(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_ldpc_enc_ops( ++ tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(enq != num_to_enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ @@ -4724,7 +4727,7 @@ offload_cost_test(struct active_device *ad, printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n"); return TEST_SKIPPED; @@ -5577,10 +5812,17 @@ index 031b238b20..5533b2e6fb 100644 enum rte_crypto_cipher_algorithm cipher_algo; enum rte_crypto_cipher_operation cipher_op; diff --git a/dpdk/app/test-crypto-perf/cperf_options_parsing.c b/dpdk/app/test-crypto-perf/cperf_options_parsing.c -index 59a9dc596a..1d91bea0c9 100644 +index 59a9dc596a..c29a1c31e1 100644 --- a/dpdk/app/test-crypto-perf/cperf_options_parsing.c +++ b/dpdk/app/test-crypto-perf/cperf_options_parsing.c -@@ -504,6 +504,7 @@ parse_test_file(struct cperf_options *opts, +@@ -501,9 +501,14 @@ parse_test_file(struct cperf_options *opts, + const char *arg) + { + opts->test_file = strdup(arg); ++ if (opts->test_file == NULL) { ++ RTE_LOG(ERR, USER1, "Dup vector file failed!\n"); ++ return -1; ++ } if (access(opts->test_file, F_OK) != -1) return 0; RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n"); @@ -5588,7 +5830,7 @@ index 59a9dc596a..1d91bea0c9 100644 return -1; } -@@ -1248,6 +1249,21 @@ cperf_options_check(struct cperf_options *options) +@@ -1248,6 +1253,21 @@ cperf_options_check(struct cperf_options *options) if (check_docsis_buffer_length(options) < 0) return -EINVAL; } @@ -5611,10 +5853,41 @@ index 59a9dc596a..1d91bea0c9 100644 return 0; diff --git a/dpdk/app/test-crypto-perf/cperf_test_common.c b/dpdk/app/test-crypto-perf/cperf_test_common.c -index 97a1ea47ad..5a65e11ba7 100644 +index 97a1ea47ad..a64043fd15 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_common.c +++ b/dpdk/app/test-crypto-perf/cperf_test_common.c -@@ -198,9 +198,11 @@ cperf_alloc_common_memory(const struct cperf_options *options, +@@ -50,7 +50,6 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, + { + uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf); + uint16_t remaining_segments = segments_nb; +- struct rte_mbuf *next_mbuf; + rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) + + mbuf_offset + mbuf_hdr_size; + +@@ -71,15 +70,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, + m->nb_segs = segments_nb; + m->port = 0xff; + rte_mbuf_refcnt_set(m, 1); 
+- next_mbuf = (struct rte_mbuf *) ((uint8_t *) m + +- mbuf_hdr_size + segment_sz); +- m->next = next_mbuf; +- m = next_mbuf; +- remaining_segments--; + ++ remaining_segments--; ++ if (remaining_segments > 0) { ++ m->next = (struct rte_mbuf *)((uint8_t *) m + mbuf_hdr_size + segment_sz); ++ m = m->next; ++ } else { ++ m->next = NULL; ++ } + } while (remaining_segments > 0); +- +- m->next = NULL; + } + + static void +@@ -198,9 +197,11 @@ cperf_alloc_common_memory(const struct cperf_options *options, RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size); uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz; uint32_t max_size = options->max_buffer_size + options->digest_sz; @@ -5629,6 +5902,16 @@ index 97a1ea47ad..5a65e11ba7 100644 uint32_t obj_size = crypto_op_total_size_padded + (mbuf_size * segments_nb); +@@ -226,7 +227,8 @@ cperf_alloc_common_memory(const struct cperf_options *options, + (mbuf_size * segments_nb); + params.dst_buf_offset = *dst_buf_offset; + /* Destination buffer will be one segment only */ +- obj_size += max_size + sizeof(struct rte_mbuf); ++ obj_size += max_size + sizeof(struct rte_mbuf) + ++ options->headroom_sz + options->tailroom_sz; + } + + *pool = rte_mempool_create_empty(pool_name, diff --git a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c index ba1f104f72..5842f29d43 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c @@ -5654,6 +5937,132 @@ index 1e9dfcfff0..1fe11df27b 100644 } rte_free(vector); +diff --git a/dpdk/app/test-crypto-perf/cperf_test_verify.c b/dpdk/app/test-crypto-perf/cperf_test_verify.c +index 496eb0de00..30f66618ed 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_verify.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_verify.c +@@ -100,8 +100,10 @@ cperf_verify_op(struct rte_crypto_op *op, + uint32_t len; + uint16_t nb_segs; + uint8_t *data; +- uint32_t cipher_offset, auth_offset; +- uint8_t cipher, auth; ++ uint32_t cipher_offset, auth_offset = 0; ++ bool cipher = false; ++ bool digest_verify = false; ++ bool is_encrypt = false; + int res = 0; + + if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) +@@ -139,57 +141,54 @@ cperf_verify_op(struct rte_crypto_op *op, + + switch (options->op_type) { + case CPERF_CIPHER_ONLY: +- cipher = 1; ++ cipher = true; + cipher_offset = 0; +- auth = 0; +- auth_offset = 0; +- break; +- case CPERF_CIPHER_THEN_AUTH: +- cipher = 1; +- cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ is_encrypt = options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT; + break; + case CPERF_AUTH_ONLY: +- cipher = 0; + cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) { ++ auth_offset = options->test_buffer_size; ++ digest_verify = true; ++ } + break; ++ case CPERF_CIPHER_THEN_AUTH: + case CPERF_AUTH_THEN_CIPHER: +- cipher = 1; ++ cipher = true; + cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { ++ auth_offset = options->test_buffer_size; ++ digest_verify = true; ++ is_encrypt = true; ++ } + break; + case CPERF_AEAD: +- cipher = 1; ++ cipher = true; + cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) { ++ auth_offset = options->test_buffer_size; ++ digest_verify = true; ++ is_encrypt = true; ++ } + break; + default: + res = 1; + goto out; + } + +- if (cipher == 1) { +- if 
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) +- res += memcmp(data + cipher_offset, ++ if (cipher) { ++ if (is_encrypt) ++ res += !!memcmp(data + cipher_offset, + vector->ciphertext.data, + options->test_buffer_size); + else +- res += memcmp(data + cipher_offset, ++ res += !!memcmp(data + cipher_offset, + vector->plaintext.data, + options->test_buffer_size); + } + +- if (auth == 1) { +- if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) +- res += memcmp(data + auth_offset, +- vector->digest.data, +- options->digest_sz); +- } ++ if (digest_verify) ++ res += !!memcmp(data + auth_offset, vector->digest.data, options->digest_sz); + + out: + rte_free(data); +@@ -301,7 +300,6 @@ cperf_verify_test_runner(void *test_ctx) + ops_needed, ctx->sess, ctx->options, + ctx->test_vector, iv_offset, &imix_idx, NULL); + +- + /* Populate the mbuf with the test vector, for verification */ + for (i = 0; i < ops_needed; i++) + cperf_mbuf_set(ops[i]->sym->m_src, +@@ -319,6 +317,17 @@ cperf_verify_test_runner(void *test_ctx) + } + #endif /* CPERF_LINEARIZATION_ENABLE */ + ++ /** ++ * When ops_needed is smaller than ops_enqd, the ++ * unused ops need to be moved to the front for ++ * next round use. ++ */ ++ if (unlikely(ops_enqd > ops_needed)) { ++ size_t nb_b_to_mov = ops_unused * sizeof(struct rte_crypto_op *); ++ ++ memmove(&ops[ops_needed], &ops[ops_enqd], nb_b_to_mov); ++ } ++ + /* Enqueue burst of ops on crypto device */ + ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, + ops, burst_size); diff --git a/dpdk/app/test-crypto-perf/main.c b/dpdk/app/test-crypto-perf/main.c index 6fdb92fb7c..db0ebd0050 100644 --- a/dpdk/app/test-crypto-perf/main.c @@ -6379,6 +6788,19 @@ index 629d3e0d31..f041a5e1d5 100644 + .stream_init = stream_init_5tuple_swap, .packet_fwd = pkt_burst_5tuple_swap, }; +diff --git a/dpdk/app/test-pmd/bpf_cmd.c b/dpdk/app/test-pmd/bpf_cmd.c +index 09c8aec0c0..e3bfd97252 100644 +--- a/dpdk/app/test-pmd/bpf_cmd.c ++++ b/dpdk/app/test-pmd/bpf_cmd.c +@@ -137,7 +137,7 @@ cmdline_parse_token_string_t cmd_load_bpf_prm = + cmdline_parse_inst_t cmd_operate_bpf_ld_parse = { + .f = cmd_operate_bpf_ld_parsed, + .data = NULL, +- .help_str = "bpf-load rx|tx ", ++ .help_str = "bpf-load rx|tx ", + .tokens = { + (void *)&cmd_load_bpf_start, + (void *)&cmd_load_bpf_dir, diff --git a/dpdk/app/test-pmd/cmd_flex_item.c b/dpdk/app/test-pmd/cmd_flex_item.c index 908bcb3f47..3e54724237 100644 --- a/dpdk/app/test-pmd/cmd_flex_item.c @@ -6458,9 +6880,22 @@ index 908bcb3f47..3e54724237 100644 port_flex_item_flush(portid_t port_id) { diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c -index 6e10afeedd..43857c8008 100644 +index 6e10afeedd..781b7ce0db 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c +@@ -68,10 +68,10 @@ + #include "cmdline_tm.h" + #include "bpf_cmd.h" + +-static struct cmdline *testpmd_cl; +- + static void cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue); + ++static struct cmdline *testpmd_cl; ++ + /* *** Help command with introduction. 
*** */ + struct cmd_help_brief_result { + cmdline_fixed_string_t help; @@ -504,6 +504,12 @@ static void cmd_help_long_parsed(void *parsed_result, "mac_addr add port (port_id) vf (vf_id) (mac_address)\n" " Add a MAC address for a VF on the port.\n\n" @@ -6969,8 +7404,55 @@ index 6e10afeedd..43857c8008 100644 (cmdline_parse_inst_t *)&cmd_del_port_meter_profile, (cmdline_parse_inst_t *)&cmd_create_port_meter, (cmdline_parse_inst_t *)&cmd_enable_port_meter, +@@ -17981,35 +18041,29 @@ cmdline_read_from_file(const char *filename) + printf("Read CLI commands from %s\n", filename); + } + ++void ++prompt_exit(void) ++{ ++ cmdline_quit(testpmd_cl); ++} ++ + /* prompt function, called from main on MAIN lcore */ + void + prompt(void) + { +- int ret; + /* initialize non-constant commands */ + cmd_set_fwd_mode_init(); + cmd_set_fwd_retry_mode_init(); + + testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> "); +- if (testpmd_cl == NULL) ++ if (testpmd_cl == NULL) { ++ fprintf(stderr, ++ "Failed to create stdin based cmdline context\n"); + return; +- +- ret = atexit(prompt_exit); +- if (ret != 0) +- fprintf(stderr, "Cannot set exit function for cmdline\n"); ++ } + + cmdline_interact(testpmd_cl); +- if (ret != 0) +- cmdline_stdin_exit(testpmd_cl); +-} +- +-void +-prompt_exit(void) +-{ +- if (testpmd_cl != NULL) { +- cmdline_quit(testpmd_cl); +- cmdline_stdin_exit(testpmd_cl); +- } ++ cmdline_stdin_exit(testpmd_cl); + } + + static void diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c -index bbe3dc0115..5c4544a753 100644 +index bbe3dc0115..3e2ef95a54 100644 --- a/dpdk/app/test-pmd/cmdline_flow.c +++ b/dpdk/app/test-pmd/cmdline_flow.c @@ -2162,7 +2162,7 @@ static const struct token token_list[] = { @@ -7000,7 +7482,20 @@ index bbe3dc0115..5c4544a753 100644 .next = NEXT(NEXT_ENTRY(COMMON_UNSIGNED)), .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)), .call = parse_tunnel, -@@ -7702,16 +7702,14 @@ parse_string(struct context *ctx, const struct token *token, +@@ -3781,9 +3781,12 @@ static const struct token token_list[] = { + [ITEM_CONNTRACK] = { + .name = "conntrack", + .help = "conntrack state", ++ .priv = PRIV_ITEM(CONNTRACK, ++ sizeof(struct rte_flow_item_conntrack)), + .next = NEXT(NEXT_ENTRY(ITEM_NEXT), NEXT_ENTRY(COMMON_UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_conntrack, flags)), ++ .call = parse_vc, + }, + [ITEM_PORT_REPRESENTOR] = { + .name = "port_representor", +@@ -7702,16 +7705,14 @@ parse_string(struct context *ctx, const struct token *token, static int parse_hex_string(const char *src, uint8_t *dst, uint32_t *size) { @@ -7021,7 +7516,7 @@ index bbe3dc0115..5c4544a753 100644 /* Convert chars to bytes */ while (left) { char tmp[3], *end = tmp; -@@ -9153,7 +9151,8 @@ cmd_set_raw_parsed(const struct buffer *in) +@@ -9153,7 +9154,8 @@ cmd_set_raw_parsed(const struct buffer *in) case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: opt = (const struct rte_flow_item_geneve_opt *) item->spec; @@ -7031,7 +7526,7 @@ index bbe3dc0115..5c4544a753 100644 if (opt->option_len && opt->data) { *total_size += opt->option_len * sizeof(uint32_t); -@@ -9210,19 +9209,15 @@ cmd_set_raw_parsed(const struct buffer *in) +@@ -9210,19 +9212,15 @@ cmd_set_raw_parsed(const struct buffer *in) } else { const struct rte_flow_item_gtp_psc *opt = item->spec; @@ -7098,7 +7593,7 @@ index bfbd43ca9b..c058b8946e 100644 (void *)&cmd_show_port_tm_level_cap_show, (void *)&cmd_show_port_tm_level_cap_port, diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c -index 
1722d6c8f8..fef7fa71e4 100644 +index 1722d6c8f8..e060a1de49 100644 --- a/dpdk/app/test-pmd/config.c +++ b/dpdk/app/test-pmd/config.c @@ -66,8 +66,6 @@ @@ -7403,6 +7898,18 @@ index 1722d6c8f8..fef7fa71e4 100644 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0 || rxq_conf->share_group == 0) /* Not shared rxq. */ +@@ -3030,9 +3144,9 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, + continue; + printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", + share_group, share_rxq); +- printf(" lcore %hhu Port %hu queue %hu\n", ++ printf(" lcore %u Port %hu queue %hu\n", + src_lc, src_port, src_rxq); +- printf(" lcore %hhu Port %hu queue %hu\n", ++ printf(" lcore %u Port %hu queue %hu\n", + lc_id, fs->rx_port, fs->rx_queue); + printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", + nb_rxq); @@ -3077,7 +3191,7 @@ pkt_fwd_shared_rxq_check(void) fs->lcore = fwd_lcores[lc_id]; port = &ports[fs->rx_port]; @@ -7412,7 +7919,50 @@ index 1722d6c8f8..fef7fa71e4 100644 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0 || rxq_conf->share_group == 0) /* Not shared rxq. */ -@@ -4719,6 +4833,8 @@ set_record_burst_stats(uint8_t on_off) +@@ -3213,7 +3327,6 @@ rss_fwd_config_setup(void) + queueid_t nb_q; + streamid_t sm_id; + int start; +- int end; + + nb_q = nb_rxq; + if (nb_q > nb_txq) +@@ -3221,7 +3334,7 @@ rss_fwd_config_setup(void) + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + cur_fwd_config.nb_fwd_ports = nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = +- (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); ++ (streamid_t) (nb_q / num_procs * cur_fwd_config.nb_fwd_ports); + + if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) + cur_fwd_config.nb_fwd_lcores = +@@ -3243,7 +3356,6 @@ rss_fwd_config_setup(void) + * the 2~3 queue for secondary process. 
+ */ + start = proc_id * nb_q / num_procs; +- end = start + nb_q / num_procs; + rxp = 0; + rxq = start; + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { +@@ -3262,8 +3374,6 @@ rss_fwd_config_setup(void) + continue; + rxp = 0; + rxq++; +- if (rxq >= end) +- rxq = start; + } + } + +@@ -3408,7 +3518,7 @@ icmp_echo_config_setup(void) + lcoreid_t lc_id; + uint16_t sm_id; + +- if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) ++ if ((lcoreid_t)(nb_txq * nb_fwd_ports) < nb_fwd_lcores) + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) + (nb_txq * nb_fwd_ports); + else +@@ -4719,6 +4829,8 @@ set_record_burst_stats(uint8_t on_off) record_burst_stats = on_off; } @@ -7421,7 +7971,7 @@ index 1722d6c8f8..fef7fa71e4 100644 static char* flowtype_to_str(uint16_t flow_type) { -@@ -4762,8 +4878,6 @@ flowtype_to_str(uint16_t flow_type) +@@ -4762,8 +4874,6 @@ flowtype_to_str(uint16_t flow_type) return NULL; } @@ -7430,7 +7980,7 @@ index 1722d6c8f8..fef7fa71e4 100644 static inline void print_fdir_mask(struct rte_eth_fdir_masks *mask) { -@@ -5185,6 +5299,25 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) +@@ -5185,6 +5295,25 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); } @@ -7457,7 +8007,7 @@ index 1722d6c8f8..fef7fa71e4 100644 eth_port_multicast_addr_list_set(portid_t port_id) { diff --git a/dpdk/app/test-pmd/csumonly.c b/dpdk/app/test-pmd/csumonly.c -index 2aeea243b6..5e494c4129 100644 +index 2aeea243b6..37cddf4690 100644 --- a/dpdk/app/test-pmd/csumonly.c +++ b/dpdk/app/test-pmd/csumonly.c @@ -222,15 +222,14 @@ parse_gtp(struct rte_udp_hdr *udp_hdr, @@ -7506,7 +8056,43 @@ index 2aeea243b6..5e494c4129 100644 return; update_tunnel_outer(info); -@@ -771,6 +768,28 @@ pkt_copy_split(const struct rte_mbuf *pkt) +@@ -565,15 +562,17 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, + uint64_t ol_flags = 0; + + if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) { +- ipv4_hdr->hdr_checksum = 0; + ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4; + +- if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ++ if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) { + ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM; +- else ++ } else { ++ ipv4_hdr->hdr_checksum = 0; + ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); +- } else ++ } ++ } else { + ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6; ++ } + + if (info->outer_l4_proto != IPPROTO_UDP) + return ol_flags; +@@ -586,13 +585,6 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, + + /* Skip SW outer UDP checksum generation if HW supports it */ + if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) { +- if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) +- udp_hdr->dgram_cksum +- = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +- else +- udp_hdr->dgram_cksum +- = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +- + ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM; + return ol_flags; + } +@@ -771,6 +763,28 @@ pkt_copy_split(const struct rte_mbuf *pkt) return md[0]; } @@ -7535,7 +8121,7 @@ index 2aeea243b6..5e494c4129 100644 /* * Receive a burst of packets, and for each packet: * - parse packet, and try to recognize a supported packet type (1) -@@ -796,7 +815,7 @@ pkt_copy_split(const struct rte_mbuf *pkt) +@@ -796,7 +810,7 @@ pkt_copy_split(const struct rte_mbuf *pkt) * * The testpmd command line for this forward engine sets the flags * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. 
They control @@ -7544,7 +8130,7 @@ index 2aeea243b6..5e494c4129 100644 * IP, UDP, TCP and SCTP flags always concern the inner layer. The * OUTER_IP is only useful for tunnel packets. */ -@@ -887,10 +906,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -887,10 +901,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) * and inner headers */ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); @@ -7561,7 +8147,7 @@ index 2aeea243b6..5e494c4129 100644 parse_ethernet(eth_hdr, &info); l3_hdr = (char *)eth_hdr + info.l2_len; -@@ -912,8 +933,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -912,8 +928,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE; goto tunnel_update; } @@ -7571,7 +8157,7 @@ index 2aeea243b6..5e494c4129 100644 if (info.is_tunnel) { tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN; -@@ -925,6 +945,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -925,6 +940,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) RTE_MBUF_F_TX_TUNNEL_GENEVE; goto tunnel_update; } @@ -7584,7 +8170,7 @@ index 2aeea243b6..5e494c4129 100644 } else if (info.l4_proto == IPPROTO_GRE) { struct simple_gre_hdr *gre_hdr; -@@ -1089,6 +1115,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -1089,6 +1110,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) fs->gro_times = 0; } } @@ -7593,7 +8179,7 @@ index 2aeea243b6..5e494c4129 100644 } #endif -@@ -1122,16 +1150,21 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -1122,16 +1145,21 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) tx_pkts_burst = gso_segments; nb_rx = nb_segments; @@ -7616,7 +8202,7 @@ index 2aeea243b6..5e494c4129 100644 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst, nb_prep); -@@ -1139,12 +1172,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -1139,12 +1167,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) /* * Retry if necessary */ @@ -7632,7 +8218,7 @@ index 2aeea243b6..5e494c4129 100644 } } fs->tx_packets += nb_tx; -@@ -1154,19 +1187,32 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -1154,19 +1182,32 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum; inc_tx_burst_stats(fs, nb_tx); @@ -7723,7 +8309,7 @@ index 99c94cb282..066f2a3ab7 100644 .packet_fwd = reply_to_icmp_echo_rqsts, }; diff --git a/dpdk/app/test-pmd/ieee1588fwd.c b/dpdk/app/test-pmd/ieee1588fwd.c -index 9ff817aa68..896d5ef26a 100644 +index 9ff817aa68..8d9ce4d6ce 100644 --- a/dpdk/app/test-pmd/ieee1588fwd.c +++ b/dpdk/app/test-pmd/ieee1588fwd.c @@ -184,13 +184,13 @@ ieee1588_packet_fwd(struct fwd_stream *fs) @@ -7741,10 +8327,32 @@ index 9ff817aa68..896d5ef26a 100644 /* * Check the TX timestamp. 
-@@ -211,9 +211,22 @@ port_ieee1588_fwd_end(portid_t pi) - rte_eth_timesync_disable(pi); +@@ -201,19 +201,41 @@ ieee1588_packet_fwd(struct fwd_stream *fs) + static int + port_ieee1588_fwd_begin(portid_t pi) + { +- rte_eth_timesync_enable(pi); +- return 0; ++ int ret; ++ ++ ret = rte_eth_timesync_enable(pi); ++ if (ret) ++ printf("Port %u enable PTP failed, ret = %d\n", pi, ret); ++ ++ return ret; } + static void + port_ieee1588_fwd_end(portid_t pi) + { +- rte_eth_timesync_disable(pi); ++ int ret; ++ ++ ret = rte_eth_timesync_disable(pi); ++ if (ret) ++ printf("Port %u disable PTP failed, ret = %d\n", pi, ret); ++} ++ +static void +port_ieee1588_stream_init(struct fwd_stream *fs) +{ @@ -7755,8 +8363,8 @@ index 9ff817aa68..896d5ef26a 100644 + tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state == + RTE_ETH_QUEUE_STATE_STOPPED; + fs->disabled = rx_stopped || tx_stopped; -+} -+ + } + struct fwd_engine ieee1588_fwd_engine = { .fwd_mode_name = "ieee1588", .port_fwd_begin = port_ieee1588_fwd_begin, @@ -7908,7 +8516,7 @@ index e4434bea95..1be5f77efe 100644 .packet_fwd = pkt_burst_noisy_vnf, }; diff --git a/dpdk/app/test-pmd/parameters.c b/dpdk/app/test-pmd/parameters.c -index f9185065af..e3c9757f3f 100644 +index f9185065af..411c07c0ea 100644 --- a/dpdk/app/test-pmd/parameters.c +++ b/dpdk/app/test-pmd/parameters.c @@ -61,6 +61,9 @@ usage(char* progname) @@ -7921,16 +8529,53 @@ index f9185065af..e3c9757f3f 100644 printf(" --nb-cores=N: set the number of forwarding cores " "(1 <= N <= %d).\n", nb_lcores); printf(" --nb-ports=N: set the number of forwarding ports " -@@ -110,7 +113,7 @@ usage(char* progname) +@@ -109,10 +112,6 @@ usage(char* progname) + "the packet will be enqueued into the rx drop-queue. " "If the drop-queue doesn't exist, the packet is dropped. 
" "By default drop-queue=127.\n"); - #ifdef RTE_LIB_LATENCYSTATS +-#ifdef RTE_LIB_LATENCYSTATS - printf(" --latencystats=N: enable latency and jitter statistcs " -+ printf(" --latencystats=N: enable latency and jitter statistics " - "monitoring on forwarding lcore id N.\n"); - #endif +- "monitoring on forwarding lcore id N.\n"); +-#endif printf(" --disable-crc-strip: disable CRC stripping by hardware.\n"); -@@ -940,11 +943,12 @@ launch_args_parse(int argc, char** argv) + printf(" --enable-scatter: enable scattered Rx.\n"); + printf(" --enable-lro: enable large receive offload.\n"); +@@ -173,8 +172,14 @@ usage(char* progname) + printf(" --disable-device-start: do not automatically start port\n"); + printf(" --no-lsc-interrupt: disable link status change interrupt.\n"); + printf(" --no-rmv-interrupt: disable device removal interrupt.\n"); ++#ifdef RTE_LIB_BITRATESTATS + printf(" --bitrate-stats=N: set the logical core N to perform " + "bit-rate calculation.\n"); ++#endif ++#ifdef RTE_LIB_LATENCYSTATS ++ printf(" --latencystats=N: enable latency and jitter statistics " ++ "monitoring on forwarding lcore id N.\n"); ++#endif + printf(" --print-event : " + "enable print of designated event or all of them.\n"); + printf(" --mask-event : " +@@ -763,7 +768,7 @@ launch_args_parse(int argc, char** argv) + n = strtoul(optarg, &end, 10); + if ((optarg[0] == '\0') || (end == NULL) || + (*end != '\0')) +- break; ++ rte_exit(EXIT_FAILURE, "Invalid stats-period value\n"); + + stats_period = n; + break; +@@ -864,8 +869,8 @@ launch_args_parse(int argc, char** argv) + } + if (!strcmp(lgopts[opt_idx].name, "nb-cores")) { + n = atoi(optarg); +- if (n > 0 && n <= nb_lcores) +- nb_fwd_lcores = (uint8_t) n; ++ if (n > 0 && (lcoreid_t)n <= nb_lcores) ++ nb_fwd_lcores = (lcoreid_t) n; + else + rte_exit(EXIT_FAILURE, + "nb-cores should be > 0 and <= %d\n", +@@ -940,11 +945,12 @@ launch_args_parse(int argc, char** argv) } if (!strcmp(lgopts[opt_idx].name, "total-num-mbufs")) { n = atoi(optarg); @@ -7990,7 +8635,7 @@ index da54a383fd..2e9047804b 100644 .packet_fwd = shared_rxq_fwd, }; diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c -index 55eb293cc0..5f34641e90 100644 +index 55eb293cc0..fbfc090d68 100644 --- a/dpdk/app/test-pmd/testpmd.c +++ b/dpdk/app/test-pmd/testpmd.c @@ -66,6 +66,9 @@ @@ -8023,7 +8668,7 @@ index 55eb293cc0..5f34641e90 100644 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. */ -uint8_t f_quit; -+static volatile uint8_t f_quit; ++volatile uint8_t f_quit; +uint8_t cl_quit; /* Quit testpmd from cmdline. 
*/ /* @@ -8736,7 +9381,7 @@ index 55eb293cc0..5f34641e90 100644 ~RTE_ETH_RX_OFFLOAD_RSS_HASH; } -@@ -4034,10 +4289,11 @@ init_port(void) +@@ -4034,23 +4289,17 @@ init_port(void) "rte_zmalloc(%d struct rte_port) failed\n", RTE_MAX_ETHPORTS); } @@ -8750,7 +9395,71 @@ index 55eb293cc0..5f34641e90 100644 /* Initialize ports NUMA structures */ memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); -@@ -4114,6 +4370,9 @@ main(int argc, char** argv) + memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); + } + +-static void +-force_quit(void) +-{ +- pmd_test_exit(); +- prompt_exit(); +-} +- + static void + print_stats(void) + { +@@ -4069,28 +4318,10 @@ print_stats(void) + } + + static void +-signal_handler(int signum) ++signal_handler(int signum __rte_unused) + { +- if (signum == SIGINT || signum == SIGTERM) { +- fprintf(stderr, "\nSignal %d received, preparing to exit...\n", +- signum); +-#ifdef RTE_LIB_PDUMP +- /* uninitialize packet capture framework */ +- rte_pdump_uninit(); +-#endif +-#ifdef RTE_LIB_LATENCYSTATS +- if (latencystats_enabled != 0) +- rte_latencystats_uninit(); +-#endif +- force_quit(); +- /* Set flag to indicate the force termination. */ +- f_quit = 1; +- /* exit with the expected status */ +-#ifndef RTE_EXEC_ENV_WINDOWS +- signal(signum, SIG_DFL); +- kill(getpid(), signum); +-#endif +- } ++ f_quit = 1; ++ prompt_exit(); + } + + int +@@ -4101,8 +4332,18 @@ main(int argc, char** argv) + uint16_t count; + int ret; + ++#ifdef RTE_EXEC_ENV_WINDOWS + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); ++#else ++ /* Want read() not to be restarted on signal */ ++ struct sigaction action = { ++ .sa_handler = signal_handler, ++ }; ++ ++ sigaction(SIGINT, &action, NULL); ++ sigaction(SIGTERM, &action, NULL); ++#endif + + testpmd_logtype = rte_log_register("testpmd"); + if (testpmd_logtype < 0) +@@ -4114,6 +4355,9 @@ main(int argc, char** argv) rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", rte_strerror(rte_errno)); @@ -8760,7 +9469,7 @@ index 55eb293cc0..5f34641e90 100644 ret = register_eth_event_callback(); if (ret != 0) rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); -@@ -4132,9 +4391,6 @@ main(int argc, char** argv) +@@ -4132,9 +4376,6 @@ main(int argc, char** argv) if (nb_ports == 0) TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); @@ -8770,7 +9479,7 @@ index 55eb293cc0..5f34641e90 100644 set_def_fwd_config(); if (nb_lcores == 0) rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" -@@ -4212,8 +4468,13 @@ main(int argc, char** argv) +@@ -4212,8 +4453,13 @@ main(int argc, char** argv) } } @@ -8786,29 +9495,88 @@ index 55eb293cc0..5f34641e90 100644 /* set all ports to promiscuous mode by default */ RTE_ETH_FOREACH_DEV(port_id) { +@@ -4261,15 +4507,9 @@ main(int argc, char** argv) + start_packet_forwarding(0); + } + prompt(); +- pmd_test_exit(); + } else + #endif + { +- char c; +- int rc; +- +- f_quit = 0; +- + printf("No commandline core given, start packet forwarding\n"); + start_packet_forwarding(tx_first); + if (stats_period != 0) { +@@ -4292,15 +4532,33 @@ main(int argc, char** argv) + prev_time = cur_time; + rte_delay_us_sleep(US_PER_S); + } +- } ++ } else { ++ char c; + +- printf("Press enter to exit\n"); +- rc = read(0, &c, 1); +- pmd_test_exit(); +- if (rc < 0) +- return 1; ++ printf("Press enter to exit\n"); ++ while (f_quit == 0) { ++ /* end-of-file or any character exits loop */ ++ if (read(0, &c, 1) >= 0) ++ break; ++ if (errno == EINTR) ++ continue; ++ rte_exit(EXIT_FAILURE, 
"Read failed: %s\n", ++ strerror(errno)); ++ } ++ } + } + ++ pmd_test_exit(); ++ ++#ifdef RTE_LIB_PDUMP ++ /* uninitialize packet capture framework */ ++ rte_pdump_uninit(); ++#endif ++#ifdef RTE_LIB_LATENCYSTATS ++ if (latencystats_enabled != 0) ++ rte_latencystats_uninit(); ++#endif ++ + ret = rte_eal_cleanup(); + if (ret != 0) + rte_exit(EXIT_FAILURE, diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h -index 2149ecd93a..e53320e630 100644 +index 2149ecd93a..e1618103bc 100644 --- a/dpdk/app/test-pmd/testpmd.h +++ b/dpdk/app/test-pmd/testpmd.h -@@ -32,6 +32,8 @@ +@@ -32,6 +32,9 @@ #define RTE_PORT_CLOSED (uint16_t)2 #define RTE_PORT_HANDLING (uint16_t)3 +extern uint8_t cl_quit; ++extern volatile uint8_t f_quit; + /* * It is used to allocate the memory for hash key. * The hash key size is NIC dependent. -@@ -72,6 +74,8 @@ +@@ -72,7 +75,9 @@ #define NUMA_NO_CONFIG 0xFF #define UMA_NO_CONFIG 0xFF +-typedef uint8_t lcoreid_t; +#define MIN_TOTAL_NUM_MBUFS 1024 + - typedef uint8_t lcoreid_t; ++typedef uint32_t lcoreid_t; typedef uint16_t portid_t; typedef uint16_t queueid_t; -@@ -134,6 +138,7 @@ struct fwd_stream { + typedef uint16_t streamid_t; +@@ -134,6 +139,7 @@ struct fwd_stream { portid_t tx_port; /**< forwarding port of received packets */ queueid_t tx_queue; /**< TX queue to send forwarded packets */ streamid_t peer_addr; /**< index of peer ethernet address of packets */ @@ -8816,7 +9584,7 @@ index 2149ecd93a..e53320e630 100644 unsigned int retry_enabled; -@@ -147,6 +152,7 @@ struct fwd_stream { +@@ -147,6 +153,7 @@ struct fwd_stream { /**< received packets has bad outer l4 checksum */ uint64_t rx_bad_outer_ip_csum; /**< received packets having bad outer ip checksum */ @@ -8824,7 +9592,7 @@ index 2149ecd93a..e53320e630 100644 #ifdef RTE_LIB_GRO unsigned int gro_times; /**< GRO operation times */ #endif -@@ -216,6 +222,18 @@ struct xstat_display_info { +@@ -216,6 +223,18 @@ struct xstat_display_info { bool allocated; }; @@ -8843,7 +9611,7 @@ index 2149ecd93a..e53320e630 100644 /** * The data structure associated with each port. */ -@@ -238,11 +256,13 @@ struct rte_port { +@@ -238,11 +257,13 @@ struct rte_port { uint8_t dcb_flag; /**< enable dcb */ uint16_t nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */ uint16_t nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */ @@ -8860,7 +9628,7 @@ index 2149ecd93a..e53320e630 100644 struct port_flow *flow_list; /**< Associated flows. */ struct port_indirect_action *actions_list; /**< Associated indirect actions. */ -@@ -296,12 +316,14 @@ struct fwd_lcore { +@@ -296,12 +317,14 @@ struct fwd_lcore { */ typedef int (*port_fwd_begin_t)(portid_t pi); typedef void (*port_fwd_end_t)(portid_t pi); @@ -8875,7 +9643,7 @@ index 2149ecd93a..e53320e630 100644 packet_fwd_t packet_fwd; /**< Mandatory. 
*/ }; -@@ -880,6 +902,7 @@ int port_action_handle_create(portid_t port_id, uint32_t id, +@@ -880,6 +903,7 @@ int port_action_handle_create(portid_t port_id, uint32_t id, const struct rte_flow_action *action); int port_action_handle_destroy(portid_t port_id, uint32_t n, const uint32_t *action); @@ -8883,7 +9651,7 @@ index 2149ecd93a..e53320e630 100644 struct rte_flow_action_handle *port_action_handle_get_by_id(portid_t port_id, uint32_t id); int port_action_handle_update(portid_t port_id, uint32_t id, -@@ -897,6 +920,7 @@ int port_flow_create(portid_t port_id, +@@ -897,6 +921,7 @@ int port_flow_create(portid_t port_id, int port_action_handle_query(portid_t port_id, uint32_t id); void update_age_action_context(const struct rte_flow_action *actions, struct port_flow *pf); @@ -8891,7 +9659,7 @@ index 2149ecd93a..e53320e630 100644 int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule); int port_flow_flush(portid_t port_id); int port_flow_dump(portid_t port_id, bool dump_all, -@@ -1091,7 +1115,6 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue, +@@ -1091,7 +1116,6 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue, void add_tx_dynf_callback(portid_t portid); void remove_tx_dynf_callback(portid_t portid); int update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen); @@ -8899,7 +9667,7 @@ index 2149ecd93a..e53320e630 100644 void flex_item_create(portid_t port_id, uint16_t flex_id, const char *filename); void flex_item_destroy(portid_t port_id, uint16_t flex_id); void port_flex_item_flush(portid_t port_id); -@@ -1101,6 +1124,8 @@ extern int flow_parse(const char *src, void *result, unsigned int size, +@@ -1101,6 +1125,8 @@ extern int flow_parse(const char *src, void *result, unsigned int size, struct rte_flow_item **pattern, struct rte_flow_action **actions); @@ -9260,6 +10028,112 @@ index 8ac24577ba..7556bb5512 100644 } pkt_seg->next = NULL; /* Last segment of packet. */ +diff --git a/dpdk/app/test/process.h b/dpdk/app/test/process.h +index 5b10cf64df..8bb9eeec12 100644 +--- a/dpdk/app/test/process.h ++++ b/dpdk/app/test/process.h +@@ -15,6 +15,7 @@ + #include + + #include /* strlcpy */ ++#include + + #ifdef RTE_EXEC_ENV_FREEBSD + #define self "curproc" +@@ -32,6 +33,34 @@ extern uint16_t flag_for_send_pkts; + #endif + #endif + ++#define PREFIX_ALLOW "--allow=" ++ ++static int ++add_parameter_allow(char **argv, int max_capacity) ++{ ++ struct rte_devargs *devargs; ++ int count = 0; ++ ++ RTE_EAL_DEVARGS_FOREACH(NULL, devargs) { ++ if (strlen(devargs->name) == 0) ++ continue; ++ ++ if (devargs->data == NULL || strlen(devargs->data) == 0) { ++ if (asprintf(&argv[count], PREFIX_ALLOW"%s", devargs->name) < 0) ++ break; ++ } else { ++ if (asprintf(&argv[count], PREFIX_ALLOW"%s,%s", ++ devargs->name, devargs->data) < 0) ++ break; ++ } ++ ++ if (++count == max_capacity) ++ break; ++ } ++ ++ return count; ++} ++ + /* + * launches a second copy of the test process using the given argv parameters, + * which should include argv[0] as the process name. 
To identify in the +@@ -41,8 +70,10 @@ extern uint16_t flag_for_send_pkts; + static inline int + process_dup(const char *const argv[], int numargs, const char *env_value) + { +- int num; +- char *argv_cpy[numargs + 1]; ++ int num = 0; ++ char **argv_cpy; ++ int allow_num; ++ int argv_num; + int i, status; + char path[32]; + #ifdef RTE_LIB_PDUMP +@@ -56,11 +87,21 @@ process_dup(const char *const argv[], int numargs, const char *env_value) + if (pid < 0) + return -1; + else if (pid == 0) { ++ allow_num = rte_devargs_type_count(RTE_DEVTYPE_ALLOWED); ++ argv_num = numargs + allow_num + 1; ++ argv_cpy = calloc(argv_num, sizeof(char *)); ++ if (!argv_cpy) ++ rte_panic("Memory allocation failed\n"); ++ + /* make a copy of the arguments to be passed to exec */ +- for (i = 0; i < numargs; i++) ++ for (i = 0; i < numargs; i++) { + argv_cpy[i] = strdup(argv[i]); +- argv_cpy[i] = NULL; +- num = numargs; ++ if (argv_cpy[i] == NULL) ++ rte_panic("Error dup args\n"); ++ } ++ if (allow_num > 0) ++ num = add_parameter_allow(&argv_cpy[i], allow_num); ++ num += numargs; + + #ifdef RTE_EXEC_ENV_LINUX + { +diff --git a/dpdk/app/test/test.c b/dpdk/app/test/test.c +index 5194131026..f7375cfe88 100644 +--- a/dpdk/app/test/test.c ++++ b/dpdk/app/test/test.c +@@ -350,11 +350,13 @@ unit_test_suite_runner(struct unit_test_suite *suite) + + if (test_success == TEST_SUCCESS) + suite->succeeded++; +- else if (test_success == TEST_SKIPPED) ++ else if (test_success == TEST_SKIPPED) { + suite->skipped++; +- else if (test_success == -ENOTSUP) ++ suite->executed--; ++ } else if (test_success == -ENOTSUP) { + suite->unsupported++; +- else ++ suite->executed--; ++ } else + suite->failed++; + } else if (test_success == -ENOTSUP) { + suite->unsupported++; diff --git a/dpdk/app/test/test_barrier.c b/dpdk/app/test/test_barrier.c index 6d6d48749c..ec69af25bf 100644 --- a/dpdk/app/test/test_barrier.c @@ -9334,6 +10208,46 @@ index 46bcb51f86..d70bb0fe85 100644 #endif /* RTE_HAS_LIBPCAP */ + +REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert); +diff --git a/dpdk/app/test/test_cfgfile.c b/dpdk/app/test/test_cfgfile.c +index 2f596affee..a5e3d8699c 100644 +--- a/dpdk/app/test/test_cfgfile.c ++++ b/dpdk/app/test/test_cfgfile.c +@@ -168,7 +168,7 @@ test_cfgfile_invalid_section_header(void) + struct rte_cfgfile *cfgfile; + + cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/invalid_section.ini", 0); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } +@@ -185,7 +185,7 @@ test_cfgfile_invalid_comment(void) + + cfgfile = rte_cfgfile_load_with_params(CFG_FILES_ETC "/sample2.ini", 0, + ¶ms); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } +@@ -196,7 +196,7 @@ test_cfgfile_invalid_key_value_pair(void) + struct rte_cfgfile *cfgfile; + + cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/empty_key_value.ini", 0); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } +@@ -236,7 +236,7 @@ test_cfgfile_missing_section(void) + struct rte_cfgfile *cfgfile; + + cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/missing_section.ini", 0); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } diff --git a/dpdk/app/test/test_common.c b/dpdk/app/test/test_common.c index ef177cecb1..f89e1eb7ee 
100644 --- a/dpdk/app/test/test_common.c @@ -9437,10 +10351,18 @@ index bf1d344359..8231f81e4a 100644 for (i = 0; i < CRC32_VEC_LEN1; i += 12) rte_memcpy(&test_data[i], crc32_vec1, 12); diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c -index 10b48cdadb..0bd4517bf8 100644 +index 10b48cdadb..245d514761 100644 --- a/dpdk/app/test/test_cryptodev.c +++ b/dpdk/app/test/test_cryptodev.c -@@ -135,6 +135,17 @@ security_proto_supported(enum rte_security_session_action_type action, +@@ -6,6 +6,7 @@ + #include + + #include ++#include + #include + #include + #include +@@ -135,6 +136,17 @@ security_proto_supported(enum rte_security_session_action_type action, static int dev_configure_and_start(uint64_t ff_disable); @@ -9458,7 +10380,7 @@ index 10b48cdadb..0bd4517bf8 100644 static struct rte_mbuf * setup_test_string(struct rte_mempool *mpool, const char *string, size_t len, uint8_t blocksize) -@@ -209,6 +220,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -209,6 +221,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, int enqueue_status, dequeue_status; struct crypto_unittest_params *ut_params = &unittest_params; int is_sgl = sop->m_src->nb_segs > 1; @@ -9466,7 +10388,7 @@ index 10b48cdadb..0bd4517bf8 100644 ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id); if (ctx_service_size < 0) { -@@ -247,6 +259,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -247,6 +260,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, ofs.raw = 0; @@ -9476,7 +10398,7 @@ index 10b48cdadb..0bd4517bf8 100644 if (is_cipher && is_auth) { cipher_offset = sop->cipher.data.offset; cipher_len = sop->cipher.data.length; -@@ -277,6 +292,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -277,6 +293,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, if (is_sgl) { uint32_t remaining_off = auth_offset + auth_len; struct rte_mbuf *sgl_buf = sop->m_src; @@ -9485,7 +10407,7 @@ index 10b48cdadb..0bd4517bf8 100644 while (remaining_off >= rte_pktmbuf_data_len(sgl_buf) && sgl_buf->next != NULL) { -@@ -293,7 +310,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -293,7 +311,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, /* Then check if digest-encrypted conditions are met */ if ((auth_offset + auth_len < cipher_offset + cipher_len) && (digest.iova == auth_end_iova) && is_sgl) @@ -9495,7 +10417,7 @@ index 10b48cdadb..0bd4517bf8 100644 ut_params->auth_xform.auth.digest_length); } else if (is_cipher) { -@@ -356,7 +374,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -356,7 +375,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, sgl.num = n; /* Out of place */ @@ -9504,7 +10426,7 @@ index 10b48cdadb..0bd4517bf8 100644 dest_sgl.vec = dest_data_vec; vec.dest_sgl = &dest_sgl; n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len, -@@ -3031,6 +3049,16 @@ create_wireless_algo_auth_cipher_operation( +@@ -3031,6 +3050,16 @@ create_wireless_algo_auth_cipher_operation( remaining_off -= rte_pktmbuf_data_len(sgl_buf); sgl_buf = sgl_buf->next; } @@ -9521,7 +10443,7 @@ index 10b48cdadb..0bd4517bf8 100644 sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, remaining_off); sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(sgl_buf, -@@ -4777,7 +4805,6 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) +@@ -4777,7 +4806,6 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) unsigned int plaintext_len; struct rte_cryptodev_info dev_info; @@ -9529,7 +10451,7 @@ index 
10b48cdadb..0bd4517bf8 100644 rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -4799,19 +4826,14 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) +@@ -4799,19 +4827,14 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) return TEST_SKIPPED; /* Check if device supports ZUC EEA3 */ @@ -9554,7 +10476,7 @@ index 10b48cdadb..0bd4517bf8 100644 return TEST_SKIPPED; /* Create ZUC session */ -@@ -4869,7 +4891,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) +@@ -4869,7 +4892,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) TEST_ASSERT_BUFFERS_ARE_EQUAL( ut_params->digest, tdata->digest.data, @@ -9563,7 +10485,7 @@ index 10b48cdadb..0bd4517bf8 100644 "ZUC Generated auth tag not as expected"); return 0; } -@@ -6023,7 +6045,7 @@ test_zuc_encryption(const struct wireless_test_data *tdata) +@@ -6023,7 +6046,7 @@ test_zuc_encryption(const struct wireless_test_data *tdata) retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->plaintext.len, @@ -9572,7 +10494,7 @@ index 10b48cdadb..0bd4517bf8 100644 if (retval < 0) return retval; -@@ -6118,7 +6140,7 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata) +@@ -6118,7 +6141,7 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata) /* Create ZUC operation */ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->plaintext.len, @@ -9581,7 +10503,7 @@ index 10b48cdadb..0bd4517bf8 100644 if (retval < 0) return retval; -@@ -6226,8 +6248,8 @@ test_zuc_authentication(const struct wireless_test_data *tdata) +@@ -6226,8 +6249,8 @@ test_zuc_authentication(const struct wireless_test_data *tdata) else ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); @@ -9591,7 +10513,7 @@ index 10b48cdadb..0bd4517bf8 100644 ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) + plaintext_pad_len; -@@ -6431,7 +6453,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, +@@ -6431,7 +6454,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, TEST_ASSERT_BUFFERS_ARE_EQUAL( ut_params->digest, tdata->digest.data, @@ -9600,7 +10522,7 @@ index 10b48cdadb..0bd4517bf8 100644 "ZUC Generated auth tag not as expected"); } return 0; -@@ -6469,6 +6491,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6469,6 +6492,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, tdata->digest.len) < 0) return TEST_SKIPPED; @@ -9610,7 +10532,7 @@ index 10b48cdadb..0bd4517bf8 100644 rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -6553,7 +6578,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6553,7 +6579,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, retval = create_wireless_algo_auth_cipher_operation( tdata->digest.data, tdata->digest.len, tdata->cipher_iv.data, tdata->cipher_iv.len, @@ -9619,7 +10541,7 @@ index 10b48cdadb..0bd4517bf8 100644 (tdata->digest.offset_bytes == 0 ? (verify ? 
ciphertext_pad_len : plaintext_pad_len) : tdata->digest.offset_bytes), -@@ -6638,7 +6663,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6638,7 +6664,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, TEST_ASSERT_BUFFERS_ARE_EQUAL( digest, tdata->digest.data, @@ -9628,7 +10550,7 @@ index 10b48cdadb..0bd4517bf8 100644 "ZUC Generated auth tag not as expected"); } return 0; -@@ -6857,6 +6882,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, +@@ -6857,6 +6883,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, static int test_snow3g_decryption_with_digest_test_case_1(void) { @@ -9636,7 +10558,7 @@ index 10b48cdadb..0bd4517bf8 100644 struct snow3g_hash_test_data snow3g_hash_data; struct rte_cryptodev_info dev_info; struct crypto_testsuite_params *ts_params = &testsuite_params; -@@ -6870,13 +6896,16 @@ test_snow3g_decryption_with_digest_test_case_1(void) +@@ -6870,13 +6897,16 @@ test_snow3g_decryption_with_digest_test_case_1(void) } /* @@ -9656,7 +10578,7 @@ index 10b48cdadb..0bd4517bf8 100644 } static int -@@ -7545,6 +7574,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, +@@ -7545,6 +7575,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, if (global_api_test_type == CRYPTODEV_RAW_API_TEST) return TEST_SKIPPED; @@ -9666,7 +10588,7 @@ index 10b48cdadb..0bd4517bf8 100644 rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -8068,7 +8100,7 @@ create_aead_operation(enum rte_crypto_aead_operation op, +@@ -8068,7 +8101,7 @@ create_aead_operation(enum rte_crypto_aead_operation op, rte_pktmbuf_iova(ut_params->ibuf); /* Copy AAD 18 bytes after the AAD pointer, according to the API */ memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len); @@ -9675,7 +10597,7 @@ index 10b48cdadb..0bd4517bf8 100644 tdata->aad.len); /* Append IV at the end of the crypto operation*/ -@@ -8077,7 +8109,7 @@ create_aead_operation(enum rte_crypto_aead_operation op, +@@ -8077,7 +8110,7 @@ create_aead_operation(enum rte_crypto_aead_operation op, /* Copy IV 1 byte after the IV pointer, according to the API */ rte_memcpy(iv_ptr + 1, tdata->iv.data, tdata->iv.len); @@ -9684,7 +10606,7 @@ index 10b48cdadb..0bd4517bf8 100644 tdata->iv.len); } else { aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16); -@@ -8230,7 +8262,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) +@@ -8230,7 +8263,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) tdata->key.data, tdata->key.len, tdata->aad.len, tdata->auth_tag.len, tdata->iv.len); @@ -9693,7 +10615,25 @@ index 10b48cdadb..0bd4517bf8 100644 return retval; if (tdata->aad.len > MBUF_SIZE) { -@@ -10540,9 +10572,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata) +@@ -8382,7 +8415,7 @@ static int test_pdcp_proto(int i, int oop, enum rte_crypto_cipher_operation opc, + /* Out of place support */ + if (oop) { + /* +- * For out-op-place we need to alloc another mbuf ++ * For out-of-place we need to alloc another mbuf + */ + ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool); + rte_pktmbuf_append(ut_params->obuf, output_vec_len); +@@ -8584,7 +8617,7 @@ test_pdcp_proto_SGL(int i, int oop, + /* Out of place support */ + if (oop) { + /* +- * For out-op-place we need to alloc another mbuf ++ * For out-of-place we need to alloc another mbuf + */ + ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool); + 
rte_pktmbuf_append(ut_params->obuf, frag_size_oop); +@@ -10540,9 +10573,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata) rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; @@ -9707,7 +10647,7 @@ index 10b48cdadb..0bd4517bf8 100644 /* not supported with CPU crypto */ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) -@@ -10876,7 +10910,7 @@ test_authenticated_decryption_sessionless( +@@ -10876,7 +10911,7 @@ test_authenticated_decryption_sessionless( key, tdata->key.len, tdata->aad.len, tdata->auth_tag.len, tdata->iv.len); @@ -9716,7 +10656,7 @@ index 10b48cdadb..0bd4517bf8 100644 return retval; ut_params->op->sym->m_src = ut_params->ibuf; -@@ -11073,11 +11107,11 @@ test_stats(void) +@@ -11073,11 +11108,11 @@ test_stats(void) TEST_ASSERT((stats.enqueued_count == 1), "rte_cryptodev_stats_get returned unexpected enqueued stat"); TEST_ASSERT((stats.dequeued_count == 1), @@ -9731,7 +10671,7 @@ index 10b48cdadb..0bd4517bf8 100644 /* invalid device but should ignore and not reset device stats*/ rte_cryptodev_stats_reset(ts_params->valid_devs[0] + 300); -@@ -11085,7 +11119,7 @@ test_stats(void) +@@ -11085,7 +11120,7 @@ test_stats(void) &stats), "rte_cryptodev_stats_get failed"); TEST_ASSERT((stats.enqueued_count == 1), @@ -9740,7 +10680,7 @@ index 10b48cdadb..0bd4517bf8 100644 /* check that a valid reset clears stats */ rte_cryptodev_stats_reset(ts_params->valid_devs[0]); -@@ -11093,9 +11127,9 @@ test_stats(void) +@@ -11093,9 +11128,9 @@ test_stats(void) &stats), "rte_cryptodev_stats_get failed"); TEST_ASSERT((stats.enqueued_count == 0), @@ -9752,7 +10692,42 @@ index 10b48cdadb..0bd4517bf8 100644 return TEST_SUCCESS; } -@@ -15690,7 +15724,7 @@ test_cryptodev_dpaa2_sec_raw_api(void) +@@ -11796,6 +11831,12 @@ test_enq_callback_setup(void) + /* Test with invalid crypto device */ + cb = rte_cryptodev_add_enq_callback(RTE_CRYPTO_MAX_DEVS, + qp_id, test_enq_callback, NULL); ++ if (rte_errno == ENOTSUP) { ++ RTE_LOG(ERR, USER1, "%s line %d: " ++ "rte_cryptodev_add_enq_callback() " ++ "Not supported, skipped\n", __func__, __LINE__); ++ return TEST_SKIPPED; ++ } + TEST_ASSERT_NULL(cb, "Add callback on qp %u on " + "cryptodev %u did not fail", + qp_id, RTE_CRYPTO_MAX_DEVS); +@@ -11896,6 +11937,12 @@ test_deq_callback_setup(void) + /* Test with invalid crypto device */ + cb = rte_cryptodev_add_deq_callback(RTE_CRYPTO_MAX_DEVS, + qp_id, test_deq_callback, NULL); ++ if (rte_errno == ENOTSUP) { ++ RTE_LOG(ERR, USER1, "%s line %d: " ++ "rte_cryptodev_add_deq_callback() " ++ "Not supported, skipped\n", __func__, __LINE__); ++ return TEST_SKIPPED; ++ } + TEST_ASSERT_NULL(cb, "Add callback on qp %u on " + "cryptodev %u did not fail", + qp_id, RTE_CRYPTO_MAX_DEVS); +@@ -13679,7 +13726,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, + } + + /* +- * For out-op-place we need to alloc another mbuf ++ * For out-of-place we need to alloc another mbuf + */ + if (oop) { + ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool); +@@ -15690,7 +15737,7 @@ test_cryptodev_dpaa2_sec_raw_api(void) static int test_cryptodev_dpaa_sec_raw_api(void) { @@ -9838,10 +10813,28 @@ index a797af1b00..6c4f6b6f13 100644 .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | diff --git a/dpdk/app/test/test_cryptodev_asym.c b/dpdk/app/test/test_cryptodev_asym.c -index 9d19a6d6d9..952b927d60 100644 +index 9d19a6d6d9..68cfc64760 100644 --- a/dpdk/app/test/test_cryptodev_asym.c +++ 
b/dpdk/app/test/test_cryptodev_asym.c -@@ -208,8 +208,8 @@ queue_ops_rsa_enc_dec(struct rte_cryptodev_asym_session *sess) +@@ -54,11 +54,15 @@ union test_case_structure { + struct rsa_test_data_2 rsa_data; + }; + ++struct vector_details { ++ uint32_t vector_size; ++ const void *address; ++}; + struct test_cases_array { + uint32_t size; +- const void *address[TEST_VECTOR_SIZE]; ++ struct vector_details details[TEST_VECTOR_SIZE]; + }; +-static struct test_cases_array test_vector = {0, { NULL } }; ++static struct test_cases_array test_vector = {0, {} }; + + static uint32_t test_index; + +@@ -208,8 +212,8 @@ queue_ops_rsa_enc_dec(struct rte_cryptodev_asym_session *sess) status = TEST_FAILED; goto error_exit; } @@ -9852,7 +10845,24 @@ index 9d19a6d6d9..952b927d60 100644 /* Use the resulted output as decryption Input vector*/ asym_op = result_op->asym; -@@ -558,7 +558,7 @@ test_one_case(const void *test_case, int sessionless) +@@ -525,14 +529,14 @@ test_cryptodev_asym_op(struct crypto_testsuite_params_asym *ts_params, + } + + static int +-test_one_case(const void *test_case, int sessionless) ++test_one_case(struct vector_details test_case, int sessionless) + { + int status = TEST_SUCCESS, i = 0; + char test_msg[ASYM_TEST_MSG_LEN + 1]; + + /* Map the case to union */ + union test_case_structure tc; +- memcpy(&tc, test_case, sizeof(tc)); ++ rte_memcpy(&tc, test_case.address, RTE_MIN(sizeof(tc), test_case.vector_size)); + + if (tc.modex.xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX + || tc.modex.xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { +@@ -558,7 +562,7 @@ test_one_case(const void *test_case, int sessionless) status = test_cryptodev_asym_op( &testsuite_params, &tc, test_msg, sessionless, i, @@ -9861,7 +10871,88 @@ index 9d19a6d6d9..952b927d60 100644 } if (status) break; -@@ -1717,7 +1717,7 @@ test_mod_exp(void) +@@ -584,7 +588,8 @@ load_test_vectors(void) + "TEST_VECTOR_SIZE too small\n"); + return -1; + } +- test_vector.address[test_vector.size] = &modex_test_case[i]; ++ test_vector.details[test_vector.size].address = &modex_test_case[i]; ++ test_vector.details[test_vector.size].vector_size = sizeof(modex_test_case[i]); + test_vector.size++; + } + /* Load MODINV vector*/ +@@ -595,7 +600,8 @@ load_test_vectors(void) + "TEST_VECTOR_SIZE too small\n"); + return -1; + } +- test_vector.address[test_vector.size] = &modinv_test_case[i]; ++ test_vector.details[test_vector.size].address = &modinv_test_case[i]; ++ test_vector.details[test_vector.size].vector_size = sizeof(modinv_test_case[i]); + test_vector.size++; + } + /* Load RSA vector*/ +@@ -606,7 +612,8 @@ load_test_vectors(void) + "TEST_VECTOR_SIZE too small\n"); + return -1; + } +- test_vector.address[test_vector.size] = &rsa_test_case_list[i]; ++ test_vector.details[test_vector.size].address = &rsa_test_case_list[i]; ++ test_vector.details[test_vector.size].vector_size = sizeof(rsa_test_case_list[i]); + test_vector.size++; + } + return 0; +@@ -631,12 +638,12 @@ test_one_by_one(void) + /* Go through all test cases */ + test_index = 0; + for (i = 0; i < test_vector.size; i++) { +- if (test_one_case(test_vector.address[i], 0) != TEST_SUCCESS) ++ if (test_one_case(test_vector.details[i], 0) != TEST_SUCCESS) + status = TEST_FAILED; + } + if (sessionless) { + for (i = 0; i < test_vector.size; i++) { +- if (test_one_case(test_vector.address[i], 1) ++ if (test_one_case(test_vector.details[i], 1) + != TEST_SUCCESS) + status = TEST_FAILED; + } +@@ -996,8 +1003,6 @@ ut_setup_asym(void) + qp_id, ts_params->valid_devs[0]); + } + +- 
rte_cryptodev_stats_reset(ts_params->valid_devs[0]); +- + /* Start the device */ + TEST_ASSERT_SUCCESS(rte_cryptodev_start(ts_params->valid_devs[0]), + "Failed to start cryptodev %u", +@@ -1010,9 +1015,6 @@ static void + ut_teardown_asym(void) + { + struct crypto_testsuite_params_asym *ts_params = &testsuite_params; +- struct rte_cryptodev_stats stats; +- +- rte_cryptodev_stats_get(ts_params->valid_devs[0], &stats); + + /* Stop the device */ + rte_cryptodev_stop(ts_params->valid_devs[0]); +@@ -1068,7 +1070,7 @@ test_capability(void) + RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)) { + RTE_LOG(INFO, USER1, + "Device doesn't support asymmetric. Test Skipped\n"); +- return TEST_SUCCESS; ++ return TEST_SKIPPED; + } + + /* print xform capability */ +@@ -1083,6 +1085,7 @@ test_capability(void) + capa = rte_cryptodev_asym_capability_get(dev_id, + (const struct + rte_cryptodev_asym_capability_idx *) &idx); ++ TEST_ASSERT_NOT_NULL(capa, "Failed to get asymmetric capability"); + print_asym_capa(capa); + } + } +@@ -1717,7 +1720,7 @@ test_mod_exp(void) } static int @@ -9870,7 +10961,7 @@ index 9d19a6d6d9..952b927d60 100644 { int status; -@@ -2291,7 +2291,7 @@ static struct unit_test_suite cryptodev_openssl_asym_testsuite = { +@@ -2291,7 +2294,7 @@ static struct unit_test_suite cryptodev_openssl_asym_testsuite = { TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_capability), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_dsa), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, @@ -12544,6 +13635,19 @@ index b206db27ae..e40c29c23b 100644 goto err; /* to test error handling we can provide null pointers for source or dest in copies. This +diff --git a/dpdk/app/test/test_eal_flags.c b/dpdk/app/test/test_eal_flags.c +index d7f4c2cd47..8916ea5882 100644 +--- a/dpdk/app/test/test_eal_flags.c ++++ b/dpdk/app/test/test_eal_flags.c +@@ -583,7 +583,7 @@ test_missing_c_flag(void) + launch_proc(argv26) == 0 || launch_proc(argv27) == 0 || + launch_proc(argv28) == 0 || launch_proc(argv30) == 0) { + printf("Error - " +- "process ran without error with invalid --lcore flag\n"); ++ "process ran without error with invalid --lcores flag\n"); + return -1; + } + diff --git a/dpdk/app/test/test_efd.c b/dpdk/app/test/test_efd.c index 1b249e0447..c10c48cf37 100644 --- a/dpdk/app/test/test_efd.c @@ -12588,6 +13692,21 @@ index 3d7e9fb93c..ea14094f02 100644 ret = rte_vdev_init( RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); +diff --git a/dpdk/app/test/test_event_eth_tx_adapter.c b/dpdk/app/test/test_event_eth_tx_adapter.c +index cfcc784351..a75f1bb51f 100644 +--- a/dpdk/app/test/test_event_eth_tx_adapter.c ++++ b/dpdk/app/test/test_event_eth_tx_adapter.c +@@ -471,6 +471,10 @@ tx_adapter_service(void) + int internal_port; + uint32_t cap; + ++ /* Initialize mbufs */ ++ for (i = 0; i < RING_SIZE; i++) ++ rte_pktmbuf_reset(&bufs[i]); ++ + memset(&dev_conf, 0, sizeof(dev_conf)); + err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); diff --git a/dpdk/app/test/test_event_timer_adapter.c b/dpdk/app/test/test_event_timer_adapter.c index 25bac2d155..3ded1c1efa 100644 --- a/dpdk/app/test/test_event_timer_adapter.c @@ -13005,6 +14124,338 @@ index 25bac2d155..3ded1c1efa 100644 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); rte_mempool_put(eventdev_test_mempool, evtim); +diff --git a/dpdk/app/test/test_eventdev.c b/dpdk/app/test/test_eventdev.c +index 843d9766b0..c4cbd84f27 100644 +--- a/dpdk/app/test/test_eventdev.c ++++ b/dpdk/app/test/test_eventdev.c +@@ -22,9 +22,15 @@ testsuite_setup(void) + uint8_t count; 
+ count = rte_event_dev_count(); + if (!count) { ++ int ret; ++ + printf("Failed to find a valid event device," +- " testing with event_skeleton device\n"); +- return rte_vdev_init("event_skeleton", NULL); ++ " trying with event_skeleton device\n"); ++ ret = rte_vdev_init("event_skeleton", NULL); ++ if (ret != 0) { ++ printf("No event device, skipping\n"); ++ return TEST_SKIPPED; ++ } + } + return TEST_SUCCESS; + } +diff --git a/dpdk/app/test/test_fbarray.c b/dpdk/app/test/test_fbarray.c +index a691bf4458..8a3a3d77ab 100644 +--- a/dpdk/app/test/test_fbarray.c ++++ b/dpdk/app/test/test_fbarray.c +@@ -21,23 +21,41 @@ struct fbarray_testsuite_params { + }; + + static struct fbarray_testsuite_params param; ++static struct fbarray_testsuite_params unaligned; + + #define FBARRAY_TEST_ARR_NAME "fbarray_autotest" + #define FBARRAY_TEST_LEN 256 ++#define FBARRAY_UNALIGNED_TEST_ARR_NAME "fbarray_unaligned_autotest" ++#define FBARRAY_UNALIGNED_TEST_LEN 60 + #define FBARRAY_TEST_ELT_SZ (sizeof(int)) + + static int autotest_setup(void) + { +- return rte_fbarray_init(¶m.arr, FBARRAY_TEST_ARR_NAME, ++ int ret; ++ ++ ret = rte_fbarray_init(¶m.arr, FBARRAY_TEST_ARR_NAME, + FBARRAY_TEST_LEN, FBARRAY_TEST_ELT_SZ); ++ if (ret) { ++ printf("Failed to initialize test array\n"); ++ return -1; ++ } ++ ret = rte_fbarray_init(&unaligned.arr, FBARRAY_UNALIGNED_TEST_ARR_NAME, ++ FBARRAY_UNALIGNED_TEST_LEN, FBARRAY_TEST_ELT_SZ); ++ if (ret) { ++ printf("Failed to initialize unaligned test array\n"); ++ rte_fbarray_destroy(¶m.arr); ++ return -1; ++ } ++ return 0; + } + + static void autotest_teardown(void) + { + rte_fbarray_destroy(¶m.arr); ++ rte_fbarray_destroy(&unaligned.arr); + } + +-static int init_array(void) ++static int init_aligned(void) + { + int i; + for (i = param.start; i <= param.end; i++) { +@@ -47,11 +65,35 @@ static int init_array(void) + return 0; + } + +-static void reset_array(void) ++static int init_unaligned(void) ++{ ++ int i; ++ for (i = unaligned.start; i <= unaligned.end; i++) { ++ if (rte_fbarray_set_used(&unaligned.arr, i)) ++ return -1; ++ } ++ return 0; ++} ++ ++static void reset_aligned(void) + { + int i; + for (i = 0; i < FBARRAY_TEST_LEN; i++) + rte_fbarray_set_free(¶m.arr, i); ++ /* reset param as well */ ++ param.start = -1; ++ param.end = -1; ++} ++ ++static void reset_unaligned(void) ++{ ++ int i; ++ for (i = 0; i < FBARRAY_UNALIGNED_TEST_LEN; i++) ++ rte_fbarray_set_free(&unaligned.arr, i); ++ /* reset param as well */ ++ unaligned.start = -1; ++ unaligned.end = -1; ++ + } + + static int first_msk_test_setup(void) +@@ -59,7 +101,7 @@ static int first_msk_test_setup(void) + /* put all within first mask */ + param.start = 3; + param.end = 10; +- return init_array(); ++ return init_aligned(); + } + + static int cross_msk_test_setup(void) +@@ -67,7 +109,7 @@ static int cross_msk_test_setup(void) + /* put all within second and third mask */ + param.start = 70; + param.end = 160; +- return init_array(); ++ return init_aligned(); + } + + static int multi_msk_test_setup(void) +@@ -75,7 +117,7 @@ static int multi_msk_test_setup(void) + /* put all within first and last mask */ + param.start = 3; + param.end = FBARRAY_TEST_LEN - 20; +- return init_array(); ++ return init_aligned(); + } + + static int last_msk_test_setup(void) +@@ -83,7 +125,7 @@ static int last_msk_test_setup(void) + /* put all within last mask */ + param.start = FBARRAY_TEST_LEN - 20; + param.end = FBARRAY_TEST_LEN - 1; +- return init_array(); ++ return init_aligned(); + } + + static int full_msk_test_setup(void) +@@ -91,16 
+133,31 @@ static int full_msk_test_setup(void) + /* fill entire mask */ + param.start = 0; + param.end = FBARRAY_TEST_LEN - 1; +- return init_array(); ++ return init_aligned(); + } + +-static int empty_msk_test_setup(void) ++static int lookahead_test_setup(void) + { +- /* do not fill anything in */ +- reset_array(); +- param.start = -1; +- param.end = -1; +- return 0; ++ /* set index 64 as used */ ++ param.start = 64; ++ param.end = 64; ++ return init_aligned(); ++} ++ ++static int lookbehind_test_setup(void) ++{ ++ /* set index 63 as used */ ++ param.start = 63; ++ param.end = 63; ++ return init_aligned(); ++} ++ ++static int unaligned_test_setup(void) ++{ ++ unaligned.start = 0; ++ /* leave one free bit at the end */ ++ unaligned.end = FBARRAY_UNALIGNED_TEST_LEN - 2; ++ return init_unaligned(); + } + + static int test_invalid(void) +@@ -454,7 +511,7 @@ static int test_basic(void) + if (check_free()) + return TEST_FAILED; + +- reset_array(); ++ reset_aligned(); + + return TEST_SUCCESS; + } +@@ -697,6 +754,26 @@ static int test_find(void) + return TEST_SUCCESS; + } + ++static int test_find_unaligned(void) ++{ ++ TEST_ASSERT_EQUAL((int)unaligned.arr.count, unaligned.end - unaligned.start + 1, ++ "Wrong element count\n"); ++ /* ensure space is free before start */ ++ if (ensure_correct(&unaligned.arr, 0, unaligned.start - 1, false)) ++ return TEST_FAILED; ++ /* ensure space is occupied where it's supposed to be */ ++ if (ensure_correct(&unaligned.arr, unaligned.start, unaligned.end, true)) ++ return TEST_FAILED; ++ /* ensure space after end is free as well */ ++ if (ensure_correct(&unaligned.arr, unaligned.end + 1, FBARRAY_UNALIGNED_TEST_LEN - 1, ++ false)) ++ return TEST_FAILED; ++ /* test if find_biggest API's work correctly */ ++ if (test_biggest(&unaligned.arr, unaligned.start, unaligned.end)) ++ return TEST_FAILED; ++ return TEST_SUCCESS; ++} ++ + static int test_empty(void) + { + TEST_ASSERT_EQUAL((int)param.arr.count, 0, "Wrong element count\n"); +@@ -709,6 +786,87 @@ static int test_empty(void) + return TEST_SUCCESS; + } + ++static int test_lookahead(void) ++{ ++ int ret; ++ ++ /* run regular test first */ ++ ret = test_find(); ++ if (ret != TEST_SUCCESS) ++ return ret; ++ ++ /* test if we can find free chunk while not starting with 0 */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(¶m.arr, 1, param.start), ++ param.start + 1, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} ++ ++static int test_lookbehind(void) ++{ ++ int ret, free_len = 2; ++ ++ /* run regular test first */ ++ ret = test_find(); ++ if (ret != TEST_SUCCESS) ++ return ret; ++ ++ /* test if we can find free chunk while crossing mask boundary */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(¶m.arr, param.start + 1, free_len), ++ param.start - free_len, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} ++ ++static int test_lookahead_mask(void) ++{ ++ /* ++ * There is a certain type of lookahead behavior we want to test here, ++ * namely masking of bits that were scanned with lookahead but that we ++ * know do not match our criteria. This is achieved in following steps: ++ * ++ * 0. Look for a big enough chunk of free space (say, 62 elements) ++ * 1. Trigger lookahead by breaking a run somewhere inside mask 0 ++ * (indices 0-63) ++ * 2. Fail lookahead by breaking the run somewhere inside mask 1 ++ * (indices 64-127) ++ * 3. 
Ensure that we can still find free space in mask 1 afterwards ++ */ ++ ++ /* break run on first mask */ ++ rte_fbarray_set_used(¶m.arr, 61); ++ /* break run on second mask */ ++ rte_fbarray_set_used(¶m.arr, 70); ++ ++ /* we expect to find free space at 71 */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(¶m.arr, 0, 62), ++ 71, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} ++ ++static int test_lookbehind_mask(void) ++{ ++ /* ++ * There is a certain type of lookbehind behavior we want to test here, ++ * namely masking of bits that were scanned with lookbehind but that we ++ * know do not match our criteria. This is achieved in two steps: ++ * ++ * 0. Look for a big enough chunk of free space (say, 62 elements) ++ * 1. Trigger lookbehind by breaking a run somewhere inside mask 2 ++ * (indices 128-191) ++ * 2. Fail lookbehind by breaking the run somewhere inside mask 1 ++ * (indices 64-127) ++ * 3. Ensure that we can still find free space in mask 1 afterwards ++ */ ++ ++ /* break run on mask 2 */ ++ rte_fbarray_set_used(¶m.arr, 130); ++ /* break run on mask 1 */ ++ rte_fbarray_set_used(¶m.arr, 70); ++ ++ /* start from 190, we expect to find free space at 8 */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(¶m.arr, 190, 62), ++ 8, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} + + static struct unit_test_suite fbarray_test_suite = { + .suite_name = "fbarray autotest", +@@ -717,12 +875,19 @@ static struct unit_test_suite fbarray_test_suite = { + .unit_test_cases = { + TEST_CASE(test_invalid), + TEST_CASE(test_basic), +- TEST_CASE_ST(first_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(cross_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(multi_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(last_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(full_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(empty_msk_test_setup, reset_array, test_empty), ++ TEST_CASE_ST(first_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(cross_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(multi_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(last_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(full_msk_test_setup, reset_aligned, test_find), ++ /* empty test does not need setup */ ++ TEST_CASE_ST(NULL, reset_aligned, test_empty), ++ TEST_CASE_ST(lookahead_test_setup, reset_aligned, test_lookahead), ++ TEST_CASE_ST(lookbehind_test_setup, reset_aligned, test_lookbehind), ++ /* setup for these tests is more complex so do it in test func */ ++ TEST_CASE_ST(NULL, reset_aligned, test_lookahead_mask), ++ TEST_CASE_ST(NULL, reset_aligned, test_lookbehind_mask), ++ TEST_CASE_ST(unaligned_test_setup, reset_unaligned, test_find_unaligned), + TEST_CASES_END() + } + }; diff --git a/dpdk/app/test/test_fib_perf.c b/dpdk/app/test/test_fib_perf.c index 86b2f832b8..7a25fe8df7 100644 --- a/dpdk/app/test/test_fib_perf.c @@ -13533,7 +14984,7 @@ index 6d9249f831..9008038bfa 100644 } rte_free(mem); diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c -index f54d1d7c00..9c0ac63f92 100644 +index f54d1d7c00..35c0989add 100644 --- a/dpdk/app/test/test_mbuf.c +++ b/dpdk/app/test/test_mbuf.c @@ -1172,37 +1172,16 @@ test_refcnt_mbuf(void) @@ -13654,7 +15105,25 @@ index f54d1d7c00..9c0ac63f92 100644 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) { if (data_copy[off] != (char)0xcc) GOTO_FAIL("Data corrupted at offset %u", off); -@@ -2749,6 +2724,7 @@ test_nb_segs_and_next_reset(void) +@@ -2372,16 +2347,13 @@ 
test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool) + GOTO_FAIL("%s: External buffer is not attached to mbuf\n", + __func__); + +- /* allocate one more mbuf */ ++ /* allocate one more mbuf, it is attached to the same external buffer */ + clone = rte_pktmbuf_clone(m, pktmbuf_pool); + if (clone == NULL) + GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__); + if (rte_pktmbuf_pkt_len(clone) != 0) + GOTO_FAIL("%s: Bad packet length\n", __func__); + +- /* attach the same external buffer to the cloned mbuf */ +- rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len, +- ret_shinfo); + if (clone->ol_flags != RTE_MBUF_F_EXTERNAL) + GOTO_FAIL("%s: External buffer is not attached to mbuf\n", + __func__); +@@ -2749,6 +2721,7 @@ test_nb_segs_and_next_reset(void) /* split m0 chain in two, between m1 and m2 */ m0->nb_segs = 2; @@ -13662,7 +15131,7 @@ index f54d1d7c00..9c0ac63f92 100644 m1->next = NULL; m2->nb_segs = 1; -@@ -2769,6 +2745,7 @@ test_nb_segs_and_next_reset(void) +@@ -2769,6 +2742,7 @@ test_nb_segs_and_next_reset(void) m2->nb_segs != 1 || m2->next != NULL) GOTO_FAIL("nb_segs or next was not reset properly"); @@ -13904,6 +15373,19 @@ index 0aa9dc1b1c..4094057b27 100644 printf("Total packets inject to prime ports = %u\n", idx); packets_per_second = (link_mbps * 1000 * 1000) / +diff --git a/dpdk/app/test/test_power.c b/dpdk/app/test/test_power.c +index b7b5561348..a1b32adf58 100644 +--- a/dpdk/app/test/test_power.c ++++ b/dpdk/app/test/test_power.c +@@ -142,7 +142,7 @@ test_power(void) + /* Test setting a valid environment */ + ret = rte_power_set_env(envs[i]); + if (ret != 0) { +- printf("Unexpectedly unsucceeded on setting a valid environment\n"); ++ printf("Unexpectedly unsuccessful on setting a valid environment\n"); + return -1; + } + diff --git a/dpdk/app/test/test_power_cpufreq.c b/dpdk/app/test/test_power_cpufreq.c index 1a9549527e..4d013cd7bb 100644 --- a/dpdk/app/test/test_power_cpufreq.c @@ -14283,11 +15765,59 @@ index 22ea0ba375..e1c600e40f 100644 error('missing python module: @0@'.format(module)) endif endforeach +diff --git a/dpdk/buildtools/pmdinfogen.py b/dpdk/buildtools/pmdinfogen.py +index 2a44f17bda..dfb89500c0 100755 +--- a/dpdk/buildtools/pmdinfogen.py ++++ b/dpdk/buildtools/pmdinfogen.py +@@ -6,6 +6,7 @@ + import argparse + import ctypes + import json ++import re + import sys + import tempfile + +@@ -66,11 +67,11 @@ def _get_symbol_by_name(self, name): + return [symbol] + return None + +- def find_by_prefix(self, prefix): +- prefix = prefix.encode("utf-8") if self._legacy_elftools else prefix ++ def find_by_pattern(self, pattern): ++ pattern = pattern.encode("utf-8") if self._legacy_elftools else pattern + for i in range(self._symtab.num_symbols()): + symbol = self._symtab.get_symbol(i) +- if symbol.name.startswith(prefix): ++ if re.match(pattern, symbol.name): + yield ELFSymbol(self._image, symbol) + + +@@ -97,9 +98,9 @@ def __init__(self, data): + def is_big_endian(self): + return False + +- def find_by_prefix(self, prefix): ++ def find_by_pattern(self, pattern): + for symbol in self._image.symbols: +- if symbol.name.startswith(prefix): ++ if re.match(pattern, symbol.name): + yield COFFSymbol(self._image, symbol) + + def find_by_name(self, name): +@@ -199,7 +200,7 @@ def dump(self, file): + + def load_drivers(image): + drivers = [] +- for symbol in image.find_by_prefix("this_pmd_name"): ++ for symbol in image.find_by_pattern("^this_pmd_name[0-9]+$"): + drivers.append(Driver.load(image, symbol)) + return drivers + diff --git 
a/dpdk/config/arm/arm32_armv8_linux_gcc b/dpdk/config/arm/arm32_armv8_linux_gcc -index 89f8a12881..0d4618ea4a 100644 +index 89f8a12881..687904782e 100644 --- a/dpdk/config/arm/arm32_armv8_linux_gcc +++ b/dpdk/config/arm/arm32_armv8_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'arm-linux-gnueabihf-gcc' -cpp = 'arm-linux-gnueabihf-cpp' @@ -14295,11 +15825,15 @@ index 89f8a12881..0d4618ea4a 100644 ar = 'arm-linux-gnueabihf-gcc-ar' strip = 'arm-linux-gnueabihf-strip' pkgconfig = 'arm-linux-gnueabihf-pkg-config' ++pkg-config = 'arm-linux-gnueabihf-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_armada_linux_gcc b/dpdk/config/arm/arm64_armada_linux_gcc -index 301418949b..5043b82651 100644 +index 301418949b..64d702ce00 100644 --- a/dpdk/config/arm/arm64_armada_linux_gcc +++ b/dpdk/config/arm/arm64_armada_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,10 +1,11 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14307,12 +15841,17 @@ index 301418949b..5043b82651 100644 ar = 'aarch64-linux-gnu-ar' as = 'aarch64-linux-gnu-as' strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu new file mode 100644 -index 0000000000..db488d75f4 +index 0000000000..e857774601 --- /dev/null +++ b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu -@@ -0,0 +1,19 @@ +@@ -0,0 +1,20 @@ +[binaries] +c = 'clang' +cpp = 'clang++' @@ -14321,6 +15860,7 @@ index 0000000000..db488d75f4 +llvm-config = 'llvm-config' +pcap-config = 'llvm-config' +pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + +[host_machine] +system = 'linux' @@ -14366,10 +15906,10 @@ index 0000000000..8e2e3fa9c0 +arm64_armv8_linux_clang_ubuntu \ No newline at end of file diff --git a/dpdk/config/arm/arm64_armv8_linux_gcc b/dpdk/config/arm/arm64_armv8_linux_gcc -index 5391d35389..5c32f6b9ca 100644 +index 5391d35389..b0654f39eb 100644 --- a/dpdk/config/arm/arm64_armv8_linux_gcc +++ b/dpdk/config/arm/arm64_armv8_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14377,11 +15917,15 @@ index 5391d35389..5c32f6b9ca 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_bluefield_linux_gcc b/dpdk/config/arm/arm64_bluefield_linux_gcc -index 248a9f031a..df6eccc046 100644 +index 248a9f031a..347864deeb 100644 --- a/dpdk/config/arm/arm64_bluefield_linux_gcc +++ b/dpdk/config/arm/arm64_bluefield_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14389,11 +15933,15 @@ index 248a9f031a..df6eccc046 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_centriq2400_linux_gcc b/dpdk/config/arm/arm64_centriq2400_linux_gcc -index dfe9110331..ddffc0503a 100644 +index dfe9110331..bdc1e590e2 100644 --- a/dpdk/config/arm/arm64_centriq2400_linux_gcc +++ b/dpdk/config/arm/arm64_centriq2400_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 
'aarch64-linux-gnu-cpp' @@ -14401,11 +15949,15 @@ index dfe9110331..ddffc0503a 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_cn10k_linux_gcc b/dpdk/config/arm/arm64_cn10k_linux_gcc -index 88e5f10945..19068f0ec9 100644 +index 88e5f10945..7834d8dbac 100644 --- a/dpdk/config/arm/arm64_cn10k_linux_gcc +++ b/dpdk/config/arm/arm64_cn10k_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14413,11 +15965,15 @@ index 88e5f10945..19068f0ec9 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_dpaa_linux_gcc b/dpdk/config/arm/arm64_dpaa_linux_gcc -index e9d5fd31fc..70df99fb02 100644 +index e9d5fd31fc..eeaff59161 100644 --- a/dpdk/config/arm/arm64_dpaa_linux_gcc +++ b/dpdk/config/arm/arm64_dpaa_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,10 +1,11 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14425,11 +15981,16 @@ index e9d5fd31fc..70df99fb02 100644 ar = 'aarch64-linux-gnu-ar' as = 'aarch64-linux-gnu-as' strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_emag_linux_gcc b/dpdk/config/arm/arm64_emag_linux_gcc -index 9cdd931180..06f5eaecd0 100644 +index 9cdd931180..d7e58c37c1 100644 --- a/dpdk/config/arm/arm64_emag_linux_gcc +++ b/dpdk/config/arm/arm64_emag_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14437,11 +15998,15 @@ index 9cdd931180..06f5eaecd0 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_graviton2_linux_gcc b/dpdk/config/arm/arm64_graviton2_linux_gcc -index 8016fd236c..24b2dbcca8 100644 +index 8016fd236c..75f79198e4 100644 --- a/dpdk/config/arm/arm64_graviton2_linux_gcc +++ b/dpdk/config/arm/arm64_graviton2_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14449,11 +16014,15 @@ index 8016fd236c..24b2dbcca8 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_kunpeng920_linux_gcc b/dpdk/config/arm/arm64_kunpeng920_linux_gcc -index c4685b2458..4a71531e3e 100644 +index c4685b2458..5671b66f11 100644 --- a/dpdk/config/arm/arm64_kunpeng920_linux_gcc +++ b/dpdk/config/arm/arm64_kunpeng920_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14461,11 +16030,15 @@ index c4685b2458..4a71531e3e 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_kunpeng930_linux_gcc b/dpdk/config/arm/arm64_kunpeng930_linux_gcc -index fb85d2d710..383f0b0313 100644 +index 
fb85d2d710..9ead4b12a3 100644 --- a/dpdk/config/arm/arm64_kunpeng930_linux_gcc +++ b/dpdk/config/arm/arm64_kunpeng930_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14473,11 +16046,15 @@ index fb85d2d710..383f0b0313 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_n1sdp_linux_gcc b/dpdk/config/arm/arm64_n1sdp_linux_gcc -index 0df283e2f4..5f6356caa2 100644 +index 0df283e2f4..3432ead3b3 100644 --- a/dpdk/config/arm/arm64_n1sdp_linux_gcc +++ b/dpdk/config/arm/arm64_n1sdp_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14485,11 +16062,15 @@ index 0df283e2f4..5f6356caa2 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_n2_linux_gcc b/dpdk/config/arm/arm64_n2_linux_gcc -index 036aee2b0a..82806ba780 100644 +index 036aee2b0a..f02780d92a 100644 --- a/dpdk/config/arm/arm64_n2_linux_gcc +++ b/dpdk/config/arm/arm64_n2_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14497,11 +16078,15 @@ index 036aee2b0a..82806ba780 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_octeontx2_linux_gcc b/dpdk/config/arm/arm64_octeontx2_linux_gcc -index 8fbdd3868d..d23b6527ef 100644 +index 8fbdd3868d..a8540869b8 100644 --- a/dpdk/config/arm/arm64_octeontx2_linux_gcc +++ b/dpdk/config/arm/arm64_octeontx2_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14509,11 +16094,15 @@ index 8fbdd3868d..d23b6527ef 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_stingray_linux_gcc b/dpdk/config/arm/arm64_stingray_linux_gcc -index 319a4a151d..cf98337f0f 100644 +index 319a4a151d..4e9ebc55fd 100644 --- a/dpdk/config/arm/arm64_stingray_linux_gcc +++ b/dpdk/config/arm/arm64_stingray_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14521,11 +16110,15 @@ index 319a4a151d..cf98337f0f 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_thunderx2_linux_gcc b/dpdk/config/arm/arm64_thunderx2_linux_gcc -index 69c71cbc82..616f6c263a 100644 +index 69c71cbc82..8c4ee7791f 100644 --- a/dpdk/config/arm/arm64_thunderx2_linux_gcc +++ b/dpdk/config/arm/arm64_thunderx2_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14533,11 +16126,15 @@ index 69c71cbc82..616f6c263a 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + 
pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/arm64_thunderxt88_linux_gcc b/dpdk/config/arm/arm64_thunderxt88_linux_gcc -index 372097ba01..131f56465a 100644 +index 372097ba01..9f2af4c873 100644 --- a/dpdk/config/arm/arm64_thunderxt88_linux_gcc +++ b/dpdk/config/arm/arm64_thunderxt88_linux_gcc -@@ -1,6 +1,6 @@ +@@ -1,9 +1,10 @@ [binaries] c = 'aarch64-linux-gnu-gcc' -cpp = 'aarch64-linux-gnu-cpp' @@ -14545,6 +16142,10 @@ index 372097ba01..131f56465a 100644 ar = 'aarch64-linux-gnu-gcc-ar' strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/meson.build b/dpdk/config/arm/meson.build index 213324d262..4131d6e227 100644 --- a/dpdk/config/arm/meson.build @@ -14651,7 +16252,7 @@ index 213324d262..4131d6e227 100644 else warning('Configuration compiler option ' + diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build -index 805d5d51d0..a79a3ed39c 100644 +index 805d5d51d0..e4b19db323 100644 --- a/dpdk/config/meson.build +++ b/dpdk/config/meson.build @@ -22,7 +22,8 @@ is_ms_linker = is_windows and (cc.get_id() == 'clang') @@ -14664,7 +16265,27 @@ index 805d5d51d0..a79a3ed39c 100644 # Libraries have the abi_version as the filename extension # and have the soname be all but the final part of the abi_version. -@@ -136,7 +137,7 @@ endif +@@ -90,13 +91,14 @@ else + cpu_instruction_set = 'generic' + endif + endif ++ if platform == 'native' ++ if cpu_instruction_set == 'auto' ++ cpu_instruction_set = 'native' ++ endif ++ endif + endif + +-if platform == 'native' +- if cpu_instruction_set == 'auto' +- cpu_instruction_set = 'native' +- endif +-elif platform == 'generic' ++if platform == 'generic' + if cpu_instruction_set == 'auto' + cpu_instruction_set = 'generic' + endif +@@ -136,13 +138,16 @@ endif toolchain = cc.get_id() dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain) @@ -14673,7 +16294,16 @@ index 805d5d51d0..a79a3ed39c 100644 dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8) dpdk_conf.set('RTE_ARCH_32', cc.sizeof('void *') == 4) -@@ -188,7 +189,7 @@ if find_libnuma + + if not is_windows + add_project_link_arguments('-Wl,--no-as-needed', language: 'c') ++ if cc.has_link_argument('-Wl,--undefined-version') ++ add_project_link_arguments('-Wl,--undefined-version', language: 'c') ++ endif + endif + + # use pthreads if available for the platform +@@ -188,7 +193,7 @@ if find_libnuma endif has_libfdt = 0 @@ -14682,7 +16312,7 @@ index 805d5d51d0..a79a3ed39c 100644 if fdt_dep.found() and cc.has_header('fdt.h') dpdk_conf.set10('RTE_HAS_LIBFDT', true) has_libfdt = 1 -@@ -196,11 +197,12 @@ if fdt_dep.found() and cc.has_header('fdt.h') +@@ -196,11 +201,12 @@ if fdt_dep.found() and cc.has_header('fdt.h') dpdk_extra_ldflags += '-lfdt' endif @@ -14697,7 +16327,7 @@ index 805d5d51d0..a79a3ed39c 100644 libarchive = dependency('libarchive', required: false, method: 'pkg-config') if libarchive.found() -@@ -334,7 +336,7 @@ if max_lcores == 'detect' +@@ -334,7 +340,7 @@ if max_lcores == 'detect' error('Discovery of max_lcores is not supported for cross-compilation.') endif # overwrite the default value with discovered values @@ -14772,6 +16402,18 @@ index cab4390a97..2f1a3ffb21 100644 #define RTE_MAX_VFIO_CONTAINERS 64 /* bsd module defines */ +diff --git a/dpdk/config/x86/cross-mingw b/dpdk/config/x86/cross-mingw +index 09f7af0928..fab1a7061c 100644 +--- a/dpdk/config/x86/cross-mingw ++++ b/dpdk/config/x86/cross-mingw +@@ -5,6 +5,7 @@ ld = 
'x86_64-w64-mingw32-ld' + ar = 'x86_64-w64-mingw32-ar' + strip = 'x86_64-w64-mingw32-strip' + pkgconfig = 'x86_64-w64-mingw32-pkg-config' ++pkg-config = 'x86_64-w64-mingw32-pkg-config' + objdump = 'x86_64-w64-mingw32-objdump' + + [host_machine] diff --git a/dpdk/config/x86/meson.build b/dpdk/config/x86/meson.build index e25ed316f4..54345c4da3 100644 --- a/dpdk/config/x86/meson.build @@ -15560,6 +17202,38 @@ index 7e2b429ac8..5c25b92092 100644 cdata.set('WARN_AS_ERROR', 'NO') if get_option('werror') cdata.set('WARN_AS_ERROR', 'YES') +diff --git a/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst b/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst +index 9d71585e9e..950c5dfb5a 100644 +--- a/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst ++++ b/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst +@@ -165,7 +165,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure: + uint8_t dl_bandwidth; + uint8_t ul_load_balance; + uint8_t dl_load_balance; +- uint16_t flr_time_out; + }; + + - ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. PF and +@@ -191,10 +190,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure: + If all hardware queues exceeds the watermark, no code blocks will be + streamed in from UL/DL code block FIFO. + +-- ``flr_time_out``: specifies how many 16.384us to be FLR time out. The +- time_out = flr_time_out x 16.384us. For instance, if you want to set 10ms for +- the FLR time out then set this setting to 0x262=610. +- + + An example configuration code calling the function ``rte_fpga_5gnr_fec_configure()`` is shown + below: +@@ -219,7 +214,7 @@ below: + /* setup FPGA PF */ + ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf); + TEST_ASSERT_SUCCESS(ret, +- "Failed to configure 4G FPGA PF for bbdev %s", ++ "Failed to configure 5GNR FPGA PF for bbdev %s", + info->dev_name); + + diff --git a/dpdk/doc/guides/compressdevs/mlx5.rst b/dpdk/doc/guides/compressdevs/mlx5.rst index a4e17f65b3..7f2d6bdfff 100644 --- a/dpdk/doc/guides/compressdevs/mlx5.rst @@ -15722,6 +17396,31 @@ index e86a6205e8..9936556cc9 100644 Linux Prerequisites ~~~~~~~~~~~~~~~~~~~ +diff --git a/dpdk/doc/guides/cryptodevs/overview.rst b/dpdk/doc/guides/cryptodevs/overview.rst +index d754b0cfc6..b068d0d19c 100644 +--- a/dpdk/doc/guides/cryptodevs/overview.rst ++++ b/dpdk/doc/guides/cryptodevs/overview.rst +@@ -20,17 +20,17 @@ Supported Feature Flags + - "OOP SGL In SGL Out" feature flag stands for + "Out-of-place Scatter-gather list Input, Scatter-gather list Output", + which means PMD supports different scatter-gather styled input and output buffers +- (i.e. both can consists of multiple segments). ++ (i.e. both can consist of multiple segments). + + - "OOP SGL In LB Out" feature flag stands for + "Out-of-place Scatter-gather list Input, Linear Buffers Output", +- which means PMD supports input from scatter-gathered styled buffers, ++ which means PMD supports input from scatter-gather styled buffers, + outputting linear buffers (i.e. single segment). + + - "OOP LB In SGL Out" feature flag stands for + "Out-of-place Linear Buffers Input, Scatter-gather list Output", + which means PMD supports input from linear buffer, outputting +- scatter-gathered styled buffers. ++ scatter-gather styled buffers. 
+ + - "OOP LB In LB Out" feature flag stands for + "Out-of-place Linear Buffers Input, Linear Buffers Output", diff --git a/dpdk/doc/guides/dmadevs/hisilicon.rst b/dpdk/doc/guides/dmadevs/hisilicon.rst index 191e56f2f7..974bc49376 100644 --- a/dpdk/doc/guides/dmadevs/hisilicon.rst @@ -16287,10 +17986,28 @@ index da61814b5d..7897226203 100644 Supported ARK RTL PCIe Instances diff --git a/dpdk/doc/guides/nics/features.rst b/dpdk/doc/guides/nics/features.rst -index 27be2d2576..dc268a19ff 100644 +index 27be2d2576..841f3704b1 100644 --- a/dpdk/doc/guides/nics/features.rst +++ b/dpdk/doc/guides/nics/features.rst -@@ -174,7 +174,7 @@ Supports receiving segmented mbufs. +@@ -34,6 +34,17 @@ Supports getting the speed capabilities that the current device is capable of. + * **[related] API**: ``rte_eth_dev_info_get()``. + + ++.. _nic_features_link_speeds_config: ++ ++Link speed configuration ++------------------------ ++ ++Supports configurating fixed speed and link autonegotiation. ++ ++* **[uses] user config**: ``dev_conf.link_speeds:RTE_ETH_LINK_SPEED_*``. ++* **[related] API**: ``rte_eth_dev_configure()``. ++ ++ + .. _nic_features_link_status: + + Link status +@@ -174,7 +185,7 @@ Supports receiving segmented mbufs. .. _nic_features_buffer_split: @@ -16299,11 +18016,51 @@ index 27be2d2576..dc268a19ff 100644 ------------------ Scatters the packets being received on specified boundaries to segmented mbufs. +@@ -711,6 +722,19 @@ Supports configuring per-queue stat counter mapping. + ``rte_eth_dev_set_tx_queue_stats_mapping()``. + + ++.. _nic_features_traffic_manager: ++ ++Traffic manager ++--------------- ++ ++Supports Traffic manager. ++ ++* **[implements] rte_tm_ops**: ``capabilities_get``, ``shaper_profile_add``, ++ ``hierarchy_commit`` and so on. ++* **[related] API**: ``rte_tm_capabilities_get()``, ``rte_tm_shaper_profile_add()``, ++ ``rte_tm_hierarchy_commit()`` and so on. ++ ++ + .. 
_nic_features_fw_version: + + FW version +diff --git a/dpdk/doc/guides/nics/features/atlantic.ini b/dpdk/doc/guides/nics/features/atlantic.ini +index ef4155027c..29969c1493 100644 +--- a/dpdk/doc/guides/nics/features/atlantic.ini ++++ b/dpdk/doc/guides/nics/features/atlantic.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Queue start/stop = Y diff --git a/dpdk/doc/guides/nics/features/bnxt.ini b/dpdk/doc/guides/nics/features/bnxt.ini -index afb5414b49..ac682c5779 100644 +index afb5414b49..60b6da6d15 100644 --- a/dpdk/doc/guides/nics/features/bnxt.ini +++ b/dpdk/doc/guides/nics/features/bnxt.ini -@@ -57,7 +57,7 @@ Perf doc = Y +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -57,7 +58,7 @@ Perf doc = Y [rte_flow items] any = Y @@ -16312,7 +18069,7 @@ index afb5414b49..ac682c5779 100644 ipv4 = Y ipv6 = Y gre = Y -@@ -71,7 +71,7 @@ represented_port = Y +@@ -71,7 +72,7 @@ represented_port = Y tcp = Y udp = Y vf = Y @@ -16321,6 +18078,18 @@ index afb5414b49..ac682c5779 100644 vxlan = Y [rte_flow actions] +diff --git a/dpdk/doc/guides/nics/features/cnxk.ini b/dpdk/doc/guides/nics/features/cnxk.ini +index 1623a1803e..9be71e713f 100644 +--- a/dpdk/doc/guides/nics/features/cnxk.ini ++++ b/dpdk/doc/guides/nics/features/cnxk.ini +@@ -27,6 +27,7 @@ RSS hash = Y + RSS key update = Y + RSS reta update = Y + Inner RSS = Y ++Traffic manager = Y + Inline protocol = Y + Flow control = Y + Scattered Rx = Y diff --git a/dpdk/doc/guides/nics/features/cxgbe.ini b/dpdk/doc/guides/nics/features/cxgbe.ini index f674803ec4..f9912390fb 100644 --- a/dpdk/doc/guides/nics/features/cxgbe.ini @@ -16344,10 +18113,18 @@ index f674803ec4..f9912390fb 100644 [rte_flow actions] count = Y diff --git a/dpdk/doc/guides/nics/features/default.ini b/dpdk/doc/guides/nics/features/default.ini -index c96a52b58e..8bd849e96f 100644 +index c96a52b58e..465d579080 100644 --- a/dpdk/doc/guides/nics/features/default.ini +++ b/dpdk/doc/guides/nics/features/default.ini -@@ -23,6 +23,7 @@ Shared Rx queue = +@@ -8,6 +8,7 @@ + ; + [Features] + Speed capabilities = ++Link speed configuration = + Link status = + Link status event = + Removal event = +@@ -23,6 +24,7 @@ Shared Rx queue = Burst mode info = Power mgmt address monitor = MTU update = @@ -16355,11 +18132,39 @@ index c96a52b58e..8bd849e96f 100644 Scattered Rx = LRO = TSO = +@@ -40,6 +42,7 @@ DCB = + VLAN filter = + Flow control = + Rate limitation = ++Traffic manager = + Inline crypto = + Inline protocol = + CRC offload = +diff --git a/dpdk/doc/guides/nics/features/dpaa.ini b/dpdk/doc/guides/nics/features/dpaa.ini +index a382c7160c..b136ed191a 100644 +--- a/dpdk/doc/guides/nics/features/dpaa.ini ++++ b/dpdk/doc/guides/nics/features/dpaa.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Burst mode info = Y diff --git a/dpdk/doc/guides/nics/features/dpaa2.ini b/dpdk/doc/guides/nics/features/dpaa2.ini -index 4c06841a87..09ce66c788 100644 +index 4c06841a87..eaee07cf24 100644 --- a/dpdk/doc/guides/nics/features/dpaa2.ini +++ b/dpdk/doc/guides/nics/features/dpaa2.ini -@@ -31,7 +31,7 @@ ARMv8 = Y +@@ -17,6 +17,7 @@ Unicast MAC filter = Y + RSS hash = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + VLAN offload = Y + L3 checksum offload = Y + L4 checksum offload = Y +@@ -31,7 +32,7 @@ ARMv8 = Y Usage doc 
= Y [rte_flow items] @@ -16368,7 +18173,7 @@ index 4c06841a87..09ce66c788 100644 gre = Y icmp = Y ipv4 = Y -@@ -41,7 +41,7 @@ raw = Y +@@ -41,7 +42,7 @@ raw = Y sctp = Y tcp = Y udp = Y @@ -16426,10 +18231,26 @@ index 9f6f0ebf3a..ada6607fe9 100644 icmp6 = Y ipv4 = Y diff --git a/dpdk/doc/guides/nics/features/hns3.ini b/dpdk/doc/guides/nics/features/hns3.ini -index 405b94f05c..338b4e6864 100644 +index 405b94f05c..8b623d3077 100644 --- a/dpdk/doc/guides/nics/features/hns3.ini +++ b/dpdk/doc/guides/nics/features/hns3.ini -@@ -51,7 +51,7 @@ Linux = Y +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -28,6 +29,7 @@ RSS reta update = Y + DCB = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + FEC = Y +@@ -51,7 +53,7 @@ Linux = Y ARMv8 = Y [rte_flow items] @@ -16438,7 +18259,7 @@ index 405b94f05c..338b4e6864 100644 geneve = Y icmp = Y ipv4 = Y -@@ -60,7 +60,7 @@ nvgre = Y +@@ -60,7 +62,7 @@ nvgre = Y sctp = Y tcp = Y udp = Y @@ -16448,10 +18269,26 @@ index 405b94f05c..338b4e6864 100644 vxlan_gpe = Y diff --git a/dpdk/doc/guides/nics/features/i40e.ini b/dpdk/doc/guides/nics/features/i40e.ini -index dd18fec217..6e141de326 100644 +index dd18fec217..32a76919f7 100644 --- a/dpdk/doc/guides/nics/features/i40e.ini +++ b/dpdk/doc/guides/nics/features/i40e.ini -@@ -54,7 +54,7 @@ Power8 = Y +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -27,6 +28,7 @@ SR-IOV = Y + DCB = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + QinQ offload = P +@@ -54,7 +56,7 @@ Power8 = Y [rte_flow items] ah = Y esp = Y @@ -16460,7 +18297,7 @@ index dd18fec217..6e141de326 100644 gre = Y gtpc = Y gtpu = Y -@@ -69,7 +69,7 @@ sctp = Y +@@ -69,7 +71,7 @@ sctp = Y tcp = Y udp = Y vf = Y @@ -16470,19 +18307,28 @@ index dd18fec217..6e141de326 100644 [rte_flow actions] diff --git a/dpdk/doc/guides/nics/features/iavf.ini b/dpdk/doc/guides/nics/features/iavf.ini -index 01f514239e..3860f283d5 100644 +index 01f514239e..0ff4a1c44a 100644 --- a/dpdk/doc/guides/nics/features/iavf.ini +++ b/dpdk/doc/guides/nics/features/iavf.ini -@@ -21,7 +21,7 @@ RSS key update = Y +@@ -20,14 +20,15 @@ RSS hash = Y + RSS key update = Y RSS reta update = Y VLAN filter = Y ++Traffic manager = Y ++Inline crypto = Y CRC offload = Y -VLAN offload = Y +VLAN offload = P L3 checksum offload = P L4 checksum offload = P Packet type parsing = Y -@@ -40,7 +40,7 @@ ah = Y + Rx descriptor status = Y + Tx descriptor status = Y +-Inline crypto = Y + Basic stats = Y + Multiprocess aware = Y + FreeBSD = Y +@@ -40,7 +41,7 @@ ah = Y arp_eth_ipv4 = Y ecpri = Y esp = Y @@ -16491,7 +18337,7 @@ index 01f514239e..3860f283d5 100644 gre = Y gtpc = Y gtpu = Y -@@ -57,7 +57,7 @@ ppp = Y +@@ -57,7 +58,7 @@ ppp = Y sctp = Y tcp = Y udp = Y @@ -16501,10 +18347,26 @@ index 01f514239e..3860f283d5 100644 [rte_flow actions] count = Y diff --git a/dpdk/doc/guides/nics/features/ice.ini b/dpdk/doc/guides/nics/features/ice.ini -index a15f42f94c..0d911590b7 100644 +index a15f42f94c..3542f3f64a 100644 --- a/dpdk/doc/guides/nics/features/ice.ini +++ b/dpdk/doc/guides/nics/features/ice.ini -@@ -52,7 +52,7 @@ x86-64 = Y +@@ -8,6 +8,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -25,6 +26,7 @@ RSS hash = 
Y + RSS key update = Y + RSS reta update = Y + VLAN filter = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + QinQ offload = P +@@ -52,7 +54,7 @@ x86-64 = Y ah = Y arp_eth_ipv4 = Y esp = Y @@ -16513,7 +18375,7 @@ index a15f42f94c..0d911590b7 100644 gtpu = Y gtp_psc = Y icmp = Y -@@ -70,7 +70,7 @@ raw = Y +@@ -70,7 +72,7 @@ raw = Y sctp = Y tcp = Y udp = Y @@ -16523,7 +18385,7 @@ index a15f42f94c..0d911590b7 100644 [rte_flow actions] diff --git a/dpdk/doc/guides/nics/features/ice_dcf.ini b/dpdk/doc/guides/nics/features/ice_dcf.ini -index 4d6fb6d849..54073f0b88 100644 +index 4d6fb6d849..02bbbad069 100644 --- a/dpdk/doc/guides/nics/features/ice_dcf.ini +++ b/dpdk/doc/guides/nics/features/ice_dcf.ini @@ -3,6 +3,9 @@ @@ -16536,20 +18398,41 @@ index 4d6fb6d849..54073f0b88 100644 [Features] Queue start/stop = Y Scattered Rx = Y -@@ -10,6 +13,8 @@ RSS hash = P +@@ -10,6 +13,9 @@ RSS hash = P CRC offload = Y L3 checksum offload = P L4 checksum offload = P +Inner L3 checksum = P +Inner L4 checksum = P ++Traffic manager = Y Basic stats = Y Linux = Y x86-32 = Y +diff --git a/dpdk/doc/guides/nics/features/igb.ini b/dpdk/doc/guides/nics/features/igb.ini +index 7b4af6f86c..ee2408f3ee 100644 +--- a/dpdk/doc/guides/nics/features/igb.ini ++++ b/dpdk/doc/guides/nics/features/igb.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = P ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y diff --git a/dpdk/doc/guides/nics/features/igc.ini b/dpdk/doc/guides/nics/features/igc.ini -index f2c6fa28ad..b5deea3f61 100644 +index f2c6fa28ad..a43b8eaefd 100644 --- a/dpdk/doc/guides/nics/features/igc.ini +++ b/dpdk/doc/guides/nics/features/igc.ini -@@ -35,7 +35,7 @@ Linux = Y +@@ -4,6 +4,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + FW version = Y +@@ -35,7 +36,7 @@ Linux = Y x86-64 = Y [rte_flow items] @@ -16558,11 +18441,31 @@ index f2c6fa28ad..b5deea3f61 100644 ipv4 = Y ipv6 = Y tcp = Y +diff --git a/dpdk/doc/guides/nics/features/ionic.ini b/dpdk/doc/guides/nics/features/ionic.ini +index 5bd18e39e9..a46874ad89 100644 +--- a/dpdk/doc/guides/nics/features/ionic.ini ++++ b/dpdk/doc/guides/nics/features/ionic.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Queue start/stop = Y diff --git a/dpdk/doc/guides/nics/features/ipn3ke.ini b/dpdk/doc/guides/nics/features/ipn3ke.ini -index defc39f525..1f6b780273 100644 +index defc39f525..e412978820 100644 --- a/dpdk/doc/guides/nics/features/ipn3ke.ini +++ b/dpdk/doc/guides/nics/features/ipn3ke.ini -@@ -47,13 +47,13 @@ x86-32 = Y +@@ -25,6 +25,7 @@ SR-IOV = Y + DCB = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + QinQ offload = Y +@@ -47,13 +48,13 @@ x86-32 = Y x86-64 = Y [rte_flow items] @@ -16579,10 +18482,26 @@ index defc39f525..1f6b780273 100644 [rte_flow actions] diff --git a/dpdk/doc/guides/nics/features/ixgbe.ini b/dpdk/doc/guides/nics/features/ixgbe.ini -index c5333d1142..e5cef81f9a 100644 +index c5333d1142..55deaadc62 100644 --- a/dpdk/doc/guides/nics/features/ixgbe.ini +++ b/dpdk/doc/guides/nics/features/ixgbe.ini -@@ -56,7 +56,7 @@ x86-32 = Y +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -27,6 +28,7 @@ DCB = Y + VLAN filter = Y + Flow control = Y + Rate limitation = Y ++Traffic manager = Y + 
Inline crypto = Y + CRC offload = P + VLAN offload = P +@@ -56,7 +58,7 @@ x86-32 = Y x86-64 = Y [rte_flow items] @@ -16591,7 +18510,7 @@ index c5333d1142..e5cef81f9a 100644 e_tag = Y fuzzy = Y ipv4 = Y -@@ -66,7 +66,7 @@ raw = Y +@@ -66,7 +68,7 @@ raw = Y sctp = Y tcp = Y udp = Y @@ -16631,10 +18550,21 @@ index 845d2d4a97..e2cf7a1d9d 100644 LRO = Y TSO = Y diff --git a/dpdk/doc/guides/nics/features/mvpp2.ini b/dpdk/doc/guides/nics/features/mvpp2.ini -index 1bcf74875e..653c9d08cb 100644 +index 1bcf74875e..ccc2c2d4f8 100644 --- a/dpdk/doc/guides/nics/features/mvpp2.ini +++ b/dpdk/doc/guides/nics/features/mvpp2.ini -@@ -24,13 +24,13 @@ ARMv8 = Y +@@ -12,8 +12,9 @@ Allmulticast mode = Y + Unicast MAC filter = Y + Multicast MAC filter = Y + RSS hash = Y +-Flow control = Y + VLAN filter = Y ++Flow control = Y ++Traffic manager = Y + CRC offload = Y + L3 checksum offload = Y + L4 checksum offload = Y +@@ -24,13 +25,13 @@ ARMv8 = Y Usage doc = Y [rte_flow items] @@ -16650,6 +18580,42 @@ index 1bcf74875e..653c9d08cb 100644 [rte_flow actions] drop = Y +diff --git a/dpdk/doc/guides/nics/features/ngbe.ini b/dpdk/doc/guides/nics/features/ngbe.ini +index 2701c5f051..1dfd92e96b 100644 +--- a/dpdk/doc/guides/nics/features/ngbe.ini ++++ b/dpdk/doc/guides/nics/features/ngbe.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Free Tx mbuf on demand = Y +diff --git a/dpdk/doc/guides/nics/features/octeontx.ini b/dpdk/doc/guides/nics/features/octeontx.ini +index 78fa7c719a..3165e41a21 100644 +--- a/dpdk/doc/guides/nics/features/octeontx.ini ++++ b/dpdk/doc/guides/nics/features/octeontx.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Lock-free Tx queue = Y +diff --git a/dpdk/doc/guides/nics/features/sfc.ini b/dpdk/doc/guides/nics/features/sfc.ini +index 2e798b5ef5..3ae479223a 100644 +--- a/dpdk/doc/guides/nics/features/sfc.ini ++++ b/dpdk/doc/guides/nics/features/sfc.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y diff --git a/dpdk/doc/guides/nics/features/tap.ini b/dpdk/doc/guides/nics/features/tap.ini index b4a356e5d5..f26355e57f 100644 --- a/dpdk/doc/guides/nics/features/tap.ini @@ -16669,11 +18635,39 @@ index b4a356e5d5..f26355e57f 100644 [rte_flow actions] drop = Y +diff --git a/dpdk/doc/guides/nics/features/thunderx.ini b/dpdk/doc/guides/nics/features/thunderx.ini +index b33bb37c82..2ab8db7239 100644 +--- a/dpdk/doc/guides/nics/features/thunderx.ini ++++ b/dpdk/doc/guides/nics/features/thunderx.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Queue start/stop = Y diff --git a/dpdk/doc/guides/nics/features/txgbe.ini b/dpdk/doc/guides/nics/features/txgbe.ini -index 6d0cc8afdd..958f8ac793 100644 +index 6d0cc8afdd..a09bed5c84 100644 --- a/dpdk/doc/guides/nics/features/txgbe.ini +++ b/dpdk/doc/guides/nics/features/txgbe.ini -@@ -52,7 +52,7 @@ x86-32 = Y +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -26,6 +27,7 @@ DCB = Y + VLAN filter = Y + Flow control = Y + Rate limitation = Y ++Traffic manager = Y + Inline crypto = Y + CRC offload = P + VLAN offload = P +@@ -52,7 +54,7 @@ x86-32 = Y x86-64 = Y [rte_flow items] @@ -16682,7 +18676,7 @@ 
index 6d0cc8afdd..958f8ac793 100644 e_tag = Y fuzzy = Y ipv4 = Y -@@ -62,7 +62,7 @@ raw = Y +@@ -62,7 +64,7 @@ raw = Y sctp = Y tcp = Y udp = Y @@ -16692,9 +18686,18 @@ index 6d0cc8afdd..958f8ac793 100644 [rte_flow actions] diff --git a/dpdk/doc/guides/nics/hns3.rst b/dpdk/doc/guides/nics/hns3.rst -index 5f68a10ecf..380024600b 100644 +index 5f68a10ecf..836a91f86e 100644 --- a/dpdk/doc/guides/nics/hns3.rst +++ b/dpdk/doc/guides/nics/hns3.rst +@@ -6,7 +6,7 @@ HNS3 Poll Mode Driver + + The hns3 PMD (**librte_net_hns3**) provides poll mode driver support + for the inbuilt HiSilicon Network Subsystem(HNS) network engine +-found in the HiSilicon Kunpeng 920 SoC and Kunpeng 930 SoC . ++found in the HiSilicon Kunpeng 920 SoC (HIP08) and Kunpeng 930 SoC (HIP09/HIP10). + + Features + -------- @@ -30,7 +30,6 @@ Features of the HNS3 PMD are: - DCB - Scattered and gather for TX and RX @@ -17090,7 +19093,7 @@ index a25add7c47..66493a1157 100644 .. note:: diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst -index feb2e57cee..e2fb45b1db 100644 +index feb2e57cee..95bafa230d 100644 --- a/dpdk/doc/guides/nics/mlx5.rst +++ b/dpdk/doc/guides/nics/mlx5.rst @@ -19,7 +19,7 @@ Information and documentation about these adapters can be found on the @@ -17249,6 +19252,15 @@ index feb2e57cee..e2fb45b1db 100644 include the necessary support and should be used in the meantime. For DPDK, only libibverbs, libmlx5, mlnx-ofed-kernel packages and firmware updates are required from that distribution. +@@ -1411,7 +1425,7 @@ DevX SDK installation + The DevX SDK must be installed on the machine building the Windows PMD. + Additional information can be found at + `How to Integrate Windows DevX in Your Development Environment +-`__. ++`_. + + Runtime Prerequisites + ~~~~~~~~~~~~~~~~~~~~~ diff --git a/dpdk/doc/guides/nics/mvneta.rst b/dpdk/doc/guides/nics/mvneta.rst index b7f279c3cb..2ee2637a58 100644 --- a/dpdk/doc/guides/nics/mvneta.rst @@ -18287,6 +20299,50 @@ diff --git a/dpdk/doc/guides/prog_guide/img/flow_tru_droppper.png b/dpdk/doc/gui similarity index 100% rename from dpdk/doc/guides/prog_guide/img/flow_tru_droppper.png rename to dpdk/doc/guides/prog_guide/img/flow_tru_dropper.png +diff --git a/dpdk/doc/guides/prog_guide/img/mbuf1.svg b/dpdk/doc/guides/prog_guide/img/mbuf1.svg +index a08bf3b6c0..111a874c00 100644 +--- a/dpdk/doc/guides/prog_guide/img/mbuf1.svg ++++ b/dpdk/doc/guides/prog_guide/img/mbuf1.svg +@@ -487,7 +487,7 @@ + sodipodi:role="line" + id="tspan5256" + x="59.842155" +- y="282.37683">m->pkt.next = NULL ++ y="282.37683">m->next = NULL + m->pkt.next = NULL ++ y="628.45935">m->next = NULL + m->pkt.next = mseg3 ++ y="628.45935">m->next = mseg3 + m->pkt.next = mseg2 ++ y="628.45935">m->next = mseg2 + l2_len = len(out_eth) + mb->l3_len = len(out_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM + set out_ip checksum to 0 in the packet + + This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM. 
+@@ -143,7 +143,7 @@ a vxlan-encapsulated tcp packet: + + mb->l2_len = len(out_eth) + mb->l3_len = len(out_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM | RTE_MBUF_F_TX_UDP_CKSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM + set out_ip checksum to 0 in the packet + set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum() + +@@ -154,7 +154,7 @@ a vxlan-encapsulated tcp packet: + + mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth) + mb->l3_len = len(in_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM + set in_ip checksum to 0 in the packet + + This is similar to case 1), but l2_len is different. It is supported +@@ -165,7 +165,7 @@ a vxlan-encapsulated tcp packet: + + mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth) + mb->l3_len = len(in_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM | RTE_MBUF_F_TX_TCP_CKSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM + set in_ip checksum to 0 in the packet + set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum() + +diff --git a/dpdk/doc/guides/prog_guide/packet_framework.rst b/dpdk/doc/guides/prog_guide/packet_framework.rst +index 3d4e3b66cc..b263f23f17 100644 +--- a/dpdk/doc/guides/prog_guide/packet_framework.rst ++++ b/dpdk/doc/guides/prog_guide/packet_framework.rst +@@ -512,7 +512,7 @@ the number of L2 or L3 cache memory misses is greatly reduced, hence one of the + This is because the cost of L2/L3 cache memory miss on memory read accesses is high, as usually due to data dependency between instructions, + the CPU execution units have to stall until the read operation is completed from L3 cache memory or external DRAM memory. + By using prefetch instructions, the latency of memory read accesses is hidden, +-provided that it is preformed early enough before the respective data structure is actually used. ++provided that it is performed early enough before the respective data structure is actually used. + + By splitting the processing into several stages that are executed on different packets (the packets from the input burst are interlaced), + enough work is created to allow the prefetch instructions to complete successfully (before the prefetched data structures are actually accessed) and diff --git a/dpdk/doc/guides/prog_guide/profile_app.rst b/dpdk/doc/guides/prog_guide/profile_app.rst -index bd6700ef85..14292d4c25 100644 +index bd6700ef85..a6b5fb4d5e 100644 --- a/dpdk/doc/guides/prog_guide/profile_app.rst +++ b/dpdk/doc/guides/prog_guide/profile_app.rst @@ -42,7 +42,7 @@ and recompile the DPDK: @@ -18336,6 +20458,15 @@ index bd6700ef85..14292d4c25 100644 meson configure build -Dc_args=-DRTE_ETHDEV_PROFILE_WITH_VTUNE ninja -C build +@@ -59,7 +59,7 @@ addition to the standard events, ``perf`` can be used to profile arm64 + specific PMU (Performance Monitor Unit) events through raw events (``-e`` + ``-rXX``). + +-For more derails refer to the ++For more details refer to the + `ARM64 specific PMU events enumeration `_. + + @@ -103,7 +103,7 @@ Example: .. code-block:: console @@ -18851,10 +20982,10 @@ index 25439dad45..1fd1755858 100644 * Parameters of ``rte_cryptodev_sym_session_create()`` were modified to accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``. 
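The ``RTE_MBUF_F_TX_IP_CKSUM`` corrections in the mbuf offload hunks above only rename the flag; for readers applying them, a minimal sketch of how an application drives that offload path is shown here. It is illustrative only — ``prepare_tx_cksum`` is a hypothetical helper, and it assumes an untagged Ethernet frame carrying IPv4 without options — not code from this patch.

.. code-block:: c

   #include <rte_ether.h>
   #include <rte_ip.h>
   #include <rte_mbuf.h>
   #include <rte_tcp.h>

   /* Request IPv4 and TCP checksum offload for a plain (non-tunnelled)
    * packet. Assumes the port was configured with
    * RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and RTE_ETH_TX_OFFLOAD_TCP_CKSUM. */
   static void
   prepare_tx_cksum(struct rte_mbuf *m)
   {
       struct rte_ipv4_hdr *ip;
       struct rte_tcp_hdr *tcp;

       m->l2_len = RTE_ETHER_HDR_LEN;
       m->l3_len = sizeof(struct rte_ipv4_hdr); /* no IPv4 options */
       m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
                      RTE_MBUF_F_TX_TCP_CKSUM;

       /* Hardware fills both checksums: zero the IP checksum field and
        * seed the TCP checksum with the pseudo-header checksum. */
       ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);
       ip->hdr_checksum = 0;
       tcp = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
                                     m->l2_len + m->l3_len);
       tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
   }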
diff --git a/dpdk/doc/guides/rel_notes/release_21_11.rst b/dpdk/doc/guides/rel_notes/release_21_11.rst -index db09ec01ea..762db48e9b 100644 +index db09ec01ea..02f51b99f1 100644 --- a/dpdk/doc/guides/rel_notes/release_21_11.rst +++ b/dpdk/doc/guides/rel_notes/release_21_11.rst -@@ -878,3 +878,2319 @@ Tested Platforms +@@ -878,3 +878,2931 @@ Tested Platforms * Kernel version: 5.10 * Ubuntu 18.04 @@ -21174,6 +23305,618 @@ index db09ec01ea..762db48e9b 100644 +* 5e170dd8b6 net/txgbe: fix blocking system events +* 166591931b pcapng: modify timestamp calculation +* 63bf81a617 test: fix named test macro ++ ++21.11.7 Release Notes ++--------------------- ++ ++ ++21.11.7 Fixes ++~~~~~~~~~~~~~ ++ ++* app/crypto-perf: add missing op resubmission ++* app/crypto-perf: fix data comparison ++* app/crypto-perf: fix encrypt operation verification ++* app/crypto-perf: fix next segment mbuf ++* app/crypto-perf: fix out-of-place mbuf size ++* app/crypto-perf: verify strdup return ++* app/dumpcap: verify strdup return ++* app/pdump: verify strdup return ++* app/testpmd: fix crash in multi-process forwarding ++* app/testpmd: fix --stats-period option check ++* app/testpmd: hide --bitrate-stats in help if disabled ++* build: fix linker warnings about undefined symbols ++* bus/dpaa: verify strdup return ++* bus/fslmc: verify strdup return ++* bus/vdev: verify strdup return ++* ci: update versions of actions in GHA ++* common/cnxk: fix mbox region copy ++* common/cnxk: fix mbox struct attributes ++* common/cnxk: fix memory leak in CPT init ++* common/cnxk: fix possible out-of-bounds access ++* common/cnxk: fix RSS RETA configuration ++* common/cnxk: fix Tx MTU configuration ++* common/mlx5: fix calloc parameters ++* common/mlx5: fix duplicate read of general capabilities ++* common/sfc_efx/base: use C11 static assert ++* config: fix CPU instruction set for cross-build ++* cryptodev: remove unused extern variable ++* crypto/ipsec_mb: fix incorrectly setting cipher keys ++* dma/idxd: verify strdup return ++* doc: add --latencystats option in testpmd guide ++* doc: add link speeds configuration in features table ++* doc: add traffic manager in features table ++* doc: fix commands in eventdev test tool guide ++* doc: fix configuration in baseband 5GNR driver guide ++* doc: fix default IP fragments maximum in programmer guide ++* doc: fix typo in packet framework guide ++* doc: fix typo in profiling guide ++* doc: fix typos in cryptodev overview ++* doc: update link to Windows DevX in mlx5 guide ++* drivers/net: fix buffer overflow for packet types list ++* eal: verify strdup return ++* eal/x86: add AMD vendor check for TSC calibration ++* ethdev: fix NVGRE encap flow action description ++* event/cnxk: fix dequeue timeout configuration ++* event/cnxk: verify strdup return ++* eventdev: fix Doxygen processing of vector struct ++* eventdev: improve Doxygen comments on configure struct ++* event/dlb2: remove superfluous memcpy ++* examples/ipsec-secgw: fix typo in error message ++* examples/l3fwd: fix Rx over not ready port ++* examples/packet_ordering: fix Rx with reorder mode disabled ++* examples/qos_sched: fix memory leak in args parsing ++* hash: remove some dead code ++* kernel/freebsd: fix module build on FreeBSD 14 ++* net: add macros for VLAN metadata parsing ++* net/af_xdp: fix leak on XSK configuration failure ++* net/af_xdp: fix memzone leak on config failure ++* net/axgbe: fix Rx and Tx queue state ++* net/bnx2x: fix calloc parameters ++* net/bnx2x: fix warnings about memcpy lengths ++* net/bnxt: fix 50G and 
100G forced speed ++* net/bnxt: fix array overflow ++* net/bnxt: fix backward firmware compatibility ++* net/bnxt: fix deadlock in ULP timer callback ++* net/bnxt: fix null pointer dereference ++* net/bnxt: fix number of Tx queues being created ++* net/bnxt: fix speed change from 200G to 25G on Thor ++* net/bnxt: modify locking for representor Tx ++* net/cnxk: fix flow RSS configuration ++* net/cnxk: fix MTU limit ++* net/ena/base: limit exponential backoff ++* net/ena/base: restructure interrupt handling ++* net/failsafe: fix memory leak in args parsing ++* net/hns3: enable PFC for all user priorities ++* net/hns3: fix disable command with firmware ++* net/hns3: fix reset level comparison ++* net/hns3: fix VF multiple count on one reset ++* net/hns3: refactor handle mailbox function ++* net/hns3: refactor PF mailbox message struct ++* net/hns3: refactor send mailbox function ++* net/hns3: refactor VF mailbox message struct ++* net/hns3: remove QinQ insert support for VF ++* net/hns3: support new device ++* net/i40e: remove incorrect 16B descriptor read block ++* net/i40e: remove redundant judgment in flow parsing ++* net/iavf: fix memory leak on security context error ++* net/iavf: remove error logs for VLAN offloading ++* net/iavf: remove incorrect 16B descriptor read block ++* net/ice: fix link update ++* net/ice: fix tunnel TSO capabilities ++* net/ice: remove incorrect 16B descriptor read block ++* net/ionic: fix device close ++* net/ionic: fix RSS query ++* net/ixgbe: fix memoy leak after device init failure ++* net/ixgbe: increase VF reset timeout ++* net/ixgbevf: fix RSS init for x550 NICs ++* net/memif: fix extra mbuf refcnt update in zero copy Tx ++* net/mlx5: fix age position in hairpin split ++* net/mlx5: fix counters map in bonding mode ++* net/mlx5: fix drop action release timing ++* net/mlx5: fix error packets drop in regular Rx ++* net/mlx5: fix GENEVE TLV option management ++* net/mlx5: fix jump action validation ++* net/mlx5: fix stats query crash in secondary process ++* net/mlx5: fix use after free when releasing Tx queues ++* net/mlx5: fix VLAN handling in meter split ++* net/mlx5: fix warning about copy length ++* net/mlx5: prevent ioctl failure log flooding ++* net/netvsc: fix VLAN metadata parsing ++* net/nfp: fix calloc parameters ++* net/nfp: fix resource leak for PF initialization ++* net/nfp: fix Rx and Tx queue state ++* net/tap: do not overwrite flow API errors ++* net/tap: fix traffic control handle calculation ++* net/virtio: remove duplicate queue xstats ++* net/vmxnet3: fix initialization on FreeBSD ++* net/vmxnet3: ignore Rx queue interrupt setup on FreeBSD ++* regexdev: fix logtype register ++* telemetry: fix connected clients count ++* telemetry: fix empty JSON dictionaries ++* test/cfgfile: fix typo in error messages ++* test: do not count skipped tests as executed ++* test/event: fix crash in Tx adapter freeing ++* test/event: skip test if no driver is present ++* test: fix probing in secondary process ++* test/mbuf: fix external mbuf case with assert enabled ++* test/power: fix typo in error message ++* test: verify strdup return ++* version: 21.11.7-rc1 ++* vhost: fix deadlock during vDPA SW live migration ++* vhost: fix memory leak in Virtio Tx split path ++* vhost: fix virtqueue access check in vhost-user setup ++ ++21.11.7 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* `Red Hat(R) Testing `__ ++ ++ * Platform ++ ++ * RHEL 9 ++ * Kernel 5.14 ++ * Qemu 8.2 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) 
throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ ++* `Intel(R) Testing `__ ++ ++ * Basic Intel(R) NIC(ixgbe, i40e and ice) testing ++ ++ * PF (i40e, ixgbe) ++ * VF (i40e, ixgbe) ++ * PF/VF (ice) ++ * Compile Testing ++ * Intel NIC single core/NIC performance ++ * IPsec ++ ++ * Basic cryptodev and virtio testing ++ ++ * vhost/virtio basic loopback, PVP and performance test ++ * cryptodev Function/Performance ++ ++ ++* `Nvidia(R) Testing `__ ++ ++ * Basic functionality with testpmd ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Regex application ++ * Buffer Split ++ * Tx scheduling ++ ++ * Build tests ++ * ConnectX-6 Dx ++ * ConnectX-7 ++ * BlueField-2 ++ ++21.11.7 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* DPDK 21.11.7 contains fixes up to DPDK 24.03 ++* Issues identified/fixed in DPDK main branch after DPDK 24.03 may be present in DPDK 21.11.7 ++ ++21.11.7 Fixes skipped and status unresolved ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++* c5b531d6ee app/crypto-perf: fix session freeing ++* 61b52e7edb app/test: fix reference to master in bonding test ++* 0fd1386c30 app/testpmd: cleanup cleanly from signal ++* a996cd04ae app/testpmd: fix early exit from signal ++* 7bdf7a13ae app/testpmd: fix encap/decap size calculation ++* 461c287ab5 app/testpmd: fix GRO packets flush on timeout ++* f1d0993e03 app/testpmd: fix interactive mode on Windows ++* 5d8c1f6253 common/cnxk: check error in MAC address set ++* a6f639e079 common/cnxk: fix BP threshold calculation ++* 772e30281a common/cnxk: fix CPT backpressure disable on LBK ++* 48054ca384 common/cnxk: fix link config for SDP ++* 1c7a4d37e7 common/cnxk: fix mailbox timeout due to deadlock ++* 59ceaa72d5 common/cnxk: fix part number for CN10K ++* 5781638519 common/cnxk: fix RQ mask config for CN10KB chip ++* 56fa6f92e9 common/cnxk: fix RSS key configuration ++* 37ca457d3f common/mlx5: fix obtaining IB device in LAG mode ++* cedb44dc87 common/mlx5: improve AES-XTS tweak capability check ++* 7be74edb90 common/mlx5: use just sufficient barrier for Arm ++* 9d91c3047d crypto/openssl: fix memory leaks in asym operations ++* 3dd3115078 dma/cnxk: fix chunk buffer failure return code ++* ba39a261a7 dma/cnxk: fix completion ring tail wrap ++* 95a955e3e0 dma/cnxk: fix device reconfigure ++* 694e8e643d event/cnxk: fix CASP usage for clang ++* b37fe88a2c event/cnxk: fix LMTST write for single event mode ++* 3fe71706ab event/cnxk: fix 
stale data in workslot ++* 04dac73643 eventdev/crypto: fix enqueue count ++* 4b04134cbb eventdev/crypto: fix failed events ++* da73a2a0d1 eventdev/crypto: fix offset used while flushing events ++* f442c04001 eventdev/crypto: fix overflow in circular buffer ++* 9a518054b5 examples/l3fwd: fix duplicate expression for default nexthop ++* 927cb43fe9 examples/l3fwd: fix port group mask with AltiVec ++* 547f294357 gro: fix reordering of packets ++* 6df1bc6b3b mempool/cnxk: avoid hang when counting batch allocs ++* b3ddd649ad mempool/cnxk: fix alloc from non-EAL threads ++* 02a2accb5f net/bonding: fix flow count query ++* 26a6bda9df net/cnxk: add cookies check for multi-segment offload ++* 5a0f64d84b net/cnxk: fix configuring large Rx/Tx queues ++* 3232c95d2c net/cnxk: fix indirect mbuf handling in Tx ++* 8ed5ca4dda net/cnxk: fix mbuf fields in multi-segment Tx ++* 7752f14026 net/cnxk: fix Rx flush on CN10k ++* 0f044b6681 net/iavf: fix refine protocol header ++* 0b241667cc net/iavf: fix tainted scalar ++* b125c0e721 net/iavf: fix tainted scalar ++* 92a16af450 net/iavf: fix virtchnl command called in interrupt ++* 6fd3a7a618 net/ice/base: fix internal etype in switch filter ++* 9749dffe23 net/ice: fix MAC type of E822 and E823 ++* 87e4384d26 net/mlx5: fix condition of LACP miss flow ++* 2ece3b7186 net/mlx5: fix flow workspace double free in Windows ++* 2db234e769 net/mlx5: fix IP-in-IP tunnels recognition ++* 1cfb78d2c4 net/mlx5: fix meter policy priority ++* 27e44a6f53 net/mlx5: remove duplication of L3 flow item validation ++* 48adbc80ba net/mlx5: remove GENEVE options length limitation ++* a74c5001e9 net/ngbe: add proper memory barriers in Rx ++* 31a28a99fd net/ngbe: add spinlock protection on YT PHY ++* 21f702d556 net/ngbe: fix link status in no LSC mode ++* 659cfce01e net/ngbe: remove redundant codes ++* 44a8635459 net/thunderx: fix DMAC control register update ++* 12011b11a3 net/txgbe: adapt to MNG veto bit setting ++* 5e170dd8b6 net/txgbe: fix blocking system events ++* 166591931b pcapng: modify timestamp calculation ++* df33fb53e4 rcu: fix acked token in debug log ++* 063cddfc74 rcu: use atomic operation on acked token ++* 63bf81a617 test: fix named test macro ++ ++21.11.8 Release Notes ++--------------------- ++ ++ ++21.11.8 Fixes ++~~~~~~~~~~~~~ ++ ++* app/bbdev: fix interrupt tests ++* app/dumpcap: handle SIGTERM and SIGHUP ++* app/pdump: handle SIGTERM and SIGHUP ++* app/testpmd: cleanup cleanly from signal ++* app/testpmd: fix build on signed comparison ++* app/testpmd: fix early exit from signal ++* app/testpmd: fix help string of BPF load command ++* app/testpmd: fix interactive mode on Windows ++* app/testpmd: fix lcore ID restriction ++* app/testpmd: fix outer IP checksum offload ++* app/testpmd: fix parsing for connection tracking item ++* app/testpmd: handle IEEE1588 init failure ++* baseband/la12xx: forbid secondary process ++* bpf: fix load hangs with six IPv6 addresses ++* bpf: fix MOV instruction evaluation ++* buildtools: fix build with clang 17 and ASan ++* bus/dpaa: fix bus scan for DMA devices ++* bus/dpaa: fix memory leak in bus scan ++* bus/dpaa: remove redundant file descriptor check ++* bus/pci: fix build with musl 1.2.4 / Alpine 3.19 ++* bus/pci: fix FD in secondary process ++* bus/pci: fix UIO resource mapping in secondary process ++* bus/vdev: fix device reinitialization ++* common/dpaax/caamflib: fix PDCP AES-AES watchdog error ++* common/dpaax/caamflib: fix PDCP-SDAP watchdog error ++* common/dpaax: fix IOVA table cleanup ++* common/dpaax: fix node array overrun 
++* common/mlx5: fix unsigned/signed mismatch ++* common/mlx5: remove unneeded field when modify RQ table ++* config: fix warning for cross build with meson >= 1.3.0 ++* crypto/cnxk: fix minimal input normalization ++* cryptodev: fix build without crypto callbacks ++* cryptodev: validate crypto callbacks from next node ++* crypto/dpaa2_sec: fix event queue user context ++* crypto/openssl: optimize 3DES-CTR context init ++* dmadev: fix structure alignment ++* dma/idxd: add generic option for queue config ++* dma/idxd: add verbose option to config script ++* dma/idxd: fix default for workqueue options ++* dma/idxd: fix setup with Ubuntu 24.04 ++* doc: add baseline mode in l3fwd-power guide ++* doc: fix mbuf flags ++* doc: fix testpmd ring size command ++* doc: fix typo in l2fwd-crypto guide ++* doc: remove reference to mbuf pkt field ++* eal: fix logs for '--lcores' ++* eal/linux: lower log level on allocation attempt failure ++* eal/unix: support ZSTD compression for firmware ++* eal/windows: install sched.h file ++* ethdev: fix device init without socket-local memory ++* ethdev: fix GENEVE option item conversion ++* event/sw: fix warning from useless snprintf ++* fbarray: fix finding for unaligned length ++* fbarray: fix incorrect lookahead behavior ++* fbarray: fix incorrect lookbehind behavior ++* fbarray: fix lookahead ignore mask handling ++* fbarray: fix lookbehind ignore mask handling ++* hash: check name when creating a hash ++* hash: fix RCU reclamation size ++* hash: fix return code description in Doxygen ++* kni: use strscpy ++* latencystats: fix literal float suffix ++* malloc: fix multi-process wait condition handling ++* net/af_packet: align Rx/Tx structs to cache line ++* net/af_xdp: count mbuf allocation failures ++* net/af_xdp: fix port ID in Rx mbuf ++* net/ark: fix index arithmetic ++* net/axgbe: check only minimum speed for cables ++* net/axgbe: delay AN timeout during KR training ++* net/axgbe: disable interrupts during device removal ++* net/axgbe: fix connection for SFP+ active cables ++* net/axgbe: fix fluctuations for 1G Bel Fuse SFP ++* net/axgbe: fix linkup in PHY status ++* net/axgbe: fix MDIO access for non-zero ports and CL45 PHYs ++* net/axgbe: fix SFP codes check for DAC cables ++* net/axgbe: fix Tx flow on 30H HW ++* net/axgbe: reset link when link never comes back ++* net/axgbe: update DMA coherency values ++* net/cnxk: fix outbound security with higher packet burst ++* net/cnxk: fix promiscuous state after MAC change ++* net/cnxk: fix RSS config ++* net/e1000/base: fix link power down ++* net/ena: fix bad checksum handling ++* net/ena: fix checksum handling ++* net/ena: fix return value check ++* net: fix outer UDP checksum in Intel prepare helper ++* net/fm10k: fix cleanup during init failure ++* net/hns3: check Rx DMA address alignmnent ++* net/hns3: disable SCTP verification tag for RSS hash input ++* net/hns3: fix double free for Rx/Tx queue ++* net/hns3: fix offload flag of IEEE 1588 ++* net/hns3: fix Rx timestamp flag ++* net/hns3: fix uninitialized variable in FEC query ++* net/hns3: fix variable overflow ++* net/i40e: fix outer UDP checksum offload for X710 ++* net/ice/base: fix board type definition ++* net/ice/base: fix check for existing switch rule ++* net/ice/base: fix GCS descriptor field offsets ++* net/ice/base: fix masking when reading context ++* net/ice/base: fix pointer to variable outside scope ++* net/ice/base: fix potential TLV length overflow ++* net/ice/base: fix return type of bitmap hamming weight ++* net/ice/base: fix sign 
extension ++* net/ice/base: fix size when allocating children arrays ++* net/ice/base: fix temporary failures reading NVM ++* net/ice: fix check for outer UDP checksum offload ++* net/ice: fix memory leaks in raw pattern parsing ++* net/ice: fix return value for raw pattern parsing ++* net/ixgbe/base: fix 5G link speed reported on VF ++* net/ixgbe/base: fix PHY ID for X550 ++* net/ixgbe/base: revert advertising for X550 2.5G/5G ++* net/ixgbe: do not create delayed interrupt handler twice ++* net/ixgbe: do not update link status in secondary process ++* net/mlx5: fix Arm build with GCC 9.1 ++* net/mlx5: fix end condition of reading xstats ++* net/mlx5: fix hash Rx queue release in flow sample ++* net/mlx5: fix indexed pool with invalid index ++* net/mlx5: fix MTU configuration ++* net/mlx5: fix uplink port probing in bonding mode ++* net/nfp: fix disabling 32-bit build ++* net/ngbe: fix hotplug remove ++* net/ngbe: fix memory leaks ++* net/ngbe: fix MTU range ++* net/ngbe: keep PHY power down while device probing ++* net/softnic: fix maybe-uninitialized warning ++* net/tap: fix file descriptor check in isolated flow ++* net/txgbe: fix flow filters in VT mode ++* net/txgbe: fix hotplug remove ++* net/txgbe: fix memory leaks ++* net/txgbe: fix MTU range ++* net/txgbe: fix Rx interrupt ++* net/txgbe: fix tunnel packet parsing ++* net/txgbe: fix Tx hang on queue disable ++* net/txgbe: fix VF promiscuous and allmulticast ++* net/txgbe: reconfigure more MAC Rx registers ++* net/txgbe: restrict configuration of VLAN strip offload ++* net/virtio: fix MAC table update ++* net/virtio-user: add memcpy check ++* net/vmxnet3: fix init logs ++* pcapng: add memcpy check ++* telemetry: fix connection parameter parsing ++* telemetry: lower log level on socket error ++* test/crypto: fix allocation comment ++* test/crypto: fix asymmetric capability test ++* test/crypto: fix vector global buffer overflow ++* test/crypto: remove unused stats in setup ++* vdpa/sfc: remove dead code ++* version: 21.11.8-rc1 ++* vhost: cleanup resubmit info before inflight setup ++* vhost: fix build with GCC 13 ++ ++21.11.8 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* `Red Hat(R) Testing `__ ++ ++ * Platform ++ ++ * RHEL 9 ++ * Kernel 5.14 ++ * Qemu 8.2 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ ++* `Intel(R) Testing `__ ++ ++ * Basic Intel(R) NIC(ixgbe, i40e and ice) testing ++ ++ * PF (i40e, ixgbe) ++ * VF (i40e, ixgbe) ++ * PF/VF (ice) ++ * Compile Testing ++ * Intel NIC single core/NIC performance ++ * IPsec ++ ++ * Basic cryptodev and virtio testing ++ ++ * vhost/virtio 
basic loopback, PVP and performance test ++ * cryptodev Function/Performance ++ ++ ++* `Nvidia(R) Testing `__ ++ ++ * Basic functionality with testpmd ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Regex application ++ * Buffer Split ++ * Tx scheduling ++ ++ * Build tests ++ * ConnectX-6 Dx ++ * ConnectX-7 ++ * BlueField-2 ++ ++21.11.8 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* DPDK 21.11.8 contains fixes up to DPDK 24.07 ++* Issues identified/fixed in DPDK main branch after DPDK 24.07 may be present in DPDK 21.11.8 ++ ++21.11.8 Fixes skipped and status unresolved ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++* c5b531d6ee app/crypto-perf: fix session freeing ++* 61b52e7edb app/test: fix reference to master in bonding test ++* 7bdf7a13ae app/testpmd: fix encap/decap size calculation ++* 461c287ab5 app/testpmd: fix GRO packets flush on timeout ++* ecf408d2aa app/testpmd: fix indirect action flush ++* 4edbcc7b53 bpf: disable on 32-bit x86 ++* 4edbcc7b53 bpf: disable on 32-bit x86 ++* 5d8c1f6253 common/cnxk: check error in MAC address set ++* a6f639e079 common/cnxk: fix BP threshold calculation ++* 772e30281a common/cnxk: fix CPT backpressure disable on LBK ++* 48054ca384 common/cnxk: fix link config for SDP ++* 1c7a4d37e7 common/cnxk: fix mailbox timeout due to deadlock ++* 59ceaa72d5 common/cnxk: fix part number for CN10K ++* 5781638519 common/cnxk: fix RQ mask config for CN10KB chip ++* 56fa6f92e9 common/cnxk: fix RSS key configuration ++* 37ca457d3f common/mlx5: fix obtaining IB device in LAG mode ++* cedb44dc87 common/mlx5: improve AES-XTS tweak capability check ++* 7be74edb90 common/mlx5: use just sufficient barrier for Arm ++* 046341575b crypto/dpaa_sec: fix IPsec descriptor ++* 9d91c3047d crypto/openssl: fix memory leaks in asym operations ++* 17d5bc6135 crypto/openssl: make per-QP auth context clones ++* b1d7112602 crypto/openssl: make per-QP cipher context clones ++* 3dd3115078 dma/cnxk: fix chunk buffer failure return code ++* ba39a261a7 dma/cnxk: fix completion ring tail wrap ++* 95a955e3e0 dma/cnxk: fix device reconfigure ++* 2a3f42942a dma/hisilicon: remove support for HIP09 platform ++* b9a87346b0 ethdev: fix strict aliasing in link up ++* 694e8e643d event/cnxk: fix CASP usage for clang ++* b37fe88a2c event/cnxk: fix LMTST write for single event mode ++* 3fe71706ab event/cnxk: fix stale data in workslot ++* 04dac73643 eventdev/crypto: fix enqueue count ++* 4b04134cbb eventdev/crypto: fix failed events ++* da73a2a0d1 eventdev/crypto: fix offset used while flushing events ++* ad12d08f05 eventdev/crypto: fix opaque field handling ++* f442c04001 eventdev/crypto: fix overflow in circular buffer ++* 4b97893816 examples: fix lcore ID restriction ++* 548de9091c examples: fix port ID restriction ++* b23c5bd71a examples: fix queue ID restriction ++* 9a518054b5 examples/l3fwd: fix duplicate expression for default nexthop ++* 927cb43fe9 examples/l3fwd: fix port group mask with AltiVec ++* 547f294357 gro: fix reordering of packets ++* 6df1bc6b3b mempool/cnxk: avoid hang when counting batch allocs ++* b3ddd649ad mempool/cnxk: fix alloc from non-EAL threads ++* 97039941b2 net/af_xdp: parse UMEM map info from mempool ++* abdabad636 net/af_xdp: remove unused local statistic ++* e82b0fe097 net/axgbe: disable RRC for yellow carp 
devices ++* a78a1ed8ba net/bonding: fix failover time of LACP with mode 4 ++* 02a2accb5f net/bonding: fix flow count query ++* 26a6bda9df net/cnxk: add cookies check for multi-segment offload ++* 5a0f64d84b net/cnxk: fix configuring large Rx/Tx queues ++* 3232c95d2c net/cnxk: fix indirect mbuf handling in Tx ++* 8ed5ca4dda net/cnxk: fix mbuf fields in multi-segment Tx ++* 7752f14026 net/cnxk: fix Rx flush on CN10k ++* 4c2f14bc6d net/cnxk: fix xstats reset ++* ee0fa7552a net/dpaa: forbid MTU configuration for shared interface ++* 0f044b6681 net/iavf: fix refine protocol header ++* 0b241667cc net/iavf: fix tainted scalar ++* b125c0e721 net/iavf: fix tainted scalar ++* 92a16af450 net/iavf: fix virtchnl command called in interrupt ++* df44ba7a3c net/ice/base: fix preparing PHY for timesync command ++* e2072ba1b0 net/ice/base: fix resource leak ++* 9749dffe23 net/ice: fix MAC type of E822 and E823 ++* 87e4384d26 net/mlx5: fix condition of LACP miss flow ++* 2ece3b7186 net/mlx5: fix flow workspace double free in Windows ++* 1cfb78d2c4 net/mlx5: fix meter policy priority ++* 48adbc80ba net/mlx5: remove GENEVE options length limitation ++* a74c5001e9 net/ngbe: add proper memory barriers in Rx ++* 79be49dd2d net/ngbe: add special config for YT8531SH-CA PHY ++* 31a28a99fd net/ngbe: add spinlock protection on YT PHY ++* 21f702d556 net/ngbe: fix link status in no LSC mode ++* 659cfce01e net/ngbe: remove redundant codes ++* 44a8635459 net/thunderx: fix DMAC control register update ++* 12011b11a3 net/txgbe: adapt to MNG veto bit setting ++* 5e170dd8b6 net/txgbe: fix blocking system events ++* 166591931b pcapng: modify timestamp calculation ++* df33fb53e4 rcu: fix acked token in debug log ++* 063cddfc74 rcu: use atomic operation on acked token ++* 4ad17a1c8f test/crypto: fix enqueue/dequeue callback case ++* 63bf81a617 test: fix named test macro diff --git a/dpdk/doc/guides/rel_notes/release_2_1.rst b/dpdk/doc/guides/rel_notes/release_2_1.rst index 35e6c88884..d0ad99ebce 100644 --- a/dpdk/doc/guides/rel_notes/release_2_1.rst @@ -21287,7 +24030,7 @@ index 440642ef7c..51621b692f 100644 The next task is to initialize the PQoS library and configure CAT. The diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst -index 1b4444b7d8..ce49eab96f 100644 +index 1b4444b7d8..7ff304d05c 100644 --- a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst +++ b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst @@ -15,7 +15,7 @@ Overview @@ -21299,6 +24042,15 @@ index 1b4444b7d8..ce49eab96f 100644 The destination port is the adjacent port from the enabled portmask, that is, if the first four ports are enabled (portmask 0xf), ports 0 and 1 forward into each other, and ports 2 and 3 forward into each other. +@@ -30,7 +30,7 @@ Compiling the Application + + To compile the sample application see :doc:`compiling`. + +-The application is located in the ``l2fwd-crypt`` sub-directory. ++The application is located in the ``l2fwd-crypto`` sub-directory. 
+ + Running the Application + ----------------------- @@ -54,37 +54,37 @@ The application requires a number of command line options: where, @@ -21508,6 +24260,20 @@ index 6d7d7c5cc1..9fb44e2c23 100644 Compiling the Application ------------------------- +diff --git a/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst b/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst +index 2e350c45f1..bb39cac24d 100644 +--- a/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst ++++ b/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst +@@ -336,6 +336,9 @@ will use automatic PMD power management. + This mode is limited to one queue per core, + and has three available power management schemes: + ++``baseline`` ++ This mode will not enable any power saving features. ++ + ``monitor`` + This will use ``rte_power_monitor()`` function to enter + a power-optimized state (subject to platform support). diff --git a/dpdk/doc/guides/sample_app_ug/pipeline.rst b/dpdk/doc/guides/sample_app_ug/pipeline.rst index 49d50136bc..7c86bf484a 100644 --- a/dpdk/doc/guides/sample_app_ug/pipeline.rst @@ -21593,10 +24359,21 @@ index 7160b6a63a..e0af729e66 100644 Before using this command, please enable responses via the set_query command on the host. diff --git a/dpdk/doc/guides/testpmd_app_ug/run_app.rst b/dpdk/doc/guides/testpmd_app_ug/run_app.rst -index 30edef07ea..ccc1bd6ddb 100644 +index 30edef07ea..720d5717f3 100644 --- a/dpdk/doc/guides/testpmd_app_ug/run_app.rst +++ b/dpdk/doc/guides/testpmd_app_ug/run_app.rst -@@ -621,6 +621,7 @@ as follows: +@@ -441,6 +441,10 @@ The command line options are: + + Set the logical core N to perform bitrate calculation. + ++* ``--latencystats=N`` ++ ++ Set the logical core N to perform latency and jitter calculations. ++ + * ``--print-event `` + + Enable printing the occurrence of the designated event. Using all will +@@ -621,6 +625,7 @@ as follows: - ``dev_configure`` - ``dev_start`` - ``dev_stop`` @@ -21605,7 +24382,7 @@ index 30edef07ea..ccc1bd6ddb 100644 - ``tx_queue_setup`` - ``rx_queue_release`` diff --git a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst -index 44228cd7d2..3a522a80a6 100644 +index 44228cd7d2..6902b7ba7c 100644 --- a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst +++ b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst @@ -1767,7 +1767,7 @@ Enable or disable a per port Rx offloading on all Rx queues of a port:: @@ -21626,6 +24403,15 @@ index 44228cd7d2..3a522a80a6 100644 scatter, timestamp, security, keep_crc This command should be run when the port is stopped, or else it will fail. +@@ -2207,7 +2207,7 @@ port config - queue ring size + + Configure a rx/tx queue ring size:: + +- testpmd> port (port_id) (rxq|txq) (queue_id) ring_size (value) ++ testpmd> port config (port_id) (rxq|txq) (queue_id) ring_size (value) + + Only take effect after command that (re-)start the port or command that setup specific queue. + @@ -3510,7 +3510,7 @@ Tunnel offload Indicate tunnel offload rule type @@ -21756,6 +24542,99 @@ index 9772d97ef0..5dd6f9ecae 100644 -* Since default DPDK EAL arguments for ``dpdk-procinfo`` are ``-c1, -n4 & --proc-type=secondary``, +* Since default DPDK EAL arguments for ``dpdk-proc-info`` are ``-c1, -n4 & --proc-type=secondary``, It is not expected that the user passes any EAL arguments. 
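To make the corrected testpmd fragments above concrete — the ``--latencystats=N`` option and the ``port config ... ring_size`` command — here is a hypothetical session; the lcore list, port and queue numbers are placeholders, not values from this patch. Note the ring-size change only takes effect once the port is (re-)started:

.. code-block:: console

   ./dpdk-testpmd -l 0-3 -n 4 -- -i --latencystats=3
   testpmd> port stop 0
   testpmd> port config 0 rxq 0 ring_size 1024
   testpmd> port start 0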
+diff --git a/dpdk/doc/guides/tools/testeventdev.rst b/dpdk/doc/guides/tools/testeventdev.rst +index 48efb9ea6e..4f934f67e4 100644 +--- a/dpdk/doc/guides/tools/testeventdev.rst ++++ b/dpdk/doc/guides/tools/testeventdev.rst +@@ -270,7 +270,7 @@ Example command to run order queue test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0x1f -s 0x10 --vdev=event_sw0 -- \ + --test=order_queue --plcores 1 --wlcores 2,3 + + +@@ -333,7 +333,7 @@ Example command to run order ``all types queue`` test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev=event_octeontx -- \ ++ sudo /app/dpdk-test-eventdev -c 0x1f -- \ + --test=order_atq --plcores 1 --wlcores 2,3 + + +@@ -435,14 +435,14 @@ Example command to run perf queue test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ + --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 + + Example command to run perf queue test with producer enqueuing a burst of events: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ + --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 \ + --prod_enq_burst_sz=32 + +@@ -450,15 +450,15 @@ Example command to run perf queue test with ethernet ports: + + .. code-block:: console + +- sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \ ++ sudo build/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ + --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --prod_type_ethdev + + Example command to run perf queue test with event timer adapter: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev="event_octeontx" -- \ +- --wlcores 4 --plcores 12 --test perf_queue --stlist=a \ ++ sudo /app/dpdk-test-eventdev -c 0xfff1 \ ++ -- --wlcores 4 --plcores 12 --test perf_queue --stlist=a \ + --prod_type_timerdev --fwd_latency + + PERF_ATQ Test +@@ -543,15 +543,15 @@ Example command to run perf ``all types queue`` test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev=event_octeontx -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -- \ + --test=perf_atq --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 + + Example command to run perf ``all types queue`` test with event timer adapter: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev="event_octeontx" -- \ +- --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \ ++ sudo /app/dpdk-test-eventdev -c 0xfff1 \ ++ -- --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \ + --stlist=a --prod_type_timerdev --fwd_latency + + +@@ -771,13 +771,13 @@ Example command to run pipeline atq test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -- \ + --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a + + Example command to run pipeline atq test with vector events: + + .. 
code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -- \ + --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a \ + --enable_vector --vector_size 512 diff --git a/dpdk/doc/guides/windows_gsg/build_dpdk.rst b/dpdk/doc/guides/windows_gsg/build_dpdk.rst index 38b3068d7b..29f2b38feb 100644 --- a/dpdk/doc/guides/windows_gsg/build_dpdk.rst @@ -23079,6 +25958,20 @@ index 92decc3e05..21d35292a3 100644 desc_error = check_desc_error(desc->enc_req.error); status |= desc_error << RTE_BBDEV_DATA_ERROR; rte_bbdev_log_debug("DMA response desc %p", desc); +diff --git a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c +index 4b05b5d3f2..dfebcd5c81 100644 +--- a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c ++++ b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c +@@ -1076,6 +1076,9 @@ la12xx_bbdev_remove(struct rte_vdev_device *vdev) + + PMD_INIT_FUNC_TRACE(); + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + if (vdev == NULL) + return -EINVAL; + diff --git a/dpdk/drivers/baseband/null/bbdev_null.c b/dpdk/drivers/baseband/null/bbdev_null.c index 753d920e18..08cff582b9 100644 --- a/dpdk/drivers/baseband/null/bbdev_null.c @@ -23202,6 +26095,38 @@ index 21a6bee778..b2aa93e046 100644 }; return 0; } +diff --git a/dpdk/drivers/bus/dpaa/base/qbman/process.c b/dpdk/drivers/bus/dpaa/base/qbman/process.c +index 9bc92681cd..72fa639987 100644 +--- a/dpdk/drivers/bus/dpaa/base/qbman/process.c ++++ b/dpdk/drivers/bus/dpaa/base/qbman/process.c +@@ -1,7 +1,7 @@ + /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2011-2016 Freescale Semiconductor Inc. +- * Copyright 2017,2020 NXP ++ * Copyright 2017,2020,2022,2024 NXP + * + */ + #include +@@ -27,15 +27,16 @@ static int check_fd(void) + { + int ret; + +- if (fd >= 0) +- return 0; + ret = pthread_mutex_lock(&fd_init_lock); + assert(!ret); ++ + /* check again with the lock held */ + if (fd < 0) + fd = open(PROCESS_PATH, O_RDWR); ++ + ret = pthread_mutex_unlock(&fd_init_lock); + assert(!ret); ++ + return (fd >= 0) ? 
0 : -ENODEV; + } + diff --git a/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/dpdk/drivers/bus/dpaa/base/qbman/qman.c index 447c091770..aa8da96627 100644 --- a/dpdk/drivers/bus/dpaa/base/qbman/qman.c @@ -23233,7 +26158,7 @@ index 447c091770..aa8da96627 100644 if (fq->cb.fqs) fq->cb.fqs(p, fq, &swapped_msg); diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c -index 737ac8d8c5..5546a9cb8d 100644 +index 737ac8d8c5..81ac971fbe 100644 --- a/dpdk/drivers/bus/dpaa/dpaa_bus.c +++ b/dpdk/drivers/bus/dpaa/dpaa_bus.c @@ -70,7 +70,7 @@ compare_dpaa_devices(struct rte_dpaa_device *dev1, @@ -23245,6 +26170,50 @@ index 737ac8d8c5..5546a9cb8d 100644 if (dev1->device_type > dev2->device_type) comp = 1; else if (dev1->device_type < dev2->device_type) +@@ -178,6 +178,7 @@ dpaa_create_device_list(void) + if (dev->intr_handle == NULL) { + DPAA_BUS_LOG(ERR, "Failed to allocate intr handle"); + ret = -ENOMEM; ++ free(dev); + goto cleanup; + } + +@@ -211,7 +212,7 @@ dpaa_create_device_list(void) + + if (dpaa_sec_available()) { + DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available"); +- return 0; ++ goto qdma_dpaa; + } + + /* Creating SEC Devices */ +@@ -229,6 +230,7 @@ dpaa_create_device_list(void) + if (dev->intr_handle == NULL) { + DPAA_BUS_LOG(ERR, "Failed to allocate intr handle"); + ret = -ENOMEM; ++ free(dev); + goto cleanup; + } + +@@ -250,6 +252,7 @@ dpaa_create_device_list(void) + + rte_dpaa_bus.device_count += i; + ++qdma_dpaa: + /* Creating QDMA Device */ + for (i = 0; i < RTE_DPAA_QDMA_DEVICES; i++) { + dev = calloc(1, sizeof(struct rte_dpaa_device)); +@@ -782,6 +785,10 @@ dpaa_bus_dev_iterate(const void *start, const char *str, + + /* Now that name=device_name format is available, split */ + dup = strdup(str); ++ if (dup == NULL) { ++ DPAA_BUS_DEBUG("Dup string (%s) failed!\n", str); ++ return NULL; ++ } + dev_name = dup + strlen("name="); + + if (start != NULL) { diff --git a/dpdk/drivers/bus/dpaa/include/fsl_qman.h b/dpdk/drivers/bus/dpaa/include/fsl_qman.h index 7ef2f3b2e3..9b63e559bc 100644 --- a/dpdk/drivers/bus/dpaa/include/fsl_qman.h @@ -23303,7 +26272,7 @@ index a922988607..48d6b5693f 100644 */ uint32_t index; diff --git a/dpdk/drivers/bus/fslmc/fslmc_bus.c b/dpdk/drivers/bus/fslmc/fslmc_bus.c -index a0ef24cdc8..53fd75539e 100644 +index a0ef24cdc8..2993af2563 100644 --- a/dpdk/drivers/bus/fslmc/fslmc_bus.c +++ b/dpdk/drivers/bus/fslmc/fslmc_bus.c @@ -539,7 +539,7 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver) @@ -23315,6 +26284,17 @@ index a0ef24cdc8..53fd75539e 100644 * is called from. 
*/ if (rte_eal_iova_mode() == RTE_IOVA_PA) +@@ -638,6 +638,10 @@ fslmc_bus_dev_iterate(const void *start, const char *str, + + /* Now that name=device_name format is available, split */ + dup = strdup(str); ++ if (dup == NULL) { ++ DPAA2_BUS_DEBUG("Dup string (%s) failed!\n", str); ++ return NULL; ++ } + dev_name = dup + strlen("name="); + + if (start != NULL) { diff --git a/dpdk/drivers/bus/fslmc/fslmc_vfio.c b/dpdk/drivers/bus/fslmc/fslmc_vfio.c index b4704eeae4..abe1cab2ee 100644 --- a/dpdk/drivers/bus/fslmc/fslmc_vfio.c @@ -23491,8 +26471,31 @@ index cbc6809284..f82b93af65 100644 if (rawdev->dev_ops && rawdev->dev_ops->firmware_load && rawdev->dev_ops->firmware_load(rawdev, +diff --git a/dpdk/drivers/bus/pci/linux/pci_uio.c b/dpdk/drivers/bus/pci/linux/pci_uio.c +index d52125e49b..81a1ed6fa0 100644 +--- a/dpdk/drivers/bus/pci/linux/pci_uio.c ++++ b/dpdk/drivers/bus/pci/linux/pci_uio.c +@@ -245,7 +245,7 @@ pci_uio_alloc_resource(struct rte_pci_device *dev, + } + snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num); + +- /* save fd if in primary process */ ++ /* save fd */ + fd = open(devname, O_RDWR); + if (fd < 0) { + RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", +@@ -283,6 +283,9 @@ pci_uio_alloc_resource(struct rte_pci_device *dev, + } + } + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + /* allocate the mapping details for secondary processes*/ + *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0); + if (*uio_res == NULL) { diff --git a/dpdk/drivers/bus/pci/linux/pci_vfio.c b/dpdk/drivers/bus/pci/linux/pci_vfio.c -index 1a5e7c2d2a..822aa41f9e 100644 +index 1a5e7c2d2a..847b95fe8c 100644 --- a/dpdk/drivers/bus/pci/linux/pci_vfio.c +++ b/dpdk/drivers/bus/pci/linux/pci_vfio.c @@ -2,6 +2,7 @@ @@ -23503,6 +26506,114 @@ index 1a5e7c2d2a..822aa41f9e 100644 #include #include #include +@@ -52,7 +53,7 @@ pci_vfio_read_config(const struct rte_intr_handle *intr_handle, + if (vfio_dev_fd < 0) + return -1; + +- return pread64(vfio_dev_fd, buf, len, ++ return pread(vfio_dev_fd, buf, len, + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs); + } + +@@ -65,7 +66,7 @@ pci_vfio_write_config(const struct rte_intr_handle *intr_handle, + if (vfio_dev_fd < 0) + return -1; + +- return pwrite64(vfio_dev_fd, buf, len, ++ return pwrite(vfio_dev_fd, buf, len, + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs); + } + +@@ -79,7 +80,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + uint8_t cap_id, cap_offset; + + /* read PCI capability pointer from config space */ +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_CAPABILITY_LIST); + if (ret != sizeof(reg)) { +@@ -94,7 +95,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + while (cap_offset) { + + /* read PCI capability ID */ +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset); + if (ret != sizeof(reg)) { +@@ -108,7 +109,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + + /* if we haven't reached MSI-X, check next capability */ + if (cap_id != PCI_CAP_ID_MSIX) { +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset); + if (ret != sizeof(reg)) { +@@ -125,7 +126,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + /* else, read table offset */ + else { + /* table offset resides in the 
next 4 bytes */ +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset + 4); + if (ret != sizeof(reg)) { +@@ -134,7 +135,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + return -1; + } + +- ret = pread64(fd, &flags, sizeof(flags), ++ ret = pread(fd, &flags, sizeof(flags), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset + 2); + if (ret != sizeof(flags)) { +@@ -161,7 +162,7 @@ pci_vfio_enable_bus_memory(int dev_fd) + uint16_t cmd; + int ret; + +- ret = pread64(dev_fd, &cmd, sizeof(cmd), ++ ret = pread(dev_fd, &cmd, sizeof(cmd), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + +@@ -174,7 +175,7 @@ pci_vfio_enable_bus_memory(int dev_fd) + return 0; + + cmd |= PCI_COMMAND_MEMORY; +- ret = pwrite64(dev_fd, &cmd, sizeof(cmd), ++ ret = pwrite(dev_fd, &cmd, sizeof(cmd), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + +@@ -193,7 +194,7 @@ pci_vfio_set_bus_master(int dev_fd, bool op) + uint16_t reg; + int ret; + +- ret = pread64(dev_fd, ®, sizeof(reg), ++ ret = pread(dev_fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + if (ret != sizeof(reg)) { +@@ -207,7 +208,7 @@ pci_vfio_set_bus_master(int dev_fd, bool op) + else + reg &= ~(PCI_COMMAND_MASTER); + +- ret = pwrite64(dev_fd, ®, sizeof(reg), ++ ret = pwrite(dev_fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + +@@ -463,7 +464,7 @@ pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index) + uint32_t ioport_bar; + int ret; + +- ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar), ++ ret = pread(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_BASE_ADDRESS_0 + bar_index*4); + if (ret != sizeof(ioport_bar)) { @@ -815,7 +816,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev) continue; } @@ -23512,6 +26623,24 @@ index 1a5e7c2d2a..822aa41f9e 100644 if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) { free(reg); continue; +@@ -1132,7 +1133,7 @@ pci_vfio_ioport_read(struct rte_pci_ioport *p, + if (vfio_dev_fd < 0) + return; + +- if (pread64(vfio_dev_fd, data, ++ if (pread(vfio_dev_fd, data, + len, p->base + offset) <= 0) + RTE_LOG(ERR, EAL, + "Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n", +@@ -1149,7 +1150,7 @@ pci_vfio_ioport_write(struct rte_pci_ioport *p, + if (vfio_dev_fd < 0) + return; + +- if (pwrite64(vfio_dev_fd, data, ++ if (pwrite(vfio_dev_fd, data, + len, p->base + offset) <= 0) + RTE_LOG(ERR, EAL, + "Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n", diff --git a/dpdk/drivers/bus/pci/pci_common.c b/dpdk/drivers/bus/pci/pci_common.c index 4a3a87f24f..6ff136f083 100644 --- a/dpdk/drivers/bus/pci/pci_common.c @@ -23542,6 +26671,103 @@ index 4a3a87f24f..6ff136f083 100644 dr->driver.name, dev->id.vendor_id, dev->id.device_id, loc->domain, loc->bus, loc->devid, loc->function, dev->device.numa_node); +diff --git a/dpdk/drivers/bus/pci/pci_common_uio.c b/dpdk/drivers/bus/pci/pci_common_uio.c +index 76c661f054..a06378b239 100644 +--- a/dpdk/drivers/bus/pci/pci_common_uio.c ++++ b/dpdk/drivers/bus/pci/pci_common_uio.c +@@ -26,7 +26,7 @@ EAL_REGISTER_TAILQ(rte_uio_tailq) + static int + pci_uio_map_secondary(struct rte_pci_device *dev) + { +- int fd, i, j; ++ int fd, i = 0, j, res_idx; + struct mapped_pci_resource *uio_res; + struct mapped_pci_res_list *uio_res_list = + RTE_TAILQ_CAST(rte_uio_tailq.head, 
mapped_pci_res_list); +@@ -37,7 +37,15 @@ pci_uio_map_secondary(struct rte_pci_device *dev) + if (rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr)) + continue; + +- for (i = 0; i != uio_res->nb_maps; i++) { ++ /* Map all BARs */ ++ for (res_idx = 0; res_idx != PCI_MAX_RESOURCE; res_idx++) { ++ /* skip empty BAR */ ++ if (dev->mem_resource[res_idx].phys_addr == 0) ++ continue; ++ ++ if (i >= uio_res->nb_maps) ++ return -1; ++ + /* + * open devname, to mmap it + */ +@@ -71,7 +79,9 @@ pci_uio_map_secondary(struct rte_pci_device *dev) + } + return -1; + } +- dev->mem_resource[i].addr = mapaddr; ++ dev->mem_resource[res_idx].addr = mapaddr; ++ ++ i++; + } + return 0; + } +@@ -96,15 +106,15 @@ pci_uio_map_resource(struct rte_pci_device *dev) + if (rte_intr_dev_fd_set(dev->intr_handle, -1)) + return -1; + +- /* secondary processes - use already recorded details */ +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return pci_uio_map_secondary(dev); +- + /* allocate uio resource */ + ret = pci_uio_alloc_resource(dev, &uio_res); + if (ret) + return ret; + ++ /* secondary processes - use already recorded details */ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return pci_uio_map_secondary(dev); ++ + /* Map all BARs */ + for (i = 0; i != PCI_MAX_RESOURCE; i++) { + /* skip empty BAR */ +@@ -220,6 +230,18 @@ pci_uio_unmap_resource(struct rte_pci_device *dev) + if (uio_res == NULL) + return; + ++ /* close fd */ ++ if (rte_intr_fd_get(dev->intr_handle) >= 0) ++ close(rte_intr_fd_get(dev->intr_handle)); ++ uio_cfg_fd = rte_intr_dev_fd_get(dev->intr_handle); ++ if (uio_cfg_fd >= 0) { ++ close(uio_cfg_fd); ++ rte_intr_dev_fd_set(dev->intr_handle, -1); ++ } ++ ++ rte_intr_fd_set(dev->intr_handle, -1); ++ rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN); ++ + /* secondary processes - just free maps */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return pci_uio_unmap(uio_res); +@@ -231,16 +253,4 @@ pci_uio_unmap_resource(struct rte_pci_device *dev) + + /* free uio resource */ + rte_free(uio_res); +- +- /* close fd if in primary process */ +- if (rte_intr_fd_get(dev->intr_handle) >= 0) +- close(rte_intr_fd_get(dev->intr_handle)); +- uio_cfg_fd = rte_intr_dev_fd_get(dev->intr_handle); +- if (uio_cfg_fd >= 0) { +- close(uio_cfg_fd); +- rte_intr_dev_fd_set(dev->intr_handle, -1); +- } +- +- rte_intr_fd_set(dev->intr_handle, -1); +- rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN); + } diff --git a/dpdk/drivers/bus/vdev/rte_bus_vdev.h b/dpdk/drivers/bus/vdev/rte_bus_vdev.h index 2856799953..5af6be009f 100644 --- a/dpdk/drivers/bus/vdev/rte_bus_vdev.h @@ -23555,6 +26781,37 @@ index 2856799953..5af6be009f 100644 * * @param name * The pointer to a driver name to be uninitialized. 
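
The strdup() hardening in the bus fixes above (and in the vdev change that follows) shares one shape: check the copy before using it, and release any partial state on failure. A minimal self-contained sketch of that pattern; dup_device_name() and the sample string are invented here for illustration, not taken from the drivers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate a "name=..." device string, refusing to go on if the
 * allocation failed -- the same check the fslmc and vdev fixes add. */
static char *
dup_device_name(const char *str)
{
	char *dup;

	if (str == NULL)
		return NULL;

	dup = strdup(str);
	if (dup == NULL) {
		fprintf(stderr, "strdup(%s) failed\n", str);
		return NULL;
	}
	return dup;
}

int
main(void)
{
	char *name = dup_device_name("name=dpni.1");

	if (name == NULL)
		return EXIT_FAILURE;
	/* safe to index past the prefix only after the NULL check */
	printf("%s\n", name + strlen("name="));
	free(name);
	return EXIT_SUCCESS;
}
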
+diff --git a/dpdk/drivers/bus/vdev/vdev.c b/dpdk/drivers/bus/vdev/vdev.c +index a8d8b2327e..c040a6d09e 100644 +--- a/dpdk/drivers/bus/vdev/vdev.c ++++ b/dpdk/drivers/bus/vdev/vdev.c +@@ -249,6 +249,10 @@ alloc_devargs(const char *name, const char *args) + devargs->data = strdup(args); + else + devargs->data = strdup(""); ++ if (devargs->data == NULL) { ++ free(devargs); ++ return NULL; ++ } + devargs->args = devargs->data; + + ret = strlcpy(devargs->name, name, sizeof(devargs->name)); +@@ -285,7 +289,6 @@ insert_vdev(const char *name, const char *args, + + dev->device.bus = &rte_vdev_bus; + dev->device.numa_node = SOCKET_ID_ANY; +- dev->device.name = devargs->name; + + if (find_vdev(name)) { + /* +@@ -300,6 +303,7 @@ insert_vdev(const char *name, const char *args, + if (init) + rte_devargs_insert(&devargs); + dev->device.devargs = devargs; ++ dev->device.name = devargs->name; + TAILQ_INSERT_TAIL(&vdev_device_list, dev, next); + + if (p_dev) diff --git a/dpdk/drivers/bus/vmbus/private.h b/dpdk/drivers/bus/vmbus/private.h index 1bca147e12..658303bc27 100644 --- a/dpdk/drivers/bus/vmbus/private.h @@ -23821,7 +27078,7 @@ index f4e9b341af..f4954d2a28 100644 if (rc < 0) { plt_err("Failed to set affinity mask"); diff --git a/dpdk/drivers/common/cnxk/roc_cpt.c b/dpdk/drivers/common/cnxk/roc_cpt.c -index 8f8e6d3821..6179df2f1f 100644 +index 8f8e6d3821..17b5d1e383 100644 --- a/dpdk/drivers/common/cnxk/roc_cpt.c +++ b/dpdk/drivers/common/cnxk/roc_cpt.c @@ -385,6 +385,9 @@ cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr, @@ -23844,7 +27101,24 @@ index 8f8e6d3821..6179df2f1f 100644 /* Allocate memory for instruction queue for CPT LF. */ iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN); if (iq_mem == NULL) -@@ -812,9 +812,9 @@ roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type) +@@ -641,7 +641,7 @@ roc_cpt_dev_init(struct roc_cpt *roc_cpt) + rc = dev_init(dev, pci_dev); + if (rc) { + plt_err("Failed to init roc device"); +- goto fail; ++ return rc; + } + + cpt->pci_dev = pci_dev; +@@ -673,6 +673,7 @@ roc_cpt_dev_init(struct roc_cpt *roc_cpt) + return 0; + + fail: ++ dev_fini(dev, pci_dev); + return rc; + } + +@@ -812,9 +813,9 @@ roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type) void roc_cpt_iq_disable(struct roc_cpt_lf *lf) { @@ -23870,7 +27144,7 @@ index 847d969268..be6ddb56aa 100644 } diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c -index 926a916e44..0a9c722db3 100644 +index 926a916e44..0fbfa8db7c 100644 --- a/dpdk/drivers/common/cnxk/roc_dev.c +++ b/dpdk/drivers/common/cnxk/roc_dev.c @@ -57,7 +57,7 @@ pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp) @@ -23894,7 +27168,19 @@ index 926a916e44..0a9c722db3 100644 mbox_rsp_init(msg->id, rsp); /* Copy message from AF<->PF mbox to PF<->VF mbox */ -@@ -236,6 +241,12 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf) +@@ -185,9 +190,8 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg) + vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz); + if (vf_msg) { + mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg); +- memcpy((uint8_t *)vf_msg + +- sizeof(struct mbox_msghdr), &linfo, +- sizeof(struct cgx_link_user_info)); ++ mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr), &linfo, ++ sizeof(struct cgx_link_user_info)); + + vf_msg->rc = msg->rc; + vf_msg->pcifunc = msg->pcifunc; +@@ -236,6 +240,12 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf) BIT_ULL(vf % max_bits); rsp = (struct ready_msg_rsp *)mbox_alloc_msg( mbox, vf, 
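			/* mbox_alloc_msg() can return NULL; the lines added below now check for it */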
sizeof(*rsp)); @@ -23907,7 +27193,16 @@ index 926a916e44..0a9c722db3 100644 mbox_rsp_init(msg->id, rsp); /* PF/VF function ID */ -@@ -940,6 +951,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) +@@ -438,6 +448,8 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg) + size_t size; + + size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN); ++ if (size < sizeof(struct mbox_msghdr)) ++ return; + /* Send UP message to all VF's */ + for (vf = 0; vf < vf_mbox->ndevs; vf++) { + /* VF active */ +@@ -940,6 +952,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) case PCI_DEVID_CNXK_RVU_AF_VF: case PCI_DEVID_CNXK_RVU_VF: case PCI_DEVID_CNXK_RVU_SDP_VF: @@ -23915,7 +27210,7 @@ index 926a916e44..0a9c722db3 100644 dev->hwcap |= DEV_HWCAP_F_VF; break; } -@@ -988,6 +1000,9 @@ dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova) +@@ -988,6 +1001,9 @@ dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova) struct lmtst_tbl_setup_req *req; req = mbox_alloc_msg_lmtst_tbl_setup(mbox); @@ -23925,7 +27220,7 @@ index 926a916e44..0a9c722db3 100644 /* This pcifunc is defined with primary pcifunc whose LMT address * will be shared. If call contains valid IOVA, following pcifunc * field is of no use. -@@ -1061,6 +1076,11 @@ dev_lmt_setup(struct dev *dev) +@@ -1061,6 +1077,11 @@ dev_lmt_setup(struct dev *dev) */ if (!dev->disable_shared_lmt) { idev = idev_get_cfg(); @@ -24016,7 +27311,7 @@ index 7a24297d72..010b121176 100644 vec : (uint32_t)plt_intr_nb_efd_get(intr_handle); plt_intr_nb_efd_set(intr_handle, nb_efd); diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h -index b63fe108c9..564bf29bc2 100644 +index b63fe108c9..16e39ac6cf 100644 --- a/dpdk/drivers/common/cnxk/roc_mbox.h +++ b/dpdk/drivers/common/cnxk/roc_mbox.h @@ -114,7 +114,7 @@ struct mbox_msghdr { @@ -24057,6 +27352,25 @@ index b63fe108c9..564bf29bc2 100644 #define CGX_TX_STATS_COUNT 18 uint64_t __io rx_stats[CGX_RX_STATS_COUNT]; uint64_t __io tx_stats[CGX_TX_STATS_COUNT]; +@@ -820,12 +824,12 @@ struct nix_cn10k_aq_enq_req { + struct nix_cn10k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { +- struct nix_cn10k_rq_ctx_s rq; +- struct nix_cn10k_sq_ctx_s sq; +- struct nix_cq_ctx_s cq; +- struct nix_rsse_s rss; +- struct nix_rx_mce_s mce; +- struct nix_band_prof_s prof; ++ __io struct nix_cn10k_rq_ctx_s rq; ++ __io struct nix_cn10k_sq_ctx_s sq; ++ __io struct nix_cq_ctx_s cq; ++ __io struct nix_rsse_s rss; ++ __io struct nix_rx_mce_s mce; ++ __io struct nix_band_prof_s prof; + }; + }; + @@ -1240,6 +1244,33 @@ struct ssow_lf_free_req { uint16_t __io hws; }; @@ -24091,11 +27405,33 @@ index b63fe108c9..564bf29bc2 100644 struct sso_hw_setconfig { struct mbox_msghdr hdr; uint32_t __io npa_aura_id; +diff --git a/dpdk/drivers/common/cnxk/roc_nix.c b/dpdk/drivers/common/cnxk/roc_nix.c +index 151d8c3426..74ecb67901 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix.c ++++ b/dpdk/drivers/common/cnxk/roc_nix.c +@@ -413,7 +413,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix) + sdp_lbk_id_update(pci_dev, nix); + nix->pci_dev = pci_dev; + nix->reta_sz = reta_sz; +- nix->mtu = ROC_NIX_DEFAULT_HW_FRS; ++ nix->mtu = roc_nix_max_pkt_len(roc_nix); + + /* Always start with full FC for LBK */ + if (nix->lbk_link) { diff --git a/dpdk/drivers/common/cnxk/roc_nix.h b/dpdk/drivers/common/cnxk/roc_nix.h -index 69a5e8e7b4..986aac9e57 100644 +index 69a5e8e7b4..89b12c1f94 100644 --- a/dpdk/drivers/common/cnxk/roc_nix.h +++ 
b/dpdk/drivers/common/cnxk/roc_nix.h -@@ -808,6 +808,7 @@ int __roc_api roc_nix_ptp_sync_time_adjust(struct roc_nix *roc_nix, +@@ -207,8 +207,6 @@ struct roc_nix_eeprom_info { + #define ROC_NIX_RSS_KEY_LEN 48 /* 352 Bits */ + #define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1) + +-#define ROC_NIX_DEFAULT_HW_FRS 1514 +- + #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11 + #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2 + +@@ -808,6 +806,7 @@ int __roc_api roc_nix_ptp_sync_time_adjust(struct roc_nix *roc_nix, int __roc_api roc_nix_ptp_info_cb_register(struct roc_nix *roc_nix, ptp_info_update_t ptp_update); void __roc_api roc_nix_ptp_info_cb_unregister(struct roc_nix *roc_nix); @@ -24579,6 +27915,28 @@ index c8c8401d81..e79a2d63e2 100644 rc = mbox_process(mbox); if (rc) +diff --git a/dpdk/drivers/common/cnxk/roc_nix_rss.c b/dpdk/drivers/common/cnxk/roc_nix_rss.c +index 7de69aabeb..5182c5a9cb 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_rss.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_rss.c +@@ -182,7 +182,7 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group, + if (rc) + return rc; + +- memcpy(&nix->reta[group], reta, ROC_NIX_RSS_RETA_MAX); ++ memcpy(&nix->reta[group], reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX); + return 0; + } + +@@ -195,7 +195,7 @@ roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group, + if (group >= ROC_NIX_RSS_GRPS) + return NIX_ERR_PARAM; + +- memcpy(reta, &nix->reta[group], ROC_NIX_RSS_RETA_MAX); ++ memcpy(reta, &nix->reta[group], sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX); + return 0; + } + diff --git a/dpdk/drivers/common/cnxk/roc_nix_stats.c b/dpdk/drivers/common/cnxk/roc_nix_stats.c index c50c8fa629..756111fb1c 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_stats.c @@ -25776,10 +29134,34 @@ index 6bb915054a..e0848f0940 100644 * * Return: size of descriptor written in words or negative number on error diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h -index 8e8daf5ba8..2c9c631cfd 100644 +index 8e8daf5ba8..73af1141b6 100644 --- a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h +++ b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h -@@ -3795,7 +3795,7 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf, +@@ -1303,6 +1303,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p, + SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1); + MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | +@@ -1350,6 +1355,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p, + + MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | +@@ -3795,7 +3805,7 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf, return -ENOTSUP; } iv[0] = 0xFFFFFFFF; @@ -25789,9 +29171,16 @@ index 8e8daf5ba8..2c9c631cfd 100644 KEY(p, KEY2, authdata->key_enc_flags, authdata->key, diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h -index b2497a5424..07f55b5b40 100644 +index b2497a5424..084392d7cd 100644 --- a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h +++ b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: BSD-3-Clause +- * Copyright 2020-2021 NXP ++ * 
Copyright 2020-2023 NXP + */ + + #ifndef __DESC_SDAP_H__ @@ -492,10 +492,10 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused, /* Set the variable size of data the register will write */ @@ -25805,7 +29194,29 @@ index b2497a5424..07f55b5b40 100644 MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2); /* Do not take the ICV in the out-snooping configuration */ MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4, IMMED2); -@@ -803,7 +803,7 @@ static inline int pdcp_sdap_insert_no_snoop_op( +@@ -704,6 +704,10 @@ static inline int pdcp_sdap_insert_no_snoop_op( + /* Save the ICV generated */ + MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); + /* The CHA will be reused so we need to clear it */ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | +@@ -794,6 +798,10 @@ static inline int pdcp_sdap_insert_no_snoop_op( + /* Save the ICV which is stalling in output FIFO to MATH3 */ + MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); + /* Reset class 1 CHA */ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | +@@ -803,7 +811,7 @@ static inline int pdcp_sdap_insert_no_snoop_op( CLRW_CLR_C1MODE, CLRW, 0, 4, IMMED); @@ -25815,11 +29226,40 @@ index b2497a5424..07f55b5b40 100644 authdata->keylen, INLINE_KEY(authdata)); diff --git a/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/dpdk/drivers/common/dpaax/dpaax_iova_table.c -index 3d661102cc..9daac4bc03 100644 +index 3d661102cc..860e702333 100644 --- a/dpdk/drivers/common/dpaax/dpaax_iova_table.c +++ b/dpdk/drivers/common/dpaax/dpaax_iova_table.c -@@ -261,7 +261,7 @@ dpaax_iova_table_depopulate(void) - rte_free(dpaax_iova_table_p->entries); +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: BSD-3-Clause +- * Copyright 2018 NXP ++ * Copyright 2018-2023 NXP + */ + + #include +@@ -139,10 +139,12 @@ read_memory_node(unsigned int *count) + } + + DPAAX_DEBUG("Device-tree memory node data:"); +- do { ++ ++ while (j > 0) { ++ --j; + DPAAX_DEBUG(" %08" PRIx64 " %08zu", + nodes[j].addr, nodes[j].len); +- } while (--j); ++ } + + cleanup: + close(fd); +@@ -255,13 +257,10 @@ dpaax_iova_table_populate(void) + void + dpaax_iova_table_depopulate(void) + { +- if (dpaax_iova_table_p == NULL) +- return; +- +- rte_free(dpaax_iova_table_p->entries); ++ rte_free(dpaax_iova_table_p); dpaax_iova_table_p = NULL; - DPAAX_DEBUG("IOVA Table cleanedup"); @@ -26362,7 +29802,7 @@ index f1650f94c6..f355b3d741 100644 * must invoke in its constructor. 
*/ diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.c b/dpdk/drivers/common/mlx5/mlx5_common_mr.c -index c694aaf28c..7f56e1f973 100644 +index c694aaf28c..73a8e3adae 100644 --- a/dpdk/drivers/common/mlx5/mlx5_common_mr.c +++ b/dpdk/drivers/common/mlx5/mlx5_common_mr.c @@ -78,7 +78,7 @@ mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque) @@ -26666,6 +30106,15 @@ index c694aaf28c..7f56e1f973 100644 } /** +@@ -1308,7 +1382,7 @@ mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out, + + DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name); + n = mp->nb_mem_chunks; +- *out = calloc(sizeof(**out), n); ++ *out = calloc(n, sizeof(**out)); + if (*out == NULL) + return -1; + rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out); @@ -1541,7 +1615,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n, * Destroy a mempool registration object. * @@ -26762,7 +30211,7 @@ index 775fabd478..58d744b4d4 100644 if (!l_const->lcores_share) { __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED); diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c -index e52b995ee3..70a430f134 100644 +index e52b995ee3..aa55d577f2 100644 --- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c @@ -823,6 +823,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, @@ -26781,8 +30230,42 @@ index e52b995ee3..70a430f134 100644 attr->max_wqe_sz_sq = MLX5_GET(cmd_hca_cap, hcattr, max_wqe_sz_sq); attr->flow_counter_bulk_alloc_bitmap = MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc); -@@ -967,6 +969,20 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, - general_obj_types) & +@@ -866,18 +868,6 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr, + max_geneve_tlv_option_data_len); + attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos); +- attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO); +- attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); +- attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS); +- attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE); + attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr, + wqe_index_ignore_cap); + attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd); +@@ -901,6 +891,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + /* Read the general_obj_types bitmap and extract the relevant bits. 
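	 * The bitmap is read once into general_obj_types_supported and the
	 * object-type capabilities below are all derived from that single
	 * value, replacing the per-capability MLX5_GET64() reads deleted here.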
*/ + general_obj_types_supported = MLX5_GET64(cmd_hca_cap, hcattr, + general_obj_types); ++ attr->qos.flow_meter_aso_sup = ++ !!(general_obj_types_supported & ++ MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO); + attr->vdpa.valid = !!(general_obj_types_supported & + MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); + attr->vdpa.queue_counters_valid = +@@ -963,10 +956,23 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto); + if (attr->crypto) + attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts); +- attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & ++ attr->ct_offload = !!(general_obj_types_supported & MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD); attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop); + if (hca_cap_2_sup) { @@ -26802,7 +30285,7 @@ index e52b995ee3..70a430f134 100644 if (attr->qos.sup) { hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc, MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP | -@@ -1114,6 +1130,18 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -1114,6 +1120,18 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, goto error; } } @@ -26821,7 +30304,33 @@ index e52b995ee3..70a430f134 100644 return 0; error: rc = (rc > 0) ? -rc : rc; -@@ -1822,7 +1850,7 @@ mlx5_devx_cmd_create_td(void *ctx) +@@ -1540,7 +1558,7 @@ mlx5_devx_cmd_create_rqt(void *ctx, + uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; + void *rqt_ctx; + struct mlx5_devx_obj *rqt = NULL; +- int i; ++ unsigned int i; + + in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); + if (!in) { +@@ -1594,7 +1612,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, + uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; + uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); + void *rqt_ctx; +- int i; ++ unsigned int i; + int ret; + + if (!in) { +@@ -1607,7 +1625,6 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, + MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1); + rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context); + MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type); +- MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size); + MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size); + for (i = 0; i < rqt_attr->rqt_actual_size; i++) + MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]); +@@ -1822,7 +1839,7 @@ mlx5_devx_cmd_create_td(void *ctx) * Pointer to file stream. * * @return @@ -26830,7 +30339,7 @@ index e52b995ee3..70a430f134 100644 */ int mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused, -@@ -2263,7 +2291,7 @@ mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op, +@@ -2263,7 +2280,7 @@ mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op, case MLX5_CMD_OP_RTR2RTS_QP: qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc); MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id); @@ -27548,10 +31057,43 @@ index 355d274470..4b0e4c10b4 100644 encp->enc_tx_dma_desc_boundary = 0; diff --git a/dpdk/drivers/common/sfc_efx/base/efx.h b/dpdk/drivers/common/sfc_efx/base/efx.h -index 96769935c0..398eb8dbd2 100644 +index 96769935c0..74fcd06583 100644 --- a/dpdk/drivers/common/sfc_efx/base/efx.h +++ b/dpdk/drivers/common/sfc_efx/base/efx.h -@@ -4535,6 +4535,24 @@ efx_mae_action_set_populate_mark( +@@ -7,6 +7,8 @@ + #ifndef _SYS_EFX_H + #define _SYS_EFX_H + ++#include ++ + #include "efx_annote.h" + #include "efsys.h" + #include "efx_types.h" +@@ -17,14 +19,20 @@ + extern "C" { + #endif + +-#define EFX_STATIC_ASSERT(_cond) \ +- ((void)sizeof (char[(_cond) ? 
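	/* size 1 when _cond holds, -1 otherwise: a compile-time error */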
1 : -1])) ++/* ++ * Triggers an error at compilation time if the condition is false. ++ * ++ * The { } exists to workaround a bug in clang (#55821) ++ * where it would not handle _Static_assert in a switch case. ++ */ ++#define EFX_STATIC_ASSERT(_cond) \ ++ { static_assert((_cond), #_cond); } + + #define EFX_ARRAY_SIZE(_array) \ + (sizeof (_array) / sizeof ((_array)[0])) + + #define EFX_FIELD_OFFSET(_type, _field) \ +- ((size_t)&(((_type *)0)->_field)) ++ offsetof(_type, _field) + + /* The macro expands divider twice */ + #define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d)) +@@ -4535,6 +4543,24 @@ efx_mae_action_set_populate_mark( __in efx_mae_actions_t *spec, __in uint32_t mark_value); @@ -27576,7 +31118,7 @@ index 96769935c0..398eb8dbd2 100644 LIBEFX_API extern __checkReturn efx_rc_t efx_mae_action_set_populate_deliver( -@@ -4683,6 +4701,20 @@ efx_mae_action_set_fill_in_counter_id( +@@ -4683,6 +4709,20 @@ efx_mae_action_set_fill_in_counter_id( __in efx_mae_actions_t *spec, __in const efx_counter_t *counter_idp); @@ -28863,6 +32405,71 @@ index 2dc8913feb..2b0261e057 100644 return -ENOMEM; } +diff --git a/dpdk/drivers/crypto/cnxk/cnxk_ae.h b/dpdk/drivers/crypto/cnxk/cnxk_ae.h +index 6222171fe6..16d67a8153 100644 +--- a/dpdk/drivers/crypto/cnxk/cnxk_ae.h ++++ b/dpdk/drivers/crypto/cnxk/cnxk_ae.h +@@ -25,13 +25,22 @@ struct cnxk_ae_sess { + }; + + static __rte_always_inline void +-cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len) ++cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len, size_t max) + { ++ uint8_t msw_len = *len % 8; ++ uint64_t msw_val = 0; + size_t i; + +- /* Strip leading NUL bytes */ +- for (i = 0; i < *len; i++) { +- if ((*data)[i] != 0) ++ if (*len <= 8) ++ return; ++ ++ memcpy(&msw_val, *data, msw_len); ++ if (msw_val != 0) ++ return; ++ ++ for (i = msw_len; i < *len && (*len - i) < max; i += 8) { ++ memcpy(&msw_val, &(*data)[i], 8); ++ if (msw_val != 0) + break; + } + *data += i; +@@ -48,8 +57,8 @@ cnxk_ae_fill_modex_params(struct cnxk_ae_sess *sess, + uint8_t *exp = xform->modex.exponent.data; + uint8_t *mod = xform->modex.modulus.data; + +- cnxk_ae_modex_param_normalize(&mod, &mod_len); +- cnxk_ae_modex_param_normalize(&exp, &exp_len); ++ cnxk_ae_modex_param_normalize(&mod, &mod_len, SIZE_MAX); ++ cnxk_ae_modex_param_normalize(&exp, &exp_len, mod_len); + + if (unlikely(exp_len == 0 || mod_len == 0)) + return -EINVAL; +@@ -222,7 +231,7 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, + struct rte_crypto_mod_op_param mod_op; + uint64_t total_key_len; + union cpt_inst_w4 w4; +- uint32_t base_len; ++ size_t base_len; + uint32_t dlen; + uint8_t *dptr; + +@@ -230,8 +239,11 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, + + base_len = mod_op.base.length; + if (unlikely(base_len > mod_len)) { +- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; +- return -ENOTSUP; ++ cnxk_ae_modex_param_normalize(&mod_op.base.data, &base_len, mod_len); ++ if (base_len > mod_len) { ++ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; ++ return -ENOTSUP; ++ } + } + + total_key_len = mod_len + exp_len; diff --git a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.c index a2281fb8de..0d99c891d9 100644 --- a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.c @@ -29040,7 +32647,7 @@ index 37237de21a..af86ef18d8 100644 pack_iv = 1; } diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c -index a5b052375d..c9745f1db0 100644 
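A note on the EFX_STATIC_ASSERT rework above: C11 static_assert is a declaration, so the new { } wrapper turns it into a statement that can legally follow a switch case label, which is the clang issue (#55821) the comment works around. A minimal sketch of the same idiom; MY_STATIC_ASSERT and classify() are invented names, assuming a C11 compiler:

#include <assert.h>  /* static_assert (C11) */

/* The braces make the assertion a compound statement, so it can sit
 * anywhere a statement is expected, including right after a case label. */
#define MY_STATIC_ASSERT(cond) \
	{ static_assert((cond), #cond); }

static int
classify(int v)
{
	switch (v) {
	case 0:
		MY_STATIC_ASSERT(sizeof(int) >= 4);
		return 1;
	default:
		return 0;
	}
}

int
main(void)
{
	return classify(0) ? 0 : 1;
}
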
+index a5b052375d..4e4fe4a54c 100644 --- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -1,7 +1,7 @@ @@ -29173,6 +32780,15 @@ index a5b052375d..c9745f1db0 100644 } int +@@ -3777,7 +3814,7 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, + cfg.dest_cfg.priority = priority; + + cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; +- cfg.user_ctx = (size_t)(qp); ++ cfg.user_ctx = (size_t)(&qp->rx_vq); + if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { + cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; + cfg.order_preservation_en = 1; diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c index 74f2045637..e68a4875dd 100644 --- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c @@ -29519,7 +33135,7 @@ index 2c203795ab..2c033c6f28 100644 mb_mgr = get_per_thread_mb_mgr(); if (unlikely(mb_mgr == NULL)) diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c -index a308d42ffa..536a586e98 100644 +index a308d42ffa..1097244bab 100644 --- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -918,7 +918,9 @@ aesni_mb_set_docsis_sec_session_parameters( @@ -29575,7 +33191,26 @@ index a308d42ffa..536a586e98 100644 if (cipher_end < auth_end) memcpy(p_dst + cipher_end, p_src + cipher_end, auth_end - cipher_end); -@@ -1099,6 +1101,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, +@@ -990,9 +992,6 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, + job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; + job->u.XCBC._k2 = session->auth.xcbc.k2; + job->u.XCBC._k3 = session->auth.xcbc.k3; +- +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_CCM: +@@ -1007,8 +1006,6 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, + job->u.CMAC._key_expanded = session->auth.cmac.expkey; + job->u.CMAC._skey1 = session->auth.cmac.skey1; + job->u.CMAC._skey2 = session->auth.cmac.skey2; +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_GMAC: +@@ -1099,6 +1096,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); struct aesni_mb_session *session; uint32_t m_offset, oop; @@ -29586,7 +33221,41 @@ index a308d42ffa..536a586e98 100644 session = ipsec_mb_get_session_private(qp, op); if (session == NULL) { -@@ -1207,6 +1213,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, +@@ -1133,24 +1134,17 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, + job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; + job->u.XCBC._k2 = session->auth.xcbc.k2; + job->u.XCBC._k3 = session->auth.xcbc.k3; +- +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_CCM: + job->u.CCM.aad = op->sym->aead.aad.data + 18; + job->u.CCM.aad_len_in_bytes = session->aead.aad_len; +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_CMAC: + job->u.CMAC._key_expanded = session->auth.cmac.expkey; + job->u.CMAC._skey1 = session->auth.cmac.skey1; + job->u.CMAC._skey2 = session->auth.cmac.skey2; +- 
job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_GMAC: +@@ -1188,8 +1182,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, + job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data; + job->u.CHACHA20_POLY1305.aad_len_in_bytes = + session->aead.aad_len; +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.encode; + break; + default: + job->u.HMAC._hashed_auth_key_xor_ipad = +@@ -1207,6 +1199,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { job->enc_keys = session->cipher.zuc_cipher_key; job->dec_keys = session->cipher.zuc_cipher_key; @@ -29594,7 +33263,7 @@ index a308d42ffa..536a586e98 100644 } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) { job->enc_keys = &session->cipher.pKeySched_snow3g_cipher; m_offset = 0; -@@ -1264,9 +1271,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, +@@ -1264,9 +1257,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, switch (job->hash_alg) { case IMB_AUTH_AES_CCM: @@ -29604,7 +33273,7 @@ index a308d42ffa..536a586e98 100644 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset; job->msg_len_to_hash_in_bytes = op->sym->aead.data.length; -@@ -1276,21 +1280,13 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, +@@ -1276,21 +1266,13 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, case IMB_AUTH_AES_GMAC: if (session->cipher.mode == IMB_CIPHER_GCM) { @@ -29628,7 +33297,7 @@ index a308d42ffa..536a586e98 100644 } job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, -@@ -1298,36 +1294,100 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, +@@ -1298,36 +1280,100 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, break; case IMB_AUTH_CHACHA20_POLY1305: @@ -29657,8 +33326,9 @@ index a308d42ffa..536a586e98 100644 + ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; + auth_len_in_bytes = op->sym->auth.data.length >> 3; + ciph_len_in_bytes = op->sym->cipher.data.length >> 3; -+ -+ job->hash_start_src_offset_in_bytes = auth_start_offset(op, + + job->hash_start_src_offset_in_bytes = auth_start_offset(op, +- session, oop); + session, oop, auth_off_in_bytes, + ciph_off_in_bytes, auth_len_in_bytes, + ciph_len_in_bytes); @@ -29674,9 +33344,8 @@ index a308d42ffa..536a586e98 100644 + ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; + auth_len_in_bytes = op->sym->auth.data.length >> 3; + ciph_len_in_bytes = op->sym->cipher.data.length >> 3; - - job->hash_start_src_offset_in_bytes = auth_start_offset(op, -- session, oop); ++ ++ job->hash_start_src_offset_in_bytes = auth_start_offset(op, + session, oop, auth_off_in_bytes, + ciph_off_in_bytes, auth_len_in_bytes, + ciph_len_in_bytes); @@ -30060,7 +33729,7 @@ index 9e8fd495cf..f7ca8a8a8e 100644 return NULL; } diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -index 5794ed8159..514e93229f 100644 +index 5794ed8159..a321258980 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c @@ -2,6 +2,8 @@ @@ -30072,7 +33741,46 @@ index 5794ed8159..514e93229f 100644 #include #include #include -@@ -1059,8 +1061,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -463,6 +465,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, + sess->cipher.key.length, + sess->cipher.key.data) != 
0) + return -EINVAL; ++ ++ ++ /* We use 3DES encryption also for decryption. ++ * IV is not important for 3DES ECB. ++ */ ++ if (EVP_EncryptInit_ex(sess->cipher.ctx, EVP_des_ede3_ecb(), ++ NULL, sess->cipher.key.data, NULL) != 1) ++ return -EINVAL; ++ + break; + + case RTE_CRYPTO_CIPHER_DES_CBC: +@@ -999,8 +1010,7 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, + /** Process cipher des 3 ctr encryption, decryption algorithm */ + static int + process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, +- int offset, uint8_t *iv, uint8_t *key, int srclen, +- EVP_CIPHER_CTX *ctx) ++ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx) + { + uint8_t ebuf[8], ctr[8]; + int unused, n; +@@ -1018,12 +1028,6 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, + src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); + l = rte_pktmbuf_data_len(m) - offset; + +- /* We use 3DES encryption also for decryption. +- * IV is not important for 3DES ecb +- */ +- if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0) +- goto process_cipher_des3ctr_err; +- + memcpy(ctr, iv, 8); + + for (n = 0; n < srclen; n++) { +@@ -1059,8 +1063,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, int srclen, uint8_t *aad, int aadlen, uint8_t *iv, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) { @@ -30085,7 +33793,7 @@ index 5794ed8159..514e93229f 100644 if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) goto process_auth_encryption_gcm_err; -@@ -1074,9 +1079,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1074,9 +1081,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, srclen, ctx, 0)) goto process_auth_encryption_gcm_err; @@ -30097,7 +33805,7 @@ index 5794ed8159..514e93229f 100644 if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0) goto process_auth_encryption_gcm_err; -@@ -1138,8 +1145,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1138,8 +1147,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, int srclen, uint8_t *aad, int aadlen, uint8_t *iv, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) { @@ -30110,7 +33818,7 @@ index 5794ed8159..514e93229f 100644 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0) goto process_auth_decryption_gcm_err; -@@ -1156,9 +1166,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1156,9 +1168,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, srclen, ctx, 0)) goto process_auth_decryption_gcm_err; @@ -30122,6 +33830,16 @@ index 5794ed8159..514e93229f 100644 if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0) return -EFAULT; +@@ -1448,8 +1462,7 @@ process_openssl_cipher_op + srclen, ctx_copy, inplace); + else + status = process_openssl_cipher_des3ctr(mbuf_src, dst, +- op->sym->cipher.data.offset, iv, +- sess->cipher.key.data, srclen, ++ op->sym->cipher.data.offset, iv, srclen, + ctx_copy); + + EVP_CIPHER_CTX_free(ctx_copy); diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c index 52715f86f8..35c4ad13ba 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c @@ -30692,10 +34410,32 @@ index 12e209c86e..f06c851825 100644 #define BIT(x) (1ul << (x)) #define BITS_PER_LONG (__SIZEOF_LONG__ * 8) diff --git a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py -index fcc27822ef..3f5d5ee752 
100755 +index fcc27822ef..5c9572b49d 100755 --- a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py +++ b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py -@@ -29,9 +29,17 @@ def write_values(self, values): +@@ -13,25 +13,39 @@ + + + class SysfsDir: ++ verbose = False ++ + "Used to read/write paths in a sysfs directory" + def __init__(self, path): + self.path = path + + def read_int(self, filename): + "Return a value from sysfs file" ++ if SysfsDir.verbose: ++ print(f"Reading '{filename}' in {self.path}") + with open(os.path.join(self.path, filename)) as f: + return int(f.readline()) + + def write_values(self, values): + "write dictionary, where key is filename and value is value to write" + for filename, contents in values.items(): ++ if SysfsDir.verbose: ++ print(f"Writing '{contents}' to '{filename}' in {self.path}") + with open(os.path.join(self.path, filename), "w") as f: f.write(str(contents)) @@ -30714,20 +34454,58 @@ index fcc27822ef..3f5d5ee752 100755 drv_dir.write_values({"unbind": f"dsa{dsa_id}"}) -@@ -58,7 +66,6 @@ def get_dsa_id(pci): - def configure_dsa(dsa_id, queues, prefix): +@@ -55,18 +69,25 @@ def get_dsa_id(pci): + sys.exit(f"Could not get device ID for device {pci}") + + +-def configure_dsa(dsa_id, queues, prefix): ++def parse_wq_opts(wq_opts): ++ "Parse user-specified queue configuration, creating a dict of options" ++ try: ++ return {o.split('=')[0]: o.split('=')[1] for o in wq_opts} ++ except ValueError: ++ sys.exit("Invalid --wq-option format, use format 'option=value'") ++ ++ ++def configure_dsa(dsa_id, args): "Configure the DSA instance with appropriate number of queues" dsa_dir = SysfsDir(f"/sys/bus/dsa/devices/dsa{dsa_id}") - drv_dir = SysfsDir("/sys/bus/dsa/drivers/dsa") max_groups = dsa_dir.read_int("max_groups") max_engines = dsa_dir.read_int("max_engines") -@@ -82,12 +89,16 @@ def configure_dsa(dsa_id, queues, prefix): - "mode": "dedicated", - "name": f"{prefix}_wq{dsa_id}.{q}", - "priority": 1, -+ "max_batch_size": 1024, - "size": int(max_work_queues_size / nb_queues)}) + max_queues = dsa_dir.read_int("max_work_queues") + max_work_queues_size = dsa_dir.read_int("max_work_queues_size") + +- nb_queues = min(queues, max_queues) +- if queues > nb_queues: ++ nb_queues = min(args.q, max_queues) ++ if args.q > nb_queues: + print(f"Setting number of queues to max supported value: {max_queues}") + + # we want one engine per group, and no more engines than queues +@@ -76,18 +97,26 @@ def configure_dsa(dsa_id, queues, prefix): + + # configure each queue + for q in range(nb_queues): ++ wqcfg = {"group_id": q % nb_groups, ++ "type": "user", ++ "mode": "dedicated", ++ "name": f"{args.prefix}_wq{dsa_id}.{q}", ++ "priority": 1, ++ "max_batch_size": 1024, ++ "size": int(max_work_queues_size / nb_queues)} + wq_dir = SysfsDir(os.path.join(dsa_dir.path, f"wq{dsa_id}.{q}")) +- wq_dir.write_values({"group_id": q % nb_groups, +- "type": "user", +- "mode": "dedicated", +- "name": f"{prefix}_wq{dsa_id}.{q}", +- "priority": 1, +- "size": int(max_work_queues_size / nb_queues)}) ++ if os.path.exists(os.path.join(wq_dir.path, f"driver_name")): ++ wqcfg.update({"driver_name": "user"}) ++ wqcfg.update(parse_wq_opts(args.wq_option)) ++ wq_dir.write_values(wqcfg) # enable device and then queues - drv_dir.write_values({"bind": f"dsa{dsa_id}"}) @@ -30741,11 +34519,53 @@ index fcc27822ef..3f5d5ee752 100755 def main(args): +@@ -101,16 +130,22 @@ def main(args): + arg_p.add_argument('--name-prefix', metavar='prefix', dest='prefix', + default="dpdk", + help="Prefix for workqueue name to mark for DPDK use [default: 
'dpdk']") ++ arg_p.add_argument('--wq-option', action='append', default=[], ++ help="Provide additional config option for queues (format 'x=y')") ++ arg_p.add_argument('--verbose', '-v', action='store_true', ++ help="Provide addition info on tasks being performed") + arg_p.add_argument('--reset', action='store_true', + help="Reset DSA device and its queues") + parsed_args = arg_p.parse_args(args[1:]) + + dsa_id = parsed_args.dsa_id + dsa_id = get_dsa_id(dsa_id) if ':' in dsa_id else dsa_id ++ ++ SysfsDir.verbose = parsed_args.verbose + if parsed_args.reset: + reset_device(dsa_id) + else: +- configure_dsa(dsa_id, parsed_args.q, parsed_args.prefix) ++ configure_dsa(dsa_id, parsed_args) + + + if __name__ == "__main__": diff --git a/dpdk/drivers/dma/idxd/idxd_bus.c b/dpdk/drivers/dma/idxd/idxd_bus.c -index 08639e9dce..594b3e1d5a 100644 +index 08639e9dce..7aaccd50f4 100644 --- a/dpdk/drivers/dma/idxd/idxd_bus.c +++ b/dpdk/drivers/dma/idxd/idxd_bus.c -@@ -314,6 +314,10 @@ dsa_scan(void) +@@ -248,9 +248,15 @@ static int + is_for_this_process_use(const char *name) + { + char *runtime_dir = strdup(rte_eal_get_runtime_dir()); +- char *prefix = basename(runtime_dir); +- int prefixlen = strlen(prefix); + int retval = 0; ++ int prefixlen; ++ char *prefix; ++ ++ if (runtime_dir == NULL) ++ return retval; ++ ++ prefix = basename(runtime_dir); ++ prefixlen = strlen(prefix); + + if (strncmp(name, "dpdk_", 5) == 0) + retval = 1; +@@ -314,6 +320,10 @@ dsa_scan(void) IDXD_PMD_DEBUG("%s(): found %s/%s", __func__, path, wq->d_name); dev = malloc(sizeof(*dev)); @@ -31419,10 +35239,33 @@ index 9377fa50e7..695e3ae429 100644 /* Write CPT instruction to lmt line */ vst1q_u64(lmt_addr, cmd01); diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -index f7a5026250..27c1840f71 100644 +index f7a5026250..2d422aea07 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c +++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -@@ -356,9 +356,9 @@ int +@@ -243,16 +243,17 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev) + + deq_tmo_ns = conf->dequeue_timeout_ns; + +- if (deq_tmo_ns == 0) +- deq_tmo_ns = dev->min_dequeue_timeout_ns; +- if (deq_tmo_ns < dev->min_dequeue_timeout_ns || +- deq_tmo_ns > dev->max_dequeue_timeout_ns) { ++ if (deq_tmo_ns && (deq_tmo_ns < dev->min_dequeue_timeout_ns || ++ deq_tmo_ns > dev->max_dequeue_timeout_ns)) { + plt_err("Unsupported dequeue timeout requested"); + return -EINVAL; + } + +- if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) ++ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { ++ if (deq_tmo_ns == 0) ++ deq_tmo_ns = dev->min_dequeue_timeout_ns; + dev->is_timeout_deq = 1; ++ } + + dev->deq_tmo_ns = deq_tmo_ns; + +@@ -356,9 +357,9 @@ int cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns, uint64_t *tmo_ticks) { @@ -31434,7 +35277,7 @@ index f7a5026250..27c1840f71 100644 return 0; } -@@ -417,10 +417,10 @@ cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn, +@@ -417,10 +418,10 @@ cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn, plt_sso_dbg(); for (i = 0; i < dev->qos_queue_cnt; i++) { @@ -31449,7 +35292,7 @@ index f7a5026250..27c1840f71 100644 } rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt, dev->xae_cnt); -@@ -482,7 +482,7 @@ static void +@@ -482,7 +483,7 @@ static void parse_queue_param(char *value, void *opaque) { struct cnxk_sso_qos queue_qos = {0}; @@ -31458,7 +35301,17 @@ index f7a5026250..27c1840f71 100644 struct 
cnxk_sso_evdev *dev = opaque; char *tok = strtok(value, "-"); struct cnxk_sso_qos *old_ptr; -@@ -574,7 +574,7 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) +@@ -522,6 +523,9 @@ parse_qos_list(const char *value, void *opaque) + char *end = NULL; + char *f = s; + ++ if (s == NULL) ++ return; ++ + while (*s) { + if (*s == '[') + start = s; +@@ -574,7 +578,7 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) &dev->force_ena_bp); rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag, &single_ws); @@ -31467,7 +35320,7 @@ index f7a5026250..27c1840f71 100644 &dev->gw_mode); dev->dual_ws = !single_ws; rte_kvargs_free(kvlist); -@@ -636,9 +636,8 @@ cnxk_sso_fini(struct rte_eventdev *event_dev) +@@ -636,9 +640,8 @@ cnxk_sso_fini(struct rte_eventdev *event_dev) cnxk_tim_fini(); roc_sso_rsrc_fini(&dev->sso); @@ -31800,7 +35653,7 @@ index 78e36ffafe..24088ca05b 100644 chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk; chunk += (tim_ring->nb_chunk_slots - chunk_remainder); diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c -index 16e9764dbf..f76f1c26b0 100644 +index 16e9764dbf..3560ae768a 100644 --- a/dpdk/drivers/event/dlb2/dlb2.c +++ b/dpdk/drivers/event/dlb2/dlb2.c @@ -61,12 +61,14 @@ static struct rte_event_dev_info evdev_dlb2_default_info = { @@ -31822,7 +35675,24 @@ index 16e9764dbf..f76f1c26b0 100644 RTE_EVENT_DEV_CAP_MAINTENANCE_FREE), }; -@@ -626,7 +628,7 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, +@@ -110,7 +112,6 @@ static int + dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) + { + struct dlb2_hw_dev *handle = &dlb2->qm_instance; +- struct dlb2_hw_resource_info *dlb2_info = &handle->info; + int ret; + + /* Query driver resources provisioned for this device */ +@@ -168,8 +169,6 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) + handle->info.hw_rsrc_max.reorder_window_size = + dlb2->hw_rsrc_query_results.num_hist_list_entries; + +- rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info)); +- + return 0; + } + +@@ -626,7 +625,7 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, cfg->num_ldb_queues; cfg->num_hist_list_entries = resources_asked->num_ldb_ports * @@ -31831,7 +35701,7 @@ index 16e9764dbf..f76f1c26b0 100644 if (device_version == DLB2_HW_V2_5) { DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n", -@@ -1349,7 +1351,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, +@@ -1349,7 +1348,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, cfg.cq_depth = rte_align32pow2(dequeue_depth); cfg.cq_depth_threshold = 1; @@ -31840,7 +35710,7 @@ index 16e9764dbf..f76f1c26b0 100644 if (handle->cos_id == DLB2_COS_DEFAULT) cfg.cos_id = 0; -@@ -2145,7 +2147,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2, +@@ -2145,7 +2144,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2, } /* This is expected with eventdev API! 
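 * (a detach for a queue that was never mapped is tolerated here: the
 * branch below just logs the unmapped QID at debug level and moves on)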
@@ -31849,7 +35719,7 @@ index 16e9764dbf..f76f1c26b0 100644 */ if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) { DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n", -@@ -2936,6 +2938,7 @@ __dlb2_event_enqueue_burst(void *event_port, +@@ -2936,6 +2935,7 @@ __dlb2_event_enqueue_burst(void *event_port, struct dlb2_eventdev_port *ev_port = event_port; struct dlb2_port *qm_port = &ev_port->qm_port; struct process_local_port_data *port_data; @@ -31857,7 +35727,7 @@ index 16e9764dbf..f76f1c26b0 100644 int i; RTE_ASSERT(ev_port->enq_configured); -@@ -2945,7 +2948,8 @@ __dlb2_event_enqueue_burst(void *event_port, +@@ -2945,7 +2945,8 @@ __dlb2_event_enqueue_burst(void *event_port, port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)]; @@ -31867,7 +35737,7 @@ index 16e9764dbf..f76f1c26b0 100644 uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE]; uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE]; int pop_offs = 0; -@@ -3897,31 +3901,47 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2, +@@ -3897,31 +3898,47 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2, while (num < max_num) { struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE]; int num_avail; @@ -33092,11 +36962,44 @@ index bf3b01ebc8..66510cc432 100644 } static int +diff --git a/dpdk/drivers/event/sw/iq_chunk.h b/dpdk/drivers/event/sw/iq_chunk.h +index 31d013eab7..7820815c38 100644 +--- a/dpdk/drivers/event/sw/iq_chunk.h ++++ b/dpdk/drivers/event/sw/iq_chunk.h +@@ -9,8 +9,6 @@ + #include + #include + +-#define IQ_ROB_NAMESIZE 12 +- + struct sw_queue_chunk { + struct rte_event events[SW_EVS_PER_Q_CHUNK]; + struct sw_queue_chunk *next; diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c -index 6ae613e0f2..e43bf250d6 100644 +index 6ae613e0f2..d1fd0eeb4f 100644 --- a/dpdk/drivers/event/sw/sw_evdev.c +++ b/dpdk/drivers/event/sw/sw_evdev.c -@@ -625,8 +625,8 @@ sw_dump(struct rte_eventdev *dev, FILE *f) +@@ -229,9 +229,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, + const struct rte_event_queue_conf *queue_conf) + { + unsigned int i; +- int dev_id = sw->data->dev_id; + int socket_id = sw->data->socket_id; +- char buf[IQ_ROB_NAMESIZE]; + struct sw_qid *qid = &sw->qids[idx]; + + /* Initialize the FID structures to no pinning (-1), and zero packets */ +@@ -261,8 +259,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, + goto cleanup; + } + +- snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i); +- qid->reorder_buffer = rte_zmalloc_socket(buf, ++ qid->reorder_buffer = rte_zmalloc_socket(NULL, + window_size * sizeof(qid->reorder_buffer[0]), + 0, socket_id); + if (!qid->reorder_buffer) { +@@ -625,8 +622,8 @@ sw_dump(struct rte_eventdev *dev, FILE *f) "Ordered", "Atomic", "Parallel", "Directed" }; uint32_t i; @@ -33107,7 +37010,7 @@ index 6ae613e0f2..e43bf250d6 100644 fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n", sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts); -@@ -1077,7 +1077,7 @@ sw_probe(struct rte_vdev_device *vdev) +@@ -1077,7 +1074,7 @@ sw_probe(struct rte_vdev_device *vdev) min_burst_size, deq_burst_size, refill_once); dev = rte_event_pmd_vdev_init(name, @@ -33322,10 +37225,36 @@ index 94dc5cd815..8fd9edced2 100644 } diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c -index 1396f32c3d..88cdc7ee2e 100644 +index 1396f32c3d..164597d767 100644 --- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c -@@ -312,7 +312,14 @@ eth_af_packet_tx(void 
*queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -6,6 +6,7 @@ + * All rights reserved. + */ + ++#include + #include + #include + #include +@@ -38,7 +39,7 @@ + #define DFLT_FRAME_SIZE (1 << 11) + #define DFLT_FRAME_COUNT (1 << 9) + +-struct pkt_rx_queue { ++struct __rte_cache_aligned pkt_rx_queue { + int sockfd; + + struct iovec *rd; +@@ -54,7 +55,7 @@ struct pkt_rx_queue { + volatile unsigned long rx_bytes; + }; + +-struct pkt_tx_queue { ++struct __rte_cache_aligned pkt_tx_queue { + int sockfd; + unsigned int frame_data_size; + +@@ -312,7 +313,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static int eth_dev_start(struct rte_eth_dev *dev) { @@ -33340,7 +37269,7 @@ index 1396f32c3d..88cdc7ee2e 100644 return 0; } -@@ -340,6 +347,8 @@ eth_dev_stop(struct rte_eth_dev *dev) +@@ -340,6 +348,8 @@ eth_dev_stop(struct rte_eth_dev *dev) internals->rx_queue[i].sockfd = -1; internals->tx_queue[i].sockfd = -1; @@ -33484,7 +37413,7 @@ index 3ed2b29784..2605086d0c 100644 + endif endif diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c -index 96c2c9d939..6bc7178fc5 100644 +index 96c2c9d939..5e2c239db8 100644 --- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c @@ -15,8 +15,6 @@ @@ -33496,7 +37425,64 @@ index 96c2c9d939..6bc7178fc5 100644 #include #include -@@ -655,7 +653,13 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -102,6 +100,7 @@ struct pkt_rx_queue { + struct xsk_umem_info *umem; + struct xsk_socket *xsk; + struct rte_mempool *mb_pool; ++ uint16_t port; + + struct rx_stats stats; + +@@ -272,6 +271,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + unsigned long rx_bytes = 0; + int i; + struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE]; ++ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; + + nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); + +@@ -299,6 +299,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + * xsk_ring_cons__peek + */ + rx->cached_cons -= nb_pkts; ++ dev->data->rx_mbuf_alloc_failed += nb_pkts; + return 0; + } + +@@ -321,6 +322,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + bufs[i]->data_off = offset - sizeof(struct rte_mbuf) - + rte_pktmbuf_priv_size(umem->mb_pool) - + umem->mb_pool->header_size; ++ bufs[i]->port = rxq->port; + + rte_pktmbuf_pkt_len(bufs[i]) = len; + rte_pktmbuf_data_len(bufs[i]) = len; +@@ -349,6 +351,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + int i; + uint32_t free_thresh = fq->size >> 1; + struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE]; ++ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; + + if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh) + (void)reserve_fill_queue(umem, nb_pkts, NULL, fq); +@@ -367,6 +370,8 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + * xsk_ring_cons__peek + */ + rx->cached_cons -= nb_pkts; ++ dev->data->rx_mbuf_alloc_failed += nb_pkts; ++ + return 0; + } + +@@ -387,6 +392,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + rte_pktmbuf_data_len(mbufs[i]) = len; + rx_bytes += len; + bufs[i] = mbufs[i]; ++ bufs[i]->port = rxq->port; + } + + xsk_ring_cons__release(rx, nb_pkts); +@@ -655,7 +661,13 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static int eth_dev_start(struct rte_eth_dev *dev) { @@ -33510,7 +37496,7 @@ index 96c2c9d939..6bc7178fc5 100644 return 0; } -@@ -664,7 +668,14 @@ 
eth_dev_start(struct rte_eth_dev *dev) +@@ -664,7 +676,14 @@ eth_dev_start(struct rte_eth_dev *dev) static int eth_dev_stop(struct rte_eth_dev *dev) { @@ -33525,7 +37511,7 @@ index 96c2c9d939..6bc7178fc5 100644 return 0; } -@@ -697,67 +708,6 @@ find_internal_resource(struct pmd_internals *port_int) +@@ -697,67 +716,6 @@ find_internal_resource(struct pmd_internals *port_int) return list; } @@ -33593,7 +37579,7 @@ index 96c2c9d939..6bc7178fc5 100644 static int eth_dev_configure(struct rte_eth_dev *dev) { -@@ -908,6 +858,43 @@ eth_stats_reset(struct rte_eth_dev *dev) +@@ -908,6 +866,43 @@ eth_stats_reset(struct rte_eth_dev *dev) return 0; } @@ -33637,7 +37623,7 @@ index 96c2c9d939..6bc7178fc5 100644 static void remove_xdp_program(struct pmd_internals *internals) { -@@ -922,6 +909,10 @@ remove_xdp_program(struct pmd_internals *internals) +@@ -922,9 +917,16 @@ remove_xdp_program(struct pmd_internals *internals) XDP_FLAGS_UPDATE_IF_NOEXIST); } @@ -33648,7 +37634,26 @@ index 96c2c9d939..6bc7178fc5 100644 static void xdp_umem_destroy(struct xsk_umem_info *umem) { -@@ -1013,6 +1004,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align) ++ (void)xsk_umem__delete(umem->umem); ++ umem->umem = NULL; ++ + #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + umem->mb_pool = NULL; + #else +@@ -957,11 +959,8 @@ eth_dev_close(struct rte_eth_dev *dev) + break; + xsk_socket__delete(rxq->xsk); + +- if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) +- == 0) { +- (void)xsk_umem__delete(rxq->umem->umem); ++ if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0) + xdp_umem_destroy(rxq->umem); +- } + + /* free pkt_tx_queue */ + rte_free(rxq->pair); +@@ -1013,6 +1012,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align) return aligned_addr; } @@ -33715,7 +37720,7 @@ index 96c2c9d939..6bc7178fc5 100644 static struct xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq) -@@ -1052,7 +1103,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1052,7 +1111,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id()); if (umem == NULL) { @@ -33724,7 +37729,7 @@ index 96c2c9d939..6bc7178fc5 100644 return NULL; } -@@ -1065,7 +1116,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1065,7 +1124,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, ret = xsk_umem__create(&umem->umem, base_addr, umem_size, &rxq->fq, &rxq->cq, &usr_config); if (ret) { @@ -33733,7 +37738,7 @@ index 96c2c9d939..6bc7178fc5 100644 goto err; } umem->buffer = base_addr; -@@ -1099,7 +1150,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1099,7 +1158,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id()); if (umem == NULL) { @@ -33742,7 +37747,15 @@ index 96c2c9d939..6bc7178fc5 100644 return NULL; } -@@ -1135,7 +1186,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1128,6 +1187,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, + AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n"); + goto err; + } ++ umem->mz = mz; + + ret = xsk_umem__create(&umem->umem, mz->addr, + ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE, +@@ -1135,10 +1195,9 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, &usr_config); 
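 	/* a non-zero ret from xsk_umem__create() means no umem was created;
 	 * the error path below logs the failure and unwinds via the err label */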
if (ret) { @@ -33750,8 +37763,11 @@ index 96c2c9d939..6bc7178fc5 100644 + AF_XDP_LOG(ERR, "Failed to create umem\n"); goto err; } - umem->mz = mz; -@@ -1148,16 +1199,19 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +- umem->mz = mz; + + #endif + return umem; +@@ -1148,16 +1207,19 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, return NULL; } @@ -33775,7 +37791,7 @@ index 96c2c9d939..6bc7178fc5 100644 } /* -@@ -1171,7 +1225,7 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map) +@@ -1171,7 +1233,7 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map) } /* Link the program with the given network device */ @@ -33784,7 +37800,7 @@ index 96c2c9d939..6bc7178fc5 100644 XDP_FLAGS_UPDATE_IF_NOEXIST); if (ret) { AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n", -@@ -1185,6 +1239,8 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map) +@@ -1185,6 +1247,8 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map) return 0; } @@ -33793,7 +37809,7 @@ index 96c2c9d939..6bc7178fc5 100644 /* Detect support for busy polling through setsockopt(). */ static int configure_preferred_busy_poll(struct pkt_rx_queue *rxq) -@@ -1269,18 +1325,19 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1269,18 +1333,19 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, cfg.bind_flags |= XDP_USE_NEED_WAKEUP; #endif @@ -33824,7 +37840,7 @@ index 96c2c9d939..6bc7178fc5 100644 } if (internals->shared_umem) -@@ -1294,7 +1351,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1294,7 +1359,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, if (ret) { AF_XDP_LOG(ERR, "Failed to create xsk socket.\n"); @@ -33833,7 +37849,7 @@ index 96c2c9d939..6bc7178fc5 100644 } /* insert the xsk into the xsks_map */ -@@ -1306,7 +1363,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1306,7 +1371,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, &rxq->xsk_queue_idx, &fd, 0); if (err) { AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n"); @@ -33842,7 +37858,7 @@ index 96c2c9d939..6bc7178fc5 100644 } } -@@ -1314,7 +1371,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1314,7 +1379,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size); if (ret) { AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n"); @@ -33851,7 +37867,7 @@ index 96c2c9d939..6bc7178fc5 100644 } #endif -@@ -1322,20 +1379,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1322,20 +1387,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, ret = configure_preferred_busy_poll(rxq); if (ret) { AF_XDP_LOG(ERR, "Failed configure busy polling.\n"); @@ -33877,6 +37893,93 @@ index 96c2c9d939..6bc7178fc5 100644 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0) xdp_umem_destroy(rxq->umem); +@@ -1389,6 +1455,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, + rxq->fds[0].fd = xsk_socket__fd(rxq->xsk); + rxq->fds[0].events = POLLIN; + ++ rxq->port = dev->data->port_id; ++ + dev->data->rx_queues[rx_queue_id] = rxq; + return 0; + +diff --git a/dpdk/drivers/net/ark/ark_ethdev_tx.c b/dpdk/drivers/net/ark/ark_ethdev_tx.c +index 676e4115d3..09147173ba 100644 
+--- a/dpdk/drivers/net/ark/ark_ethdev_tx.c ++++ b/dpdk/drivers/net/ark/ark_ethdev_tx.c +@@ -39,8 +39,8 @@ struct ark_tx_queue { + uint32_t queue_mask; + + /* 3 indexes to the paired data rings. */ +- int32_t prod_index; /* where to put the next one */ +- int32_t free_index; /* mbuf has been freed */ ++ uint32_t prod_index; /* where to put the next one */ ++ uint32_t free_index; /* mbuf has been freed */ + + /* The queue Id is used to identify the HW Q */ + uint16_t phys_qid; +@@ -49,7 +49,7 @@ struct ark_tx_queue { + + /* next cache line - fields written by device */ + RTE_MARKER cacheline1 __rte_cache_min_aligned; +- volatile int32_t cons_index; /* hw is done, can be freed */ ++ volatile uint32_t cons_index; /* hw is done, can be freed */ + } __rte_cache_aligned; + + /* Forward declarations */ +@@ -123,7 +123,7 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + uint32_t user_meta[5]; + + int stat; +- int32_t prod_index_limit; ++ uint32_t prod_index_limit; + uint16_t nb; + uint8_t user_len = 0; + const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN; +@@ -138,8 +138,13 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + /* leave 4 elements mpu data */ + prod_index_limit = queue->queue_size + queue->free_index - 4; + ++ /* Populate the buffer bringing prod_index up to or slightly beyond ++ * prod_index_limit. Prod_index will increment by 2 or more each ++ * iteration. Note: indexes are uint32_t, cast to (signed) int32_t ++ * to catch the slight overage case; e.g. (200 - 201) ++ */ + for (nb = 0; +- (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0; ++ (nb < nb_pkts) && (int32_t)(prod_index_limit - queue->prod_index) > 0; + ++nb) { + mbuf = tx_pkts[nb]; + +@@ -209,13 +214,13 @@ eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf, + uint32_t *user_meta, uint8_t meta_cnt) + { + struct rte_mbuf *next; +- int32_t free_queue_space; ++ uint32_t free_queue_space; + uint8_t flags = ARK_DDM_SOP; + + free_queue_space = queue->queue_mask - + (queue->prod_index - queue->free_index); + /* We need up to 4 mbufs for first header and 2 for subsequent ones */ +- if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs)))) ++ if (unlikely(free_queue_space < (2U + (2U * mbuf->nb_segs)))) + return -1; + + while (mbuf != NULL) { +@@ -424,10 +429,11 @@ free_completed_tx(struct ark_tx_queue *queue) + { + struct rte_mbuf *mbuf; + union ark_tx_meta *meta; +- int32_t top_index; ++ uint32_t top_index; + + top_index = queue->cons_index; /* read once */ +- while ((top_index - queue->free_index) > 0) { ++ ++ while ((int32_t)(top_index - queue->free_index) > 0) { + meta = &queue->meta_q[queue->free_index & queue->queue_mask]; + if (likely((meta->flags & ARK_DDM_SOP) != 0)) { + mbuf = queue->bufs[queue->free_index & diff --git a/dpdk/drivers/net/ark/ark_global.h b/dpdk/drivers/net/ark/ark_global.h index 6f9b3013d8..49193ac5b3 100644 --- a/dpdk/drivers/net/ark/ark_global.h @@ -33986,11 +38089,155 @@ index 7ac55584ff..4676c19a78 100644 unlock: rte_spinlock_unlock(&avp->lock); return ret; +diff --git a/dpdk/drivers/net/axgbe/axgbe_common.h b/dpdk/drivers/net/axgbe/axgbe_common.h +index df0aa21a9b..9618d7e33b 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_common.h ++++ b/dpdk/drivers/net/axgbe/axgbe_common.h +@@ -407,8 +407,6 @@ + #define MAC_MDIOSCAR_PA_WIDTH 5 + #define MAC_MDIOSCAR_RA_INDEX 0 + #define MAC_MDIOSCAR_RA_WIDTH 16 +-#define MAC_MDIOSCAR_REG_INDEX 0 +-#define MAC_MDIOSCAR_REG_WIDTH 21 + #define MAC_MDIOSCCDR_BUSY_INDEX 22 + #define 
MAC_MDIOSCCDR_BUSY_WIDTH 1 + #define MAC_MDIOSCCDR_CMD_INDEX 16 diff --git a/dpdk/drivers/net/axgbe/axgbe_dev.c b/dpdk/drivers/net/axgbe/axgbe_dev.c -index daeb3308f4..6a7fddffca 100644 +index daeb3308f4..5233633a53 100644 --- a/dpdk/drivers/net/axgbe/axgbe_dev.c +++ b/dpdk/drivers/net/axgbe/axgbe_dev.c -@@ -1046,7 +1046,7 @@ static int axgbe_config_rx_threshold(struct axgbe_port *pdata, +@@ -63,15 +63,27 @@ static int mdio_complete(struct axgbe_port *pdata) + return 0; + } + ++static unsigned int axgbe_create_mdio_sca(int port, int reg) ++{ ++ unsigned int mdio_sca, da; ++ ++ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; ++ ++ mdio_sca = 0; ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); ++ ++ return mdio_sca; ++} ++ + static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr, + int reg, u16 val) + { + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; + +- mdio_sca = 0; +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = axgbe_create_mdio_sca(addr, reg); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; +@@ -97,9 +109,7 @@ static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr, + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; + +- mdio_sca = 0; +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = axgbe_create_mdio_sca(addr, reg); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; +@@ -259,20 +269,28 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed) + return 0; + } + ++static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata) ++{ ++ unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; ++ ++ /* From MAC ver 30H the TFCR is per priority, instead of per queue */ ++ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) ++ return max_q_count; ++ else ++ return (RTE_MIN(pdata->tx_q_count, max_q_count)); ++} ++ + static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) + { +- unsigned int max_q_count, q_count; + unsigned int reg, reg_val; +- unsigned int i; ++ unsigned int i, q_count; + + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); + + /* Clear MAC flow control */ +- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; +- q_count = RTE_MIN(pdata->tx_q_count, +- max_q_count); ++ q_count = axgbe_get_fc_queue_count(pdata); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); +@@ -287,9 +305,8 @@ static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) + + static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) + { +- unsigned int max_q_count, q_count; + unsigned int reg, reg_val; +- unsigned int i; ++ unsigned int i, q_count; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { +@@ -306,9 +323,7 @@ static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) + } + + /* Set MAC flow control */ +- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; +- q_count = RTE_MIN(pdata->tx_q_count, +- max_q_count); ++ q_count = axgbe_get_fc_queue_count(pdata); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); +@@ -637,23 +652,21 @@ static void axgbe_config_dma_cache(struct axgbe_port *pdata) + unsigned int arcache, awcache, arwcache; + + 
arcache = 0; +- AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); + + awcache = 0; +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); + + arwcache = 0; +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1); +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3); +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3); ++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf); ++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache); + } + +@@ -1046,7 +1059,7 @@ static int axgbe_config_rx_threshold(struct axgbe_port *pdata, return 0; } @@ -34000,7 +38247,7 @@ index daeb3308f4..6a7fddffca 100644 { unsigned int fifo_size; diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c -index 7d40c18a86..5add403235 100644 +index 7d40c18a86..4dd634414c 100644 --- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c +++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c @@ -10,6 +10,8 @@ @@ -34021,7 +38268,27 @@ index 7d40c18a86..5add403235 100644 * * @return * void -@@ -1009,18 +1011,18 @@ axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, +@@ -351,6 +353,7 @@ axgbe_dev_start(struct rte_eth_dev *dev) + int ret; + struct rte_eth_dev_data *dev_data = dev->data; + uint16_t max_pkt_len; ++ uint16_t i; + + dev->dev_ops = &axgbe_eth_dev_ops; + +@@ -395,6 +398,11 @@ axgbe_dev_start(struct rte_eth_dev *dev) + else + dev->rx_pkt_burst = &axgbe_recv_pkts; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -1009,18 +1017,18 @@ axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, struct axgbe_port *pdata = dev->data->dev_private; unsigned int i; @@ -34044,7 +38311,7 @@ index 7d40c18a86..5add403235 100644 } static int -@@ -2117,28 +2119,27 @@ static void axgbe_default_config(struct axgbe_port *pdata) +@@ -2117,28 +2125,27 @@ static void axgbe_default_config(struct axgbe_port *pdata) pdata->power_down = 0; } @@ -34090,7 +38357,7 @@ index 7d40c18a86..5add403235 100644 } /* -@@ -2180,7 +2181,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) +@@ -2180,7 +2187,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) /* * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE */ @@ -34099,11 +38366,44 @@ index 7d40c18a86..5add403235 100644 pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; } else { +@@ -2325,12 +2332,14 @@ static int + axgbe_dev_close(struct rte_eth_dev *eth_dev) + { + struct rte_pci_device *pci_dev; ++ struct axgbe_port *pdata; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != 
RTE_PROC_PRIMARY) + return 0; + ++ pdata = eth_dev->data->dev_private; + pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + axgbe_dev_clear_queues(eth_dev); + +@@ -2340,6 +2349,9 @@ axgbe_dev_close(struct rte_eth_dev *eth_dev) + axgbe_dev_interrupt_handler, + (void *)eth_dev); + ++ /* Disable all interrupts in the hardware */ ++ XP_IOWRITE(pdata, XP_INT_EN, 0x0); ++ + return 0; + } + diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/dpdk/drivers/net/axgbe/axgbe_ethdev.h -index a207f2ae1b..e06d40f9eb 100644 +index a207f2ae1b..54cad56b21 100644 --- a/dpdk/drivers/net/axgbe/axgbe_ethdev.h +++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.h -@@ -641,7 +641,7 @@ struct axgbe_port { +@@ -111,6 +111,7 @@ + /* Auto-negotiation */ + #define AXGBE_AN_MS_TIMEOUT 500 + #define AXGBE_LINK_TIMEOUT 5 ++#define AXGBE_KR_TRAINING_WAIT_ITER 50 + + #define AXGBE_SGMII_AN_LINK_STATUS BIT(1) + #define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) +@@ -641,7 +642,7 @@ struct axgbe_port { unsigned int kr_redrv; @@ -34112,11 +38412,151 @@ index a207f2ae1b..e06d40f9eb 100644 unsigned int an_int; unsigned int an_status; enum axgbe_an an_result; +@@ -652,6 +653,7 @@ struct axgbe_port { + unsigned int parallel_detect; + unsigned int fec_ability; + unsigned long an_start; ++ unsigned long kr_start_time; + enum axgbe_an_mode an_mode; + + /* I2C support */ +diff --git a/dpdk/drivers/net/axgbe/axgbe_mdio.c b/dpdk/drivers/net/axgbe/axgbe_mdio.c +index 32d8c666f9..ce449f3ed6 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_mdio.c ++++ b/dpdk/drivers/net/axgbe/axgbe_mdio.c +@@ -235,13 +235,14 @@ static void axgbe_switch_mode(struct axgbe_port *pdata) + axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); + } + +-static void axgbe_set_mode(struct axgbe_port *pdata, ++static bool axgbe_set_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) + { + if (mode == axgbe_cur_mode(pdata)) +- return; ++ return false; + + axgbe_change_mode(pdata, mode); ++ return true; + } + + static bool axgbe_use_mode(struct axgbe_port *pdata, +@@ -383,6 +384,7 @@ static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata, + if (reg & AXGBE_KR_TRAINING_ENABLE) { + if (pdata->phy_if.phy_impl.kr_training_pre) + pdata->phy_if.phy_impl.kr_training_pre(pdata); ++ pdata->kr_start_time = rte_get_timer_cycles(); + + reg |= AXGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, +@@ -519,6 +521,7 @@ static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata) + + axgbe_an_disable(pdata); + axgbe_switch_mode(pdata); ++ pdata->an_result = AXGBE_AN_READY; + axgbe_an_restart(pdata); + + return AXGBE_AN_INCOMPAT_LINK; +@@ -999,11 +1002,34 @@ static void axgbe_check_link_timeout(struct axgbe_port *pdata) + { + unsigned long link_timeout; + unsigned long ticks; ++ unsigned long kr_time; ++ int wait; + + link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT * + 2 * rte_get_timer_hz()); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, link_timeout)) { ++ if ((axgbe_cur_mode(pdata) == AXGBE_MODE_KR) && ++ pdata->phy.autoneg == AUTONEG_ENABLE) { ++ /* AN restart should not happen while KR training is in progress. ++ * The while loop ensures no AN restart during KR training, ++ * waits up to 500ms and AN restart is triggered only if KR ++ * training is failed. 
++ */ ++ wait = AXGBE_KR_TRAINING_WAIT_ITER; ++ while (wait--) { ++ kr_time = pdata->kr_start_time + ++ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT); ++ ticks = rte_get_timer_cycles(); ++ if (time_after(ticks, kr_time)) ++ break; ++ /* AN restart is not required, if AN result is COMPLETE */ ++ if (pdata->an_result == AXGBE_AN_COMPLETE) ++ return; ++ rte_delay_us(10500); ++ } ++ } ++ + PMD_DRV_LOG(NOTICE, "AN link timeout\n"); + axgbe_phy_config_aneg(pdata); + } +@@ -1014,7 +1040,7 @@ static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata) + return pdata->phy_if.phy_impl.an_outcome(pdata); + } + +-static void axgbe_phy_status_result(struct axgbe_port *pdata) ++static bool axgbe_phy_status_result(struct axgbe_port *pdata) + { + enum axgbe_mode mode; + +@@ -1048,7 +1074,10 @@ static void axgbe_phy_status_result(struct axgbe_port *pdata) + + pdata->phy.duplex = DUPLEX_FULL; + +- axgbe_set_mode(pdata, mode); ++ if (axgbe_set_mode(pdata, mode)) ++ return true; ++ else ++ return false; + } + + static int autoneg_time_out(unsigned long autoneg_start_time) +@@ -1083,7 +1112,7 @@ static void axgbe_phy_status(struct axgbe_port *pdata) + &an_restart); + if (an_restart) { + axgbe_phy_config_aneg(pdata); +- return; ++ goto adjust_link; + } + + if (pdata->phy.link) { +@@ -1115,7 +1144,10 @@ static void axgbe_phy_status(struct axgbe_port *pdata) + return; + } + } +- axgbe_phy_status_result(pdata); ++ ++ if (axgbe_phy_status_result(pdata)) ++ return; ++ + if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) + rte_bit_relaxed_clear32(AXGBE_LINK_INIT, + &pdata->dev_state); diff --git a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c -index 02236ec192..72104f8a3f 100644 +index 02236ec192..60a1bc5d7b 100644 --- a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c +++ b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c -@@ -347,7 +347,7 @@ static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target, +@@ -68,6 +68,7 @@ enum axgbe_sfp_cable { + AXGBE_SFP_CABLE_UNKNOWN = 0, + AXGBE_SFP_CABLE_ACTIVE, + AXGBE_SFP_CABLE_PASSIVE, ++ AXGBE_SFP_CABLE_FIBER, + }; + + enum axgbe_sfp_base { +@@ -115,9 +116,7 @@ enum axgbe_sfp_speed { + + #define AXGBE_SFP_BASE_BR 12 + #define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a +-#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d + #define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64 +-#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68 + + #define AXGBE_SFP_BASE_CU_CABLE_LEN 18 + +@@ -347,7 +346,7 @@ static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target, retry = 1; again2: @@ -34125,7 +38565,80 @@ index 02236ec192..72104f8a3f 100644 i2c_op.cmd = AXGBE_I2C_CMD_READ; i2c_op.target = target; i2c_op.len = val_len; -@@ -1093,7 +1093,7 @@ static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused) +@@ -534,25 +533,22 @@ static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata) + static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom, + enum axgbe_sfp_speed sfp_speed) + { +- u8 *sfp_base, min, max; ++ u8 *sfp_base, min; + + sfp_base = sfp_eeprom->base; + + switch (sfp_speed) { + case AXGBE_SFP_SPEED_1000: + min = AXGBE_SFP_BASE_BR_1GBE_MIN; +- max = AXGBE_SFP_BASE_BR_1GBE_MAX; + break; + case AXGBE_SFP_SPEED_10000: + min = AXGBE_SFP_BASE_BR_10GBE_MIN; +- max = AXGBE_SFP_BASE_BR_10GBE_MAX; + break; + default: + return false; + } + +- return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) && +- (sfp_base[AXGBE_SFP_BASE_BR] <= max)); ++ return sfp_base[AXGBE_SFP_BASE_BR] >= min; + } + + static void axgbe_phy_sfp_external_phy(struct 
axgbe_port *pdata) +@@ -577,6 +573,9 @@ static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata) + AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR))) + return false; + ++ /* Reset PHY - wait for self-clearing reset bit to clear */ ++ pdata->phy_if.phy_impl.reset(pdata); ++ + if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN], + AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) { + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; +@@ -612,16 +611,21 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) + + axgbe_phy_sfp_parse_quirks(pdata); + +- /* Assume ACTIVE cable unless told it is PASSIVE */ ++ /* Assume FIBER cable unless told otherwise */ + if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE; + phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN]; +- } else { ++ } else if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_ACTIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; ++ } else { ++ phy_data->sfp_cable = AXGBE_SFP_CABLE_FIBER; + } + + /* Determine the type of SFP */ +- if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) ++ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_FIBER && ++ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) ++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; ++ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR; +@@ -638,9 +642,6 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_T; +- else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) && +- axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) +- phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; + + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: +@@ -1093,7 +1094,7 @@ static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused) { return 0; /* Dummy API since there is no case to support @@ -34134,8 +38647,22 @@ index 02236ec192..72104f8a3f 100644 */ } +@@ -1693,6 +1694,13 @@ static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart) + if (reg & MDIO_STAT1_LSTATUS) + return 1; + ++ if (pdata->phy.autoneg == AUTONEG_ENABLE && ++ phy_data->port_mode == AXGBE_PORT_MODE_BACKPLANE) { ++ if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) { ++ *an_restart = 1; ++ } ++ } ++ + /* No link, attempt a receiver reset cycle */ + if (phy_data->rrc_count++) { + phy_data->rrc_count = 0; diff --git a/dpdk/drivers/net/axgbe/axgbe_rxtx.c b/dpdk/drivers/net/axgbe/axgbe_rxtx.c -index 6bd41d3002..1de5b29f06 100644 +index 6bd41d3002..f993919637 100644 --- a/dpdk/drivers/net/axgbe/axgbe_rxtx.c +++ b/dpdk/drivers/net/axgbe/axgbe_rxtx.c @@ -341,20 +341,19 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue, @@ -34352,6 +38879,22 @@ index 6bd41d3002..1de5b29f06 100644 } return nb_rx; } +@@ -901,6 +928,7 @@ void axgbe_dev_clear_queues(struct rte_eth_dev *dev) + axgbe_rx_queue_release(rxq); + dev->data->rx_queues[i] = NULL; + } ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { +@@ -910,6 +938,7 @@ void axgbe_dev_clear_queues(struct rte_eth_dev *dev) + axgbe_tx_queue_release(txq); + dev->data->tx_queues[i] 
= NULL; + } ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + diff --git a/dpdk/drivers/net/axgbe/axgbe_rxtx.h b/dpdk/drivers/net/axgbe/axgbe_rxtx.h index 2a330339cd..2da3095547 100644 --- a/dpdk/drivers/net/axgbe/axgbe_rxtx.h @@ -34383,7 +38926,7 @@ index 816371cd79..d95a446bef 100644 #define TX_DESC_CTRL_FLAG_TMST 0x40000000 #define TX_FREE_BULK 8 diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c -index f67db015b5..74e3018eab 100644 +index f67db015b5..55a91fad78 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x.c +++ b/dpdk/drivers/net/bnx2x/bnx2x.c @@ -926,7 +926,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid) @@ -34476,6 +39019,15 @@ index f67db015b5..74e3018eab 100644 * visible to the controller. * * If an mbuf is submitted to this routine and cannot be given to the +@@ -2389,7 +2389,7 @@ int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) + static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) + { + sc->ilt->lines = rte_calloc("", +- sizeof(struct ilt_line), ILT_MAX_LINES, ++ ILT_MAX_LINES, sizeof(struct ilt_line), + RTE_CACHE_LINE_SIZE); + return sc->ilt->lines == NULL; + } @@ -2719,7 +2719,7 @@ static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc) return val1 != 0; } @@ -34635,9 +39187,32 @@ index f36ad30e17..e0be3c137c 100644 } diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c -index 1cd972591a..c07b01510a 100644 +index 1cd972591a..69132c7c80 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c +++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.c +@@ -114,7 +114,7 @@ bnx2x_hw_stats_post(struct bnx2x_softc *sc) + + /* Update MCP's statistics if possible */ + if (sc->func_stx) { +- rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, ++ memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, + sizeof(sc->func_stats)); + } + +@@ -817,10 +817,10 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc) + etherstatspktsover1522octets); + } + +- rte_memcpy(old, new, sizeof(struct nig_stats)); ++ memcpy(old, new, sizeof(struct nig_stats)); + +- rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), +- sizeof(struct mac_stx)); ++ memcpy(RTE_PTR_ADD(estats, offsetof(struct bnx2x_eth_stats, rx_stat_ifhcinbadoctets_hi)), ++ &pstats->mac_stx[1], sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = pstats->brb_drop_lo; + @@ -1358,7 +1358,7 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc) /* @@ -34647,6 +39222,20 @@ index 1cd972591a..c07b01510a 100644 */ memset(&sc->fw_stats_data->storm_counters, 0xff, sizeof(struct stats_counter)); +@@ -1492,9 +1492,11 @@ bnx2x_stats_init(struct bnx2x_softc *sc) + REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + if (!CHIP_IS_E3(sc)) { + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, +- &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); ++ RTE_PTR_ADD(&sc->port.old_nig_stats, ++ offsetof(struct nig_stats, egress_mac_pkt0_lo)), 2); + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, +- &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); ++ RTE_PTR_ADD(&sc->port.old_nig_stats, ++ offsetof(struct nig_stats, egress_mac_pkt1_lo)), 2); + } + + /* function stats */ diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.h b/dpdk/drivers/net/bnx2x/bnx2x_stats.h index 635412bdd3..11ddab5039 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_stats.h @@ -34670,9 +39259,21 @@ index 635412bdd3..11ddab5039 100644 uint32_t total_bytes_received_lo; uint32_t total_bytes_transmitted_hi; diff --git 
a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c -index 945e3df84f..63953c2979 100644 +index 945e3df84f..5411df3a38 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c +++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c +@@ -52,9 +52,9 @@ bnx2x_check_bull(struct bnx2x_softc *sc) + + /* check the mac address and VLAN and allocate memory if valid */ + if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN)) +- rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); ++ memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); + if (valid_bitmap & (1 << VLAN_VALID)) +- rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, RTE_VLAN_HLEN); ++ memcpy(&bull->vlan, &sc->old_bulletin.vlan, sizeof(bull->vlan)); + + sc->old_bulletin = *bull; + @@ -73,7 +73,7 @@ bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list, tl->length = length; } @@ -34682,6 +39283,40 @@ index 945e3df84f..63953c2979 100644 static void bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv, uint16_t type, uint16_t length) +@@ -569,7 +569,7 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) + + bnx2x_check_bull(sc); + +- rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); ++ memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, +@@ -583,9 +583,9 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) + while (BNX2X_VF_STATUS_FAILURE == reply->status && + bnx2x_check_bull(sc)) { + /* A new mac was configured by PF for us */ +- rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, ++ memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, + ETH_ALEN); +- rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, ++ memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, + ETH_ALEN); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); +@@ -622,10 +622,10 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + +- rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); ++ memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); + query->rss_key_size = T_ETH_RSS_KEY; + +- rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); ++ memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + + query->rss_result_mask = params->rss_result_mask; diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h index 9577341266..d71e81c005 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h @@ -35134,7 +39769,7 @@ index 2093d8f373..43fbf04ece 100644 */ diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h -index 234161053f..76783eb3a1 100644 +index 234161053f..bbbd2ae0d6 100644 --- a/dpdk/drivers/net/bnxt/bnxt.h +++ b/dpdk/drivers/net/bnxt/bnxt.h @@ -72,8 +72,7 @@ @@ -35156,7 +39791,26 @@ index 234161053f..76783eb3a1 100644 uint16_t support_pam4_auto_speeds; uint8_t req_signal_mode; uint8_t module_status; -@@ -580,30 +579,6 @@ struct bnxt_rep_info { +@@ -442,8 +441,8 @@ struct bnxt_ring_mem_info { + + struct bnxt_ctx_pg_info { + uint32_t entries; +- void *ctx_pg_arr[MAX_CTX_PAGES]; +- rte_iova_t ctx_dma_arr[MAX_CTX_PAGES]; ++ void **ctx_pg_arr; ++ rte_iova_t *ctx_dma_arr; + struct bnxt_ring_mem_info ring_mem; + }; + +@@ -543,7 +542,6 @@ struct bnxt_mark_info { + + struct bnxt_rep_info { + struct rte_eth_dev *vfr_eth_dev; +- 
pthread_mutex_t vfr_lock; + pthread_mutex_t vfr_start_lock; + bool conduit_valid; + }; +@@ -580,30 +578,6 @@ struct bnxt_rep_info { RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ RTE_ETH_RSS_LEVEL_MASK) @@ -35187,7 +39841,7 @@ index 234161053f..76783eb3a1 100644 #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) struct bnxt_flow_stat_info { -@@ -672,7 +647,6 @@ struct bnxt { +@@ -672,7 +646,6 @@ struct bnxt { #define BNXT_FLAG_PORT_STATS BIT(2) #define BNXT_FLAG_JUMBO BIT(3) #define BNXT_FLAG_SHORT_CMD BIT(4) @@ -35195,7 +39849,7 @@ index 234161053f..76783eb3a1 100644 #define BNXT_FLAG_PTP_SUPPORTED BIT(6) #define BNXT_FLAG_MULTI_HOST BIT(7) #define BNXT_FLAG_EXT_RX_PORT_STATS BIT(8) -@@ -695,9 +669,6 @@ struct bnxt { +@@ -695,9 +668,6 @@ struct bnxt { #define BNXT_FLAG_FLOW_XSTATS_EN BIT(25) #define BNXT_FLAG_DFLT_MAC_SET BIT(26) #define BNXT_FLAG_GFID_ENABLE BIT(27) @@ -35205,7 +39859,7 @@ index 234161053f..76783eb3a1 100644 #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) #define BNXT_NPAR(bp) ((bp)->flags & BNXT_FLAG_NPAR_PF) -@@ -834,7 +805,7 @@ struct bnxt { +@@ -834,7 +804,7 @@ struct bnxt { uint16_t max_tx_rings; uint16_t max_rx_rings; #define MAX_STINGRAY_RINGS 236U @@ -35214,7 +39868,7 @@ index 234161053f..76783eb3a1 100644 uint16_t max_nq_rings; uint16_t max_l2_ctx; -@@ -891,6 +862,15 @@ struct bnxt { +@@ -891,6 +861,16 @@ struct bnxt { uint16_t tx_cfa_action; struct bnxt_ring_stats *prev_rx_ring_stats; struct bnxt_ring_stats *prev_tx_ring_stats; @@ -35223,6 +39877,7 @@ index 234161053f..76783eb3a1 100644 + struct rte_ether_addr *mcast_addr_list; + rte_iova_t mc_list_dma_addr; + uint32_t nb_mc_addr; ++#define BNXT_DFLT_MAX_MC_ADDR 16 /* for compatibility with older firmware */ + uint32_t max_mcast_addr; /* maximum number of mcast filters supported */ + + struct rte_eth_rss_conf rss_conf; /* RSS configuration. 
*/ @@ -35284,7 +39939,7 @@ index a43b22a8f8..e1dcf3ac2f 100644 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: PMD_DRV_LOG(INFO, "Port conn async event\n"); diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c -index f79f33ab4e..44fd45a4e9 100644 +index f79f33ab4e..63ef5593b0 100644 --- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c +++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c @@ -177,6 +177,7 @@ static int bnxt_restore_vlan_filters(struct bnxt *bp); @@ -35526,7 +40181,19 @@ index f79f33ab4e..44fd45a4e9 100644 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) PMD_DRV_LOG(ERR, "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", -@@ -1639,6 +1660,7 @@ static void bnxt_drv_uninit(struct bnxt *bp) +@@ -1626,10 +1647,8 @@ bnxt_uninit_locks(struct bnxt *bp) + pthread_mutex_destroy(&bp->def_cp_lock); + pthread_mutex_destroy(&bp->health_check_lock); + pthread_mutex_destroy(&bp->err_recovery_lock); +- if (bp->rep_info) { +- pthread_mutex_destroy(&bp->rep_info->vfr_lock); ++ if (bp->rep_info) + pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); +- } + } + + static void bnxt_drv_uninit(struct bnxt *bp) +@@ -1639,6 +1658,7 @@ static void bnxt_drv_uninit(struct bnxt *bp) bnxt_free_link_info(bp); bnxt_free_parent_info(bp); bnxt_uninit_locks(bp); @@ -35534,7 +40201,7 @@ index f79f33ab4e..44fd45a4e9 100644 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); bp->tx_mem_zone = NULL; -@@ -1673,6 +1695,7 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) +@@ -1673,6 +1693,7 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); bnxt_cancel_fc_thread(bp); @@ -35542,7 +40209,7 @@ index f79f33ab4e..44fd45a4e9 100644 if (eth_dev->data->dev_started) ret = bnxt_dev_stop(eth_dev); -@@ -1812,6 +1835,14 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) +@@ -1812,6 +1833,14 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) if (bp->link_info == NULL) goto out; @@ -35557,7 +40224,7 @@ index f79f33ab4e..44fd45a4e9 100644 do { /* Retrieve link info from hardware */ rc = bnxt_get_hwrm_link_config(bp, &new); -@@ -1829,12 +1860,6 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) +@@ -1829,12 +1858,6 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); } while (cnt--); @@ -35570,7 +40237,7 @@ index f79f33ab4e..44fd45a4e9 100644 out: /* Timed out or success */ if (new.link_status != eth_dev->data->dev_link.link_status || -@@ -2125,11 +2150,6 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, +@@ -2125,11 +2148,6 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, return -EINVAL; } @@ -35582,7 +40249,7 @@ index f79f33ab4e..44fd45a4e9 100644 /* Update the default RSS VNIC(s) */ vnic = BNXT_GET_DEFAULT_VNIC(bp); vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); -@@ -2137,6 +2157,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, +@@ -2137,6 +2155,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); @@ -35592,7 +40259,7 @@ index f79f33ab4e..44fd45a4e9 100644 /* * If hashkey is not specified, use the previously configured * hashkey -@@ -2152,6 +2175,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, +@@ -2152,6 +2173,9 @@ static 
int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, } memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); @@ -35602,7 +40269,7 @@ index f79f33ab4e..44fd45a4e9 100644 rss_config: rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); return rc; -@@ -2831,9 +2857,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, +@@ -2831,9 +2855,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, uint32_t nb_mc_addr) { struct bnxt *bp = eth_dev->data->dev_private; @@ -35613,7 +40280,7 @@ index f79f33ab4e..44fd45a4e9 100644 int rc; rc = is_bnxt_in_error(bp); -@@ -2842,6 +2867,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, +@@ -2842,6 +2865,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, vnic = BNXT_GET_DEFAULT_VNIC(bp); @@ -35622,7 +40289,7 @@ index f79f33ab4e..44fd45a4e9 100644 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; goto allmulti; -@@ -2849,14 +2876,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, +@@ -2849,14 +2874,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, /* TODO Check for Duplicate mcast addresses */ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; @@ -35640,7 +40307,7 @@ index f79f33ab4e..44fd45a4e9 100644 vnic->flags |= BNXT_VNIC_INFO_MCAST; else vnic->flags &= ~BNXT_VNIC_INFO_MCAST; -@@ -3003,9 +3026,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, +@@ -3003,9 +3024,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) { @@ -35650,7 +40317,7 @@ index f79f33ab4e..44fd45a4e9 100644 uint32_t rc; uint32_t i; -@@ -3013,35 +3034,25 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) +@@ -3013,35 +3032,25 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) if (rc) return rc; @@ -35696,7 +40363,7 @@ index f79f33ab4e..44fd45a4e9 100644 for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; uint16_t size = 0; -@@ -4264,6 +4275,18 @@ static int bnxt_restore_mac_filters(struct bnxt *bp) +@@ -4264,6 +4273,18 @@ static int bnxt_restore_mac_filters(struct bnxt *bp) return 0; } @@ -35715,7 +40382,7 @@ index f79f33ab4e..44fd45a4e9 100644 static int bnxt_restore_filters(struct bnxt *bp) { struct rte_eth_dev *dev = bp->eth_dev; -@@ -4284,14 +4307,21 @@ static int bnxt_restore_filters(struct bnxt *bp) +@@ -4284,14 +4305,21 @@ static int bnxt_restore_filters(struct bnxt *bp) if (ret) return ret; @@ -35739,7 +40406,7 @@ index f79f33ab4e..44fd45a4e9 100644 int rc = 0; do { -@@ -4345,16 +4375,16 @@ static void bnxt_dev_recover(void *arg) +@@ -4345,16 +4373,16 @@ static void bnxt_dev_recover(void *arg) goto err_start; } @@ -35760,7 +40427,96 @@ index f79f33ab4e..44fd45a4e9 100644 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", bp->eth_dev->data->port_id); pthread_mutex_unlock(&bp->err_recovery_lock); -@@ -4985,11 +5015,15 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp) +@@ -4660,7 +4688,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + { + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + const struct rte_memzone *mz = NULL; +- char mz_name[RTE_MEMZONE_NAMESIZE]; ++ char name[RTE_MEMZONE_NAMESIZE]; + rte_iova_t mz_phys_addr; + uint64_t valid_bits = 0; + uint32_t sz; +@@ -4672,6 +4700,19 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / + BNXT_PAGE_SIZE; + rmem->page_size = BNXT_PAGE_SIZE; ++ ++ 
snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d", ++ suffix, idx, bp->eth_dev->data->port_id); ++ ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0); ++ if (ctx_pg->ctx_pg_arr == NULL) ++ return -ENOMEM; ++ ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d", ++ suffix, idx, bp->eth_dev->data->port_id); ++ ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0); ++ if (ctx_pg->ctx_dma_arr == NULL) ++ return -ENOMEM; ++ + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; +@@ -4679,13 +4720,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + valid_bits = PTU_PTE_VALID; + + if (rmem->nr_pages > 1) { +- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, ++ snprintf(name, RTE_MEMZONE_NAMESIZE, + "bnxt_ctx_pg_tbl%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); +- mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; +- mz = rte_memzone_lookup(mz_name); ++ name[RTE_MEMZONE_NAMESIZE - 1] = 0; ++ mz = rte_memzone_lookup(name); + if (!mz) { +- mz = rte_memzone_reserve_aligned(mz_name, ++ mz = rte_memzone_reserve_aligned(name, + rmem->nr_pages * 8, + bp->eth_dev->device->numa_node, + RTE_MEMZONE_2MB | +@@ -4704,11 +4745,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + rmem->pg_tbl_mz = mz; + } + +- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); +- mz = rte_memzone_lookup(mz_name); ++ mz = rte_memzone_lookup(name); + if (!mz) { +- mz = rte_memzone_reserve_aligned(mz_name, ++ mz = rte_memzone_reserve_aligned(name, + mem_size, + bp->eth_dev->device->numa_node, + RTE_MEMZONE_1GB | +@@ -4754,6 +4795,17 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) + return; + + bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; ++ rte_free(bp->ctx->qp_mem.ctx_pg_arr); ++ rte_free(bp->ctx->srq_mem.ctx_pg_arr); ++ rte_free(bp->ctx->cq_mem.ctx_pg_arr); ++ rte_free(bp->ctx->vnic_mem.ctx_pg_arr); ++ rte_free(bp->ctx->stat_mem.ctx_pg_arr); ++ rte_free(bp->ctx->qp_mem.ctx_dma_arr); ++ rte_free(bp->ctx->srq_mem.ctx_dma_arr); ++ rte_free(bp->ctx->cq_mem.ctx_dma_arr); ++ rte_free(bp->ctx->vnic_mem.ctx_dma_arr); ++ rte_free(bp->ctx->stat_mem.ctx_dma_arr); ++ + rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); +@@ -4766,6 +4818,8 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) + rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); + + for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { ++ rte_free(bp->ctx->tqm_mem[i]->ctx_pg_arr); ++ rte_free(bp->ctx->tqm_mem[i]->ctx_dma_arr); + if (bp->ctx->tqm_mem[i]) + rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); + } +@@ -4985,11 +5039,15 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp) static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; @@ -35778,7 +40534,7 @@ index f79f33ab4e..44fd45a4e9 100644 0); if (eth_dev->data->mac_addrs == NULL) { PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); -@@ -5016,6 +5050,23 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) +@@ -5016,6 +5074,23 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) /* Copy the permanent MAC from the FUNC_QCAPS response */ memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); @@ -35802,7 +40558,7 @@ index f79f33ab4e..44fd45a4e9 100644 return rc; } -@@ -5178,10 +5229,6 @@ 
static int bnxt_get_config(struct bnxt *bp) +@@ -5178,10 +5253,6 @@ static int bnxt_get_config(struct bnxt *bp) if (rc) return rc; @@ -35813,7 +40569,7 @@ index f79f33ab4e..44fd45a4e9 100644 bnxt_hwrm_port_mac_qcfg(bp); bnxt_hwrm_parent_pf_qcfg(bp); -@@ -5229,6 +5276,25 @@ bnxt_init_locks(struct bnxt *bp) +@@ -5229,6 +5300,25 @@ bnxt_init_locks(struct bnxt *bp) return err; } @@ -35839,7 +40595,7 @@ index f79f33ab4e..44fd45a4e9 100644 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) { int rc = 0; -@@ -5237,6 +5303,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) +@@ -5237,6 +5327,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) if (rc) return rc; @@ -35850,7 +40606,7 @@ index f79f33ab4e..44fd45a4e9 100644 if (!reconfig_dev) { rc = bnxt_setup_mac_addr(bp->eth_dev); if (rc) -@@ -5272,6 +5342,16 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) +@@ -5272,6 +5366,16 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) } } @@ -35867,7 +40623,7 @@ index f79f33ab4e..44fd45a4e9 100644 rc = bnxt_alloc_mem(bp, reconfig_dev); if (rc) return rc; -@@ -5666,24 +5746,6 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) +@@ -5666,24 +5770,6 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) return ret; } @@ -35892,7 +40648,7 @@ index f79f33ab4e..44fd45a4e9 100644 /* Allocate and initialize various fields in bnxt struct that * need to be allocated/destroyed only once in the lifetime of the driver */ -@@ -5760,10 +5822,6 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev) +@@ -5760,10 +5846,6 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev) if (rc) return rc; @@ -35903,7 +40659,7 @@ index f79f33ab4e..44fd45a4e9 100644 return rc; } -@@ -5794,6 +5852,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) +@@ -5794,6 +5876,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; @@ -35911,7 +40667,7 @@ index f79f33ab4e..44fd45a4e9 100644 bp = eth_dev->data->dev_private; -@@ -5916,14 +5975,16 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) +@@ -5916,14 +5999,16 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) if (!reconfig_dev) { bnxt_free_hwrm_resources(bp); bnxt_free_error_recovery_info(bp); @@ -35931,7 +40687,21 @@ index f79f33ab4e..44fd45a4e9 100644 rte_free(bp->ptp_cfg); bp->ptp_cfg = NULL; return rc; -@@ -6302,4 +6363,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev) +@@ -6001,13 +6086,6 @@ static int bnxt_init_rep_info(struct bnxt *bp) + for (i = 0; i < BNXT_MAX_CFA_CODE; i++) + bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; + +- rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); +- if (rc) { +- PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); +- bnxt_free_rep_info(bp); +- return rc; +- } +- + rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); + if (rc) { + PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); +@@ -6302,4 +6380,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev) RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE); RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); @@ -36096,7 +40866,7 @@ index d062be5525..8bdf2405f0 100644 if (!ret || update_flow) { flow->filter = filter; diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c -index f53f8632fe..51e1e2d6b3 100644 
+index f53f8632fe..3ade65456b 100644 --- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c +++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c @@ -506,8 +506,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, @@ -36110,7 +40880,7 @@ index f53f8632fe..51e1e2d6b3 100644 } if (vlan_table) { if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN)) -@@ -902,18 +902,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) +@@ -902,18 +902,14 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs) bp->max_l2_ctx += bp->max_rx_em_flows; @@ -36128,11 +40898,13 @@ index f53f8632fe..51e1e2d6b3 100644 bp->max_l2_ctx, bp->max_vnics); bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters); ++ if (!bp->max_mcast_addr) ++ bp->max_mcast_addr = BNXT_DFLT_MAX_MC_ADDR; + if (BNXT_PF(bp)) { bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics); if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) { -@@ -945,6 +939,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) +@@ -945,6 +941,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n"); } @@ -36144,7 +40916,7 @@ index f53f8632fe..51e1e2d6b3 100644 unlock: HWRM_UNLOCK(); -@@ -1250,11 +1249,6 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) +@@ -1250,11 +1251,6 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) else HWRM_CHECK_RESULT(); @@ -36156,7 +40928,7 @@ index f53f8632fe..51e1e2d6b3 100644 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n", resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b, -@@ -1430,20 +1424,21 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) +@@ -1430,20 +1426,21 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) } } /* AutoNeg - Advertise speeds specified. 
*/ @@ -36184,7 +40956,7 @@ index f53f8632fe..51e1e2d6b3 100644 } } if (conf->auto_link_speed && -@@ -1511,12 +1506,12 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, +@@ -1511,12 +1508,12 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, link_info->phy_ver[1] = resp->phy_min; link_info->phy_ver[2] = resp->phy_bld; link_info->link_signal_mode = @@ -36199,7 +40971,7 @@ index f53f8632fe..51e1e2d6b3 100644 rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask); link_info->module_status = resp->module_status; HWRM_UNLOCK(); -@@ -1527,7 +1522,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, +@@ -1527,7 +1524,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, link_info->support_speeds, link_info->force_link_speed); PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n", link_info->link_signal_mode, @@ -36208,42 +40980,52 @@ index f53f8632fe..51e1e2d6b3 100644 link_info->support_pam4_speeds, link_info->force_pam4_link_speed); return rc; -@@ -2975,7 +2970,7 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) +@@ -2975,8 +2972,10 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) } static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, - uint16_t pam4_link) + struct bnxt_link_info *link_info) { ++ uint16_t support_pam4_speeds = link_info->support_pam4_speeds; ++ uint16_t support_speeds = link_info->support_speeds; uint16_t eth_link_speed = 0; -@@ -3014,18 +3009,29 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, + if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG) +@@ -3008,24 +3007,36 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, + case RTE_ETH_LINK_SPEED_25G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; + break; + case RTE_ETH_LINK_SPEED_40G: + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; break; case RTE_ETH_LINK_SPEED_50G: - eth_link_speed = pam4_link ? - HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB : - HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; -+ if (link_info->support_pam4_speeds & -+ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { -+ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; -+ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; -+ } else { ++ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; + link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; ++ } else if (support_pam4_speeds & ++ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { ++ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; + } break; case RTE_ETH_LINK_SPEED_100G: - eth_link_speed = pam4_link ? 
- HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB : - HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; -+ if (link_info->support_pam4_speeds & -+ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { -+ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; -+ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; -+ } else { ++ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; + link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; ++ } else if (support_pam4_speeds & ++ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { ++ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; + } break; case RTE_ETH_LINK_SPEED_200G: @@ -36253,7 +41035,7 @@ index f53f8632fe..51e1e2d6b3 100644 break; default: PMD_DRV_LOG(ERR, -@@ -3229,9 +3235,11 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) +@@ -3229,9 +3240,11 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) if (!link_up) goto port_phy_cfg; @@ -36266,7 +41048,7 @@ index f53f8632fe..51e1e2d6b3 100644 /* 40G is not supported as part of media auto detect. * The speed should be forced and autoneg disabled * to configure 40G speed. -@@ -3240,24 +3248,27 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) +@@ -3240,24 +3253,27 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) autoneg = 0; } @@ -36302,7 +41084,7 @@ index f53f8632fe..51e1e2d6b3 100644 } else { if (bp->link_info->phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET || -@@ -3276,21 +3287,21 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) +@@ -3276,21 +3292,21 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) else if (bp->link_info->force_pam4_link_speed) link_req.link_speed = bp->link_info->force_pam4_link_speed; @@ -36329,7 +41111,7 @@ index f53f8632fe..51e1e2d6b3 100644 link_req.link_speed = bp->link_info->auto_link_speed; } link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds); -@@ -3491,7 +3502,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, +@@ -3491,7 +3507,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, rte_cpu_to_le_16(pf_resc->num_hw_ring_grps); } else if (BNXT_HAS_NQ(bp)) { enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX; @@ -36338,7 +41120,7 @@ index f53f8632fe..51e1e2d6b3 100644 } req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags); -@@ -3504,7 +3515,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, +@@ -3504,7 +3520,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings); req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings); req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs); @@ -36347,7 +41129,7 @@ index f53f8632fe..51e1e2d6b3 100644 req.fid = rte_cpu_to_le_16(0xffff); req.enables = rte_cpu_to_le_32(enables); -@@ -3541,14 +3552,12 @@ bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp, +@@ -3541,14 +3557,12 @@ bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp, req->min_rx_rings = req->max_rx_rings; req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1)); req->min_l2_ctxs = req->max_l2_ctxs; @@ -36364,7 +41146,7 @@ index f53f8632fe..51e1e2d6b3 100644 } static void -@@ -3608,6 +3617,8 @@ static int bnxt_update_max_resources(struct bnxt *bp, +@@ -3608,6 +3622,8 @@ static int bnxt_update_max_resources(struct bnxt *bp, bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings); bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx); 
bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps); @@ -36373,7 +41155,7 @@ index f53f8632fe..51e1e2d6b3 100644 HWRM_UNLOCK(); -@@ -3681,6 +3692,8 @@ static int bnxt_query_pf_resources(struct bnxt *bp, +@@ -3681,6 +3697,8 @@ static int bnxt_query_pf_resources(struct bnxt *bp, pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings); pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx); pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps); @@ -36382,7 +41164,7 @@ index f53f8632fe..51e1e2d6b3 100644 bp->pf->evb_mode = resp->evb_mode; HWRM_UNLOCK(); -@@ -3701,6 +3714,8 @@ bnxt_calculate_pf_resources(struct bnxt *bp, +@@ -3701,6 +3719,8 @@ bnxt_calculate_pf_resources(struct bnxt *bp, pf_resc->num_rx_rings = bp->max_rx_rings; pf_resc->num_l2_ctxs = bp->max_l2_ctx; pf_resc->num_hw_ring_grps = bp->max_ring_grps; @@ -36391,7 +41173,7 @@ index f53f8632fe..51e1e2d6b3 100644 return; } -@@ -3719,6 +3734,10 @@ bnxt_calculate_pf_resources(struct bnxt *bp, +@@ -3719,6 +3739,10 @@ bnxt_calculate_pf_resources(struct bnxt *bp, bp->max_l2_ctx % (num_vfs + 1); pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) + bp->max_ring_grps % (num_vfs + 1); @@ -36402,7 +41184,7 @@ index f53f8632fe..51e1e2d6b3 100644 } int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) -@@ -3727,7 +3746,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) +@@ -3727,7 +3751,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) int rc; if (!BNXT_PF(bp)) { @@ -36411,7 +41193,7 @@ index f53f8632fe..51e1e2d6b3 100644 return -EINVAL; } -@@ -3894,6 +3913,8 @@ bnxt_update_pf_resources(struct bnxt *bp, +@@ -3894,6 +3918,8 @@ bnxt_update_pf_resources(struct bnxt *bp, bp->max_tx_rings = pf_resc->num_tx_rings; bp->max_rx_rings = pf_resc->num_rx_rings; bp->max_ring_grps = pf_resc->num_hw_ring_grps; @@ -36420,7 +41202,7 @@ index f53f8632fe..51e1e2d6b3 100644 } static int32_t -@@ -4514,7 +4535,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) +@@ -4514,7 +4540,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) uint16_t duration = 0; int rc, i; @@ -36429,7 +41211,7 @@ index f53f8632fe..51e1e2d6b3 100644 return -EOPNOTSUPP; HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB); -@@ -6106,38 +6127,6 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp) +@@ -6106,38 +6132,6 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp) return rc; } @@ -36468,7 +41250,7 @@ index f53f8632fe..51e1e2d6b3 100644 int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1, uint32_t echo_req_data2) { -@@ -6175,10 +6164,6 @@ int bnxt_hwrm_poll_ver_get(struct bnxt *bp) +@@ -6175,10 +6169,6 @@ int bnxt_hwrm_poll_ver_get(struct bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT_SILENT(); @@ -36479,7 +41261,7 @@ index f53f8632fe..51e1e2d6b3 100644 HWRM_UNLOCK(); return rc; -@@ -6262,3 +6247,26 @@ int bnxt_hwrm_config_host_mtu(struct bnxt *bp) +@@ -6262,3 +6252,26 @@ int bnxt_hwrm_config_host_mtu(struct bnxt *bp) return rc; } @@ -36572,10 +41354,21 @@ index f8f0556201..a82d9fb3ef 100644 +int bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); #endif diff --git a/dpdk/drivers/net/bnxt/bnxt_reps.c b/dpdk/drivers/net/bnxt/bnxt_reps.c -index 22b76b72b9..299b4c24a8 100644 +index 22b76b72b9..2f21e78e5c 100644 --- a/dpdk/drivers/net/bnxt/bnxt_reps.c +++ b/dpdk/drivers/net/bnxt/bnxt_reps.c -@@ -35,16 +35,20 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = { +@@ -32,19 +32,31 @@ 
static const struct eth_dev_ops bnxt_rep_dev_ops = { + .flow_ops_get = bnxt_flow_ops_get_op + }; + ++static bool bnxt_rep_check_parent(struct bnxt_representor *rep) ++{ ++ if (!rep->parent_dev->data->dev_private) ++ return false; ++ ++ return true; ++} ++ uint16_t bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf) { @@ -36600,7 +41393,57 @@ index 22b76b72b9..299b4c24a8 100644 /* If rxq_id happens to be > nr_rings, use ring 0 */ que = queue_id < vfr_bp->rx_nr_rings ? queue_id : 0; rep_rxq = vfr_bp->rx_queues[que]; -@@ -545,7 +549,10 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev, +@@ -120,8 +132,8 @@ bnxt_rep_tx_burst(void *tx_queue, + qid = vfr_txq->txq->queue_id; + vf_rep_bp = vfr_txq->bp; + parent = vf_rep_bp->parent_dev->data->dev_private; +- pthread_mutex_lock(&parent->rep_info->vfr_lock); + ptxq = parent->tx_queues[qid]; ++ pthread_mutex_lock(&ptxq->txq_lock); + + ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action; + +@@ -130,9 +142,9 @@ bnxt_rep_tx_burst(void *tx_queue, + vf_rep_bp->tx_pkts[qid]++; + } + +- rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); ++ rc = _bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); + ptxq->vfr_tx_cfa_action = 0; +- pthread_mutex_unlock(&parent->rep_info->vfr_lock); ++ pthread_mutex_unlock(&ptxq->txq_lock); + + return rc; + } +@@ -262,12 +274,12 @@ int bnxt_representor_uninit(struct rte_eth_dev *eth_dev) + PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id); + eth_dev->data->mac_addrs = NULL; + +- parent_bp = rep->parent_dev->data->dev_private; +- if (!parent_bp) { ++ if (!bnxt_rep_check_parent(rep)) { + PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n", + eth_dev->data->port_id); + return 0; + } ++ parent_bp = rep->parent_dev->data->dev_private; + + parent_bp->num_reps--; + vf_id = rep->vf_id; +@@ -536,16 +548,20 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev, + int rc = 0; + + /* MAC Specifics */ +- parent_bp = rep_bp->parent_dev->data->dev_private; +- if (!parent_bp) { +- PMD_DRV_LOG(ERR, "Rep parent NULL!\n"); ++ if (!bnxt_rep_check_parent(rep_bp)) { ++ /* Need not be an error scenario, if parent is closed first */ ++ PMD_DRV_LOG(INFO, "Rep parent port does not exist.\n"); + return rc; + } ++ parent_bp = rep_bp->parent_dev->data->dev_private; + PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n"); dev_info->max_mac_addrs = parent_bp->max_l2_ctx; dev_info->max_hash_mac_addrs = 0; @@ -36612,7 +41455,7 @@ index 22b76b72b9..299b4c24a8 100644 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ dev_info->max_rx_queues = max_rx_rings; dev_info->max_tx_queues = max_rx_rings; -@@ -561,10 +568,8 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev, +@@ -561,10 +577,8 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev, dev_info->min_rx_bufsize = 1; dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; @@ -36625,7 +41468,7 @@ index 22b76b72b9..299b4c24a8 100644 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; dev_info->switch_info.name = eth_dev->device->name; -@@ -626,10 +631,10 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev, +@@ -626,10 +640,10 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev, struct rte_mbuf **buf_ring; int rc = 0; @@ -36638,20 +41481,20 @@ index 22b76b72b9..299b4c24a8 100644 return -EINVAL; } -@@ -726,10 +731,10 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev, +@@ -726,10 +740,10 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev, struct bnxt_tx_queue *parent_txq, *txq; struct bnxt_vf_rep_tx_queue *vfr_txq; 
- if (queue_idx >= BNXT_MAX_VF_REP_RINGS) { -+ if (queue_idx >= rep_bp->rx_nr_rings) { ++ if (queue_idx >= rep_bp->tx_nr_rings) { PMD_DRV_LOG(ERR, "Cannot create Tx rings %d. %d rings available\n", - queue_idx, BNXT_MAX_VF_REP_RINGS); -+ queue_idx, rep_bp->rx_nr_rings); ++ queue_idx, rep_bp->tx_nr_rings); return -EINVAL; } -@@ -802,10 +807,10 @@ int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev, +@@ -802,10 +816,10 @@ int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats) { struct bnxt_representor *rep_bp = eth_dev->data->dev_private; @@ -36664,7 +41507,7 @@ index 22b76b72b9..299b4c24a8 100644 stats->obytes += rep_bp->tx_bytes[i]; stats->opackets += rep_bp->tx_pkts[i]; stats->ibytes += rep_bp->rx_bytes[i]; -@@ -825,9 +830,9 @@ int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev, +@@ -825,9 +839,9 @@ int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev, int bnxt_rep_stats_reset_op(struct rte_eth_dev *eth_dev) { struct bnxt_representor *rep_bp = eth_dev->data->dev_private; @@ -37068,7 +41911,7 @@ index 991eafc644..208aa5616d 100644 } diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c -index 72a55ea643..c8745add5e 100644 +index 72a55ea643..0f41193038 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txq.c +++ b/dpdk/drivers/net/bnxt/bnxt_txq.c @@ -17,6 +17,35 @@ @@ -37107,21 +41950,69 @@ index 72a55ea643..c8745add5e 100644 void bnxt_free_txq_stats(struct bnxt_tx_queue *txq) { if (txq && txq->cp_ring && txq->cp_ring->hw_stats) +@@ -82,6 +111,7 @@ void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx) + txq->mz = NULL; + + rte_free(txq->free); ++ pthread_mutex_destroy(&txq->txq_lock); + rte_free(txq); + dev->data->tx_queues[queue_idx] = NULL; + } +@@ -165,6 +195,11 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + goto err; + } + ++ rc = pthread_mutex_init(&txq->txq_lock, NULL); ++ if (rc != 0) { ++ PMD_DRV_LOG(ERR, "TxQ mutex init failed!"); ++ goto err; ++ } + return 0; + err: + bnxt_tx_queue_release_op(eth_dev, queue_idx); diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.h b/dpdk/drivers/net/bnxt/bnxt_txq.h -index 67fd4cbebb..f3a03812ad 100644 +index 67fd4cbebb..6e2d87de09 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txq.h +++ b/dpdk/drivers/net/bnxt/bnxt_txq.h -@@ -43,4 +43,5 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, +@@ -26,6 +26,7 @@ struct bnxt_tx_queue { + int index; + int tx_wake_thresh; + uint32_t vfr_tx_cfa_action; ++ pthread_mutex_t txq_lock; + struct bnxt_tx_ring_info *tx_ring; + + unsigned int cp_nr_rings; +@@ -43,4 +44,5 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); +uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp); #endif diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c -index e2b7e40571..ec63b97fe2 100644 +index e2b7e40571..c0518b4a26 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txr.c +++ b/dpdk/drivers/net/bnxt/bnxt_txr.c -@@ -551,6 +551,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -492,6 +492,19 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) + + uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) ++{ ++ struct bnxt_tx_queue *txq = tx_queue; ++ uint16_t rc; ++ ++ pthread_mutex_lock(&txq->txq_lock); ++ rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts); ++ pthread_mutex_unlock(&txq->txq_lock); ++ ++ return rc; ++} ++ ++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts, ++ uint16_t nb_pkts) + { + int rc; + uint16_t nb_tx_pkts = 0; +@@ -551,6 +564,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) if (rc) return rc; @@ -37134,7 +42025,7 @@ index e2b7e40571..ec63b97fe2 100644 bnxt_free_hwrm_tx_ring(bp, tx_queue_id); rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id); if (rc) -@@ -602,6 +608,9 @@ int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr) +@@ -602,6 +621,9 @@ int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr) cons = RING_CMPL(ring_mask, raw_cons); txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; @@ -37144,6 +42035,21 @@ index e2b7e40571..ec63b97fe2 100644 opaque = rte_cpu_to_le_32(txcmp->opaque); raw_cons = NEXT_RAW_CMP(raw_cons); +diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.h b/dpdk/drivers/net/bnxt/bnxt_txr.h +index e11343c082..2be3ba4cac 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txr.h ++++ b/dpdk/drivers/net/bnxt/bnxt_txr.h +@@ -46,7 +46,9 @@ void bnxt_free_tx_rings(struct bnxt *bp); + int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq); + int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); + uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, +- uint16_t nb_pkts); ++ uint16_t nb_pkts); ++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ++ uint16_t nb_pkts); + #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) + uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); diff --git a/dpdk/drivers/net/bnxt/bnxt_vnic.c b/dpdk/drivers/net/bnxt/bnxt_vnic.c index c63cf4b943..b3c03a2af5 100644 --- a/dpdk/drivers/net/bnxt/bnxt_vnic.c @@ -37433,6 +42339,18 @@ index b27678dae9..2b02836a40 100644 /* * Flush all flows in the flow database that belong to a device function. +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c +index 0030a487f5..897410cc0a 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c ++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c +@@ -171,6 +171,7 @@ ulp_ha_mgr_timer_cb(void *arg) + + myclient_cnt = bnxt_ulp_cntxt_num_shared_clients_get(ulp_ctx); + if (myclient_cnt == 0) { ++ bnxt_ulp_cntxt_entry_release(); + BNXT_TF_DBG(ERR, + "PANIC Client Count is zero kill timer\n."); + return; diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c index f4274dd634..9edf3e8799 100644 --- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c @@ -38598,7 +43516,7 @@ index 5e6c5ee111..4e1abf7804 100644 void diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h -index 873e1871f9..bd9eba08e9 100644 +index 873e1871f9..7e0ec4f048 100644 --- a/dpdk/drivers/net/cnxk/cn10k_tx.h +++ b/dpdk/drivers/net/cnxk/cn10k_tx.h @@ -736,7 +736,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd, @@ -38668,7 +43586,17 @@ index 873e1871f9..bd9eba08e9 100644 rte_iova_t c_io_addr; uint64_t sa_base; union wdata { -@@ -2254,7 +2256,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -1573,7 +1575,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + } + + for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) { +- if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) { ++ if (flags & NIX_TX_OFFLOAD_SECURITY_F && ++ (((int)((16 - c_lnum) << 1) - c_loff) < 4)) { + burst = i; + break; + } +@@ -2254,7 +2257,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, } if (flags & NIX_TX_OFFLOAD_TSTAMP_F) { @@ -38677,7 +43605,7 @@ index 
873e1871f9..bd9eba08e9 100644 const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST, RTE_MBUF_F_TX_IEEE1588_TMST}; /* Set send mem alg to SUB. */ -@@ -2350,28 +2352,28 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -2350,28 +2353,28 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, mbuf3 = (uint64_t *)tx_pkts[3]; if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0)) @@ -38878,7 +43806,7 @@ index 435dde1317..fe44ff4290 100644 RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf3)->pool, diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c -index 74f625553d..94d1b17443 100644 +index 74f625553d..e4c22e7297 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c @@ -3,6 +3,8 @@ @@ -38969,7 +43897,21 @@ index 74f625553d..94d1b17443 100644 dev->npc.channel = roc_nix_get_base_chan(nix); nb_rxq = data->nb_rx_queues; -@@ -1299,6 +1334,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) +@@ -1184,6 +1219,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) + goto free_nix_lf; + } + ++ /* Overwrite default RSS setup if requested by user */ ++ rc = cnxk_nix_rss_hash_update(eth_dev, &conf->rx_adv_conf.rss_conf); ++ if (rc) { ++ plt_err("Failed to configure rss rc=%d", rc); ++ goto free_nix_lf; ++ } ++ + /* Init the default TM scheduler hierarchy */ + rc = roc_nix_tm_init(nix); + if (rc) { +@@ -1299,6 +1341,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) roc_nix_tm_fini(nix); free_nix_lf: nix_free_queue_mem(dev); @@ -38977,7 +43919,7 @@ index 74f625553d..94d1b17443 100644 rc |= roc_nix_lf_free(nix); fail_configure: dev->configured = 0; -@@ -1780,6 +1816,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) +@@ -1780,6 +1823,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) /* Free ROC RQ's, SQ's and CQ's memory */ nix_free_queue_mem(dev); @@ -39106,10 +44048,34 @@ index 39d8563826..b6ccccdc39 100644 } diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -index ce5f1f7240..9662bb0a2c 100644 +index ce5f1f7240..84124c84a6 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -@@ -390,6 +390,44 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) +@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo) + devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; + devinfo->max_mac_addrs = dev->max_mac_entries; + devinfo->max_vfs = pci_dev->max_vfs; +- devinfo->max_mtu = devinfo->max_rx_pktlen - +- (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); ++ devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD; + devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD; + + devinfo->rx_offload_capa = dev->rx_offload_capa; +@@ -341,6 +340,13 @@ cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) + roc_nix_npc_mac_addr_set(nix, dev->mac_addr); + goto exit; + } ++ ++ if (eth_dev->data->promiscuous) { ++ rc = roc_nix_mac_promisc_mode_enable(nix, true); ++ if (rc) ++ plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc, ++ roc_error_msg_get(rc)); ++ } + } + + /* Update mac address to cnxk ethernet device */ +@@ -390,6 +396,44 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) dev->dmac_filter_count--; } @@ -39154,10 +44120,17 @@ index ce5f1f7240..9662bb0a2c 100644 int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) { -@@ -433,6 +471,15 @@ 
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +@@ -433,21 +477,20 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) goto exit; } +- frame_size -= RTE_ETHER_CRC_LEN; +- +- /* Update mtu on Tx */ +- rc = roc_nix_mac_mtu_set(nix, frame_size); +- if (rc) { +- plt_err("Failed to set MTU, rc=%d", rc); +- goto exit; + /* if new MTU was smaller than old one, then flush all SQs before MTU change */ + if (old_frame_size > frame_size) { + if (data->dev_started) { @@ -39165,12 +44138,21 @@ index ce5f1f7240..9662bb0a2c 100644 + goto exit; + } + cnxk_nix_sq_flush(eth_dev); -+ } -+ - frame_size -= RTE_ETHER_CRC_LEN; + } - /* Update mtu on Tx */ -@@ -517,7 +564,8 @@ cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev) +- /* Sync same frame size on Rx */ ++ frame_size -= RTE_ETHER_CRC_LEN; ++ ++ /* Set frame size on Rx */ + rc = roc_nix_mac_max_rx_len_set(nix, frame_size); + if (rc) { +- /* Rollback to older mtu */ +- roc_nix_mac_mtu_set(nix, +- old_frame_size - RTE_ETHER_CRC_LEN); + plt_err("Failed to max Rx frame length, rc=%d", rc); + goto exit; + } +@@ -517,7 +560,8 @@ cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); @@ -39180,7 +44162,7 @@ index ce5f1f7240..9662bb0a2c 100644 } int -@@ -746,6 +794,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev, +@@ -746,6 +790,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev, goto fail; } @@ -39216,10 +44198,34 @@ index 139fea256c..359f9a30ae 100644 * using freq_mult and clk_delta calculated during configure stage. */ diff --git a/dpdk/drivers/net/cnxk/cnxk_rte_flow.c b/dpdk/drivers/net/cnxk/cnxk_rte_flow.c -index b08d7c34fa..0410f2d82e 100644 +index b08d7c34fa..bdff3c0fc9 100644 --- a/dpdk/drivers/net/cnxk/cnxk_rte_flow.c +++ b/dpdk/drivers/net/cnxk/cnxk_rte_flow.c -@@ -110,13 +110,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, +@@ -93,15 +93,19 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev, + } + + static void +-npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, +- const struct roc_npc_action *rss_action, +- uint32_t *flowkey_cfg) ++npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, const struct roc_npc_action *rss_action, ++ uint32_t *flowkey_cfg, uint64_t default_rss_types) + { + const struct roc_npc_action_rss *rss; ++ uint64_t rss_types; + + rss = (const struct roc_npc_action_rss *)rss_action->conf; ++ rss_types = rss->types; ++ /* If no RSS types are specified, use default one */ ++ if (rss_types == 0) ++ rss_types = default_rss_types; + +- *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss->types, rss->level); ++ *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss_types, rss->level); + } + + static int +@@ -110,13 +114,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, struct roc_npc_action in_actions[], uint32_t *flowkey_cfg) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); @@ -39235,7 +44241,7 @@ index b08d7c34fa..0410f2d82e 100644 int i = 0, rc = 0; int rq; -@@ -150,6 +151,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, +@@ -150,6 +155,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_VF: in_actions[i].type = ROC_NPC_ACTION_TYPE_VF; in_actions[i].conf = actions->conf; @@ -39243,7 +44249,7 @@ index b08d7c34fa..0410f2d82e 100644 break; case RTE_FLOW_ACTION_TYPE_PORT_ID: -@@ -183,13 +185,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, 
+@@ -183,13 +189,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_QUEUE: @@ -39258,7 +44264,17 @@ index b08d7c34fa..0410f2d82e 100644 in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE; in_actions[i].conf = actions->conf; break; -@@ -234,6 +230,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, +@@ -200,7 +200,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + goto err_exit; + in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS; + in_actions[i].conf = actions->conf; +- npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg); ++ npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, ++ eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); + break; + + case RTE_FLOW_ACTION_TYPE_SECURITY: +@@ -234,6 +235,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, } i++; } @@ -39273,7 +44289,7 @@ index b08d7c34fa..0410f2d82e 100644 in_actions[i].type = ROC_NPC_ACTION_TYPE_END; return 0; -@@ -297,7 +301,14 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, +@@ -297,7 +306,14 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, return rc; } @@ -39779,7 +44795,7 @@ index f623f3e684..566cd48406 100644 /* set offset to -1 to distinguish ingress queues without FL */ diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -index e49f765434..bae6c5abf2 100644 +index e49f765434..3bf356fa2c 100644 --- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c +++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c @@ -133,6 +133,8 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { @@ -39791,7 +44807,17 @@ index e49f765434..bae6c5abf2 100644 static int dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); -@@ -385,6 +387,7 @@ static void dpaa_interrupt_handler(void *param) +@@ -349,7 +351,8 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, +- RTE_PTYPE_L4_SCTP ++ RTE_PTYPE_L4_SCTP, ++ RTE_PTYPE_UNKNOWN + }; + + PMD_INIT_FUNC_TRACE(); +@@ -385,6 +388,7 @@ static void dpaa_interrupt_handler(void *param) static int dpaa_eth_dev_start(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; @@ -39799,7 +44825,7 @@ index e49f765434..bae6c5abf2 100644 PMD_INIT_FUNC_TRACE(); -@@ -399,12 +402,18 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev) +@@ -399,12 +403,18 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev) fman_if_enable_rx(dev->process_private); @@ -39818,7 +44844,7 @@ index e49f765434..bae6c5abf2 100644 PMD_INIT_FUNC_TRACE(); dev->data->dev_started = 0; -@@ -413,6 +422,11 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) +@@ -413,6 +423,11 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) fman_if_disable_rx(fif); dev->tx_pkt_burst = dpaa_eth_tx_drop_all; @@ -39830,7 +44856,7 @@ index e49f765434..bae6c5abf2 100644 return 0; } -@@ -978,8 +992,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -978,8 +993,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, } else { DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is" " larger than a single mbuf (%u) and scattered" @@ -39840,7 +44866,7 @@ index e49f765434..bae6c5abf2 100644 } dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); -@@ -994,7 +1007,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -994,7 +1008,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev 
*dev, uint16_t queue_idx, if (vsp_id >= 0) { ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id, DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid, @@ -39849,7 +44875,7 @@ index e49f765434..bae6c5abf2 100644 if (ret) { DPAA_PMD_ERR("dpaa_port_vsp_update failed"); return ret; -@@ -1030,7 +1043,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -1030,7 +1044,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, QM_FQCTRL_CTXASTASHING | QM_FQCTRL_PREFERINCACHE; opts.fqd.context_a.stashing.exclusive = 0; @@ -39858,7 +44884,7 @@ index e49f765434..bae6c5abf2 100644 * So do not enable stashing in this case */ if (dpaa_svr_family != SVR_LS1046A_FAMILY) -@@ -1201,23 +1214,17 @@ int +@@ -1201,23 +1215,17 @@ int dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, int eth_rx_queue_id) { @@ -39886,7 +44912,7 @@ index e49f765434..bae6c5abf2 100644 rxq->fqid, ret); } -@@ -1866,7 +1873,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) +@@ -1866,7 +1874,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) dpaa_intf->name = dpaa_device->name; @@ -39895,7 +44921,7 @@ index e49f765434..bae6c5abf2 100644 eth_dev->process_private = fman_intf; dpaa_intf->ifid = dev_id; dpaa_intf->cfg = cfg; -@@ -2169,7 +2176,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, +@@ -2169,7 +2177,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, if (dpaa_svr_family == SVR_LS1043A_FAMILY) dpaa_push_mode_max_queue = 0; @@ -39904,7 +44930,7 @@ index e49f765434..bae6c5abf2 100644 * only one queue per thread. */ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { -@@ -2215,7 +2222,20 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, +@@ -2215,7 +2223,20 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, /* Invoke PMD device initialization function */ diag = dpaa_dev_init(eth_dev); if (diag == 0) { @@ -39925,7 +44951,7 @@ index e49f765434..bae6c5abf2 100644 return 0; } -@@ -2233,6 +2253,9 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) +@@ -2233,6 +2254,9 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) eth_dev = dpaa_dev->eth_dev; dpaa_eth_dev_close(eth_dev); @@ -41132,6 +46158,19 @@ index 469ab9b3d4..3b9bffeed7 100644 * profile * corresponding to the ingress or egress of the DPNI. 
* @mc_io: Pointer to MC portal's I/O object +diff --git a/dpdk/drivers/net/e1000/base/e1000_base.c b/dpdk/drivers/net/e1000/base/e1000_base.c +index ab73e1e59e..3ec32e7240 100644 +--- a/dpdk/drivers/net/e1000/base/e1000_base.c ++++ b/dpdk/drivers/net/e1000/base/e1000_base.c +@@ -107,7 +107,7 @@ void e1000_power_down_phy_copper_base(struct e1000_hw *hw) + return; + + /* If the management interface is not enabled, then power down */ +- if (phy->ops.check_reset_block(hw)) ++ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + } + diff --git a/dpdk/drivers/net/e1000/e1000_ethdev.h b/dpdk/drivers/net/e1000/e1000_ethdev.h index a548ae2ccb..718a9746ed 100644 --- a/dpdk/drivers/net/e1000/e1000_ethdev.h @@ -41525,8 +46564,50 @@ index 4a311a7b18..6027cfbfb1 100644 } } +diff --git a/dpdk/drivers/net/ena/base/ena_com.c b/dpdk/drivers/net/ena/base/ena_com.c +index 5ca36ab6d9..98035f3cd4 100644 +--- a/dpdk/drivers/net/ena/base/ena_com.c ++++ b/dpdk/drivers/net/ena/base/ena_com.c +@@ -34,6 +34,8 @@ + + #define ENA_REGS_ADMIN_INTR_MASK 1 + ++#define ENA_MAX_BACKOFF_DELAY_EXP 16U ++ + #define ENA_MIN_ADMIN_POLL_US 100 + + #define ENA_MAX_ADMIN_POLL_US 5000 +@@ -171,6 +173,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, + static void comp_ctxt_release(struct ena_com_admin_queue *queue, + struct ena_comp_ctx *comp_ctx) + { ++ comp_ctx->user_cqe = NULL; + comp_ctx->occupied = false; + ATOMIC32_DEC(&queue->outstanding_cmds); + } +@@ -464,6 +467,9 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a + return; + } + ++ if (!comp_ctx->occupied) ++ return; ++ + comp_ctx->status = ENA_CMD_COMPLETED; + comp_ctx->comp_status = cqe->acq_common_descriptor.status; + +@@ -539,8 +545,9 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, + + static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) + { ++ exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp); + delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us); +- delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); ++ delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp)); + ENA_USLEEP(delay_us); + } + diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c -index 634c97acf6..1c61f793e6 100644 +index 634c97acf6..765e35131a 100644 --- a/dpdk/drivers/net/ena/ena_ethdev.c +++ b/dpdk/drivers/net/ena/ena_ethdev.c @@ -38,11 +38,6 @@ @@ -41557,22 +46638,17 @@ index 634c97acf6..1c61f793e6 100644 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, struct ena_com_rx_ctx *ena_rx_ctx, bool fill_hash) -@@ -306,7 +310,13 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, +@@ -302,7 +306,8 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, + packet_type |= RTE_PTYPE_L3_IPV6; + } + +- if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) ++ if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag || ++ !(packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP))) ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; else if (unlikely(ena_rx_ctx->l4_csum_err)) -- ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; -+ /* -+ * For the L4 Rx checksum offload the HW may indicate -+ * bad checksum although it's valid. Because of that, -+ * we're setting the UNKNOWN flag to let the app -+ * re-verify the checksum. 
-+ */ -+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; - else - ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; - -@@ -344,6 +354,8 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, +@@ -344,6 +349,8 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) { ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; @@ -41581,7 +46657,7 @@ index 634c97acf6..1c61f793e6 100644 } else { ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; -@@ -351,7 +363,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, +@@ -351,7 +358,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, if (mbuf->packet_type & (RTE_PTYPE_L4_NONFRAG | RTE_PTYPE_INNER_L4_NONFRAG)) @@ -41590,7 +46666,7 @@ index 634c97acf6..1c61f793e6 100644 } /* check if L4 checksum is needed */ -@@ -399,8 +411,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) +@@ -399,8 +406,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) /* Trigger device reset */ ++tx_ring->tx_stats.bad_req_id; @@ -41600,7 +46676,25 @@ index 634c97acf6..1c61f793e6 100644 return -EFAULT; } -@@ -899,6 +910,7 @@ static int ena_start(struct rte_eth_dev *dev) +@@ -438,7 +444,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) + + rc = ena_com_set_host_attributes(ena_dev); + if (rc) { +- if (rc == -ENA_COM_UNSUPPORTED) ++ if (rc == ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); +@@ -478,7 +484,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter) + + rc = ena_com_set_host_attributes(&adapter->ena_dev); + if (rc) { +- if (rc == -ENA_COM_UNSUPPORTED) ++ if (rc == ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); +@@ -899,6 +905,7 @@ static int ena_start(struct rte_eth_dev *dev) struct ena_adapter *adapter = dev->data->dev_private; uint64_t ticks; int rc = 0; @@ -41608,7 +46702,7 @@ index 634c97acf6..1c61f793e6 100644 /* Cannot allocate memory in secondary process */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { -@@ -940,6 +952,11 @@ static int ena_start(struct rte_eth_dev *dev) +@@ -940,6 +947,11 @@ static int ena_start(struct rte_eth_dev *dev) ++adapter->dev_stats.dev_start; adapter->state = ENA_ADAPTER_STATE_RUNNING; @@ -41620,7 +46714,7 @@ index 634c97acf6..1c61f793e6 100644 return 0; err_rss_init: -@@ -955,6 +972,7 @@ static int ena_stop(struct rte_eth_dev *dev) +@@ -955,6 +967,7 @@ static int ena_stop(struct rte_eth_dev *dev) struct ena_com_dev *ena_dev = &adapter->ena_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = pci_dev->intr_handle; @@ -41628,7 +46722,7 @@ index 634c97acf6..1c61f793e6 100644 int rc; /* Cannot free memory in secondary process */ -@@ -986,6 +1004,11 @@ static int ena_stop(struct rte_eth_dev *dev) +@@ -986,6 +999,11 @@ static int ena_stop(struct rte_eth_dev *dev) adapter->state = ENA_ADAPTER_STATE_STOPPED; dev->data->dev_started = 0; @@ -41640,7 +46734,7 @@ index 634c97acf6..1c61f793e6 100644 return 0; } -@@ -1408,7 +1431,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) +@@ -1408,7 +1426,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) ++rxq->rx_stats.refill_partial; } @@ -41649,7 +46743,7 @@ index 634c97acf6..1c61f793e6 100644 if (likely(i > 0)) { /* ...let HW know that it can fill buffers with data. 
*/ ena_com_write_sq_doorbell(rxq->ena_com_io_sq); -@@ -1529,8 +1552,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) +@@ -1529,8 +1547,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= adapter->keep_alive_timeout)) { PMD_DRV_LOG(ERR, "Keep alive timeout\n"); @@ -41659,7 +46753,7 @@ index 634c97acf6..1c61f793e6 100644 ++adapter->dev_stats.wd_expired; } } -@@ -1540,8 +1562,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) +@@ -1540,8 +1557,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) { if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); @@ -41669,7 +46763,7 @@ index 634c97acf6..1c61f793e6 100644 } } -@@ -1632,6 +1653,9 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, +@@ -1632,6 +1648,9 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, struct rte_eth_dev *dev = arg; struct ena_adapter *adapter = dev->data->dev_private; @@ -41679,7 +46773,7 @@ index 634c97acf6..1c61f793e6 100644 check_for_missing_keep_alive(adapter); check_for_admin_com_state(adapter); check_for_tx_completions(adapter); -@@ -1682,6 +1706,13 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, +@@ -1682,6 +1701,13 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, return 0; } @@ -41693,7 +46787,7 @@ index 634c97acf6..1c61f793e6 100644 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); if (unlikely(rc)) { PMD_INIT_LOG(WARNING, -@@ -1694,13 +1725,6 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, +@@ -1694,13 +1720,6 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) return 0; @@ -41707,7 +46801,7 @@ index 634c97acf6..1c61f793e6 100644 ena_dev->mem_bar = adapter->dev_mem_base; return 0; -@@ -2028,9 +2052,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev) +@@ -2028,9 +2047,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev) */ adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; @@ -41717,7 +46811,7 @@ index 634c97acf6..1c61f793e6 100644 return 0; } -@@ -2325,14 +2346,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -2325,14 +2341,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rc); if (rc == ENA_COM_NO_SPACE) { ++rx_ring->rx_stats.bad_desc_num; @@ -41736,7 +46830,7 @@ index 634c97acf6..1c61f793e6 100644 return 0; } -@@ -2732,9 +2752,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) +@@ -2732,9 +2747,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) if (unlikely(rc)) { PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); ++tx_ring->tx_stats.prepare_ctx_err; @@ -42188,6 +47282,19 @@ index 3c754a5f66..05cf533896 100644 return ret; } +diff --git a/dpdk/drivers/net/failsafe/failsafe_args.c b/dpdk/drivers/net/failsafe/failsafe_args.c +index b203e02d9a..3b867437d7 100644 +--- a/dpdk/drivers/net/failsafe/failsafe_args.c ++++ b/dpdk/drivers/net/failsafe/failsafe_args.c +@@ -248,7 +248,7 @@ fs_parse_device_param(struct rte_eth_dev *dev, const char *param, + goto free_args; + } else { + ERROR("Unrecognized device type: %.*s", (int)b, param); +- return -EINVAL; ++ ret = -EINVAL; + } + free_args: + free(args); diff --git 
a/dpdk/drivers/net/failsafe/failsafe_ops.c b/dpdk/drivers/net/failsafe/failsafe_ops.c index 55e21d635c..2c23d0e70a 100644 --- a/dpdk/drivers/net/failsafe/failsafe_ops.c @@ -42266,7 +47373,7 @@ index 7cfa29faa8..17a7056c45 100644 * be 8-byte aligned but without crossing host memory pages (4KB alignment * boundaries). Satisfy first option. diff --git a/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/dpdk/drivers/net/fm10k/fm10k_ethdev.c -index 43e1d13431..8bbd8b445d 100644 +index 43e1d13431..5bac1794fb 100644 --- a/dpdk/drivers/net/fm10k/fm10k_ethdev.c +++ b/dpdk/drivers/net/fm10k/fm10k_ethdev.c @@ -290,7 +290,7 @@ rx_queue_free(struct fm10k_rx_queue *q) @@ -42323,6 +47430,92 @@ index 43e1d13431..8bbd8b445d 100644 * there is no way to get link status without reading BAR4. Until this * works, assume we have maximum bandwidth. * @todo - fix bus info +@@ -3056,7 +3056,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pdev->intr_handle; +- int diag, i; ++ int diag, i, ret; + struct fm10k_macvlan_filter_info *macvlan; + + PMD_INIT_FUNC_TRACE(); +@@ -3145,21 +3145,24 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + diag = fm10k_stats_reset(dev); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag); +- return diag; ++ ret = diag; ++ goto err_stat; + } + + /* Reset the hw */ + diag = fm10k_reset_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_reset_hw; + } + + /* Setup mailbox service */ + diag = fm10k_setup_mbx_service(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_mbx; + } + + /*PF/VF has different interrupt handling mechanism */ +@@ -3198,7 +3201,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + + if (switch_ready == false) { + PMD_INIT_LOG(ERR, "switch is not ready"); +- return -1; ++ ret = -1; ++ goto err_switch_ready; + } + } + +@@ -3233,7 +3237,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + + if (!hw->mac.default_vid) { + PMD_INIT_LOG(ERR, "default VID is not ready"); +- return -1; ++ ret = -1; ++ goto err_vid; + } + } + +@@ -3242,6 +3247,28 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + MAIN_VSI_POOL_NUMBER); + + return 0; ++ ++err_vid: ++err_switch_ready: ++ rte_intr_disable(intr_handle); ++ ++ if (hw->mac.type == fm10k_mac_pf) { ++ fm10k_dev_disable_intr_pf(dev); ++ rte_intr_callback_unregister(intr_handle, ++ fm10k_dev_interrupt_handler_pf, (void *)dev); ++ } else { ++ fm10k_dev_disable_intr_vf(dev); ++ rte_intr_callback_unregister(intr_handle, ++ fm10k_dev_interrupt_handler_vf, (void *)dev); ++ } ++ ++err_mbx: ++err_reset_hw: ++err_stat: ++ rte_free(dev->data->mac_addrs); ++ dev->data->mac_addrs = NULL; ++ ++ return ret; + } + + static int diff --git a/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c b/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c index 1269250e23..10ce5a7582 100644 --- a/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -42483,7 +47676,7 @@ index 2688817f37..f09b1a6e1e 100644 &sqe_info, &off_info))) { txq->txq_stats.off_errs++; diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c -index 2ce59d8de6..9ca2c0cbb9 100644 +index 2ce59d8de6..924ac0cae3 100644 --- a/dpdk/drivers/net/hns3/hns3_cmd.c +++ b/dpdk/drivers/net/hns3/hns3_cmd.c @@ -466,7 +466,7 @@ hns3_mask_capability(struct hns3_hw *hw, @@ -42504,7 
+47697,7 @@ index 2ce59d8de6..9ca2c0cbb9 100644 } static uint32_t -@@ -523,6 +525,41 @@ hns3_build_api_caps(void) +@@ -523,6 +525,43 @@ hns3_build_api_caps(void) return rte_cpu_to_le_32(api_caps); } @@ -42526,7 +47719,9 @@ index 2ce59d8de6..9ca2c0cbb9 100644 + if (device_id == HNS3_DEV_ID_25GE_RDMA || + device_id == HNS3_DEV_ID_50GE_RDMA || + device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || -+ device_id == HNS3_DEV_ID_200G_RDMA) ++ device_id == HNS3_DEV_ID_200G_RDMA || ++ device_id == HNS3_DEV_ID_100G_ROH || ++ device_id == HNS3_DEV_ID_200G_ROH) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); +} + @@ -42546,7 +47741,7 @@ index 2ce59d8de6..9ca2c0cbb9 100644 static int hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) { -@@ -540,6 +577,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) +@@ -540,6 +579,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) return ret; hw->fw_version = rte_le_to_cpu_32(resp->firmware); @@ -42556,7 +47751,7 @@ index 2ce59d8de6..9ca2c0cbb9 100644 /* * Make sure mask the capability before parse capability because it * may overwrite resp's data. -@@ -635,39 +675,6 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) +@@ -635,39 +677,6 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) struct hns3_cmd_desc desc; uint32_t compat = 0; @@ -42596,7 +47791,7 @@ index 2ce59d8de6..9ca2c0cbb9 100644 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false); req = (struct hns3_firmware_compat_cmd *)desc.data; -@@ -696,9 +703,6 @@ hns3_cmd_init(struct hns3_hw *hw) +@@ -696,9 +705,6 @@ hns3_cmd_init(struct hns3_hw *hw) hw->cmq.csq.next_to_use = 0; hw->cmq.crq.next_to_clean = 0; hw->cmq.crq.next_to_use = 0; @@ -42606,7 +47801,7 @@ index 2ce59d8de6..9ca2c0cbb9 100644 hns3_cmd_init_regs(hw); rte_spinlock_unlock(&hw->cmq.crq.lock); -@@ -736,7 +740,7 @@ hns3_cmd_init(struct hns3_hw *hw) +@@ -736,7 +742,7 @@ hns3_cmd_init(struct hns3_hw *hw) return 0; /* @@ -42715,7 +47910,7 @@ index 81bc9e9d98..e1fab05489 100644 uint32_t tc_queue_num; uint32_t rsv1[2]; diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c -index eac2aa1040..ace5be01d5 100644 +index eac2aa1040..ca029be6e5 100644 --- a/dpdk/drivers/net/hns3/hns3_common.c +++ b/dpdk/drivers/net/hns3/hns3_common.c @@ -11,6 +11,7 @@ @@ -42736,7 +47931,14 @@ index eac2aa1040..ace5be01d5 100644 info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM | -@@ -90,13 +90,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) +@@ -84,19 +84,22 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | + RTE_ETH_TX_OFFLOAD_VLAN_INSERT); + +- if (!hw->port_base_vlan_cfg.state) ++ if (!hns->is_vf && !hw->port_base_vlan_cfg.state) + info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT; + if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; @@ -42785,7 +47987,12 @@ index eac2aa1040..ace5be01d5 100644 val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL); *(uint64_t *)extra_args = val; -@@ -212,11 +221,14 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) +@@ -208,15 +217,18 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) + static int + hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) + { +- uint32_t val; ++ 
uint64_t val; RTE_SET_USED(key); @@ -42981,7 +48188,7 @@ index 0dbb1c0413..47d6e34269 100644 + +#endif /* HNS3_COMMON_H */ diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c -index 3d0159d787..1a156cca7e 100644 +index 3d0159d787..89f8eda223 100644 --- a/dpdk/drivers/net/hns3/hns3_dcb.c +++ b/dpdk/drivers/net/hns3/hns3_dcb.c @@ -25,7 +25,7 @@ @@ -43038,7 +48245,15 @@ index 3d0159d787..1a156cca7e 100644 return ret; } -@@ -1532,7 +1525,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns) +@@ -1517,7 +1510,6 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc) + static int + hns3_dcb_hw_configure(struct hns3_adapter *hns) + { +- struct rte_eth_dcb_rx_conf *dcb_rx_conf; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + enum hns3_fc_status fc_status = hw->current_fc_status; +@@ -1532,17 +1524,13 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns) ret = hns3_dcb_schd_setup_hw(hw); if (ret) { @@ -43047,7 +48262,19 @@ index 3d0159d787..1a156cca7e 100644 return ret; } -@@ -1737,7 +1730,7 @@ hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) + if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) { +- dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; +- if (dcb_rx_conf->nb_tcs == 0) +- hw->dcb_info.pfc_en = 1; /* tc0 only */ +- else +- hw->dcb_info.pfc_en = +- RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t); ++ hw->dcb_info.pfc_en = ++ RTE_LEN2MASK((uint8_t)HNS3_MAX_USER_PRIO, uint8_t); + + hw->dcb_info.hw_pfc_map = + hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en); +@@ -1737,7 +1725,7 @@ hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) * hns3_dcb_pfc_enable - Enable priority flow control * @dev: pointer to ethernet device * @@ -43057,7 +48284,7 @@ index 3d0159d787..1a156cca7e 100644 int hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c -index 0bd12907d8..a805046d19 100644 +index 0bd12907d8..a64c570e65 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev.c @@ -5,7 +5,6 @@ @@ -43339,7 +48566,7 @@ index 0bd12907d8..a805046d19 100644 static void hns3_interrupt_handler(void *param) { -@@ -299,24 +358,25 @@ hns3_interrupt_handler(void *param) +@@ -299,39 +358,45 @@ hns3_interrupt_handler(void *param) struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; enum hns3_evt_cause event_cause; @@ -43372,11 +48599,12 @@ index 0bd12907d8..a805046d19 100644 hns3_handle_mac_tnl(hw); hns3_handle_error(hns); } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { -@@ -324,14 +384,19 @@ hns3_interrupt_handler(void *param) + hns3_warn(hw, "received reset interrupt"); hns3_schedule_reset(hns); } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { - hns3_dev_handle_mbx_msg(hw); +- hns3_dev_handle_mbx_msg(hw); - } else { ++ hns3pf_handle_mbx_msg(hw); + } else if (event_cause != HNS3_VECTOR0_EVENT_PTP) { hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " "ras_int_stat:0x%x cmdq_int_stat:0x%x", @@ -43589,7 +48817,15 @@ index 0bd12907d8..a805046d19 100644 hns3_set_default_dev_specifications(hw); hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; -@@ -2795,7 +2774,6 @@ hns3_get_capability(struct hns3_hw *hw) +@@ -2779,6 +2758,7 @@ hns3_get_capability(struct hns3_hw *hw) + hw->rss_info.ipv6_sctp_offload_supported = false; + hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; + 
pf->support_multi_tc_pause = false; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; + return 0; + } + +@@ -2795,11 +2775,11 @@ hns3_get_capability(struct hns3_hw *hw) hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; @@ -43597,7 +48833,12 @@ index 0bd12907d8..a805046d19 100644 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; hw->rss_info.ipv6_sctp_offload_supported = true; hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; -@@ -2820,11 +2798,8 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) + pf->support_multi_tc_pause = true; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + + return 0; + } +@@ -2820,11 +2800,8 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) } break; case HNS3_MEDIA_TYPE_FIBER: @@ -43610,7 +48851,7 @@ index 0bd12907d8..a805046d19 100644 break; default: PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); -@@ -2855,7 +2830,6 @@ hns3_get_board_configuration(struct hns3_hw *hw) +@@ -2855,7 +2832,6 @@ hns3_get_board_configuration(struct hns3_hw *hw) hw->mac.media_type = cfg.media_type; hw->rss_size_max = cfg.rss_size_max; @@ -43618,7 +48859,7 @@ index 0bd12907d8..a805046d19 100644 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); hw->mac.phy_addr = cfg.phy_addr; hw->num_tx_desc = cfg.tqp_desc_num; -@@ -3420,7 +3394,7 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw, +@@ -3420,7 +3396,7 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw, * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs * @hw: pointer to struct hns3_hw * @buf_alloc: pointer to buffer calculation data @@ -43627,7 +48868,7 @@ index 0bd12907d8..a805046d19 100644 */ static int hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) -@@ -3712,7 +3686,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) +@@ -3712,7 +3688,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) if (cmdq_resp) { PMD_INIT_LOG(ERR, @@ -43636,7 +48877,7 @@ index 0bd12907d8..a805046d19 100644 cmdq_resp); return -EIO; } -@@ -4109,6 +4083,7 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) +@@ -4109,6 +4085,7 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) mac_info->support_autoneg = resp->autoneg_ability; mac_info->link_autoneg = (resp->autoneg == 0) ? 
RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG; @@ -43644,7 +48885,7 @@ index 0bd12907d8..a805046d19 100644 } else { mac_info->query_type = HNS3_DEFAULT_QUERY; } -@@ -4191,7 +4166,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw) +@@ -4191,7 +4168,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw) mac->supported_speed = mac_info.supported_speed; mac->support_autoneg = mac_info.support_autoneg; mac->link_autoneg = mac_info.link_autoneg; @@ -43653,7 +48894,7 @@ index 0bd12907d8..a805046d19 100644 return 0; } -@@ -4280,14 +4255,11 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev) +@@ -4280,14 +4257,11 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev) { struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; @@ -43670,7 +48911,7 @@ index 0bd12907d8..a805046d19 100644 } static int -@@ -4396,10 +4368,12 @@ hns3_service_handler(void *param) +@@ -4396,10 +4370,12 @@ hns3_service_handler(void *param) struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; @@ -43685,7 +48926,7 @@ index 0bd12907d8..a805046d19 100644 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); } -@@ -4410,6 +4384,10 @@ hns3_init_hardware(struct hns3_adapter *hns) +@@ -4410,6 +4386,10 @@ hns3_init_hardware(struct hns3_adapter *hns) struct hns3_hw *hw = &hns->hw; int ret; @@ -43696,7 +48937,7 @@ index 0bd12907d8..a805046d19 100644 ret = hns3_map_tqp(hw); if (ret) { PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); -@@ -4483,6 +4461,12 @@ hns3_init_hardware(struct hns3_adapter *hns) +@@ -4483,6 +4463,12 @@ hns3_init_hardware(struct hns3_adapter *hns) goto err_mac_init; } @@ -43709,7 +48950,7 @@ index 0bd12907d8..a805046d19 100644 return 0; err_mac_init: -@@ -4550,14 +4534,14 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw) +@@ -4550,14 +4536,14 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw) } /* @@ -43726,7 +48967,7 @@ index 0bd12907d8..a805046d19 100644 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained * through it. If unsupported, use the SFP's speed as the value of the * supported_speed. 
-@@ -4574,11 +4558,13 @@ hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) +@@ -4574,11 +4560,13 @@ hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) if (ret) return ret; @@ -43743,7 +48984,7 @@ index 0bd12907d8..a805046d19 100644 */ if (mac->supported_speed == 0) mac->supported_speed = -@@ -4620,6 +4606,10 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4620,6 +4608,10 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) /* Get hardware io base address from pcie BAR2 IO space */ hw->io_base = pci_dev->mem_resource[2].addr; @@ -43754,7 +48995,7 @@ index 0bd12907d8..a805046d19 100644 /* Firmware command queue initialize */ ret = hns3_cmd_init_queue(hw); if (ret) { -@@ -4650,13 +4640,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4650,13 +4642,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } @@ -43768,7 +49009,7 @@ index 0bd12907d8..a805046d19 100644 hns3_config_all_msix_error(hw, true); ret = rte_intr_callback_register(pci_dev->intr_handle, -@@ -4667,10 +4650,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4667,10 +4652,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_intr_callback_register; } @@ -43779,7 +49020,7 @@ index 0bd12907d8..a805046d19 100644 /* Enable interrupt */ rte_intr_enable(pci_dev->intr_handle); hns3_pf_enable_irq0(hw); -@@ -4682,7 +4661,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4682,7 +4663,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_get_config; } @@ -43788,7 +49029,7 @@ index 0bd12907d8..a805046d19 100644 if (ret) goto err_get_config; -@@ -4727,8 +4706,9 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4727,8 +4708,9 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) hns3_fdir_filter_uninit(hns); err_fdir: hns3_uninit_umv_space(hw); @@ -43799,7 +49040,7 @@ index 0bd12907d8..a805046d19 100644 err_get_config: hns3_pf_disable_irq0(hw); rte_intr_disable(pci_dev->intr_handle); -@@ -4762,7 +4742,8 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) +@@ -4762,7 +4744,8 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) hns3_flow_uninit(eth_dev); hns3_fdir_filter_uninit(hns); hns3_uninit_umv_space(hw); @@ -43809,7 +49050,7 @@ index 0bd12907d8..a805046d19 100644 hns3_config_mac_tnl_int(hw, false); hns3_pf_disable_irq0(hw); rte_intr_disable(pci_dev->intr_handle); -@@ -4847,7 +4828,7 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) +@@ -4847,7 +4830,7 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); @@ -43818,7 +49059,7 @@ index 0bd12907d8..a805046d19 100644 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); if (!(speed_bit & supported_speed)) { -@@ -4991,32 +4972,35 @@ hns3_set_fiber_port_link_speed(struct hns3_hw *hw, +@@ -4991,32 +4974,35 @@ hns3_set_fiber_port_link_speed(struct hns3_hw *hw, return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex); } @@ -43872,7 +49113,7 @@ index 0bd12907d8..a805046d19 100644 } return 0; -@@ -5079,7 +5063,7 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) +@@ -5079,7 +5065,7 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) if (ret) goto err_set_link_speed; @@ -43881,7 +49122,7 @@ index 0bd12907d8..a805046d19 100644 err_set_link_speed: (void)hns3_cfg_mac_mode(hw, false); -@@ -5096,12 +5080,6 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) +@@ -5096,12 +5082,6 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) return ret; } @@ -43894,7 +49135,7 @@ index 0bd12907d8..a805046d19 100644 
static int hns3_dev_start(struct rte_eth_dev *dev) { -@@ -5155,10 +5133,7 @@ hns3_dev_start(struct rte_eth_dev *dev) +@@ -5155,10 +5135,7 @@ hns3_dev_start(struct rte_eth_dev *dev) rte_spinlock_unlock(&hw->lock); hns3_rx_scattered_calc(dev); @@ -43906,7 +49147,7 @@ index 0bd12907d8..a805046d19 100644 /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); -@@ -5236,12 +5211,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) +@@ -5236,12 +5213,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) dev->data->dev_started = 0; hw->adapter_state = HNS3_NIC_STOPPING; @@ -43920,7 +49161,7 @@ index 0bd12907d8..a805046d19 100644 rte_spinlock_lock(&hw->lock); if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -5327,7 +5297,7 @@ hns3_get_autoneg_fc_mode(struct hns3_hw *hw) +@@ -5327,7 +5299,7 @@ hns3_get_autoneg_fc_mode(struct hns3_hw *hw) /* * Flow control auto-negotiation is not supported for fiber and @@ -43929,7 +49170,7 @@ index 0bd12907d8..a805046d19 100644 */ case HNS3_MEDIA_TYPE_FIBER: case HNS3_MEDIA_TYPE_BACKPLANE: -@@ -5415,16 +5385,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) +@@ -5415,16 +5387,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) if (!pf->support_fc_autoneg) { if (autoneg != 0) { @@ -43947,7 +49188,7 @@ index 0bd12907d8..a805046d19 100644 return -EOPNOTSUPP; } -@@ -5579,15 +5540,15 @@ hns3_reinit_dev(struct hns3_adapter *hns) +@@ -5579,15 +5542,15 @@ hns3_reinit_dev(struct hns3_adapter *hns) return ret; } @@ -43967,35 +49208,21 @@ index 0bd12907d8..a805046d19 100644 return ret; } -@@ -5633,23 +5594,60 @@ is_pf_reset_done(struct hns3_hw *hw) +@@ -5633,23 +5596,50 @@ is_pf_reset_done(struct hns3_hw *hw) return true; } +static enum hns3_reset_level +hns3_detect_reset_event(struct hns3_hw *hw) +{ -+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + enum hns3_reset_level new_req = HNS3_NONE_RESET; -+ enum hns3_reset_level last_req; + uint32_t vector0_intr_state; + -+ last_req = hns3_get_reset_level(hns, &hw->reset.pending); + vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); -+ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) { -+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) + new_req = HNS3_IMP_RESET; -+ } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) { -+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) + new_req = HNS3_GLOBAL_RESET; -+ } -+ -+ if (new_req == HNS3_NONE_RESET) -+ return HNS3_NONE_RESET; -+ -+ if (last_req == HNS3_NONE_RESET || last_req < new_req) { -+ hns3_schedule_delayed_reset(hns); -+ hns3_warn(hw, "High level reset detected, delay do reset"); -+ } + + return new_req; +} @@ -44021,10 +49248,14 @@ index 0bd12907d8..a805046d19 100644 + return false; + + new_req = hns3_detect_reset_event(hw); ++ if (new_req == HNS3_NONE_RESET) ++ return false; ++ + last_req = hns3_get_reset_level(hns, &hw->reset.pending); -+ if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET && -+ new_req < last_req) { -+ hns3_warn(hw, "High level reset %d is pending", last_req); ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); return true; } - reset = hns3_get_reset_level(hns, 
&hw->reset.request); @@ -44038,7 +49269,7 @@ index 0bd12907d8..a805046d19 100644 return true; } return false; -@@ -5696,17 +5694,6 @@ hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) +@@ -5696,17 +5686,6 @@ hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) return hns3_cmd_send(hw, &desc, 1); } @@ -44056,7 +49287,7 @@ index 0bd12907d8..a805046d19 100644 static void hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) { -@@ -5724,7 +5711,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) +@@ -5724,7 +5703,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) switch (reset_level) { case HNS3_IMP_RESET: @@ -44067,7 +49298,7 @@ index 0bd12907d8..a805046d19 100644 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", tv.tv_sec, tv.tv_usec); break; -@@ -5849,12 +5838,7 @@ hns3_stop_service(struct hns3_adapter *hns) +@@ -5849,12 +5830,7 @@ hns3_stop_service(struct hns3_adapter *hns) rte_eal_alarm_cancel(hns3_service_handler, eth_dev); hns3_update_linkstatus_and_event(hw, false); } @@ -44081,7 +49312,7 @@ index 0bd12907d8..a805046d19 100644 rte_spinlock_lock(&hw->lock); if (hns->hw.adapter_state == HNS3_NIC_STARTED || -@@ -5887,8 +5871,7 @@ hns3_start_service(struct hns3_adapter *hns) +@@ -5887,8 +5863,7 @@ hns3_start_service(struct hns3_adapter *hns) hw->reset.level == HNS3_GLOBAL_RESET) hns3_set_rst_done(hw); eth_dev = &rte_eth_devices[hw->data->port_id]; @@ -44091,7 +49322,7 @@ index 0bd12907d8..a805046d19 100644 if (hw->adapter_state == HNS3_NIC_STARTED) { /* * This API parent function already hold the hns3_hw.lock, the -@@ -5943,10 +5926,6 @@ hns3_restore_conf(struct hns3_adapter *hns) +@@ -5943,10 +5918,6 @@ hns3_restore_conf(struct hns3_adapter *hns) if (ret) goto err_promisc; @@ -44102,7 +49333,7 @@ index 0bd12907d8..a805046d19 100644 ret = hns3_restore_ptp(hns); if (ret) goto err_promisc; -@@ -6042,56 +6021,27 @@ hns3_reset_service(void *param) +@@ -6042,56 +6013,27 @@ hns3_reset_service(void *param) hns3_msix_process(hns, reset_level); } @@ -44175,7 +49406,7 @@ index 0bd12907d8..a805046d19 100644 } static int -@@ -6100,28 +6050,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, +@@ -6100,28 +6042,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, unsigned int num) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -44219,7 +49450,16 @@ index 0bd12907d8..a805046d19 100644 static int get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) { -@@ -6191,7 +6141,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) +@@ -6157,7 +6099,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) + { + struct hns3_sfp_info_cmd *resp; + uint32_t tmp_fec_capa; +- uint8_t auto_state; ++ uint8_t auto_state = 0; + struct hns3_cmd_desc desc; + int ret; + +@@ -6191,7 +6133,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) } /* @@ -44228,7 +49468,7 @@ index 0bd12907d8..a805046d19 100644 * that defined in the ethdev library. So the sequence needs * to be converted. */ -@@ -6259,62 +6209,53 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) +@@ -6259,62 +6201,53 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) } static uint32_t @@ -44322,7 +49562,7 @@ index 0bd12907d8..a805046d19 100644 return -EINVAL; } -@@ -6322,12 +6263,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) +@@ -6322,12 +6255,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) * Check whether the configured mode is within the FEC capability. 
* If not, the configured mode will not be supported. */ @@ -44353,7 +49593,7 @@ index 0bd12907d8..a805046d19 100644 rte_spinlock_lock(&hw->lock); ret = hns3_set_fec_hw(hw, mode); if (ret) { -@@ -6382,7 +6338,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) +@@ -6382,7 +6330,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) ret = hns3_cmd_send(hw, &desc, 1); if (ret) { hns3_err(hw, @@ -44362,7 +49602,7 @@ index 0bd12907d8..a805046d19 100644 ret); return false; } -@@ -6420,7 +6376,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, +@@ -6420,7 +6368,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); if (ret) { @@ -44371,7 +49611,7 @@ index 0bd12907d8..a805046d19 100644 ret); return ret; } -@@ -6457,7 +6413,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, +@@ -6457,7 +6405,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, return -ENOTSUP; if (!hns3_optical_module_existed(hw)) { @@ -44380,7 +49620,7 @@ index 0bd12907d8..a805046d19 100644 return -EIO; } -@@ -6520,7 +6476,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, +@@ -6520,7 +6468,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; break; default: @@ -44389,11 +49629,30 @@ index 0bd12907d8..a805046d19 100644 sfp_type.type, sfp_type.ext_type); return -EINVAL; } +@@ -6767,6 +6715,8 @@ static const struct rte_pci_id pci_id_hns3_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, ++ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_ROH) }, ++ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_ROH) }, + { .vendor_id = 0, }, /* sentinel */ + }; + diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h -index aa45b31261..5ba9503bf8 100644 +index aa45b31261..84d5754a70 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.h +++ b/dpdk/drivers/net/hns3/hns3_ethdev.h -@@ -75,7 +75,6 @@ +@@ -28,7 +28,9 @@ + #define HNS3_DEV_ID_25GE_RDMA 0xA222 + #define HNS3_DEV_ID_50GE_RDMA 0xA224 + #define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226 ++#define HNS3_DEV_ID_100G_ROH 0xA227 + #define HNS3_DEV_ID_200G_RDMA 0xA228 ++#define HNS3_DEV_ID_200G_ROH 0xA22C + #define HNS3_DEV_ID_100G_VF 0xA22E + #define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F + +@@ -75,7 +77,6 @@ #define HNS3_DEFAULT_MTU 1500UL #define HNS3_DEFAULT_FRAME_LEN (HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD) #define HNS3_HIP08_MIN_TX_PKT_LEN 33 @@ -44401,7 +49660,7 @@ index aa45b31261..5ba9503bf8 100644 #define HNS3_BITS_PER_BYTE 8 -@@ -126,7 +125,7 @@ struct hns3_tc_info { +@@ -126,7 +127,7 @@ struct hns3_tc_info { uint8_t tc_sch_mode; /* 0: sp; 1: dwrr */ uint8_t pgid; uint32_t bw_limit; @@ -44410,7 +49669,7 @@ index aa45b31261..5ba9503bf8 100644 }; struct hns3_dcb_info { -@@ -217,6 +216,8 @@ struct hns3_mac { +@@ -217,6 +218,8 @@ struct hns3_mac { uint32_t advertising; /* advertised capability in the local part */ uint32_t lp_advertising; /* advertised capability in the link partner */ uint8_t support_autoneg; @@ -44419,7 +49678,17 @@ index aa45b31261..5ba9503bf8 100644 }; struct hns3_fake_queue_data { -@@ -502,8 +503,15 @@ struct hns3_hw { +@@ -486,6 +489,9 @@ struct hns3_queue_intr { + #define HNS3_PKTS_DROP_STATS_MODE1 0 + #define HNS3_PKTS_DROP_STATS_MODE2 1 + ++#define HNS3_RX_DMA_ADDR_ALIGN_128 128 ++#define HNS3_RX_DMA_ADDR_ALIGN_64 64 ++ + struct 
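/*
 * A small sketch (not from the patch) of how the new
 * HNS3_RX_DMA_ADDR_ALIGN_64/128 constants are meant to be used: for any
 * power-of-two alignment, addr & (align - 1) is the misaligned remainder,
 * which is exactly the test hns3_check_rx_dma_addr() applies to every RX
 * buffer later in this patch. Names and values here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static int check_rx_dma_addr(uint64_t dma_addr, uint16_t align)
{
	/* align must be a power of two (64 or 128 for this hardware). */
	uint64_t rem = dma_addr & (uint64_t)(align - 1);

	return rem == 0 ? 0 : -1;
}

int main(void)
{
	printf("%d\n", check_rx_dma_addr(0x1000, 128));	/* 0: aligned */
	printf("%d\n", check_rx_dma_addr(0x1040, 128));	/* -1: off by 64 */
	printf("%d\n", check_rx_dma_addr(0x1040, 64));	/* 0: aligned */
	return 0;
}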
hns3_hw { + struct rte_eth_dev_data *data; + void *io_base; +@@ -502,8 +508,15 @@ struct hns3_hw { struct hns3_tqp_stats tqp_stats; /* Include Mac stats | Rx stats | Tx stats */ struct hns3_mac_stats mac_stats; @@ -44435,7 +49704,7 @@ index aa45b31261..5ba9503bf8 100644 uint32_t fw_version; uint16_t pf_vf_if_version; /* version of communication interface */ -@@ -523,7 +531,6 @@ struct hns3_hw { +@@ -523,7 +536,6 @@ struct hns3_hw { /* The configuration info of RSS */ struct hns3_rss_conf rss_info; @@ -44443,16 +49712,21 @@ index aa45b31261..5ba9503bf8 100644 uint16_t rss_ind_tbl_size; uint16_t rss_key_size; -@@ -548,7 +555,7 @@ struct hns3_hw { +@@ -548,7 +560,12 @@ struct hns3_hw { * The minimum length of the packet supported by hardware in the Tx * direction. */ - uint32_t min_tx_pkt_len; + uint8_t min_tx_pkt_len; ++ /* ++ * The required alignment of the DMA address of the RX buffer. ++ * See HNS3_RX_DMA_ADDR_ALIGN_XXX for available values. ++ */ ++ uint16_t rx_dma_addr_align; struct hns3_queue_intr intr; /* -@@ -571,12 +578,12 @@ struct hns3_hw { +@@ -571,12 +588,12 @@ struct hns3_hw { /* * vlan mode. * value range: @@ -44467,7 +49741,7 @@ index aa45b31261..5ba9503bf8 100644 * For example, driver need discard the stripped PVID tag to ensure * the PVID will not report to mbuf and shift the inserted VLAN tag * to avoid port based VLAN covering it. -@@ -724,7 +731,7 @@ enum hns3_mp_req_type { +@@ -724,7 +741,7 @@ enum hns3_mp_req_type { HNS3_MP_REQ_MAX }; @@ -44476,7 +49750,7 @@ index aa45b31261..5ba9503bf8 100644 struct hns3_mp_param { enum hns3_mp_req_type type; int port_id; -@@ -871,13 +878,6 @@ struct hns3_adapter { +@@ -871,13 +888,6 @@ struct hns3_adapter { struct hns3_ptype_table ptype_tbl __rte_cache_aligned; }; @@ -44490,7 +49764,7 @@ index aa45b31261..5ba9503bf8 100644 enum { HNS3_DEV_SUPPORT_DCB_B, HNS3_DEV_SUPPORT_COPPER_B, -@@ -891,6 +891,7 @@ enum { +@@ -891,6 +901,7 @@ enum { HNS3_DEV_SUPPORT_RAS_IMP_B, HNS3_DEV_SUPPORT_TM_B, HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, @@ -44498,7 +49772,7 @@ index aa45b31261..5ba9503bf8 100644 }; #define hns3_dev_get_support(hw, _name) \ -@@ -996,15 +997,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) +@@ -996,15 +1007,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) #define hns3_read_dev(a, reg) \ hns3_read_reg((a)->io_base, (reg)) @@ -44514,7 +49788,7 @@ index aa45b31261..5ba9503bf8 100644 static inline uint64_t hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) { -@@ -1043,22 +1035,8 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); +@@ -1043,22 +1045,8 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, uint32_t link_speed, uint8_t link_duplex); void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); @@ -44539,7 +49813,7 @@ index aa45b31261..5ba9503bf8 100644 static inline bool is_reset_pending(struct hns3_adapter *hns) -@@ -1071,4 +1049,15 @@ is_reset_pending(struct hns3_adapter *hns) +@@ -1071,4 +1059,15 @@ is_reset_pending(struct hns3_adapter *hns) return ret; } @@ -44556,7 +49830,7 @@ index aa45b31261..5ba9503bf8 100644 + #endif /* _HNS3_ETHDEV_H_ */ diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -index 805abd4543..0d1d271f37 100644 +index 805abd4543..5632d6618c 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c @@ -6,7 +6,6 @@ @@ -44567,7 +49841,64 @@ index 
805abd4543..0d1d271f37 100644 #include #include "hns3_ethdev.h" -@@ -242,7 +241,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, +@@ -172,11 +171,13 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes, +- RTE_ETHER_ADDR_LEN, false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_ADD); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -191,12 +192,13 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_REMOVE, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, +- false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_REMOVE); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -215,6 +217,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *old_addr; + uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + + /* +@@ -227,9 +230,10 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes, + RTE_ETHER_ADDR_LEN); + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes, +- HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_MODIFY); ++ memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) { + /* + * The hns3 VF PMD depends on the hns3 PF kernel ethdev +@@ -242,7 +246,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, if (ret == -EPERM) { hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, old_addr); @@ -44576,7 +49907,7 @@ index 805abd4543..0d1d271f37 100644 mac_str); } else { hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, -@@ -250,6 +249,8 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, +@@ -250,6 +254,8 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str, ret); } @@ -44585,7 +49916,43 @@ index 805abd4543..0d1d271f37 100644 } rte_ether_addr_copy(mac_addr, -@@ -318,7 +319,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, +@@ -264,12 +270,13 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, +- HNS3_MBX_MAC_VLAN_MC_ADD, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, ++ HNS3_MBX_MAC_VLAN_MC_ADD); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, 
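/*
 * A minimal imitation (not from the patch) of the conversion pattern used
 * throughout this file: every VF-to-PF mailbox call moves from a
 * (code, subcode, raw byte buffer) argument list to a typed request that is
 * zeroed, stamped with code/subcode, filled, then sent. Types are
 * simplified; the real struct hns3_vf_to_pf_msg is defined in hns3_mbx.h.
 */
#include <stdint.h>
#include <string.h>

#define MSG_DATA_SIZE 14	/* payload bytes, per the mailbox format */

struct vf_to_pf_msg {
	uint8_t code;
	uint8_t subcode;
	uint8_t data[MSG_DATA_SIZE];
};

static void mbx_setup(struct vf_to_pf_msg *req, uint8_t code, uint8_t subcode)
{
	memset(req, 0, sizeof(*req));	/* stale payload must never leak */
	req->code = code;
	req->subcode = subcode;
}

/* Typical use, e.g. carrying a 6-byte MAC address (opcodes made up): */
static void example_fill(struct vf_to_pf_msg *req, const uint8_t mac[6])
{
	mbx_setup(req, 4, 1);
	memcpy(req->data, mac, 6);
	/* ...then hand req to the send routine (hns3vf_mbx_send in this
	 * patch), which owns response matching and error reporting. */
}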
NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -285,12 +292,13 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, +- HNS3_MBX_MAC_VLAN_MC_REMOVE, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, ++ HNS3_MBX_MAC_VLAN_MC_REMOVE); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -318,7 +326,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, * 1. The promiscuous/allmulticast mode can be configured successfully * only based on the trusted VF device. If based on the non trusted * VF device, configuring promiscuous/allmulticast mode will fail. @@ -44594,7 +49961,7 @@ index 805abd4543..0d1d271f37 100644 * kernel ethdev driver on the host by the following command: * "ip link set vf turst on" * 2. After the promiscuous mode is configured successfully, hns3 VF PMD -@@ -330,7 +331,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, +@@ -330,14 +338,15 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, * filter is still effective even in promiscuous mode. If upper * applications don't call rte_eth_dev_vlan_filter API function to * set vlan based on VF device, hns3 VF PMD will can't receive @@ -44602,8 +49969,63 @@ index 805abd4543..0d1d271f37 100644 + * the packets with vlan tag in promiscuous mode. */ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); - req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; -@@ -496,7 +497,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) +- req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; +- req->msg[1] = en_bc_pmc ? 1 : 0; +- req->msg[2] = en_uc_pmc ? 1 : 0; +- req->msg[3] = en_mc_pmc ? 1 : 0; +- req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; ++ req->msg.code = HNS3_MBX_SET_PROMISC_MODE; ++ req->msg.en_bc = en_bc_pmc ? 1 : 0; ++ req->msg.en_uc = en_uc_pmc ? 1 : 0; ++ req->msg.en_mc = en_mc_pmc ? 1 : 0; ++ req->msg.en_limit_promisc = ++ hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) +@@ -426,30 +435,26 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, + bool mmap, enum hns3_ring_type queue_type, + uint16_t queue_id) + { +- struct hns3_vf_bind_vector_msg bind_msg; ++ struct hns3_vf_to_pf_msg req = {0}; + const char *op_str; +- uint16_t code; + int ret; + +- memset(&bind_msg, 0, sizeof(bind_msg)); +- code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : ++ req.code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : + HNS3_MBX_UNMAP_RING_TO_VECTOR; +- bind_msg.vector_id = (uint8_t)vector_id; ++ req.vector_id = (uint8_t)vector_id; ++ req.ring_num = 1; + + if (queue_type == HNS3_RING_TYPE_RX) +- bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX; ++ req.ring_param[0].int_gl_index = HNS3_RING_GL_RX; + else +- bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX; +- +- bind_msg.param[0].ring_type = queue_type; +- bind_msg.ring_num = 1; +- bind_msg.param[0].tqp_index = queue_id; ++ req.ring_param[0].int_gl_index = HNS3_RING_GL_TX; ++ req.ring_param[0].ring_type = queue_type; ++ req.ring_param[0].tqp_index = queue_id; + op_str = mmap ? 
"Map" : "Unmap"; +- ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, +- sizeof(bind_msg), false, NULL, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) +- hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.", +- op_str, queue_id, bind_msg.vector_id, ret); ++ hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret = %d.", ++ op_str, queue_id, req.vector_id, ret); + + return ret; + } +@@ -496,7 +501,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) /* When RSS is not configured, redirect the packet queue 0 */ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; @@ -44611,7 +50033,22 @@ index 805abd4543..0d1d271f37 100644 rss_conf = conf->rx_adv_conf.rss_conf; ret = hns3_dev_rss_hash_update(dev, &rss_conf); if (ret) -@@ -611,6 +611,19 @@ hns3vf_enable_irq0(struct hns3_hw *hw) +@@ -533,10 +537,12 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) + static int + hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu) + { ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu, +- sizeof(mtu), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0); ++ memcpy(req.data, &mtu, sizeof(mtu)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret); + +@@ -611,6 +617,19 @@ hns3vf_enable_irq0(struct hns3_hw *hw) hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); } @@ -44631,7 +50068,42 @@ index 805abd4543..0d1d271f37 100644 static enum hns3vf_evt_cause hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) { -@@ -685,67 +698,10 @@ hns3vf_interrupt_handler(void *param) +@@ -630,13 +649,8 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + val = hns3_read_dev(hw, HNS3_VF_RST_ING); + hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); + val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); +- if (clearval) { +- hw->reset.stats.global_cnt++; +- hns3_warn(hw, "Global reset detected, clear reset status"); +- } else { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, "Global reset detected, don't clear reset status"); +- } ++ hw->reset.stats.global_cnt++; ++ hns3_warn(hw, "Global reset detected, clear reset status"); + + ret = HNS3VF_VECTOR0_EVENT_RST; + goto out; +@@ -651,9 +665,9 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + + val = 0; + ret = HNS3VF_VECTOR0_EVENT_OTHER; ++ + out: +- if (clearval) +- *clearval = val; ++ *clearval = val; + return ret; + } + +@@ -679,73 +693,16 @@ hns3vf_interrupt_handler(void *param) + hns3_schedule_reset(hns); + break; + case HNS3VF_VECTOR0_EVENT_MBX: +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); + break; + default: break; } @@ -44703,7 +50175,19 @@ index 805abd4543..0d1d271f37 100644 } void -@@ -780,6 +736,14 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) +@@ -771,15 +728,24 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) + uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED; + uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN; + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); ++ struct hns3_vf_to_pf_msg req; + + __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN, + __ATOMIC_RELEASE); + +- (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); ++ (void)hns3vf_mbx_send(hw, &req, false, NULL, 0); while (remain_ms > 0) { rte_delay_ms(HNS3_POLL_RESPONE_MS); @@ -44714,11 +50198,11 
@@ index 805abd4543..0d1d271f37 100644 + * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE + * mailbox from PF driver to get this capability. + */ -+ hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) != HNS3_PF_PUSH_LSC_CAP_UNKNOWN) break; -@@ -810,26 +774,10 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) +@@ -810,26 +776,10 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) static int hns3vf_get_capability(struct hns3_hw *hw) { @@ -44747,7 +50231,11 @@ index 805abd4543..0d1d271f37 100644 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; -@@ -840,7 +788,7 @@ hns3vf_get_capability(struct hns3_hw *hw) +@@ -837,10 +787,11 @@ hns3vf_get_capability(struct hns3_hw *hw) + hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; + hw->rss_info.ipv6_sctp_offload_supported = false; + hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; return 0; } @@ -44756,15 +50244,93 @@ index 805abd4543..0d1d271f37 100644 if (ret) { PMD_INIT_LOG(ERR, "failed to query dev specifications, ret = %d", -@@ -852,7 +800,6 @@ hns3vf_get_capability(struct hns3_hw *hw) +@@ -852,9 +803,9 @@ hns3vf_get_capability(struct hns3_hw *hw) hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; - hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; hw->rss_info.ipv6_sctp_offload_supported = true; hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + + return 0; + } +@@ -880,12 +831,13 @@ hns3vf_check_tqp_info(struct hns3_hw *hw) + static int + hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t resp_msg; + int ret; -@@ -1031,7 +978,6 @@ hns3vf_get_configuration(struct hns3_hw *hw) +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, +- HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0, +- true, &resp_msg, sizeof(resp_msg)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_GET_PORT_BASE_VLAN_STATE); ++ ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg)); + if (ret) { + if (ret == -ETIME) { + /* +@@ -926,10 +878,12 @@ hns3vf_get_queue_info(struct hns3_hw *hw) + { + #define HNS3VF_TQPS_RSS_INFO_LEN 6 + uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true, +- resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, ++ resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret); + return ret; +@@ -946,10 +900,12 @@ hns3vf_get_queue_depth(struct hns3_hw *hw) + { + #define HNS3VF_TQPS_DEPTH_INFO_LEN 4 + uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true, +- resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_QDEPTH, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, ++ resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d", + ret); +@@ -988,10 +944,11 @@ hns3vf_get_basic_info(struct hns3_hw *hw) + { + uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE]; + struct hns3_basic_info *basic_info; ++ struct hns3_vf_to_pf_msg req; + 
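/*
 * A generic rendering (not from the patch) of the wait loop just above:
 * while probing whether the PF can push link-status changes, the VF must
 * drain its own mailbox on every tick, because the PF's answer arrives as a
 * mailbox message rather than as a return value. All names are stand-ins.
 */
#include <stdbool.h>

#define POLL_TICK_MS 50

extern void delay_ms(unsigned int ms);	/* stand-in for rte_delay_ms() */
extern void service_mailbox(void *hw);	/* e.g. hns3vf_handle_mbx_msg() */
extern bool answer_arrived(void *hw);	/* checks the capability flag */

static bool wait_for_pf_answer(void *hw, int budget_ms)
{
	while (budget_ms > 0) {
		delay_ms(POLL_TICK_MS);
		service_mailbox(hw);	/* the answer is delivered here */
		if (answer_arrived(hw))
			return true;
		budget_ms -= POLL_TICK_MS;
	}
	return false;	/* no answer: treat the capability as absent */
}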
int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0, +- true, resp_msg, sizeof(resp_msg)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, resp_msg, sizeof(resp_msg)); + if (ret) { + hns3_err(hw, "failed to get basic info from PF, ret = %d.", + ret); +@@ -1011,10 +968,11 @@ static int + hns3vf_get_host_mac_addr(struct hns3_hw *hw) + { + uint8_t host_mac[RTE_ETHER_ADDR_LEN]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0, +- true, host_mac, RTE_ETHER_ADDR_LEN); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN); + if (ret) { + hns3_err(hw, "Failed to get mac addr from PF: %d", ret); + return ret; +@@ -1031,7 +989,6 @@ hns3vf_get_configuration(struct hns3_hw *hw) int ret; hw->mac.media_type = HNS3_MEDIA_TYPE_NONE; @@ -44772,7 +50338,126 @@ index 805abd4543..0d1d271f37 100644 /* Get device capability */ ret = hns3vf_get_capability(hw); -@@ -1385,10 +1331,12 @@ hns3vf_service_handler(void *param) +@@ -1078,6 +1035,7 @@ static void + hns3vf_request_link_info(struct hns3_hw *hw) + { + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); ++ struct hns3_vf_to_pf_msg req; + bool send_req; + int ret; + +@@ -1089,8 +1047,8 @@ hns3vf_request_link_info(struct hns3_hw *hw) + if (!send_req) + return; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_err(hw, "failed to fetch link status, ret = %d", ret); + return; +@@ -1134,19 +1092,18 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + static int + hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) + { +-#define HNS3VF_VLAN_MBX_MSG_LEN 5 ++ struct hns3_mbx_vlan_filter *vlan_filter; ++ struct hns3_vf_to_pf_msg req = {0}; + struct hns3_hw *hw = &hns->hw; +- uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN]; +- uint16_t proto = htons(RTE_ETHER_TYPE_VLAN); +- uint8_t is_kill = on ? 0 : 1; + +- msg_data[0] = is_kill; +- memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); +- memcpy(&msg_data[3], &proto, sizeof(proto)); ++ req.code = HNS3_MBX_SET_VLAN; ++ req.subcode = HNS3_MBX_VLAN_FILTER; ++ vlan_filter = (struct hns3_mbx_vlan_filter *)req.data; ++ vlan_filter->is_kill = on ? 0 : 1; ++ vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN); ++ vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id); + +- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER, +- msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL, +- 0); ++ return hns3vf_mbx_send(hw, &req, true, NULL, 0); + } + + static int +@@ -1175,6 +1132,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + static int + hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + int ret; + +@@ -1182,9 +1140,10 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + return 0; + + msg_data = enable ? 1 : 0; +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, +- HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data, +- sizeof(msg_data), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_ENABLE_VLAN_FILTER); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "%s vlan filter failed, ret = %d.", + enable ? 
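/*
 * A freestanding sketch (not from the patch) of the VLAN-filter payload
 * used below: a packed struct with explicit little-endian conversion
 * replaces hand-computed memcpy offsets into a byte array. cpu_to_le16()
 * here stands in for DPDK's rte_cpu_to_le_16().
 */
#include <stdint.h>

struct mbx_vlan_filter {
	uint8_t  is_kill;	/* 0 = add the filter, 1 = remove it */
	uint16_t vlan_id;	/* little-endian on the wire */
	uint16_t proto;		/* ethertype, little-endian on the wire */
} __attribute__((packed));

/* Packing matters: unpacked, the struct is 6 bytes and the PF would read
 * vlan_id and proto from the wrong offsets. */
_Static_assert(sizeof(struct mbx_vlan_filter) == 5, "wire format is 5 bytes");

static uint16_t cpu_to_le16(uint16_t v)
{
	union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));
}

static void fill_vlan_filter(struct mbx_vlan_filter *f, int on, uint16_t vid)
{
	f->is_kill = on ? 0 : 1;
	f->vlan_id = cpu_to_le16(vid);
	f->proto   = cpu_to_le16(0x8100);	/* RTE_ETHER_TYPE_VLAN */
}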
"enable" : "disable", ret); +@@ -1195,12 +1154,15 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + static int + hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + int ret; + + msg_data = enable ? 1 : 0; +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, +- &msg_data, sizeof(msg_data), false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_VLAN_RX_OFF_CFG); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) + hns3_err(hw, "vf %s strip failed, ret = %d.", + enable ? "enable" : "disable", ret); +@@ -1344,11 +1306,13 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev) + static int + hns3vf_set_alive(struct hns3_hw *hw, bool alive) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + + msg_data = alive ? 1 : 0; +- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data, +- sizeof(msg_data), false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ return hns3vf_mbx_send(hw, &req, false, NULL, 0); + } + + static void +@@ -1356,11 +1320,12 @@ hns3vf_keep_alive_handler(void *param) + { + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = eth_dev->data->dev_private; ++ struct hns3_vf_to_pf_msg req; + struct hns3_hw *hw = &hns->hw; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, +- false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) + hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", + ret); +@@ -1385,10 +1350,12 @@ hns3vf_service_handler(void *param) * Before querying the link status, check whether there is a reset * pending, and if so, abandon the query. 
*/ @@ -44787,7 +50472,22 @@ index 805abd4543..0d1d271f37 100644 rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, eth_dev); -@@ -1515,6 +1463,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) +@@ -1497,9 +1464,11 @@ hns3vf_init_hardware(struct hns3_adapter *hns) + static int + hns3vf_clear_vport_list(struct hns3_hw *hw) + { +- return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL, +- HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false, +- NULL, 0); ++ struct hns3_vf_to_pf_msg req; ++ ++ hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL, ++ HNS3_MBX_VPORT_LIST_CLEAR); ++ return hns3vf_mbx_send(hw, &req, false, NULL, 0); + } + + static int +@@ -1515,6 +1484,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) /* Get hardware io base address from pcie BAR2 IO space */ hw->io_base = pci_dev->mem_resource[2].addr; @@ -44798,7 +50498,7 @@ index 805abd4543..0d1d271f37 100644 /* Firmware command queue initialize */ ret = hns3_cmd_init_queue(hw); if (ret) { -@@ -1558,17 +1510,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) +@@ -1558,17 +1531,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) goto err_get_config; } @@ -44817,7 +50517,7 @@ index 805abd4543..0d1d271f37 100644 ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num); if (ret) { PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret); -@@ -1596,7 +1541,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) +@@ -1596,7 +1562,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) return 0; err_set_tc_queue: @@ -44826,7 +50526,7 @@ index 805abd4543..0d1d271f37 100644 err_get_config: hns3vf_disable_irq0(hw); -@@ -1627,7 +1572,7 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) +@@ -1627,7 +1593,7 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) (void)hns3vf_set_alive(hw, false); (void)hns3vf_set_promisc_mode(hw, false, false, false); hns3_flow_uninit(eth_dev); @@ -44835,7 +50535,7 @@ index 805abd4543..0d1d271f37 100644 hns3vf_disable_irq0(hw); rte_intr_disable(pci_dev->intr_handle); hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler, -@@ -1678,12 +1623,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) +@@ -1678,12 +1644,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) dev->data->dev_started = 0; hw->adapter_state = HNS3_NIC_STOPPING; @@ -44849,7 +50549,7 @@ index 805abd4543..0d1d271f37 100644 rte_spinlock_lock(&hw->lock); if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -1785,16 +1725,12 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) +@@ -1785,16 +1746,12 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) hns3_enable_rxd_adv_layout(hw); ret = hns3_init_queues(hns, reset_queue); @@ -44870,7 +50570,7 @@ index 805abd4543..0d1d271f37 100644 } static int -@@ -1843,10 +1779,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) +@@ -1843,10 +1800,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) rte_spinlock_unlock(&hw->lock); hns3_rx_scattered_calc(dev); @@ -44882,11 +50582,43 @@ index 805abd4543..0d1d271f37 100644 /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); -@@ -1911,7 +1844,13 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) +@@ -1891,11 +1845,25 @@ is_vf_reset_done(struct hns3_hw *hw) + return true; + } + ++static enum hns3_reset_level ++hns3vf_detect_reset_event(struct hns3_hw *hw) ++{ ++ enum hns3_reset_level reset = HNS3_NONE_RESET; ++ uint32_t cmdq_stat_reg; ++ ++ cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); ++ if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) ++ reset = HNS3_VF_RESET; ++ ++ return reset; 
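/*
 * Editor's note (not from the patch): hns3vf_detect_reset_event() above is
 * deliberately a pure decoder. It only translates the
 * HNS3_VECTOR0_CMDQ_STAT_REG bits into a reset level and leaves every side
 * effect (disabling the command queue, scheduling the delayed reset task)
 * to hns3vf_is_reset_pending() below. This mirrors the detect/arbitrate
 * split applied to the PF path earlier in this patch and keeps the decoder
 * trivially checkable, e.g. a status word with HNS3_VECTOR0_RST_INT_B set
 * must decode to HNS3_VF_RESET.
 */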
++} ++ + bool + hns3vf_is_reset_pending(struct hns3_adapter *hns) + { ++ enum hns3_reset_level last_req; + struct hns3_hw *hw = &hns->hw; +- enum hns3_reset_level reset; ++ enum hns3_reset_level new_req; + + /* + * According to the protocol of PCIe, FLR to a PF device resets the PF +@@ -1911,20 +1879,32 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) if (hw->reset.level == HNS3_VF_FULL_RESET) return false; - /* Check the registers to confirm whether there is reset pending */ +- hns3vf_check_event_cause(hns, NULL); +- reset = hns3vf_get_reset_level(hw, &hw->reset.pending); +- if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && +- hw->reset.level < reset) { +- hns3_warn(hw, "High level reset %d is pending", reset); + /* + * Only primary can process can process the reset event, + * so don't check reset event in secondary. @@ -44894,10 +50626,21 @@ index 805abd4543..0d1d271f37 100644 + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return false; + - hns3vf_check_event_cause(hns, NULL); - reset = hns3vf_get_reset_level(hw, &hw->reset.pending); - if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && -@@ -1925,6 +1864,7 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) ++ new_req = hns3vf_detect_reset_event(hw); ++ if (new_req == HNS3_NONE_RESET) ++ return false; ++ ++ last_req = hns3vf_get_reset_level(hw, &hw->reset.pending); ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); + return true; + } ++ + return false; + } + static int hns3vf_wait_hardware_ready(struct hns3_adapter *hns) { @@ -44905,7 +50648,7 @@ index 805abd4543..0d1d271f37 100644 struct hns3_hw *hw = &hns->hw; struct hns3_wait_data *wait_data = hw->reset.wait_data; struct timeval tv; -@@ -1945,12 +1885,14 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) +@@ -1945,12 +1925,14 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) return 0; wait_data->check_completion = NULL; @@ -44922,7 +50665,23 @@ index 805abd4543..0d1d271f37 100644 return -EAGAIN; } else if (wait_data->result == HNS3_WAIT_TIMEOUT) { hns3_clock_gettime(&tv); -@@ -2006,11 +1948,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) +@@ -1974,12 +1956,13 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) + static int + hns3vf_prepare_reset(struct hns3_adapter *hns) + { ++ struct hns3_vf_to_pf_msg req; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (hw->reset.level == HNS3_VF_FUNC_RESET) { +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, +- 0, true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + return ret; + } +@@ -2006,11 +1989,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) } hw->mac.link_status = RTE_ETH_LINK_DOWN; @@ -44935,7 +50694,7 @@ index 805abd4543..0d1d271f37 100644 rte_spinlock_lock(&hw->lock); if (hw->adapter_state == HNS3_NIC_STARTED || -@@ -2040,8 +1978,7 @@ hns3vf_start_service(struct hns3_adapter *hns) +@@ -2040,8 +2019,7 @@ hns3vf_start_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev; eth_dev = &rte_eth_devices[hw->data->port_id]; @@ -44945,7 +50704,7 @@ index 805abd4543..0d1d271f37 100644 if (hw->adapter_state == HNS3_NIC_STARTED) { hns3vf_start_poll_job(eth_dev); -@@ -2268,8 +2205,11 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) +@@ -2268,8 +2246,11 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) */ if (pci_dev->kdrv == 
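/*
 * Editor's note (not from the patch): the guard added in
 * hns3vf_is_reset_pending() above reads, once the duplicated words are
 * untangled, "only the primary process may handle the reset event, so do
 * not check for reset events in secondary processes". Resets tear down and
 * rebuild device-global state (queues, interrupt mappings), so exactly one
 * process, the primary one per rte_eal_process_type(), must own that work.
 */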
RTE_PCI_KDRV_IGB_UIO || pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) { @@ -44958,7 +50717,7 @@ index 805abd4543..0d1d271f37 100644 } rte_intr_enable(pci_dev->intr_handle); -@@ -2472,7 +2412,6 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) +@@ -2472,7 +2453,6 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) { @@ -46625,10 +52384,34 @@ index 66dc509086..b049774e9a 100644 hw->reset.stage = RESET_STAGE_PREWAIT; hns3_schedule_reset(hns); diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c -index b3563d4694..f1743c195e 100644 +index b3563d4694..9cdbc1668a 100644 --- a/dpdk/drivers/net/hns3/hns3_mbx.c +++ b/dpdk/drivers/net/hns3/hns3_mbx.c -@@ -40,23 +40,6 @@ hns3_resp_to_errno(uint16_t resp_code) +@@ -11,8 +11,6 @@ + #include "hns3_intr.h" + #include "hns3_rxtx.h" + +-#define HNS3_CMD_CODE_OFFSET 2 +- + static const struct errno_respcode_map err_code_map[] = { + {0, 0}, + {1, -EPERM}, +@@ -26,6 +24,14 @@ static const struct errno_respcode_map err_code_map[] = { + {95, -EOPNOTSUPP}, + }; + ++void ++hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, uint8_t code, uint8_t subcode) ++{ ++ memset(req, 0, sizeof(struct hns3_vf_to_pf_msg)); ++ req->code = code; ++ req->subcode = subcode; ++} ++ + static int + hns3_resp_to_errno(uint16_t resp_code) + { +@@ -40,23 +46,6 @@ hns3_resp_to_errno(uint16_t resp_code) return -EIO; } @@ -46652,7 +52435,7 @@ index b3563d4694..f1743c195e 100644 static int hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, uint8_t *resp_data, uint16_t resp_len) -@@ -67,7 +50,6 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +@@ -67,7 +56,6 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); struct hns3_mbx_resp_status *mbx_resp; uint32_t wait_time = 0; @@ -46660,7 +52443,7 @@ index b3563d4694..f1743c195e 100644 if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) { hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)", -@@ -78,14 +60,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +@@ -78,35 +66,29 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS; while (wait_time < mbx_time_limit) { if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { @@ -46677,8 +52460,9 @@ index b3563d4694..f1743c195e 100644 "reset pending"); return -EIO; } -@@ -93,20 +75,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - hns3_dev_handle_mbx_msg(hw); + +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); rte_delay_us(HNS3_WAIT_RESP_US); - if (hw->mbx_resp.matching_scheme == @@ -46700,7 +52484,7 @@ index b3563d4694..f1743c195e 100644 return -ETIME; } rte_io_rmb(); -@@ -132,7 +108,6 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) +@@ -132,7 +114,6 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) * we get the exact scheme which is used. 
*/ hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode; @@ -46708,15 +52492,115 @@ index b3563d4694..f1743c195e 100644 /* Update match_id and ensure the value of match_id is not zero */ hw->mbx_resp.match_id++; -@@ -185,7 +160,6 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - req->match_id = hw->mbx_resp.match_id; +@@ -145,54 +126,34 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) + } + + int +-hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +- const uint8_t *msg_data, uint8_t msg_len, bool need_resp, +- uint8_t *resp_data, uint16_t resp_len) ++hns3vf_mbx_send(struct hns3_hw *hw, ++ struct hns3_vf_to_pf_msg *req, bool need_resp, ++ uint8_t *resp_data, uint16_t resp_len) + { +- struct hns3_mbx_vf_to_pf_cmd *req; ++ struct hns3_mbx_vf_to_pf_cmd *cmd; + struct hns3_cmd_desc desc; +- bool is_ring_vector_msg; +- int offset; + int ret; + +- req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; +- +- /* first two bytes are reserved for code & subcode */ +- if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) { +- hns3_err(hw, +- "VF send mbx msg fail, msg len %u exceeds max payload len %d", +- msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET); +- return -EINVAL; +- } +- + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); +- req->msg[0] = code; +- is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) || +- (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) || +- (code == HNS3_MBX_GET_RING_VECTOR_MAP); +- if (!is_ring_vector_msg) +- req->msg[1] = subcode; +- if (msg_data) { +- offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET; +- memcpy(&req->msg[offset], msg_data, msg_len); +- } ++ cmd = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; ++ cmd->msg = *req; + + /* synchronous send */ + if (need_resp) { +- req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; ++ cmd->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; + rte_spinlock_lock(&hw->mbx_resp.lock); +- hns3_mbx_prepare_resp(hw, code, subcode); +- req->match_id = hw->mbx_resp.match_id; ++ hns3_mbx_prepare_resp(hw, req->code, req->subcode); ++ cmd->match_id = hw->mbx_resp.match_id; ret = hns3_cmd_send(hw, &desc, 1); if (ret) { - hw->mbx_resp.head--; rte_spinlock_unlock(&hw->mbx_resp.lock); hns3_err(hw, "VF failed(=%d) to send mbx message to PF", ret); -@@ -254,41 +228,10 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + return ret; + } + +- ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len); ++ ret = hns3_get_mbx_resp(hw, req->code, req->subcode, ++ resp_data, resp_len); + rte_spinlock_unlock(&hw->mbx_resp.lock); + } else { + /* asynchronous send */ +@@ -219,17 +180,17 @@ static void + hns3vf_handle_link_change_event(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { ++ struct hns3_mbx_link_status *link_info = ++ (struct hns3_mbx_link_status *)req->msg.msg_data; + uint8_t link_status, link_duplex; +- uint16_t *msg_q = req->msg; + uint8_t support_push_lsc; + uint32_t link_speed; + +- memcpy(&link_speed, &msg_q[2], sizeof(link_speed)); +- link_status = rte_le_to_cpu_16(msg_q[1]); +- link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); +- hns3vf_update_link_status(hw, link_status, link_speed, +- link_duplex); +- support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u; ++ link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status); ++ link_speed = rte_le_to_cpu_32(link_info->speed); ++ link_duplex = (uint8_t)rte_le_to_cpu_16(link_info->duplex); ++ hns3vf_update_link_status(hw, link_status, link_speed, link_duplex); ++ support_push_lsc = 
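/*
 * A skeleton (not from the patch) of the synchronous path of
 * hns3vf_mbx_send() above: callers are serialized by a lock, the request is
 * stamped with an incrementing match_id, and the sender then polls for a
 * response carrying the same id. Locking and hardware access are stubbed
 * out; all names are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

struct resp_state {
	uint16_t match_id;	/* id stamped into the last request */
	volatile bool received;	/* set by the mailbox service path */
	int status;		/* decoded PF errno for that request */
};

extern void lock(void);
extern void unlock(void);
extern int hw_send(uint16_t match_id);	/* posts the command descriptor */
extern void service_mailbox(struct resp_state *rs);

static int mbx_send_sync(struct resp_state *rs, int budget)
{
	int ret;

	lock();			/* one in-flight sync request at a time */
	rs->match_id++;		/* fresh id: stale replies cannot match */
	rs->received = false;
	ret = hw_send(rs->match_id);
	if (ret == 0) {
		while (budget-- > 0 && !rs->received)
			service_mailbox(rs);	/* drains replies; sets
						 * received on an id hit */
		ret = rs->received ? rs->status : -1;	/* -1: timed out */
	}
	unlock();
	return ret;
}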
(link_info->flag) & 1u; + hns3vf_update_push_lsc_cap(hw, support_push_lsc); + } + +@@ -238,7 +199,6 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { + enum hns3_reset_level reset_level; +- uint16_t *msg_q = req->msg; + + /* + * PF has asserted reset hence VF should go in pending +@@ -246,7 +206,7 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + * has been completely reset. After this stack should + * eventually be re-initialized. + */ +- reset_level = rte_le_to_cpu_16(msg_q[1]); ++ reset_level = rte_le_to_cpu_16(req->msg.reset_level); + hns3_atomic_set_bit(reset_level, &hw->reset.pending); + + hns3_warn(hw, "PF inform reset level %d", reset_level); +@@ -254,41 +214,10 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); } @@ -46759,7 +52643,7 @@ index b3563d4694..f1743c195e 100644 struct hns3_mbx_resp_status *resp = &hw->mbx_resp; uint32_t msg_data; -@@ -298,12 +241,6 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) +@@ -298,15 +227,10 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) * match_id to its response. So VF could use the match_id * to match the request. */ @@ -46770,13 +52654,22 @@ index b3563d4694..f1743c195e 100644 - hns3_info(hw, "detect mailbox support match id!"); - } if (req->match_id == resp->match_id) { - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], -@@ -319,11 +256,19 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) +- resp->resp_status = hns3_resp_to_errno(req->msg[3]); +- memcpy(resp->additional_info, &req->msg[4], ++ resp->resp_status = ++ hns3_resp_to_errno(req->msg.resp_status); ++ memcpy(resp->additional_info, &req->msg.resp_data, + HNS3_MBX_MAX_RESP_DATA_SIZE); + rte_io_wmb(); + resp->received_match_resp = true; +@@ -319,11 +243,20 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) * support copy request's match_id to its response. So VF follows the * original scheme to process. 
*/ -+ msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; +- resp->resp_status = hns3_resp_to_errno(req->msg[3]); +- memcpy(resp->additional_info, &req->msg[4], ++ msg_data = (uint32_t)req->msg.vf_mbx_msg_code << ++ HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode; + if (resp->req_msg_data != msg_data) { + hns3_warn(hw, + "received response tag (%u) is mismatched with requested tag (%u)", @@ -46784,8 +52677,8 @@ index b3563d4694..f1743c195e 100644 + return; + } + - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], ++ resp->resp_status = hns3_resp_to_errno(req->msg.resp_status); ++ memcpy(resp->additional_info, &req->msg.resp_data, HNS3_MBX_MAX_RESP_DATA_SIZE); - msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2]; - hns3_update_resp_position(hw, msg_data); @@ -46794,7 +52687,40 @@ index b3563d4694..f1743c195e 100644 } static void -@@ -429,15 +374,17 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) +@@ -351,11 +284,8 @@ static void + hns3pf_handle_link_change_event(struct hns3_hw *hw, + struct hns3_mbx_vf_to_pf_cmd *req) + { +-#define LINK_STATUS_OFFSET 1 +-#define LINK_FAIL_CODE_OFFSET 2 +- +- if (!req->msg[LINK_STATUS_OFFSET]) +- hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]); ++ if (!req->msg.link_status) ++ hns3_link_fail_parse(hw, req->msg.link_fail_code); + + hns3_update_linkstatus_and_event(hw, true); + } +@@ -364,8 +294,7 @@ static void + hns3_update_port_base_vlan_info(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { +-#define PVID_STATE_OFFSET 1 +- uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ? ++ uint16_t new_pvid_state = req->msg.pvid_state ? + HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; + /* + * Currently, hardware doesn't support more than two layers VLAN offload +@@ -414,7 +343,7 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) + while (next_to_use != tail) { + desc = &crq->desc[next_to_use]; + req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; +- opcode = req->msg[0] & 0xff; ++ opcode = req->msg.code & 0xff; + + flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag); + if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B)) +@@ -429,21 +358,71 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) * Clear opcode to inform intr thread don't process * again. */ @@ -46807,16 +52733,125 @@ index b3563d4694..f1743c195e 100644 } - crq->next_to_use = next_to_use; -- hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); + /* + * Note: the crq->next_to_use field should not updated, otherwise, + * mailbox messages may be discarded. 
+ */ ++} ++ ++void ++hns3pf_handle_mbx_msg(struct hns3_hw *hw) ++{ ++ struct hns3_cmq_ring *crq = &hw->cmq.crq; ++ struct hns3_mbx_vf_to_pf_cmd *req; ++ struct hns3_cmd_desc *desc; ++ uint16_t flag; ++ ++ rte_spinlock_lock(&hw->cmq.crq.lock); ++ ++ while (!hns3_cmd_crq_empty(hw)) { ++ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { ++ rte_spinlock_unlock(&hw->cmq.crq.lock); ++ return; ++ } ++ desc = &crq->desc[crq->next_to_use]; ++ req = (struct hns3_mbx_vf_to_pf_cmd *)desc->data; ++ ++ flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); ++ if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { ++ hns3_warn(hw, ++ "dropped invalid mailbox message, code = %u", ++ req->msg.code); ++ ++ /* dropping/not processing this invalid message */ ++ crq->desc[crq->next_to_use].flag = 0; ++ hns3_mbx_ring_ptr_move_crq(crq); ++ continue; ++ } ++ ++ switch (req->msg.code) { ++ case HNS3_MBX_PUSH_LINK_STATUS: ++ hns3pf_handle_link_change_event(hw, req); ++ break; ++ default: ++ hns3_err(hw, "received unsupported(%u) mbx msg", ++ req->msg.code); ++ break; ++ } ++ crq->desc[crq->next_to_use].flag = 0; ++ hns3_mbx_ring_ptr_move_crq(crq); ++ } ++ ++ /* Write back CMDQ_RQ header pointer, IMP need this pointer */ + hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); ++ ++ rte_spinlock_unlock(&hw->cmq.crq.lock); } void +-hns3_dev_handle_mbx_msg(struct hns3_hw *hw) ++hns3vf_handle_mbx_msg(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_cmq_ring *crq = &hw->cmq.crq; + struct hns3_mbx_pf_to_vf_cmd *req; + struct hns3_cmd_desc *desc; +@@ -454,7 +433,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + rte_spinlock_lock(&hw->cmq.crq.lock); + + handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY || +- !rte_thread_is_intr()) && hns->is_vf; ++ !rte_thread_is_intr()); + if (handle_out) { + /* + * Currently, any threads in the primary and secondary processes +@@ -485,7 +464,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + + desc = &crq->desc[crq->next_to_use]; + req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; +- opcode = req->msg[0] & 0xff; ++ opcode = req->msg.code & 0xff; + + flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); + if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { +@@ -499,8 +478,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + continue; + } + +- handle_out = hns->is_vf && desc->opcode == 0; +- if (handle_out) { ++ if (desc->opcode == 0) { + /* Message already processed by other thread */ + crq->desc[crq->next_to_use].flag = 0; + hns3_mbx_ring_ptr_move_crq(crq); +@@ -517,16 +495,6 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + case HNS3_MBX_ASSERTING_RESET: + hns3_handle_asserting_reset(hw, req); + break; +- case HNS3_MBX_PUSH_LINK_STATUS: +- /* +- * This message is reported by the firmware and is +- * reported in 'struct hns3_mbx_vf_to_pf_cmd' format. +- * Therefore, we should cast the req variable to +- * 'struct hns3_mbx_vf_to_pf_cmd' and then process it. +- */ +- hns3pf_handle_link_change_event(hw, +- (struct hns3_mbx_vf_to_pf_cmd *)req); +- break; + case HNS3_MBX_PUSH_VLAN_INFO: + /* + * When the PVID configuration status of VF device is +@@ -541,7 +509,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + * hns3 PF kernel driver, VF driver will receive this + * mailbox message from PF driver. 
+ */ +- hns3_handle_promisc_info(hw, req->msg[1]); ++ hns3_handle_promisc_info(hw, req->msg.promisc_en); + break; + default: + hns3_err(hw, "received unsupported(%u) mbx msg", diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h -index d637bd2b23..1d9a788df5 100644 +index d637bd2b23..16c0de43ee 100644 --- a/dpdk/drivers/net/hns3/hns3_mbx.h +++ b/dpdk/drivers/net/hns3/hns3_mbx.h @@ -22,7 +22,7 @@ enum HNS3_MBX_OPCODE { @@ -46828,7 +52863,11 @@ index d637bd2b23..1d9a788df5 100644 HNS3_MBX_GET_BDNUM, /* (VF -> PF) get BD num */ HNS3_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */ HNS3_MBX_GET_STREAMID, /* (VF -> PF) get stream id */ -@@ -89,21 +89,11 @@ enum hns3_mbx_link_fail_subcode { +@@ -85,25 +85,14 @@ enum hns3_mbx_link_fail_subcode { + HNS3_MBX_LF_XSFP_ABSENT, + }; + +-#define HNS3_MBX_MAX_MSG_SIZE 16 #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 #define HNS3_MBX_DEF_TIME_LIMIT_MS 500 @@ -46850,6 +52889,123 @@ index d637bd2b23..1d9a788df5 100644 /* The following fields used in the matching scheme for match_id */ uint16_t match_id; +@@ -113,6 +102,69 @@ struct hns3_mbx_resp_status { + uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE]; + }; + ++struct hns3_ring_chain_param { ++ uint8_t ring_type; ++ uint8_t tqp_index; ++ uint8_t int_gl_index; ++}; ++ ++struct hns3_mbx_vlan_filter { ++ uint8_t is_kill; ++ uint16_t vlan_id; ++ uint16_t proto; ++} __rte_packed; ++ ++struct hns3_mbx_link_status { ++ uint16_t link_status; ++ uint32_t speed; ++ uint16_t duplex; ++ uint8_t flag; ++} __rte_packed; ++ ++#define HNS3_MBX_MSG_MAX_DATA_SIZE 14 ++#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 ++struct hns3_vf_to_pf_msg { ++ uint8_t code; ++ union { ++ struct { ++ uint8_t subcode; ++ uint8_t data[HNS3_MBX_MSG_MAX_DATA_SIZE]; ++ }; ++ struct { ++ uint8_t en_bc; ++ uint8_t en_uc; ++ uint8_t en_mc; ++ uint8_t en_limit_promisc; ++ }; ++ struct { ++ uint8_t vector_id; ++ uint8_t ring_num; ++ struct hns3_ring_chain_param ++ ring_param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; ++ }; ++ struct { ++ uint8_t link_status; ++ uint8_t link_fail_code; ++ }; ++ }; ++}; ++ ++struct hns3_pf_to_vf_msg { ++ uint16_t code; ++ union { ++ struct { ++ uint16_t vf_mbx_msg_code; ++ uint16_t vf_mbx_msg_subcode; ++ uint16_t resp_status; ++ uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE]; ++ }; ++ uint16_t promisc_en; ++ uint16_t reset_level; ++ uint16_t pvid_state; ++ uint8_t msg_data[HNS3_MBX_MSG_MAX_DATA_SIZE]; ++ }; ++}; ++ + struct errno_respcode_map { + uint16_t resp_code; + int err_no; +@@ -128,7 +180,7 @@ struct hns3_mbx_vf_to_pf_cmd { + uint8_t msg_len; + uint8_t rsv2; + uint16_t match_id; +- uint8_t msg[HNS3_MBX_MAX_MSG_SIZE]; ++ struct hns3_vf_to_pf_msg msg; + }; + + struct hns3_mbx_pf_to_vf_cmd { +@@ -137,20 +189,7 @@ struct hns3_mbx_pf_to_vf_cmd { + uint8_t msg_len; + uint8_t rsv1; + uint16_t match_id; +- uint16_t msg[8]; +-}; +- +-struct hns3_ring_chain_param { +- uint8_t ring_type; +- uint8_t tqp_index; +- uint8_t int_gl_index; +-}; +- +-#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 +-struct hns3_vf_bind_vector_msg { +- uint8_t vector_id; +- uint8_t ring_num; +- struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; ++ struct hns3_pf_to_vf_msg msg; + }; + + struct hns3_pf_rst_done_cmd { +@@ -164,8 +203,11 @@ struct hns3_pf_rst_done_cmd { + ((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num) + + struct hns3_hw; +-void hns3_dev_handle_mbx_msg(struct hns3_hw *hw); +-int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +- const uint8_t *msg_data, uint8_t 
msg_len, bool need_resp, +- uint8_t *resp_data, uint16_t resp_len); ++void hns3pf_handle_mbx_msg(struct hns3_hw *hw); ++void hns3vf_handle_mbx_msg(struct hns3_hw *hw); ++void hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, ++ uint8_t code, uint8_t subcode); ++int hns3vf_mbx_send(struct hns3_hw *hw, ++ struct hns3_vf_to_pf_msg *req_msg, bool need_resp, ++ uint8_t *resp_data, uint16_t resp_len); + #endif /* _HNS3_MBX_H_ */ diff --git a/dpdk/drivers/net/hns3/hns3_mp.c b/dpdk/drivers/net/hns3/hns3_mp.c index 999b407f7d..c3005b943f 100644 --- a/dpdk/drivers/net/hns3/hns3_mp.c @@ -47120,7 +53276,7 @@ index 5812eb39db..d5f9d0ae9f 100644 #define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1) /* rl_usec convert to hardware count, as writing each 1 represents 4us */ diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c -index 3a4b699ae2..b587954508 100644 +index 3a4b699ae2..3fce50519f 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.c +++ b/dpdk/drivers/net/hns3/hns3_rss.c @@ -9,10 +9,8 @@ @@ -47136,7 +53292,7 @@ index 3a4b699ae2..b587954508 100644 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, -@@ -20,215 +18,301 @@ static const uint8_t hns3_hash_key[] = { +@@ -20,215 +18,299 @@ static const uint8_t hns3_hash_key[] = { 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA }; @@ -47343,8 +53499,7 @@ index 3a4b699ae2..b587954508 100644 + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | -+ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | -+ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER), ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D), + HNS3_RSS_TUPLE_IPV4_SCTP_M }, + + /* IPV6-FRAG */ @@ -47527,8 +53682,7 @@ index 3a4b699ae2..b587954508 100644 - BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) } + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | -+ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | -+ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER), ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S), + HNS3_RSS_TUPLE_IPV6_SCTP_M }, }; @@ -47594,7 +53748,7 @@ index 3a4b699ae2..b587954508 100644 ret = hns3_cmd_send(hw, &desc, 1); if (ret) { -@@ -236,34 +320,50 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) +@@ -236,34 +318,50 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) return ret; } } @@ -47629,7 +53783,8 @@ index 3a4b699ae2..b587954508 100644 + for (idx = 0; idx < max_bd_num; idx++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG, + true); -+ + +- req = (struct hns3_rss_input_tuple_cmd *)desc_tuple.data; + req->hash_config |= (idx << HNS3_RSS_HASH_KEY_OFFSET_B); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { @@ -47638,31 +53793,30 @@ index 3a4b699ae2..b587954508 100644 + return ret; + } -- req = (struct hns3_rss_input_tuple_cmd *)desc_tuple.data; +- req->tuple_field = +- rte_cpu_to_le_64(rss_config->rss_tuple_sets.rss_tuple_fields); + if (idx == 0) + *hash_algo = req->hash_config & HNS3_RSS_HASH_ALGO_MASK; -- req->tuple_field = -- rte_cpu_to_le_64(rss_config->rss_tuple_sets.rss_tuple_fields); +- ret = hns3_cmd_send(hw, &desc_tuple, 1); +- if (ret) +- hns3_err(hw, "Configure RSS input tuple mode failed %d", ret); + if (idx == max_bd_num - 1 && + (key_len % HNS3_RSS_HASH_KEY_NUM) != 0) + cur_key_size = key_len % HNS3_RSS_HASH_KEY_NUM; + else + cur_key_size = HNS3_RSS_HASH_KEY_NUM; -- ret = hns3_cmd_send(hw, &desc_tuple, 1); -- 
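/*
 * A standalone sketch (not from the patch) of the slicing arithmetic used
 * by the RSS key plumbing above: the key is carried HNS3_RSS_HASH_KEY_NUM
 * bytes at a time, one firmware descriptor per slice, with a short final
 * slice when the key length is not a multiple of the slice size. The slice
 * size below is illustrative.
 */
#include <stdio.h>

#define SLICE 16	/* bytes of key carried per descriptor */

static void walk_key_slices(int key_len)
{
	int max_bd_num = (key_len + SLICE - 1) / SLICE;	/* round up */
	int idx, cur;

	for (idx = 0; idx < max_bd_num; idx++) {
		if (idx == max_bd_num - 1 && (key_len % SLICE) != 0)
			cur = key_len % SLICE;	/* short tail slice */
		else
			cur = SLICE;
		printf("desc %d: offset %d, %d bytes\n",
		       idx, idx * SLICE, cur);
	}
}

int main(void)
{
	walk_key_slices(40);	/* 16 + 16 + 8, as for a 40-byte RSS key */
	return 0;
}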
if (ret) -- hns3_err(hw, "Configure RSS input tuple mode failed %d", ret); +- return ret; + cur_key = key + idx * HNS3_RSS_HASH_KEY_NUM; + memcpy(cur_key, req->hash_key, cur_key_size); + } - -- return ret; ++ + return 0; } /* -@@ -274,6 +374,7 @@ int +@@ -274,6 +372,7 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) { struct hns3_rss_indirection_table_cmd *req; @@ -47670,7 +53824,7 @@ index 3a4b699ae2..b587954508 100644 struct hns3_cmd_desc desc; uint8_t qid_msb_off; uint8_t qid_msb_val; -@@ -282,14 +383,20 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) +@@ -282,14 +381,20 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) int ret; req = (struct hns3_rss_indirection_table_cmd *)desc.data; @@ -47694,7 +53848,7 @@ index 3a4b699ae2..b587954508 100644 q_id = indir[i * HNS3_RSS_CFG_TBL_SIZE + j]; req->rss_result_l[j] = q_id & 0xff; -@@ -310,9 +417,53 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) +@@ -310,9 +415,53 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) } } @@ -47751,7 +53905,7 @@ index 3a4b699ae2..b587954508 100644 return 0; } -@@ -331,63 +482,134 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) +@@ -331,63 +480,134 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) } ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size); @@ -47926,7 +54080,7 @@ index 3a4b699ae2..b587954508 100644 } /* -@@ -403,59 +625,137 @@ int +@@ -403,59 +623,137 @@ int hns3_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { @@ -48096,7 +54250,7 @@ index 3a4b699ae2..b587954508 100644 /* * Get rss key and rss_hf types set of RSS hash configuration. * @param dev -@@ -471,19 +771,32 @@ hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, +@@ -471,19 +769,32 @@ hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; @@ -48135,7 +54289,7 @@ index 3a4b699ae2..b587954508 100644 } /* -@@ -523,12 +836,12 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -523,12 +834,12 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, idx = i / RTE_ETH_RETA_GROUP_SIZE; shift = i % RTE_ETH_RETA_GROUP_SIZE; if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) { @@ -48150,7 +54304,7 @@ index 3a4b699ae2..b587954508 100644 } if (reta_conf[idx].mask & (1ULL << shift)) -@@ -537,7 +850,13 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -537,7 +848,13 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, ret = hns3_set_rss_indir_table(hw, indirection_tbl, hw->rss_ind_tbl_size); @@ -48164,7 +54318,7 @@ index 3a4b699ae2..b587954508 100644 rte_spinlock_unlock(&hw->lock); return ret; } -@@ -559,10 +878,11 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, +@@ -559,10 +876,11 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, uint16_t reta_size) { struct hns3_adapter *hns = dev->data->dev_private; @@ -48177,7 +54331,7 @@ index 3a4b699ae2..b587954508 100644 if (reta_size != hw->rss_ind_tbl_size) { hns3_err(hw, "The size of hash lookup table configured (%u)" -@@ -571,44 +891,78 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, +@@ -571,44 +889,78 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, return -EINVAL; } rte_spinlock_lock(&hw->lock); @@ -48274,7 +54428,7 @@ index 3a4b699ae2..b587954508 100644 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_TC_MODE, false); for (i = 0; i < HNS3_MAX_TC_NUM; i++) { uint16_t mode = 0; -@@ -630,6 +984,52 @@ 
hns3_set_rss_tc_mode(struct hns3_hw *hw) +@@ -630,6 +982,52 @@ hns3_set_rss_tc_mode(struct hns3_hw *hw) return ret; } @@ -48327,7 +54481,7 @@ index 3a4b699ae2..b587954508 100644 static void hns3_rss_tuple_uninit(struct hns3_hw *hw) { -@@ -656,10 +1056,11 @@ hns3_rss_set_default_args(struct hns3_hw *hw) +@@ -656,10 +1054,11 @@ hns3_rss_set_default_args(struct hns3_hw *hw) int i; /* Default hash algorithm */ @@ -48342,7 +54496,7 @@ index 3a4b699ae2..b587954508 100644 /* Initialize RSS indirection table */ for (i = 0; i < hw->rss_ind_tbl_size; i++) -@@ -675,67 +1076,42 @@ hns3_config_rss(struct hns3_adapter *hns) +@@ -675,67 +1074,42 @@ hns3_config_rss(struct hns3_adapter *hns) struct hns3_hw *hw = &hns->hw; struct hns3_rss_conf *rss_cfg = &hw->rss_info; uint8_t *hash_key = rss_cfg->key; @@ -48432,7 +54586,7 @@ index 3a4b699ae2..b587954508 100644 } /* -@@ -753,5 +1129,5 @@ hns3_rss_uninit(struct hns3_adapter *hns) +@@ -753,5 +1127,5 @@ hns3_rss_uninit(struct hns3_adapter *hns) return; /* Disable RSS */ @@ -48440,10 +54594,10 @@ index 3a4b699ae2..b587954508 100644 + hw->rss_info.rss_hf = 0; } diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h -index 6f153a1b7b..5c0f0b75f0 100644 +index 6f153a1b7b..94c667dad6 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.h +++ b/dpdk/drivers/net/hns3/hns3_rss.h -@@ -7,25 +7,107 @@ +@@ -7,25 +7,105 @@ #include #include @@ -48503,7 +54657,6 @@ index 6f153a1b7b..5c0f0b75f0 100644 + HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, + HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, + HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, -+ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, + + /* IPV4 ENABLE FIELD */ + HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, @@ -48528,7 +54681,6 @@ index 6f153a1b7b..5c0f0b75f0 100644 + HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, + HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, + HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, -+ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, + + /* IPV6 ENABLE FIELD */ + HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, @@ -48550,12 +54702,12 @@ index 6f153a1b7b..5c0f0b75f0 100644 + +#define HNS3_RSS_TUPLE_IPV4_TCP_M GENMASK(3, 0) +#define HNS3_RSS_TUPLE_IPV4_UDP_M GENMASK(11, 8) -+#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(20, 16) ++#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(19, 16) +#define HNS3_RSS_TUPLE_IPV4_NONF_M GENMASK(25, 24) +#define HNS3_RSS_TUPLE_IPV4_FLAG_M GENMASK(27, 26) +#define HNS3_RSS_TUPLE_IPV6_TCP_M GENMASK(35, 32) +#define HNS3_RSS_TUPLE_IPV6_UDP_M GENMASK(43, 40) -+#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(52, 48) ++#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(51, 48) +#define HNS3_RSS_TUPLE_IPV6_NONF_M GENMASK(57, 56) +#define HNS3_RSS_TUPLE_IPV6_FLAG_M GENMASK(59, 58) @@ -48566,7 +54718,7 @@ index 6f153a1b7b..5c0f0b75f0 100644 #define HNS3_RSS_SET_BITMAP_MSK 0xffff #define HNS3_RSS_HASH_ALGO_TOEPLITZ 0 -@@ -33,20 +115,13 @@ +@@ -33,20 +113,13 @@ #define HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP 2 #define HNS3_RSS_HASH_ALGO_MASK 0xf @@ -48592,7 +54744,7 @@ index 6f153a1b7b..5c0f0b75f0 100644 /* * For IPv6 SCTP packets type, check whether the NIC hardware support * RSS hash using the src/dst port as the input tuple. 
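The HNS3_RSS_TUPLE_*_M definitions above are GENMASK-style bit ranges inside a 64-bit tuple-enable word; narrowing the SCTP masks from bits (20,16)/(52,48) to (19,16)/(51,48) drops the SCTP_VER bit from the tuple. For illustration only (DPDK and the driver provide their own definition), a common GENMASK form:

#include <stdint.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* e.g. GENMASK_ULL(19, 16) == 0xf0000ULL: the four IPV4-SCTP
 * tuple-enable bits, excluding the dropped SCTP_VER bit (20) */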
For Kunpeng920 -@@ -89,6 +164,8 @@ static inline uint32_t roundup_pow_of_two(uint32_t x) +@@ -89,6 +162,8 @@ static inline uint32_t roundup_pow_of_two(uint32_t x) return 1UL << fls(x - 1); } @@ -48601,7 +54753,7 @@ index 6f153a1b7b..5c0f0b75f0 100644 struct hns3_adapter; int hns3_dev_rss_hash_update(struct rte_eth_dev *dev, -@@ -107,10 +184,12 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, +@@ -107,10 +182,12 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, int hns3_rss_reset_indir_table(struct hns3_hw *hw); int hns3_config_rss(struct hns3_adapter *hns); void hns3_rss_uninit(struct hns3_adapter *hns); @@ -48620,7 +54772,7 @@ index 6f153a1b7b..5c0f0b75f0 100644 #endif /* _HNS3_RSS_H_ */ diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c -index f365daadf8..f841e44154 100644 +index f365daadf8..f817c18fc5 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx.c +++ b/dpdk/drivers/net/hns3/hns3_rxtx.c @@ -50,6 +50,8 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) @@ -48632,7 +54784,87 @@ index f365daadf8..f841e44154 100644 } for (i = 0; i < rxq->bulk_mbuf_num; i++) -@@ -587,7 +589,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) +@@ -84,10 +86,14 @@ hns3_rx_queue_release(void *queue) + struct hns3_rx_queue *rxq = queue; + if (rxq) { + hns3_rx_queue_release_mbufs(rxq); +- if (rxq->mz) ++ if (rxq->mz) { + rte_memzone_free(rxq->mz); +- if (rxq->sw_ring) ++ rxq->mz = NULL; ++ } ++ if (rxq->sw_ring) { + rte_free(rxq->sw_ring); ++ rxq->sw_ring = NULL; ++ } + rte_free(rxq); + } + } +@@ -98,12 +104,18 @@ hns3_tx_queue_release(void *queue) + struct hns3_tx_queue *txq = queue; + if (txq) { + hns3_tx_queue_release_mbufs(txq); +- if (txq->mz) ++ if (txq->mz) { + rte_memzone_free(txq->mz); +- if (txq->sw_ring) ++ txq->mz = NULL; ++ } ++ if (txq->sw_ring) { + rte_free(txq->sw_ring); +- if (txq->free) ++ txq->sw_ring = NULL; ++ } ++ if (txq->free) { + rte_free(txq->free); ++ txq->free = NULL; ++ } + rte_free(txq); + } + } +@@ -261,12 +273,27 @@ hns3_free_all_queues(struct rte_eth_dev *dev) + hns3_free_tx_queues(dev); + } + ++static int ++hns3_check_rx_dma_addr(struct hns3_hw *hw, uint64_t dma_addr) ++{ ++ uint64_t rem; ++ ++ rem = dma_addr & (hw->rx_dma_addr_align - 1); ++ if (rem > 0) { ++ hns3_err(hw, "The IO address of the beginning of the mbuf data " ++ "must be %u-byte aligned", hw->rx_dma_addr_align); ++ return -EINVAL; ++ } ++ return 0; ++} ++ + static int + hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) + { + struct rte_mbuf *mbuf; + uint64_t dma_addr; + uint16_t i; ++ int ret; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); +@@ -287,6 +314,12 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxq->rx_ring[i].addr = dma_addr; + rxq->rx_ring[i].rx.bd_base_info = 0; ++ ++ ret = hns3_check_rx_dma_addr(hw, dma_addr); ++ if (ret != 0) { ++ hns3_rx_queue_release_mbufs(rxq); ++ return ret; ++ } + } + + return 0; +@@ -587,7 +620,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) ret = hns3_cmd_send(hw, &desc, 1); if (ret) @@ -48641,16 +54873,45 @@ index f365daadf8..f841e44154 100644 return ret; } -@@ -776,7 +778,7 @@ hns3vf_reset_all_tqps(struct hns3_hw *hw) +@@ -687,13 +720,12 @@ hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) + static int + hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) + { +- uint8_t msg_data[2]; ++ 
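The hns3_check_rx_dma_addr() addition above relies on the usual power-of-two identity: addr & (align - 1) equals addr % align when align is a power of two, so a non-zero remainder means the mbuf data IO address is misaligned. A minimal sketch of that test:

#include <stdbool.h>
#include <stdint.h>

static bool
io_addr_aligned(uint64_t addr, uint32_t align)
{
        /* valid only when align is a power of two */
        return (addr & ((uint64_t)align - 1)) == 0;
}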
struct hns3_vf_to_pf_msg req; + int ret; + +- memcpy(msg_data, &queue_id, sizeof(uint16_t)); +- +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, +- sizeof(msg_data), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); ++ memcpy(req.data, &queue_id, sizeof(uint16_t)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.", + queue_id, ret); +@@ -771,15 +803,14 @@ static int + hns3vf_reset_all_tqps(struct hns3_hw *hw) + { + #define HNS3VF_RESET_ALL_TQP_DONE 1U ++ struct hns3_vf_to_pf_msg req; + uint8_t reset_status; +- uint8_t msg_data[2]; int ret; int i; - memset(msg_data, 0, sizeof(uint16_t)); -+ memset(msg_data, 0, sizeof(msg_data)); - ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, - sizeof(msg_data), true, &reset_status, - sizeof(reset_status)); -@@ -1644,7 +1646,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, +- sizeof(msg_data), true, &reset_status, +- sizeof(reset_status)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, ++ &reset_status, sizeof(reset_status)); + if (ret) { + hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret); + return ret; +@@ -1644,7 +1675,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q); if (ret) { @@ -48659,7 +54920,7 @@ index f365daadf8..f841e44154 100644 goto cfg_fake_tx_q_fail; } -@@ -1763,7 +1765,8 @@ hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size, +@@ -1763,7 +1794,8 @@ hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size, return -EINVAL; } @@ -48669,7 +54930,7 @@ index f365daadf8..f841e44154 100644 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH + HNS3_DEFAULT_RX_BURST; if (nb_desc < min_vec_bds || -@@ -1793,6 +1796,12 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, +@@ -1793,6 +1825,12 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, return -EINVAL; } @@ -48682,7 +54943,7 @@ index f365daadf8..f841e44154 100644 if (conf->rx_drop_en == 0) hns3_warn(hw, "if no descriptors available, packets are always " "dropped and rx_drop_en (1) is fixed on"); -@@ -1903,7 +1912,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, +@@ -1903,7 +1941,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdevice driver. 
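The mailbox rework visible here replaces ad-hoc msg_data[] byte buffers with a typed request that is zeroed and stamped with code/subcode before sending. A sketch of the shape implied by the hns3vf_mbx_setup()/hns3vf_mbx_send() calls; the real struct hns3_vf_to_pf_msg lives in hns3_mbx.h, and the field names and payload size below are assumptions:

#include <stdint.h>
#include <string.h>

struct vf_to_pf_msg_sketch {
        uint8_t code;     /* mailbox opcode */
        uint8_t subcode;  /* opcode qualifier */
        uint8_t data[14]; /* payload size is an assumption */
};

static void
mbx_setup_sketch(struct vf_to_pf_msg_sketch *req, uint8_t code,
                 uint8_t subcode)
{
        memset(req, 0, sizeof(*req)); /* zeroed payload by default */
        req->code = code;
        req->subcode = subcode;
}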
And the * related PF configuration is delivered through the mailbox and finally @@ -48691,7 +54952,7 @@ index f365daadf8..f841e44154 100644 */ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state == -@@ -2000,7 +2009,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev) +@@ -2000,7 +2038,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev) RTE_PTYPE_INNER_L4_TCP, RTE_PTYPE_INNER_L4_SCTP, RTE_PTYPE_INNER_L4_ICMP, @@ -48700,7 +54961,7 @@ index f365daadf8..f841e44154 100644 RTE_PTYPE_TUNNEL_NVGRE, RTE_PTYPE_UNKNOWN }; -@@ -2097,7 +2106,7 @@ hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl) +@@ -2097,7 +2135,7 @@ hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl) tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT; tbl->ol4table[0] = RTE_PTYPE_UNKNOWN; @@ -48709,7 +54970,7 @@ index f365daadf8..f841e44154 100644 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE; } -@@ -2388,14 +2397,14 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) +@@ -2388,14 +2426,13 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) return rte_mbuf_raw_alloc(rxq->mb_pool); } @@ -48723,12 +54984,11 @@ index f365daadf8..f841e44154 100644 - uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp); - mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST; -+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | -+ RTE_MBUF_F_RX_IEEE1588_TMST; ++ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST; if (hns3_timestamp_rx_dynflag > 0) { *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, rte_mbuf_timestamp_t *) = timestamp; -@@ -2469,7 +2478,8 @@ hns3_recv_pkts_simple(void *rx_queue, +@@ -2469,7 +2506,8 @@ hns3_recv_pkts_simple(void *rx_queue, rxe->mbuf = nmb; if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) @@ -48738,7 +54998,7 @@ index f365daadf8..f841e44154 100644 dma_addr = rte_mbuf_data_iova_default(nmb); rxdp->addr = rte_cpu_to_le_64(dma_addr); -@@ -2540,6 +2550,7 @@ hns3_recv_scattered_pkts(void *rx_queue, +@@ -2540,6 +2578,7 @@ hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf *rxm; struct rte_eth_dev *dev; uint32_t bd_base_info; @@ -48746,7 +55006,7 @@ index f365daadf8..f841e44154 100644 uint32_t l234_info; uint32_t gro_size; uint32_t ol_info; -@@ -2649,6 +2660,9 @@ hns3_recv_scattered_pkts(void *rx_queue, +@@ -2649,6 +2688,9 @@ hns3_recv_scattered_pkts(void *rx_queue, rxm = rxe->mbuf; rxe->mbuf = nmb; @@ -48756,16 +55016,27 @@ index f365daadf8..f841e44154 100644 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->rx.bd_base_info = 0; rxdp->addr = dma_addr; -@@ -2671,7 +2685,7 @@ hns3_recv_scattered_pkts(void *rx_queue, +@@ -2670,8 +2712,9 @@ hns3_recv_scattered_pkts(void *rx_queue, + continue; } ++ first_seg->ol_flags = 0; if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) - hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp); + hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp); /* * The last buffer of the received packet. 
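The PTP handling reworked above carries the Rx timestamp in an mbuf dynamic field rather than a fixed mbuf member. A minimal sketch of the standard registration and store path from rte_mbuf_dyn.h, which is the same mechanism the driver's hns3_timestamp_dynfield_offset uses:

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int timestamp_dynfield_offset = -1;
static uint64_t timestamp_rx_dynflag;

static int
register_rx_timestamp(void)
{
        /* allocates the shared Rx timestamp field and flag once */
        return rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
                                                  &timestamp_rx_dynflag);
}

static void
set_rx_timestamp(struct rte_mbuf *m, uint64_t ts)
{
        *RTE_MBUF_DYNFIELD(m, timestamp_dynfield_offset,
                           rte_mbuf_timestamp_t *) = ts;
        m->ol_flags |= timestamp_rx_dynflag;
}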
packet len from -@@ -2791,6 +2805,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, +@@ -2699,7 +2742,7 @@ hns3_recv_scattered_pkts(void *rx_queue, + + first_seg->port = rxq->port_id; + first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); +- first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH; ++ first_seg->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; + if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { + first_seg->hash.fdir.hi = + rte_le_to_cpu_16(rxd.rx.fd_id); +@@ -2791,6 +2834,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, { hns3_recv_scattered_pkts, "Scalar Scattered" }, { hns3_recv_pkts_vec, "Vector Neon" }, { hns3_recv_pkts_vec_sve, "Vector Sve" }, @@ -48773,7 +55044,7 @@ index f365daadf8..f841e44154 100644 }; eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; -@@ -3043,7 +3058,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, +@@ -3043,7 +3087,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdev driver. And the * related PF configuration is delivered through the mailbox and finally @@ -48782,7 +55053,7 @@ index f365daadf8..f841e44154 100644 */ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state == -@@ -3077,51 +3092,40 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, +@@ -3077,51 +3121,40 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, return 0; } @@ -48860,7 +55131,7 @@ index f365daadf8..f841e44154 100644 } int -@@ -3131,6 +3135,9 @@ hns3_config_gro(struct hns3_hw *hw, bool en) +@@ -3131,6 +3164,9 @@ hns3_config_gro(struct hns3_hw *hw, bool en) struct hns3_cmd_desc desc; int ret; @@ -48870,7 +55141,7 @@ index f365daadf8..f841e44154 100644 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); req = (struct hns3_cfg_gro_status_cmd *)desc.data; -@@ -3208,7 +3215,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, +@@ -3208,7 +3244,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, * in Tx direction based on hns3 network engine. So when the number of * VLANs in the packets represented by rxm plus the number of VLAN * offload by hardware such as PVID etc, exceeds two, the packets will @@ -48879,7 +55150,7 @@ index f365daadf8..f841e44154 100644 * by hardware. When the PF PVID is enabled by calling the API function * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3 * PF kernel ether driver, the outer VLAN tag will always be the PVID. -@@ -3393,7 +3400,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, +@@ -3393,7 +3429,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, /* * The inner l2 length of mbuf is the sum of outer l4 length, * tunneling header length and inner l2 length for a tunnel @@ -48888,7 +55159,7 @@ index f365daadf8..f841e44154 100644 * length is contained in the field of outer L4 length. * Therefore, driver need to calculate the outer L4 length and * inner L2 length. 
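The burst-mode query extended above matches the active burst function against a table of {handler, name} pairs. A generic sketch of that lookup idiom (burst_mode_lookup() is illustrative, not a driver symbol):

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

struct burst_info {
        eth_rx_burst_t pkt_burst; /* candidate handler */
        const char *info;         /* human-readable name */
};

static int
burst_mode_lookup(const struct burst_info *tbl, size_t n,
                  eth_rx_burst_t cur, struct rte_eth_burst_mode *mode)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (tbl[i].pkt_burst == cur) {
                        snprintf(mode->info, sizeof(mode->info), "%s",
                                 tbl[i].info);
                        return 0;
                }
        }
        return -EINVAL; /* unknown burst function */
}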
-@@ -3409,7 +3416,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, +@@ -3409,7 +3445,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE); /* @@ -48897,7 +55168,7 @@ index f365daadf8..f841e44154 100644 * fill the NVGRE header length to the outer L4 field. */ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, -@@ -3452,7 +3459,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, +@@ -3452,7 +3488,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, * mbuf, but for hns3 descriptor, it is contained in the outer L4. So, * there is a need that switching between them. To avoid multiple * calculations, the length of the L2 header include the outer and @@ -48906,25 +55177,121 @@ index f365daadf8..f841e44154 100644 */ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) { /* -@@ -3632,7 +3639,7 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, - if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { - struct rte_udp_hdr *udp_hdr; - /* +@@ -3620,58 +3656,6 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, + return false; + } + +-static bool +-hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +- uint32_t *l4_proto) +-{ +- struct rte_ipv4_hdr *ipv4_hdr; +- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, +- m->outer_l2_len); +- if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) +- ipv4_hdr->hdr_checksum = 0; +- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { +- struct rte_udp_hdr *udp_hdr; +- /* - * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo -+ * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo - * header for TSO packets - */ - if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) -@@ -3657,7 +3664,7 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, - if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { - struct rte_udp_hdr *udp_hdr; - /* +- * header for TSO packets +- */ +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) +- return true; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + m->outer_l3_len); +- udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +- +- return true; +- } +- *l4_proto = ipv4_hdr->next_proto_id; +- return false; +-} +- +-static bool +-hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +- uint32_t *l4_proto) +-{ +- struct rte_ipv6_hdr *ipv6_hdr; +- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, +- m->outer_l2_len); +- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { +- struct rte_udp_hdr *udp_hdr; +- /* - * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo -+ * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo - * header for TSO packets - */ - if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) -@@ -4044,7 +4051,7 @@ static inline void +- * header for TSO packets +- */ +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) +- return true; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + m->outer_l3_len); +- udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +- +- return true; +- } +- *l4_proto = ipv6_hdr->proto; +- return false; +-} +- + static void + hns3_outer_header_cksum_prepare(struct rte_mbuf *m) + { +@@ -3679,29 +3663,38 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m) + uint32_t paylen, hdr_len, l4_proto; + struct rte_udp_hdr *udp_hdr; + +- if (!(ol_flags & 
(RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6))) ++ if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) && ++ ((ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) || ++ !(ol_flags & RTE_MBUF_F_TX_TCP_SEG))) + return; + + if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { +- if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto)) +- return; ++ struct rte_ipv4_hdr *ipv4_hdr; ++ ++ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, ++ m->outer_l2_len); ++ l4_proto = ipv4_hdr->next_proto_id; + } else { +- if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto)) +- return; ++ struct rte_ipv6_hdr *ipv6_hdr; ++ ++ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, ++ m->outer_l2_len); ++ l4_proto = ipv6_hdr->proto; + } + ++ if (l4_proto != IPPROTO_UDP) ++ return; ++ + /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ +- if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { +- hdr_len = m->l2_len + m->l3_len + m->l4_len; +- hdr_len += m->outer_l2_len + m->outer_l3_len; +- paylen = m->pkt_len - hdr_len; +- if (paylen <= m->tso_segsz) +- return; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + +- m->outer_l3_len); +- udp_hdr->dgram_cksum = 0; +- } ++ hdr_len = m->l2_len + m->l3_len + m->l4_len; ++ hdr_len += m->outer_l2_len + m->outer_l3_len; ++ paylen = m->pkt_len - hdr_len; ++ if (paylen <= m->tso_segsz) ++ return; ++ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, ++ m->outer_l2_len + ++ m->outer_l3_len); ++ udp_hdr->dgram_cksum = 0; + } + + static int +@@ -4044,7 +4037,7 @@ static inline void hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { #define PER_LOOP_NUM 4 @@ -48933,7 +55300,7 @@ index f365daadf8..f841e44154 100644 uint64_t dma_addr; uint32_t i; -@@ -4055,6 +4062,8 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +@@ -4055,6 +4048,8 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; @@ -48942,7 +55309,7 @@ index f365daadf8..f841e44154 100644 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } } -@@ -4062,7 +4071,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +@@ -4062,7 +4057,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) static inline void hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { @@ -48951,7 +55318,7 @@ index f365daadf8..f841e44154 100644 uint64_t dma_addr; dma_addr = rte_mbuf_data_iova(*pkts); -@@ -4071,6 +4080,8 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +@@ -4071,6 +4066,8 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; @@ -48960,7 +55327,7 @@ index f365daadf8..f841e44154 100644 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } -@@ -4127,14 +4138,16 @@ hns3_xmit_pkts_simple(void *tx_queue, +@@ -4127,14 +4124,16 @@ hns3_xmit_pkts_simple(void *tx_queue, } txq->tx_bd_ready -= nb_pkts; @@ -48980,7 +55347,7 @@ index f365daadf8..f841e44154 100644 hns3_write_txq_tail_reg(txq, nb_pkts); -@@ -4158,8 +4171,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -4158,8 +4157,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) uint16_t nb_tx; uint16_t i; @@ -48990,7 +55357,7 @@ index f365daadf8..f841e44154 100644 tx_next_use = 
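Condensed view of the tunnel-TSO fix above: once the outer L4 protocol is known to be UDP, the outer UDP checksum is cleared only when the payload actually spans more than one segment, with the header lengths taken from the mbuf exactly as in the patch. A minimal self-contained sketch:

#include <rte_mbuf.h>
#include <rte_udp.h>

static void
clear_outer_udp_cksum(struct rte_mbuf *m)
{
        uint32_t hdr_len = m->l2_len + m->l3_len + m->l4_len +
                           m->outer_l2_len + m->outer_l3_len;
        uint32_t paylen = m->pkt_len - hdr_len;
        struct rte_udp_hdr *uh;

        if (paylen <= m->tso_segsz)
                return; /* single segment: checksum left as prepared */
        uh = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
                                     m->outer_l2_len + m->outer_l3_len);
        uh->dgram_cksum = 0; /* HW recomputes it per segment */
}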
txq->next_to_use; tx_bd_max = txq->nb_tx_desc; -@@ -4174,14 +4186,10 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -4174,14 +4172,10 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) nb_buf = tx_pkt->nb_segs; if (nb_buf > txq->tx_bd_ready) { @@ -49009,7 +55376,7 @@ index f365daadf8..f841e44154 100644 } /* -@@ -4287,24 +4295,31 @@ int +@@ -4287,24 +4281,31 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) { @@ -49056,7 +55423,7 @@ index f365daadf8..f841e44154 100644 } static bool -@@ -4312,21 +4327,12 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) +@@ -4312,21 +4313,12 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; @@ -49078,7 +55445,7 @@ index f365daadf8..f841e44154 100644 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \ -@@ -4340,27 +4346,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) +@@ -4340,27 +4332,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) uint64_t tx_offload = dev->data->dev_conf.txmode.offloads; @@ -49116,7 +55483,7 @@ index f365daadf8..f841e44154 100644 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) return hns3_xmit_pkts_vec; -@@ -4368,19 +4377,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) +@@ -4368,19 +4363,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) return hns3_xmit_pkts_vec_sve; if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed) return hns3_xmit_pkts_simple; @@ -49137,7 +55504,7 @@ index f365daadf8..f841e44154 100644 return hns3_xmit_pkts; } -@@ -4408,11 +4412,26 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev) +@@ -4408,11 +4398,26 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev) rx_mode.info, tx_mode.info); } @@ -49166,7 +55533,7 @@ index f365daadf8..f841e44154 100644 if (hns->hw.adapter_state == HNS3_NIC_STARTED && __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -4420,15 +4439,17 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +@@ -4420,15 +4425,17 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; eth_dev->tx_pkt_burst = hw->set_link_down ? 
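The Tx path selection rewritten in this area honours a user hint only when the corresponding implementation is actually usable, then falls back in preference order. A sketch of that dispatch; the hint names and helper are illustrative, not driver symbols:

#include <stdbool.h>

typedef unsigned short (*tx_burst_fn)(void *, void **, unsigned short);

enum io_hint { HINT_NONE, HINT_VEC, HINT_SIMPLE };

static tx_burst_fn
pick_tx_burst(enum io_hint hint, bool vec_ok, bool simple_ok,
              tx_burst_fn vec, tx_burst_fn simple, tx_burst_fn common)
{
        if (hint == HINT_VEC && vec_ok)
                return vec;
        if (hint == HINT_SIMPLE && simple_ok)
                return simple;
        if (hint == HINT_NONE && vec_ok)
                return vec; /* prefer vector when no hint is given */
        if (hint == HINT_NONE && simple_ok)
                return simple;
        return common; /* full-featured scalar fallback */
}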
hns3_dummy_rxtx_burst : @@ -49187,7 +55554,7 @@ index f365daadf8..f841e44154 100644 } void -@@ -4478,6 +4499,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4478,6 +4485,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -49201,7 +55568,7 @@ index f365daadf8..f841e44154 100644 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); if (ret) { hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", -@@ -4486,6 +4514,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4486,6 +4500,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) return ret; } @@ -49211,7 +55578,7 @@ index f365daadf8..f841e44154 100644 ret = hns3_init_rxq(hns, rx_queue_id); if (ret) { hns3_err(hw, "fail to init Rx queue %u, ret = %d.", -@@ -4524,6 +4555,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4524,6 +4541,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -49225,7 +55592,7 @@ index f365daadf8..f841e44154 100644 hns3_enable_rxq(rxq, false); hns3_rx_queue_release_mbufs(rxq); -@@ -4546,6 +4584,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4546,6 +4570,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -49239,7 +55606,7 @@ index f365daadf8..f841e44154 100644 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX); if (ret) { hns3_err(hw, "fail to reset Tx queue %u, ret = %d.", -@@ -4572,6 +4617,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4572,6 +4603,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -49253,7 +55620,7 @@ index f365daadf8..f841e44154 100644 hns3_enable_txq(txq, false); hns3_tx_queue_release_mbufs(txq); /* -@@ -4591,22 +4643,43 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4591,22 +4629,43 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) static int hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt) { @@ -49276,10 +55643,10 @@ index f365daadf8..f841e44154 100644 - if (hns3_tx_free_useless_buffer(txq) != 0) + for (idx = 0; idx < free_cnt; idx++) { + if (next_to_clean == next_to_use) - break; ++ break; + if (desc->tx.tp_fe_sc_vld_ra_ri & + rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B))) -+ break; + break; + if (tx_pkt->mbuf != NULL) { + rte_pktmbuf_free_seg(tx_pkt->mbuf); + tx_pkt->mbuf = NULL; @@ -49293,19 +55660,19 @@ index f365daadf8..f841e44154 100644 + desc = txq->tx_ring; + next_to_clean = 0; + } -+ } -+ -+ if (idx > 0) { -+ txq->next_to_clean = next_to_clean; -+ txq->tx_bd_ready = tx_bd_ready; } - return RTE_MIN(idx, free_cnt); ++ if (idx > 0) { ++ txq->next_to_clean = next_to_clean; ++ txq->tx_bd_ready = tx_bd_ready; ++ } ++ + return (int)idx; } int -@@ -4729,6 +4802,11 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) +@@ -4729,6 +4788,11 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) { dev->tx_pkt_burst = hns3_dummy_rxtx_burst; dev->tx_pkt_prepare = NULL; @@ -49317,7 +55684,7 @@ index f365daadf8..f841e44154 100644 rte_wmb(); /* Disable tx datapath on secondary process. 
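The hns3_tx_done_cleanup_full() rewrite in this region walks from next_to_clean toward next_to_use, frees mbufs only for descriptors the hardware has released (VLD bit clear), and wraps at the ring end. A generic sketch of that ring walk under those assumptions (all names illustrative):

#include <stdint.h>

struct entry_sketch { void *mbuf; };

static uint32_t
ring_cleanup(struct entry_sketch *ring, uint16_t ring_size,
             uint16_t *next_to_clean, uint16_t next_to_use,
             uint32_t free_cnt, int (*hw_done)(uint16_t),
             void (*free_buf)(void *))
{
        uint16_t ntc = *next_to_clean;
        uint32_t n;

        for (n = 0; n < free_cnt; n++) {
                if (ntc == next_to_use || !hw_done(ntc))
                        break; /* caught up, or still owned by HW */
                if (ring[ntc].mbuf != NULL) {
                        free_buf(ring[ntc].mbuf);
                        ring[ntc].mbuf = NULL;
                }
                if (++ntc == ring_size)
                        ntc = 0; /* wrap around the ring */
        }
        *next_to_clean = ntc;
        return n; /* number of descriptors cleaned */
}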
*/ hns3_mp_req_stop_tx(dev); -@@ -4739,9 +4817,40 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) +@@ -4739,9 +4803,40 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) void hns3_start_tx_datapath(struct rte_eth_dev *dev) { @@ -50518,7 +56885,7 @@ index e1089b6bd0..d969164014 100644 .node_shaper_update = hns3_tm_node_shaper_update_wrap, }; diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c -index c0bfff43ee..d829467f41 100644 +index c0bfff43ee..8571d1419d 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev.c +++ b/dpdk/drivers/net/i40e/i40e_ethdev.c @@ -2447,10 +2447,21 @@ i40e_dev_start(struct rte_eth_dev *dev) @@ -50573,7 +56940,21 @@ index c0bfff43ee..d829467f41 100644 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_i40e_hw_port_strings[i].offset); -@@ -5544,7 +5555,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) +@@ -3746,8 +3757,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | + dev_info->tx_queue_offload_capa; ++ if (hw->mac.type == I40E_MAC_X722) { ++ dev_info->tx_offload_capa |= ++ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ } ++ + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +@@ -5544,7 +5559,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) &ets_sla_config, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, @@ -50582,7 +56963,7 @@ index c0bfff43ee..d829467f41 100644 hw->aq.asq_last_status); return ret; } -@@ -5975,14 +5986,16 @@ i40e_vsi_setup(struct i40e_pf *pf, +@@ -5975,14 +5990,16 @@ i40e_vsi_setup(struct i40e_pf *pf, } } @@ -50606,7 +56987,7 @@ index c0bfff43ee..d829467f41 100644 } /* Get VSI BW information */ -@@ -6822,7 +6835,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) +@@ -6822,7 +6839,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) * @param handle * Pointer to interrupt handle. 
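The i40e xstats code touched below reads counters through a name-plus-offset table laid over the hardware stats struct, e.g. *(uint64_t *)((char *)hw_stats + strings[i].offset). A sketch of that offset-table idiom:

#include <stdint.h>

struct xstat_name_off {
        const char *name;    /* exported statistic name */
        unsigned int offset; /* byte offset inside the stats struct */
};

static uint64_t
xstat_read(const void *stats, const struct xstat_name_off *ent)
{
        /* each counter lives at a fixed byte offset in the struct */
        return *(const uint64_t *)((const char *)stats + ent->offset);
}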
* @param param @@ -50615,7 +56996,7 @@ index c0bfff43ee..d829467f41 100644 * * @return * void -@@ -9719,7 +9732,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, +@@ -9719,7 +9736,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, return 0; } @@ -50742,7 +57123,7 @@ index df2a5aaecc..8caedea14e 100644 } diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c -index c9676caab5..b4cdefafa5 100644 +index c9676caab5..5c7f445018 100644 --- a/dpdk/drivers/net/i40e/i40e_flow.c +++ b/dpdk/drivers/net/i40e/i40e_flow.c @@ -1991,6 +1991,14 @@ i40e_flow_parse_attr(const struct rte_flow_attr *attr, @@ -50760,7 +57141,17 @@ index c9676caab5..b4cdefafa5 100644 /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, -@@ -3043,7 +3051,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, +@@ -2457,8 +2465,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + + ether_type = rte_be_to_cpu_16(eth_spec->type); + +- if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || +- ether_type == RTE_ETHER_TYPE_IPV4 || ++ if (ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6 || + ether_type == i40e_get_outer_vlan(dev)) { + rte_flow_error_set(error, EINVAL, +@@ -3043,7 +3050,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -50769,7 +57160,7 @@ index c9676caab5..b4cdefafa5 100644 return -rte_errno; } -@@ -3142,8 +3150,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, +@@ -3142,8 +3149,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, /* Check if the input set is valid */ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, input_set) != 0) { @@ -50851,10 +57242,26 @@ index ccb3924a5f..15d9ff868f 100644 goto send_msg; } diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c -index e4cb33dc3c..8a277dfe31 100644 +index e4cb33dc3c..20215a8e1e 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx.c -@@ -304,10 +304,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags, +@@ -295,6 +295,15 @@ i40e_parse_tunneling_params(uint64_t ol_flags, + */ + *cd_tunneling |= (tx_offload.l2_len >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; ++ ++ /** ++ * Calculate the tunneling UDP checksum (only supported with X722). ++ * Shall be set only if L4TUNT = 01b and EIPT is not zero ++ */ ++ if ((*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK) && ++ (*cd_tunneling & I40E_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) ++ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + } + + static inline void +@@ -304,10 +313,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags, union i40e_tx_offload tx_offload) { /* Set MACLEN */ @@ -50866,7 +57273,7 @@ index e4cb33dc3c..8a277dfe31 100644 *td_offset |= (tx_offload.l2_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; -@@ -609,7 +606,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) +@@ -609,7 +615,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) rxdp[i].read.pkt_addr = dma_addr; } @@ -50875,7 +57282,7 @@ index e4cb33dc3c..8a277dfe31 100644 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); rxq->rx_free_trigger = -@@ -995,7 +992,7 @@ i40e_recv_scattered_pkts(void *rx_queue, +@@ -995,7 +1001,7 @@ i40e_recv_scattered_pkts(void *rx_queue, * threshold of the queue, advance the Receive Descriptor Tail (RDT) * register. 
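The X722 tunneling-checksum hunk below sets the descriptor's L4T_CS bit only when three conditions line up: an outer IP type is programmed in the context descriptor, the tunnel type is UDP, and the application requested RTE_MBUF_F_TX_OUTER_UDP_CKSUM. A boolean sketch of that gating, with the mask values passed in by the caller:

#include <stdbool.h>
#include <stdint.h>

static bool
need_tunnel_udp_cksum(uint64_t cd_tunneling, uint64_t ol_flags,
                      uint64_t ext_ip_mask, uint64_t udp_tun_bit,
                      uint64_t outer_udp_cksum_flag)
{
        return (cd_tunneling & ext_ip_mask) != 0 &&
               (cd_tunneling & udp_tun_bit) != 0 &&
               (ol_flags & outer_udp_cksum_flag) != 0;
}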
Update the RDT with the value of the last processed RX * descriptor minus 1, to guarantee that the RDT register is never @@ -50884,7 +57291,7 @@ index e4cb33dc3c..8a277dfe31 100644 * from the hardware point of view. */ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); -@@ -1171,9 +1168,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -1171,9 +1177,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Fill in tunneling parameters if necessary */ cd_tunneling_params = 0; @@ -50898,7 +57305,7 @@ index e4cb33dc3c..8a277dfe31 100644 /* Enable checksum offloading */ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) i40e_txd_enable_checksum(ol_flags, &td_cmd, -@@ -1467,7 +1467,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq, +@@ -1467,7 +1476,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq, i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); @@ -50907,7 +57314,7 @@ index e4cb33dc3c..8a277dfe31 100644 if (txq->tx_tail > txq->tx_next_rs) { txr[txq->tx_next_rs].cmd_type_offset_bsz |= rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << -@@ -1697,7 +1697,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -1697,7 +1706,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) } if (rxq->rx_deferred_start) @@ -50916,7 +57323,7 @@ index e4cb33dc3c..8a277dfe31 100644 rx_queue_id); err = i40e_alloc_rx_queue_mbufs(rxq); -@@ -1706,7 +1706,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -1706,7 +1715,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) return err; } @@ -50925,7 +57332,7 @@ index e4cb33dc3c..8a277dfe31 100644 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE); -@@ -1771,7 +1771,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -1771,7 +1780,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) } if (txq->tx_deferred_start) @@ -50934,7 +57341,7 @@ index e4cb33dc3c..8a277dfe31 100644 tx_queue_id); /* -@@ -1917,6 +1917,12 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, +@@ -1917,6 +1926,12 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, if (use_def_burst_func) ad->rx_bulk_alloc_allowed = false; i40e_set_rx_function(dev); @@ -50947,7 +57354,7 @@ index e4cb33dc3c..8a277dfe31 100644 return 0; } else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) { PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor" -@@ -1930,7 +1936,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, +@@ -1930,7 +1945,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "Can't use default burst."); return -EINVAL; } @@ -50956,7 +57363,7 @@ index e4cb33dc3c..8a277dfe31 100644 if (!dev->data->scattered_rx && use_scattered_rx) { PMD_DRV_LOG(ERR, "Scattered rx is required."); return -EINVAL; -@@ -2014,7 +2020,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, +@@ -2014,7 +2029,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_deferred_start = rx_conf->rx_deferred_start; rxq->offloads = offloads; @@ -50965,7 +57372,7 @@ index e4cb33dc3c..8a277dfe31 100644 len = I40E_MAX_RING_DESC; /** -@@ -2322,7 +2328,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -2322,7 +2337,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, */ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? 
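The Tx threshold defaults in the hunk below cap tx_rs_thresh so that the RS and free thresholds together never exceed the ring size. A sketch of that arithmetic:

#include <stdint.h>

static uint16_t
default_rs_thresh(uint16_t nb_desc, uint16_t tx_free_thresh,
                  uint16_t dflt_rs)
{
        /* keep rs + free thresholds within the ring */
        if ((uint32_t)dflt_rs + tx_free_thresh > nb_desc)
                return nb_desc - tx_free_thresh;
        return dflt_rs;
}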
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); @@ -50974,7 +57381,7 @@ index e4cb33dc3c..8a277dfe31 100644 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; if (tx_conf->tx_rs_thresh > 0) -@@ -2904,6 +2910,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) +@@ -2904,6 +2919,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) rxq->rx_hdr_len = 0; rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -50983,7 +57390,7 @@ index e4cb33dc3c..8a277dfe31 100644 rxq->hs_mode = i40e_header_split_none; break; } -@@ -2991,7 +2999,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq) +@@ -2991,7 +3008,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq) if (rxq->max_pkt_len > buf_size) dev_data->scattered_rx = 1; @@ -51468,6 +57875,81 @@ index d0bf86dfba..3d2cbe03fb 100644 } static inline void +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +index c73b2a321b..4e94ccbdd6 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +@@ -276,46 +276,30 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- do { +- const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); +- +- raw_desc6_7 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); +- raw_desc4_5 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); +- raw_desc2_3 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); +- raw_desc0_1 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); +- } while (0); ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i 
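The Rx buffer sizing in this area floors the mempool data room to the hardware's buffer-length granularity via RTE_ALIGN_FLOOR(buf_size, 1 << I40E_RXQ_CTX_DBUFF_SHIFT). Equivalent power-of-two arithmetic:

#include <stdint.h>

static uint16_t
align_floor_pow2(uint16_t len, uint16_t align)
{
        /* align is a power of two; drop the low-order remainder bits */
        return (uint16_t)(len & ~(align - 1));
}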
raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++ ++ const __m256i raw_desc6_7 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c index 2e8a3f0df6..2ad9a920a1 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c @@ -51649,7 +58131,7 @@ index 0bb5698583..58c3afe567 100644 struct iavf_devargs devargs; }; diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c -index 377d7bc7a6..8f4c6de0ac 100644 +index 377d7bc7a6..4885a86ad3 100644 --- a/dpdk/drivers/net/iavf/iavf_ethdev.c +++ b/dpdk/drivers/net/iavf/iavf_ethdev.c @@ -125,6 +125,8 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, @@ -51697,7 +58179,17 @@ index 377d7bc7a6..8f4c6de0ac 100644 ret = iavf_configure_rss_lut(adapter); if (ret) return ret; -@@ -613,6 +624,9 @@ iavf_dev_configure(struct rte_eth_dev *dev) +@@ -592,7 +603,8 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev) + RTE_ETH_VLAN_FILTER_MASK | + RTE_ETH_VLAN_EXTEND_MASK); + if (err) { +- PMD_DRV_LOG(ERR, "Failed to update vlan offload"); ++ PMD_DRV_LOG(INFO, ++ "VLAN offloading is not supported, or offloading was refused by the PF"); + return err; + } + +@@ -613,6 +625,9 @@ iavf_dev_configure(struct rte_eth_dev *dev) dev->data->nb_tx_queues); int ret; @@ -51707,7 +58199,18 @@ index 377d7bc7a6..8f4c6de0ac 100644 ad->rx_bulk_alloc_allowed = true; /* Initialize to TRUE. If any of Rx queues doesn't meet the * vector Rx/Tx preconditions, it will be reset. 
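The AVX2 rework here drops the 32-byte descriptor loads: only 16-byte loads are atomic for descriptor reads on this hardware, so each descriptor is read with _mm_load_si128() (with rte_compiler_barrier() between loads in the real code to keep the compiler from reordering or merging them) and pairs are then stitched into 256-bit registers. Minimal illustration of the stitch, assuming 16-byte-aligned descriptors:

#include <immintrin.h>

static __m256i
load_desc_pair(const void *lo_desc, const void *hi_desc)
{
        const __m128i lo = _mm_load_si128((const __m128i *)lo_desc);
        const __m128i hi = _mm_load_si128((const __m128i *)hi_desc);

        /* stitch two atomic 16-byte loads into one 256-bit register */
        return _mm256_inserti128_si256(_mm256_castsi128_si256(lo), hi, 1);
}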
-@@ -831,7 +845,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, +@@ -665,9 +680,7 @@ iavf_dev_configure(struct rte_eth_dev *dev) + vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; + } + +- ret = iavf_dev_init_vlan(dev); +- if (ret) +- PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret); ++ iavf_dev_init_vlan(dev); + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (iavf_init_rss(ad) != 0) { +@@ -831,7 +844,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, "vector %u are mapping to all Rx queues", vf->msix_base); } else { @@ -51716,7 +58219,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 * multi interrupts, then the vec is from 1 */ vf->nb_msix = -@@ -896,28 +910,38 @@ iavf_start_queues(struct rte_eth_dev *dev) +@@ -896,28 +909,38 @@ iavf_start_queues(struct rte_eth_dev *dev) struct iavf_rx_queue *rxq; struct iavf_tx_queue *txq; int i; @@ -51765,7 +58268,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 } static int -@@ -932,6 +956,9 @@ iavf_dev_start(struct rte_eth_dev *dev) +@@ -932,6 +955,9 @@ iavf_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -51775,7 +58278,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 adapter->stopped = 0; vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD; -@@ -1009,6 +1036,12 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1009,6 +1035,12 @@ iavf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -51788,7 +58291,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) && dev->data->dev_conf.intr_conf.rxq != 0) rte_intr_disable(intr_handle); -@@ -1016,8 +1049,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1016,8 +1048,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) if (adapter->stopped == 1) return 0; @@ -51797,7 +58300,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 /* Disable the interrupt for Rx */ rte_intr_efd_disable(intr_handle); /* Rx interrupt vector mapping free */ -@@ -1030,8 +1061,7 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1030,8 +1060,7 @@ iavf_dev_stop(struct rte_eth_dev *dev) iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, false); @@ -51807,7 +58310,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 adapter->stopped = 1; dev->data->dev_started = 0; -@@ -1046,6 +1076,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1046,6 +1075,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = &adapter->vf; @@ -51817,7 +58320,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV; dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV; dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN; -@@ -1066,6 +1099,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1066,6 +1098,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER | RTE_ETH_RX_OFFLOAD_VLAN_FILTER | @@ -51825,7 +58328,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 RTE_ETH_RX_OFFLOAD_RSS_HASH; dev_info->tx_offload_capa = -@@ -1114,6 +1148,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1114,6 +1147,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .nb_max = IAVF_MAX_RING_DESC, .nb_min = IAVF_MIN_RING_DESC, .nb_align = IAVF_ALIGN_RING_DESC, @@ -51834,7 +58337,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 }; return 0; -@@ -1284,8 +1320,12 @@ 
iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -1284,8 +1319,12 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); @@ -51847,7 +58350,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { err = iavf_add_del_vlan_v2(adapter, vlan_id, on); if (err) -@@ -1299,6 +1339,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -1299,6 +1338,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) err = iavf_add_del_vlan(adapter, vlan_id, on); if (err) return -EIO; @@ -51871,7 +58374,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 return 0; } -@@ -1362,6 +1419,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) +@@ -1362,6 +1418,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct rte_eth_conf *dev_conf = &dev->data->dev_conf; int err; @@ -51881,7 +58384,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) return iavf_dev_vlan_offload_set_v2(dev, mask); -@@ -1394,6 +1454,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -1394,6 +1453,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, uint16_t i, idx, shift; int ret; @@ -51891,7 +58394,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1420,7 +1483,7 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -1420,7 +1482,7 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, } rte_memcpy(vf->rss_lut, lut, reta_size); @@ -51900,7 +58403,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 ret = iavf_configure_rss_lut(adapter); if (ret) /* revert back */ rte_memcpy(vf->rss_lut, lut, reta_size); -@@ -1439,6 +1502,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev, +@@ -1439,6 +1501,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev, struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); uint16_t i, idx, shift; @@ -51910,7 +58413,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1492,6 +1558,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev, +@@ -1492,6 +1557,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev, adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf; @@ -51920,7 +58423,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1545,6 +1614,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, +@@ -1545,6 +1613,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); @@ -51930,7 +58433,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1792,6 +1864,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +@@ -1792,6 +1863,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); uint16_t msix_intr; @@ -51940,7 +58443,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle, queue_id); if (msix_intr == IAVF_MISC_VEC_ID) { -@@ -1833,7 +1908,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t 
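Each iavf control-path op patched in this region now bails out early once the port has been closed. A trivial sketch of that guard (struct and helper are illustrative):

#include <errno.h>
#include <stdbool.h>

struct adapter_state { bool closed; }; /* stand-in for the iavf adapter */

static int
guard_closed(const struct adapter_state *ad)
{
        return ad->closed ? -EIO : 0; /* refuse ops after close */
}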
queue_id) +@@ -1833,7 +1907,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START), @@ -51949,7 +58452,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 IAVF_WRITE_FLUSH(hw); return 0; -@@ -2412,8 +2487,11 @@ static int +@@ -2412,8 +2486,11 @@ static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops) { @@ -51963,7 +58466,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 *ops = &iavf_flow_ops; return 0; -@@ -2539,26 +2617,43 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2539,26 +2616,43 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) ret = iavf_security_ctx_create(adapter); if (ret) { PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance"); @@ -52010,7 +58513,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; -@@ -2582,7 +2677,16 @@ iavf_dev_close(struct rte_eth_dev *dev) +@@ -2582,7 +2676,16 @@ iavf_dev_close(struct rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -52027,7 +58530,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 iavf_flow_flush(dev, NULL); iavf_flow_uninit(adapter); -@@ -2595,6 +2699,18 @@ iavf_dev_close(struct rte_eth_dev *dev) +@@ -2595,6 +2698,18 @@ iavf_dev_close(struct rte_eth_dev *dev) if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) iavf_config_promisc(adapter, false, false); @@ -52046,7 +58549,7 @@ index 377d7bc7a6..8f4c6de0ac 100644 iavf_shutdown_adminq(hw); if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { /* disable uio intr before callback unregister */ -@@ -2636,6 +2752,7 @@ iavf_dev_close(struct rte_eth_dev *dev) +@@ -2636,6 +2751,7 @@ iavf_dev_close(struct rte_eth_dev *dev) * the bus master bit will not be disabled, and this call will have no * effect. 
*/ @@ -52162,7 +58665,7 @@ index 2befa125ac..01e7b8724d 100644 } diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c -index 884169e061..9ab09778e2 100644 +index 884169e061..1fbffac6bc 100644 --- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c +++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c @@ -69,7 +69,7 @@ struct iavf_security_session { @@ -52325,7 +58828,20 @@ index 884169e061..9ab09778e2 100644 */ for (i = 0; i < ((sizeof(iavf_security_capabilities) / sizeof(iavf_security_capabilities[0])) - 1); i++) { -@@ -1545,29 +1548,90 @@ iavf_security_ctx_destroy(struct iavf_adapter *adapter) +@@ -1494,8 +1497,11 @@ iavf_security_ctx_create(struct iavf_adapter *adapter) + if (adapter->security_ctx == NULL) { + adapter->security_ctx = rte_malloc("iavf_security_ctx", + sizeof(struct iavf_security_ctx), 0); +- if (adapter->security_ctx == NULL) ++ if (adapter->security_ctx == NULL) { ++ rte_free(adapter->vf.eth_dev->security_ctx); ++ adapter->vf.eth_dev->security_ctx = NULL; + return -ENOMEM; ++ } + } + + return 0; +@@ -1545,29 +1551,90 @@ iavf_security_ctx_destroy(struct iavf_adapter *adapter) if (iavf_sctx == NULL) return -ENODEV; @@ -52423,7 +58939,7 @@ index 884169e061..9ab09778e2 100644 } #define IAVF_IPSEC_INSET_ESP (\ -@@ -1623,6 +1687,7 @@ struct iavf_ipsec_flow_item { +@@ -1623,6 +1690,7 @@ struct iavf_ipsec_flow_item { struct rte_ipv6_hdr ipv6_hdr; }; struct rte_udp_hdr udp_hdr; @@ -52431,7 +58947,7 @@ index 884169e061..9ab09778e2 100644 }; static void -@@ -1735,6 +1800,7 @@ iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev, +@@ -1735,6 +1803,7 @@ iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev, parse_udp_item((const struct rte_flow_item_udp *) pattern[2].spec, &ipsec_flow->udp_hdr); @@ -52439,7 +58955,7 @@ index 884169e061..9ab09778e2 100644 ipsec_flow->spi = ((const struct rte_flow_item_esp *) pattern[3].spec)->hdr.spi; -@@ -1790,6 +1856,7 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, +@@ -1790,6 +1859,7 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, struct rte_flow_error *error) { struct iavf_ipsec_flow_item *ipsec_flow = meta; @@ -52447,7 +58963,7 @@ index 884169e061..9ab09778e2 100644 if (!ipsec_flow) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, -@@ -1798,30 +1865,33 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, +@@ -1798,30 +1868,33 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, } if (ipsec_flow->is_ipv4) { @@ -53243,10 +59759,96 @@ index b610176b30..c428082080 100644 static inline void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq, diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c -index b6ef1aea77..2479c18210 100644 +index b6ef1aea77..a93e1d7e2a 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c -@@ -622,43 +622,88 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, +@@ -192,62 +192,30 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); 
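The security-context allocation fix in this region unwinds the already-created outer context when the inner allocation fails, instead of leaking it. A generic two-stage-allocation sketch using malloc/free as stand-ins for rte_malloc/rte_free:

#include <stdlib.h>

struct two_stage {
        void *outer;
        void *inner;
};

static int
two_stage_alloc(struct two_stage *s, size_t outer_sz, size_t inner_sz)
{
        s->outer = malloc(outer_sz);
        if (s->outer == NULL)
                return -1;
        s->inner = malloc(inner_sz);
        if (s->inner == NULL) {
                free(s->outer); /* undo stage one on failure */
                s->outer = NULL;
                return -1;
        }
        return 0;
}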
+- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- { +- const __m128i raw_desc7 = +- _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = +- _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = +- _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = +- _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = +- _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = +- _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = +- _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = +- _mm_load_si128((void *)(rxdp + 0)); ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); + +- raw_desc6_7 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc6), +- raw_desc7, 1); +- raw_desc4_5 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc4), +- raw_desc5, 1); +- raw_desc2_3 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc2), +- raw_desc3, 1); +- raw_desc0_1 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc0), +- raw_desc1, 1); +- } ++ const __m256i raw_desc6_7 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; +@@ -622,43 +590,88 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, * bit13 is for VLAN indication. */ const __m256i flags_mask = @@ -53367,7 +59969,7 @@ index b6ef1aea77..2479c18210 100644 /** * data to be shuffled by result of flag mask, shifted down 12. 
 * If RSS(bit12)/VLAN(bit13) are set,
@@ -836,6 +849,15 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
 				_mm256_srli_epi32(flag_bits, 4));
 		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+
+		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+		__m256i l4_outer_flags =
+				_mm256_and_si256(l3_l4_flags, l4_outer_mask);
+		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
 		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
 
 		/* set rss and vlan flags */
@@ -1020,7 +1042,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 			_mm256_set_epi8(0, 0, 0, 0,
 					0, 0, 0, 0,
 					0, 0, 0, 0,
@@ -54362,6 +60964,38 @@ index 145b059837..930a67f517 100644
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    "OP_INLINE_IPSEC_CRYPTO");
diff --git a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h
index 253b971dfd..2bf322610b 100644
--- a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1654,8 +1654,8 @@ struct ice_aqc_link_topo_addr {
 #define ICE_AQC_LINK_TOPO_HANDLE_M	(0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
 /* Used to decode the handle field */
 #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M	BIT(9)
-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM	BIT(9)
-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ	0
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM	0
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ	BIT(9)
 #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S	0
 /* In case of a Mezzanine type */
 #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \
diff --git a/dpdk/drivers/net/ice/base/ice_bitops.h b/dpdk/drivers/net/ice/base/ice_bitops.h
index 21ec2014e1..7e61fec5ef 100644
--- a/dpdk/drivers/net/ice/base/ice_bitops.h
+++ b/dpdk/drivers/net/ice/base/ice_bitops.h
@@ -408,10 +408,10 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits)
 * Note that this function assumes it is operating on a bitmap declared using
 * ice_declare_bitmap.
+ */ +-static inline int ++static inline u16 + ice_bitmap_hweight(ice_bitmap_t *bm, u16 size) + { +- int count = 0; ++ u16 count = 0; + u16 bit = 0; + + while (size > (bit = ice_find_next_bit(bm, size, bit))) { diff --git a/dpdk/drivers/net/ice/base/ice_bst_tcam.c b/dpdk/drivers/net/ice/base/ice_bst_tcam.c index 306f62db2a..74a2de869e 100644 --- a/dpdk/drivers/net/ice/base/ice_bst_tcam.c @@ -54394,7 +61028,7 @@ index 306f62db2a..74a2de869e 100644 struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw) { diff --git a/dpdk/drivers/net/ice/base/ice_common.c b/dpdk/drivers/net/ice/base/ice_common.c -index ae55bebaa2..c6fc32fbc6 100644 +index ae55bebaa2..49bbf3b461 100644 --- a/dpdk/drivers/net/ice/base/ice_common.c +++ b/dpdk/drivers/net/ice/base/ice_common.c @@ -475,7 +475,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) @@ -54436,6 +61070,42 @@ index ae55bebaa2..c6fc32fbc6 100644 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params), ICE_NONDMA_TO_NONDMA); cmd->start_address = CPU_TO_LE32(start_address); +@@ -4676,7 +4674,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + + ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); + +- dest_byte &= ~(mask); ++ dest_byte &= mask; + + dest_byte >>= shift_width; + +@@ -4716,7 +4714,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_word &= ~(CPU_TO_LE16(mask)); ++ src_word &= CPU_TO_LE16(mask); + + /* get the data back into host order before shifting */ + dest_word = LE16_TO_CPU(src_word); +@@ -4767,7 +4765,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_dword &= ~(CPU_TO_LE32(mask)); ++ src_dword &= CPU_TO_LE32(mask); + + /* get the data back into host order before shifting */ + dest_dword = LE32_TO_CPU(src_dword); +@@ -4818,7 +4816,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_qword &= ~(CPU_TO_LE64(mask)); ++ src_qword &= CPU_TO_LE64(mask); + + /* get the data back into host order before shifting */ + dest_qword = LE64_TO_CPU(src_qword); @@ -5588,7 +5586,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); @@ -54468,7 +61138,7 @@ index cb6c5ba182..3d630757f8 100644 /** diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c -index 395787806b..3918169001 100644 +index 395787806b..d1a1503631 100644 --- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c +++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.c @@ -1785,8 +1785,12 @@ static enum ice_prof_type @@ -54514,6 +61184,26 @@ index 395787806b..3918169001 100644 u8 *prot, u16 *off) { struct ice_fv_word *fv_ext; +@@ -3369,16 +3368,14 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK(mask_idx); +- val = (idx << GLQF_HMASK_MSK_INDEX_S) & +- GLQF_HMASK_MSK_INDEX_M; +- val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; ++ val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; ++ val |= ((u32)mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK(mask_idx); + val = (idx << GLQF_FDMASK_MSK_INDEX_S) & + 
GLQF_FDMASK_MSK_INDEX_M; +- val |= (mask << GLQF_FDMASK_MASK_S) & +- GLQF_FDMASK_MASK_M; ++ val |= ((u32)mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.h b/dpdk/drivers/net/ice/base/ice_flex_pipe.h index 23ba45564a..ab897de4f3 100644 --- a/dpdk/drivers/net/ice/base/ice_flex_pipe.h @@ -54638,6 +61328,24 @@ index 2136e0393b..9a76d21ce5 100644 */ struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw) { +diff --git a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h +index 2b6f039dcb..88cc721e9a 100644 +--- a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h ++++ b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h +@@ -1074,10 +1074,9 @@ struct ice_tx_ctx_desc { + __le64 qw1; + }; + +-#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */ +-#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */ +-#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */ +-#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */ ++#define ICE_TX_GCS_DESC_START 0 /* 8 BITS */ ++#define ICE_TX_GCS_DESC_OFFSET 8 /* 4 BITS */ ++#define ICE_TX_GCS_DESC_TYPE 12 /* 3 BITS */ + + #define ICE_TXD_CTX_QW1_DTYPE_S 0 + #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) diff --git a/dpdk/drivers/net/ice/base/ice_metainit.c b/dpdk/drivers/net/ice/base/ice_metainit.c index 3f9e5d6833..a899125b37 100644 --- a/dpdk/drivers/net/ice/base/ice_metainit.c @@ -54682,6 +61390,92 @@ index 4e9ab5c13a..814001c49e 100644 */ struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw) { +diff --git a/dpdk/drivers/net/ice/base/ice_nvm.c b/dpdk/drivers/net/ice/base/ice_nvm.c +index 7860006206..48e0d418e2 100644 +--- a/dpdk/drivers/net/ice/base/ice_nvm.c ++++ b/dpdk/drivers/net/ice/base/ice_nvm.c +@@ -72,6 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + enum ice_status status; + u32 inlen = *length; + u32 bytes_read = 0; ++ int retry_cnt = 0; + bool last_cmd; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); +@@ -106,11 +107,24 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + offset, (u16)read_size, + data + bytes_read, last_cmd, + read_shadow_ram, NULL); +- if (status) +- break; +- +- bytes_read += read_size; +- offset += read_size; ++ if (status) { ++ if (hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY || ++ retry_cnt > ICE_SQ_SEND_MAX_EXECUTE) ++ break; ++ ice_debug(hw, ICE_DBG_NVM, ++ "NVM read EBUSY error, retry %d\n", ++ retry_cnt + 1); ++ ice_release_nvm(hw); ++ msleep(ICE_SQ_SEND_DELAY_TIME_MS); ++ status = ice_acquire_nvm(hw, ICE_RES_READ); ++ if (status) ++ break; ++ retry_cnt++; ++ } else { ++ bytes_read += read_size; ++ offset += read_size; ++ retry_cnt = 0; ++ } + } while (!last_cmd); + + *length = bytes_read; +@@ -429,7 +443,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + { + enum ice_status status; + u16 pfa_len, pfa_ptr; +- u16 next_tlv; ++ u32 next_tlv; + + status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); + if (status != ICE_SUCCESS) { +@@ -445,25 +459,30 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + * of TLVs to find the requested one. 
+ */ + next_tlv = pfa_ptr + 1; +- while (next_tlv < pfa_ptr + pfa_len) { ++ while (next_tlv < ((u32)pfa_ptr + pfa_len)) { + u16 tlv_sub_module_type; + u16 tlv_len; + + /* Read TLV type */ +- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); ++ status = ice_read_sr_word(hw, (u16)next_tlv, ++ &tlv_sub_module_type); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); + break; + } + /* Read TLV length */ +- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); ++ status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); + break; + } ++ if (tlv_len > pfa_len) { ++ ice_debug(hw, ICE_DBG_INIT, "Invalid TLV length.\n"); ++ return ICE_ERR_INVAL_SIZE; ++ } + if (tlv_sub_module_type == module_type) { + if (tlv_len) { +- *module_tlv = next_tlv; ++ *module_tlv = (u16)next_tlv; + *module_tlv_len = tlv_len; + return ICE_SUCCESS; + } diff --git a/dpdk/drivers/net/ice/base/ice_parser.c b/dpdk/drivers/net/ice/base/ice_parser.c index 9b106baff0..4d490dda7b 100644 --- a/dpdk/drivers/net/ice/base/ice_parser.c @@ -55029,10 +61823,46 @@ index 97c41cb586..9807e688b1 100644 struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw) { diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c -index 2620892c9e..f3655a820f 100644 +index 2620892c9e..2944a6fd48 100644 --- a/dpdk/drivers/net/ice/base/ice_sched.c +++ b/dpdk/drivers/net/ice/base/ice_sched.c -@@ -1394,11 +1394,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) +@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, + if (!root) + return ICE_ERR_NO_MEMORY; + +- /* coverity[suspicious_sizeof] */ + root->children = (struct ice_sched_node **) +- ice_calloc(hw, hw->max_children[0], sizeof(*root)); ++ ice_calloc(hw, hw->max_children[0], sizeof(*root->children)); + if (!root->children) { + ice_free(hw, root); + return ICE_ERR_NO_MEMORY; +@@ -180,9 +179,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, + if (!node) + return ICE_ERR_NO_MEMORY; + if (hw->max_children[layer]) { +- /* coverity[suspicious_sizeof] */ + node->children = (struct ice_sched_node **) +- ice_calloc(hw, hw->max_children[layer], sizeof(*node)); ++ ice_calloc(hw, hw->max_children[layer], ++ sizeof(*node->children)); + if (!node->children) { + ice_free(hw, node); + return ICE_ERR_NO_MEMORY; +@@ -1030,11 +1029,11 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, + u32 *first_teid_ptr = first_node_teid; + u16 new_num_nodes = num_nodes; + enum ice_status status = ICE_SUCCESS; ++ u32 temp; + + *num_nodes_added = 0; + while (*num_nodes_added < num_nodes) { + u16 max_child_nodes, num_added = 0; +- u32 temp; + + status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, + layer, new_num_nodes, +@@ -1394,11 +1393,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; @@ -55044,7 +61874,7 @@ index 2620892c9e..f3655a820f 100644 switch (clk_src) { case PSM_CLK_SRC_367_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; -@@ -1412,11 +1407,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) +@@ -1412,11 +1406,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) case PSM_CLK_SRC_390_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; break; @@ -55062,7 +61892,7 @@ index 2620892c9e..f3655a820f 100644 } } -@@ -3830,8 +3826,8 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 
bw) +@@ -3830,8 +3825,8 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) u16 wakeup = 0; /* Get the wakeup integer value */ @@ -55073,7 +61903,7 @@ index 2620892c9e..f3655a820f 100644 if (wakeup_int > 63) { wakeup = (u16)((1 << 15) | wakeup_int); } else { -@@ -3839,18 +3835,18 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) +@@ -3839,18 +3834,18 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) * Convert Integer value to a constant multiplier */ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; @@ -55097,7 +61927,7 @@ index 2620892c9e..f3655a820f 100644 wakeup |= (u16)(wakeup_int << 9); wakeup |= (u16)(0x1ff & wakeup_f_int); } -@@ -3882,20 +3878,20 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, +@@ -3882,20 +3877,20 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, return status; /* Bytes per second from Kbps */ @@ -55123,7 +61953,7 @@ index 2620892c9e..f3655a820f 100644 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); -@@ -4774,12 +4770,12 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, +@@ -4774,12 +4769,12 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, case ICE_AGG_TYPE_Q: /* The current implementation allows single queue to modify */ @@ -55155,7 +61985,7 @@ index 1441b5f191..22ed09d2b1 100644 u32 bw; /* in Kbps */ u16 rl_multiplier; diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c -index 1fee790c25..7c6a258255 100644 +index 1fee790c25..dd21ef4bd1 100644 --- a/dpdk/drivers/net/ice/base/ice_switch.c +++ b/dpdk/drivers/net/ice/base/ice_switch.c @@ -2303,7 +2303,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, @@ -55167,6 +61997,24 @@ index 1fee790c25..7c6a258255 100644 vlan = true; fv_word_idx++; } +@@ -4334,7 +4334,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + u16 vsi_handle_arr[2]; + + /* A rule already exists with the new VSI being added */ +- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) ++ if (cur_fltr->vsi_handle == new_fltr->vsi_handle) + return ICE_ERR_ALREADY_EXISTS; + + vsi_handle_arr[0] = cur_fltr->vsi_handle; +@@ -4382,7 +4382,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + + /* A rule already exists with the new VSI being added */ + if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) +- return ICE_SUCCESS; ++ return ICE_ERR_ALREADY_EXISTS; + + /* Update the previously created VSI list set with + * the new VSI ID passed in @@ -4855,7 +4855,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list, if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; @@ -55208,6 +62056,15 @@ index 1fee790c25..7c6a258255 100644 }; /** +@@ -7112,7 +7121,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles, + ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); + + /* return number of free indexes */ +- return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); ++ return ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); + } + + /** @@ -7320,7 +7329,6 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, @@ -55282,7 +62139,15 @@ index 1fee790c25..7c6a258255 100644 case ICE_SW_TUN_PROFID_IPV6_ESP: case ICE_SW_TUN_PROFID_IPV6_AH: case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3: -@@ -7863,6 +7883,15 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem 
*lkups, +@@ -7818,6 +7838,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + enum ice_status status = ICE_SUCCESS; + struct ice_sw_recipe *rm; + u8 i; ++ u16 cnt; + + if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt) + return ICE_ERR_PARAM; +@@ -7863,6 +7884,15 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); @@ -55298,7 +62163,7 @@ index 1fee790c25..7c6a258255 100644 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); if (status) goto err_unroll; -@@ -8776,7 +8805,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, +@@ -8776,7 +8806,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) @@ -55863,7 +62728,7 @@ index b9fcfc80ad..af281f069a 100644 + } +} diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c -index 13a7a9702a..5e84894e5f 100644 +index 13a7a9702a..ef24a21239 100644 --- a/dpdk/drivers/net/ice/ice_ethdev.c +++ b/dpdk/drivers/net/ice/ice_ethdev.c @@ -1264,7 +1264,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev) @@ -55884,7 +62749,15 @@ index 13a7a9702a..5e84894e5f 100644 * Currently vsi->nb_qps means it. * Correct it if any change. */ -@@ -2316,6 +2316,9 @@ ice_dev_init(struct rte_eth_dev *dev) +@@ -1687,6 +1687,7 @@ ice_pf_setup(struct ice_pf *pf) + } + + pf->main_vsi = vsi; ++ rte_spinlock_init(&pf->link_lock); + + return 0; + } +@@ -2316,6 +2317,9 @@ ice_dev_init(struct rte_eth_dev *dev) pf->supported_rxdid = ice_get_supported_rxdid(hw); @@ -55894,7 +62767,7 @@ index 13a7a9702a..5e84894e5f 100644 return 0; err_flow_init: -@@ -2456,12 +2459,17 @@ ice_dev_close(struct rte_eth_dev *dev) +@@ -2456,12 +2460,17 @@ ice_dev_close(struct rte_eth_dev *dev) return 0; /* Since stop will make link down, then the link event will be @@ -55915,7 +62788,7 @@ index 13a7a9702a..5e84894e5f 100644 ret = ice_dev_stop(dev); if (!ad->is_safe_mode) -@@ -2493,10 +2501,6 @@ ice_dev_close(struct rte_eth_dev *dev) +@@ -2493,10 +2502,6 @@ ice_dev_close(struct rte_eth_dev *dev) /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); @@ -55926,7 +62799,7 @@ index 13a7a9702a..5e84894e5f 100644 return ret; } -@@ -3195,7 +3199,8 @@ static int ice_init_rss(struct ice_pf *pf) +@@ -3195,7 +3200,8 @@ static int ice_init_rss(struct ice_pf *pf) rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf; nb_q = dev_data->nb_rx_queues; @@ -55936,7 +62809,7 @@ index 13a7a9702a..5e84894e5f 100644 vsi->rss_lut_size = pf->hash_lut_size; if (nb_q == 0) { -@@ -3235,7 +3240,11 @@ static int ice_init_rss(struct ice_pf *pf) +@@ -3235,7 +3241,11 @@ static int ice_init_rss(struct ice_pf *pf) RTE_MIN(rss_conf->rss_key_len, vsi->rss_key_size)); @@ -55949,7 +62822,42 @@ index 13a7a9702a..5e84894e5f 100644 ret = ice_aq_set_rss_key(hw, vsi->idx, &key); if (ret) goto out; -@@ -3486,6 +3495,8 @@ ice_get_init_link_status(struct rte_eth_dev *dev) +@@ -3467,17 +3477,31 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev) + return 0; + } + ++static enum ice_status ++ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse, ++ struct ice_link_status *link) ++{ ++ struct ice_hw *hw = ICE_PF_TO_HW(pf); ++ int ret; ++ ++ rte_spinlock_lock(&pf->link_lock); ++ ++ ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL); ++ ++ rte_spinlock_unlock(&pf->link_lock); ++ ++ return ret; ++} ++ + static void + ice_get_init_link_status(struct rte_eth_dev *dev) + { +- struct 
ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; + struct ice_link_status link_status; + int ret; + +- ret = ice_aq_get_link_info(hw->port_info, enable_lse, +- &link_status, NULL); ++ ret = ice_get_link_info_safe(pf, enable_lse, &link_status); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get link info"); + pf->init_link_up = false; +@@ -3486,6 +3510,8 @@ ice_get_init_link_status(struct rte_eth_dev *dev) if (link_status.link_info & ICE_AQ_LINK_UP) pf->init_link_up = true; @@ -55958,7 +62866,7 @@ index 13a7a9702a..5e84894e5f 100644 } static int -@@ -3556,6 +3567,16 @@ ice_dev_start(struct rte_eth_dev *dev) +@@ -3556,6 +3582,16 @@ ice_dev_start(struct rte_eth_dev *dev) } } @@ -55975,7 +62883,7 @@ index 13a7a9702a..5e84894e5f 100644 /* program Rx queues' context in hardware*/ for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { ret = ice_rx_queue_start(dev, nb_rxq); -@@ -3576,7 +3597,7 @@ ice_dev_start(struct rte_eth_dev *dev) +@@ -3576,7 +3612,7 @@ ice_dev_start(struct rte_eth_dev *dev) goto rx_err; } @@ -55984,7 +62892,7 @@ index 13a7a9702a..5e84894e5f 100644 if (ice_rxq_intr_setup(dev)) return -EIO; -@@ -3603,8 +3624,8 @@ ice_dev_start(struct rte_eth_dev *dev) +@@ -3603,8 +3639,8 @@ ice_dev_start(struct rte_eth_dev *dev) ice_dev_set_link_up(dev); @@ -55995,7 +62903,20 @@ index 13a7a9702a..5e84894e5f 100644 pf->adapter_stopped = false; -@@ -3751,6 +3772,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -3709,7 +3745,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | ++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO; + dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; + } + +@@ -3751,6 +3791,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .nb_max = ICE_MAX_RING_DESC, .nb_min = ICE_MIN_RING_DESC, .nb_align = ICE_ALIGN_RING_DESC, @@ -56004,18 +62925,30 @@ index 13a7a9702a..5e84894e5f 100644 }; dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | -@@ -3816,8 +3839,8 @@ ice_atomic_write_link_status(struct rte_eth_dev *dev, +@@ -3816,9 +3858,9 @@ ice_atomic_write_link_status(struct rte_eth_dev *dev, static int ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) { -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ +- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); +#define CHECK_INTERVAL 50 /* 50ms */ +#define MAX_REPEAT_TIME 40 /* 2s (40 * 50ms) in total */ - struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct ice_link_status link_status; struct rte_eth_link link, old; -@@ -4622,10 +4645,8 @@ ice_rss_hash_update(struct rte_eth_dev *dev, + int status; +@@ -3832,8 +3874,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) + + do { + /* Get link status information from hardware */ +- status = ice_aq_get_link_info(hw->port_info, enable_lse, +- &link_status, NULL); ++ status = ice_get_link_info_safe(pf, enable_lse, &link_status); + if (status != ICE_SUCCESS) { + 
link.link_speed = RTE_ETH_SPEED_NUM_100M; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; +@@ -4622,10 +4663,8 @@ ice_rss_hash_update(struct rte_eth_dev *dev, if (status) return status; @@ -56027,7 +62960,7 @@ index 13a7a9702a..5e84894e5f 100644 /* RSS hash configuration */ ice_rss_hash_set(pf, rss_conf->rss_hf); -@@ -5395,7 +5416,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -5395,7 +5434,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, count++; } @@ -56036,7 +62969,7 @@ index 13a7a9702a..5e84894e5f 100644 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { xstats[count].value = *(uint64_t *)((char *)hw_stats + -@@ -5426,7 +5447,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, +@@ -5426,7 +5465,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, count++; } @@ -56045,7 +62978,7 @@ index 13a7a9702a..5e84894e5f 100644 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name, sizeof(xstats_names[count].name)); -@@ -5454,6 +5475,8 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, +@@ -5454,6 +5493,8 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, { int ret = 0; struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -56054,7 +62987,7 @@ index 13a7a9702a..5e84894e5f 100644 if (udp_tunnel == NULL) return -EINVAL; -@@ -5461,6 +5484,9 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, +@@ -5461,6 +5502,9 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, switch (udp_tunnel->prot_type) { case RTE_ETH_TUNNEL_TYPE_VXLAN: ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port); @@ -56064,7 +62997,7 @@ index 13a7a9702a..5e84894e5f 100644 break; default: PMD_DRV_LOG(ERR, "Invalid tunnel type"); -@@ -5478,6 +5504,8 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, +@@ -5478,6 +5522,8 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, { int ret = 0; struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -56073,7 +63006,7 @@ index 13a7a9702a..5e84894e5f 100644 if (udp_tunnel == NULL) return -EINVAL; -@@ -5485,6 +5513,9 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, +@@ -5485,6 +5531,9 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, switch (udp_tunnel->prot_type) { case RTE_ETH_TUNNEL_TYPE_VXLAN: ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0); @@ -56084,10 +63017,21 @@ index 13a7a9702a..5e84894e5f 100644 default: PMD_DRV_LOG(ERR, "Invalid tunnel type"); diff --git a/dpdk/drivers/net/ice/ice_ethdev.h b/dpdk/drivers/net/ice/ice_ethdev.h -index 2e3e45f3d7..ac56c3cc60 100644 +index 2e3e45f3d7..6209575a9b 100644 --- a/dpdk/drivers/net/ice/ice_ethdev.h +++ b/dpdk/drivers/net/ice/ice_ethdev.h -@@ -531,6 +531,9 @@ struct ice_adapter { +@@ -472,6 +472,10 @@ struct ice_pf { + uint64_t old_tx_bytes; + uint64_t supported_rxdid; /* bitmap for supported RXDID */ + uint64_t rss_hf; ++ /* lock prevent race condition between lsc interrupt handler ++ * and link status update during dev_start. 
++ */ ++ rte_spinlock_t link_lock; + }; + + #define ICE_MAX_QUEUE_NUM 2048 +@@ -531,6 +535,9 @@ struct ice_adapter { uint64_t time_hw; struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS]; @@ -56330,10 +63274,10 @@ index c673feb7a6..4bd0dce0ac 100644 } diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c -index afbb357fa3..52646e9408 100644 +index afbb357fa3..94b104fb36 100644 --- a/dpdk/drivers/net/ice/ice_hash.c +++ b/dpdk/drivers/net/ice/ice_hash.c -@@ -653,13 +653,15 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, +@@ -653,12 +653,14 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, const struct rte_flow_item_raw *raw_spec, *raw_mask; struct ice_parser_profile prof; struct ice_parser_result rslt; @@ -56343,29 +63287,60 @@ index afbb357fa3..52646e9408 100644 - uint8_t spec_len, pkt_len; uint8_t tmp_val = 0; uint8_t tmp_c = 0; - int i, j; - -+ if (ad->psr == NULL) -+ return -rte_errno; +- int i, j; ++ int i, j, ret = 0; + ++ if (ad->psr == NULL) ++ return -ENOTSUP; + raw_spec = item->spec; raw_mask = item->mask; +@@ -675,8 +677,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + return -ENOMEM; -@@ -713,11 +715,8 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + msk_buf = rte_zmalloc(NULL, pkt_len, 0); +- if (!msk_buf) ++ if (!msk_buf) { ++ rte_free(pkt_buf); + return -ENOMEM; ++ } + + /* convert string to int array */ + for (i = 0, j = 0; i < spec_len; i += 2, j++) { +@@ -713,21 +717,22 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, msk_buf[j] = tmp_val * 16 + tmp_c - '0'; } - if (ice_parser_create(&ad->hw, &psr)) - return -rte_errno; - if (ice_parser_run(psr, pkt_buf, pkt_len, &rslt)) -+ if (ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt)) - return -rte_errno; +- return -rte_errno; - ice_parser_destroy(psr); ++ ret = ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt); ++ if (ret) ++ goto free_mem; - if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf, - pkt_len, ICE_BLK_RSS, true, &prof)) +- if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf, +- pkt_len, ICE_BLK_RSS, true, &prof)) +- return -rte_errno; ++ ret = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, ++ pkt_len, ICE_BLK_RSS, true, &prof); ++ if (ret) ++ goto free_mem; + + rte_memcpy(&meta->raw.prof, &prof, sizeof(prof)); + ++free_mem: + rte_free(pkt_buf); + rte_free(msk_buf); +- return 0; ++ ++ return ret; + } + + static void diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c -index f6d8564ab8..7578bac03e 100644 +index f6d8564ab8..9995561fe3 100644 --- a/dpdk/drivers/net/ice/ice_rxtx.c +++ b/dpdk/drivers/net/ice/ice_rxtx.c @@ -163,6 +163,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq, @@ -56712,13 +63687,15 @@ index f6d8564ab8..7578bac03e 100644 case RTE_MBUF_F_TX_TUNNEL_GTP: case RTE_MBUF_F_TX_TUNNEL_GENEVE: *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING; -@@ -2471,7 +2548,8 @@ ice_parse_tunneling_params(uint64_t ol_flags, +@@ -2470,8 +2547,9 @@ ice_parse_tunneling_params(uint64_t ol_flags, + * Calculate the tunneling UDP checksum. 
* Shall be set only if L4TUNT = 01b and EIPT is not zero */ - if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && +- if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && - (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING)) -+ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && -+ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) ++ if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) && ++ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; } @@ -56895,8 +63872,99 @@ index bb18a01951..3815c1cec3 100644 }; struct ice_tx_entry { +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +index 31d6af42fd..5d591f9834 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +@@ -254,62 +254,30 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- { +- const __m128i raw_desc7 = +- _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = +- _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = +- _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = +- _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = +- _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = +- _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = +- _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = +- _mm_load_si128((void *)(rxdp + 0)); +- +- raw_desc6_7 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc6), +- raw_desc7, 1); +- raw_desc4_5 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc4), +- raw_desc5, 1); +- raw_desc2_3 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc2), +- raw_desc3, 1); +- raw_desc0_1 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc0), +- raw_desc1, 1); +- } ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++ ++ const __m256i raw_desc6_7 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = ++ 
_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h -index dfe60c81d9..ac939a3ba6 100644 +index dfe60c81d9..f07e304e20 100644 --- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h @@ -72,7 +72,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs, @@ -56908,17 +63976,21 @@ index dfe60c81d9..ac939a3ba6 100644 return pkt_idx; } -@@ -250,7 +250,8 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq) +@@ -250,7 +250,12 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq) #define ICE_TX_NO_VECTOR_FLAGS ( \ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ - RTE_ETH_TX_OFFLOAD_TCP_TSO) + RTE_ETH_TX_OFFLOAD_TCP_TSO | \ ++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) #define ICE_TX_VECTOR_OFFLOAD ( \ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ -@@ -366,7 +367,7 @@ ice_txd_enable_offload(struct rte_mbuf *tx_pkt, +@@ -366,7 +371,7 @@ ice_txd_enable_offload(struct rte_mbuf *tx_pkt, /* Tx Checksum Offload */ /* SET MACLEN */ td_offset |= (tx_pkt->l2_len >> 1) << @@ -57199,6 +64271,105 @@ index 43e9ca3de3..5439b99b2d 100644 /* BAR1: doorbells */ bar++; +diff --git a/dpdk/drivers/net/ionic/ionic_ethdev.c b/dpdk/drivers/net/ionic/ionic_ethdev.c +index 28280c5377..8a2b105238 100644 +--- a/dpdk/drivers/net/ionic/ionic_ethdev.c ++++ b/dpdk/drivers/net/ionic/ionic_ethdev.c +@@ -567,7 +567,7 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_identity *ident = &adapter->ident; +- int i, num; ++ int i, j, num; + uint16_t tbl_sz = rte_le_to_cpu_16(ident->lif.eth.rss_ind_tbl_sz); + + IONIC_PRINT_CALL(); +@@ -588,9 +588,10 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + num = reta_size / RTE_ETH_RETA_GROUP_SIZE; + + for (i = 0; i < num; i++) { +- memcpy(reta_conf->reta, +- &lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE], +- RTE_ETH_RETA_GROUP_SIZE); ++ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) { ++ reta_conf->reta[j] = ++ lif->rss_ind_tbl[(i * RTE_ETH_RETA_GROUP_SIZE) + j]; ++ } + reta_conf++; + } + +@@ -951,16 +952,17 @@ ionic_dev_close(struct rte_eth_dev *eth_dev) + + ionic_lif_stop(lif); + +- ionic_lif_free_queues(lif); +- + IONIC_PRINT(NOTICE, "Removing device %s", eth_dev->device->name); + ionic_unconfigure_intr(adapter); + +- rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); +- + ionic_port_reset(adapter); + ionic_reset(adapter); + ++ ionic_lif_free_queues(lif); ++ ionic_lif_deinit(lif); ++ ionic_lif_free(lif); /* Does not free LIF object */ ++ ++ lif->adapter = NULL; + rte_free(adapter); + + return 0; +@@ -1037,21 +1039,18 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params) + static int + eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev) + { +- struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); +- struct ionic_adapter *adapter = lif->adapter; +- + IONIC_PRINT_CALL(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + +- adapter->lif 
= NULL; +- +- ionic_lif_deinit(lif); +- ionic_lif_free(lif); ++ if (eth_dev->state != RTE_ETH_DEV_UNUSED) ++ ionic_dev_close(eth_dev); + +- if (!(lif->state & IONIC_LIF_F_FW_RESET)) +- ionic_lif_reset(lif); ++ eth_dev->dev_ops = NULL; ++ eth_dev->rx_pkt_burst = NULL; ++ eth_dev->tx_pkt_burst = NULL; ++ eth_dev->tx_pkt_prepare = NULL; + + return 0; + } +@@ -1256,18 +1255,19 @@ eth_ionic_pci_remove(struct rte_pci_device *pci_dev) + { + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; ++ int ret = 0; + + /* Adapter lookup is using the eth_dev name */ + snprintf(name, sizeof(name), "%s_lif", pci_dev->device.name); + + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev) +- ionic_dev_close(eth_dev); ++ ret = rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); + else + IONIC_PRINT(DEBUG, "Cannot find device %s", + pci_dev->device.name); + +- return 0; ++ return ret; + } + + static struct rte_pci_driver rte_ionic_pmd = { diff --git a/dpdk/drivers/net/ionic/ionic_if.h b/dpdk/drivers/net/ionic/ionic_if.h index 693b44d764..45bad9b040 100644 --- a/dpdk/drivers/net/ionic/ionic_if.h @@ -57542,6 +64713,83 @@ index 4bf739809e..104d2f58e5 100644 # rte_eth_switch_domain_alloc() # rte_eth_dev_create() # rte_eth_dev_destroy() +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c +index 8d4d9bbfef..a072769af2 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c +@@ -432,8 +432,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; +- case X550_PHY_ID2: +- case X550_PHY_ID3: ++ case X550_PHY_ID: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; +@@ -915,6 +914,10 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; ++ break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h +index b7eec45635..5973c60477 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h +@@ -1663,6 +1663,7 @@ struct ixgbe_dmac_config { + #define TN1010_PHY_ID 0x00A19410 + #define TNX_FW_REV 0xB + #define X540_PHY_ID 0x01540200 ++#define X550_PHY_ID 0x01540220 + #define X550_PHY_ID2 0x01540223 + #define X550_PHY_ID3 0x01540221 + #define X557_PHY_ID 0x01540240 +@@ -1799,7 +1800,7 @@ enum { + /* VFRE bitmask */ + #define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++#define IXGBE_VF_INIT_TIMEOUT 10000 /* Number of retries to clear RSTI */ + + /* RDHMPN and TDHMPN bitmasks */ + #define IXGBE_RDHMPN_RDICADDR 0x007FF800 +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c +index 5e3ae1b519..11dbbe2a86 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c +@@ -585,7 +585,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; +- if (hw->mac.type >= ixgbe_mac_X550) { ++ if (hw->mac.type >= ixgbe_mac_X550_vf) { + if (links_reg & 
IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } +@@ -595,7 +595,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; +- if (hw->mac.type == ixgbe_mac_X550) { ++ if (hw->mac.type == ixgbe_mac_X550_vf) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } +@@ -603,7 +603,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ +- if (hw->mac.type >= ixgbe_mac_X550) ++ if (hw->mac.type >= ixgbe_mac_X550_vf) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: diff --git a/dpdk/drivers/net/ixgbe/ixgbe_bypass.c b/dpdk/drivers/net/ixgbe/ixgbe_bypass.c index 67ced6c723..94f34a2996 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_bypass.c @@ -57578,7 +64826,7 @@ index 8eb773391b..6ef965dbb6 100644 * @hw: pointer to hardware structure * @cmd: The control word we are setting. diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -index fe61dba81d..e9dd243d5b 100644 +index fe61dba81d..4ba25435fd 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c @@ -128,6 +128,13 @@ @@ -57664,7 +64912,47 @@ index fe61dba81d..e9dd243d5b 100644 rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; -@@ -1223,13 +1271,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -1139,7 +1187,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + diag = ixgbe_validate_eeprom_checksum(hw, &csum); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_exit; + } + + #ifdef RTE_LIBRTE_IXGBE_BYPASS +@@ -1177,7 +1226,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); + if (diag) { + PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_exit; + } + + /* Reset the hw statistics */ +@@ -1197,7 +1247,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + "Failed to allocate %u bytes needed to store " + "MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_exit; + } + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, +@@ -1212,7 +1263,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_exit; + } + + /* initialize the vfta */ +@@ -1223,13 +1275,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* initialize PF if max_vfs not zero */ ret = ixgbe_pf_host_init(eth_dev); @@ -57680,7 +64968,7 @@ index fe61dba81d..e9dd243d5b 100644 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); /* let hardware know driver is loaded */ -@@ -1268,10 +1311,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -1268,10 +1315,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) TAILQ_INIT(&filter_info->fivetuple_list); 
/* initialize flow director filter list & hash */ @@ -57697,7 +64985,7 @@ index fe61dba81d..e9dd243d5b 100644 /* initialize flow filter lists */ ixgbe_filterlist_init(); -@@ -1283,6 +1330,21 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -1283,6 +1334,26 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) ixgbe_tm_conf_init(eth_dev); return 0; @@ -57715,11 +65003,16 @@ index fe61dba81d..e9dd243d5b 100644 + eth_dev->data->mac_addrs = NULL; + rte_free(eth_dev->data->hash_mac_addrs); + eth_dev->data->hash_mac_addrs = NULL; ++err_exit: ++#ifdef RTE_LIB_SECURITY ++ rte_free(eth_dev->security_ctx); ++ eth_dev->security_ctx = NULL; ++#endif + return ret; } static int -@@ -2375,7 +2437,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev) +@@ -2375,7 +2446,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; @@ -57728,7 +65021,7 @@ index fe61dba81d..e9dd243d5b 100644 ret = ixgbe_check_mq_mode(dev); if (ret != 0) { PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", -@@ -2603,7 +2665,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) +@@ -2603,7 +2674,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) } } @@ -57737,7 +65030,7 @@ index fe61dba81d..e9dd243d5b 100644 ixgbe_configure_msix(dev); /* initialize transmission unit */ -@@ -2907,7 +2969,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev) +@@ -2907,7 +2978,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev) if (hw->mac.type == ixgbe_mac_82599EB) { #ifdef RTE_LIBRTE_IXGBE_BYPASS if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { @@ -57746,7 +65039,7 @@ index fe61dba81d..e9dd243d5b 100644 PMD_INIT_LOG(ERR, "Set link up is not supported " "by device id 0x%x", hw->device_id); return -ENOTSUP; -@@ -2938,7 +3000,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev) +@@ -2938,7 +3009,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev) if (hw->mac.type == ixgbe_mac_82599EB) { #ifdef RTE_LIBRTE_IXGBE_BYPASS if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { @@ -57755,7 +65048,7 @@ index fe61dba81d..e9dd243d5b 100644 PMD_INIT_LOG(ERR, "Set link down is not supported " "by device id 0x%x", hw->device_id); return -ENOTSUP; -@@ -3028,6 +3090,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev) +@@ -3028,6 +3099,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev) #ifdef RTE_LIB_SECURITY rte_free(dev->security_ctx); @@ -57763,7 +65056,7 @@ index fe61dba81d..e9dd243d5b 100644 #endif return ret; -@@ -3793,23 +3856,32 @@ static int +@@ -3793,23 +3865,32 @@ static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -57781,12 +65074,12 @@ index fe61dba81d..e9dd243d5b 100644 + nvm_ver.oem_release); + return 0; + } ++ ++ ixgbe_get_etk_id(hw, &nvm_ver); ++ ixgbe_get_orom_version(hw, &nvm_ver); - etrack_id = (eeprom_verh << 16) | eeprom_verl; - ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); -+ ixgbe_get_etk_id(hw, &nvm_ver); -+ ixgbe_get_orom_version(hw, &nvm_ver); -+ + if (nvm_ver.or_valid) { + snprintf(fw_version, fw_size, "0x%08x, %d.%d.%d", + nvm_ver.etk_id, nvm_ver.or_major, @@ -57807,7 +65100,17 @@ index fe61dba81d..e9dd243d5b 100644 } static int -@@ -4236,7 +4308,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -4204,6 +4285,9 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait = 1; + u32 esdp_reg; + ++ if (rte_eal_process_type() != 
RTE_PROC_PRIMARY) ++ return -1; ++ + memset(&link, 0, sizeof(link)); + link.link_status = RTE_ETH_LINK_DOWN; + link.link_speed = RTE_ETH_SPEED_NUM_NONE; +@@ -4236,7 +4320,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &link); } @@ -57817,7 +65120,36 @@ index fe61dba81d..e9dd243d5b 100644 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); if ((esdp_reg & IXGBE_ESDP_SDP3)) link_up = 0; -@@ -4603,7 +4676,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +@@ -4577,14 +4662,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) + timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; + + ixgbe_dev_link_status_print(dev); +- if (rte_eal_alarm_set(timeout * 1000, +- ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) +- PMD_DRV_LOG(ERR, "Error setting alarm"); +- else { +- /* remember original mask */ +- intr->mask_original = intr->mask; +- /* only disable lsc interrupt */ +- intr->mask &= ~IXGBE_EIMS_LSC; ++ ++ /* Don't program delayed handler if LSC interrupt is disabled. ++ * It means one is already programmed. ++ */ ++ if (intr->mask & IXGBE_EIMS_LSC) { ++ if (rte_eal_alarm_set(timeout * 1000, ++ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) ++ PMD_DRV_LOG(ERR, "Error setting alarm"); ++ else { ++ /* remember original mask */ ++ intr->mask_original = intr->mask; ++ /* only disable lsc interrupt */ ++ intr->mask &= ~IXGBE_EIMS_LSC; ++ } + } + } + +@@ -4603,7 +4694,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) * @param handle * Pointer to interrupt handle. * @param param @@ -57826,7 +65158,7 @@ index fe61dba81d..e9dd243d5b 100644 * * @return * void -@@ -4659,7 +4732,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param) +@@ -4659,7 +4750,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param) * @param handle * Pointer to interrupt handle. * @param param @@ -57835,7 +65167,7 @@ index fe61dba81d..e9dd243d5b 100644 * * @return * void -@@ -5921,7 +5994,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) +@@ -5921,7 +6012,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) /* Configure all RX queues of VF */ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { /* Force all queue use vector 0, @@ -57844,7 +65176,7 @@ index fe61dba81d..e9dd243d5b 100644 */ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); rte_intr_vec_list_index_set(intr_handle, q_idx, -@@ -6256,7 +6329,7 @@ ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, +@@ -6256,7 +6347,7 @@ ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, * @param * dev: Pointer to struct rte_eth_dev. * index: the index the filter allocates. @@ -57853,7 +65185,7 @@ index fe61dba81d..e9dd243d5b 100644 * rx_queue: the queue id the filter assigned to. * * @return -@@ -6872,7 +6945,7 @@ ixgbe_timesync_disable(struct rte_eth_dev *dev) +@@ -6872,7 +6963,7 @@ ixgbe_timesync_disable(struct rte_eth_dev *dev) /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. 
*/ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); @@ -57862,7 +65194,7 @@ index fe61dba81d..e9dd243d5b 100644 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); return 0; -@@ -7725,9 +7798,13 @@ static int +@@ -7725,9 +7816,13 @@ static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -57877,7 +65209,7 @@ index fe61dba81d..e9dd243d5b 100644 case IXGBE_SUCCESS: ret = 0; break; -@@ -7749,6 +7826,9 @@ ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) +@@ -7749,6 +7844,9 @@ ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) int ret; int mode = IXGBEVF_XCAST_MODE_ALLMULTI; @@ -57887,7 +65219,7 @@ index fe61dba81d..e9dd243d5b 100644 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { case IXGBE_SUCCESS: ret = 0; -@@ -7770,6 +7850,9 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) +@@ -7770,6 +7868,9 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret; @@ -57897,7 +65229,7 @@ index fe61dba81d..e9dd243d5b 100644 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { case IXGBE_SUCCESS: ret = 0; -@@ -8225,6 +8308,8 @@ ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) +@@ -8225,6 +8326,8 @@ ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); @@ -58041,7 +65373,7 @@ index 9f1bd0a62b..0a0f639e39 100644 default: return -1; diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -index d7c80d4242..c137707869 100644 +index d7c80d4242..e3c611a40f 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c @@ -1818,11 +1818,22 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -58178,7 +65510,33 @@ index d7c80d4242..c137707869 100644 */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; -@@ -5831,6 +5828,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -5744,6 +5741,25 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) + IXGBE_PSRTYPE_RQPL_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + ++ /* Initialize the rss for x550_vf cards if enabled */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550_vf: ++ case ixgbe_mac_X550EM_x_vf: ++ case ixgbe_mac_X550EM_a_vf: ++ switch (dev->data->dev_conf.rxmode.mq_mode) { ++ case RTE_ETH_MQ_RX_RSS: ++ case RTE_ETH_MQ_RX_DCB_RSS: ++ case RTE_ETH_MQ_RX_VMDQ_RSS: ++ ixgbe_rss_configure(dev); ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ break; ++ } ++ + ixgbe_set_rx_function(dev); + + return 0; +@@ -5831,6 +5847,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); @@ -58187,7 +65545,7 @@ index d7c80d4242..c137707869 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { -@@ -5848,6 +5847,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -5848,6 +5866,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); @@ -58267,10 +65625,19 @@ index 079cf01269..7886644412 100644 */ ret = memif_msg_receive_init(cc, &msg); diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c 
b/dpdk/drivers/net/memif/rte_eth_memif.c -index e3d523af57..abaf98c65e 100644 +index e3d523af57..88908a42a5 100644 --- a/dpdk/drivers/net/memif/rte_eth_memif.c +++ b/dpdk/drivers/net/memif/rte_eth_memif.c -@@ -351,13 +351,13 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -264,8 +264,6 @@ memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_q + cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); + while (mq->last_tail != cur_tail) { + RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]); +- /* Decrement refcnt and free mbuf. (current segment) */ +- rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1); + rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); + mq->last_tail++; + } +@@ -351,13 +349,13 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) goto no_free_bufs; mbuf = mbuf_head; mbuf->port = mq->in_port; @@ -58285,7 +65652,18 @@ index e3d523af57..abaf98c65e 100644 src_off = 0; do { -@@ -1026,7 +1026,7 @@ memif_regions_init(struct rte_eth_dev *dev) +@@ -710,10 +708,6 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq + next_in_chain: + /* store pointer to mbuf to free it later */ + mq->buffers[slot & mask] = mbuf; +- /* Increment refcnt to make sure the buffer is not freed before server +- * receives it. (current segment) +- */ +- rte_mbuf_refcnt_update(mbuf, 1); + /* populate descriptor */ + d0 = &ring->desc[slot & mask]; + d0->length = rte_pktmbuf_data_len(mbuf); +@@ -1026,7 +1020,7 @@ memif_regions_init(struct rte_eth_dev *dev) if (ret < 0) return ret; } else { @@ -58294,7 +65672,7 @@ index e3d523af57..abaf98c65e 100644 ret = memif_region_init_shm(dev, /* has buffers */ 1); if (ret < 0) return ret; -@@ -1243,6 +1243,7 @@ memif_dev_start(struct rte_eth_dev *dev) +@@ -1243,6 +1237,7 @@ memif_dev_start(struct rte_eth_dev *dev) { struct pmd_internals *pmd = dev->data->dev_private; int ret = 0; @@ -58302,7 +65680,7 @@ index e3d523af57..abaf98c65e 100644 switch (pmd->role) { case MEMIF_ROLE_CLIENT: -@@ -1257,13 +1258,28 @@ memif_dev_start(struct rte_eth_dev *dev) +@@ -1257,13 +1252,28 @@ memif_dev_start(struct rte_eth_dev *dev) break; } @@ -58331,7 +65709,7 @@ index e3d523af57..abaf98c65e 100644 return 0; } -@@ -1447,8 +1463,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -1447,8 +1457,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->opackets = 0; stats->obytes = 0; @@ -58342,7 +65720,7 @@ index e3d523af57..abaf98c65e 100644 nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp : RTE_ETHDEV_QUEUE_STAT_CNTRS; -@@ -1461,8 +1477,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -1461,8 +1471,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ibytes += mq->n_bytes; } @@ -58353,7 +65731,7 @@ index e3d523af57..abaf98c65e 100644 nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? 
tmp : RTE_ETHDEV_QUEUE_STAT_CNTRS; -@@ -1500,23 +1516,6 @@ memif_stats_reset(struct rte_eth_dev *dev) +@@ -1500,23 +1510,6 @@ memif_stats_reset(struct rte_eth_dev *dev) return 0; } @@ -58377,7 +65755,7 @@ index e3d523af57..abaf98c65e 100644 static const struct eth_dev_ops ops = { .dev_start = memif_dev_start, .dev_stop = memif_dev_stop, -@@ -1527,8 +1526,6 @@ static const struct eth_dev_ops ops = { +@@ -1527,8 +1520,6 @@ static const struct eth_dev_ops ops = { .rx_queue_setup = memif_rx_queue_setup, .rx_queue_release = memif_rx_queue_release, .tx_queue_release = memif_tx_queue_release, @@ -58531,7 +65909,7 @@ index 8fcfb5490e..b0bb48c8f1 100644 mp_init_msg(dev, &mp_res, param->type); res->result = 0; diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c -index c19825ee52..dbfe00ea6d 100644 +index c19825ee52..6d98657917 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c @@ -38,6 +38,7 @@ @@ -58542,6 +65920,15 @@ index c19825ee52..dbfe00ea6d 100644 #include "mlx5.h" #include "mlx5_rxtx.h" +@@ -669,7 +670,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + ifr.ifr_data = (void *)ðpause; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { +- DRV_LOG(WARNING, ++ DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" + " %s", + dev->data->port_id, strerror(rte_errno)); @@ -744,6 +745,7 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh) for (i = 0; i < sh->max_port; ++i) { @@ -58723,15 +66110,101 @@ index c19825ee52..dbfe00ea6d 100644 } file = fopen(phys_switch_id, "rb"); if (file == NULL) { -@@ -1347,15 +1414,16 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) +@@ -1289,13 +1356,17 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + struct ifreq ifr; +- unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); ++ unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd); ++ unsigned int stats_sz = max_stats_n * sizeof(uint64_t); + unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; + struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; ++ uint16_t i_idx, o_idx; ++ uint32_t total_stats = xstats_n; + + et_stats->cmd = ETHTOOL_GSTATS; +- et_stats->n_stats = xstats_ctrl->stats_n; ++ /* Pass the maximum value, the driver may ignore this. 
*/ ++ et_stats->n_stats = max_stats_n; + ifr.ifr_data = (caddr_t)et_stats; + if (pf >= 0) + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname, +@@ -1308,21 +1379,34 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + dev->data->port_id); + return ret; + } +- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { +- if (xstats_ctrl->info[i].dev) +- continue; +- stats[i] += (uint64_t) +- et_stats->data[xstats_ctrl->dev_table_idx[i]]; ++ if (pf <= 0) { ++ for (i = 0; i != total_stats; i++) { ++ i_idx = xstats_ctrl->dev_table_idx[i]; ++ o_idx = xstats_ctrl->xstats_o_idx[i]; ++ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) ++ continue; ++ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; ++ } ++ } else { ++ for (i = 0; i != total_stats; i++) { ++ i_idx = xstats_ctrl->dev_table_idx_2nd[i]; ++ o_idx = xstats_ctrl->xstats_o_idx_2nd[i]; ++ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) ++ continue; ++ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; ++ } + } + return 0; + } + +-/** ++/* + * Read device counters. + * + * @param dev + * Pointer to Ethernet device. +- * @param[out] stats ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param stats + * Counters table output buffer. + * + * @return +@@ -1330,7 +1414,7 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + * rte_errno is set. + */ + int +-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) ++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; +@@ -1338,7 +1422,7 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + + memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n); + /* Read ifreq counters. */ +- if (priv->master && priv->pf_bond >= 0) { ++ if (bond_master) { + /* Sum xstats from bonding device member ports. */ + for (i = 0; i < priv->sh->bond.n_port; i++) { + ret = _mlx5_os_read_dev_counters(dev, i, stats); +@@ -1347,15 +1431,20 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) } } else { ret = _mlx5_os_read_dev_counters(dev, -1, stats); + if (ret) + return ret; } - /* Read IB counters. */ - for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { +- /* Read IB counters. */ +- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { ++ /* ++ * Read IB dev counters. ++ * The counters are unique per IB device but not per netdev IF. ++ * In bonding mode, getting the stats name only from 1 port is enough. ++ */ ++ for (i = xstats_ctrl->dev_cnt_start; i < xstats_ctrl->mlx5_stats_n; i++) { if (!xstats_ctrl->info[i].dev) continue; - ret = mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name, @@ -58739,11 +66212,280 @@ index c19825ee52..dbfe00ea6d 100644 /* return last xstats counter if fail to read. */ - if (ret != 0) + if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name, -+ &stats[i]) == 0) ++ &stats[i]) == 0) xstats_ctrl->xstats[i] = stats[i]; else stats[i] = xstats_ctrl->xstats[i]; -@@ -1644,10 +1712,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]) +@@ -1363,18 +1452,24 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + return ret; + } + +-/** ++/* + * Query the number of statistics provided by ETHTOOL. + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. 
++ * @param n_stats ++ * Pointer to number of stats to store. ++ * @param n_stats_sec ++ * Pointer to number of stats to store for the 2nd port of the bond. + * + * @return +- * Number of statistics on success, negative errno value otherwise and +- * rte_errno is set. ++ * 0 on success, negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_os_get_stats_n(struct rte_eth_dev *dev) ++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct ethtool_drvinfo drvinfo; +@@ -1383,18 +1478,34 @@ mlx5_os_get_stats_n(struct rte_eth_dev *dev) + + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (caddr_t)&drvinfo; +- if (priv->master && priv->pf_bond >= 0) +- /* Bonding PF. */ ++ /* Bonding PFs. */ ++ if (bond_master) { + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, + SIOCETHTOOL, &ifr); +- else ++ if (ret) { ++ DRV_LOG(WARNING, "bonding port %u unable to query number of" ++ " statistics for the 1st slave, %d", PORT_ID(priv), ret); ++ return ret; ++ } ++ *n_stats = drvinfo.n_stats; ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, ++ SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "bonding port %u unable to query number of" ++ " statistics for the 2nd slave, %d", PORT_ID(priv), ret); ++ return ret; ++ } ++ *n_stats_sec = drvinfo.n_stats; ++ } else { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); +- if (ret) { +- DRV_LOG(WARNING, "port %u unable to query number of statistics", +- dev->data->port_id); +- return ret; ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to query number of statistics", ++ PORT_ID(priv)); ++ return ret; ++ } ++ *n_stats = drvinfo.n_stats; + } +- return drvinfo.n_stats; ++ return 0; + } + + static const struct mlx5_counter_ctrl mlx5_counters_init[] = { +@@ -1514,7 +1625,104 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { + }, + }; + +-static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); ++const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); ++ ++static int ++mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master, ++ struct ethtool_gstrings *strings, ++ uint32_t stats_n, uint32_t stats_n_2nd) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; ++ struct ifreq ifr; ++ int ret; ++ uint32_t i, j, idx; ++ ++ /* Ensure no out of bounds access before. */ ++ MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS); ++ strings->cmd = ETHTOOL_GSTRINGS; ++ strings->string_set = ETH_SS_STATS; ++ strings->len = stats_n; ++ ifr.ifr_data = (caddr_t)strings; ++ if (bond_master) ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, ++ SIOCETHTOOL, &ifr); ++ else ++ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to get statistic names with %d", ++ PORT_ID(priv), ret); ++ return ret; ++ } ++ /* Reorganize the orders to reduce the iterations. */ ++ for (j = 0; j < xstats_n; j++) { ++ xstats_ctrl->dev_table_idx[j] = UINT16_MAX; ++ for (i = 0; i < stats_n; i++) { ++ const char *curr_string = ++ (const char *)&strings->data[i * ETH_GSTRING_LEN]; ++ ++ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->dev_table_idx[j] = i; ++ xstats_ctrl->xstats_o_idx[j] = idx; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ } ++ } ++ } ++ if (!bond_master) { ++ /* Add dev counters, unique per IB device. 
*/ ++ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; ++ for (j = 0; j != xstats_n; j++) { ++ if (mlx5_counters_init[j].dev) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ xstats_ctrl->hw_stats[idx] = 0; ++ } ++ } ++ return 0; ++ } ++ ++ strings->len = stats_n_2nd; ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, ++ SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d", ++ PORT_ID(priv), ret); ++ return ret; ++ } ++ /* The 2nd slave port may have a different strings set, based on the configuration. */ ++ for (j = 0; j != xstats_n; j++) { ++ xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX; ++ for (i = 0; i != stats_n_2nd; i++) { ++ const char *curr_string = ++ (const char *)&strings->data[i * ETH_GSTRING_LEN]; ++ ++ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { ++ xstats_ctrl->dev_table_idx_2nd[j] = i; ++ if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) { ++ /* Already mapped in the 1st slave port. */ ++ idx = xstats_ctrl->xstats_o_idx[j]; ++ xstats_ctrl->xstats_o_idx_2nd[j] = idx; ++ } else { ++ /* Append the new items to the end of the map. */ ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->xstats_o_idx_2nd[j] = idx; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ } ++ } ++ } ++ } ++ /* Dev counters are always at the last now. */ ++ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; ++ for (j = 0; j != xstats_n; j++) { ++ if (mlx5_counters_init[j].dev) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ xstats_ctrl->hw_stats[idx] = 0; ++ } ++ } ++ return 0; ++} + + /** + * Init the structures to read device counters. +@@ -1528,76 +1736,44 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; +- unsigned int i; +- unsigned int j; +- struct ifreq ifr; + struct ethtool_gstrings *strings = NULL; +- unsigned int dev_stats_n; ++ uint16_t dev_stats_n = 0; ++ uint16_t dev_stats_n_2nd = 0; ++ unsigned int max_stats_n; + unsigned int str_sz; + int ret; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + + /* So that it won't aggregate for each init. */ + xstats_ctrl->mlx5_stats_n = 0; +- ret = mlx5_os_get_stats_n(dev); ++ ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd); + if (ret < 0) { + DRV_LOG(WARNING, "port %u no extended statistics available", + dev->data->port_id); + return; + } +- dev_stats_n = ret; ++ max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd); + /* Allocate memory to grab stat names and values. */ +- str_sz = dev_stats_n * ETH_GSTRING_LEN; ++ str_sz = max_stats_n * ETH_GSTRING_LEN; + strings = (struct ethtool_gstrings *) + mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0, + SOCKET_ID_ANY); + if (!strings) { + DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", +- dev->data->port_id); ++ dev->data->port_id); + return; + } +- strings->cmd = ETHTOOL_GSTRINGS; +- strings->string_set = ETH_SS_STATS; +- strings->len = dev_stats_n; +- ifr.ifr_data = (caddr_t)strings; +- if (priv->master && priv->pf_bond >= 0) +- /* Bonding master. 
*/ +- ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, +- SIOCETHTOOL, &ifr); +- else +- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); +- if (ret) { +- DRV_LOG(WARNING, "port %u unable to get statistic names", ++ ret = mlx5_os_get_stats_strings(dev, bond_master, strings, ++ dev_stats_n, dev_stats_n_2nd); ++ if (ret < 0) { ++ DRV_LOG(WARNING, "port %u failed to get the stats strings", + dev->data->port_id); + goto free; + } +- for (i = 0; i != dev_stats_n; ++i) { +- const char *curr_string = (const char *) +- &strings->data[i * ETH_GSTRING_LEN]; +- +- for (j = 0; j != xstats_n; ++j) { +- if (!strcmp(mlx5_counters_init[j].ctr_name, +- curr_string)) { +- unsigned int idx = xstats_ctrl->mlx5_stats_n++; +- +- xstats_ctrl->dev_table_idx[idx] = i; +- xstats_ctrl->info[idx] = mlx5_counters_init[j]; +- break; +- } +- } +- } +- /* Add dev counters. */ +- for (i = 0; i != xstats_n; ++i) { +- if (mlx5_counters_init[i].dev) { +- unsigned int idx = xstats_ctrl->mlx5_stats_n++; +- +- xstats_ctrl->info[idx] = mlx5_counters_init[i]; +- xstats_ctrl->hw_stats[idx] = 0; +- } +- } +- MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS); + xstats_ctrl->stats_n = dev_stats_n; ++ xstats_ctrl->stats_n_2nd = dev_stats_n_2nd; + /* Copy to base at first time. */ +- ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base); ++ ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base); + if (ret) + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); +@@ -1644,10 +1820,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]) */ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) { @@ -58755,7 +66497,7 @@ index c19825ee52..dbfe00ea6d 100644 struct ethtool_drvinfo drvinfo; struct ifreq ifr; struct ethtool_gstrings *strings = NULL; -@@ -1658,15 +1723,21 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) +@@ -1658,15 +1831,21 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) int32_t i; int ret; @@ -58782,7 +66524,7 @@ index c19825ee52..dbfe00ea6d 100644 } else if (ret == -EOPNOTSUPP) { drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; -@@ -1739,5 +1810,6 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) +@@ -1739,5 +1918,6 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) ret = !!(flags.data & (1U << i)); exit: mlx5_free(strings); @@ -58831,7 +66573,7 @@ index c448a3e9eb..0ba2208fe0 100644 mp_init_msg(&priv->mp_id, &mp_res, param->type); res->result = 0; diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -index c29fe3d92b..85eda47f7d 100644 +index c29fe3d92b..841512faa3 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c @@ -112,7 +112,7 @@ static struct mlx5_indexed_pool_config icfg[] = { @@ -59181,7 +66923,7 @@ index c29fe3d92b..85eda47f7d 100644 owner_pci.devid, owner_pci.function); rte_errno = ENOENT; ret = -rte_errno; -@@ -2300,7 +2287,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, +@@ -2300,13 +2287,12 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, /* * Force standalone bonding * device for ROCE LAG @@ -59190,7 +66932,14 @@ index c29fe3d92b..85eda47f7d 100644 */ list[ns].info.master = 0; list[ns].info.representor = 0; -@@ -2496,7 +2483,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, + } +- if (list[ns].info.port_name == bd) +- ns++; ++ ns++; + break; + case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: + /* Fallthrough */ +@@ -2496,7 +2482,7 @@ mlx5_os_pci_probe_pf(struct 
mlx5_common_device *cdev, uint32_t restore; /* Default configuration. */ @@ -59199,7 +66948,7 @@ index c29fe3d92b..85eda47f7d 100644 dev_config.vf = dev_config_vf; list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], &dev_config, ð_da); -@@ -2592,7 +2579,7 @@ mlx5_os_parse_eth_devargs(struct rte_device *dev, +@@ -2592,7 +2578,7 @@ mlx5_os_parse_eth_devargs(struct rte_device *dev, dev->devargs->cls_str); return -rte_errno; } @@ -59208,7 +66957,7 @@ index c29fe3d92b..85eda47f7d 100644 /* Parse legacy device argument */ ret = rte_eth_devargs_parse(dev->devargs->args, eth_da); if (ret) { -@@ -2632,16 +2619,16 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev) +@@ -2632,16 +2618,16 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev) for (p = 0; p < eth_da.nb_ports; p++) { ret = mlx5_os_pci_probe_pf(cdev, ð_da, eth_da.ports[p]); @@ -59235,7 +66984,7 @@ index c29fe3d92b..85eda47f7d 100644 } } else { ret = mlx5_os_pci_probe_pf(cdev, ð_da, 0); -@@ -2666,7 +2653,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev) +@@ -2666,7 +2652,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev) if (ret != 0) return ret; /* Set default config data. */ @@ -59244,7 +66993,7 @@ index c29fe3d92b..85eda47f7d 100644 config.sf = 1; /* Init spawn data. */ spawn.max_port = 1; -@@ -2733,6 +2720,40 @@ mlx5_os_net_cleanup(void) +@@ -2733,6 +2719,40 @@ mlx5_os_net_cleanup(void) mlx5_pmd_socket_uninit(); } @@ -59285,7 +67034,7 @@ index c29fe3d92b..85eda47f7d 100644 /** * Install shared asynchronous device events handler. * This function is implemented to support event sharing -@@ -2770,6 +2791,18 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) +@@ -2770,6 +2790,18 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) rte_intr_fd_set(sh->intr_handle, -1); } } @@ -59304,7 +67053,7 @@ index c29fe3d92b..85eda47f7d 100644 if (sh->devx) { #ifdef HAVE_IBV_DEVX_ASYNC sh->intr_handle_devx = -@@ -2817,10 +2850,19 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) +@@ -2817,10 +2849,19 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh) { @@ -59324,6 +67073,23 @@ index c29fe3d92b..85eda47f7d 100644 #ifdef HAVE_IBV_DEVX_ASYNC if (rte_intr_fd_get(sh->intr_handle_devx) >= 0) rte_intr_callback_unregister(sh->intr_handle_devx, +@@ -2853,9 +2894,15 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, + + if (priv->sh) { + if (priv->q_counters != NULL && +- strcmp(ctr_name, "out_of_buffer") == 0) ++ strcmp(ctr_name, "out_of_buffer") == 0) { ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ DRV_LOG(WARNING, "Devx out_of_buffer counter is not supported in the secondary process"); ++ rte_errno = ENOTSUP; ++ return 1; ++ } + return mlx5_devx_cmd_queue_counter_query + (priv->q_counters, 0, (uint32_t *)stat); ++ } + MKSTR(path, "%s/ports/%d/hw_counters/%s", + priv->sh->ibdev_path, + priv->dev_port, diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c b/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c index 58556d2bf0..b113731097 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c @@ -59373,7 +67139,7 @@ index 005904bdfe..7ee2460a23 100644 DRV_LOG(WARNING, "Can not create Netlink socket" diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c -index aa5f313c1a..5645e8656c 100644 +index aa5f313c1a..b339ccce84 100644 --- a/dpdk/drivers/net/mlx5/mlx5.c +++ b/dpdk/drivers/net/mlx5/mlx5.c @@ -518,22 +518,37 @@ mlx5_flow_aging_init(struct 
mlx5_dev_ctx_shared *sh) @@ -59526,7 +67292,27 @@ index aa5f313c1a..5645e8656c 100644 if (sh->aso_age_mng) { mlx5_flow_aso_age_mng_close(sh); sh->aso_age_mng = NULL; -@@ -1594,8 +1614,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) +@@ -1576,7 +1596,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) + mlx5_free(priv->rxq_privs); + priv->rxq_privs = NULL; + } +- if (priv->txqs != NULL) { ++ if (priv->txqs != NULL && dev->data->tx_queues != NULL) { + /* XXX race condition if mlx5_tx_burst() is still running. */ + rte_delay_us_sleep(1000); + for (i = 0; (i != priv->txqs_n); ++i) +@@ -1585,17 +1605,15 @@ mlx5_dev_close(struct rte_eth_dev *dev) + priv->txqs = NULL; + } + mlx5_proc_priv_uninit(dev); ++ if (priv->drop_queue.hrxq) ++ mlx5_drop_action_destroy(dev); + if (priv->q_counters) { + mlx5_devx_cmd_destroy(priv->q_counters); + priv->q_counters = NULL; + } +- if (priv->drop_queue.hrxq) +- mlx5_drop_action_destroy(dev); if (priv->mreg_cp_tbl) mlx5_hlist_destroy(priv->mreg_cp_tbl); mlx5_mprq_free_mp(dev); @@ -59596,10 +67382,41 @@ index aa5f313c1a..5645e8656c 100644 else sh->dv_regc0_mask = reg_c0; diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h -index 8466531060..6a3c48eaeb 100644 +index 8466531060..80c1c0b7b1 100644 --- a/dpdk/drivers/net/mlx5/mlx5.h +++ b/dpdk/drivers/net/mlx5/mlx5.h -@@ -275,10 +275,14 @@ struct mlx5_dev_config { +@@ -199,16 +199,29 @@ struct mlx5_counter_ctrl { + struct mlx5_xstats_ctrl { + /* Number of device stats. */ + uint16_t stats_n; ++ /* Number of device stats, for the 2nd port in bond. */ ++ uint16_t stats_n_2nd; + /* Number of device stats identified by PMD. */ +- uint16_t mlx5_stats_n; ++ uint16_t mlx5_stats_n; ++ /* First device counters index. */ ++ uint16_t dev_cnt_start; + /* Index in the device counters table. */ + uint16_t dev_table_idx[MLX5_MAX_XSTATS]; ++ /* Index in the output table. */ ++ uint16_t xstats_o_idx[MLX5_MAX_XSTATS]; + uint64_t base[MLX5_MAX_XSTATS]; + uint64_t xstats[MLX5_MAX_XSTATS]; + uint64_t hw_stats[MLX5_MAX_XSTATS]; + struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS]; ++ /* Index in the device counters table, for the 2nd port in bond. */ ++ uint16_t dev_table_idx_2nd[MLX5_MAX_XSTATS]; ++ /* Index in the output table, for the 2nd port in bond. */ ++ uint16_t xstats_o_idx_2nd[MLX5_MAX_XSTATS]; + }; + ++/* xstats array size. */ ++extern const unsigned int xstats_n; ++ + struct mlx5_stats_ctrl { + /* Base for imissed counter. */ + uint64_t imissed_base; +@@ -275,10 +288,14 @@ struct mlx5_dev_config { unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */ struct { unsigned int enabled:1; /* Whether MPRQ is enabled. */ @@ -59618,7 +67435,7 @@ index 8466531060..6a3c48eaeb 100644 unsigned int max_memcpy_len; /* Maximum packet size to memcpy Rx packets. */ unsigned int min_rxqs_num; -@@ -319,9 +323,10 @@ struct mlx5_lb_ctx { +@@ -319,9 +336,10 @@ struct mlx5_lb_ctx { uint16_t refcnt; /* Reference count for representors. */ }; @@ -59630,7 +67447,7 @@ index 8466531060..6a3c48eaeb 100644 #define MLX5_CNT_SHARED_OFFSET 0x80000000 #define IS_BATCH_CNT(cnt) (((cnt) & (MLX5_CNT_SHARED_OFFSET - 1)) >= \ MLX5_CNT_BATCH_OFFSET) -@@ -477,7 +482,6 @@ TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool); +@@ -477,7 +495,6 @@ TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool); /* Counter global management structure. */ struct mlx5_flow_counter_mng { volatile uint16_t n_valid; /* Number of valid pools. 
*/ @@ -59638,7 +67455,7 @@ index 8466531060..6a3c48eaeb 100644 uint16_t last_pool_idx; /* Last used pool index */ int min_id; /* The minimum counter ID in the pools. */ int max_id; /* The maximum counter ID in the pools. */ -@@ -546,6 +550,7 @@ struct mlx5_aso_age_action { +@@ -546,6 +563,7 @@ struct mlx5_aso_age_action { }; #define MLX5_ASO_AGE_ACTIONS_PER_POOL 512 @@ -59646,7 +67463,7 @@ index 8466531060..6a3c48eaeb 100644 struct mlx5_aso_age_pool { struct mlx5_devx_obj *flow_hit_aso_obj; -@@ -601,6 +606,7 @@ struct mlx5_age_info { +@@ -601,6 +619,7 @@ struct mlx5_age_info { struct mlx5_dev_shared_port { uint32_t ih_port_id; uint32_t devx_ih_port_id; @@ -59654,7 +67471,7 @@ index 8466531060..6a3c48eaeb 100644 /* * Interrupt handler port_id. Used by shared interrupt * handler to find the corresponding rte_eth device -@@ -742,6 +748,8 @@ struct mlx5_flow_meter_policy { +@@ -742,6 +761,8 @@ struct mlx5_flow_meter_policy { /* If yellow color policy is skipped. */ uint32_t skip_g:1; /* If green color policy is skipped. */ @@ -59663,7 +67480,7 @@ index 8466531060..6a3c48eaeb 100644 rte_spinlock_t sl; uint32_t ref_cnt; /* Use count. */ -@@ -956,7 +964,6 @@ union mlx5_flow_tbl_key { +@@ -956,7 +977,6 @@ union mlx5_flow_tbl_key { /* Table structure. */ struct mlx5_flow_tbl_resource { void *obj; /**< Pointer to DR table object. */ @@ -59671,7 +67488,7 @@ index 8466531060..6a3c48eaeb 100644 }; #define MLX5_MAX_TABLES UINT16_MAX -@@ -977,7 +984,7 @@ struct mlx5_flow_id_pool { +@@ -977,7 +997,7 @@ struct mlx5_flow_id_pool { uint32_t base_index; /**< The next index that can be used without any free elements. */ uint32_t *curr; /**< Pointer to the index to pop. */ @@ -59680,7 +67497,7 @@ index 8466531060..6a3c48eaeb 100644 uint32_t max_id; /**< Maximum id can be allocated from the pool. */ }; -@@ -1014,7 +1021,7 @@ struct mlx5_dev_txpp { +@@ -1014,7 +1034,7 @@ struct mlx5_dev_txpp { void *pp; /* Packet pacing context. */ uint16_t pp_id; /* Packet pacing context index. */ uint16_t ts_n; /* Number of captured timestamps. */ @@ -59689,7 +67506,7 @@ index 8466531060..6a3c48eaeb 100644 struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */ struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */ uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */ -@@ -1118,7 +1125,7 @@ struct mlx5_flex_parser_devx { +@@ -1118,7 +1138,7 @@ struct mlx5_flex_parser_devx { uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM]; }; @@ -59698,7 +67515,7 @@ index 8466531060..6a3c48eaeb 100644 __extension__ struct mlx5_flex_pattern_field { uint16_t width:6; -@@ -1152,7 +1159,7 @@ struct mlx5_dev_ctx_shared { +@@ -1152,7 +1172,7 @@ struct mlx5_dev_ctx_shared { uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */ uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */ uint32_t reclaim_mode:1; /* Reclaim memory. */ @@ -59707,7 +67524,7 @@ index 8466531060..6a3c48eaeb 100644 uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */ uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */ uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */ -@@ -1169,7 +1176,7 @@ struct mlx5_dev_ctx_shared { +@@ -1169,7 +1189,7 @@ struct mlx5_dev_ctx_shared { /* Shared DV/DR flow data section. */ uint32_t dv_meta_mask; /* flow META metadata supported mask. */ uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */ @@ -59716,7 +67533,7 @@ index 8466531060..6a3c48eaeb 100644 void *fdb_domain; /* FDB Direct Rules name space handle. 
*/ void *rx_domain; /* RX Direct Rules name space handle. */ void *tx_domain; /* TX Direct Rules name space handle. */ -@@ -1199,6 +1206,7 @@ struct mlx5_dev_ctx_shared { +@@ -1199,6 +1219,7 @@ struct mlx5_dev_ctx_shared { /* Shared interrupt handler section. */ struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */ struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */ @@ -59724,7 +67541,7 @@ index 8466531060..6a3c48eaeb 100644 void *devx_comp; /* DEVX async comp obj. */ struct mlx5_devx_obj *tis[16]; /* TIS object. */ struct mlx5_devx_obj *td; /* Transport domain. */ -@@ -1409,6 +1417,8 @@ struct mlx5_priv { +@@ -1409,6 +1430,8 @@ struct mlx5_priv { unsigned int mtr_en:1; /* Whether support meter. */ unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */ unsigned int lb_used:1; /* Loopback queue is referred to. */ @@ -59733,7 +67550,7 @@ index 8466531060..6a3c48eaeb 100644 uint16_t domain_id; /* Switch domain identifier. */ uint16_t vport_id; /* Associated VF vport index (if any). */ uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */ -@@ -1444,7 +1454,7 @@ struct mlx5_priv { +@@ -1444,7 +1467,7 @@ struct mlx5_priv { uint32_t refcnt; /**< Reference counter. */ /**< Verbs modify header action object. */ uint8_t ft_type; /**< Flow table type, Rx or Tx. */ @@ -59742,7 +67559,7 @@ index 8466531060..6a3c48eaeb 100644 uint32_t link_speed_capa; /* Link speed capabilities. */ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */ -@@ -1580,6 +1590,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, +@@ -1580,6 +1603,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); void mlx5_dev_interrupt_handler(void *arg); void mlx5_dev_interrupt_handler_devx(void *arg); @@ -59750,6 +67567,18 @@ index 8466531060..6a3c48eaeb 100644 int mlx5_set_link_down(struct rte_eth_dev *dev); int mlx5_set_link_up(struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); +@@ -1597,8 +1621,9 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + int mlx5_os_read_dev_stat(struct mlx5_priv *priv, + const char *ctr_name, uint64_t *stat); +-int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats); +-int mlx5_os_get_stats_n(struct rte_eth_dev *dev); ++int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats); ++int mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec); + void mlx5_os_stats_init(struct rte_eth_dev *dev); + int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev); + diff --git a/dpdk/drivers/net/mlx5/mlx5_defs.h b/dpdk/drivers/net/mlx5/mlx5_defs.h index 258475ed2c..2d48fde010 100644 --- a/dpdk/drivers/net/mlx5/mlx5_defs.h @@ -59844,7 +67673,7 @@ index 105c3d67f0..b59c5d81bd 100644 mlx5_devx_tir_destroy(hrxq); if (hrxq->ind_table->ind_table != NULL) diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c -index dc647d5580..9c44471c42 100644 +index dc647d5580..c6ec156493 100644 --- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c +++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c @@ -109,7 +109,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev) @@ -59856,8 +67685,21 @@ index dc647d5580..9c44471c42 100644 DRV_LOG(ERR, "port %u cannot allocate rxq private data", dev->data->port_id); rte_errno = ENOMEM; +@@ -136,6 +136,12 @@ mlx5_dev_configure(struct rte_eth_dev *dev) + ret = 
mlx5_proc_priv_init(dev); + if (ret) + return ret; ++ ret = mlx5_dev_set_mtu(dev, dev->data->mtu); ++ if (ret) { ++ DRV_LOG(ERR, "port %u failed to set MTU to %u", dev->data->port_id, ++ dev->data->mtu); ++ return ret; ++ } + return 0; + } + diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c -index f34e4b88aa..5d489c7f92 100644 +index f34e4b88aa..533cc13f2a 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -18,6 +18,7 @@ @@ -60464,7 +68306,7 @@ index f34e4b88aa..5d489c7f92 100644 * by list traversing. */ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, -@@ -4670,19 +4732,32 @@ flow_hairpin_split(struct rte_eth_dev *dev, +@@ -4670,20 +4732,34 @@ flow_hairpin_split(struct rte_eth_dev *dev, struct mlx5_rte_flow_item_tag *tag_item; struct rte_flow_item *item; char *addr; @@ -60496,9 +68338,11 @@ index f34e4b88aa..5d489c7f92 100644 + } + break; case RTE_FLOW_ACTION_TYPE_COUNT: ++ case RTE_FLOW_ACTION_TYPE_AGE: if (encap) { rte_memcpy(actions_tx, actions, -@@ -4796,6 +4871,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, + sizeof(struct rte_flow_action)); +@@ -4796,6 +4872,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_flow *dev_flow; @@ -60506,7 +68350,7 @@ index f34e4b88aa..5d489c7f92 100644 dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, flow_split_info->flow_idx, error); -@@ -4810,12 +4886,14 @@ flow_create_split_inner(struct rte_eth_dev *dev, +@@ -4810,12 +4887,14 @@ flow_create_split_inner(struct rte_eth_dev *dev, /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags, and pass the @@ -60524,10 +68368,14 @@ index f34e4b88aa..5d489c7f92 100644 if (sub_flow) *sub_flow = dev_flow; #ifdef HAVE_IBV_FLOW_DV_SUPPORT -@@ -5006,9 +5084,10 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5004,11 +5083,12 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + struct mlx5_rte_flow_item_tag *tag_item_spec; + struct mlx5_rte_flow_item_tag *tag_item_mask; uint32_t tag_id = 0; - struct rte_flow_item *vlan_item_dst = NULL; - const struct rte_flow_item *vlan_item_src = NULL; +- struct rte_flow_item *vlan_item_dst = NULL; +- const struct rte_flow_item *vlan_item_src = NULL; ++ bool vlan_actions; ++ struct rte_flow_item *orig_sfx_items = sfx_items; + const struct rte_flow_item *orig_items = items; struct rte_flow_action *hw_mtr_action; struct rte_flow_action *action_pre_head = NULL; @@ -60536,7 +68384,7 @@ index f34e4b88aa..5d489c7f92 100644 bool mtr_first; uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0; uint8_t mtr_reg_bits = priv->mtr_reg_share ? -@@ -5016,27 +5095,18 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5016,27 +5096,19 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint32_t flow_id = 0; uint32_t flow_id_reversed = 0; uint8_t flow_id_bits = 0; @@ -60545,6 +68393,7 @@ index f34e4b88aa..5d489c7f92 100644 /* Prepare the suffix subflow items. 
*/ tag_item = sfx_items++; ++ tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - struct mlx5_priv *port_priv; - const struct rte_flow_item_port_id *pid_v; @@ -60567,7 +68416,33 @@ index f34e4b88aa..5d489c7f92 100644 if (!fm->def_policy && wks->policy->is_hierarchy && flow_src_port != priv->representor_id) { if (flow_drv_mtr_hierarchy_rule_create(dev, -@@ -5082,6 +5152,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5050,10 +5122,13 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + sfx_items++; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: +- /* Determine if copy vlan item below. */ +- vlan_item_src = items; +- vlan_item_dst = sfx_items++; +- vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID; ++ /* ++ * Copy VLAN items in case VLAN actions are performed. ++ * If there are no VLAN actions, these items will be VOID. ++ */ ++ memcpy(sfx_items, items, sizeof(*sfx_items)); ++ sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN; ++ sfx_items++; + break; + default: + break; +@@ -5070,6 +5145,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + tag_action = actions_pre++; + } + /* Prepare the actions for prefix and suffix flow. */ ++ vlan_actions = false; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + struct rte_flow_action *action_cur = NULL; + +@@ -5082,6 +5158,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, tag_action = actions_pre++; action_cur = actions_pre++; } @@ -60575,19 +68450,45 @@ index f34e4b88aa..5d489c7f92 100644 break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: -@@ -5110,6 +5181,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev, - MLX5_RTE_FLOW_ITEM_TYPE_VLAN; - } +@@ -5099,16 +5176,12 @@ flow_meter_split_prep(struct rte_eth_dev *dev, break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: +- if (vlan_item_dst && vlan_item_src) { +- memcpy(vlan_item_dst, vlan_item_src, +- sizeof(*vlan_item_dst)); +- /* +- * Convert to internal match item, it is used +- * for vlan push and set vid. +- */ +- vlan_item_dst->type = (enum rte_flow_item_type) +- MLX5_RTE_FLOW_ITEM_TYPE_VLAN; +- } ++ vlan_actions = true; ++ break; + case RTE_FLOW_ACTION_TYPE_COUNT: + if (fm->def_policy) + action_cur = after_meter ? + actions_sfx++ : actions_pre++; -+ break; + break; default: break; - } -@@ -5130,7 +5206,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5118,6 +5191,14 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + actions_sfx++ : actions_pre++; + memcpy(action_cur, actions, sizeof(struct rte_flow_action)); + } ++ /* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */ ++ if (!vlan_actions) { ++ struct rte_flow_item *it = orig_sfx_items; ++ ++ for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++) ++ if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) ++ it->type = RTE_FLOW_ITEM_TYPE_VOID; ++ } + /* Add end action to the actions. 
*/ + actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; + if (priv->sh->meter_aso_en) { +@@ -5130,7 +5211,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, if (!fm->def_policy) { sub_policy = get_meter_sub_policy(dev, flow, wks, @@ -60597,7 +68498,16 @@ index f34e4b88aa..5d489c7f92 100644 if (!sub_policy) return -rte_errno; } else { -@@ -5359,7 +5436,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, +@@ -5206,8 +5288,6 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; + tag_action->conf = set_tag; +- tag_item->type = (enum rte_flow_item_type) +- MLX5_RTE_FLOW_ITEM_TYPE_TAG; + tag_item->spec = tag_item_spec; + tag_item->last = NULL; + tag_item->mask = tag_item_mask; +@@ -5359,7 +5439,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * @param[out] error * Perform verbose error reporting if not NULL. * @param[in] encap_idx @@ -60606,7 +68516,7 @@ index f34e4b88aa..5d489c7f92 100644 * * @return * 0 on success, negative value otherwise -@@ -5436,6 +5513,7 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5436,6 +5516,7 @@ flow_check_match_action(const struct rte_flow_action actions[], { const struct rte_flow_action_sample *sample; const struct rte_flow_action_raw_decap *decap; @@ -60614,7 +68524,7 @@ index f34e4b88aa..5d489c7f92 100644 int actions_n = 0; uint32_t ratio = 0; int sub_type = 0; -@@ -5459,7 +5537,8 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5459,7 +5540,8 @@ flow_check_match_action(const struct rte_flow_action actions[], ratio = sample->ratio; sub_type = ((const struct rte_flow_action *) (sample->actions))->type; @@ -60624,7 +68534,7 @@ index f34e4b88aa..5d489c7f92 100644 fdb_mirror = 1; break; case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: -@@ -5495,12 +5574,12 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5495,12 +5577,12 @@ flow_check_match_action(const struct rte_flow_action actions[], break; case RTE_FLOW_ACTION_TYPE_RAW_DECAP: decap = actions->conf; @@ -60641,7 +68551,7 @@ index f34e4b88aa..5d489c7f92 100644 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE && encap->size > -@@ -5527,7 +5606,7 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5527,7 +5609,7 @@ flow_check_match_action(const struct rte_flow_action actions[], return flag ? actions_n + 1 : 0; } @@ -60650,7 +68560,7 @@ index f34e4b88aa..5d489c7f92 100644 /** * Split the sample flow. -@@ -5568,6 +5647,7 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5568,6 +5650,7 @@ flow_check_match_action(const struct rte_flow_action actions[], static int flow_sample_split_prep(struct rte_eth_dev *dev, int add_tag, @@ -60658,7 +68568,7 @@ index f34e4b88aa..5d489c7f92 100644 struct rte_flow_item sfx_items[], const struct rte_flow_action actions[], struct rte_flow_action actions_sfx[], -@@ -5584,8 +5664,9 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5584,8 +5667,9 @@ flow_sample_split_prep(struct rte_eth_dev *dev, struct mlx5_rte_flow_item_tag *tag_mask; struct rte_flow_action_jump *jump_action; uint32_t tag_id = 0; @@ -60669,7 +68579,7 @@ index f34e4b88aa..5d489c7f92 100644 int ret; if (sample_action_pos < 0) -@@ -5594,6 +5675,54 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5594,6 +5678,54 @@ flow_sample_split_prep(struct rte_eth_dev *dev, NULL, "invalid position of sample " "action in list"); /* Prepare the actions for prefix and suffix flow. 
*/ @@ -60724,7 +68634,7 @@ index f34e4b88aa..5d489c7f92 100644 if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { index = qrss_action_pos; /* Put the preceding the Queue/RSS action into prefix flow. */ -@@ -5610,6 +5739,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5610,6 +5742,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, memcpy(actions_sfx, actions + qrss_action_pos, sizeof(struct rte_flow_action)); actions_sfx++; @@ -60739,7 +68649,7 @@ index f34e4b88aa..5d489c7f92 100644 } else { index = sample_action_pos; if (index != 0) -@@ -5624,7 +5761,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5624,7 +5764,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, /* Prepare the prefix tag action. */ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); @@ -60755,7 +68665,7 @@ index f34e4b88aa..5d489c7f92 100644 if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool -@@ -5639,6 +5783,13 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5639,6 +5786,13 @@ flow_sample_split_prep(struct rte_eth_dev *dev, tag_spec->id = set_tag->id; tag_mask = tag_spec + 1; tag_mask->data = UINT32_MAX; @@ -60769,7 +68679,7 @@ index f34e4b88aa..5d489c7f92 100644 sfx_items[0] = (struct rte_flow_item){ .type = (enum rte_flow_item_type) MLX5_RTE_FLOW_ITEM_TYPE_TAG, -@@ -5651,13 +5802,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5651,13 +5805,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev, RTE_FLOW_ITEM_TYPE_END, }; /* Prepare the tag action in prefix subflow. */ @@ -60788,7 +68698,35 @@ index f34e4b88aa..5d489c7f92 100644 memcpy(actions_pre + index, actions + sample_action_pos, sizeof(struct rte_flow_action)); index += 1; -@@ -6042,6 +6197,8 @@ flow_create_split_meter(struct rte_eth_dev *dev, +@@ -5956,6 +6114,19 @@ flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev, + &drop_split_info, error); + } + ++static int ++flow_count_vlan_items(const struct rte_flow_item items[]) ++{ ++ int items_n = 0; ++ ++ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { ++ if (items->type == RTE_FLOW_ITEM_TYPE_VLAN || ++ items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) ++ items_n++; ++ } ++ return items_n; ++} ++ + /** + * The splitting for meter feature. + * +@@ -6011,6 +6182,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, + size_t act_size; + size_t item_size; + int actions_n = 0; ++ int vlan_items_n = 0; + int ret = 0; + + if (priv->mtr_en) +@@ -6042,6 +6214,8 @@ flow_create_split_meter(struct rte_eth_dev *dev, fm->policy_id, NULL); MLX5_ASSERT(wks->policy); @@ -60797,7 +68735,7 @@ index f34e4b88aa..5d489c7f92 100644 if (wks->policy->is_hierarchy) { wks->final_policy = mlx5_flow_meter_hierarchy_get_final_policy(dev, -@@ -6065,8 +6222,10 @@ flow_create_split_meter(struct rte_eth_dev *dev, +@@ -6065,12 +6239,16 @@ flow_create_split_meter(struct rte_eth_dev *dev, if (!fm->def_policy && !is_mtr_hierarchy && (!has_modify || !fm->drop_cnt)) set_mtr_reg = false; @@ -60808,9 +68746,18 @@ index f34e4b88aa..5d489c7f92 100644 + act_size = (sizeof(struct rte_flow_action) * + (actions_n + METER_PREFIX_ACTION)) + sizeof(struct mlx5_rte_flow_action_set_tag); - /* Suffix items: tag, vlan, port id, end. */ - #define METER_SUFFIX_ITEM 4 -@@ -6128,7 +6287,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, +- /* Suffix items: tag, vlan, port id, end. */ +-#define METER_SUFFIX_ITEM 4 +- item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + ++ /* Flow can have multiple VLAN items. 
Account for them in suffix items. */ ++ vlan_items_n = flow_count_vlan_items(items); ++ /* Suffix items: tag, [vlans], port id, end. */ ++#define METER_SUFFIX_ITEM 3 ++ item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) + + sizeof(struct mlx5_rte_flow_item_tag) * 2; + sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size), + 0, SOCKET_ID_ANY); +@@ -6128,7 +6306,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, MLX5_FLOW_TABLE_LEVEL_METER; flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); @@ -60819,7 +68766,7 @@ index f34e4b88aa..5d489c7f92 100644 flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX; } /* Add the prefix subflow. */ -@@ -6194,6 +6353,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6194,6 +6372,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, struct mlx5_flow_dv_sample_resource *sample_res; struct mlx5_flow_tbl_data_entry *sfx_tbl_data; struct mlx5_flow_tbl_resource *sfx_tbl; @@ -60827,7 +68774,7 @@ index f34e4b88aa..5d489c7f92 100644 #endif size_t act_size; size_t item_size; -@@ -6207,6 +6367,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6207,6 +6386,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, uint16_t jump_table = 0; const uint32_t next_ft_step = 1; int ret = 0; @@ -60836,7 +68783,7 @@ index f34e4b88aa..5d489c7f92 100644 if (priv->sampler_en) actions_n = flow_check_match_action(actions, attr, -@@ -6226,8 +6388,20 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6226,8 +6407,20 @@ flow_create_split_sample(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no memory to split " "sample flow"); @@ -60858,7 +68805,7 @@ index f34e4b88aa..5d489c7f92 100644 /* * When reg_c_preserve is set, metadata registers Cx preserve * their value even through packet duplication. -@@ -6240,7 +6414,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6240,7 +6433,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR + next_ft_step; pre_actions = sfx_actions + actions_n; @@ -60867,7 +68814,7 @@ index f34e4b88aa..5d489c7f92 100644 actions, sfx_actions, pre_actions, actions_n, sample_action_pos, -@@ -6280,7 +6454,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6280,7 +6473,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); @@ -60877,7 +68824,7 @@ index f34e4b88aa..5d489c7f92 100644 /* Suffix group level already be scaled with factor, set * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale * again in translation. -@@ -6364,36 +6539,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) +@@ -6364,36 +6558,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) return tunnel; } @@ -60914,7 +68861,7 @@ index f34e4b88aa..5d489c7f92 100644 /** * Create a flow and add it to @p list. 
* -@@ -6434,7 +6579,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, +@@ -6434,7 +6598,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS; union { struct mlx5_flow_expand_rss buf; @@ -60923,7 +68870,7 @@ index f34e4b88aa..5d489c7f92 100644 } expand_buffer; union { struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; -@@ -6511,8 +6656,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, +@@ -6511,8 +6675,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, if (attr->ingress && !attr->transfer) rss = flow_get_rss_action(dev, p_actions_rx); if (rss) { @@ -60933,7 +68880,7 @@ index f34e4b88aa..5d489c7f92 100644 /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. -@@ -6643,8 +6787,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, +@@ -6643,8 +6806,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, rte_errno = ret; /* Restore rte_errno. */ ret = rte_errno; rte_errno = ret; @@ -60943,7 +68890,7 @@ index f34e4b88aa..5d489c7f92 100644 rte_free(translated_actions); return 0; } -@@ -6884,7 +7028,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, +@@ -6884,7 +7047,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, * @param type * Flow type to be flushed. * @param active @@ -60952,7 +68899,7 @@ index f34e4b88aa..5d489c7f92 100644 */ void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, -@@ -6945,12 +7089,34 @@ flow_release_workspace(void *data) +@@ -6945,12 +7108,34 @@ flow_release_workspace(void *data) while (wks) { next = wks->next; @@ -60988,7 +68935,7 @@ index f34e4b88aa..5d489c7f92 100644 /** * Get thread specific current flow workspace. * -@@ -6976,24 +7142,17 @@ mlx5_flow_get_thread_workspace(void) +@@ -6976,24 +7161,17 @@ mlx5_flow_get_thread_workspace(void) static struct mlx5_flow_workspace* flow_alloc_thread_workspace(void) { @@ -61019,7 +68966,7 @@ index f34e4b88aa..5d489c7f92 100644 } /** -@@ -7003,7 +7162,7 @@ flow_alloc_thread_workspace(void) +@@ -7003,7 +7181,7 @@ flow_alloc_thread_workspace(void) * * @return pointer to thread specific flow workspace data, NULL on error. */ @@ -61028,7 +68975,7 @@ index f34e4b88aa..5d489c7f92 100644 mlx5_flow_push_thread_workspace(void) { struct mlx5_flow_workspace *curr; -@@ -7014,6 +7173,7 @@ mlx5_flow_push_thread_workspace(void) +@@ -7014,6 +7192,7 @@ mlx5_flow_push_thread_workspace(void) data = flow_alloc_thread_workspace(); if (!data) return NULL; @@ -61036,7 +68983,7 @@ index f34e4b88aa..5d489c7f92 100644 } else if (!curr->inuse) { data = curr; } else if (curr->next) { -@@ -7040,7 +7200,7 @@ mlx5_flow_push_thread_workspace(void) +@@ -7040,7 +7219,7 @@ mlx5_flow_push_thread_workspace(void) * * @return pointer to thread specific flow workspace data, NULL on error. 
*/ @@ -61045,7 +68992,7 @@ index f34e4b88aa..5d489c7f92 100644 mlx5_flow_pop_thread_workspace(void) { struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace(); -@@ -7810,7 +7970,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) +@@ -7810,7 +7989,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) { struct mlx5_counter_stats_mem_mng *mem_mng; volatile struct flow_counter_stats *raw_data; @@ -61054,7 +69001,7 @@ index f34e4b88aa..5d489c7f92 100644 int size = (sizeof(struct flow_counter_stats) * MLX5_COUNTERS_PER_POOL + sizeof(struct mlx5_counter_stats_raw)) * raws_n + -@@ -7848,7 +8008,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) +@@ -7848,7 +8027,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) } for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, @@ -61063,7 +69010,7 @@ index f34e4b88aa..5d489c7f92 100644 next); LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); sh->cmng.mem_mng = mem_mng; -@@ -7872,14 +8032,13 @@ mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh, +@@ -7872,14 +8051,13 @@ mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh, { struct mlx5_flow_counter_mng *cmng = &sh->cmng; /* Resize statistic memory once used out. */ @@ -61080,7 +69027,7 @@ index f34e4b88aa..5d489c7f92 100644 rte_spinlock_unlock(&pool->sl); pool->raw_hw = NULL; return 0; -@@ -7921,13 +8080,14 @@ void +@@ -7921,13 +8099,14 @@ void mlx5_flow_query_alarm(void *arg) { struct mlx5_dev_ctx_shared *sh = arg; @@ -61091,14 +69038,14 @@ index f34e4b88aa..5d489c7f92 100644 struct mlx5_flow_counter_pool *pool; uint16_t n_valid; + int ret; -+ - if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) ++ + if (cmng->pending_queries >= MLX5_MAX_PENDING_QUERIES) goto set_alarm; rte_spinlock_lock(&cmng->pool_update_sl); pool = cmng->pools[pool_index]; -@@ -7939,8 +8099,7 @@ mlx5_flow_query_alarm(void *arg) +@@ -7939,8 +8118,7 @@ mlx5_flow_query_alarm(void *arg) if (pool->raw_hw) /* There is a pool query in progress. */ goto set_alarm; @@ -61108,7 +69055,7 @@ index f34e4b88aa..5d489c7f92 100644 if (!pool->raw_hw) /* No free counter statistics raw memory. */ goto set_alarm; -@@ -7966,12 +8125,12 @@ mlx5_flow_query_alarm(void *arg) +@@ -7966,12 +8144,12 @@ mlx5_flow_query_alarm(void *arg) goto set_alarm; } LIST_REMOVE(pool->raw_hw, next); @@ -61123,7 +69070,7 @@ index f34e4b88aa..5d489c7f92 100644 mlx5_set_query_alarm(sh); } -@@ -8477,23 +8636,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, +@@ -8477,23 +8655,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, } i = lcore_index; @@ -61188,7 +69135,7 @@ index f34e4b88aa..5d489c7f92 100644 } } -@@ -8502,7 +8685,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, +@@ -8502,7 +8704,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, } /* get counter */ @@ -61197,7 +69144,7 @@ index f34e4b88aa..5d489c7f92 100644 max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; for (j = 1; j <= max; j++) { action = NULL; -@@ -8531,7 +8714,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, +@@ -8531,7 +8733,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. PMDs initialize this * structure in case of error only. 
* @return @@ -61206,7 +69153,7 @@ index f34e4b88aa..5d489c7f92 100644 */ int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, -@@ -8785,9 +8968,18 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, +@@ -8785,9 +8987,18 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, const struct mlx5_flow_driver_ops *fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr)); int ret; @@ -61227,7 +69174,7 @@ index f34e4b88aa..5d489c7f92 100644 if (ret) return ret; return flow_drv_action_update(dev, handle, update, fops, -@@ -9009,7 +9201,7 @@ mlx5_get_tof(const struct rte_flow_item *item, +@@ -9009,7 +9220,7 @@ mlx5_get_tof(const struct rte_flow_item *item, } /** @@ -61236,7 +69183,7 @@ index f34e4b88aa..5d489c7f92 100644 */ #ifdef HAVE_IBV_FLOW_DV_SUPPORT __extension__ -@@ -9536,7 +9728,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, +@@ -9536,7 +9747,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, if (!is_tunnel_offload_active(dev)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, @@ -61245,7 +69192,7 @@ index f34e4b88aa..5d489c7f92 100644 if (!tunnel) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, -@@ -9822,10 +10014,27 @@ mlx5_flow_flex_item_create(struct rte_eth_dev *dev, +@@ -9822,10 +10033,27 @@ mlx5_flow_flex_item_create(struct rte_eth_dev *dev, struct rte_flow_error *error) { static const char err_msg[] = "flex item creation unsupported"; @@ -61273,7 +69220,7 @@ index f34e4b88aa..5d489c7f92 100644 if (!fops->item_create) { DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -10012,3 +10221,80 @@ mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, +@@ -10012,3 +10240,80 @@ mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, } return res; } @@ -61772,7 +69719,7 @@ index ddf4328dec..eb7fc43da3 100644 return -1; } diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -index 3da122cbb9..0c66c76ef5 100644 +index 3da122cbb9..b54b4793cc 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c @@ -93,37 +93,6 @@ static int @@ -62177,18 +70124,24 @@ index 3da122cbb9..0c66c76ef5 100644 int ret = 0; struct flow_grp_info grp_info = { .external = !!external, -@@ -5013,6 +5005,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "target group must be other than" - " the current flow group"); +@@ -5006,13 +4998,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, + &grp_info, error); + if (ret) + return ret; +- if (attributes->group == target_group && +- !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | +- MLX5_FLOW_ACTION_TUNNEL_MATCH))) + if (table == 0) -+ return rte_flow_error_set(error, EINVAL, + return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, NULL, +- "target group must be other than" +- " the current flow group"); + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "root table shouldn't be destination"); return 0; } -@@ -5145,7 +5141,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, +@@ -5145,7 +5134,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, * Pointer to error structure. 
* * @return @@ -62197,7 +70150,7 @@ index 3da122cbb9..0c66c76ef5 100644 */ static int mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, -@@ -5230,21 +5226,12 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, +@@ -5230,21 +5219,12 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, */ struct mlx5_priv *policy_port_priv = mtr_policy->dev->data->dev_private; @@ -62223,7 +70176,16 @@ index 3da122cbb9..0c66c76ef5 100644 } if (flow_src_port != policy_port_priv->representor_id) return rte_flow_error_set(error, -@@ -5560,8 +5547,8 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) +@@ -5484,7 +5464,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx) + "cannot allocate resource memory"); + return NULL; + } +- rte_memcpy(&entry->ft_type, ++ rte_memcpy(RTE_PTR_ADD(entry, offsetof(typeof(*entry), ft_type)), + RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), + key_len + data_len); + if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) +@@ -5560,8 +5540,8 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) * Pointer to the RSS action in sample action list. * @param[out] count * Pointer to the COUNT action in sample action list. @@ -62234,7 +70196,7 @@ index 3da122cbb9..0c66c76ef5 100644 * @param[out] error * Pointer to error structure. * -@@ -5570,6 +5557,7 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) +@@ -5570,6 +5550,7 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) */ static int flow_dv_validate_action_sample(uint64_t *action_flags, @@ -62242,7 +70204,7 @@ index 3da122cbb9..0c66c76ef5 100644 const struct rte_flow_action *action, struct rte_eth_dev *dev, const struct rte_flow_attr *attr, -@@ -5577,14 +5565,15 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5577,14 +5558,15 @@ flow_dv_validate_action_sample(uint64_t *action_flags, const struct rte_flow_action_rss *rss, const struct rte_flow_action_rss **sample_rss, const struct rte_flow_action_count **count, @@ -62260,7 +70222,7 @@ index 3da122cbb9..0c66c76ef5 100644 uint16_t queue_index = 0xFFFF; int actions_n = 0; int ret; -@@ -5630,20 +5619,20 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5630,20 +5612,20 @@ flow_dv_validate_action_sample(uint64_t *action_flags, switch (act->type) { case RTE_FLOW_ACTION_TYPE_QUEUE: ret = mlx5_flow_validate_action_queue(act, @@ -62284,7 +70246,7 @@ index 3da122cbb9..0c66c76ef5 100644 dev, attr, item_flags, error); -@@ -5659,48 +5648,57 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5659,48 +5641,57 @@ flow_dv_validate_action_sample(uint64_t *action_flags, "or level in the same flow"); if (*sample_rss != NULL && (*sample_rss)->queue_num) queue_index = (*sample_rss)->queue[0]; @@ -62352,7 +70314,7 @@ index 3da122cbb9..0c66c76ef5 100644 &actions_n, action, item_flags, error); if (ret < 0) return ret; -@@ -5709,12 +5707,12 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5709,12 +5700,12 @@ flow_dv_validate_action_sample(uint64_t *action_flags, case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: ret = flow_dv_validate_action_l2_encap(dev, @@ -62367,7 +70329,7 @@ index 3da122cbb9..0c66c76ef5 100644 ++actions_n; break; default: -@@ -5726,7 +5724,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5726,7 +5717,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, } } if (attr->ingress && !attr->transfer) { @@ -62376,7 +70338,7 @@ index 3da122cbb9..0c66c76ef5 100644 
MLX5_FLOW_ACTION_RSS))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -5748,38 +5746,36 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5748,38 +5739,36 @@ flow_dv_validate_action_sample(uint64_t *action_flags, "E-Switch doesn't support " "any optional action " "for sampling"); @@ -62422,7 +70384,7 @@ index 3da122cbb9..0c66c76ef5 100644 MLX5_FLOW_ACTION_ENCAP)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -5832,7 +5828,8 @@ flow_dv_modify_hdr_resource_register +@@ -5832,7 +5821,8 @@ flow_dv_modify_hdr_resource_register flow_dv_modify_match_cb, flow_dv_modify_remove_cb, flow_dv_modify_clone_cb, @@ -62432,7 +70394,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (unlikely(!modify_cmds)) return -rte_errno; resource->root = !dev_flow->dv.group; -@@ -5874,7 +5871,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, +@@ -5874,7 +5864,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, /* Decrease to original index and clear shared bit. */ idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); @@ -62441,7 +70403,7 @@ index 3da122cbb9..0c66c76ef5 100644 pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; MLX5_ASSERT(pool); if (ppool) -@@ -5950,39 +5947,6 @@ flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id) +@@ -5950,39 +5940,6 @@ flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id) return pool; } @@ -62481,7 +70443,7 @@ index 3da122cbb9..0c66c76ef5 100644 /** * Query a devx flow counter. * -@@ -6034,8 +5998,6 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, +@@ -6034,8 +5991,6 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, * The devX counter handle. * @param[in] age * Whether the pool is for counter that was allocated for aging. @@ -62490,7 +70452,7 @@ index 3da122cbb9..0c66c76ef5 100644 * * @return * The pool container pointer on success, NULL otherwise and rte_errno is set. -@@ -6047,9 +6009,14 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, +@@ -6047,9 +6002,14 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool; struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; @@ -62506,7 +70468,7 @@ index 3da122cbb9..0c66c76ef5 100644 size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE; size += (!age ? 
0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE); pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); -@@ -6068,11 +6035,6 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, +@@ -6068,11 +6028,6 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; rte_spinlock_lock(&cmng->pool_update_sl); pool->index = cmng->n_valid; @@ -62518,7 +70480,22 @@ index 3da122cbb9..0c66c76ef5 100644 cmng->pools[pool->index] = pool; cmng->n_valid++; if (unlikely(fallback)) { -@@ -6714,6 +6676,12 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev, +@@ -6629,11 +6584,13 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, + } + + static int +-validate_integrity_bits(const struct rte_flow_item_integrity *mask, ++validate_integrity_bits(const void *arg, + int64_t pattern_flags, uint64_t l3_flags, + uint64_t l4_flags, uint64_t ip4_flag, + struct rte_flow_error *error) + { ++ const struct rte_flow_item_integrity *mask = arg; ++ + if (mask->l3_ok && !(pattern_flags & l3_flags)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, +@@ -6714,6 +6671,12 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ITEM, integrity_item, "unsupported integrity filter"); @@ -62531,7 +70508,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (spec->level > 1) { if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) return rte_flow_error_set -@@ -6844,14 +6812,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -6844,14 +6807,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, bool external, int hairpin, struct rte_flow_error *error) { int ret; @@ -62548,7 +70525,7 @@ index 3da122cbb9..0c66c76ef5 100644 int modify_after_mirror = 0; const struct rte_flow_item *geneve_item = NULL; const struct rte_flow_item *gre_item = NULL; -@@ -6911,7 +6879,19 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -6911,7 +6874,19 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item *integrity_items[2] = {NULL, NULL}; const struct rte_flow_item *port_id_item = NULL; bool def_policy = false; @@ -62568,7 +70545,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (items == NULL) return -1; -@@ -6953,7 +6933,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -6953,7 +6928,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ITEM_TYPE_PORT_ID: ret = flow_dv_validate_item_port_id @@ -62577,7 +70554,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (ret < 0) return ret; last_item = MLX5_FLOW_ITEM_PORT_ID; -@@ -7203,14 +7183,23 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7203,14 +7178,23 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, last_item = MLX5_FLOW_LAYER_ICMP6; break; case RTE_FLOW_ITEM_TYPE_TAG: @@ -62603,7 +70580,7 @@ index 3da122cbb9..0c66c76ef5 100644 break; case RTE_FLOW_ITEM_TYPE_GTP: ret = flow_dv_validate_item_gtp(dev, items, item_flags, -@@ -7281,7 +7270,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7281,7 +7265,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { int type = actions->type; @@ -62611,7 +70588,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (!mlx5_flow_os_action_supported(type)) 
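
The conditional pool sizing in this hunk packs the fixed counter array and the optional aging records into one allocation. A standalone sketch of the same layout, with invented sizes standing in for the MLX5_* constants (not the driver's actual values):

#include <stddef.h>
#include <stdlib.h>

#define COUNTERS_PER_POOL 512u
#define CNT_SIZE 16u	/* bytes per counter record, assumption */
#define AGE_SIZE 8u	/* bytes of aging state per counter, assumption */

struct counter_pool {
	size_t n_counters;
	/* counter records, then the optional aging records, follow inline */
};

static struct counter_pool *
pool_create(int age)
{
	size_t size = sizeof(struct counter_pool);
	struct counter_pool *pool;

	size += COUNTERS_PER_POOL * CNT_SIZE;
	size += age ? COUNTERS_PER_POOL * AGE_SIZE : 0;
	pool = calloc(1, size);
	if (pool != NULL)
		pool->n_counters = COUNTERS_PER_POOL;
	return pool;
}
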
return rte_flow_error_set(error, ENOTSUP, -@@ -7310,6 +7298,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7310,6 +7293,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret) return ret; @@ -62626,7 +70603,7 @@ index 3da122cbb9..0c66c76ef5 100644 action_flags |= MLX5_FLOW_ACTION_PORT_ID; ++actions_n; break; -@@ -7380,11 +7376,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7380,11 +7371,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ++actions_n; if (action_flags & MLX5_FLOW_ACTION_SAMPLE) modify_after_mirror = 1; @@ -62641,7 +70618,7 @@ index 3da122cbb9..0c66c76ef5 100644 attr, error); if (ret < 0) return ret; -@@ -7438,9 +7436,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7438,9 +7431,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_dv_validate_action_count(dev, shared_count, action_flags, @@ -62653,7 +70630,7 @@ index 3da122cbb9..0c66c76ef5 100644 action_flags |= MLX5_FLOW_ACTION_COUNT; ++actions_n; break; -@@ -7657,12 +7656,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7657,12 +7651,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret) return ret; @@ -62666,7 +70643,7 @@ index 3da122cbb9..0c66c76ef5 100644 ++actions_n; action_flags |= MLX5_FLOW_ACTION_JUMP; break; -@@ -7742,10 +7735,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7742,10 +7730,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "duplicate age actions set"); @@ -62680,7 +70657,7 @@ index 3da122cbb9..0c66c76ef5 100644 ret = flow_dv_validate_action_age(action_flags, actions, dev, error); -@@ -7753,21 +7749,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7753,21 +7744,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; /* * Validate the regular AGE action (using counter) @@ -62708,7 +70685,7 @@ index 3da122cbb9..0c66c76ef5 100644 } action_flags |= MLX5_FLOW_ACTION_AGE; ++actions_n; -@@ -7806,14 +7805,21 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7806,14 +7800,21 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_SAMPLE: ret = flow_dv_validate_action_sample(&action_flags, @@ -62731,7 +70708,7 @@ index 3da122cbb9..0c66c76ef5 100644 action_flags |= MLX5_FLOW_ACTION_SAMPLE; ++actions_n; break; -@@ -7839,6 +7845,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7839,6 +7840,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret < 0) return ret; @@ -62740,7 +70717,7 @@ index 3da122cbb9..0c66c76ef5 100644 action_flags |= MLX5_FLOW_ACTION_CT; break; case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET: -@@ -7858,7 +7866,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7858,7 +7861,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * - Explicit decap action is prohibited by the tunnel offload API. * - Drop action in tunnel steer rule is prohibited by the API. 
 * - Application cannot use MARK action because its value can mask
*/ if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, -@@ -9184,7 +9292,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev, +@@ -9184,7 +9287,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev, geneve_opt_v->option_type && geneve_opt_resource->length == geneve_opt_v->option_len) { @@ -62943,7 +70920,7 @@ index 3da122cbb9..0c66c76ef5 100644 __atomic_fetch_add(&geneve_opt_resource->refcnt, 1, __ATOMIC_RELAXED); } else { -@@ -9708,12 +9816,14 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, +@@ -9708,12 +9811,14 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, { const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL; const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL; @@ -62959,7 +70936,7 @@ index 3da122cbb9..0c66c76ef5 100644 return 0; } mask = pid_m ? pid_m->id : 0xffff; -@@ -9727,6 +9837,7 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, +@@ -9727,6 +9832,7 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, * register. */ if (priv->vport_meta_mask) { @@ -62967,7 +70944,7 @@ index 3da122cbb9..0c66c76ef5 100644 /* * Provide the hint for SW steering library * to insert the flow into ingress domain and -@@ -10170,7 +10281,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key, +@@ -10170,7 +10276,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key, /* Don't count both inner and outer flex items in one rule. */ if (mlx5_flex_acquire_index(dev, spec->handle, true) != index) MLX5_ASSERT(false); @@ -62976,7 +70953,7 @@ index 3da122cbb9..0c66c76ef5 100644 } mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner); } -@@ -10226,7 +10337,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria) +@@ -10226,7 +10332,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria) * Check flow matching criteria first, subtract misc5/4 length if flow * doesn't own misc5/4 parameters. In some old rdma-core releases, * misc5/4 are not supported, and matcher creation failure is expected @@ -62985,7 +70962,7 @@ index 3da122cbb9..0c66c76ef5 100644 * misc5 is right after misc4. 
*/ if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) { -@@ -10514,7 +10625,8 @@ flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) +@@ -10514,7 +10620,8 @@ flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) tbl_data->tunnel->tunnel_id : 0, tbl_data->group_id); } @@ -62995,7 +70972,7 @@ index 3da122cbb9..0c66c76ef5 100644 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); } -@@ -10769,7 +10881,8 @@ flow_dv_tag_resource_register +@@ -10769,7 +10876,8 @@ flow_dv_tag_resource_register flow_dv_tag_match_cb, flow_dv_tag_remove_cb, flow_dv_tag_clone_cb, @@ -63005,7 +70982,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (unlikely(!tag_table)) return -rte_errno; entry = mlx5_hlist_register(tag_table, tag_be24, &ctx); -@@ -11074,6 +11187,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, +@@ -11074,6 +11182,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *dh = dev_flow->handle; @@ -63013,7 +70990,7 @@ index 3da122cbb9..0c66c76ef5 100644 struct mlx5_hrxq *hrxq; MLX5_ASSERT(rss_desc->queue_num); -@@ -11088,6 +11202,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, +@@ -11088,6 +11197,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, return NULL; hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], *hrxq_idx); @@ -63021,7 +70998,7 @@ index 3da122cbb9..0c66c76ef5 100644 return hrxq; } -@@ -11425,7 +11540,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) +@@ -11425,7 +11535,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) goto error; } } @@ -63030,7 +71007,7 @@ index 3da122cbb9..0c66c76ef5 100644 ret = mlx5_os_flow_dr_create_flow_action_dest_array (domain, resource->num_of_dest, -@@ -11660,7 +11775,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, +@@ -11660,7 +11770,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, (((const struct rte_flow_action_mark *) (sub_actions->conf))->id); @@ -63039,7 +71016,7 @@ index 3da122cbb9..0c66c76ef5 100644 pre_rix = dev_flow->handle->dvh.rix_tag; /* Save the mark resource before sample */ pre_r = dev_flow->dv.tag_resource; -@@ -11770,6 +11885,9 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, +@@ -11770,6 +11880,9 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)]; uint64_t set_action; } action_ctx = { .set_action = 0 }; @@ -63049,7 +71026,7 @@ index 3da122cbb9..0c66c76ef5 100644 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; MLX5_SET(set_action_in, action_ctx.action_in, action_type, -@@ -11777,7 +11895,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, +@@ -11777,7 +11890,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, MLX5_SET(set_action_in, action_ctx.action_in, field, MLX5_MODI_META_REG_C_0); MLX5_SET(set_action_in, action_ctx.action_in, data, @@ -63058,7 +71035,7 @@ index 3da122cbb9..0c66c76ef5 100644 res->set_action = action_ctx.set_action; } else if (attr->ingress) { res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; -@@ -11963,7 +12081,7 @@ flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) +@@ -11963,7 +12076,7 @@ flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) } /** @@ -63067,7 +71044,7 @@ index 3da122cbb9..0c66c76ef5 100644 * * @param[in] dev * Pointer to the Ethernet device structure. 
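
The comment closed here describes trimming the matcher parameter buffer when misc4/misc5 are unused, so older rdma-core releases that reject the longer layout still accept the matcher. A minimal sketch of that trimming, with hypothetical sizes and criteria bits in place of the mlx5 definitions:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical layout: misc4 and misc5 sit at the tail of the buffer,
 * misc5 right after misc4, so unused blocks can be cut off the end. */
#define PARAM_BUF_SIZE   512u
#define MISC4_SIZE       64u
#define MISC5_SIZE       64u
#define MISC4_ENABLE_BIT (1u << 4)
#define MISC5_ENABLE_BIT (1u << 5)

static size_t
adjust_buf_size(uint8_t criteria)
{
	size_t size = PARAM_BUF_SIZE;

	/* misc5 is last: drop it first when unused. */
	if (!(criteria & MISC5_ENABLE_BIT)) {
		size -= MISC5_SIZE;
		/* misc4 can only be dropped once misc5 is gone. */
		if (!(criteria & MISC4_ENABLE_BIT))
			size -= MISC4_SIZE;
	}
	return size;
}
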
-@@ -11977,7 +12095,7 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev) +@@ -11977,7 +12090,7 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev) struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; void *old_pools = mng->pools; @@ -63076,7 +71053,7 @@ index 3da122cbb9..0c66c76ef5 100644 uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize; void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); -@@ -12820,7 +12938,7 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -12820,7 +12933,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_FLAG: action_flags |= MLX5_FLOW_ACTION_FLAG; @@ -63085,7 +71062,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { struct rte_flow_action_mark mark = { .id = MLX5_FLOW_MARK_DEFAULT, -@@ -12849,7 +12967,7 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -12849,7 +12962,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; @@ -63094,7 +71071,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { const struct rte_flow_action_mark *mark = (const struct rte_flow_action_mark *) -@@ -13306,8 +13424,7 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -13306,8 +13419,7 @@ flow_dv_translate(struct rte_eth_dev *dev, */ if (action_flags & MLX5_FLOW_ACTION_AGE) { if ((non_shared_age && count) || @@ -63104,7 +71081,7 @@ index 3da122cbb9..0c66c76ef5 100644 /* Creates age by counters. */ cnt_act = flow_dv_prepare_counter (dev, dev_flow, -@@ -13538,11 +13655,13 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -13538,11 +13650,13 @@ flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_ICMP: flow_dv_translate_item_icmp(match_mask, match_value, items, tunnel); @@ -63118,7 +71095,7 @@ index 3da122cbb9..0c66c76ef5 100644 last_item = MLX5_FLOW_LAYER_ICMP6; break; case RTE_FLOW_ITEM_TYPE_TAG: -@@ -13617,12 +13736,14 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -13617,12 +13731,14 @@ flow_dv_translate(struct rte_eth_dev *dev, /* * When E-Switch mode is enabled, we have two cases where we need to * set the source port manually. @@ -63137,7 +71114,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (flow_dv_translate_item_port_id(dev, match_mask, match_value, NULL, attr)) return -rte_errno; -@@ -13999,7 +14120,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, +@@ -13999,7 +14115,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, } dv->actions[n++] = priv->sh->default_miss_action; } @@ -63146,7 +71123,29 @@ index 3da122cbb9..0c66c76ef5 100644 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask); err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, (void *)&dv->value, n, -@@ -14508,7 +14629,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +@@ -14032,7 +14148,8 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dh, next) { + /* hrxq is union, don't clear it if the flag is not set. 
*/ +- if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { ++ if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq && ++ !dh->dvh.rix_sample && !dh->dvh.rix_dest_array) { + mlx5_hrxq_release(dev, dh->rix_hrxq); + dh->rix_hrxq = 0; + } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { +@@ -14492,9 +14609,9 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) + flow_dv_aso_ct_release(dev, flow->ct, NULL); + else if (flow->age) + flow_dv_aso_age_release(dev, flow->age); +- if (flow->geneve_tlv_option) { ++ while (flow->geneve_tlv_option) { + flow_dv_geneve_tlv_option_resource_release(dev); +- flow->geneve_tlv_option = 0; ++ flow->geneve_tlv_option--; + } + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; +@@ -14508,7 +14625,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) int index = rte_bsf32(dev_handle->flex_item); mlx5_flex_release_index(dev, index); @@ -63155,7 +71154,7 @@ index 3da122cbb9..0c66c76ef5 100644 } if (dev_handle->dvh.matcher) flow_dv_matcher_release(dev, dev_handle); -@@ -14607,8 +14728,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, +@@ -14607,8 +14724,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share * same slot in mlx5_rss_hash_fields. * @@ -63166,7 +71165,7 @@ index 3da122cbb9..0c66c76ef5 100644 * @param[in, out] hash_field * hash_field variable needed to be adjusted. * -@@ -14616,10 +14737,10 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, +@@ -14616,10 +14733,10 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, * void */ static void @@ -63179,7 +71178,7 @@ index 3da122cbb9..0c66c76ef5 100644 switch (*hash_field & ~IBV_RX_HASH_INNER) { case MLX5_RSS_HASH_IPV4: -@@ -14721,7 +14842,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, +@@ -14721,7 +14838,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, uint64_t hash_fields = mlx5_rss_hash_fields[i]; int tunnel = 0; @@ -63189,7 +71188,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (shared_rss->origin.level > 1) { hash_fields |= IBV_RX_HASH_INNER; tunnel = 1; -@@ -15455,7 +15577,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, +@@ -15455,7 +15573,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, NULL, "cannot create policy " "mark action for this color"); @@ -63197,7 +71196,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (flow_dv_tag_resource_register(dev, tag_be, &dev_flow, &flow_err)) return -rte_mtr_error_set(error, -@@ -15467,6 +15588,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, +@@ -15467,6 +15584,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, act_cnt->rix_mark = dev_flow.handle->dvh.rix_tag; action_flags |= MLX5_FLOW_ACTION_MARK; @@ -63205,7 +71204,7 @@ index 3da122cbb9..0c66c76ef5 100644 break; } case RTE_FLOW_ACTION_TYPE_SET_TAG: -@@ -15750,6 +15872,8 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, +@@ -15750,6 +15868,8 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, act_cnt->next_sub_policy = NULL; mtr_policy->is_hierarchy = 1; mtr_policy->dev = next_policy->dev; @@ -63214,7 +71213,7 @@ index 3da122cbb9..0c66c76ef5 100644 action_flags |= MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY; break; -@@ -16165,7 +16289,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) +@@ -16165,7 +16285,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) static int __flow_dv_create_policy_flow(struct rte_eth_dev *dev, 
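
The flow_dv_destroy() change in this region turns a one-shot release of the GENEVE TLV option resource into a counted one: a flow may take several references, and each must be dropped. A small sketch of the pattern with an invented resource type:

#include <assert.h>
#include <stdint.h>

struct resource {
	uint32_t refcnt;
};

static void
resource_release(struct resource *res)
{
	assert(res->refcnt > 0);
	res->refcnt--;
}

/* Release exactly as many references as the flow took; the old
 * "if (refs) { release(); refs = 0; }" form leaked all but one. */
static void
flow_release_all(struct resource *res, uint32_t *flow_refs)
{
	while (*flow_refs) {
		resource_release(res);
		(*flow_refs)--;
	}
}
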
uint32_t color_reg_c_idx, @@ -63223,7 +71222,7 @@ index 3da122cbb9..0c66c76ef5 100644 int actions_n, void *actions, bool match_src_port, const struct rte_flow_item *item, void **rule, const struct rte_flow_attr *attr) -@@ -16191,9 +16315,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, +@@ -16191,9 +16311,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, flow_dv_match_meta_reg(matcher.buf, value.buf, (enum modify_reg)color_reg_c_idx, rte_col_2_mlx5_col(color), UINT32_MAX); @@ -63235,7 +71234,7 @@ index 3da122cbb9..0c66c76ef5 100644 actions_n, actions, rule); if (ret) { DRV_LOG(ERR, "Failed to create meter policy%d flow.", color); -@@ -16341,7 +16465,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, +@@ -16341,7 +16461,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, /* Create flow, matching color. */ if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)i, @@ -63244,7 +71243,7 @@ index 3da122cbb9..0c66c76ef5 100644 acts[i].actions_n, acts[i].dv_actions, svport_match, NULL, &color_rule->rule, &attr)) { -@@ -16549,8 +16673,11 @@ flow_dv_create_policy_rules(struct rte_eth_dev *dev, +@@ -16549,8 +16669,11 @@ flow_dv_create_policy_rules(struct rte_eth_dev *dev, struct mlx5_flow_meter_policy *mtr_policy) { int i; @@ -63256,7 +71255,7 @@ index 3da122cbb9..0c66c76ef5 100644 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) { sub_policy_num = (mtr_policy->sub_policy_num >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) & -@@ -16562,10 +16689,13 @@ flow_dv_create_policy_rules(struct rte_eth_dev *dev, +@@ -16562,10 +16685,13 @@ flow_dv_create_policy_rules(struct rte_eth_dev *dev, mtr_policy->sub_policys[i][0], i)) { DRV_LOG(ERR, "Failed to create policy action " "list per domain."); @@ -63272,7 +71271,7 @@ index 3da122cbb9..0c66c76ef5 100644 } static int -@@ -16795,7 +16925,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, +@@ -16795,7 +16921,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, actions[i++] = priv->sh->dr_drop_action; flow_dv_match_meta_reg(matcher_para.buf, value.buf, (enum modify_reg)mtr_id_reg_c, 0, 0); @@ -63281,7 +71280,7 @@ index 3da122cbb9..0c66c76ef5 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow (mtrmng->def_matcher[domain]->matcher_object, -@@ -16840,7 +16970,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, +@@ -16840,7 +16966,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, fm->drop_cnt, NULL); actions[i++] = cnt->action; actions[i++] = priv->sh->dr_drop_action; @@ -63290,7 +71289,7 @@ index 3da122cbb9..0c66c76ef5 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object, (void *)&value, i, actions, -@@ -16880,7 +17010,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, +@@ -16880,7 +17006,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, struct mlx5_meter_policy_action_container *act_cnt; uint32_t domain = MLX5_MTR_DOMAIN_INGRESS; uint16_t sub_policy_num; @@ -63300,7 +71299,7 @@ index 3da122cbb9..0c66c76ef5 100644 rte_spinlock_lock(&mtr_policy->sl); for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { if (!rss_desc[i]) -@@ -16914,7 +17046,8 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, +@@ -16914,7 +17042,8 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, } } /* Create sub policy. */ @@ -63310,7 +71309,7 @@ index 3da122cbb9..0c66c76ef5 100644 /* Reuse the first pre-allocated sub_policy. 
*/ sub_policy = mtr_policy->sub_policys[domain][0]; sub_policy_idx = sub_policy->idx; -@@ -16954,7 +17087,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, +@@ -16954,7 +17083,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, if (act_cnt->rix_mark || act_cnt->modify_hdr) { memset(&dh, 0, sizeof(struct mlx5_flow_handle)); if (act_cnt->rix_mark) @@ -63319,7 +71318,7 @@ index 3da122cbb9..0c66c76ef5 100644 dh.fate_action = MLX5_FLOW_FATE_QUEUE; dh.rix_hrxq = hrxq_idx[i]; flow_drv_rxq_flags_set(dev, &dh); -@@ -17212,7 +17345,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -17212,7 +17341,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, } if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)i, @@ -63328,7 +71327,17 @@ index 3da122cbb9..0c66c76ef5 100644 acts.actions_n, acts.dv_actions, true, item, &color_rule->rule, &attr)) { -@@ -17635,7 +17768,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev, +@@ -17555,8 +17684,7 @@ flow_dv_get_aged_flows(struct rte_eth_dev *dev, + LIST_FOREACH(act, &age_info->aged_aso, next) { + nb_flows++; + if (nb_contexts) { +- context[nb_flows - 1] = +- act->age_params.context; ++ context[nb_flows - 1] = act->age_params.context; + if (!(--nb_contexts)) + break; + } +@@ -17635,7 +17763,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev, "Indirect age action not supported"); return flow_dv_validate_action_age(0, action, dev, err); case RTE_FLOW_ACTION_TYPE_COUNT: @@ -63337,7 +71346,7 @@ index 3da122cbb9..0c66c76ef5 100644 case RTE_FLOW_ACTION_TYPE_CONNTRACK: if (!priv->sh->ct_aso_en) return rte_flow_error_set(err, ENOTSUP, -@@ -17907,7 +18040,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, +@@ -17907,7 +18035,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_DROP: ret = mlx5_flow_validate_action_drop @@ -63346,7 +71355,7 @@ index 3da122cbb9..0c66c76ef5 100644 if (ret < 0) return -rte_mtr_error_set(error, ENOTSUP, -@@ -18229,7 +18362,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, +@@ -18229,7 +18357,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, break; } /* Try to apply the flow to HW. */ @@ -63355,7 +71364,7 @@ index 3da122cbb9..0c66c76ef5 100644 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask); err = mlx5_flow_os_create_flow (flow.handle->dvh.matcher->matcher_object, -@@ -18291,4 +18424,3 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { +@@ -18291,4 +18419,3 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { }; #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ @@ -63769,7 +71778,7 @@ index 29cd694752..0fe2c9af5a 100644 /* Other members of attr will be ignored. */ dev_flow->verbs.attr.priority = diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c -index e8215f7381..ab88f2526f 100644 +index e8215f7381..eea6a5c6c7 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rx.c +++ b/dpdk/drivers/net/mlx5/mlx5_rx.c @@ -34,7 +34,8 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, @@ -63966,7 +71975,7 @@ index e8215f7381..ab88f2526f 100644 } } -@@ -536,17 +578,24 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +@@ -536,17 +578,25 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) * @param[out] mcqe * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not * written. @@ -63977,7 +71986,8 @@ index e8215f7381..ab88f2526f 100644 + * Indication if it is called from MPRQ. 
* @return - * 0 in case of empty CQE, otherwise the packet size in bytes. -+ * 0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, ++ * 0 in case of empty CQE, ++ * MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, + * MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset, + * otherwise the packet size in regular RxQ, + * and striding byte count format in mprq case. @@ -63995,7 +72005,7 @@ index e8215f7381..ab88f2526f 100644 uint16_t idx, end; do { -@@ -595,7 +644,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +@@ -595,7 +645,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, * compressed. */ } else { @@ -64003,7 +72013,7 @@ index e8215f7381..ab88f2526f 100644 int8_t op_own; uint32_t cq_ci; -@@ -603,10 +651,12 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +@@ -603,10 +652,17 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { if (unlikely(ret == MLX5_CQE_STATUS_ERR || rxq->err_state)) { @@ -64017,10 +72027,15 @@ index e8215f7381..ab88f2526f 100644 + if (ret == MLX5_RECOVERY_ERROR_RET || + ret == MLX5_RECOVERY_COMPLETED_RET) + return MLX5_CRITICAL_ERROR_CQE_RET; ++ if (!mprq && ret == MLX5_RECOVERY_IGNORE_RET) { ++ *skip_cnt = 1; ++ ++rxq->cq_ci; ++ return MLX5_ERROR_CQE_MASK; ++ } } else { return 0; } -@@ -659,8 +709,15 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +@@ -659,8 +715,15 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, } } if (unlikely(rxq->err_state)) { @@ -64036,7 +72051,7 @@ index e8215f7381..ab88f2526f 100644 } else { return len; } -@@ -812,6 +869,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -812,6 +875,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) int len = 0; /* keep its value across iterations. 
*/ while (pkts_n) { @@ -64044,7 +72059,7 @@ index e8215f7381..ab88f2526f 100644 unsigned int idx = rq_ci & wqe_cnt; volatile struct mlx5_wqe_data_seg *wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; -@@ -850,8 +908,23 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -850,8 +914,22 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } if (!pkt) { cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; @@ -64052,25 +72067,24 @@ index e8215f7381..ab88f2526f 100644 - if (!len) { + len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe, &skip_cnt, false); + if (unlikely(len & MLX5_ERROR_CQE_MASK)) { ++ /* We drop packets with non-critical errors */ ++ rte_mbuf_raw_free(rep); + if (len == MLX5_CRITICAL_ERROR_CQE_RET) { -+ rte_mbuf_raw_free(rep); + rq_ci = rxq->rq_ci << sges_n; + break; + } ++ /* Skip specified amount of error CQEs packets */ + rq_ci >>= sges_n; + rq_ci += skip_cnt; + rq_ci <<= sges_n; -+ idx = rq_ci & wqe_cnt; -+ wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; -+ seg = (*rxq->elts)[idx]; -+ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; -+ len = len & ~MLX5_ERROR_CQE_MASK; ++ MLX5_ASSERT(!pkt); ++ continue; + } + if (len == 0) { rte_mbuf_raw_free(rep); break; } -@@ -954,6 +1027,7 @@ mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, +@@ -954,6 +1032,7 @@ mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, tcp->cksum = 0; csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4); csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); @@ -64078,7 +72092,7 @@ index e8215f7381..ab88f2526f 100644 csum = (~csum) & 0xffff; if (csum == 0) csum = 0xffff; -@@ -1045,8 +1119,8 @@ uint16_t +@@ -1045,8 +1124,8 @@ uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) { struct mlx5_rxq_data *rxq = dpdk_rxq; @@ -64089,7 +72103,7 @@ index e8215f7381..ab88f2526f 100644 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1; const uint32_t wq_mask = (1 << rxq->elts_n) - 1; volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; -@@ -1062,6 +1136,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1062,6 +1141,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) uint16_t strd_cnt; uint16_t strd_idx; uint32_t byte_cnt; @@ -64097,7 +72111,7 @@ index e8215f7381..ab88f2526f 100644 volatile struct mlx5_mini_cqe8 *mcqe = NULL; enum mlx5_rqx_code rxq_code; -@@ -1074,8 +1149,25 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1074,8 +1154,25 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; } cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; @@ -66508,10 +74522,57 @@ index f3d838389e..ab69af0c55 100644 uint32_t mask = rxq->flow_meta_port_mask; diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c -index 732775954a..615e1d073d 100644 +index 732775954a..f4ac58e2f9 100644 --- a/dpdk/drivers/net/mlx5/mlx5_stats.c +++ b/dpdk/drivers/net/mlx5/mlx5_stats.c -@@ -114,18 +114,23 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -39,24 +39,36 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) + { + struct mlx5_priv *priv = dev->data->dev_private; +- unsigned int i; +- uint64_t counters[n]; ++ uint64_t counters[MLX5_MAX_XSTATS]; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; ++ unsigned int i; ++ uint16_t stats_n = 0; ++ uint16_t stats_n_2nd = 
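
The @return contract documented in this region packs several outcomes into one unsigned value: zero for an empty CQE, a real byte count for a good packet, and error codes flagged by a high mask bit that no valid length can reach. A sketch of decoding such a value, with invented constants mirroring the MLX5_*_CQE_RET scheme:

#include <stdint.h>
#include <stdio.h>

#define ERROR_CQE_MASK          (1u << 31)
#define REGULAR_ERROR_CQE_RET   (ERROR_CQE_MASK | 1u)
#define CRITICAL_ERROR_CQE_RET  (ERROR_CQE_MASK | 2u)

static void
handle_poll_result(uint32_t ret)
{
	if (ret & ERROR_CQE_MASK) {
		if (ret == CRITICAL_ERROR_CQE_RET)
			printf("critical error: queue needs a reset\n");
		else
			printf("recoverable error: drop and skip\n");
	} else if (ret == 0) {
		printf("empty CQE, nothing to do\n");
	} else {
		printf("packet of %u bytes\n", ret);
	}
}
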
0; + uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + + if (n >= mlx5_stats_n && stats) { +- int stats_n; + int ret; + +- stats_n = mlx5_os_get_stats_n(dev); +- if (stats_n < 0) +- return stats_n; +- if (xstats_ctrl->stats_n != stats_n) ++ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); ++ if (ret < 0) ++ return ret; ++ /* ++ * The number of statistics fetched via "ETH_SS_STATS" may vary because ++ * of the port configuration each time. This is also true between 2 ++ * ports. There might be a case that the numbers are the same even if ++ * configurations are different. ++ * It is not recommended to change the configuration without using ++ * RTE API. The port(traffic) restart may trigger another initialization ++ * to make sure the map are correct. ++ */ ++ if (xstats_ctrl->stats_n != stats_n || ++ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) + mlx5_os_stats_init(dev); +- ret = mlx5_os_read_dev_counters(dev, counters); +- if (ret) ++ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); ++ if (ret < 0) + return ret; +- for (i = 0; i != mlx5_stats_n; ++i) { ++ for (i = 0; i != mlx5_stats_n; i++) { + stats[i].id = i; + if (xstats_ctrl->info[i].dev) { + uint64_t wrap_n; +@@ -114,18 +126,23 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) idx = rxq->idx; if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { #ifdef MLX5_PMD_SOFT_COUNTERS @@ -66542,7 +74603,7 @@ index 732775954a..615e1d073d 100644 } for (i = 0; (i != priv->txqs_n); ++i) { struct mlx5_txq_data *txq = (*priv->txqs)[i]; -@@ -135,15 +140,17 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -135,15 +152,17 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) idx = txq->idx; if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { #ifdef MLX5_PMD_SOFT_COUNTERS @@ -66565,7 +74626,7 @@ index 732775954a..615e1d073d 100644 } ret = mlx5_os_read_dev_stat(priv, "out_of_buffer", &tmp.imissed); if (ret == 0) { -@@ -185,13 +192,14 @@ mlx5_stats_reset(struct rte_eth_dev *dev) +@@ -185,13 +204,14 @@ mlx5_stats_reset(struct rte_eth_dev *dev) if (rxq_data == NULL) continue; @@ -66584,7 +74645,52 @@ index 732775954a..615e1d073d 100644 } mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); stats_ctrl->imissed = 0; -@@ -280,10 +288,9 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev, +@@ -217,30 +237,32 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; +- int stats_n; + unsigned int i; + uint64_t *counters; + int ret; ++ uint16_t stats_n = 0; ++ uint16_t stats_n_2nd = 0; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + +- stats_n = mlx5_os_get_stats_n(dev); +- if (stats_n < 0) { ++ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); ++ if (ret < 0) { + DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, +- strerror(-stats_n)); +- return stats_n; ++ strerror(-ret)); ++ return ret; + } +- if (xstats_ctrl->stats_n != stats_n) ++ if (xstats_ctrl->stats_n != stats_n || ++ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) + mlx5_os_stats_init(dev); +- counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * +- xstats_ctrl->mlx5_stats_n, 0, +- SOCKET_ID_ANY); ++ /* Considering to use stack directly. 
*/ ++ counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * xstats_ctrl->mlx5_stats_n, ++ 0, SOCKET_ID_ANY); + if (!counters) { +- DRV_LOG(WARNING, "port %u unable to allocate memory for xstats " +- "counters", ++ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats counters", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; + } +- ret = mlx5_os_read_dev_counters(dev, counters); ++ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); + if (ret) { + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); +@@ -280,10 +302,9 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev, if (n >= mlx5_xstats_n && xstats_names) { for (i = 0; i != mlx5_xstats_n; ++i) { @@ -67053,7 +75159,7 @@ index af77e91e4c..83d17997d1 100644 } return n_used + n_txpp; diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c -index e4e66ae4c5..b295702fd4 100644 +index e4e66ae4c5..bbe07cd7a3 100644 --- a/dpdk/drivers/net/mlx5/mlx5_utils.c +++ b/dpdk/drivers/net/mlx5/mlx5_utils.c @@ -340,6 +340,8 @@ mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx) @@ -67065,7 +75171,17 @@ index e4e66ae4c5..b295702fd4 100644 for (i = 0; i < fetch_size; i++) lc->idx[i] = ts_idx + i; lc->len = fetch_size; -@@ -477,7 +479,7 @@ _mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx) +@@ -377,7 +379,8 @@ _mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx) + idx -= 1; + trunk_idx = mlx5_trunk_idx_get(pool, idx); + trunk = lc->trunks[trunk_idx]; +- MLX5_ASSERT(trunk); ++ if (!trunk) ++ return NULL; + entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx); + return &trunk->data[entry_idx * pool->cfg.size]; + } +@@ -477,7 +480,7 @@ _mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx) mlx5_ipool_lock(pool); gc = pool->gc; if (ilc->lc != gc) { @@ -67074,7 +75190,7 @@ index e4e66ae4c5..b295702fd4 100644 olc = ilc->lc; gc->ref_cnt++; ilc->lc = gc; -@@ -1184,44 +1186,3 @@ mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, +@@ -1184,44 +1187,3 @@ mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, rte_spinlock_unlock(&tbl->sl); return ret; } @@ -67167,6 +75283,65 @@ index cf3db89403..254c879d1a 100644 /** * This function decreases and clear index entry if reference * counter is 0 from Three-level table. +diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c +index 359f73df7c..847e052ba0 100644 +--- a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c ++++ b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c +@@ -177,20 +177,29 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + return -ENOTSUP; + } + +-/** ++/* + * Query the number of statistics provided by ETHTOOL. + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param n_stats ++ * Pointer to number of stats to store. ++ * @param n_stats_sec ++ * Pointer to number of stats to store for the 2nd port of the bond. + * + * @return +- * Number of statistics on success, negative errno value otherwise and +- * rte_errno is set. ++ * 0 on success, negative errno value otherwise and rte_errno is set. 
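
This reset path sizes a scratch buffer from the runtime counter count and bails out cleanly when the allocation fails. The same shape in plain libc, with calloc standing in for mlx5_malloc:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

static int
reset_counters(unsigned int n_counters)
{
	uint64_t *counters = calloc(n_counters, sizeof(*counters));

	if (counters == NULL)
		return -ENOMEM;
	/* ... read the hardware counters into the scratch buffer and
	 * store them as the new baseline ... */
	free(counters);
	return 0;
}
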
+ */ + int +-mlx5_os_get_stats_n(struct rte_eth_dev *dev) ++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec) + { + RTE_SET_USED(dev); ++ RTE_SET_USED(bond_master); ++ RTE_SET_USED(n_stats); ++ RTE_SET_USED(n_stats_sec); + return -ENOTSUP; + } + +@@ -211,6 +220,8 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. + * @param[out] stats + * Counters table output buffer. + * +@@ -219,9 +230,10 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + * rte_errno is set. + */ + int +-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) ++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) + { + RTE_SET_USED(dev); ++ RTE_SET_USED(bond_master); + RTE_SET_USED(stats); + return -ENOTSUP; + } diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c index c4d5790726..0afe74cea8 100644 --- a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c @@ -67317,10 +75492,20 @@ index dec4b923d0..ba99901c5c 100644 goto error; } diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -index 10fe6d828c..d79d069120 100644 +index 10fe6d828c..309336eec3 100644 --- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c +++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -@@ -247,7 +247,7 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +@@ -203,7 +203,8 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, +- RTE_PTYPE_L4_UDP ++ RTE_PTYPE_L4_UDP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; +@@ -247,7 +248,7 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) (mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) { mru = mbuf_data_size - MRVL_NETA_PKT_OFFS; mtu = MRVL_NETA_MRU_TO_MTU(mru); @@ -67329,7 +75514,7 @@ index 10fe6d828c..d79d069120 100644 " current mbuf size: %u. 
Set MTU to %u, MRU to %u", mbuf_data_size, mtu, mru); } -@@ -381,6 +381,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) +@@ -381,6 +382,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) goto out; } @@ -67340,7 +75525,7 @@ index 10fe6d828c..d79d069120 100644 /* start tx queues */ for (i = 0; i < dev->data->nb_tx_queues; i++) dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; -@@ -405,6 +409,7 @@ static int +@@ -405,6 +410,7 @@ static int mvneta_dev_stop(struct rte_eth_dev *dev) { struct mvneta_priv *priv = dev->data->dev_private; @@ -67348,7 +75533,7 @@ index 10fe6d828c..d79d069120 100644 dev->data->dev_started = 0; -@@ -417,6 +422,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) +@@ -417,6 +423,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) priv->ppio = NULL; @@ -67379,7 +75564,7 @@ index 6e4a7896b4..952e982275 100644 if (ret) { MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc); diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c -index 9c7fe13f7f..a1c800aaf8 100644 +index 9c7fe13f7f..2133fb7717 100644 --- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c @@ -579,7 +579,7 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) @@ -67443,6 +75628,16 @@ index 9c7fe13f7f..a1c800aaf8 100644 } /** +@@ -1771,7 +1782,8 @@ mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L4_TCP, +- RTE_PTYPE_L4_UDP ++ RTE_PTYPE_L4_UDP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; diff --git a/dpdk/drivers/net/mvpp2/mrvl_qos.c b/dpdk/drivers/net/mvpp2/mrvl_qos.c index dbfc3b5d20..99f0ee56d1 100644 --- a/dpdk/drivers/net/mvpp2/mrvl_qos.c @@ -67730,7 +75925,7 @@ index 1b63b27e0c..1ad255507e 100644 "missing RNDIS header %u", len); return; diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c -index 028f176c7e..7a3bd523a5 100644 +index 028f176c7e..aba206565d 100644 --- a/dpdk/drivers/net/netvsc/hn_rxtx.c +++ b/dpdk/drivers/net/netvsc/hn_rxtx.c @@ -578,11 +578,11 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, @@ -67747,7 +75942,18 @@ index 028f176c7e..7a3bd523a5 100644 iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf); shinfo = &rxb->shinfo; -@@ -765,8 +765,8 @@ hn_nvs_handle_rxbuf(struct rte_eth_dev *dev, +@@ -614,7 +614,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + RTE_PTYPE_L4_MASK); + + if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { +- m->vlan_tci = info->vlan_info; ++ m->vlan_tci = RTE_VLAN_TCI_MAKE(NDIS_VLAN_INFO_ID(info->vlan_info), ++ NDIS_VLAN_INFO_PRI(info->vlan_info), ++ NDIS_VLAN_INFO_CFI(info->vlan_info)); + m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; + + /* NDIS always strips tag, put it back if necessary */ +@@ -765,8 +767,8 @@ hn_nvs_handle_rxbuf(struct rte_eth_dev *dev, { const struct vmbus_chanpkt_rxbuf *pkt; const struct hn_nvs_hdr *nvs_hdr = buf; @@ -67758,7 +75964,7 @@ index 028f176c7e..7a3bd523a5 100644 unsigned int i, hlen, count; struct hn_rx_bufinfo *rxb; -@@ -1031,7 +1031,7 @@ hn_dev_rx_queue_count(void *rx_queue) +@@ -1031,7 +1033,7 @@ hn_dev_rx_queue_count(void *rx_queue) * returns: * - -EINVAL - offset outside of ring * - RTE_ETH_RX_DESC_AVAIL - no data available yet @@ -67767,7 +75973,7 @@ index 028f176c7e..7a3bd523a5 100644 */ int hn_dev_rx_queue_status(void *arg, uint16_t offset) { -@@ -1266,7 +1266,7 @@ hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, +@@ -1266,7 +1268,7 @@ hn_try_txagg(struct 
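
The mvneta MTU handler in this hunk clamps an over-large request so that one received frame, plus the packet offset, always fits a single mbuf, then logs the adjusted values. A sketch of that clamp with invented stand-ins for the MRVL_NETA_* conversion macros (the real L2 overhead and offset differ):

#include <stdint.h>
#include <stdio.h>

#define L2_OVERHEAD  18u   /* Ethernet header + CRC, assumption */
#define PKT_OFFSET   64u   /* headroom reserved in each mbuf, assumption */
#define MTU_TO_MRU(mtu) ((mtu) + L2_OVERHEAD)
#define MRU_TO_MTU(mru) ((mru) - L2_OVERHEAD)

static uint16_t
clamp_mtu(uint16_t requested_mtu, uint16_t mbuf_data_size)
{
	uint16_t mru = MTU_TO_MRU(requested_mtu);

	/* The received frame plus headroom must fit one mbuf. */
	if (mru + PKT_OFFSET > mbuf_data_size) {
		mru = mbuf_data_size - PKT_OFFSET;
		requested_mtu = MRU_TO_MTU(mru);
		printf("MTU too big for mbuf size %u, clamped to %u (MRU %u)\n",
		       (unsigned)mbuf_data_size, (unsigned)requested_mtu,
		       (unsigned)mru);
	}
	return requested_mtu;
}
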
hn_data *hv, struct hn_tx_queue *txq, if (txd->chim_index == NVS_CHIM_IDX_INVALID) return NULL; @@ -67776,7 +75982,18 @@ index 028f176c7e..7a3bd523a5 100644 + txd->chim_index * hv->chim_szmax; txq->agg_txd = txd; -@@ -1348,8 +1348,11 @@ static void hn_encap(struct rndis_packet_msg *pkt, +@@ -1334,7 +1336,9 @@ static void hn_encap(struct rndis_packet_msg *pkt, + if (m->ol_flags & RTE_MBUF_F_TX_VLAN) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, + NDIS_PKTINFO_TYPE_VLAN); +- *pi_data = m->vlan_tci; ++ *pi_data = NDIS_VLAN_INFO_MAKE(RTE_VLAN_TCI_ID(m->vlan_tci), ++ RTE_VLAN_TCI_PRI(m->vlan_tci), ++ RTE_VLAN_TCI_DEI(m->vlan_tci)); + } + + if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { +@@ -1348,8 +1352,11 @@ static void hn_encap(struct rndis_packet_msg *pkt, *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen, m->tso_segsz); } @@ -67790,7 +76007,7 @@ index 028f176c7e..7a3bd523a5 100644 pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM); *pi_data = 0; -@@ -1363,9 +1366,11 @@ static void hn_encap(struct rndis_packet_msg *pkt, +@@ -1363,9 +1370,11 @@ static void hn_encap(struct rndis_packet_msg *pkt, *pi_data |= NDIS_TXCSUM_INFO_IPCS; } @@ -67990,6 +76207,18 @@ index 2d0b613d21..ca6e4d5578 100644 } return 0; +diff --git a/dpdk/drivers/net/nfp/meson.build b/dpdk/drivers/net/nfp/meson.build +index 810f02ae5b..b39ae1fa2a 100644 +--- a/dpdk/drivers/net/nfp/meson.build ++++ b/dpdk/drivers/net/nfp/meson.build +@@ -4,6 +4,7 @@ + if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported on 64-bit Linux' ++ subdir_done() + endif + sources = files( + 'nfpcore/nfp_cpp_pcie_ops.c', diff --git a/dpdk/drivers/net/nfp/nfp_common.c b/dpdk/drivers/net/nfp/nfp_common.c index f8978e803a..f73f7dd0be 100644 --- a/dpdk/drivers/net/nfp/nfp_common.c @@ -68169,10 +76398,18 @@ index 8b35fa119c..8c39b84c36 100644 void nfp_net_dev_interrupt_delayed_handler(void *param); int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c -index 8e81cc498f..e1da0bdebe 100644 +index 8e81cc498f..84562d92fd 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev.c -@@ -132,6 +132,13 @@ nfp_net_start(struct rte_eth_dev *dev) +@@ -89,6 +89,7 @@ nfp_net_start(struct rte_eth_dev *dev) + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + uint32_t intr_vector; ++ uint16_t i; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); +@@ -132,6 +133,13 @@ nfp_net_start(struct rte_eth_dev *dev) update = NFP_NET_CFG_UPDATE_MSIX; } @@ -68186,7 +76423,7 @@ index 8e81cc498f..e1da0bdebe 100644 rte_intr_enable(intr_handle); new_ctrl = nfp_check_offloads(dev); -@@ -160,6 +167,8 @@ nfp_net_start(struct rte_eth_dev *dev) +@@ -160,6 +168,8 @@ nfp_net_start(struct rte_eth_dev *dev) if (nfp_net_reconfig(hw, new_ctrl, update) < 0) return -EIO; @@ -68195,16 +76432,19 @@ index 8e81cc498f..e1da0bdebe 100644 /* * Allocating rte mbufs for configured rx queues. 
* This requires queues being enabled before -@@ -176,8 +185,6 @@ nfp_net_start(struct rte_eth_dev *dev) +@@ -176,7 +186,10 @@ nfp_net_start(struct rte_eth_dev *dev) nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1); - hw->ctrl = new_ctrl; -- ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; - error: -@@ -302,11 +309,13 @@ nfp_net_close(struct rte_eth_dev *dev) +@@ -302,11 +315,13 @@ nfp_net_close(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; nfp_net_reset_tx_queue(this_tx_q); @@ -68218,7 +76458,7 @@ index 8e81cc498f..e1da0bdebe 100644 } /* Cancel possible impending LSC work here before releasing the port*/ -@@ -500,6 +509,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -500,6 +515,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; @@ -68226,7 +76466,7 @@ index 8e81cc498f..e1da0bdebe 100644 /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) -@@ -585,6 +595,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -585,6 +601,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) (void *)eth_dev); /* Telling the firmware about the LSC interrupt entry */ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); @@ -68235,7 +76475,7 @@ index 8e81cc498f..e1da0bdebe 100644 /* Recording current stats counters values */ nfp_net_stats_reset(eth_dev); } -@@ -627,7 +639,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +@@ -627,7 +645,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) goto load_fw; /* Then try the PCI name */ snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, @@ -68244,7 +76484,7 @@ index 8e81cc498f..e1da0bdebe 100644 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) -@@ -662,7 +674,9 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, +@@ -662,7 +680,9 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, char card_desc[100]; int err = 0; @@ -68255,7 +76495,7 @@ index 8e81cc498f..e1da0bdebe 100644 if (nfp_fw_model) { PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); -@@ -788,6 +802,7 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -788,6 +808,7 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) { struct nfp_pf_dev *pf_dev = NULL; struct nfp_cpp *cpp; @@ -68263,7 +76503,7 @@ index 8e81cc498f..e1da0bdebe 100644 struct nfp_hwinfo *hwinfo; struct nfp_rtsym_table *sym_tbl; struct nfp_eth_table *nfp_eth_table = NULL; -@@ -795,6 +810,7 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -795,6 +816,7 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) int total_ports; int ret = -ENODEV; int err; @@ -68271,7 +76511,7 @@ index 8e81cc498f..e1da0bdebe 100644 if (!pci_dev) return ret; -@@ -831,6 +847,10 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -831,6 +853,10 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) goto hwinfo_cleanup; } @@ -68282,7 +76522,7 @@ index 8e81cc498f..e1da0bdebe 100644 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { PMD_INIT_LOG(ERR, "Error when uploading firmware"); ret = -EIO; -@@ -892,7 +912,8 @@ static int 
nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -892,7 +918,8 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); /* configure access to tx/rx vNIC BARs */ @@ -68292,11 +76532,28 @@ index 8e81cc498f..e1da0bdebe 100644 NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ, &pf_dev->hwqueues_area); +@@ -919,7 +946,7 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) + return 0; + + hwqueues_cleanup: +- nfp_cpp_area_free(pf_dev->hwqueues_area); ++ nfp_cpp_area_release_free(pf_dev->hwqueues_area); + ctrl_area_cleanup: + nfp_cpp_area_free(pf_dev->ctrl_area); + pf_cleanup: diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -index 303ef72b1b..0dda3961ce 100644 +index 303ef72b1b..a61c73a96e 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -@@ -122,6 +122,8 @@ nfp_netvf_start(struct rte_eth_dev *dev) +@@ -57,6 +57,7 @@ nfp_netvf_start(struct rte_eth_dev *dev) + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + uint32_t intr_vector; ++ uint16_t i; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); +@@ -122,6 +123,8 @@ nfp_netvf_start(struct rte_eth_dev *dev) if (nfp_net_reconfig(hw, new_ctrl, update) < 0) return -EIO; @@ -68305,16 +76562,19 @@ index 303ef72b1b..0dda3961ce 100644 /* * Allocating rte mbufs for configured rx queues. * This requires queues being enabled before -@@ -131,8 +133,6 @@ nfp_netvf_start(struct rte_eth_dev *dev) +@@ -131,7 +134,10 @@ nfp_netvf_start(struct rte_eth_dev *dev) goto error; } - hw->ctrl = new_ctrl; -- ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; - error: -@@ -219,11 +219,13 @@ nfp_netvf_close(struct rte_eth_dev *dev) +@@ -219,11 +225,13 @@ nfp_netvf_close(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; nfp_net_reset_tx_queue(this_tx_q); @@ -68328,7 +76588,7 @@ index 303ef72b1b..0dda3961ce 100644 } rte_intr_disable(pci_dev->intr_handle); -@@ -367,6 +369,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -367,6 +375,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; @@ -68336,7 +76596,7 @@ index 303ef72b1b..0dda3961ce 100644 /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) -@@ -451,6 +454,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -451,6 +460,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) (void *)eth_dev); /* Telling the firmware about the LSC interrupt entry */ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); @@ -68674,6 +76934,19 @@ index c0516bf8e8..9f848bde79 100644 if (nfp_hwinfo_is_updating(header)) goto exit_free; +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c +index 318c5800d7..b9de16e889 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c +@@ -151,7 +151,7 @@ nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + if (tmp != key) + return NFP_ERRPTR(EEXIST); + +- mutex = calloc(sizeof(*mutex), 1); ++ mutex = calloc(1, sizeof(*mutex)); + if (!mutex) + return NFP_ERRPTR(ENOMEM); + diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h 
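
Alongside the BAR mapping in this region, the nfp start paths now record per-queue state so ethdev queries report started queues; the stop paths mirror it with the stopped state. The bookkeeping reduces to two loops; a sketch with a trimmed-down stand-in for rte_eth_dev_data (at most 16 queues here):

#include <stdint.h>

#define QUEUE_STATE_STOPPED 0
#define QUEUE_STATE_STARTED 1

struct eth_dev_data {
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;
	uint8_t rx_queue_state[16];
	uint8_t tx_queue_state[16];
};

/* After the device itself is up, mark every configured queue started. */
static void
mark_queues_started(struct eth_dev_data *data)
{
	uint16_t i;

	for (i = 0; i < data->nb_rx_queues; i++)
		data->rx_queue_state[i] = QUEUE_STATE_STARTED;
	for (i = 0; i < data->nb_tx_queues; i++)
		data->tx_queue_state[i] = QUEUE_STATE_STARTED;
}
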
b/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h index c9c7b0d0fb..e74cdeb191 100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h @@ -68975,6 +77248,18 @@ index 2884a0034f..e8dcc9ad1e 100644 return NFP_ERRNO(EINVAL); if (island == 0) { +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_devids.h b/dpdk/drivers/net/ngbe/base/ngbe_devids.h +index 6010cc050e..75fb6d702a 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_devids.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_devids.h +@@ -77,6 +77,7 @@ + #define NGBE_LY_M88E1512_SFP 0x0050 + #define NGBE_YT8521S_SFP 0x0060 + #define NGBE_LY_YT8521S_SFP 0x0070 ++#define NGBE_RGMII_FPGA 0x0080 + #define NGBE_WOL_SUP 0x4000 + #define NGBE_NCSI_SUP 0x8000 + diff --git a/dpdk/drivers/net/ngbe/base/ngbe_dummy.h b/dpdk/drivers/net/ngbe/base/ngbe_dummy.h index 61b0d82bfb..d74c9f7b54 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_dummy.h @@ -69108,7 +77393,7 @@ index f9a876e9bd..104501fa7a 100644 hw->rom.read32(hw, offset, &eeprom_verl); etrack_id = eeprom_verl; diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.c b/dpdk/drivers/net/ngbe/base/ngbe_hw.c -index 0716357725..08a7e02943 100644 +index 0716357725..abfe800caf 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_hw.c +++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.c @@ -20,8 +20,6 @@ s32 ngbe_start_hw(struct ngbe_hw *hw) @@ -69156,7 +77441,17 @@ index 0716357725..08a7e02943 100644 /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.stop_hw(hw); if (status != 0) -@@ -205,8 +199,6 @@ s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw) +@@ -179,6 +173,9 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw) + ngbe_reset_misc_em(hw); + hw->mac.clear_hw_cntrs(hw); + ++ if (!((hw->sub_device_id & NGBE_OEM_MASK) == NGBE_RGMII_FPGA)) ++ hw->phy.set_phy_power(hw, false); ++ + msec_delay(50); + + /* Store the permanent mac address */ +@@ -205,8 +202,6 @@ s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw) { u16 i = 0; @@ -69165,7 +77460,7 @@ index 0716357725..08a7e02943 100644 /* QP Stats */ /* don't write clear queue stats */ for (i = 0; i < NGBE_MAX_QP; i++) { -@@ -305,8 +297,6 @@ s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr) +@@ -305,8 +300,6 @@ s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr) u32 rar_low; u16 i; @@ -69174,7 +77469,7 @@ index 0716357725..08a7e02943 100644 wr32(hw, NGBE_ETHADDRIDX, 0); rar_high = rd32(hw, NGBE_ETHADDRH); rar_low = rd32(hw, NGBE_ETHADDRL); -@@ -332,8 +322,6 @@ void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw) +@@ -332,8 +325,6 @@ void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw) struct ngbe_bus_info *bus = &hw->bus; u32 reg = 0; @@ -69183,7 +77478,7 @@ index 0716357725..08a7e02943 100644 reg = rd32(hw, NGBE_PORTSTAT); bus->lan_id = NGBE_PORTSTAT_ID(reg); bus->func = bus->lan_id; -@@ -350,10 +338,8 @@ void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw) +@@ -350,10 +341,8 @@ void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw) **/ s32 ngbe_stop_hw(struct ngbe_hw *hw) { @@ -69195,7 +77490,7 @@ index 0716357725..08a7e02943 100644 /* * Set the adapter_stopped flag so other driver functions stop touching -@@ -372,16 +358,27 @@ s32 ngbe_stop_hw(struct ngbe_hw *hw) +@@ -372,16 +361,27 @@ s32 ngbe_stop_hw(struct ngbe_hw *hw) wr32(hw, NGBE_ICRMISC, NGBE_ICRMISC_MASK); wr32(hw, NGBE_ICR(0), NGBE_ICR_MASK); @@ -69231,7 +77526,7 @@ index 0716357725..08a7e02943 100644 /* flush all queues disables */ ngbe_flush(hw); -@@ -399,8 +396,6 @@ s32 ngbe_led_on(struct ngbe_hw *hw, u32 index) +@@ -399,8 +399,6 @@ s32 ngbe_led_on(struct ngbe_hw *hw, u32 index) { u32 led_reg = rd32(hw, NGBE_LEDCTL); @@ -69240,7 +77535,7 
@@ index 0716357725..08a7e02943 100644 if (index > 3) return NGBE_ERR_PARAM; -@@ -421,8 +416,6 @@ s32 ngbe_led_off(struct ngbe_hw *hw, u32 index) +@@ -421,8 +419,6 @@ s32 ngbe_led_off(struct ngbe_hw *hw, u32 index) { u32 led_reg = rd32(hw, NGBE_LEDCTL); @@ -69249,7 +77544,7 @@ index 0716357725..08a7e02943 100644 if (index > 3) return NGBE_ERR_PARAM; -@@ -444,8 +437,6 @@ s32 ngbe_validate_mac_addr(u8 *mac_addr) +@@ -444,8 +440,6 @@ s32 ngbe_validate_mac_addr(u8 *mac_addr) { s32 status = 0; @@ -69258,7 +77553,7 @@ index 0716357725..08a7e02943 100644 /* Make sure it is not a multicast address */ if (NGBE_IS_MULTICAST((struct rte_ether_addr *)mac_addr)) { status = NGBE_ERR_INVALID_MAC_ADDR; -@@ -476,11 +467,9 @@ s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq, +@@ -476,11 +470,9 @@ s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 rar_low, rar_high; u32 rar_entries = hw->mac.num_rar_entries; @@ -69271,7 +77566,7 @@ index 0716357725..08a7e02943 100644 return NGBE_ERR_INVALID_ARGUMENT; } -@@ -528,11 +517,9 @@ s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index) +@@ -528,11 +520,9 @@ s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index) u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; @@ -69284,7 +77579,7 @@ index 0716357725..08a7e02943 100644 return NGBE_ERR_INVALID_ARGUMENT; } -@@ -568,8 +555,6 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) +@@ -568,8 +558,6 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) u32 psrctl; u32 rar_entries = hw->mac.num_rar_entries; @@ -69293,7 +77588,7 @@ index 0716357725..08a7e02943 100644 /* * If the current mac address is valid, assume it is a software override * to the permanent address. -@@ -580,18 +565,18 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) +@@ -580,18 +568,18 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) /* Get the MAC address from the RAR0 for later reference */ hw->mac.get_mac_addr(hw, hw->mac.addr); @@ -69319,7 +77614,7 @@ index 0716357725..08a7e02943 100644 hw->mac.addr[4], hw->mac.addr[5]); hw->mac.set_rar(hw, 0, hw->mac.addr, 0, true); -@@ -601,7 +586,7 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) +@@ -601,7 +589,7 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) hw->mac.clear_vmdq(hw, 0, BIT_MASK32); /* Zero out the other receive addresses. 
*/ @@ -69328,7 +77623,7 @@ index 0716357725..08a7e02943 100644 for (i = 1; i < rar_entries; i++) { wr32(hw, NGBE_ETHADDRIDX, i); wr32(hw, NGBE_ETHADDRL, 0); -@@ -615,7 +600,7 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) +@@ -615,7 +603,7 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); wr32(hw, NGBE_PSRCTL, psrctl); @@ -69337,7 +77632,7 @@ index 0716357725..08a7e02943 100644 for (i = 0; i < hw->mac.mcft_size; i++) wr32(hw, NGBE_MCADDRTBL(i), 0); -@@ -640,8 +625,6 @@ static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr) +@@ -640,8 +628,6 @@ static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr) { u32 vector = 0; @@ -69346,7 +77641,7 @@ index 0716357725..08a7e02943 100644 switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); -@@ -656,7 +639,7 @@ static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr) +@@ -656,7 +642,7 @@ static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr) vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; default: /* Invalid mc_filter_type */ @@ -69355,7 +77650,7 @@ index 0716357725..08a7e02943 100644 ASSERT(0); break; } -@@ -679,12 +662,10 @@ void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr) +@@ -679,12 +665,10 @@ void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr) u32 vector_bit; u32 vector_reg; @@ -69369,7 +77664,7 @@ index 0716357725..08a7e02943 100644 /* * The MTA is a register array of 128 32-bit registers. It is treated -@@ -718,8 +699,6 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, +@@ -718,8 +702,6 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, u32 i; u32 vmdq; @@ -69378,7 +77673,7 @@ index 0716357725..08a7e02943 100644 /* * Set the new number of MC addresses that we are being requested to * use. 
-@@ -729,13 +708,13 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, +@@ -729,13 +711,13 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, /* Clear mta_shadow */ if (clear) { @@ -69394,7 +77689,7 @@ index 0716357725..08a7e02943 100644 ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); } -@@ -752,7 +731,7 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, +@@ -752,7 +734,7 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, wr32(hw, NGBE_PSRCTL, psrctl); } @@ -69403,7 +77698,7 @@ index 0716357725..08a7e02943 100644 return 0; } -@@ -767,11 +746,9 @@ s32 ngbe_setup_fc_em(struct ngbe_hw *hw) +@@ -767,11 +749,9 @@ s32 ngbe_setup_fc_em(struct ngbe_hw *hw) s32 err = 0; u16 reg_cu = 0; @@ -69416,7 +77711,7 @@ index 0716357725..08a7e02943 100644 err = NGBE_ERR_INVALID_LINK_SETTINGS; goto out; } -@@ -827,7 +804,7 @@ s32 ngbe_setup_fc_em(struct ngbe_hw *hw) +@@ -827,7 +807,7 @@ s32 ngbe_setup_fc_em(struct ngbe_hw *hw) reg_cu |= 0xC00; /*need to merge rtl and mvl on page 0*/ break; default: @@ -69425,7 +77720,7 @@ index 0716357725..08a7e02943 100644 err = NGBE_ERR_CONFIG; goto out; } -@@ -851,8 +828,6 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw) +@@ -851,8 +831,6 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw) u32 pause_time; u32 fcrtl, fcrth; @@ -69434,7 +77729,7 @@ index 0716357725..08a7e02943 100644 /* Validate the water mark configuration */ if (!hw->fc.pause_time) { err = NGBE_ERR_INVALID_LINK_SETTINGS; -@@ -863,7 +838,7 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw) +@@ -863,7 +841,7 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw) if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) { if (!hw->fc.low_water || hw->fc.low_water >= hw->fc.high_water) { @@ -69443,7 +77738,7 @@ index 0716357725..08a7e02943 100644 err = NGBE_ERR_INVALID_LINK_SETTINGS; goto out; } -@@ -919,7 +894,7 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw) +@@ -919,7 +897,7 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw) fccfg_reg |= NGBE_TXFCCFG_FC; break; default: @@ -69452,7 +77747,7 @@ index 0716357725..08a7e02943 100644 err = NGBE_ERR_CONFIG; goto out; } -@@ -977,8 +952,7 @@ s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg, +@@ -977,8 +955,7 @@ s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { if ((!(adv_reg)) || (!(lp_reg))) { @@ -69462,7 +77757,7 @@ index 0716357725..08a7e02943 100644 adv_reg, lp_reg); return NGBE_ERR_FC_NOT_NEGOTIATED; } -@@ -993,22 +967,22 @@ s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg, +@@ -993,22 +970,22 @@ s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg, */ if (hw->fc.requested_mode == ngbe_fc_full) { hw->fc.current_mode = ngbe_fc_full; @@ -69490,7 +77785,7 @@ index 0716357725..08a7e02943 100644 } return 0; } -@@ -1046,8 +1020,6 @@ void ngbe_fc_autoneg(struct ngbe_hw *hw) +@@ -1046,8 +1023,6 @@ void ngbe_fc_autoneg(struct ngbe_hw *hw) u32 speed; bool link_up; @@ -69499,7 +77794,7 @@ index 0716357725..08a7e02943 100644 /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. 
Bail out if: -@@ -1076,6 +1048,64 @@ void ngbe_fc_autoneg(struct ngbe_hw *hw) +@@ -1076,6 +1051,64 @@ void ngbe_fc_autoneg(struct ngbe_hw *hw) } } @@ -69564,7 +77859,7 @@ index 0716357725..08a7e02943 100644 /** * ngbe_acquire_swfw_sync - Acquire SWFW semaphore * @hw: pointer to hardware structure -@@ -1092,8 +1122,6 @@ s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask) +@@ -1092,8 +1125,6 @@ s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask) u32 timeout = 200; u32 i; @@ -69573,7 +77868,7 @@ index 0716357725..08a7e02943 100644 for (i = 0; i < timeout; i++) { /* * SW NVM semaphore bit is used for access to all -@@ -1136,8 +1164,6 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask) +@@ -1136,8 +1167,6 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask) u32 mngsem; u32 swmask = mask; @@ -69582,7 +77877,7 @@ index 0716357725..08a7e02943 100644 ngbe_get_eeprom_semaphore(hw); mngsem = rd32(hw, NGBE_MNGSEM); -@@ -1161,9 +1187,6 @@ s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw) +@@ -1161,9 +1190,6 @@ s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw) int i; u32 secrxreg; @@ -69592,7 +77887,7 @@ index 0716357725..08a7e02943 100644 secrxreg = rd32(hw, NGBE_SECRXCTL); secrxreg |= NGBE_SECRXCTL_XDSA; wr32(hw, NGBE_SECRXCTL, secrxreg); -@@ -1178,8 +1201,7 @@ s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw) +@@ -1178,8 +1204,7 @@ s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw) /* For informational purposes only */ if (i >= NGBE_MAX_SECRX_POLL) @@ -69602,7 +77897,7 @@ index 0716357725..08a7e02943 100644 return 0; } -@@ -1194,8 +1216,6 @@ s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw) +@@ -1194,8 +1219,6 @@ s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw) { u32 secrxreg; @@ -69611,7 +77906,7 @@ index 0716357725..08a7e02943 100644 secrxreg = rd32(hw, NGBE_SECRXCTL); secrxreg &= ~NGBE_SECRXCTL_XDSA; wr32(hw, NGBE_SECRXCTL, secrxreg); -@@ -1215,11 +1235,9 @@ s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq) +@@ -1215,11 +1238,9 @@ s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; @@ -69624,7 +77919,7 @@ index 0716357725..08a7e02943 100644 return NGBE_ERR_INVALID_ARGUMENT; } -@@ -1253,11 +1271,9 @@ s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq) +@@ -1253,11 +1274,9 @@ s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; @@ -69637,7 +77932,7 @@ index 0716357725..08a7e02943 100644 return NGBE_ERR_INVALID_ARGUMENT; } -@@ -1278,8 +1294,7 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw) +@@ -1278,8 +1297,7 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw) { int i; @@ -69647,7 +77942,7 @@ index 0716357725..08a7e02943 100644 for (i = 0; i < 128; i++) wr32(hw, NGBE_UCADDRTBL(i), 0); -@@ -1334,7 +1349,7 @@ s32 ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan, bool vlvf_bypass) +@@ -1334,7 +1352,7 @@ s32 ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan, bool vlvf_bypass) * slot we found during our search, else error. */ if (!first_empty_slot) @@ -69656,7 +77951,7 @@ index 0716357725..08a7e02943 100644 return first_empty_slot ? 
first_empty_slot : NGBE_ERR_NO_SPACE; } -@@ -1355,8 +1370,6 @@ s32 ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, u32 vind, +@@ -1355,8 +1373,6 @@ s32 ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, u32 vind, u32 regidx, vfta_delta, vfta; s32 err; @@ -69665,7 +77960,7 @@ index 0716357725..08a7e02943 100644 if (vlan > 4095 || vind > 63) return NGBE_ERR_PARAM; -@@ -1424,8 +1437,6 @@ s32 ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, +@@ -1424,8 +1440,6 @@ s32 ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, u32 portctl; s32 vlvf_index; @@ -69674,7 +77969,7 @@ index 0716357725..08a7e02943 100644 if (vlan > 4095 || vind > 63) return NGBE_ERR_PARAM; -@@ -1505,8 +1516,6 @@ s32 ngbe_clear_vfta(struct ngbe_hw *hw) +@@ -1505,8 +1519,6 @@ s32 ngbe_clear_vfta(struct ngbe_hw *hw) { u32 offset; @@ -69683,7 +77978,7 @@ index 0716357725..08a7e02943 100644 for (offset = 0; offset < hw->mac.vft_size; offset++) wr32(hw, NGBE_VLANTBL(offset), 0); -@@ -1534,8 +1543,6 @@ s32 ngbe_check_mac_link_em(struct ngbe_hw *hw, u32 *speed, +@@ -1534,8 +1546,6 @@ s32 ngbe_check_mac_link_em(struct ngbe_hw *hw, u32 *speed, u32 i, reg; s32 status = 0; @@ -69692,7 +77987,7 @@ index 0716357725..08a7e02943 100644 reg = rd32(hw, NGBE_GPIOINTSTAT); wr32(hw, NGBE_GPIOEOI, reg); -@@ -1559,7 +1566,6 @@ s32 ngbe_get_link_capabilities_em(struct ngbe_hw *hw, +@@ -1559,7 +1569,6 @@ s32 ngbe_get_link_capabilities_em(struct ngbe_hw *hw, { s32 status = 0; @@ -69700,7 +77995,7 @@ index 0716357725..08a7e02943 100644 hw->mac.autoneg = *autoneg; -@@ -1582,8 +1588,6 @@ s32 ngbe_setup_mac_link_em(struct ngbe_hw *hw, +@@ -1582,8 +1591,6 @@ s32 ngbe_setup_mac_link_em(struct ngbe_hw *hw, { s32 status; @@ -69709,7 +78004,7 @@ index 0716357725..08a7e02943 100644 /* Setup the PHY according to input speed */ status = hw->phy.setup_link(hw, speed, autoneg_wait_to_complete); -@@ -1609,6 +1613,30 @@ void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf) +@@ -1609,6 +1616,30 @@ void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf) wr32(hw, NGBE_POOLTXASMAC, pfvfspoof); } @@ -69740,7 +78035,7 @@ index 0716357725..08a7e02943 100644 /** * ngbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing * @hw: pointer to hardware structure -@@ -1639,8 +1667,6 @@ s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw) +@@ -1639,8 +1670,6 @@ s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw) { struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; @@ -69749,7 +78044,7 @@ index 0716357725..08a7e02943 100644 memset(data, 0, sizeof(struct ngbe_thermal_sensor_data)); if (hw->bus.lan_id != 0) -@@ -1664,8 +1690,6 @@ s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw) +@@ -1664,8 +1693,6 @@ s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw) s32 status = 0; u32 ts_state; @@ -69758,7 +78053,7 @@ index 0716357725..08a7e02943 100644 /* Check that the LASI temp alarm status was triggered */ ts_state = rd32(hw, NGBE_TSALM); -@@ -1720,8 +1744,6 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw) +@@ -1720,8 +1747,6 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw) { s32 err = 0; @@ -69767,7 +78062,7 @@ index 0716357725..08a7e02943 100644 if (hw->vendor_id != PCI_VENDOR_ID_WANGXUN) { DEBUGOUT("Unsupported vendor id: %x", hw->vendor_id); return NGBE_ERR_DEVICE_NOT_SUPPORTED; -@@ -1750,7 +1772,7 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw) +@@ -1750,7 +1775,7 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw) break; } @@ -69776,7 +78071,7 @@ index 0716357725..08a7e02943 100644 hw->mac.type, hw->phy.media_type, err); return err; } -@@ -1764,15 
+1786,12 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw) +@@ -1764,15 +1789,12 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw) **/ s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval) { @@ -69792,7 +78087,7 @@ index 0716357725..08a7e02943 100644 hw->mac.disable_sec_rx_path(hw); if (regval & NGBE_PBRXCTL_ENA) -@@ -1853,8 +1872,6 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw) +@@ -1853,8 +1875,6 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw) struct ngbe_rom_info *rom = &hw->rom; struct ngbe_mbx_info *mbx = &hw->mbx; @@ -69801,7 +78096,7 @@ index 0716357725..08a7e02943 100644 /* BUS */ bus->set_lan_id = ngbe_set_lan_id_multi_port; -@@ -1907,6 +1924,8 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw) +@@ -1907,6 +1927,8 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw) mac->check_link = ngbe_check_mac_link_em; mac->setup_link = ngbe_setup_mac_link_em; @@ -69810,7 +78105,7 @@ index 0716357725..08a7e02943 100644 /* Manageability interface */ mac->init_thermal_sensor_thresh = ngbe_init_thermal_sensor_thresh; mac->check_overtemp = ngbe_mac_check_overtemp; -@@ -1928,6 +1947,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw) +@@ -1928,6 +1950,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw) mac->mcft_size = NGBE_EM_MC_TBL_SIZE; mac->vft_size = NGBE_EM_VFT_TBL_SIZE; mac->num_rar_entries = NGBE_EM_RAR_ENTRIES; @@ -69818,7 +78113,7 @@ index 0716357725..08a7e02943 100644 mac->max_rx_queues = NGBE_EM_MAX_RX_QUEUES; mac->max_tx_queues = NGBE_EM_MAX_TX_QUEUES; -@@ -1953,8 +1973,6 @@ s32 ngbe_init_shared_code(struct ngbe_hw *hw) +@@ -1953,8 +1976,6 @@ s32 ngbe_init_shared_code(struct ngbe_hw *hw) { s32 status = 0; @@ -70669,7 +78964,7 @@ index 12847b7272..8f87398c17 100644 u32 max_rx_queues; bool get_link_status; diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -index 981592f7f4..3038694042 100644 +index 981592f7f4..7866f05571 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c +++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c @@ -89,8 +89,8 @@ static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); @@ -70726,6 +79021,15 @@ index 981592f7f4..3038694042 100644 hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; hw->sub_system_id = pci_dev->id.subsystem_device_id; +@@ -514,7 +526,7 @@ static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev) + if (ethdev == NULL) + return 0; + +- return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit); ++ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit); + } + + static struct rte_pci_driver rte_ngbe_pmd = { @@ -943,12 +955,14 @@ ngbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -70857,7 +79161,18 @@ index 981592f7f4..3038694042 100644 hw_stats->rx_illegal_byte_errors + hw_stats->rx_error_bytes + hw_stats->rx_fragment_errors; -@@ -1800,6 +1830,24 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +@@ -1736,7 +1766,9 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; +- dev_info->max_rx_pktlen = 15872; ++ dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD; ++ dev_info->min_mtu = RTE_ETHER_MIN_MTU; ++ dev_info->max_mtu = NGBE_MAX_MTU; + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = pci_dev->max_vfs; +@@ -1800,6 +1832,24 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } @@ -70882,7 +79197,7 @@ index 
981592f7f4..3038694042 100644 /* return 0 means link status changed, -1 means not changed */ int ngbe_dev_link_update_share(struct rte_eth_dev *dev, -@@ -1837,8 +1885,16 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1837,8 +1887,16 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &link); } @@ -70900,7 +79215,7 @@ index 981592f7f4..3038694042 100644 intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG; link.link_status = RTE_ETH_LINK_UP; -@@ -1874,6 +1930,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1874,6 +1932,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); } @@ -70909,7 +79224,7 @@ index 981592f7f4..3038694042 100644 } return rte_eth_linkstatus_set(dev, &link); -@@ -2061,9 +2119,6 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +@@ -2061,9 +2121,6 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) struct ngbe_hw *hw = ngbe_dev_hw(dev); struct ngbe_interrupt *intr = ngbe_dev_intr(dev); @@ -70919,7 +79234,7 @@ index 981592f7f4..3038694042 100644 /* read-on-clear nic registers here */ eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC]; PMD_DRV_LOG(DEBUG, "eicr %x", eicr); -@@ -2083,6 +2138,8 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +@@ -2083,6 +2140,8 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) if (eicr & NGBE_ICRMISC_GPIO) intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE; @@ -70928,7 +79243,7 @@ index 981592f7f4..3038694042 100644 return 0; } -@@ -2135,7 +2192,6 @@ static int +@@ -2135,7 +2194,6 @@ static int ngbe_dev_interrupt_action(struct rte_eth_dev *dev) { struct ngbe_interrupt *intr = ngbe_dev_intr(dev); @@ -70936,7 +79251,7 @@ index 981592f7f4..3038694042 100644 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); -@@ -2151,31 +2207,11 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) +@@ -2151,31 +2209,11 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) rte_eth_linkstatus_get(dev, &link); ngbe_dev_link_update(dev, 0); @@ -70972,7 +79287,7 @@ index 981592f7f4..3038694042 100644 } PMD_DRV_LOG(DEBUG, "enable intr immediately"); -@@ -2184,53 +2220,6 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) +@@ -2184,53 +2222,6 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) return 0; } @@ -71026,7 +79341,7 @@ index 981592f7f4..3038694042 100644 /** * Interrupt handler triggered by NIC for handling * specific interrupt. 
-@@ -2362,6 +2351,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +@@ -2362,6 +2353,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return -EIO; } @@ -71120,7 +79435,7 @@ index 981592f7f4..3038694042 100644 int ngbe_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, -@@ -2481,7 +2557,7 @@ static int +@@ -2481,7 +2559,7 @@ static int ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct ngbe_hw *hw = ngbe_dev_hw(dev); @@ -71129,7 +79444,7 @@ index 981592f7f4..3038694042 100644 struct rte_eth_dev_data *dev_data = dev->data; /* If device is started, refuse mtu that requires the support of -@@ -2494,12 +2570,8 @@ ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +@@ -2494,12 +2572,8 @@ ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; } @@ -71144,7 +79459,7 @@ index 981592f7f4..3038694042 100644 return 0; } -@@ -2641,7 +2713,7 @@ ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction, +@@ -2641,7 +2715,7 @@ ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction, wr32(hw, NGBE_IVARMISC, tmp); } else { /* rx or tx causes */ @@ -71153,7 +79468,7 @@ index 981592f7f4..3038694042 100644 idx = ((16 * (queue & 1)) + (8 * direction)); tmp = rd32(hw, NGBE_IVAR(queue >> 1)); tmp &= ~(0xFF << idx); -@@ -2893,7 +2965,7 @@ ngbe_timesync_disable(struct rte_eth_dev *dev) +@@ -2893,7 +2967,7 @@ ngbe_timesync_disable(struct rte_eth_dev *dev) /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0); @@ -71163,10 +79478,18 @@ index 981592f7f4..3038694042 100644 return 0; diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h -index bb96f6a5e7..8d500fd38c 100644 +index bb96f6a5e7..bc26f4eebb 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h +++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.h -@@ -341,6 +341,7 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, +@@ -31,6 +31,7 @@ + + #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */ + ++#define NGBE_MAX_MTU 9414 + /* The overhead from MTU to max frame size. 
*/ + #define NGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + +@@ -341,6 +342,7 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask); @@ -71205,7 +79528,7 @@ index 7f9c04fb0e..12a18de31d 100644 /* clear VMDq map to scan rar 31 */ diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c -index 86a5ef5486..5df4186c72 100644 +index 86a5ef5486..fcff13fb22 100644 --- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c +++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.c @@ -24,15 +24,11 @@ @@ -71369,7 +79692,15 @@ index 86a5ef5486..5df4186c72 100644 } /* -@@ -1939,12 +1897,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) +@@ -1816,6 +1774,7 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq) + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); ++ rte_memzone_free(txq->mz); + } + rte_free(txq); + } +@@ -1939,12 +1898,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | @@ -71382,7 +79713,31 @@ index 86a5ef5486..5df4186c72 100644 RTE_ETH_TX_OFFLOAD_MULTI_SEGS; if (hw->is_pf) -@@ -2237,6 +2191,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) +@@ -2024,6 +1979,7 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ txq->mz = tz; + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; +@@ -2126,6 +2082,7 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq) + ngbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq->sw_sc_ring); ++ rte_memzone_free(rxq->mz); + rte_free(rxq); + } + } +@@ -2216,6 +2173,7 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; ++ rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + } +@@ -2237,6 +2195,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_KEEP_CRC | RTE_ETH_RX_OFFLOAD_VLAN_FILTER | @@ -71390,7 +79745,15 @@ index 86a5ef5486..5df4186c72 100644 RTE_ETH_RX_OFFLOAD_SCATTER; if (hw->is_pf) -@@ -2460,6 +2415,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2305,6 +2264,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ rxq->mz = rz; + /* + * Zero init all the descriptors in the ring. 
+ */ +@@ -2460,6 +2420,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) if (txq != NULL) { txq->ops->release_mbufs(txq); txq->ops->reset(txq); @@ -71398,7 +79761,7 @@ index 86a5ef5486..5df4186c72 100644 } } -@@ -2469,6 +2425,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2469,6 +2430,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) if (rxq != NULL) { ngbe_rx_queue_release_mbufs(rxq); ngbe_reset_rx_queue(adapter, rxq); @@ -71406,6 +79769,26 @@ index 86a5ef5486..5df4186c72 100644 } } } +diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.h b/dpdk/drivers/net/ngbe/ngbe_rxtx.h +index 9130f9d0df..2914b9a756 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.h ++++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.h +@@ -276,6 +276,7 @@ struct ngbe_rx_queue { + struct rte_mbuf fake_mbuf; + /** hold packets to return to application */ + struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2]; ++ const struct rte_memzone *mz; + }; + + /** +@@ -353,6 +354,7 @@ struct ngbe_tx_queue { + uint8_t tx_deferred_start; /**< not in global dev start */ + + const struct ngbe_txq_ops *ops; /**< txq ops */ ++ const struct rte_memzone *mz; + }; + + struct ngbe_txq_ops { diff --git a/dpdk/drivers/net/null/rte_eth_null.c b/dpdk/drivers/net/null/rte_eth_null.c index ca03469d0e..bdaca02d86 100644 --- a/dpdk/drivers/net/null/rte_eth_null.c @@ -71630,7 +80013,7 @@ index c9b91fef9e..96366b2a7f 100644 do { diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c -index 047010e15e..c5158bbf31 100644 +index 047010e15e..fe48ccea8e 100644 --- a/dpdk/drivers/net/pfe/pfe_ethdev.c +++ b/dpdk/drivers/net/pfe/pfe_ethdev.c @@ -257,6 +257,7 @@ pfe_eth_open(struct rte_eth_dev *dev) @@ -71672,7 +80055,17 @@ index 047010e15e..c5158bbf31 100644 return 0; } -@@ -769,7 +780,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) +@@ -525,7 +536,8 @@ pfe_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, +- RTE_PTYPE_L4_SCTP ++ RTE_PTYPE_L4_SCTP, ++ RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == pfe_recv_pkts || +@@ -769,7 +781,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) if (eth_dev == NULL) return -ENOMEM; @@ -72979,8 +81372,20 @@ index ca70eab678..ad96288e7e 100644 * buffer. * Question: are the two masks equivalent? 
* +diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c b/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c +index 6b02f43e31..86e6144887 100644 +--- a/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c ++++ b/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c +@@ -906,6 +906,7 @@ pmd_mtr_stats_read(struct rte_eth_dev *dev, + struct rte_mtr_stats s; + uint64_t s_mask = 0; + ++ memset(&s, 0, sizeof(s)); + mtr_stats_convert(p, + m, + &counters.stats[0], diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c -index f1b48cae82..29b4860656 100644 +index f1b48cae82..3a11f36e6f 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c @@ -67,6 +67,7 @@ @@ -73150,7 +81555,15 @@ index f1b48cae82..29b4860656 100644 if (internals->ioctl_sock != -1) { close(internals->ioctl_sock); internals->ioctl_sock = -1; -@@ -2099,8 +2180,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, +@@ -1756,6 +1837,7 @@ tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_SCTP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; +@@ -2099,8 +2181,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, close(pmd->ioctl_sock); /* mac_addrs must not be freed alone because part of dev_private */ dev->data->mac_addrs = NULL; @@ -73160,7 +81573,7 @@ index f1b48cae82..29b4860656 100644 error_exit_nodev: TAP_LOG(ERR, "%s Unable to initialize %s", -@@ -2167,29 +2248,6 @@ set_remote_iface(const char *key __rte_unused, +@@ -2167,29 +2249,6 @@ set_remote_iface(const char *key __rte_unused, return 0; } @@ -73190,7 +81603,7 @@ index f1b48cae82..29b4860656 100644 static int set_mac_type(const char *key __rte_unused, const char *value, -@@ -2203,15 +2261,15 @@ set_mac_type(const char *key __rte_unused, +@@ -2203,15 +2262,15 @@ set_mac_type(const char *key __rte_unused, if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) { static int iface_idx; @@ -73209,7 +81622,7 @@ index f1b48cae82..29b4860656 100644 goto error; success: TAP_LOG(DEBUG, "TAP user MAC param (%s)", value); -@@ -2445,6 +2503,16 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) +@@ -2445,6 +2504,16 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) ret = tap_mp_attach_queues(name, eth_dev); if (ret != 0) return -1; @@ -76510,10 +84923,18 @@ index 20c310e5e7..d9bb65831a 100644 __u8 input_len = sizeof(v6_tuple) / sizeof(__u32); if (rsskey->hash_fields & (1 << HASH_FIELD_IPV6_L3)) diff --git a/dpdk/drivers/net/tap/tap_flow.c b/dpdk/drivers/net/tap/tap_flow.c -index c4f60ce98e..f53bc297f8 100644 +index c4f60ce98e..44ef948ae3 100644 --- a/dpdk/drivers/net/tap/tap_flow.c +++ b/dpdk/drivers/net/tap/tap_flow.c -@@ -961,7 +961,7 @@ add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata) +@@ -11,6 +11,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -961,7 +962,7 @@ add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata) } /** @@ -76522,7 +84943,110 @@ index c4f60ce98e..f53bc297f8 100644 * * @param[in] flow * Pointer to rte flow containing the netlink message -@@ -1684,7 +1684,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd, +@@ -1082,8 +1083,11 @@ priv_flow_process(struct pmd_internals *pmd, + } + /* use flower filter type */ + tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower"); +- if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0) +- goto exit_item_not_supported; ++ if 
(tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0) { ++ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, ++ actions, "could not allocated netlink msg"); ++ goto exit_return_error; ++ } + } + for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { + const struct tap_flow_items *token = NULL; +@@ -1199,9 +1203,12 @@ priv_flow_process(struct pmd_internals *pmd, + if (action) + goto exit_action_not_supported; + action = 1; +- if (!queue || +- (queue->index > pmd->dev->data->nb_rx_queues - 1)) +- goto exit_action_not_supported; ++ if (queue->index >= pmd->dev->data->nb_rx_queues) { ++ rte_flow_error_set(error, ERANGE, ++ RTE_FLOW_ERROR_TYPE_ACTION, actions, ++ "queue index out of range"); ++ goto exit_return_error; ++ } + if (flow) { + struct action_data adata = { + .id = "skbedit", +@@ -1227,7 +1234,7 @@ priv_flow_process(struct pmd_internals *pmd, + if (!pmd->rss_enabled) { + err = rss_enable(pmd, attr, error); + if (err) +- goto exit_action_not_supported; ++ goto exit_return_error; + } + if (flow) + err = rss_add_actions(flow, pmd, rss, error); +@@ -1235,7 +1242,7 @@ priv_flow_process(struct pmd_internals *pmd, + goto exit_action_not_supported; + } + if (err) +- goto exit_action_not_supported; ++ goto exit_return_error; + } + /* When fate is unknown, drop traffic. */ + if (!action) { +@@ -1258,6 +1265,7 @@ priv_flow_process(struct pmd_internals *pmd, + exit_action_not_supported: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + actions, "action not supported"); ++exit_return_error: + return -rte_errno; + } + +@@ -1290,9 +1298,7 @@ tap_flow_validate(struct rte_eth_dev *dev, + * In those rules, the handle (uint32_t) is the part that would identify + * specifically each rule. + * +- * On 32-bit architectures, the handle can simply be the flow's pointer address. +- * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently) +- * unique handle. ++ * Use jhash of the flow pointer to make a unique handle. + * + * @param[in, out] flow + * The flow that needs its handle set. +@@ -1302,16 +1308,18 @@ tap_flow_set_handle(struct rte_flow *flow) + { + union { + struct rte_flow *flow; +- const void *key; +- } tmp; +- uint32_t handle = 0; ++ uint32_t words[sizeof(flow) / sizeof(uint32_t)]; ++ } tmp = { ++ .flow = flow, ++ }; ++ uint32_t handle; ++ static uint64_t hash_seed; + +- tmp.flow = flow; ++ if (hash_seed == 0) ++ hash_seed = rte_rand(); ++ ++ handle = rte_jhash_32b(tmp.words, sizeof(flow) / sizeof(uint32_t), hash_seed); + +- if (sizeof(flow) > 4) +- handle = rte_jhash(tmp.key, sizeof(flow), 1); +- else +- handle = (uintptr_t)flow; + /* must be at least 1 to avoid letting the kernel choose one for us */ + if (!handle) + handle = 1; +@@ -1589,7 +1597,7 @@ tap_flow_isolate(struct rte_eth_dev *dev, + * If netdevice is there, setup appropriate flow rules immediately. + * Otherwise it will be set when bringing up the netdevice (tun_alloc). 
+ */ +- if (!process_private->rxq_fds[0]) ++ if (process_private->rxq_fds[0] == -1) + return 0; + if (set) { + struct rte_flow *remote_flow; +@@ -1684,7 +1692,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd, struct rte_flow_item *items = implicit_rte_flows[idx].items; struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr; struct rte_flow_item_eth eth_local = { .type = 0 }; @@ -76531,7 +85055,7 @@ index c4f60ce98e..f53bc297f8 100644 struct rte_flow *remote_flow = NULL; struct nlmsg *msg = NULL; int err = 0; -@@ -2017,7 +2017,7 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx) +@@ -2017,7 +2025,7 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx) break; /* @@ -76665,6 +85189,25 @@ index 8cedea8462..a64cb29d6f 100644 +int filter_list_ingress(int nlsk_fd, unsigned int ifindex); #endif /* _TAP_TCMSGS_H_ */ +diff --git a/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/dpdk/drivers/net/thunderx/nicvf_ethdev.c +index fc334cf734..a177bca3ff 100644 +--- a/dpdk/drivers/net/thunderx/nicvf_ethdev.c ++++ b/dpdk/drivers/net/thunderx/nicvf_ethdev.c +@@ -310,12 +310,14 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, ++ RTE_PTYPE_UNKNOWN + }; + static const uint32_t ptypes_tunnel[] = { + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, ++ RTE_PTYPE_UNKNOWN + }; + static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN; + diff --git a/dpdk/drivers/net/thunderx/nicvf_svf.c b/dpdk/drivers/net/thunderx/nicvf_svf.c index bccf290599..1bcf73d9fc 100644 --- a/dpdk/drivers/net/thunderx/nicvf_svf.c @@ -76679,7 +85222,7 @@ index bccf290599..1bcf73d9fc 100644 entry->vf = vf; diff --git a/dpdk/drivers/net/txgbe/base/meson.build b/dpdk/drivers/net/txgbe/base/meson.build -index 7a30191472..a81d6890fe 100644 +index 7a30191472..4cf90a394a 100644 --- a/dpdk/drivers/net/txgbe/base/meson.build +++ b/dpdk/drivers/net/txgbe/base/meson.build @@ -22,6 +22,6 @@ foreach flag: error_cflags @@ -76687,7 +85230,7 @@ index 7a30191472..a81d6890fe 100644 base_lib = static_library('txgbe_base', sources, - dependencies: static_rte_eal, -+ dependencies: [static_rte_eal, static_rte_net], ++ dependencies: [static_rte_eal, static_rte_net, static_rte_bus_pci], c_args: c_args) base_objs = base_lib.extract_all_objects(recursive: true) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c b/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c @@ -76872,7 +85415,7 @@ index 72901cd0b0..aeeae06dfc 100644 } diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.c b/dpdk/drivers/net/txgbe/base/txgbe_hw.c -index 00a8db78bf..1083431055 100644 +index 00a8db78bf..2a42f18381 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_hw.c +++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.c @@ -42,8 +42,6 @@ bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw) @@ -76978,8 +85521,12 @@ index 00a8db78bf..1083431055 100644 reg = rd32(hw, TXGBE_PORTSTAT); bus->lan_id = TXGBE_PORTSTAT_ID(reg); -@@ -479,8 +465,6 @@ s32 txgbe_stop_hw(struct txgbe_hw *hw) - u32 reg_val; +@@ -476,11 +462,9 @@ void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw) + **/ + s32 txgbe_stop_hw(struct txgbe_hw *hw) + { +- u32 reg_val; ++ s32 status = 0; u16 i; - DEBUGFUNC("txgbe_stop_hw"); @@ -76987,7 +85534,42 @@ index 00a8db78bf..1083431055 100644 /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware -@@ -527,8 +511,6 @@ s32 txgbe_led_on(struct txgbe_hw *hw, u32 index) +@@ -500,16 +484,26 @@ s32 txgbe_stop_hw(struct 
txgbe_hw *hw) + wr32(hw, TXGBE_ICR(0), TXGBE_ICR_MASK); + wr32(hw, TXGBE_ICR(1), TXGBE_ICR_MASK); + +- /* Disable the transmit unit. Each queue must be disabled. */ +- for (i = 0; i < hw->mac.max_tx_queues; i++) +- wr32(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_FLUSH); ++ wr32(hw, TXGBE_BMECTL, 0x3); + + /* Disable the receive unit by stopping each queue */ +- for (i = 0; i < hw->mac.max_rx_queues; i++) { +- reg_val = rd32(hw, TXGBE_RXCFG(i)); +- reg_val &= ~TXGBE_RXCFG_ENA; +- wr32(hw, TXGBE_RXCFG(i), reg_val); +- } ++ for (i = 0; i < hw->mac.max_rx_queues; i++) ++ wr32(hw, TXGBE_RXCFG(i), 0); ++ ++ /* flush all queues disables */ ++ txgbe_flush(hw); ++ msec_delay(2); ++ ++ /* Prevent the PCI-E bus from hanging by disabling PCI-E master ++ * access and verify no pending requests ++ */ ++ status = txgbe_set_pcie_master(hw, false); ++ if (status) ++ return status; ++ ++ /* Disable the transmit unit. Each queue must be disabled. */ ++ for (i = 0; i < hw->mac.max_tx_queues; i++) ++ wr32(hw, TXGBE_TXCFG(i), 0); + + /* flush all queues disables */ + txgbe_flush(hw); +@@ -527,8 +521,6 @@ s32 txgbe_led_on(struct txgbe_hw *hw, u32 index) { u32 led_reg = rd32(hw, TXGBE_LEDCTL); @@ -76996,7 +85578,7 @@ index 00a8db78bf..1083431055 100644 if (index > 4) return TXGBE_ERR_PARAM; -@@ -550,8 +532,6 @@ s32 txgbe_led_off(struct txgbe_hw *hw, u32 index) +@@ -550,8 +542,6 @@ s32 txgbe_led_off(struct txgbe_hw *hw, u32 index) { u32 led_reg = rd32(hw, TXGBE_LEDCTL); @@ -77005,7 +85587,7 @@ index 00a8db78bf..1083431055 100644 if (index > 4) return TXGBE_ERR_PARAM; -@@ -574,8 +554,6 @@ s32 txgbe_validate_mac_addr(u8 *mac_addr) +@@ -574,8 +564,6 @@ s32 txgbe_validate_mac_addr(u8 *mac_addr) { s32 status = 0; @@ -77014,7 +85596,7 @@ index 00a8db78bf..1083431055 100644 /* Make sure it is not a multicast address */ if (TXGBE_IS_MULTICAST(mac_addr)) { status = TXGBE_ERR_INVALID_MAC_ADDR; -@@ -606,11 +584,9 @@ s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, +@@ -606,11 +594,9 @@ s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 rar_low, rar_high; u32 rar_entries = hw->mac.num_rar_entries; @@ -77027,7 +85609,7 @@ index 00a8db78bf..1083431055 100644 return TXGBE_ERR_INVALID_ARGUMENT; } -@@ -658,11 +634,9 @@ s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index) +@@ -658,11 +644,9 @@ s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index) u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; @@ -77040,7 +85622,7 @@ index 00a8db78bf..1083431055 100644 return TXGBE_ERR_INVALID_ARGUMENT; } -@@ -698,8 +672,6 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) +@@ -698,8 +682,6 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) u32 psrctl; u32 rar_entries = hw->mac.num_rar_entries; @@ -77049,7 +85631,7 @@ index 00a8db78bf..1083431055 100644 /* * If the current mac address is valid, assume it is a software override * to the permanent address. -@@ -710,18 +682,18 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) +@@ -710,18 +692,18 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) /* Get the MAC address from the RAR0 for later reference */ hw->mac.get_mac_addr(hw, hw->mac.addr); @@ -77075,7 +85657,7 @@ index 00a8db78bf..1083431055 100644 hw->mac.addr[4], hw->mac.addr[5]); hw->mac.set_rar(hw, 0, hw->mac.addr, 0, true); -@@ -735,7 +707,7 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) +@@ -735,7 +717,7 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) hw->addr_ctrl.rar_used_count = 1; /* Zero out the other receive addresses. 
*/ @@ -77084,7 +85666,7 @@ index 00a8db78bf..1083431055 100644 for (i = 1; i < rar_entries; i++) { wr32(hw, TXGBE_ETHADDRIDX, i); wr32(hw, TXGBE_ETHADDRL, 0); -@@ -749,7 +721,7 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) +@@ -749,7 +731,7 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); wr32(hw, TXGBE_PSRCTL, psrctl); @@ -77093,7 +85675,7 @@ index 00a8db78bf..1083431055 100644 for (i = 0; i < hw->mac.mcft_size; i++) wr32(hw, TXGBE_MCADDRTBL(i), 0); -@@ -774,8 +746,6 @@ static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr) +@@ -774,8 +756,6 @@ static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; @@ -77102,7 +85684,7 @@ index 00a8db78bf..1083431055 100644 switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); -@@ -790,7 +760,7 @@ static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr) +@@ -790,7 +770,7 @@ static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr) vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; default: /* Invalid mc_filter_type */ @@ -77111,7 +85693,7 @@ index 00a8db78bf..1083431055 100644 ASSERT(0); break; } -@@ -813,12 +783,10 @@ void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr) +@@ -813,12 +793,10 @@ void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr) u32 vector_bit; u32 vector_reg; @@ -77125,7 +85707,7 @@ index 00a8db78bf..1083431055 100644 /* * The MTA is a register array of 128 32-bit registers. It is treated -@@ -852,8 +820,6 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, +@@ -852,8 +830,6 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, u32 i; u32 vmdq; @@ -77134,7 +85716,7 @@ index 00a8db78bf..1083431055 100644 /* * Set the new number of MC addresses that we are being requested to * use. 
-@@ -863,13 +829,13 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, +@@ -863,13 +839,13 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, /* Clear mta_shadow */ if (clear) { @@ -77150,7 +85732,7 @@ index 00a8db78bf..1083431055 100644 txgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); } -@@ -886,7 +852,7 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, +@@ -886,7 +862,7 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, wr32(hw, TXGBE_PSRCTL, psrctl); } @@ -77159,7 +85741,7 @@ index 00a8db78bf..1083431055 100644 return 0; } -@@ -904,8 +870,6 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) +@@ -904,8 +880,6 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) u32 fcrtl, fcrth; int i; @@ -77168,7 +85750,7 @@ index 00a8db78bf..1083431055 100644 /* Validate the water mark configuration */ if (!hw->fc.pause_time) { err = TXGBE_ERR_INVALID_LINK_SETTINGS; -@@ -918,7 +882,7 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) +@@ -918,7 +892,7 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { @@ -77177,7 +85759,7 @@ index 00a8db78bf..1083431055 100644 err = TXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } -@@ -976,7 +940,7 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) +@@ -976,7 +950,7 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) fccfg_reg |= TXGBE_TXFCCFG_FC; break; default: @@ -77186,7 +85768,7 @@ index 00a8db78bf..1083431055 100644 err = TXGBE_ERR_CONFIG; goto out; } -@@ -1037,8 +1001,7 @@ s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg, +@@ -1037,8 +1011,7 @@ s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { if ((!(adv_reg)) || (!(lp_reg))) { @@ -77196,7 +85778,7 @@ index 00a8db78bf..1083431055 100644 adv_reg, lp_reg); return TXGBE_ERR_FC_NOT_NEGOTIATED; } -@@ -1053,22 +1016,22 @@ s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg, +@@ -1053,22 +1026,22 @@ s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg, */ if (hw->fc.requested_mode == txgbe_fc_full) { hw->fc.current_mode = txgbe_fc_full; @@ -77224,7 +85806,7 @@ index 00a8db78bf..1083431055 100644 } return 0; } -@@ -1168,8 +1131,6 @@ void txgbe_fc_autoneg(struct txgbe_hw *hw) +@@ -1168,8 +1141,6 @@ void txgbe_fc_autoneg(struct txgbe_hw *hw) u32 speed; bool link_up; @@ -77233,7 +85815,46 @@ index 00a8db78bf..1083431055 100644 /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. 
Bail out if: -@@ -1235,8 +1196,6 @@ s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask) +@@ -1219,6 +1190,38 @@ void txgbe_fc_autoneg(struct txgbe_hw *hw) + } + } + ++s32 txgbe_set_pcie_master(struct txgbe_hw *hw, bool enable) ++{ ++ struct rte_pci_device *pci_dev = (struct rte_pci_device *)hw->back; ++ s32 status = 0; ++ u32 i; ++ ++ if (rte_pci_set_bus_master(pci_dev, enable) < 0) { ++ DEBUGOUT("Cannot configure PCI bus master."); ++ return -1; ++ } ++ ++ if (enable) ++ goto out; ++ ++ /* Exit if master requests are blocked */ ++ if (!(rd32(hw, TXGBE_BMEPEND))) ++ goto out; ++ ++ /* Poll for master request bit to clear */ ++ for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { ++ usec_delay(100); ++ if (!(rd32(hw, TXGBE_BMEPEND))) ++ goto out; ++ } ++ ++ DEBUGOUT("PCIe transaction pending bit also did not clear."); ++ status = TXGBE_ERR_MASTER_REQUESTS_PENDING; ++ ++out: ++ return status; ++} ++ + /** + * txgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure +@@ -1235,8 +1238,6 @@ s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask) u32 timeout = 200; u32 i; @@ -77242,7 +85863,7 @@ index 00a8db78bf..1083431055 100644 for (i = 0; i < timeout; i++) { /* * SW NVM semaphore bit is used for access to all -@@ -1279,8 +1238,6 @@ void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask) +@@ -1279,8 +1280,6 @@ void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask) u32 mngsem; u32 swmask = mask; @@ -77251,7 +85872,7 @@ index 00a8db78bf..1083431055 100644 txgbe_get_eeprom_semaphore(hw); mngsem = rd32(hw, TXGBE_MNGSEM); -@@ -1304,8 +1261,6 @@ s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw) +@@ -1304,8 +1303,6 @@ s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw) int i; u32 secrxreg; @@ -77260,7 +85881,7 @@ index 00a8db78bf..1083431055 100644 secrxreg = rd32(hw, TXGBE_SECRXCTL); secrxreg |= TXGBE_SECRXCTL_XDSA; wr32(hw, TXGBE_SECRXCTL, secrxreg); -@@ -1320,8 +1275,7 @@ s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw) +@@ -1320,8 +1317,7 @@ s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw) /* For informational purposes only */ if (i >= TXGBE_MAX_SECRX_POLL) @@ -77270,7 +85891,7 @@ index 00a8db78bf..1083431055 100644 return 0; } -@@ -1336,8 +1290,6 @@ s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw) +@@ -1336,8 +1332,6 @@ s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw) { u32 secrxreg; @@ -77279,7 +85900,7 @@ index 00a8db78bf..1083431055 100644 secrxreg = rd32(hw, TXGBE_SECRXCTL); secrxreg &= ~TXGBE_SECRXCTL_XDSA; wr32(hw, TXGBE_SECRXCTL, secrxreg); -@@ -1373,8 +1325,7 @@ int txgbe_disable_sec_tx_path(struct txgbe_hw *hw) +@@ -1373,8 +1367,7 @@ int txgbe_disable_sec_tx_path(struct txgbe_hw *hw) /* For informational purposes only */ if (i >= TXGBE_MAX_SECTX_POLL) @@ -77289,7 +85910,7 @@ index 00a8db78bf..1083431055 100644 return 0; } -@@ -1411,8 +1362,6 @@ static s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, +@@ -1411,8 +1404,6 @@ static s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, { s32 err; @@ -77298,7 +85919,7 @@ index 00a8db78bf..1083431055 100644 /* * First read the EEPROM pointer to see if the MAC addresses are * available. -@@ -1443,8 +1392,6 @@ s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) +@@ -1443,8 +1434,6 @@ s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) u8 i; s32 err; @@ -77307,7 +85928,7 @@ index 00a8db78bf..1083431055 100644 /* * First read the EEPROM pointer to see if the MAC addresses are * available. If they're not, no point in calling set_lan_id() here. 
-@@ -1493,8 +1440,6 @@ s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) +@@ -1493,8 +1482,6 @@ s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) u16 san_mac_data, san_mac_offset; u8 i; @@ -77316,7 +85937,7 @@ index 00a8db78bf..1083431055 100644 /* Look for SAN mac address pointer. If not defined, return */ err = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset); if (err || san_mac_offset == 0 || san_mac_offset == 0xFFFF) -@@ -1525,11 +1470,9 @@ s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq) +@@ -1525,11 +1512,9 @@ s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar_lo, mpsar_hi; u32 rar_entries = hw->mac.num_rar_entries; @@ -77329,7 +85950,7 @@ index 00a8db78bf..1083431055 100644 return TXGBE_ERR_INVALID_ARGUMENT; } -@@ -1579,11 +1522,9 @@ s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq) +@@ -1579,11 +1564,9 @@ s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; @@ -77342,7 +85963,7 @@ index 00a8db78bf..1083431055 100644 return TXGBE_ERR_INVALID_ARGUMENT; } -@@ -1608,8 +1549,7 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw) +@@ -1608,8 +1591,7 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw) { int i; @@ -77352,7 +85973,7 @@ index 00a8db78bf..1083431055 100644 for (i = 0; i < 128; i++) wr32(hw, TXGBE_UCADDRTBL(i), 0); -@@ -1664,7 +1604,7 @@ s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan, bool vlvf_bypass) +@@ -1664,7 +1646,7 @@ s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan, bool vlvf_bypass) * slot we found during our search, else error. */ if (!first_empty_slot) @@ -77361,7 +85982,7 @@ index 00a8db78bf..1083431055 100644 return first_empty_slot ? first_empty_slot : TXGBE_ERR_NO_SPACE; } -@@ -1685,8 +1625,6 @@ s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, +@@ -1685,8 +1667,6 @@ s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, u32 regidx, vfta_delta, vfta; s32 err; @@ -77370,7 +85991,7 @@ index 00a8db78bf..1083431055 100644 if (vlan > 4095 || vind > 63) return TXGBE_ERR_PARAM; -@@ -1754,8 +1692,6 @@ s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, +@@ -1754,8 +1734,6 @@ s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, u32 portctl; s32 vlvf_index; @@ -77379,7 +86000,7 @@ index 00a8db78bf..1083431055 100644 if (vlan > 4095 || vind > 63) return TXGBE_ERR_PARAM; -@@ -1835,8 +1771,6 @@ s32 txgbe_clear_vfta(struct txgbe_hw *hw) +@@ -1835,8 +1813,6 @@ s32 txgbe_clear_vfta(struct txgbe_hw *hw) { u32 offset; @@ -77388,7 +86009,7 @@ index 00a8db78bf..1083431055 100644 for (offset = 0; offset < hw->mac.vft_size; offset++) wr32(hw, TXGBE_VLANTBL(offset), 0); -@@ -1890,8 +1824,6 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, +@@ -1890,8 +1866,6 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, u32 links_reg, links_orig; u32 i; @@ -77397,7 +86018,7 @@ index 00a8db78bf..1083431055 100644 /* If Crosstalk fix enabled do the sanity check of making sure * the SFP+ cage is full. 
*/ -@@ -1922,7 +1854,7 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, +@@ -1922,7 +1896,7 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, links_reg = rd32(hw, TXGBE_PORTSTAT); if (links_orig != links_reg) { @@ -77406,7 +86027,7 @@ index 00a8db78bf..1083431055 100644 links_orig, links_reg); } -@@ -1977,8 +1909,6 @@ s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix, +@@ -1977,8 +1951,6 @@ s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix, u16 offset, caps; u16 alt_san_mac_blk_offset; @@ -77415,7 +86036,7 @@ index 00a8db78bf..1083431055 100644 /* clear output first */ *wwnn_prefix = 0xFFFF; *wwpn_prefix = 0xFFFF; -@@ -2068,8 +1998,6 @@ void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, +@@ -2068,8 +2040,6 @@ void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, **/ s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps) { @@ -77424,7 +86045,7 @@ index 00a8db78bf..1083431055 100644 hw->rom.readw_sw(hw, TXGBE_DEVICE_CAPS, device_caps); return 0; -@@ -2191,8 +2119,6 @@ s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw) +@@ -2191,8 +2161,6 @@ s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw) s64 tsv; u32 ts_stat; @@ -77433,7 +86054,7 @@ index 00a8db78bf..1083431055 100644 /* Only support thermal sensors attached to physical port 0 */ if (hw->bus.lan_id != 0) return TXGBE_NOT_IMPLEMENTED; -@@ -2223,8 +2149,6 @@ s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) +@@ -2223,8 +2191,6 @@ s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) { struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; @@ -77442,7 +86063,7 @@ index 00a8db78bf..1083431055 100644 memset(data, 0, sizeof(struct txgbe_thermal_sensor_data)); if (hw->bus.lan_id != 0) -@@ -2295,8 +2219,6 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, +@@ -2295,8 +2261,6 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, u32 i = 0; bool autoneg, link_up = false; @@ -77451,7 +86072,7 @@ index 00a8db78bf..1083431055 100644 /* Mask off requested but non-supported speeds */ status = hw->mac.get_link_capabilities(hw, &link_speed, &autoneg); if (status != 0) -@@ -2321,7 +2243,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, +@@ -2321,7 +2285,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, /* QSFP module automatically detects MAC link speed */ break; default: @@ -77460,7 +86081,7 @@ index 00a8db78bf..1083431055 100644 break; } -@@ -2357,10 +2279,24 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, +@@ -2357,10 +2321,24 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, } if (speed & TXGBE_LINK_SPEED_1GB_FULL) { @@ -77485,7 +86106,7 @@ index 00a8db78bf..1083431055 100644 /* Set the module link speed */ switch (hw->phy.media_type) { case txgbe_media_type_fiber: -@@ -2371,7 +2307,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, +@@ -2371,7 +2349,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, /* QSFP module automatically detects link speed */ break; default: @@ -77494,7 +86115,7 @@ index 00a8db78bf..1083431055 100644 break; } -@@ -2437,8 +2373,6 @@ s32 txgbe_init_shared_code(struct txgbe_hw *hw) +@@ -2437,8 +2415,6 @@ s32 txgbe_init_shared_code(struct txgbe_hw *hw) { s32 status; @@ -77503,7 +86124,7 @@ index 00a8db78bf..1083431055 100644 /* * Set the mac type */ -@@ -2474,8 +2408,6 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) +@@ -2474,8 +2450,6 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) { s32 err = 0; 
@@ -77512,7 +86133,7 @@ index 00a8db78bf..1083431055 100644 if (hw->vendor_id != PCI_VENDOR_ID_WANGXUN) { DEBUGOUT("Unsupported vendor id: %x", hw->vendor_id); return TXGBE_ERR_DEVICE_NOT_SUPPORTED; -@@ -2497,7 +2429,7 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) +@@ -2497,7 +2471,7 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) break; } @@ -77521,7 +86142,7 @@ index 00a8db78bf..1083431055 100644 hw->mac.type, err); return err; } -@@ -2506,8 +2438,6 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw) +@@ -2506,8 +2480,6 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw) { struct txgbe_mac_info *mac = &hw->mac; @@ -77530,7 +86151,7 @@ index 00a8db78bf..1083431055 100644 /* * enable the laser control functions for SFP+ fiber * and MNG not enabled -@@ -2550,8 +2480,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw) +@@ -2550,8 +2522,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw) struct txgbe_phy_info *phy = &hw->phy; s32 err = 0; @@ -77539,7 +86160,7 @@ index 00a8db78bf..1083431055 100644 if ((hw->device_id & 0xFF) == TXGBE_DEV_ID_QSFP) { /* Store flag indicating I2C bus access control unit. */ hw->phy.qsfp_shared_i2c_bus = TRUE; -@@ -2598,8 +2526,6 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) +@@ -2598,8 +2568,6 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) { s32 err = 0; @@ -77548,7 +86169,7 @@ index 00a8db78bf..1083431055 100644 if (hw->phy.sfp_type == txgbe_sfp_type_unknown) return 0; -@@ -2619,7 +2545,7 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) +@@ -2619,7 +2587,7 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) msec_delay(hw->rom.semaphore_delay); if (err) { @@ -77557,7 +86178,7 @@ index 00a8db78bf..1083431055 100644 return TXGBE_ERR_SFP_SETUP_NOT_COMPLETE; } -@@ -2717,8 +2643,6 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw) +@@ -2717,8 +2685,6 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw) struct txgbe_rom_info *rom = &hw->rom; struct txgbe_mbx_info *mbx = &hw->mbx; @@ -77566,7 +86187,7 @@ index 00a8db78bf..1083431055 100644 /* BUS */ bus->set_lan_id = txgbe_set_lan_id_multi_port; -@@ -2845,8 +2769,6 @@ s32 txgbe_get_link_capabilities_raptor(struct txgbe_hw *hw, +@@ -2845,8 +2811,6 @@ s32 txgbe_get_link_capabilities_raptor(struct txgbe_hw *hw, s32 status = 0; u32 autoc = 0; @@ -77575,7 +86196,7 @@ index 00a8db78bf..1083431055 100644 /* Check if 1G SFP module. 
*/ if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || -@@ -2950,8 +2872,6 @@ u32 txgbe_get_media_type_raptor(struct txgbe_hw *hw) +@@ -2950,8 +2914,6 @@ u32 txgbe_get_media_type_raptor(struct txgbe_hw *hw) { u32 media_type; @@ -77584,7 +86205,7 @@ index 00a8db78bf..1083431055 100644 if (hw->phy.ffe_set) txgbe_bp_mode_set(hw); -@@ -3010,8 +2930,6 @@ s32 txgbe_start_mac_link_raptor(struct txgbe_hw *hw, +@@ -3010,8 +2972,6 @@ s32 txgbe_start_mac_link_raptor(struct txgbe_hw *hw, s32 status = 0; bool got_lock = false; @@ -77593,7 +86214,7 @@ index 00a8db78bf..1083431055 100644 UNREFERENCED_PARAMETER(autoneg_wait_to_complete); /* reset_pipeline requires us to hold this lock as it writes to -@@ -3094,8 +3012,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +@@ -3094,8 +3054,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) **/ void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw) { @@ -77602,7 +86223,7 @@ index 00a8db78bf..1083431055 100644 /* Blocked by MNG FW so bail */ if (txgbe_check_reset_blocked(hw)) return; -@@ -3127,7 +3043,7 @@ void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, +@@ -3127,7 +3085,7 @@ void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, esdp_reg &= ~(TXGBE_GPIOBIT_4 | TXGBE_GPIOBIT_5); break; default: @@ -77611,7 +86232,7 @@ index 00a8db78bf..1083431055 100644 return; } -@@ -3153,8 +3069,6 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, +@@ -3153,8 +3111,6 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, bool link_up = false; u32 autoc_reg = rd32_epcs(hw, SR_AN_MMD_ADV_REG1); @@ -77620,7 +86241,7 @@ index 00a8db78bf..1083431055 100644 /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; -@@ -3243,8 +3157,7 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, +@@ -3243,8 +3199,7 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, out: if (link_up && link_speed == TXGBE_LINK_SPEED_1GB_FULL) @@ -77630,7 +86251,7 @@ index 00a8db78bf..1083431055 100644 return status; } -@@ -3270,7 +3183,6 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, +@@ -3270,7 +3225,6 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, u64 orig_autoc = 0; u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; @@ -77638,7 +86259,7 @@ index 00a8db78bf..1083431055 100644 UNREFERENCED_PARAMETER(autoneg_wait_to_complete); /* Check to see if speed passed in is supported. 
*/ -@@ -3357,8 +3269,6 @@ static s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw, +@@ -3357,8 +3311,6 @@ static s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw, { s32 status; @@ -77647,7 +86268,7 @@ index 00a8db78bf..1083431055 100644 /* Setup the PHY according to input speed */ status = hw->phy.setup_link_speed(hw, speed, autoneg_wait_to_complete); -@@ -3467,8 +3377,6 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) +@@ -3467,8 +3419,6 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) s32 status; u32 autoc; @@ -77656,7 +86277,7 @@ index 00a8db78bf..1083431055 100644 /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.stop_hw(hw); if (status != 0) -@@ -3624,15 +3532,13 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) +@@ -3624,15 +3574,13 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) u32 fdircmd; fdirctrl &= ~TXGBE_FDIRCTL_INITDONE; @@ -77673,7 +86294,7 @@ index 00a8db78bf..1083431055 100644 return err; } -@@ -3666,7 +3572,7 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) +@@ -3666,7 +3614,7 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) msec_delay(1); } if (i >= TXGBE_FDIR_INIT_DONE_POLL) { @@ -77682,7 +86303,7 @@ index 00a8db78bf..1083431055 100644 return TXGBE_ERR_FDIR_REINIT_FAILED; } -@@ -3692,8 +3598,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) +@@ -3692,8 +3640,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) { s32 err = 0; @@ -77691,7 +86312,7 @@ index 00a8db78bf..1083431055 100644 err = txgbe_start_hw(hw); if (err != 0) goto out; -@@ -3718,8 +3622,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) +@@ -3718,8 +3664,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) **/ s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval) { @@ -77700,7 +86321,7 @@ index 00a8db78bf..1083431055 100644 /* * Workaround silicon errata when enabling the Rx datapath. 
* If traffic is incoming before we enable the Rx unit, it could hang -@@ -3752,8 +3654,6 @@ bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw) +@@ -3752,8 +3696,6 @@ bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw) u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; s32 status; @@ -77709,6 +86330,18 @@ index 00a8db78bf..1083431055 100644 /* get the offset to the Firmware Module block */ status = hw->rom.read16(hw, TXGBE_FW_PTR, &fw_offset); +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.h b/dpdk/drivers/net/txgbe/base/txgbe_hw.h +index fd2f7d784c..bf656bb53e 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_hw.h ++++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.h +@@ -40,6 +40,7 @@ s32 txgbe_setup_fc(struct txgbe_hw *hw); + s32 txgbe_validate_mac_addr(u8 *mac_addr); + s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask); + void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask); ++s32 txgbe_set_pcie_master(struct txgbe_hw *hw, bool enable); + + s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); + s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mbx.c b/dpdk/drivers/net/txgbe/base/txgbe_mbx.c index 4d64c6c3e9..7f2489a13f 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_mbx.c @@ -77982,14 +86615,15 @@ index dbe512122c..6255718ff7 100644 reset_cmd.hdr.buf_len = FW_RESET_LEN; reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; diff --git a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h -index 11fcf7e8fe..b62c0b0824 100644 +index 11fcf7e8fe..30d671540e 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h +++ b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h -@@ -18,6 +18,7 @@ +@@ -18,6 +18,8 @@ #include #include #include +#include ++#include #include "../txgbe_logs.h" @@ -78480,10 +87114,29 @@ index 3f5229ecc2..a7c11c50df 100644 value = rd32_ephy(hw, addr); BP_LOG("PHY LANE TX EQ Read Value: %x\n", lane); diff --git a/dpdk/drivers/net/txgbe/base/txgbe_regs.h b/dpdk/drivers/net/txgbe/base/txgbe_regs.h -index 144047ba62..dc22ef53e3 100644 +index 144047ba62..d151123882 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_regs.h +++ b/dpdk/drivers/net/txgbe/base/txgbe_regs.h -@@ -1862,8 +1862,13 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual, +@@ -1020,6 +1020,8 @@ enum txgbe_5tuple_protocol { + #define TXGBE_MACRXFLT_CTL_PASS LS(3, 6, 0x3) + #define TXGBE_MACRXFLT_RXALL MS(31, 0x1) + ++#define TXGBE_MAC_WDG_TIMEOUT 0x01100C ++ + /****************************************************************************** + * Statistic Registers + ******************************************************************************/ +@@ -1234,6 +1236,9 @@ enum txgbe_5tuple_protocol { + #define TXGBE_TCPTMR 0x000170 + #define TXGBE_ITRSEL 0x000180 + ++#define TXGBE_BMECTL 0x012020 ++#define TXGBE_BMEPEND 0x000168 ++ + /* P2V Mailbox */ + #define TXGBE_MBMEM(i) (0x005000 + 0x40 * (i)) /* 0-63 */ + #define TXGBE_MBCTL(i) (0x000600 + 4 * (i)) /* 0-63 */ +@@ -1862,8 +1867,13 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual, } do { @@ -78499,7 +87152,7 @@ index 144047ba62..dc22ef53e3 100644 if (value == expect) break; -@@ -1896,7 +1901,7 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual, +@@ -1896,7 +1906,7 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual, #define wr32w(hw, reg, val, mask, slice) do { \ wr32((hw), reg, val); \ @@ -78509,10 +87162,18 @@ index 144047ba62..dc22ef53e3 100644 
#define TXGBE_XPCS_IDAADDR 0x13000 diff --git a/dpdk/drivers/net/txgbe/base/txgbe_type.h b/dpdk/drivers/net/txgbe/base/txgbe_type.h -index d95467f9f8..e7971ccf1d 100644 +index d95467f9f8..3b86d8bb8c 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_type.h +++ b/dpdk/drivers/net/txgbe/base/txgbe_type.h -@@ -355,9 +355,9 @@ struct txgbe_hw_stats { +@@ -28,6 +28,7 @@ + #define TXGBE_FDIR_INIT_DONE_POLL 10 + #define TXGBE_FDIRCMD_CMD_POLL 10 + #define TXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + + #define TXGBE_ALIGN 128 /* as intel did */ + +@@ -355,9 +356,9 @@ struct txgbe_hw_stats { u64 tx_management_packets; u64 rx_management_dropped; u64 rx_dma_drop; @@ -78523,7 +87184,7 @@ index d95467f9f8..e7971ccf1d 100644 u64 rx_crc_errors; u64 rx_illegal_byte_errors; u64 rx_error_bytes; -@@ -365,7 +365,7 @@ struct txgbe_hw_stats { +@@ -365,7 +366,7 @@ struct txgbe_hw_stats { u64 rx_length_errors; u64 rx_undersize_errors; u64 rx_fragment_errors; @@ -78532,7 +87193,7 @@ index d95467f9f8..e7971ccf1d 100644 u64 rx_jabber_errors; u64 rx_l3_l4_xsum_error; u64 mac_local_errors; -@@ -782,6 +782,7 @@ struct txgbe_hw { +@@ -782,6 +783,7 @@ struct txgbe_hw { bool allow_unsupported_sfp; bool need_crosstalk_fix; bool dev_start; @@ -78599,7 +87260,7 @@ index fb6d6d90ea..a73502351e 100644 } diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -index 47d0e6ea40..bd587b4f71 100644 +index 47d0e6ea40..1dbf7c554e 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c @@ -179,12 +179,16 @@ static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = { @@ -78637,7 +87298,128 @@ index 47d0e6ea40..bd587b4f71 100644 return -EIO; PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", -@@ -1482,6 +1486,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) +@@ -592,6 +596,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + rte_eth_copy_pci_info(eth_dev, pci_dev); + + /* Vendor and Device ID need to be set before init of shared code */ ++ hw->back = pci_dev; + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; +@@ -713,6 +718,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC); ++ rte_free(eth_dev->data->mac_addrs); ++ eth_dev->data->mac_addrs = NULL; + return -ENOMEM; + } + +@@ -884,6 +891,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); ++ rte_hash_free(fdir_info->hash_handle); + return -ENOMEM; + } + fdir_info->mask_added = FALSE; +@@ -919,6 +927,7 @@ static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) + if (!l2_tn_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for L2 TN hash map!"); ++ rte_hash_free(l2_tn_info->hash_handle); + return -ENOMEM; + } + l2_tn_info->e_tag_en = FALSE; +@@ -946,7 +955,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev) + if (!ethdev) + return 0; + +- return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit); ++ return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbe_dev_uninit); + } + + static struct rte_pci_driver rte_txgbe_pmd = { +@@ -982,41 +991,25 @@ txgbe_vlan_filter_set(struct 
rte_eth_dev *dev, uint16_t vlan_id, int on) + } + + static void +-txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++txgbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on) + { +- struct txgbe_hw *hw = TXGBE_DEV_HW(dev); +- struct txgbe_rx_queue *rxq; +- bool restart; +- uint32_t rxcfg, rxbal, rxbah; +- + if (on) + txgbe_vlan_hw_strip_enable(dev, queue); + else + txgbe_vlan_hw_strip_disable(dev, queue); ++} + +- rxq = dev->data->rx_queues[queue]; +- rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx)); +- rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx)); +- rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx)); +- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { +- restart = (rxcfg & TXGBE_RXCFG_ENA) && +- !(rxcfg & TXGBE_RXCFG_VLAN); +- rxcfg |= TXGBE_RXCFG_VLAN; +- } else { +- restart = (rxcfg & TXGBE_RXCFG_ENA) && +- (rxcfg & TXGBE_RXCFG_VLAN); +- rxcfg &= ~TXGBE_RXCFG_VLAN; +- } +- rxcfg &= ~TXGBE_RXCFG_ENA; ++static void ++txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++{ ++ struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + +- if (restart) { +- /* set vlan strip for ring */ +- txgbe_dev_rx_queue_stop(dev, queue); +- wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal); +- wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah); +- wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg); +- txgbe_dev_rx_queue_start(dev, queue); ++ if (!hw->adapter_stopped) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return; + } ++ ++ txgbe_vlan_strip_q_set(dev, queue, on); + } + + static int +@@ -1241,9 +1234,9 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) + rxq = dev->data->rx_queues[i]; + + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) +- txgbe_vlan_strip_queue_set(dev, i, 1); ++ txgbe_vlan_strip_q_set(dev, i, 1); + else +- txgbe_vlan_strip_queue_set(dev, i, 0); ++ txgbe_vlan_strip_q_set(dev, i, 0); + } + } + +@@ -1305,6 +1298,13 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) + static int + txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) + { ++ struct txgbe_hw *hw = TXGBE_DEV_HW(dev); ++ ++ if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return -EPERM; ++ } ++ + txgbe_config_vlan_strip_on_all_queues(dev, mask); + + txgbe_vlan_offload_config(dev, mask); +@@ -1482,6 +1482,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } } @@ -78657,7 +87439,16 @@ index 47d0e6ea40..bd587b4f71 100644 } return 0; } -@@ -1678,7 +1695,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1656,6 +1669,8 @@ txgbe_dev_start(struct rte_eth_dev *dev) + hw->mac.get_link_status = true; + hw->dev_start = true; + ++ txgbe_set_pcie_master(hw, true); ++ + /* configure PF module if SRIOV enabled */ + txgbe_pf_host_configure(dev); + +@@ -1678,7 +1693,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) return -ENOMEM; } } @@ -78666,7 +87457,7 @@ index 47d0e6ea40..bd587b4f71 100644 txgbe_configure_msix(dev); /* initialize transmission unit */ -@@ -1774,6 +1791,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1774,6 +1789,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed = (TXGBE_LINK_SPEED_100M_FULL | TXGBE_LINK_SPEED_1GB_FULL | TXGBE_LINK_SPEED_10GB_FULL); @@ -78674,7 +87465,7 @@ index 47d0e6ea40..bd587b4f71 100644 } else { if (*link_speeds & RTE_ETH_LINK_SPEED_10G) speed |= TXGBE_LINK_SPEED_10GB_FULL; -@@ -1785,6 +1803,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1785,6 +1801,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed |= TXGBE_LINK_SPEED_1GB_FULL; if (*link_speeds & 
RTE_ETH_LINK_SPEED_100M) speed |= TXGBE_LINK_SPEED_100M_FULL; @@ -78682,7 +87473,7 @@ index 47d0e6ea40..bd587b4f71 100644 } err = hw->mac.setup_link(hw, speed, link_up); -@@ -1863,7 +1882,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1863,7 +1880,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev) struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); if (hw->adapter_stopped) @@ -78691,7 +87482,7 @@ index 47d0e6ea40..bd587b4f71 100644 PMD_INIT_FUNC_TRACE(); -@@ -1882,14 +1901,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1882,14 +1899,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; @@ -78706,7 +87497,13 @@ index 47d0e6ea40..bd587b4f71 100644 txgbe_dev_clear_queues(dev); /* Clear stored conf */ -@@ -1920,6 +1931,16 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1916,10 +1925,22 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + adapter->rss_reta_updated = 0; + wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK); + ++ txgbe_set_pcie_master(hw, true); ++ + hw->adapter_stopped = true; dev->data->dev_started = 0; hw->dev_start = false; @@ -78739,7 +87536,7 @@ index 47d0e6ea40..bd587b4f71 100644 txgbe_dev_link_update(dev, 0); } -@@ -1977,6 +2000,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -1977,12 +2000,17 @@ txgbe_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -78749,7 +87546,15 @@ index 47d0e6ea40..bd587b4f71 100644 txgbe_pf_reset_hw(hw); ret = txgbe_dev_stop(dev); -@@ -2005,8 +2031,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) + + txgbe_dev_free_queues(dev); + ++ txgbe_set_pcie_master(hw, false); ++ + /* reprogram the RAR[0] in case user changed it. */ + txgbe_set_rar(hw, 0, hw->mac.addr, 0, true); + +@@ -2005,8 +2033,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_delay_ms(100); } while (retries++ < (10 + TXGBE_LINK_UP_TIME)); @@ -78760,7 +87565,7 @@ index 47d0e6ea40..bd587b4f71 100644 /* uninitialize PF if max_vfs not zero */ txgbe_pf_host_uninit(dev); -@@ -2034,6 +2061,7 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -2034,6 +2063,7 @@ txgbe_dev_close(struct rte_eth_dev *dev) #ifdef RTE_LIB_SECURITY rte_free(dev->security_ctx); @@ -78768,7 +87573,7 @@ index 47d0e6ea40..bd587b4f71 100644 #endif return ret; -@@ -2144,7 +2172,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw, +@@ -2144,7 +2174,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw, hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL); hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL); hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP); @@ -78777,7 +87582,7 @@ index 47d0e6ea40..bd587b4f71 100644 /* MAC Stats */ hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL); -@@ -2176,7 +2204,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw, +@@ -2176,7 +2206,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw, rd64(hw, TXGBE_MACTX1024TOMAXL); hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL); @@ -78786,7 +87591,7 @@ index 47d0e6ea40..bd587b4f71 100644 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER); /* MNG Stats */ -@@ -2298,8 +2326,7 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -2298,8 +2328,7 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) hw_stats->rx_mac_short_packet_dropped + hw_stats->rx_length_errors + hw_stats->rx_undersize_errors + @@ -78796,19 +87601,43 @@ index 47d0e6ea40..bd587b4f71 100644 hw_stats->rx_illegal_byte_errors + hw_stats->rx_error_bytes + hw_stats->rx_fragment_errors + -@@ -2759,6 +2786,11 @@ 
txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2591,7 +2620,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; +- dev_info->max_rx_pktlen = 15872; ++ dev_info->max_rx_pktlen = TXGBE_MAX_MTU + TXGBE_ETH_OVERHEAD; ++ dev_info->min_mtu = RTE_ETHER_MIN_MTU; ++ dev_info->max_mtu = TXGBE_MAX_MTU; + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = pci_dev->max_vfs; +@@ -2688,6 +2719,7 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, + bool link_up; + int err; + int wait = 1; ++ u32 reg; + + memset(&link, 0, sizeof(link)); + link.link_status = RTE_ETH_LINK_DOWN; +@@ -2759,6 +2791,16 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, break; } + /* Re configure MAC RX */ -+ if (hw->mac.type == txgbe_mac_raptor) ++ if (hw->mac.type == txgbe_mac_raptor) { ++ reg = rd32(hw, TXGBE_MACRXCFG); ++ wr32(hw, TXGBE_MACRXCFG, reg); + wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_PROMISC, + TXGBE_MACRXFLT_PROMISC); ++ reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); ++ wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg); ++ } + return rte_eth_linkstatus_set(dev, &link); } -@@ -2935,9 +2967,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2935,9 +2977,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX) wr32(hw, TXGBE_PX_INTA, 1); @@ -78818,7 +87647,7 @@ index 47d0e6ea40..bd587b4f71 100644 /* read-on-clear nic registers here */ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; PMD_DRV_LOG(DEBUG, "eicr %x", eicr); -@@ -2960,6 +2989,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2960,6 +2999,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, if (eicr & TXGBE_ICRMISC_GPIO) intr->flags |= TXGBE_FLAG_PHY_INTERRUPT; @@ -78827,7 +87656,7 @@ index 47d0e6ea40..bd587b4f71 100644 return 0; } -@@ -3129,7 +3160,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) +@@ -3129,7 +3170,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) } /* restore original mask */ @@ -78837,16 +87666,117 @@ index 47d0e6ea40..bd587b4f71 100644 intr->mask = intr->mask_orig; intr->mask_orig = 0; -@@ -3682,7 +3714,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, +@@ -3467,12 +3509,8 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EINVAL; + } + +- if (hw->mode) +- wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, +- TXGBE_FRAME_SIZE_MAX); +- else +- wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, +- TXGBE_FRMSZ_MAX(frame_size)); ++ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, ++ TXGBE_FRMSZ_MAX(frame_size)); + + return 0; + } +@@ -3623,13 +3661,13 @@ txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + + if (queue_id < 32) { +- mask = rd32(hw, TXGBE_IMS(0)); +- mask &= (1 << queue_id); +- wr32(hw, TXGBE_IMS(0), mask); ++ mask = rd32(hw, TXGBE_IMC(0)); ++ mask |= (1 << queue_id); ++ wr32(hw, TXGBE_IMC(0), mask); + } else if (queue_id < 64) { +- mask = rd32(hw, TXGBE_IMS(1)); +- mask &= (1 << (queue_id - 32)); +- wr32(hw, TXGBE_IMS(1), mask); ++ mask = rd32(hw, TXGBE_IMC(1)); ++ mask |= (1 << (queue_id - 32)); ++ wr32(hw, TXGBE_IMC(1), mask); + } + rte_intr_enable(intr_handle); + +@@ -3644,11 +3682,11 @@ txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) + + if 
(queue_id < 32) { + mask = rd32(hw, TXGBE_IMS(0)); +- mask &= ~(1 << queue_id); ++ mask |= (1 << queue_id); + wr32(hw, TXGBE_IMS(0), mask); + } else if (queue_id < 64) { + mask = rd32(hw, TXGBE_IMS(1)); +- mask &= ~(1 << (queue_id - 32)); ++ mask |= (1 << (queue_id - 32)); + wr32(hw, TXGBE_IMS(1), mask); + } + +@@ -3682,7 +3720,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, wr32(hw, TXGBE_IVARMISC, tmp); } else { /* rx or tx causes */ - /* Workround for ICR lost */ -+ /* Workaround for ICR lost */ ++ msix_vector |= TXGBE_IVAR_VLD; /* Workaround for ICR lost */ idx = ((16 * (queue & 1)) + (8 * direction)); tmp = rd32(hw, TXGBE_IVAR(queue >> 1)); tmp &= ~(0xFF << idx); -@@ -4387,7 +4419,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev) +@@ -3788,6 +3826,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint32_t syn_info; + uint32_t synqf; ++ uint16_t queue; + + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; +@@ -3797,7 +3836,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, + if (add) { + if (syn_info & TXGBE_SYNCLS_ENA) + return -EINVAL; +- synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue); ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue; ++ else ++ queue = filter->queue; ++ synqf = (uint32_t)TXGBE_SYNCLS_QPID(queue); + synqf |= TXGBE_SYNCLS_ENA; + + if (filter->hig_pri) +@@ -3866,7 +3909,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + wr32(hw, TXGBE_5TFPORT(i), sdpqf); + wr32(hw, TXGBE_5TFCTL0(i), ftqf); + +- l34timir |= TXGBE_5TFCTL1_QP(filter->queue); ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ l34timir |= TXGBE_5TFCTL1_QP(RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue); ++ else ++ l34timir |= TXGBE_5TFCTL1_QP(filter->queue); + wr32(hw, TXGBE_5TFCTL1(i), l34timir); + } + +@@ -4150,7 +4196,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + if (add) { + etqf = TXGBE_ETFLT_ENA; + etqf |= TXGBE_ETFLT_ETID(filter->ether_type); +- etqs |= TXGBE_ETCLS_QPID(filter->queue); ++ if (RTE_ETH_DEV_SRIOV(dev).active) { ++ int pool, queue; ++ ++ pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx; ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue; ++ etqf |= TXGBE_ETFLT_POOLENA; ++ etqf |= TXGBE_ETFLT_POOL(pool); ++ etqs |= TXGBE_ETCLS_QPID(queue); ++ } else { ++ etqs |= TXGBE_ETCLS_QPID(filter->queue); ++ } + etqs |= TXGBE_ETCLS_QENA; + + ethertype_filter.ethertype = filter->ether_type; +@@ -4387,7 +4443,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev) /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0); @@ -78856,7 +87786,7 @@ index 47d0e6ea40..bd587b4f71 100644 return 0; diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.h b/dpdk/drivers/net/txgbe/txgbe_ethdev.h -index 262dbb5e38..edc3311e19 100644 +index 262dbb5e38..87cdb63e88 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.h +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.h @@ -40,6 +40,7 @@ @@ -78867,20 +87797,58 @@ index 262dbb5e38..edc3311e19 100644 #ifndef NBBY #define NBBY 8 /* number of bits in a byte */ +@@ -54,7 +55,7 @@ + #define TXGBE_5TUPLE_MAX_PRI 7 + #define TXGBE_5TUPLE_MIN_PRI 1 + +- ++#define TXGBE_MAX_MTU 9414 + /* The overhead from MTU to max frame size. 
*/ + #define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c -index 84b960b8f9..f52cd8bc19 100644 +index 84b960b8f9..e8b5c326a5 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c -@@ -961,7 +961,7 @@ txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction, +@@ -293,6 +293,8 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev) + err = hw->mac.start_hw(hw); + if (err) { + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err); ++ rte_free(eth_dev->data->mac_addrs); ++ eth_dev->data->mac_addrs = NULL; + return -EIO; + } + +@@ -666,8 +668,10 @@ txgbevf_dev_start(struct rte_eth_dev *dev) + * now only one vector is used for Rx queue + */ + intr_vector = 1; +- if (rte_intr_efd_enable(intr_handle, intr_vector)) ++ if (rte_intr_efd_enable(intr_handle, intr_vector)) { ++ txgbe_dev_clear_queues(dev); + return -1; ++ } + } + + if (rte_intr_dp_is_en(intr_handle)) { +@@ -675,6 +679,7 @@ txgbevf_dev_start(struct rte_eth_dev *dev) + dev->data->nb_rx_queues)) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); ++ txgbe_dev_clear_queues(dev); + return -ENOMEM; + } + } +@@ -961,7 +966,7 @@ txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction, wr32(hw, TXGBE_VFIVARMISC, tmp); } else { /* rx or tx cause */ - /* Workround for ICR lost */ -+ /* Workaround for ICR lost */ ++ msix_vector |= TXGBE_VFIVAR_VLD; /* Workaround for ICR lost */ idx = ((16 * (queue & 1)) + (8 * direction)); tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1)); tmp &= ~(0xFF << idx); -@@ -997,7 +997,7 @@ txgbevf_configure_msix(struct rte_eth_dev *dev) +@@ -997,7 +1002,7 @@ txgbevf_configure_msix(struct rte_eth_dev *dev) /* Configure all RX queues of VF */ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { /* Force all queue use vector 0, @@ -78889,7 +87857,42 @@ index 84b960b8f9..f52cd8bc19 100644 */ txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); rte_intr_vec_list_index_set(intr_handle, q_idx, -@@ -1288,7 +1288,7 @@ txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) +@@ -1197,9 +1202,13 @@ static int + txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) + { + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); ++ int mode = TXGBEVF_XCAST_MODE_NONE; + int ret; + +- switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_NONE)) { ++ if (dev->data->all_multicast) ++ mode = TXGBEVF_XCAST_MODE_ALLMULTI; ++ ++ switch (hw->mac.update_xcast_mode(hw, mode)) { + case 0: + ret = 0; + break; +@@ -1220,6 +1229,9 @@ txgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int ret; + ++ if (dev->data->promiscuous) ++ return 0; ++ + switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_ALLMULTI)) { + case 0: + ret = 0; +@@ -1241,6 +1253,9 @@ txgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int ret; + ++ if (dev->data->promiscuous) ++ return 0; ++ + switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_MULTI)) { + case 0: + ret = 0; +@@ -1288,7 +1303,7 @@ txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) /* only one misc vector supported - mailbox */ eicr &= TXGBE_VFICR_MASK; @@ -78898,6 +87901,20 @@ index 84b960b8f9..f52cd8bc19 100644 intr->flags |= TXGBE_FLAG_MAILBOX; /* To avoid compiler warnings set eicr to used. 
*/ +diff --git a/dpdk/drivers/net/txgbe/txgbe_fdir.c b/dpdk/drivers/net/txgbe/txgbe_fdir.c +index e303d87176..c1c2580d8a 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_fdir.c ++++ b/dpdk/drivers/net/txgbe/txgbe_fdir.c +@@ -846,6 +846,9 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev, + return -EINVAL; + } + ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue; ++ + node = txgbe_fdir_filter_lookup(info, &rule->input); + if (node) { + if (!update) { diff --git a/dpdk/drivers/net/txgbe/txgbe_flow.c b/dpdk/drivers/net/txgbe/txgbe_flow.c index 6d7fd18428..ac9e8605c1 100644 --- a/dpdk/drivers/net/txgbe/txgbe_flow.c @@ -79080,7 +88097,7 @@ index fa6c347d53..6fa8147f05 100644 + #endif /* _TXGBE_PTYPE_H_ */ diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c -index 35b77cb271..f85ec77dd5 100644 +index 35b77cb271..6971c775ef 100644 --- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c +++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c @@ -516,20 +516,21 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) @@ -79110,15 +88127,35 @@ index 35b77cb271..f85ec77dd5 100644 ptype |= RTE_PTYPE_L2_ETHER_VLAN; /* L3 level */ -@@ -571,7 +572,6 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) - ptype |= RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | - RTE_PTYPE_TUNNEL_GRE; +@@ -563,30 +564,30 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) { + case RTE_MBUF_F_TX_TUNNEL_VXLAN: + case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_GRENAT; ++ ptype |= RTE_PTYPE_TUNNEL_GRENAT; + break; + case RTE_MBUF_F_TX_TUNNEL_GRE: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_GRE; - ptype |= RTE_PTYPE_INNER_L2_ETHER; ++ ptype |= RTE_PTYPE_TUNNEL_GRE; break; case RTE_MBUF_F_TX_TUNNEL_GENEVE: - ptype |= RTE_PTYPE_L2_ETHER | -@@ -587,6 +587,16 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_GENEVE; +- ptype |= RTE_PTYPE_INNER_L2_ETHER; ++ ptype |= RTE_PTYPE_TUNNEL_GENEVE; + break; + case RTE_MBUF_F_TX_TUNNEL_IPIP: + case RTE_MBUF_F_TX_TUNNEL_IP: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_IP; ++ ptype |= RTE_PTYPE_TUNNEL_IP; break; } @@ -79135,52 +88172,149 @@ index 35b77cb271..f85ec77dd5 100644 return txgbe_encode_ptype(ptype); } -@@ -694,22 +704,24 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) +@@ -657,11 +658,20 @@ txgbe_xmit_cleanup(struct txgbe_tx_queue *txq) + return 0; + } + ++#define GRE_CHECKSUM_PRESENT 0x8000 ++#define GRE_KEY_PRESENT 0x2000 ++#define GRE_SEQUENCE_PRESENT 0x1000 ++#define GRE_EXT_LEN 4 ++#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\ ++ GRE_SEQUENCE_PRESENT) ++ static inline uint8_t - txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt) + txgbe_get_tun_len(struct rte_mbuf *mbuf) + { + struct txgbe_genevehdr genevehdr; + const struct txgbe_genevehdr *gh; ++ const struct txgbe_grehdr *grh; ++ struct txgbe_grehdr grehdr; + uint8_t tun_len; + + switch (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { +@@ -674,11 +684,16 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) + + sizeof(struct txgbe_vxlanhdr); + break; + case RTE_MBUF_F_TX_TUNNEL_GRE: +- tun_len = sizeof(struct txgbe_nvgrehdr); ++ tun_len = sizeof(struct txgbe_grehdr); ++ grh = rte_pktmbuf_read(mbuf, ++ mbuf->outer_l2_len + mbuf->outer_l3_len, ++ sizeof(grehdr), &grehdr); ++ if (grh->flags & 
rte_cpu_to_be_16(GRE_SUPPORTED_FIELDS)) ++ tun_len += GRE_EXT_LEN; + break; + case RTE_MBUF_F_TX_TUNNEL_GENEVE: +- gh = rte_pktmbuf_read(mbuf, +- mbuf->outer_l2_len + mbuf->outer_l3_len, ++ gh = rte_pktmbuf_read(mbuf, mbuf->outer_l2_len + ++ mbuf->outer_l3_len + sizeof(struct txgbe_udphdr), + sizeof(genevehdr), &genevehdr); + tun_len = sizeof(struct txgbe_udphdr) + + sizeof(struct txgbe_genevehdr) +@@ -692,25 +707,26 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) + } + + static inline uint8_t +-txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt) ++txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt, uint8_t tun_len) { - uint64_t l2_none, l2_mac, l2_mac_vlan; -+ uint64_t l2_vxlan, l2_vxlan_mac, l2_vxlan_mac_vlan; -+ uint64_t l2_gre, l2_gre_mac, l2_gre_mac_vlan; ++ uint64_t inner_l2_len; uint8_t ptid = 0; - if ((tx_pkt->ol_flags & (RTE_MBUF_F_TX_TUNNEL_VXLAN | - RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) == 0) - return ptid; -+ l2_vxlan = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); -+ l2_vxlan_mac = l2_vxlan + sizeof(struct rte_ether_hdr); -+ l2_vxlan_mac_vlan = l2_vxlan_mac + sizeof(struct rte_vlan_hdr); ++ inner_l2_len = tx_pkt->l2_len - tun_len; - l2_none = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); - l2_mac = l2_none + sizeof(struct rte_ether_hdr); - l2_mac_vlan = l2_mac + sizeof(struct rte_vlan_hdr); -+ l2_gre = sizeof(struct txgbe_grehdr); -+ l2_gre_mac = l2_gre + sizeof(struct rte_ether_hdr); -+ l2_gre_mac_vlan = l2_gre_mac + sizeof(struct rte_vlan_hdr); - +- - if (tx_pkt->l2_len == l2_none) -+ if (tx_pkt->l2_len == l2_vxlan || tx_pkt->l2_len == l2_gre) ++ switch (inner_l2_len) { ++ case 0: ptid = TXGBE_PTID_TUN_EIG; - else if (tx_pkt->l2_len == l2_mac) -+ else if (tx_pkt->l2_len == l2_vxlan_mac || tx_pkt->l2_len == l2_gre_mac) ++ break; ++ case sizeof(struct rte_ether_hdr): ptid = TXGBE_PTID_TUN_EIGM; - else if (tx_pkt->l2_len == l2_mac_vlan) -+ else if (tx_pkt->l2_len == l2_vxlan_mac_vlan || -+ tx_pkt->l2_len == l2_gre_mac_vlan) ++ break; ++ case sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr): ptid = TXGBE_PTID_TUN_EIGMV; ++ break; ++ default: ++ ptid = TXGBE_PTID_TUN_EI; ++ } return ptid; -@@ -776,8 +788,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + } +@@ -776,10 +792,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* If hardware offload required */ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK; if (tx_ol_req) { - tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req, - tx_pkt->packet_type); +- if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) +- tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt); + tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req); - if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) - tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt); tx_offload.l2_len = tx_pkt->l2_len; -@@ -2795,6 +2806,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; +@@ -788,6 +801,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; + tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt); ++ if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) ++ tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt, ++ tx_offload.outer_tun_len); + + #ifdef RTE_LIB_SECURITY + if (use_ipsec) { +@@ -2124,6 +2140,7 @@ txgbe_tx_queue_release(struct txgbe_tx_queue *txq) + if (txq != NULL && txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); ++ rte_memzone_free(txq->mz); + 
rte_free(txq); + } + } +@@ -2335,6 +2352,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ txq->mz = tz; + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; +@@ -2452,6 +2470,7 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq) + txgbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq->sw_sc_ring); ++ rte_memzone_free(rxq->mz); + rte_free(rxq); + } + } +@@ -2545,6 +2564,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; ++ rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + } +@@ -2625,6 +2645,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ rxq->mz = rz; + /* + * Zero init all the descriptors in the ring. + */ +@@ -2795,6 +2816,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txq->ops->release_mbufs(txq); txq->ops->reset(txq); } @@ -79189,7 +88323,7 @@ index 35b77cb271..f85ec77dd5 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { -@@ -2804,6 +2817,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2804,6 +2827,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txgbe_rx_queue_release_mbufs(rxq); txgbe_reset_rx_queue(adapter, rxq); } @@ -79198,7 +88332,7 @@ index 35b77cb271..f85ec77dd5 100644 } } -@@ -4382,7 +4397,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) +@@ -4382,7 +4407,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) */ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM); @@ -79207,7 +88341,7 @@ index 35b77cb271..f85ec77dd5 100644 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size); wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl); -@@ -4994,6 +5009,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -4994,6 +5019,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); @@ -79216,7 +88350,7 @@ index 35b77cb271..f85ec77dd5 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; -@@ -5008,6 +5025,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -5008,6 +5035,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); @@ -79225,6 +88359,48 @@ index 35b77cb271..f85ec77dd5 100644 rte_wmb(); wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1); } +@@ -5055,6 +5084,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, + uint32_t reta; + uint16_t i; + uint16_t j; ++ uint16_t queue; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? 
+ (void *)(uintptr_t)conf->conf.key : NULL, +@@ -5087,7 +5117,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, + for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) { + if (j == conf->conf.queue_num) + j = 0; +- reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF); ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + ++ conf->conf.queue[j]; ++ else ++ queue = conf->conf.queue[j]; ++ reta = (reta >> 8) | LS32(queue, 24, 0xFF); + if ((i & 3) == 3) + wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta); + } +diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.h b/dpdk/drivers/net/txgbe/txgbe_rxtx.h +index 27d4c842c0..c579e1a9f2 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_rxtx.h ++++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.h +@@ -314,6 +314,7 @@ struct txgbe_rx_queue { + struct rte_mbuf fake_mbuf; + /** hold packets to return to application */ + struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2]; ++ const struct rte_memzone *mz; + }; + + /** +@@ -402,6 +403,7 @@ struct txgbe_tx_queue { + uint8_t using_ipsec; + /**< indicates that IPsec TX feature is in use */ + #endif ++ const struct rte_memzone *mz; + }; + + struct txgbe_txq_ops { diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c index 070f0e6dfd..1306cc2935 100644 --- a/dpdk/drivers/net/vhost/rte_eth_vhost.c @@ -79842,7 +89018,7 @@ index 01a333ada2..d78b8278c6 100644 endif endif diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c -index c2588369b2..bcad27817e 100644 +index c2588369b2..ea7e712590 100644 --- a/dpdk/drivers/net/virtio/virtio_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_ethdev.c @@ -638,10 +638,13 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx) @@ -79861,7 +89037,16 @@ index c2588369b2..bcad27817e 100644 if (queue_type == VTNET_TQ) { struct virtio_tx_region *txr; -@@ -1796,22 +1799,25 @@ static int +@@ -1319,6 +1322,8 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + struct virtio_net_ctrl_mac *tbl + = rte_is_multicast_ether_addr(addr) ? 
mc : uc; + ++ if (rte_is_zero_ether_addr(addr)) ++ break; + memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN); + } + +@@ -1796,22 +1801,25 @@ static int virtio_configure_intr(struct rte_eth_dev *dev) { struct virtio_hw *hw = dev->data->dev_private; @@ -79892,7 +89077,7 @@ index c2588369b2..bcad27817e 100644 } if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { -@@ -1832,12 +1838,13 @@ virtio_configure_intr(struct rte_eth_dev *dev) +@@ -1832,12 +1840,13 @@ virtio_configure_intr(struct rte_eth_dev *dev) */ if (virtio_intr_enable(dev) < 0) { PMD_DRV_LOG(ERR, "interrupt enable failed"); @@ -79909,7 +89094,7 @@ index c2588369b2..bcad27817e 100644 } return 0; -@@ -2028,7 +2035,8 @@ virtio_dev_rss_hash_update(struct rte_eth_dev *dev, +@@ -2028,7 +2037,8 @@ virtio_dev_rss_hash_update(struct rte_eth_dev *dev, return 0; restore_key: @@ -79919,7 +89104,7 @@ index c2588369b2..bcad27817e 100644 restore_types: hw->rss_hash_types = old_hash_types; -@@ -2159,7 +2167,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) +@@ -2159,7 +2169,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) eth_dev->device->numa_node); if (!hw->rss_key) { PMD_INIT_LOG(ERR, "Failed to allocate RSS key"); @@ -79928,7 +89113,7 @@ index c2588369b2..bcad27817e 100644 } } -@@ -2181,7 +2189,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) +@@ -2181,7 +2191,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) eth_dev->device->numa_node); if (!hw->rss_reta) { PMD_INIT_LOG(ERR, "Failed to allocate RSS reta"); @@ -79937,7 +89122,7 @@ index c2588369b2..bcad27817e 100644 } hw->rss_rx_queues = 0; -@@ -2221,7 +2229,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) +@@ -2221,7 +2231,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) /* Tell the host we've known how to drive the device. 
*/ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); if (virtio_ethdev_negotiate_features(hw, req_features) < 0) @@ -79946,6 +89131,15 @@ index c2588369b2..bcad27817e 100644 hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM); +@@ -2231,8 +2241,6 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + else + eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; + +- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; +- + /* Setting up rx_header size for the device */ + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) || + virtio_with_feature(hw, VIRTIO_F_VERSION_1) || @@ -2303,7 +2311,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) if (config->mtu < RTE_ETHER_MIN_MTU) { PMD_INIT_LOG(ERR, "invalid max MTU value (%u)", @@ -80418,10 +89612,20 @@ index 7534974ef4..e7f0ed6068 100644 if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) { diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c -index cc830a660f..77820bf967 100644 +index cc830a660f..df383bd8e6 100644 --- a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c +++ b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c -@@ -840,8 +840,10 @@ vhost_user_setup(struct virtio_user_dev *dev) +@@ -129,7 +129,8 @@ vhost_user_write(int fd, struct vhost_user_msg *msg, int *fds, int fd_num) + cmsg->cmsg_len = CMSG_LEN(fd_size); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; +- memcpy(CMSG_DATA(cmsg), fds, fd_size); ++ if (fd_size > 0) ++ memcpy(CMSG_DATA(cmsg), fds, fd_size); + + do { + r = sendmsg(fd, &msgh, 0); +@@ -840,8 +841,10 @@ vhost_user_setup(struct virtio_user_dev *dev) } flag = fcntl(fd, F_GETFD); @@ -80434,7 +89638,7 @@ index cc830a660f..77820bf967 100644 memset(&un, 0, sizeof(un)); un.sun_family = AF_UNIX; -@@ -940,15 +942,8 @@ vhost_user_update_link_state(struct virtio_user_dev *dev) +@@ -940,15 +943,8 @@ vhost_user_update_link_state(struct virtio_user_dev *dev) if (data->vhostfd >= 0) { int r; @@ -80451,7 +89655,7 @@ index cc830a660f..77820bf967 100644 if (r == 0 || (r < 0 && errno != EAGAIN)) { dev->net_status &= (~VIRTIO_NET_S_LINK_UP); PMD_DRV_LOG(ERR, "virtio-user port %u is down", dev->hw.port_id); -@@ -963,12 +958,6 @@ vhost_user_update_link_state(struct virtio_user_dev *dev) +@@ -963,12 +959,6 @@ vhost_user_update_link_state(struct virtio_user_dev *dev) } else { dev->net_status |= VIRTIO_NET_S_LINK_UP; } @@ -80612,10 +89816,26 @@ index 855f57a956..39288c13f8 100644 idx++; if (idx >= vq->vq_nentries) { diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c -index d1ef1cad08..a48a355d39 100644 +index d1ef1cad08..32d1036c4f 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c -@@ -822,6 +822,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) +@@ -206,6 +206,7 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx) + VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 1); + } + ++#ifndef RTE_EXEC_ENV_FREEBSD + /* + * Enable all intrs used by the device + */ +@@ -227,6 +228,7 @@ vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw) + vmxnet3_enable_intr(hw, i); + } + } ++#endif + + /* + * Disable all intrs used by the device +@@ -822,6 +824,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size; rqd->conf.compRingSize = rxq->comp_ring.size; @@ -80627,7 +89847,7 @@ index d1ef1cad08..a48a355d39 100644 if 
(hw->intr.lsc_only) rqd->conf.intrIdx = 1; else -@@ -885,6 +890,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -885,6 +892,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) { int ret; struct vmxnet3_hw *hw = dev->data->dev_private; @@ -80635,7 +89855,23 @@ index d1ef1cad08..a48a355d39 100644 PMD_INIT_FUNC_TRACE(); -@@ -980,6 +986,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -957,6 +965,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + /* Setting proper Rx Mode and issue Rx Mode Update command */ + vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1); + ++#ifndef RTE_EXEC_ENV_FREEBSD + /* Setup interrupt callback */ + rte_intr_callback_register(dev->intr_handle, + vmxnet3_interrupt_handler, dev); +@@ -968,6 +977,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + + /* enable all intrs */ + vmxnet3_enable_all_intrs(hw); ++#endif + + vmxnet3_process_events(dev); + +@@ -980,6 +990,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) */ __vmxnet3_dev_link_update(dev, 0); @@ -80647,7 +89883,7 @@ index d1ef1cad08..a48a355d39 100644 return VMXNET3_SUCCESS; } -@@ -992,6 +1003,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) +@@ -992,6 +1007,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) struct rte_eth_link link; struct vmxnet3_hw *hw = dev->data->dev_private; struct rte_intr_handle *intr_handle = dev->intr_handle; @@ -80655,7 +89891,7 @@ index d1ef1cad08..a48a355d39 100644 int ret; PMD_INIT_FUNC_TRACE(); -@@ -1047,6 +1059,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) +@@ -1047,6 +1063,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) hw->adapter_stopped = 1; dev->data->dev_started = 0; @@ -80667,6 +89903,33 @@ index d1ef1cad08..a48a355d39 100644 return 0; } +@@ -1668,11 +1689,13 @@ vmxnet3_interrupt_handler(void *param) + static int + vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) + { ++#ifndef RTE_EXEC_ENV_FREEBSD + struct vmxnet3_hw *hw = dev->data->dev_private; + + vmxnet3_enable_intr(hw, + rte_intr_vec_list_index_get(dev->intr_handle, + queue_id)); ++#endif + + return 0; + } +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h +index 74154e3a1a..ae8542811a 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h +@@ -7,7 +7,7 @@ + + extern int vmxnet3_logtype_init; + #define PMD_INIT_LOG(level, fmt, args...) 
\ +- rte_log(RTE_LOG_ ## level, vmxnet3_logtype_driver, \ ++ rte_log(RTE_LOG_ ## level, vmxnet3_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) + #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c index d745064bc4..a01f2c3cdd 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -82619,6 +91882,19 @@ index fccdd8c687..53f598facc 100644 return -1; } +diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c +index fd1fee7e32..ad02723bb2 100644 +--- a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c ++++ b/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c +@@ -13,8 +13,6 @@ + #include "sfc_vdpa.h" + #include "sfc_vdpa_ops.h" + +-extern uint32_t sfc_logtype_driver; +- + #ifndef PAGE_SIZE + #define PAGE_SIZE (sysconf(_SC_PAGESIZE)) + #endif diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.c index c4ce4474ef..b84699d234 100644 --- a/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.c @@ -83612,6 +92888,19 @@ index 7419e85db2..5fe91b62e4 100644 } /* +diff --git a/dpdk/examples/ipsec-secgw/parser.c b/dpdk/examples/ipsec-secgw/parser.c +index 98dff93b87..f49f8ede6d 100644 +--- a/dpdk/examples/ipsec-secgw/parser.c ++++ b/dpdk/examples/ipsec-secgw/parser.c +@@ -387,7 +387,7 @@ cfg_parse_neigh(void *parsed_result, __rte_unused struct cmdline *cl, + rc = parse_mac(res->mac, &mac); + APP_CHECK(rc == 0, st, "invalid ether addr:%s", res->mac); + rc = add_dst_ethaddr(res->port, &mac); +- APP_CHECK(rc == 0, st, "invalid port numer:%hu", res->port); ++ APP_CHECK(rc == 0, st, "invalid port number:%hu", res->port); + if (st->status < 0) + return; + } diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c index 30bc693e06..49d16f055b 100644 --- a/dpdk/examples/ipsec-secgw/sa.c @@ -84408,7 +93697,7 @@ index bb565ed546..d5a717e18c 100644 * dp1 should contain: , dp2: . * We doing 4 comparisons at once and the result is 4 bit mask. 
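The "4 comparisons at once ... 4 bit mask" note just above describes folding four 16-bit id comparisons into a single bitmask so packets with the same destination can be grouped in one step. A minimal, self-contained sketch of that idea follows; it is illustrative only, not the l3fwd code itself, and the helper name and lane layout are assumptions:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Compare the four 16-bit ids held in the low lanes of dp1/dp2 and
     * return a 4-bit mask: bit i is set when dp1[i] == dp2[i]. */
    static inline uint32_t
    port_cmp_mask4(__m128i dp1, __m128i dp2)
    {
        __m128i eq = _mm_cmpeq_epi16(dp1, dp2);        /* 0xFFFF per equal lane */
        uint32_t m8 = (uint32_t)_mm_movemask_epi8(eq); /* two bits per 16-bit lane */
        uint32_t i, m4 = 0;

        for (i = 0; i != 4; i++)
            m4 |= ((m8 >> (2 * i)) & 1) << i;          /* keep one bit per lane */
        return m4;
    }

Each set bit in the result marks a pair of adjacent packets sharing a destination, which is what lets the burst be split into same-port runs without a per-packet branch.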
diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c -index eb68ffc5aa..59436bb589 100644 +index eb68ffc5aa..32cc45647e 100644 --- a/dpdk/examples/l3fwd/main.c +++ b/dpdk/examples/l3fwd/main.c @@ -53,9 +53,8 @@ @@ -84551,6 +93840,26 @@ index eb68ffc5aa..59436bb589 100644 config_port_max_pkt_len(struct rte_eth_conf *conf, struct rte_eth_dev_info *dev_info) { +@@ -1408,7 +1459,6 @@ main(int argc, char **argv) + l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop; + else + l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop; +- l3fwd_event_service_setup(); + } else + l3fwd_poll_resource_setup(); + +@@ -1439,6 +1489,11 @@ main(int argc, char **argv) + } + } + ++#ifdef RTE_LIB_EVENTDEV ++ if (evt_rsrc->enabled) ++ l3fwd_event_service_setup(); ++#endif ++ + printf("\n"); + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { diff --git a/dpdk/examples/link_status_interrupt/main.c b/dpdk/examples/link_status_interrupt/main.c index 551f0524da..9699e14ce6 100644 --- a/dpdk/examples/link_status_interrupt/main.c @@ -84648,10 +93957,63 @@ index f110fc129f..41bb536141 100644 return; } diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c -index b01ac60fd1..99e67ef67b 100644 +index b01ac60fd1..f16afe8986 100644 --- a/dpdk/examples/packet_ordering/main.c +++ b/dpdk/examples/packet_ordering/main.c -@@ -686,7 +686,7 @@ main(int argc, char **argv) +@@ -4,6 +4,7 @@ + + #include + #include ++#include + + #include + #include +@@ -426,8 +427,8 @@ int_handler(int sig_num) + * The mbufs are then passed to the worker threads via the rx_to_workers + * ring. + */ +-static int +-rx_thread(struct rte_ring *ring_out) ++static __rte_always_inline int ++rx_thread(struct rte_ring *ring_out, bool disable_reorder_flag) + { + uint32_t seqn = 0; + uint16_t i, ret = 0; +@@ -453,9 +454,11 @@ rx_thread(struct rte_ring *ring_out) + } + app_stats.rx.rx_pkts += nb_rx_pkts; + +- /* mark sequence number */ +- for (i = 0; i < nb_rx_pkts; ) +- *rte_reorder_seqn(pkts[i++]) = seqn++; ++ /* mark sequence number if reorder is enabled */ ++ if (!disable_reorder_flag) { ++ for (i = 0; i < nb_rx_pkts;) ++ *rte_reorder_seqn(pkts[i++]) = seqn++; ++ } + + /* enqueue to rx_to_workers ring */ + ret = rte_ring_enqueue_burst(ring_out, +@@ -472,6 +475,18 @@ rx_thread(struct rte_ring *ring_out) + return 0; + } + ++static __rte_noinline int ++rx_thread_reorder(struct rte_ring *ring_out) ++{ ++ return rx_thread(ring_out, false); ++} ++ ++static __rte_noinline int ++rx_thread_reorder_disabled(struct rte_ring *ring_out) ++{ ++ return rx_thread(ring_out, true); ++} ++ + /** + * This thread takes bursts of packets from the rx_to_workers ring and + * Changes the input port value to output port value. 
And feds it to +@@ -686,7 +701,7 @@ main(int argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid packet_ordering arguments\n"); @@ -84660,6 +94022,20 @@ index b01ac60fd1..99e67ef67b 100644 if (rte_lcore_count() < 3) rte_exit(EXIT_FAILURE, "Error, This application needs at " "least 3 logical cores to run:\n" +@@ -771,8 +786,11 @@ main(int argc, char **argv) + (void *)&send_args, last_lcore_id); + } + +- /* Start rx_thread() on the main core */ +- rx_thread(rx_to_workers); ++ /* Start rx_thread_xxx() on the main core */ ++ if (disable_reorder) ++ rx_thread_reorder_disabled(rx_to_workers); ++ else ++ rx_thread_reorder(rx_to_workers); + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) diff --git a/dpdk/examples/performance-thread/common/lthread.c b/dpdk/examples/performance-thread/common/lthread.c index 009374a8c3..b02e0fc13a 100644 --- a/dpdk/examples/performance-thread/common/lthread.c @@ -84842,7 +94218,7 @@ index 74a014ad06..59998fef03 100644 ; The "regrd" and "regwr" CLI commands can be used to read and write the current value of ; any register array location. diff --git a/dpdk/examples/qos_sched/args.c b/dpdk/examples/qos_sched/args.c -index 10ca7bea61..ff51d0215a 100644 +index 10ca7bea61..7c92f14a02 100644 --- a/dpdk/examples/qos_sched/args.c +++ b/dpdk/examples/qos_sched/args.c @@ -11,6 +11,7 @@ @@ -84853,7 +94229,19 @@ index 10ca7bea61..ff51d0215a 100644 #include #include #include -@@ -427,13 +428,13 @@ app_parse_args(int argc, char **argv) +@@ -141,8 +142,10 @@ app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32 + + n_tokens = rte_strsplit(string, strnlen(string, 32), tokens, n_vals, separator); + +- if (n_tokens > MAX_OPT_VALUES) ++ if (n_tokens > MAX_OPT_VALUES) { ++ free(string); + return -1; ++ } + + for (i = 0; i < n_tokens; i++) + opt_vals[i] = (uint32_t)atol(tokens[i]); +@@ -427,13 +430,13 @@ app_parse_args(int argc, char **argv) /* check main core index validity */ for (i = 0; i <= app_main_core; i++) { @@ -85505,11 +94893,43 @@ index bf5aa20a55..1f612711be 100644 '-include rte_config.h'] # to avoid warnings due to race conditions with creating the dev_if.h, etc. 
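The packet_ordering rx_thread change earlier in this section uses a common specialization pattern: one __rte_always_inline body takes a boolean flag, and two __rte_noinline wrappers instantiate it, so the flag becomes a compile-time constant and the per-packet branch folds away in each copy. A hedged, self-contained sketch of the same pattern (all names here are invented for illustration, assuming GCC/Clang attributes):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static inline __attribute__((always_inline)) uint64_t
    sum_burst(const uint32_t *vals, size_t n, bool stamp_seqn)
    {
        uint64_t acc = 0;
        uint32_t seqn = 0;
        size_t i;

        for (i = 0; i != n; i++) {
            if (stamp_seqn)                 /* constant after inlining */
                acc += (uint64_t)vals[i] + seqn++;
            else
                acc += vals[i];
        }
        return acc;
    }

    /* Two out-of-line entry points; each gets its own branch-free loop. */
    static __attribute__((noinline)) uint64_t
    sum_burst_stamped(const uint32_t *vals, size_t n)
    {
        return sum_burst(vals, n, true);
    }

    static __attribute__((noinline)) uint64_t
    sum_burst_plain(const uint32_t *vals, size_t n)
    {
        return sum_burst(vals, n, false);
    }

This mirrors the rx_thread_reorder()/rx_thread_reorder_disabled() split: the hot loop is written once, yet the disable-reorder case pays nothing for the sequence-number stamping it never performs.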
+diff --git a/dpdk/kernel/freebsd/nic_uio/nic_uio.c b/dpdk/kernel/freebsd/nic_uio/nic_uio.c +index 7a81694c92..0043892870 100644 +--- a/dpdk/kernel/freebsd/nic_uio/nic_uio.c ++++ b/dpdk/kernel/freebsd/nic_uio/nic_uio.c +@@ -78,10 +78,14 @@ struct pci_bdf { + uint32_t function; + }; + +-static devclass_t nic_uio_devclass; +- + DEFINE_CLASS_0(nic_uio, nic_uio_driver, nic_uio_methods, sizeof(struct nic_uio_softc)); ++ ++#if __FreeBSD_version < 1400000 ++static devclass_t nic_uio_devclass; + DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_devclass, nic_uio_modevent, 0); ++#else ++DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_modevent, 0); ++#endif + + static int + nic_uio_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr, diff --git a/dpdk/kernel/linux/kni/compat.h b/dpdk/kernel/linux/kni/compat.h -index 664785674f..8beb670465 100644 +index 664785674f..5d68eb4c8a 100644 --- a/dpdk/kernel/linux/kni/compat.h +++ b/dpdk/kernel/linux/kni/compat.h -@@ -141,3 +141,17 @@ +@@ -82,6 +82,10 @@ + #define HAVE_SK_ALLOC_KERN_PARAM + #endif + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 8, 0) ++#define strlcpy strscpy ++#endif ++ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || \ + (defined(RHEL_RELEASE_CODE) && \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) || \ +@@ -141,3 +145,17 @@ #if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE #define HAVE_TSK_IN_GUP #endif @@ -86079,11 +95499,533 @@ index 26d165ad5c..b4d8e87c6d 100644 -#endif /* _BPF_H_ */ +#endif /* BPF_IMPL_H */ +diff --git a/dpdk/lib/bpf/bpf_validate.c b/dpdk/lib/bpf/bpf_validate.c +index 09331258eb..dfbef6ca42 100644 +--- a/dpdk/lib/bpf/bpf_validate.c ++++ b/dpdk/lib/bpf/bpf_validate.c +@@ -31,10 +31,13 @@ struct bpf_reg_val { + }; + + struct bpf_eval_state { ++ SLIST_ENTRY(bpf_eval_state) next; /* for @safe list traversal */ + struct bpf_reg_val rv[EBPF_REG_NUM]; + struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)]; + }; + ++SLIST_HEAD(bpf_evst_head, bpf_eval_state); ++ + /* possible instruction node colour */ + enum { + WHITE, +@@ -54,6 +57,9 @@ enum { + + #define MAX_EDGES 2 + ++/* max number of 'safe' evaluated states to track per node */ ++#define NODE_EVST_MAX 32 ++ + struct inst_node { + uint8_t colour; + uint8_t nb_edge:4; +@@ -61,7 +67,18 @@ struct inst_node { + uint8_t edge_type[MAX_EDGES]; + uint32_t edge_dest[MAX_EDGES]; + uint32_t prev_node; +- struct bpf_eval_state *evst; ++ struct { ++ struct bpf_eval_state *cur; /* save/restore for jcc targets */ ++ struct bpf_eval_state *start; ++ struct bpf_evst_head safe; /* safe states for track/prune */ ++ uint32_t nb_safe; ++ } evst; ++}; ++ ++struct evst_pool { ++ uint32_t num; ++ uint32_t cur; ++ struct bpf_eval_state *ent; + }; + + struct bpf_verifier { +@@ -75,11 +92,8 @@ struct bpf_verifier { + uint32_t edge_type[MAX_EDGE_TYPE]; + struct bpf_eval_state *evst; + struct inst_node *evin; +- struct { +- uint32_t num; +- uint32_t cur; +- struct bpf_eval_state *ent; +- } evst_pool; ++ struct evst_pool evst_sr_pool; /* for evst save/restore */ ++ struct evst_pool evst_tp_pool; /* for evst track/prune */ + }; + + struct bpf_ins_check { +@@ -638,14 +652,14 @@ eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins) + { + uint64_t msk; + uint32_t op; +- size_t opsz; ++ size_t opsz, sz; + const char *err; + struct bpf_eval_state *st; + struct bpf_reg_val *rd, rs; + +- opsz = (BPF_CLASS(ins->code) == BPF_ALU) ? ++ sz = (BPF_CLASS(ins->code) == BPF_ALU) ? 
+ sizeof(uint32_t) : sizeof(uint64_t); +- opsz = opsz * CHAR_BIT; ++ opsz = sz * CHAR_BIT; + msk = RTE_LEN2MASK(opsz, uint64_t); + + st = bvf->evst; +@@ -654,8 +668,10 @@ eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins) + if (BPF_SRC(ins->code) == BPF_X) { + rs = st->rv[ins->src_reg]; + eval_apply_mask(&rs, msk); +- } else ++ } else { ++ rs = (struct bpf_reg_val){.v = {.size = sz,},}; + eval_fill_imm(&rs, msk, ins->imm); ++ } + + eval_apply_mask(rd, msk); + +@@ -1085,7 +1101,7 @@ eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins) + struct bpf_reg_val rvf, rvt; + + tst = bvf->evst; +- fst = bvf->evin->evst; ++ fst = bvf->evin->evst.cur; + + frd = fst->rv + ins->dst_reg; + trd = tst->rv + ins->dst_reg; +@@ -1814,8 +1830,8 @@ add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx) + uint32_t ne; + + if (nidx > bvf->prm->nb_ins) { +- RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, " +- "next pc: %u\n", ++ RTE_BPF_LOG(ERR, ++ "%s: program boundary violation at pc: %u, next pc: %u\n", + __func__, get_node_idx(bvf, node), nidx); + return -EINVAL; + } +@@ -2091,60 +2107,114 @@ validate(struct bpf_verifier *bvf) + * helper functions get/free eval states. + */ + static struct bpf_eval_state * +-pull_eval_state(struct bpf_verifier *bvf) ++pull_eval_state(struct evst_pool *pool) + { + uint32_t n; + +- n = bvf->evst_pool.cur; +- if (n == bvf->evst_pool.num) ++ n = pool->cur; ++ if (n == pool->num) + return NULL; + +- bvf->evst_pool.cur = n + 1; +- return bvf->evst_pool.ent + n; ++ pool->cur = n + 1; ++ return pool->ent + n; + } + + static void +-push_eval_state(struct bpf_verifier *bvf) ++push_eval_state(struct evst_pool *pool) + { +- bvf->evst_pool.cur--; ++ RTE_ASSERT(pool->cur != 0); ++ pool->cur--; + } + + static void + evst_pool_fini(struct bpf_verifier *bvf) + { + bvf->evst = NULL; +- free(bvf->evst_pool.ent); +- memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool)); ++ free(bvf->evst_sr_pool.ent); ++ memset(&bvf->evst_sr_pool, 0, sizeof(bvf->evst_sr_pool)); ++ memset(&bvf->evst_tp_pool, 0, sizeof(bvf->evst_tp_pool)); + } + + static int + evst_pool_init(struct bpf_verifier *bvf) + { +- uint32_t n; ++ uint32_t k, n; + +- n = bvf->nb_jcc_nodes + 1; ++ /* ++ * We need nb_jcc_nodes + 1 for save_cur/restore_cur ++ * remaining ones will be used for state tracking/pruning. ++ */ ++ k = bvf->nb_jcc_nodes + 1; ++ n = k * 3; + +- bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0])); +- if (bvf->evst_pool.ent == NULL) ++ bvf->evst_sr_pool.ent = calloc(n, sizeof(bvf->evst_sr_pool.ent[0])); ++ if (bvf->evst_sr_pool.ent == NULL) + return -ENOMEM; + +- bvf->evst_pool.num = n; +- bvf->evst_pool.cur = 0; ++ bvf->evst_sr_pool.num = k; ++ bvf->evst_sr_pool.cur = 0; + +- bvf->evst = pull_eval_state(bvf); ++ bvf->evst_tp_pool.ent = bvf->evst_sr_pool.ent + k; ++ bvf->evst_tp_pool.num = n - k; ++ bvf->evst_tp_pool.cur = 0; ++ ++ bvf->evst = pull_eval_state(&bvf->evst_sr_pool); + return 0; + } + ++/* ++ * try to allocate and initialise new eval state for given node. ++ * later if no errors will be encountered, this state will be accepted as ++ * one of the possible 'safe' states for that node. 
++ */ ++static void ++save_start_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++{ ++ RTE_ASSERT(node->evst.start == NULL); ++ ++ /* limit number of states for one node with some reasonable value */ ++ if (node->evst.nb_safe >= NODE_EVST_MAX) ++ return; ++ ++ /* try to get new eval_state */ ++ node->evst.start = pull_eval_state(&bvf->evst_tp_pool); ++ ++ /* make a copy of current state */ ++ if (node->evst.start != NULL) { ++ memcpy(node->evst.start, bvf->evst, sizeof(*node->evst.start)); ++ SLIST_NEXT(node->evst.start, next) = NULL; ++ } ++} ++ ++/* ++ * add @start state to the list of @safe states. ++ */ ++static void ++save_safe_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++{ ++ if (node->evst.start == NULL) ++ return; ++ ++ SLIST_INSERT_HEAD(&node->evst.safe, node->evst.start, next); ++ node->evst.nb_safe++; ++ ++ RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u,state=%p): nb_safe=%u;\n", ++ __func__, bvf, get_node_idx(bvf, node), node->evst.start, ++ node->evst.nb_safe); ++ ++ node->evst.start = NULL; ++} ++ + /* + * Save current eval state. + */ + static int +-save_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++save_cur_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + { + struct bpf_eval_state *st; + + /* get new eval_state for this node */ +- st = pull_eval_state(bvf); ++ st = pull_eval_state(&bvf->evst_sr_pool); + if (st == NULL) { + RTE_BPF_LOG(ERR, + "%s: internal error (out of space) at pc: %u\n", +@@ -2156,11 +2226,13 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + memcpy(st, bvf->evst, sizeof(*st)); + + /* swap current state with new one */ +- node->evst = bvf->evst; ++ RTE_ASSERT(node->evst.cur == NULL); ++ node->evst.cur = bvf->evst; + bvf->evst = st; + + RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n", +- __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst); ++ __func__, bvf, get_node_idx(bvf, node), node->evst.cur, ++ bvf->evst); + + return 0; + } +@@ -2169,14 +2241,15 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + * Restore previous eval state and mark current eval state as free. + */ + static void +-restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++restore_cur_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + { + RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n", +- __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst); ++ __func__, bvf, get_node_idx(bvf, node), bvf->evst, ++ node->evst.cur); + +- bvf->evst = node->evst; +- node->evst = NULL; +- push_eval_state(bvf); ++ bvf->evst = node->evst.cur; ++ node->evst.cur = NULL; ++ push_eval_state(&bvf->evst_sr_pool); + } + + static void +@@ -2193,26 +2266,124 @@ log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins, + + rte_log(loglvl, rte_bpf_logtype, + "r%u={\n" +- "\tv={type=%u, size=%zu},\n" ++ "\tv={type=%u, size=%zu, buf_size=%zu},\n" + "\tmask=0x%" PRIx64 ",\n" + "\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n" + "\ts={min=%" PRId64 ", max=%" PRId64 "},\n" + "};\n", + ins->dst_reg, +- rv->v.type, rv->v.size, ++ rv->v.type, rv->v.size, rv->v.buf_size, + rv->mask, + rv->u.min, rv->u.max, + rv->s.min, rv->s.max); + } + + /* +- * Do second pass through CFG and try to evaluate instructions +- * via each possible path. +- * Right now evaluation functionality is quite limited. +- * Still need to add extra checks for: +- * - use/return uninitialized registers. +- * - use uninitialized data from the stack. 
+- * - memory boundaries violation.
++ * compare two evaluation states.
++ * returns zero if @lv is more conservative (safer) than @rv.
++ * returns a non-zero value otherwise.
++ */
++static int
++cmp_reg_val_within(const struct bpf_reg_val *lv, const struct bpf_reg_val *rv)
++{
++	/* expect @v and @mask to be identical */
++	if (memcmp(&lv->v, &rv->v, sizeof(lv->v)) != 0 || lv->mask != rv->mask)
++		return -1;
++
++	/* exact match only for mbuf and stack pointers */
++	if (lv->v.type == RTE_BPF_ARG_PTR_MBUF ||
++			lv->v.type == BPF_ARG_PTR_STACK)
++		return -1;
++
++	if (lv->u.min <= rv->u.min && lv->u.max >= rv->u.max &&
++			lv->s.min <= rv->s.min && lv->s.max >= rv->s.max)
++		return 0;
++
++	return -1;
++}
++
++/*
++ * compare two evaluation states.
++ * returns zero if they are identical.
++ * returns a positive value if @lv is more conservative (safer) than @rv.
++ * returns a negative value otherwise.
++ */
++static int
++cmp_eval_state(const struct bpf_eval_state *lv, const struct bpf_eval_state *rv)
++{
++	int32_t rc;
++	uint32_t i, k;
++
++	/* for stack expect identical values */
++	rc = memcmp(lv->sv, rv->sv, sizeof(lv->sv));
++	if (rc != 0)
++		return -(2 * EBPF_REG_NUM);
++
++	k = 0;
++	/* check register values */
++	for (i = 0; i != RTE_DIM(lv->rv); i++) {
++		rc = memcmp(&lv->rv[i], &rv->rv[i], sizeof(lv->rv[i]));
++		if (rc != 0 && cmp_reg_val_within(&lv->rv[i], &rv->rv[i]) != 0)
++			return -(i + 1);
++		k += (rc != 0);
++	}
++
++	return k;
++}
++
++/*
++ * check whether we have already evaluated that path and can prune it this time.
++ */
++static int
++prune_eval_state(struct bpf_verifier *bvf, const struct inst_node *node,
++	struct inst_node *next)
++{
++	int32_t rc;
++	struct bpf_eval_state *safe;
++
++	rc = INT32_MIN;
++	SLIST_FOREACH(safe, &next->evst.safe, next) {
++		rc = cmp_eval_state(safe, bvf->evst);
++		if (rc >= 0)
++			break;
++	}
++
++	rc = (rc >= 0) ? 0 : -1;
++
++	/*
++	 * current state doesn't match any safe states,
++	 * so no pruning is possible right now,
++	 * track the current state for future reference.
++	 */
++	if (rc != 0)
++		save_start_eval_state(bvf, next);
++
++	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u,next=%u) returns %d, "
++		"next->evst.start=%p, next->evst.nb_safe=%u\n",
++		__func__, bvf, get_node_idx(bvf, node),
++		get_node_idx(bvf, next), rc,
++		next->evst.start, next->evst.nb_safe);
++	return rc;
++}
++
++/* Do second pass through CFG and try to evaluate instructions
++ * via each possible path. The verifier will try all paths, tracking types of
++ * registers used as input to instructions, and updating resulting type via
++ * register state values. Plus for each register and possible stack value it
++ * tries to estimate possible max/min value.
++ * For conditional jumps, a stack is used to save evaluation state, so one
++ * path is explored while the state for the other path is pushed onto the stack.
++ * Then later, we backtrack to the first pushed instruction and repeat the cycle
++ * until the stack is empty and we're done.
++ * For a program with many conditional branches, walking through all possible
++ * paths could be very expensive. So to minimize the number of evaluations we
++ * use a heuristic similar to what the Linux kernel does - state pruning:
++ * if, from a given instruction with a given program state, we explore all
++ * possible paths and each of them reaches _exit() without complaints and with
++ * a valid R0 value, then for that instruction that program state can be marked
++ * as 'safe'. When we later arrive at the same instruction with a state
++ * equivalent to an earlier instruction's 'safe' state, we can prune the search.
++ * For now, only states for JCC targets are saved/examined.
+  */
+ static int
+ evaluate(struct bpf_verifier *bvf)
+@@ -2223,6 +2394,13 @@ evaluate(struct bpf_verifier *bvf)
+ 	const struct ebpf_insn *ins;
+ 	struct inst_node *next, *node;
+ 
++	struct {
++		uint32_t nb_eval;
++		uint32_t nb_prune;
++		uint32_t nb_save;
++		uint32_t nb_restore;
++	} stats;
++
+ 	/* initial state of frame pointer */
+ 	static const struct bpf_reg_val rvfp = {
+ 		.v = {
+@@ -2246,6 +2424,8 @@ evaluate(struct bpf_verifier *bvf)
+ 	next = node;
+ 	rc = 0;
+ 
++	memset(&stats, 0, sizeof(stats));
++
+ 	while (node != NULL && rc == 0) {
+ 
+ 		/*
+@@ -2259,11 +2439,14 @@ evaluate(struct bpf_verifier *bvf)
+ 		op = ins[idx].code;
+ 
+ 		/* for jcc node make a copy of evaluation state */
+-		if (node->nb_edge > 1)
+-			rc |= save_eval_state(bvf, node);
++		if (node->nb_edge > 1) {
++			rc |= save_cur_eval_state(bvf, node);
++			stats.nb_save++;
++		}
+ 
+ 		if (ins_chk[op].eval != NULL && rc == 0) {
+ 			err = ins_chk[op].eval(bvf, ins + idx);
++			stats.nb_eval++;
+ 			if (err != NULL) {
+ 				RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+ 					__func__, err, idx);
+@@ -2277,21 +2460,37 @@
+ 
+ 		/* proceed through CFG */
+ 		next = get_next_node(bvf, node);
++
+ 		if (next != NULL) {
+ 
+ 			/* proceed with next child */
+ 			if (node->cur_edge == node->nb_edge &&
+-					node->evst != NULL)
+-				restore_eval_state(bvf, node);
++					node->evst.cur != NULL) {
++				restore_cur_eval_state(bvf, node);
++				stats.nb_restore++;
++			}
+ 
+-			next->prev_node = get_node_idx(bvf, node);
+-			node = next;
++			/*
++			 * for jcc targets: check whether we have already
++			 * evaluated that path and whether its evaluation can
++			 * be skipped this time.
++			 */
++			if (node->nb_edge > 1 && prune_eval_state(bvf, node,
++					next) == 0) {
++				next = NULL;
++				stats.nb_prune++;
++			} else {
++				next->prev_node = get_node_idx(bvf, node);
++				node = next;
++			}
+ 		} else {
+ 			/*
+ 			 * finished with current node and all it's kids,
+-			 * proceed with parent
++			 * mark its @start state as safe for future reference,
++			 * and proceed with parent.
+ */ + node->cur_edge = 0; ++ save_safe_eval_state(bvf, node); + node = get_prev_node(bvf, node); + + /* finished */ +@@ -2300,6 +2499,14 @@ evaluate(struct bpf_verifier *bvf) + } + } + ++ RTE_BPF_LOG(DEBUG, "%s(%p) returns %d, stats:\n" ++ "node evaluations=%u;\n" ++ "state pruned=%u;\n" ++ "state saves=%u;\n" ++ "state restores=%u;\n", ++ __func__, bvf, rc, ++ stats.nb_eval, stats.nb_prune, stats.nb_save, stats.nb_restore); ++ + return rc; + } + diff --git a/dpdk/lib/cmdline/cmdline.c b/dpdk/lib/cmdline/cmdline.c -index 8f1854cb0b..5600f012c2 100644 +index 8f1854cb0b..d0601d69f4 100644 --- a/dpdk/lib/cmdline/cmdline.c +++ b/dpdk/lib/cmdline/cmdline.c -@@ -199,9 +199,14 @@ cmdline_poll(struct cmdline *cl) +@@ -175,6 +175,7 @@ cmdline_quit(struct cmdline *cl) + { + if (!cl) + return; ++ cmdline_cancel(cl); + rdline_quit(&cl->rdl); + } + +@@ -199,9 +200,14 @@ cmdline_poll(struct cmdline *cl) if (read_status < 0) return read_status; @@ -86118,11 +96060,47 @@ index 96674dfda2..b14355ef51 100644 struct cmdline; struct cmdline *cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out); +diff --git a/dpdk/lib/cmdline/cmdline_os_unix.c b/dpdk/lib/cmdline/cmdline_os_unix.c +index 64a945a34f..9a4ec4e334 100644 +--- a/dpdk/lib/cmdline/cmdline_os_unix.c ++++ b/dpdk/lib/cmdline/cmdline_os_unix.c +@@ -51,3 +51,9 @@ cmdline_vdprintf(int fd, const char *format, va_list op) + { + return vdprintf(fd, format, op); + } ++ ++/* This function is not needed on Linux, instead use sigaction() */ ++void ++cmdline_cancel(__rte_unused struct cmdline *cl) ++{ ++} +diff --git a/dpdk/lib/cmdline/cmdline_os_windows.c b/dpdk/lib/cmdline/cmdline_os_windows.c +index 73ed9ba290..80863bfc8a 100644 +--- a/dpdk/lib/cmdline/cmdline_os_windows.c ++++ b/dpdk/lib/cmdline/cmdline_os_windows.c +@@ -203,3 +203,17 @@ cmdline_vdprintf(int fd, const char *format, va_list op) + + return ret; + } ++ ++void ++cmdline_cancel(struct cmdline *cl) ++{ ++ if (!cl) ++ return; ++ ++ /* force the outstanding read on console to exit */ ++ if (cl->oldterm.is_console_input) { ++ HANDLE handle = (HANDLE)_get_osfhandle(cl->s_in); ++ ++ CancelIoEx(handle, NULL); ++ } ++} diff --git a/dpdk/lib/cmdline/cmdline_private.h b/dpdk/lib/cmdline/cmdline_private.h -index c2e906d8de..a3271c7693 100644 +index c2e906d8de..86a46cdea6 100644 --- a/dpdk/lib/cmdline/cmdline_private.h +++ b/dpdk/lib/cmdline/cmdline_private.h -@@ -23,12 +23,6 @@ +@@ -23,14 +23,8 @@ #define RDLINE_HISTORY_BUF_SIZE BUFSIZ #define RDLINE_HISTORY_MAX_LINE 64 @@ -86133,8 +96111,21 @@ index c2e906d8de..a3271c7693 100644 -}; - struct rdline { - enum rdline_status status; +- enum rdline_status status; ++ volatile enum rdline_status status; /* rdline bufs */ + struct cirbuf left; + struct cirbuf right; +@@ -96,6 +90,9 @@ int cmdline_poll_char(struct cmdline *cl); + /* Read one character from input. */ + ssize_t cmdline_read_char(struct cmdline *cl, char *c); + ++/* Force current cmdline read to unblock. 
*/ ++void cmdline_cancel(struct cmdline *cl); ++ + /* vdprintf(3) */ + __rte_format_printf(2, 0) + int cmdline_vdprintf(int fd, const char *format, va_list op); diff --git a/dpdk/lib/compressdev/rte_compressdev.h b/dpdk/lib/compressdev/rte_compressdev.h index 2840c27c6c..d9b2fe40dc 100644 --- a/dpdk/lib/compressdev/rte_compressdev.h @@ -86330,7 +96321,7 @@ index 9c866f553f..9c5bb9233a 100644 * See rte_crypto_rsa_priv_key_qt */ diff --git a/dpdk/lib/cryptodev/rte_cryptodev.c b/dpdk/lib/cryptodev/rte_cryptodev.c -index a40536c5ea..00fdd18630 100644 +index a40536c5ea..f4eb6d7d04 100644 --- a/dpdk/lib/cryptodev/rte_cryptodev.c +++ b/dpdk/lib/cryptodev/rte_cryptodev.c @@ -1215,13 +1215,13 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, @@ -86367,7 +96358,49 @@ index a40536c5ea..00fdd18630 100644 return -EINVAL; } } -@@ -1803,7 +1803,7 @@ rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, +@@ -1269,6 +1269,10 @@ rte_cryptodev_add_enq_callback(uint8_t dev_id, + rte_cryptodev_callback_fn cb_fn, + void *cb_arg) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ rte_errno = ENOTSUP; ++ return NULL; ++#endif + struct rte_cryptodev *dev; + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb, *tail; +@@ -1333,6 +1337,9 @@ rte_cryptodev_remove_enq_callback(uint8_t dev_id, + uint16_t qp_id, + struct rte_cryptodev_cb *cb) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ return -ENOTSUP; ++#endif + struct rte_cryptodev *dev; + struct rte_cryptodev_cb **prev_cb, *curr_cb; + struct rte_cryptodev_cb_rcu *list; +@@ -1404,6 +1411,10 @@ rte_cryptodev_add_deq_callback(uint8_t dev_id, + rte_cryptodev_callback_fn cb_fn, + void *cb_arg) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ rte_errno = ENOTSUP; ++ return NULL; ++#endif + struct rte_cryptodev *dev; + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb, *tail; +@@ -1468,6 +1479,9 @@ rte_cryptodev_remove_deq_callback(uint8_t dev_id, + uint16_t qp_id, + struct rte_cryptodev_cb *cb) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ return -ENOTSUP; ++#endif + struct rte_cryptodev *dev; + struct rte_cryptodev_cb **prev_cb, *curr_cb; + struct rte_cryptodev_cb_rcu *list; +@@ -1803,7 +1817,7 @@ rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size; if (obj_sz > elt_size) @@ -86376,7 +96409,7 @@ index a40536c5ea..00fdd18630 100644 obj_sz); else obj_sz = elt_size; -@@ -1813,14 +1813,14 @@ rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, +@@ -1813,14 +1827,14 @@ rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, NULL, NULL, NULL, NULL, socket_id, 0); if (mp == NULL) { @@ -86393,7 +96426,7 @@ index a40536c5ea..00fdd18630 100644 __func__, name); rte_mempool_free(mp); return NULL; -@@ -1868,7 +1868,7 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp) +@@ -1868,7 +1882,7 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp) struct rte_cryptodev_sym_session_pool_private_data *pool_priv; if (!rte_cryptodev_sym_is_valid_session_pool(mp)) { @@ -86402,7 +96435,7 @@ index a40536c5ea..00fdd18630 100644 return NULL; } -@@ -1902,7 +1902,7 @@ rte_cryptodev_asym_session_create(struct rte_mempool *mp) +@@ -1902,7 +1916,7 @@ rte_cryptodev_asym_session_create(struct rte_mempool *mp) rte_cryptodev_asym_get_header_session_size(); if (!mp) { @@ -86411,7 +96444,7 @@ index a40536c5ea..00fdd18630 100644 return NULL; } -@@ -2286,7 +2286,7 @@ rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type, 
+@@ -2286,7 +2300,7 @@ rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type, elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op), sizeof(struct rte_crypto_asym_op)); } else { @@ -86420,7 +96453,7 @@ index a40536c5ea..00fdd18630 100644 return NULL; } -@@ -2472,7 +2472,7 @@ cryptodev_handle_dev_info(const char *cmd __rte_unused, +@@ -2472,7 +2486,7 @@ cryptodev_handle_dev_info(const char *cmd __rte_unused, rte_tel_data_start_dict(d); rte_tel_data_add_dict_string(d, "device_name", cryptodev_info.device->name); @@ -86430,10 +96463,19 @@ index a40536c5ea..00fdd18630 100644 return 0; diff --git a/dpdk/lib/cryptodev/rte_cryptodev.h b/dpdk/lib/cryptodev/rte_cryptodev.h -index 59ea5a54df..99fd4c3569 100644 +index 59ea5a54df..eead3d2bff 100644 --- a/dpdk/lib/cryptodev/rte_cryptodev.h +++ b/dpdk/lib/cryptodev/rte_cryptodev.h -@@ -897,6 +897,15 @@ struct rte_cryptodev_cb_rcu { +@@ -27,8 +27,6 @@ extern "C" { + + #include "rte_cryptodev_trace_fp.h" + +-extern const char **rte_cyptodev_names; +- + /* Logging Macros */ + + #define CDEV_LOG_ERR(...) \ +@@ -897,6 +895,15 @@ struct rte_cryptodev_cb_rcu { /**< RCU QSBR variable per queue pair */ }; @@ -86449,6 +96491,24 @@ index 59ea5a54df..99fd4c3569 100644 void * rte_cryptodev_get_sec_ctx(uint8_t dev_id); +@@ -1844,7 +1851,7 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, + nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops); + + #ifdef RTE_CRYPTO_CALLBACKS +- if (unlikely(fp_ops->qp.deq_cb != NULL)) { ++ if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) { + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb; + +@@ -1911,7 +1918,7 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, + fp_ops = &rte_crypto_fp_ops[dev_id]; + qp = fp_ops->qp.data[qp_id]; + #ifdef RTE_CRYPTO_CALLBACKS +- if (unlikely(fp_ops->qp.enq_cb != NULL)) { ++ if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) { + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb; + diff --git a/dpdk/lib/distributor/rte_distributor_single.c b/dpdk/lib/distributor/rte_distributor_single.c index b653620688..60ca86152f 100644 --- a/dpdk/lib/distributor/rte_distributor_single.c @@ -86463,6 +96523,39 @@ index b653620688..60ca86152f 100644 /* Only turned-on bits are considered as match */ match &= d->in_flight_bitmask; +diff --git a/dpdk/lib/dmadev/rte_dmadev.c b/dpdk/lib/dmadev/rte_dmadev.c +index d4b32b2971..4e2057d301 100644 +--- a/dpdk/lib/dmadev/rte_dmadev.c ++++ b/dpdk/lib/dmadev/rte_dmadev.c +@@ -154,15 +154,24 @@ static int + dma_dev_data_prepare(void) + { + size_t size; ++ void *ptr; + + if (rte_dma_devices != NULL) + return 0; + +- size = dma_devices_max * sizeof(struct rte_dma_dev); +- rte_dma_devices = malloc(size); +- if (rte_dma_devices == NULL) ++ /* The DMA device object is expected to align cacheline, ++ * but the return value of malloc may not be aligned to the cache line. ++ * Therefore, extra memory is applied for realignment. ++ * Note: posix_memalign/aligned_alloc are not used ++ * because not always available, depending on libc. 
++ */ ++ size = dma_devices_max * sizeof(struct rte_dma_dev) + RTE_CACHE_LINE_SIZE; ++ ptr = malloc(size); ++ if (ptr == NULL) + return -ENOMEM; +- memset(rte_dma_devices, 0, size); ++ memset(ptr, 0, size); ++ ++ rte_dma_devices = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE); + + return 0; + } diff --git a/dpdk/lib/dmadev/rte_dmadev.h b/dpdk/lib/dmadev/rte_dmadev.h index 9942c6ec21..4abe79c536 100644 --- a/dpdk/lib/dmadev/rte_dmadev.h @@ -86601,10 +96694,84 @@ index 7c5437ddfa..9af299b8f8 100644 /* do not request exact number of pages */ cur_pages = eal_memalloc_alloc_seg_bulk(pages, diff --git a/dpdk/lib/eal/common/eal_common_fbarray.c b/dpdk/lib/eal/common/eal_common_fbarray.c -index 3a28a53247..5903474355 100644 +index 3a28a53247..ad40b6d9f9 100644 --- a/dpdk/lib/eal/common/eal_common_fbarray.c +++ b/dpdk/lib/eal/common/eal_common_fbarray.c -@@ -1485,7 +1485,7 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) +@@ -176,7 +176,7 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + + /* combine current ignore mask with last index ignore mask */ + if (msk_idx == last) +- ignore_msk |= last_msk; ++ ignore_msk &= last_msk; + + /* if we have an ignore mask, ignore once */ + if (ignore_msk) { +@@ -219,6 +219,8 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks; + lookahead_idx++) { + unsigned int s_idx, need; ++ uint64_t first_bit = 1; ++ + lookahead_msk = msk->data[lookahead_idx]; + + /* if we're looking for free space, invert the mask */ +@@ -228,18 +230,24 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + /* figure out how many consecutive bits we need here */ + need = RTE_MIN(left, MASK_ALIGN); + +- for (s_idx = 0; s_idx < need - 1; s_idx++) ++ /* count number of shifts we performed */ ++ for (s_idx = 0; s_idx < need - 1; s_idx++) { + lookahead_msk &= lookahead_msk >> 1ULL; ++ /* did we lose the run yet? */ ++ if ((lookahead_msk & first_bit) == 0) ++ break; ++ } + + /* if first bit is not set, we've lost the run */ +- if ((lookahead_msk & 1) == 0) { ++ if ((lookahead_msk & first_bit) == 0) { + /* + * we've scanned this far, so we know there are + * no runs in the space we've lookahead-scanned + * as well, so skip that on next iteration. + */ +- ignore_msk = ~((1ULL << need) - 1); +- msk_idx = lookahead_idx; ++ ignore_msk = ~((1ULL << (s_idx + 1)) - 1); ++ /* outer loop will increment msk_idx so add 1 */ ++ msk_idx = lookahead_idx - 1; + break; + } + +@@ -503,8 +511,13 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + /* figure out how many consecutive bits we need here */ + need = RTE_MIN(left, MASK_ALIGN); + +- for (s_idx = 0; s_idx < need - 1; s_idx++) ++ /* count number of shifts we performed */ ++ for (s_idx = 0; s_idx < need - 1; s_idx++) { + lookbehind_msk &= lookbehind_msk << 1ULL; ++ /* did we lose the run yet? */ ++ if ((lookbehind_msk & last_bit) == 0) ++ break; ++ } + + /* if last bit is not set, we've lost the run */ + if ((lookbehind_msk & last_bit) == 0) { +@@ -513,8 +526,9 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + * no runs in the space we've lookbehind-scanned + * as well, so skip that on next iteration. 
+ */ +- ignore_msk = UINT64_MAX << need; +- msk_idx = lookbehind_idx; ++ ignore_msk = ~(UINT64_MAX << (MASK_ALIGN - s_idx - 1)); ++ /* outer loop will decrement msk_idx so add 1 */ ++ msk_idx = lookbehind_idx + 1; + break; + } + +@@ -1485,7 +1499,7 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) if (fully_validate(arr->name, arr->elt_sz, arr->len)) { fprintf(f, "Invalid file-backed array\n"); @@ -86613,7 +96780,7 @@ index 3a28a53247..5903474355 100644 } /* prevent array from changing under us */ -@@ -1499,6 +1499,5 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) +@@ -1499,6 +1513,5 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) for (i = 0; i < msk->n_masks; i++) fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]); @@ -86659,6 +96826,84 @@ index 616db5ce31..4aeb933a8c 100644 snprintf(addr, ADDR_STR, "%p", ms->addr); rte_tel_data_add_dict_string(d, "Hugepage_base", addr); +diff --git a/dpdk/lib/eal/common/eal_common_options.c b/dpdk/lib/eal/common/eal_common_options.c +index 1cfdd75f3b..954062a386 100644 +--- a/dpdk/lib/eal/common/eal_common_options.c ++++ b/dpdk/lib/eal/common/eal_common_options.c +@@ -226,6 +226,8 @@ eal_save_args(int argc, char **argv) + if (strcmp(argv[i], "--") == 0) + break; + eal_args[i] = strdup(argv[i]); ++ if (eal_args[i] == NULL) ++ goto error; + } + eal_args[i++] = NULL; /* always finish with NULL */ + +@@ -235,13 +237,31 @@ eal_save_args(int argc, char **argv) + + eal_app_args = calloc(argc - i + 1, sizeof(*eal_args)); + if (eal_app_args == NULL) +- return -1; ++ goto error; + +- for (j = 0; i < argc; j++, i++) ++ for (j = 0; i < argc; j++, i++) { + eal_app_args[j] = strdup(argv[i]); ++ if (eal_app_args[j] == NULL) ++ goto error; ++ } + eal_app_args[j] = NULL; + + return 0; ++ ++error: ++ if (eal_app_args != NULL) { ++ i = 0; ++ while (eal_app_args[i] != NULL) ++ free(eal_app_args[i++]); ++ free(eal_app_args); ++ eal_app_args = NULL; ++ } ++ i = 0; ++ while (eal_args[i] != NULL) ++ free(eal_args[i++]); ++ free(eal_args); ++ eal_args = NULL; ++ return -1; + } + #endif + +@@ -1643,7 +1663,7 @@ eal_parse_common_option(int opt, const char *optarg, + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : +- (core_parsed == LCORE_OPT_MAP) ? "--lcore" : ++ (core_parsed == LCORE_OPT_MAP) ? "--lcores" : + "-c"); + return -1; + } +@@ -1676,7 +1696,7 @@ eal_parse_common_option(int opt, const char *optarg, + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_MSK) ? "-c" : +- (core_parsed == LCORE_OPT_MAP) ? "--lcore" : ++ (core_parsed == LCORE_OPT_MAP) ? "--lcores" : + "-l"); + return -1; + } +@@ -1854,10 +1874,10 @@ eal_parse_common_option(int opt, const char *optarg, + } + + if (core_parsed) { +- RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n", ++ RTE_LOG(ERR, EAL, "Option --lcores is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : + (core_parsed == LCORE_OPT_MSK) ? 
"-c" : +- "--lcore"); ++ "--lcores"); + return -1; + } + diff --git a/dpdk/lib/eal/common/eal_common_proc.c b/dpdk/lib/eal/common/eal_common_proc.c index ebd0f6673b..38a2164f71 100644 --- a/dpdk/lib/eal/common/eal_common_proc.c @@ -87158,7 +97403,7 @@ index 55aad2711b..25370d1148 100644 return ret; } diff --git a/dpdk/lib/eal/common/malloc_mp.c b/dpdk/lib/eal/common/malloc_mp.c -index 2e597a17a2..8e236ddd7b 100644 +index 2e597a17a2..2817aaa627 100644 --- a/dpdk/lib/eal/common/malloc_mp.c +++ b/dpdk/lib/eal/common/malloc_mp.c @@ -250,8 +250,8 @@ handle_alloc_request(const struct malloc_mp_req *m, @@ -87172,6 +97417,16 @@ index 2e597a17a2..8e236ddd7b 100644 n_segs = alloc_sz / ar->page_sz; /* we can't know in advance how many pages we'll need, so we malloc */ +@@ -755,7 +755,8 @@ request_to_primary(struct malloc_mp_req *user_req) + do { + ret = pthread_cond_timedwait(&entry->cond, + &mp_request_list.lock, &ts); +- } while (ret != 0 && ret != ETIMEDOUT); ++ } while ((ret != 0 && ret != ETIMEDOUT) && ++ entry->state == REQ_STATE_ACTIVE); + + if (entry->state != REQ_STATE_COMPLETE) { + RTE_LOG(ERR, EAL, "Request timed out\n"); diff --git a/dpdk/lib/eal/common/rte_malloc.c b/dpdk/lib/eal/common/rte_malloc.c index d0bec26920..c5a7757deb 100644 --- a/dpdk/lib/eal/common/rte_malloc.c @@ -88013,10 +98268,19 @@ index 60b4924838..6f7e8641d3 100644 return 0; } diff --git a/dpdk/lib/eal/linux/eal_dev.c b/dpdk/lib/eal/linux/eal_dev.c -index bde55a3d92..52fe336572 100644 +index bde55a3d92..28a8ddc18c 100644 --- a/dpdk/lib/eal/linux/eal_dev.c +++ b/dpdk/lib/eal/linux/eal_dev.c -@@ -231,13 +231,13 @@ dev_uev_handler(__rte_unused void *param) +@@ -185,6 +185,8 @@ dev_uev_parse(const char *buf, struct rte_dev_event *event, int length) + i += 14; + strlcpy(pci_slot_name, buf, sizeof(subsystem)); + event->devname = strdup(pci_slot_name); ++ if (event->devname == NULL) ++ return -1; + } + for (; i < length; i++) { + if (*buf == '\0') +@@ -231,13 +233,13 @@ dev_uev_handler(__rte_unused void *param) { struct rte_dev_event uevent; int ret; @@ -88032,7 +98296,7 @@ index bde55a3d92..52fe336572 100644 if (rte_intr_fd_get(intr_handle) < 0) return; -@@ -384,6 +384,7 @@ rte_dev_event_monitor_stop(void) +@@ -384,6 +386,7 @@ rte_dev_event_monitor_stop(void) close(rte_intr_fd_get(intr_handle)); rte_intr_instance_free(intr_handle); intr_handle = NULL; @@ -88122,7 +98386,7 @@ index 6e3925efd4..70060bf3ef 100644 if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle)) break; diff --git a/dpdk/lib/eal/linux/eal_memalloc.c b/dpdk/lib/eal/linux/eal_memalloc.c -index 337f2bc739..16b58d861b 100644 +index 337f2bc739..05d45d7f54 100644 --- a/dpdk/lib/eal/linux/eal_memalloc.c +++ b/dpdk/lib/eal/linux/eal_memalloc.c @@ -308,8 +308,8 @@ get_seg_fd(char *path, int buflen, struct hugepage_info *hi, @@ -88147,6 +98411,15 @@ index 337f2bc739..16b58d861b 100644 return -1; } /* take out a read lock */ +@@ -1034,7 +1034,7 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz, + /* memalloc is locked, so it's safe to use thread-unsafe version */ + ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa); + if (ret == 0) { +- RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n", ++ RTE_LOG(DEBUG, EAL, "%s(): couldn't find suitable memseg_list\n", + __func__); + ret = -1; + } else if (ret > 0) { diff --git a/dpdk/lib/eal/linux/eal_memory.c b/dpdk/lib/eal/linux/eal_memory.c index 03a4f2dd2d..84f4e1cce7 100644 --- a/dpdk/lib/eal/linux/eal_memory.c @@ -88441,10 +98714,19 @@ index 
3180adb0ff..cd4bdff8b8 100644 + } diff --git a/dpdk/lib/eal/unix/eal_firmware.c b/dpdk/lib/eal/unix/eal_firmware.c -index d1616b0bd9..1a7cf8e7b7 100644 +index d1616b0bd9..b999420de0 100644 --- a/dpdk/lib/eal/unix/eal_firmware.c +++ b/dpdk/lib/eal/unix/eal_firmware.c -@@ -25,19 +25,31 @@ static int +@@ -15,6 +15,8 @@ + + #include "eal_firmware.h" + ++static const char * const compression_suffixes[] = { "xz", "zst" }; ++ + #ifdef RTE_HAS_LIBARCHIVE + + struct firmware_read_ctx { +@@ -25,19 +27,37 @@ static int firmware_open(struct firmware_read_ctx *ctx, const char *name, size_t blocksize) { struct archive_entry *e; @@ -88467,7 +98749,13 @@ index d1616b0bd9..1a7cf8e7b7 100644 + + err = archive_read_support_filter_xz(ctx->a); + if (err != ARCHIVE_OK && err != ARCHIVE_WARN) -+ goto error; ++ RTE_LOG(DEBUG, EAL, ++ "could not initialise libarchive for xz compression\n"); ++ ++ err = archive_read_support_filter_zstd(ctx->a); ++ if (err != ARCHIVE_OK && err != ARCHIVE_WARN) ++ RTE_LOG(DEBUG, EAL, ++ "could not initialise libarchive for zstd compression\n"); + + if (archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK) + goto error; @@ -88484,6 +98772,33 @@ index d1616b0bd9..1a7cf8e7b7 100644 } static ssize_t +@@ -135,16 +155,21 @@ rte_firmware_read(const char *name, void **buf, size_t *bufsz) + + ret = firmware_read(name, buf, bufsz); + if (ret < 0) { +- snprintf(path, sizeof(path), "%s.xz", name); +- path[PATH_MAX - 1] = '\0'; ++ unsigned int i; ++ ++ for (i = 0; i < RTE_DIM(compression_suffixes); i++) { ++ snprintf(path, sizeof(path), "%s.%s", name, compression_suffixes[i]); ++ path[PATH_MAX - 1] = '\0'; ++ if (access(path, F_OK) != 0) ++ continue; + #ifndef RTE_HAS_LIBARCHIVE +- if (access(path, F_OK) == 0) { + RTE_LOG(WARNING, EAL, "libarchive not linked, %s cannot be decompressed\n", + path); +- } + #else +- ret = firmware_read(path, buf, bufsz); ++ ret = firmware_read(path, buf, bufsz); + #endif ++ break; ++ } + } + return ret; + } diff --git a/dpdk/lib/eal/windows/eal_memalloc.c b/dpdk/lib/eal/windows/eal_memalloc.c index 55d6dcc71c..aa7589b81d 100644 --- a/dpdk/lib/eal/windows/eal_memalloc.c @@ -88618,6 +98933,16 @@ index c272f65ccd..c6b226bd5d 100644 * * @param flag * containing information about the pattern +diff --git a/dpdk/lib/eal/windows/include/meson.build b/dpdk/lib/eal/windows/include/meson.build +index 5fb1962ac7..e985a77d58 100644 +--- a/dpdk/lib/eal/windows/include/meson.build ++++ b/dpdk/lib/eal/windows/include/meson.build +@@ -6,4 +6,5 @@ includes += include_directories('.') + headers += files( + 'rte_os.h', + 'rte_windows.h', ++ 'sched.h', + ) diff --git a/dpdk/lib/eal/windows/include/pthread.h b/dpdk/lib/eal/windows/include/pthread.h index 27fd2cca52..f7cf0e9ddf 100644 --- a/dpdk/lib/eal/windows/include/pthread.h @@ -88872,6 +99197,41 @@ index 1b6c6e585f..b678b5c942 100644 #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 100000) #pragma GCC diagnostic pop #endif +diff --git a/dpdk/lib/eal/x86/rte_cycles.c b/dpdk/lib/eal/x86/rte_cycles.c +index edd9621abb..2a601d7035 100644 +--- a/dpdk/lib/eal/x86/rte_cycles.c ++++ b/dpdk/lib/eal/x86/rte_cycles.c +@@ -6,7 +6,10 @@ + #include + #include + +-#include ++#define x86_vendor_amd(t1, t2, t3) \ ++ ((t1 == 0x68747541) && /* htuA */ \ ++ (t2 == 0x444d4163) && /* DMAc */ \ ++ (t3 == 0x69746e65)) /* itne */ + + #include "eal_private.h" + +@@ -91,6 +94,18 @@ get_tsc_freq_arch(void) + uint8_t mult, model; + int32_t ret; + ++#ifdef RTE_TOOLCHAIN_MSVC ++ __cpuid(cpuinfo, 0); ++ a = cpuinfo[0]; ++ b = cpuinfo[1]; ++ c = 
cpuinfo[2]; ++ d = cpuinfo[3]; ++#else ++ __cpuid(0, a, b, c, d); ++#endif ++ if (x86_vendor_amd(b, c, d)) ++ return 0; ++ + /* + * Time Stamp Counter and Nominal Core Crystal Clock + * Information Leaf diff --git a/dpdk/lib/efd/rte_efd.c b/dpdk/lib/efd/rte_efd.c index 86ef46863c..6c794d7750 100644 --- a/dpdk/lib/efd/rte_efd.c @@ -88935,7 +99295,7 @@ index d95605a355..2822fd8c72 100644 + #endif /* _RTE_ETHDEV_DRIVER_H_ */ diff --git a/dpdk/lib/ethdev/ethdev_pci.h b/dpdk/lib/ethdev/ethdev_pci.h -index 71aa4b2e98..7ab819c393 100644 +index 71aa4b2e98..90956cbdf7 100644 --- a/dpdk/lib/ethdev/ethdev_pci.h +++ b/dpdk/lib/ethdev/ethdev_pci.h @@ -6,6 +6,10 @@ @@ -88961,7 +99321,32 @@ index 71aa4b2e98..7ab819c393 100644 if (!pci_dev) return -ENODEV; -@@ -121,11 +126,13 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev, +@@ -91,9 +96,21 @@ rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size) + eth_dev->data->dev_private = rte_zmalloc_socket(name, + private_data_size, RTE_CACHE_LINE_SIZE, + dev->device.numa_node); +- if (!eth_dev->data->dev_private) { +- rte_eth_dev_release_port(eth_dev); +- return NULL; ++ /* if cannot allocate memory on the socket local to the device ++ * use rte_malloc to allocate memory on some other socket, if available. ++ */ ++ if (eth_dev->data->dev_private == NULL) { ++ eth_dev->data->dev_private = rte_zmalloc(name, ++ private_data_size, RTE_CACHE_LINE_SIZE); ++ ++ if (eth_dev->data->dev_private == NULL) { ++ rte_eth_dev_release_port(eth_dev); ++ return NULL; ++ } ++ /* got memory, but not local, so issue warning */ ++ RTE_ETHDEV_LOG(WARNING, ++ "Private data for ethdev '%s' not allocated on local NUMA node %d\n", ++ dev->device.name, dev->device.numa_node); + } + } + } else { +@@ -121,11 +138,13 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *eth_dev; int ret; @@ -88976,7 +99361,7 @@ index 71aa4b2e98..7ab819c393 100644 ret = dev_init(eth_dev); if (ret) rte_eth_dev_release_port(eth_dev); -@@ -171,4 +178,8 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev, +@@ -171,4 +190,8 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev, return 0; } @@ -89059,7 +99444,7 @@ index cb2fe0ae97..67cf0ae526 100644 + #endif /* _RTE_DEV_INFO_H_ */ diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c -index a1d475a292..9a8dd94e0a 100644 +index a1d475a292..b246f7836a 100644 --- a/dpdk/lib/ethdev/rte_ethdev.c +++ b/dpdk/lib/ethdev/rte_ethdev.c @@ -894,6 +894,17 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) @@ -89196,7 +99581,33 @@ index a1d475a292..9a8dd94e0a 100644 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); if (ret < 0) return ret; -@@ -6156,6 +6206,8 @@ eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, +@@ -4967,11 +5017,20 @@ rte_eth_dev_create(struct rte_device *device, const char *name, + name, priv_data_size, RTE_CACHE_LINE_SIZE, + device->numa_node); + +- if (!ethdev->data->dev_private) { +- RTE_ETHDEV_LOG(ERR, +- "failed to allocate private data\n"); +- retval = -ENOMEM; +- goto probe_failed; ++ /* fall back to alloc on any socket on failure */ ++ if (ethdev->data->dev_private == NULL) { ++ ethdev->data->dev_private = rte_zmalloc(name, ++ priv_data_size, RTE_CACHE_LINE_SIZE); ++ ++ if (ethdev->data->dev_private == NULL) { ++ RTE_ETHDEV_LOG(ERR, "failed to allocate private data\n"); ++ retval = -ENOMEM; ++ goto probe_failed; ++ } ++ /* got memory, but not local, so issue warning */ ++ RTE_ETHDEV_LOG(WARNING, ++ "Private 
data for ethdev '%s' not allocated on local NUMA node %d\n", ++ device->name, device->numa_node); + } + } + } else { +@@ -6156,6 +6215,8 @@ eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, { int q; struct rte_tel_data *q_data = rte_tel_data_alloc(); @@ -89205,7 +99616,7 @@ index a1d475a292..9a8dd94e0a 100644 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) rte_tel_data_add_array_u64(q_data, q_stats[q]); -@@ -6249,6 +6301,7 @@ eth_dev_handle_port_xstats(const char *cmd __rte_unused, +@@ -6249,6 +6310,7 @@ eth_dev_handle_port_xstats(const char *cmd __rte_unused, for (i = 0; i < num_xstats; i++) rte_tel_data_add_dict_u64(d, xstat_names[i].name, eth_xstats[i].value); @@ -89213,7 +99624,7 @@ index a1d475a292..9a8dd94e0a 100644 return 0; } -@@ -6295,7 +6348,7 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6295,7 +6357,7 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, struct rte_tel_data *d) { struct rte_tel_data *rxq_state, *txq_state; @@ -89222,7 +99633,7 @@ index a1d475a292..9a8dd94e0a 100644 struct rte_eth_dev *eth_dev; char *end_param; int port_id, i; -@@ -6312,16 +6365,16 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6312,16 +6374,16 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, return -EINVAL; eth_dev = &rte_eth_devices[port_id]; @@ -89242,7 +99653,7 @@ index a1d475a292..9a8dd94e0a 100644 rte_tel_data_start_dict(d); rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); -@@ -6332,17 +6385,10 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6332,17 +6394,10 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, eth_dev->data->nb_tx_queues); rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); @@ -89263,7 +99674,7 @@ index a1d475a292..9a8dd94e0a 100644 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); rte_tel_data_add_dict_int(d, "promiscuous", eth_dev->data->promiscuous); -@@ -6368,12 +6414,12 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6368,12 +6423,12 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); @@ -89370,10 +99781,76 @@ index fa299c8ad7..083f324a46 100644 int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr); diff --git a/dpdk/lib/ethdev/rte_flow.c b/dpdk/lib/ethdev/rte_flow.c -index a93f68abbc..3a6b6db725 100644 +index a93f68abbc..a82dc48264 100644 --- a/dpdk/lib/ethdev/rte_flow.c +++ b/dpdk/lib/ethdev/rte_flow.c -@@ -656,7 +656,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, +@@ -217,7 +217,7 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = { + sizeof(struct rte_flow_action_of_push_mpls)), + MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), + MK_FLOW_ACTION(VXLAN_DECAP, 0), +- MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), ++ MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)), + MK_FLOW_ACTION(NVGRE_DECAP, 0), + MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)), + MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)), +@@ -549,6 +549,7 @@ rte_flow_conv_item_spec(void *buf, const size_t size, + switch (item->type) { + union { + const struct rte_flow_item_raw *raw; ++ const 
struct rte_flow_item_geneve_opt *geneve_opt; + } spec; + union { + const struct rte_flow_item_raw *raw; +@@ -558,10 +559,13 @@ rte_flow_conv_item_spec(void *buf, const size_t size, + } mask; + union { + const struct rte_flow_item_raw *raw; ++ const struct rte_flow_item_geneve_opt *geneve_opt; + } src; + union { + struct rte_flow_item_raw *raw; ++ struct rte_flow_item_geneve_opt *geneve_opt; + } dst; ++ void *deep_src; + size_t tmp; + + case RTE_FLOW_ITEM_TYPE_RAW: +@@ -590,13 +594,30 @@ rte_flow_conv_item_spec(void *buf, const size_t size, + tmp = last.raw->length & mask.raw->length; + if (tmp) { + off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern)); +- if (size >= off + tmp) +- dst.raw->pattern = rte_memcpy +- ((void *)((uintptr_t)dst.raw + off), +- src.raw->pattern, tmp); ++ if (size >= off + tmp) { ++ deep_src = (void *)((uintptr_t)dst.raw + off); ++ dst.raw->pattern = rte_memcpy(deep_src, ++ src.raw->pattern, ++ tmp); ++ } + off += tmp; + } + break; ++ case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: ++ off = rte_flow_conv_copy(buf, data, size, ++ rte_flow_desc_item, item->type); ++ spec.geneve_opt = item->spec; ++ src.geneve_opt = data; ++ dst.geneve_opt = buf; ++ tmp = spec.geneve_opt->option_len << 2; ++ if (size > 0 && src.geneve_opt->data) { ++ deep_src = (void *)((uintptr_t)(dst.geneve_opt + 1)); ++ dst.geneve_opt->data = rte_memcpy(deep_src, ++ src.geneve_opt->data, ++ tmp); ++ } ++ off += tmp; ++ break; + default: + off = rte_flow_conv_copy(buf, data, size, + rte_flow_desc_item, item->type); +@@ -656,7 +677,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, if (src.rss->key_len && src.rss->key) { off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key)); tmp = sizeof(*src.rss->key) * src.rss->key_len; @@ -89382,7 +99859,7 @@ index a93f68abbc..3a6b6db725 100644 dst.rss->key = rte_memcpy ((void *)((uintptr_t)dst.rss + off), src.rss->key, tmp); -@@ -665,7 +665,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, +@@ -665,7 +686,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, if (src.rss->queue_num) { off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue)); tmp = sizeof(*src.rss->queue) * src.rss->queue_num; @@ -89391,7 +99868,7 @@ index a93f68abbc..3a6b6db725 100644 dst.rss->queue = rte_memcpy ((void *)((uintptr_t)dst.rss + off), src.rss->queue, tmp); -@@ -857,7 +857,15 @@ rte_flow_conv_actions(struct rte_flow_action *dst, +@@ -857,7 +878,15 @@ rte_flow_conv_actions(struct rte_flow_action *dst, src -= num; dst -= num; do { @@ -89408,6 +99885,19 @@ index a93f68abbc..3a6b6db725 100644 off = RTE_ALIGN_CEIL(off, sizeof(double)); ret = rte_flow_conv_action_conf ((void *)(data + off), +diff --git a/dpdk/lib/ethdev/rte_flow.h b/dpdk/lib/ethdev/rte_flow.h +index 1031fb246b..5af9bdd88c 100644 +--- a/dpdk/lib/ethdev/rte_flow.h ++++ b/dpdk/lib/ethdev/rte_flow.h +@@ -3279,7 +3279,7 @@ struct rte_flow_action_vxlan_encap { + */ + struct rte_flow_action_nvgre_encap { + /** +- * Encapsulating vxlan tunnel definition ++ * Encapsulating nvgre tunnel definition + * (terminated by the END pattern item). 
+ */ + struct rte_flow_item *definition; diff --git a/dpdk/lib/ethdev/version.map b/dpdk/lib/ethdev/version.map index c2fb0669a4..1f7359c846 100644 --- a/dpdk/lib/ethdev/version.map @@ -90292,10 +100782,116 @@ index 79b9ea3a02..cb52f17b50 100644 } diff --git a/dpdk/lib/eventdev/rte_eventdev.h b/dpdk/lib/eventdev/rte_eventdev.h -index eef47d8acc..476bcbcc21 100644 +index eef47d8acc..f09ea4a9d1 100644 --- a/dpdk/lib/eventdev/rte_eventdev.h +++ b/dpdk/lib/eventdev/rte_eventdev.h -@@ -986,21 +986,31 @@ struct rte_event_vector { +@@ -475,9 +475,9 @@ rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, + struct rte_event_dev_config { + uint32_t dequeue_timeout_ns; + /**< rte_event_dequeue_burst() timeout on this device. +- * This value should be in the range of *min_dequeue_timeout_ns* and +- * *max_dequeue_timeout_ns* which previously provided in +- * rte_event_dev_info_get() ++ * This value should be in the range of @ref rte_event_dev_info.min_dequeue_timeout_ns and ++ * @ref rte_event_dev_info.max_dequeue_timeout_ns returned by ++ * @ref rte_event_dev_info_get() + * The value 0 is allowed, in which case, default dequeue timeout used. + * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT + */ +@@ -485,40 +485,53 @@ struct rte_event_dev_config { + /**< In a *closed system* this field is the limit on maximum number of + * events that can be inflight in the eventdev at a given time. The + * limit is required to ensure that the finite space in a closed system +- * is not overwhelmed. The value cannot exceed the *max_num_events* +- * as provided by rte_event_dev_info_get(). +- * This value should be set to -1 for *open system*. ++ * is not exhausted. ++ * The value cannot exceed @ref rte_event_dev_info.max_num_events ++ * returned by rte_event_dev_info_get(). ++ * ++ * This value should be set to -1 for *open systems*, that is, ++ * those systems returning -1 in @ref rte_event_dev_info.max_num_events. ++ * ++ * @see rte_event_port_conf.new_event_threshold + */ + uint8_t nb_event_queues; + /**< Number of event queues to configure on this device. +- * This value cannot exceed the *max_event_queues* which previously +- * provided in rte_event_dev_info_get() ++ * This value *includes* any single-link queue-port pairs to be used. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_queues + ++ * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs ++ * returned by rte_event_dev_info_get(). ++ * The number of non-single-link queues i.e. this value less ++ * *nb_single_link_event_port_queues* in this struct, cannot exceed ++ * @ref rte_event_dev_info.max_event_queues + */ + uint8_t nb_event_ports; + /**< Number of event ports to configure on this device. +- * This value cannot exceed the *max_event_ports* which previously +- * provided in rte_event_dev_info_get() ++ * This value *includes* any single-link queue-port pairs to be used. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_ports + ++ * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs ++ * returned by rte_event_dev_info_get(). ++ * The number of non-single-link ports i.e. this value less ++ * *nb_single_link_event_port_queues* in this struct, cannot exceed ++ * @ref rte_event_dev_info.max_event_ports + */ + uint32_t nb_event_queue_flows; +- /**< Number of flows for any event queue on this device. +- * This value cannot exceed the *max_event_queue_flows* which previously +- * provided in rte_event_dev_info_get() ++ /**< Max number of flows needed for a single event queue on this device. 
++ * This value cannot exceed @ref rte_event_dev_info.max_event_queue_flows ++ * returned by rte_event_dev_info_get() + */ + uint32_t nb_event_port_dequeue_depth; +- /**< Maximum number of events can be dequeued at a time from an +- * event port by this device. +- * This value cannot exceed the *max_event_port_dequeue_depth* +- * which previously provided in rte_event_dev_info_get(). ++ /**< Max number of events that can be dequeued at a time from an event port on this device. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_port_dequeue_depth ++ * returned by rte_event_dev_info_get(). + * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. +- * @see rte_event_port_setup() ++ * @see rte_event_port_setup() rte_event_dequeue_burst() + */ + uint32_t nb_event_port_enqueue_depth; +- /**< Maximum number of events can be enqueued at a time from an +- * event port by this device. +- * This value cannot exceed the *max_event_port_enqueue_depth* +- * which previously provided in rte_event_dev_info_get(). ++ /**< Maximum number of events can be enqueued at a time to an event port on this device. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_port_enqueue_depth ++ * returned by rte_event_dev_info_get(). + * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. +- * @see rte_event_port_setup() ++ * @see rte_event_port_setup() rte_event_enqueue_burst() + */ + uint32_t event_dev_cfg; + /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/ +@@ -528,7 +541,7 @@ struct rte_event_dev_config { + * queues; this value cannot exceed *nb_event_ports* or + * *nb_event_queues*. If the device has ports and queues that are + * optimized for single-link usage, this field is a hint for how many +- * to allocate; otherwise, regular event ports and queues can be used. ++ * to allocate; otherwise, regular event ports and queues will be used. + */ + }; + +@@ -978,29 +991,41 @@ struct rte_event_vector { + * port and queue of the mbufs in the vector + */ + struct { +- uint16_t port; +- /* Ethernet device port id. */ +- uint16_t queue; +- /* Ethernet device queue id. */ ++ uint16_t port; /**< Ethernet device port id. */ ++ uint16_t queue; /**< Ethernet device queue id. */ + }; }; /**< Union to hold common attributes of the vector array. */ uint64_t impl_opaque; @@ -90323,12 +100919,15 @@ index eef47d8acc..476bcbcc21 100644 * vector array can be an array of mbufs or pointers or opaque u64 * values. 
*/ --}; ++#ifndef __DOXYGEN__ +} __rte_aligned(16); ++#else + }; ++#endif /* Scheduler type definitions */ #define RTE_SCHED_TYPE_ORDERED 0 -@@ -1805,7 +1815,7 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, +@@ -1805,7 +1830,7 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, return 0; } #endif @@ -90669,10 +101268,18 @@ index 8ca4da67e9..7a788523ad 100644 RTE_PTYPE_TUNNEL_VXLAN) && \ ((ptype & RTE_PTYPE_INNER_L4_TCP) == \ diff --git a/dpdk/lib/hash/rte_cuckoo_hash.c b/dpdk/lib/hash/rte_cuckoo_hash.c -index 1191dfd81a..95c3e6c2f4 100644 +index 1191dfd81a..c545336905 100644 --- a/dpdk/lib/hash/rte_cuckoo_hash.c +++ b/dpdk/lib/hash/rte_cuckoo_hash.c -@@ -527,6 +527,7 @@ rte_hash_free(struct rte_hash *h) +@@ -172,6 +172,7 @@ rte_hash_create(const struct rte_hash_parameters *params) + /* Check for valid parameters */ + if ((params->entries > RTE_HASH_ENTRIES_MAX) || + (params->entries < RTE_HASH_BUCKET_ENTRIES) || ++ (params->name == NULL) || + (params->key_len == 0)) { + rte_errno = EINVAL; + RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n"); +@@ -527,6 +528,7 @@ rte_hash_free(struct rte_hash *h) rte_free(h->buckets_ext); rte_free(h->tbl_chng_cnt); rte_free(h->ext_bkt_to_free); @@ -90680,7 +101287,15 @@ index 1191dfd81a..95c3e6c2f4 100644 rte_free(h); rte_free(te); } -@@ -1865,11 +1866,15 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, +@@ -1547,6 +1549,7 @@ rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config *cfg) + if (params.size == 0) + params.size = total_entries; + params.trigger_reclaim_limit = cfg->trigger_reclaim_limit; ++ params.max_reclaim_size = cfg->max_reclaim_size; + if (params.max_reclaim_size == 0) + params.max_reclaim_size = RTE_HASH_RCU_DQ_RECLAIM_MAX; + params.esize = sizeof(struct __rte_hash_rcu_dq_entry); +@@ -1865,11 +1868,15 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, _mm_load_si128( (__m128i const *)prim_bkt->sig_current), _mm_set1_epi16(sig))); @@ -90696,6 +101311,104 @@ index 1191dfd81a..95c3e6c2f4 100644 break; #elif defined(__ARM_NEON) case RTE_HASH_COMPARE_NEON: { +diff --git a/dpdk/lib/hash/rte_cuckoo_hash.h b/dpdk/lib/hash/rte_cuckoo_hash.h +index eb2644f74b..b5a608b442 100644 +--- a/dpdk/lib/hash/rte_cuckoo_hash.h ++++ b/dpdk/lib/hash/rte_cuckoo_hash.h +@@ -29,17 +29,6 @@ + #define RETURN_IF_TRUE(cond, retval) + #endif + +-#if defined(RTE_LIBRTE_HASH_DEBUG) +-#define ERR_IF_TRUE(cond, fmt, args...) do { \ +- if (cond) { \ +- RTE_LOG(ERR, HASH, fmt, ##args); \ +- return; \ +- } \ +-} while (0) +-#else +-#define ERR_IF_TRUE(cond, fmt, args...) +-#endif +- + #include + #include + +diff --git a/dpdk/lib/hash/rte_hash.h b/dpdk/lib/hash/rte_hash.h +index 6067aad954..28bfa06840 100644 +--- a/dpdk/lib/hash/rte_hash.h ++++ b/dpdk/lib/hash/rte_hash.h +@@ -286,7 +286,7 @@ rte_hash_add_key_with_hash_data(const struct rte_hash *h, const void *key, + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOSPC if there is no space in the hash for this key. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key. This + * unique key id may be larger than the user specified entry count + * when RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD flag is set. +@@ -310,7 +310,7 @@ rte_hash_add_key(const struct rte_hash *h, const void *key); + * @return + * - -EINVAL if the parameters are invalid. 
+ * - -ENOSPC if there is no space in the hash for this key. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key. This + * unique key ID may be larger than the user specified entry count + * when RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD flag is set. +@@ -341,7 +341,7 @@ rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +@@ -373,7 +373,7 @@ rte_hash_del_key(const struct rte_hash *h, const void *key); + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +@@ -440,7 +440,7 @@ rte_hash_free_key_with_position(const struct rte_hash *h, + * @param data + * Output with pointer to data returned from the hash table. + * @return +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + * - -EINVAL if the parameters are invalid. +@@ -465,7 +465,7 @@ rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data); + * @param data + * Output with pointer to data returned from the hash table. + * @return +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + * - -EINVAL if the parameters are invalid. +@@ -488,7 +488,7 @@ rte_hash_lookup_with_hash_data(const struct rte_hash *h, const void *key, + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +@@ -510,7 +510,7 @@ rte_hash_lookup(const struct rte_hash *h, const void *key); + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. 
+ */ diff --git a/dpdk/lib/hash/rte_thash.c b/dpdk/lib/hash/rte_thash.c index 6847e36f4b..2b97482cfb 100644 --- a/dpdk/lib/hash/rte_thash.c @@ -90972,6 +101685,19 @@ index 359a9f5b09..4900b750bc 100644 typedef int (*arg_handler_t)(const char *key, const char *value, void *opaque); /** A key/value association */ +diff --git a/dpdk/lib/latencystats/rte_latencystats.c b/dpdk/lib/latencystats/rte_latencystats.c +index ab8db7a139..586e4700ea 100644 +--- a/dpdk/lib/latencystats/rte_latencystats.c ++++ b/dpdk/lib/latencystats/rte_latencystats.c +@@ -168,7 +168,7 @@ calc_latency(uint16_t pid __rte_unused, + * a constant smoothing factor between 0 and 1. The value + * is used below for measuring average latency. + */ +- const float alpha = 0.2; ++ const float alpha = 0.2f; + + now = rte_rdtsc(); + for (i = 0; i < nb_pkts; i++) { diff --git a/dpdk/lib/lpm/rte_lpm.h b/dpdk/lib/lpm/rte_lpm.h index 5eb14c1748..d0ba57ae71 100644 --- a/dpdk/lib/lpm/rte_lpm.h @@ -91473,6 +102199,31 @@ index 2b6eb1ccc8..09b14d9336 100644 +#endif + #endif +diff --git a/dpdk/lib/net/rte_ether.h b/dpdk/lib/net/rte_ether.h +index 3d9852d9e2..1d84fc1098 100644 +--- a/dpdk/lib/net/rte_ether.h ++++ b/dpdk/lib/net/rte_ether.h +@@ -47,6 +47,20 @@ extern "C" { + + #define RTE_ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */ + ++/* VLAN header fields */ ++#define RTE_VLAN_DEI_SHIFT 12 ++#define RTE_VLAN_PRI_SHIFT 13 ++#define RTE_VLAN_PRI_MASK 0xe000 /* Priority Code Point */ ++#define RTE_VLAN_DEI_MASK 0x1000 /* Drop Eligible Indicator */ ++#define RTE_VLAN_ID_MASK 0x0fff /* VLAN Identifier */ ++ ++#define RTE_VLAN_TCI_ID(vlan_tci) ((vlan_tci) & RTE_VLAN_ID_MASK) ++#define RTE_VLAN_TCI_PRI(vlan_tci) (((vlan_tci) & RTE_VLAN_PRI_MASK) >> RTE_VLAN_PRI_SHIFT) ++#define RTE_VLAN_TCI_DEI(vlan_tci) (((vlan_tci) & RTE_VLAN_DEI_MASK) >> RTE_VLAN_DEI_SHIFT) ++#define RTE_VLAN_TCI_MAKE(id, pri, dei) ((id) | \ ++ ((pri) << RTE_VLAN_PRI_SHIFT) | \ ++ ((dei) << RTE_VLAN_DEI_SHIFT)) ++ + /** + * Ethernet address: + * A universally administered address is uniquely assigned to a device by its diff --git a/dpdk/lib/net/rte_gtp.h b/dpdk/lib/net/rte_gtp.h index dca940c2c5..9849872366 100644 --- a/dpdk/lib/net/rte_gtp.h @@ -91615,6 +102366,42 @@ index b90e36cf12..1f3ad3f03c 100644 */ struct rte_l2tpv2_msg_without_offset { rte_be16_t length; /**< length(16) */ +diff --git a/dpdk/lib/net/rte_net.h b/dpdk/lib/net/rte_net.h +index 53a7f4d360..f2aeba8404 100644 +--- a/dpdk/lib/net/rte_net.h ++++ b/dpdk/lib/net/rte_net.h +@@ -122,7 +122,8 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) + * no offloads are requested. 
+ */ + if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG | +- RTE_MBUF_F_TX_OUTER_IP_CKSUM))) ++ RTE_MBUF_F_TX_OUTER_IP_CKSUM | ++ RTE_MBUF_F_TX_OUTER_UDP_CKSUM))) + return 0; + + if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) { +@@ -136,6 +137,21 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) + struct rte_ipv4_hdr *, m->outer_l2_len); + ipv4_hdr->hdr_checksum = 0; + } ++ if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { ++ if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { ++ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, ++ m->outer_l2_len); ++ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + ++ m->outer_l3_len); ++ udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags); ++ } else { ++ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, ++ m->outer_l2_len); ++ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, ++ m->outer_l2_len + m->outer_l3_len); ++ udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, m->ol_flags); ++ } ++ } + } + + /* diff --git a/dpdk/lib/node/ethdev_ctrl.c b/dpdk/lib/node/ethdev_ctrl.c index 13b8b705f0..8a7429faa3 100644 --- a/dpdk/lib/node/ethdev_ctrl.c @@ -91629,7 +102416,7 @@ index 13b8b705f0..8a7429faa3 100644 elem->ctx.port_id = port_id; elem->ctx.queue_id = j; diff --git a/dpdk/lib/pcapng/rte_pcapng.c b/dpdk/lib/pcapng/rte_pcapng.c -index 03edabe73e..e914b7b031 100644 +index 03edabe73e..600a724bbb 100644 --- a/dpdk/lib/pcapng/rte_pcapng.c +++ b/dpdk/lib/pcapng/rte_pcapng.c @@ -20,6 +20,7 @@ @@ -91709,7 +102496,17 @@ index 03edabe73e..e914b7b031 100644 } /* length of option including padding */ -@@ -177,8 +214,8 @@ pcapng_add_interface(rte_pcapng_t *self, uint16_t port) +@@ -72,7 +109,8 @@ pcapng_add_option(struct pcapng_option *popt, uint16_t code, + { + popt->code = code; + popt->length = len; +- memcpy(popt->data, data, len); ++ if (len > 0) ++ memcpy(popt->data, data, len); + + return (struct pcapng_option *)((uint8_t *)popt + pcapng_optlen(len)); + } +@@ -177,8 +215,8 @@ pcapng_add_interface(rte_pcapng_t *self, uint16_t port) "%s-%s", dev->bus->name, dev->name); /* DPDK reports in units of Mbps */ @@ -91720,7 +102517,7 @@ index 03edabe73e..e914b7b031 100644 speed = link.link_speed * PCAPNG_MBPS_SPEED; if (rte_eth_macaddr_get(port, &macaddr) < 0) -@@ -515,33 +552,16 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, +@@ -515,33 +553,16 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, return NULL; } @@ -91758,7 +102555,7 @@ index 03edabe73e..e914b7b031 100644 struct rte_mbuf *m = pkts[i]; struct pcapng_enhance_packet_block *epb; -@@ -553,6 +573,20 @@ rte_pcapng_write_packets(rte_pcapng_t *self, +@@ -553,6 +574,20 @@ rte_pcapng_write_packets(rte_pcapng_t *self, return -1; } @@ -91779,7 +102576,7 @@ index 03edabe73e..e914b7b031 100644 /* * The DPDK port is recorded during pcapng_copy. * Map that to PCAPNG interface in file. 
-@@ -565,10 +599,12 @@ rte_pcapng_write_packets(rte_pcapng_t *self, +@@ -565,10 +600,12 @@ rte_pcapng_write_packets(rte_pcapng_t *self, } while ((m = m->next)); } @@ -92278,6 +103075,19 @@ index a6134e76ea..c06ed8b9c7 100644 return NULL; } +diff --git a/dpdk/lib/regexdev/rte_regexdev.c b/dpdk/lib/regexdev/rte_regexdev.c +index 04ab713730..c66e251509 100644 +--- a/dpdk/lib/regexdev/rte_regexdev.c ++++ b/dpdk/lib/regexdev/rte_regexdev.c +@@ -21,7 +21,7 @@ static struct { + struct rte_regexdev_data data[RTE_MAX_REGEXDEV_DEVS]; + } *rte_regexdev_shared_data; + +-int rte_regexdev_logtype; ++RTE_LOG_REGISTER_DEFAULT(rte_regexdev_logtype, INFO); + + static uint16_t + regexdev_find_free_dev(void) diff --git a/dpdk/lib/regexdev/rte_regexdev.h b/dpdk/lib/regexdev/rte_regexdev.h index 86f0b231b0..513ce5b67c 100644 --- a/dpdk/lib/regexdev/rte_regexdev.h @@ -93186,10 +103996,23 @@ index 7bca8a9a49..0f24579235 100644 + #endif diff --git a/dpdk/lib/telemetry/telemetry.c b/dpdk/lib/telemetry/telemetry.c -index a7483167d4..e73f4a593e 100644 +index a7483167d4..74315714ea 100644 --- a/dpdk/lib/telemetry/telemetry.c +++ b/dpdk/lib/telemetry/telemetry.c -@@ -197,7 +197,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) +@@ -160,7 +160,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) + d->type != RTE_TEL_ARRAY_INT && d->type != RTE_TEL_ARRAY_STRING) + return snprintf(out_buf, buf_len, "null"); + +- used = rte_tel_json_empty_array(out_buf, buf_len, 0); ++ if (d->type == RTE_TEL_DICT) ++ used = rte_tel_json_empty_obj(out_buf, buf_len, 0); ++ else ++ used = rte_tel_json_empty_array(out_buf, buf_len, 0); ++ + if (d->type == RTE_TEL_ARRAY_U64) + for (i = 0; i < d->data_len; i++) + used = rte_tel_json_add_array_u64(out_buf, +@@ -197,7 +201,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) break; case RTE_TEL_CONTAINER: { @@ -93202,7 +104025,7 @@ index a7483167d4..e73f4a593e 100644 const struct container *cont = &v->value.container; if (container_to_json(cont->data, -@@ -208,6 +212,7 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) +@@ -208,6 +216,7 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) v->name, temp); if (!cont->keep) rte_tel_data_free(cont->data); @@ -93210,7 +104033,7 @@ index a7483167d4..e73f4a593e 100644 break; } } -@@ -264,7 +269,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -264,7 +273,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) break; case RTE_TEL_CONTAINER: { @@ -93223,7 +104046,7 @@ index a7483167d4..e73f4a593e 100644 const struct container *cont = &v->value.container; if (container_to_json(cont->data, -@@ -275,6 +284,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -275,6 +288,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) v->name, temp); if (!cont->keep) rte_tel_data_free(cont->data); @@ -93231,7 +104054,7 @@ index a7483167d4..e73f4a593e 100644 } } } -@@ -306,7 +316,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -306,7 +320,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) buf_len, used, d->data.array[i].u64val); else if (d->type == RTE_TEL_ARRAY_CONTAINER) { @@ -93244,7 +104067,7 @@ index a7483167d4..e73f4a593e 100644 const struct container *rec_data = &d->data.array[i].container; if (container_to_json(rec_data->data, -@@ -316,6 +330,7 @@ output_json(const char *cmd, 
const struct rte_tel_data *d, int s) +@@ -316,6 +334,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) buf_len, used, temp); if (!rec_data->keep) rte_tel_data_free(rec_data->data); @@ -93252,7 +104075,7 @@ index a7483167d4..e73f4a593e 100644 } used += prefix_used; used += strlcat(out_buf + used, "}", sizeof(out_buf) - used); -@@ -328,7 +343,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -328,7 +347,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) static void perform_command(telemetry_cb fn, const char *cmd, const char *param, int s) { @@ -93261,7 +104084,26 @@ index a7483167d4..e73f4a593e 100644 int ret = fn(cmd, param, &data); if (ret < 0) { -@@ -534,7 +549,7 @@ telemetry_legacy_init(void) +@@ -359,8 +378,8 @@ client_handler(void *sock_id) + "{\"version\":\"%s\",\"pid\":%d,\"max_output_len\":%d}", + telemetry_version, getpid(), MAX_OUTPUT_LEN); + if (write(s, info_str, strlen(info_str)) < 0) { +- close(s); +- return NULL; ++ TMTY_LOG(DEBUG, "Socket write base info to client failed\n"); ++ goto exit; + } + + /* receive data is not null terminated */ +@@ -385,6 +404,7 @@ client_handler(void *sock_id) + + bytes = read(s, buffer, sizeof(buffer) - 1); + } ++exit: + close(s); + __atomic_sub_fetch(&v2_clients, 1, __ATOMIC_RELAXED); + return NULL; +@@ -534,7 +554,7 @@ telemetry_legacy_init(void) } rc = pthread_create(&t_old, NULL, socket_listener, &v1_socket); if (rc != 0) { @@ -93283,6 +104125,19 @@ index f02a12f5b0..db70690274 100644 * This function is not for use for values larger than given buffer length. */ __rte_format_printf(3, 4) +diff --git a/dpdk/lib/telemetry/telemetry_legacy.c b/dpdk/lib/telemetry/telemetry_legacy.c +index 8aba44d689..5996fe83cf 100644 +--- a/dpdk/lib/telemetry/telemetry_legacy.c ++++ b/dpdk/lib/telemetry/telemetry_legacy.c +@@ -93,7 +93,7 @@ register_client(const char *cmd __rte_unused, const char *params, + } + #ifndef RTE_EXEC_ENV_WINDOWS + strlcpy(data, strchr(params, ':'), sizeof(data)); +- memcpy(data, &data[strlen(":\"")], strlen(data)); ++ memmove(data, &data[strlen(":\"")], strlen(data)); + if (!strchr(data, '\"')) { + fprintf(stderr, "Invalid client data\n"); + return -1; diff --git a/dpdk/lib/timer/rte_timer.c b/dpdk/lib/timer/rte_timer.c index 6d19ce469b..98c1941cb1 100644 --- a/dpdk/lib/timer/rte_timer.c @@ -93539,6 +104394,54 @@ index 82963c1e6d..a34aebd50c 100644 fdset_pipe_uninit(&vhost_user.fdset); return -1; +diff --git a/dpdk/lib/vhost/vdpa.c b/dpdk/lib/vhost/vdpa.c +index 09ad5d866e..bd00c9d2c2 100644 +--- a/dpdk/lib/vhost/vdpa.c ++++ b/dpdk/lib/vhost/vdpa.c +@@ -19,6 +19,7 @@ + #include "rte_vdpa.h" + #include "vdpa_driver.h" + #include "vhost.h" ++#include "iotlb.h" + + /** Double linked list of vDPA devices. 
*/ + TAILQ_HEAD(vdpa_device_list, rte_vdpa_device); +@@ -176,17 +177,21 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) + if (unlikely(nr_descs > vq->size)) + return -1; + ++ vhost_user_iotlb_rd_lock(vq); + desc_ring = (struct vring_desc *)(uintptr_t) + vhost_iova_to_vva(dev, vq, + vq->desc[desc_id].addr, &dlen, + VHOST_ACCESS_RO); ++ vhost_user_iotlb_rd_unlock(vq); + if (unlikely(!desc_ring)) + return -1; + + if (unlikely(dlen < vq->desc[desc_id].len)) { ++ vhost_user_iotlb_rd_lock(vq); + idesc = vhost_alloc_copy_ind_table(dev, vq, + vq->desc[desc_id].addr, + vq->desc[desc_id].len); ++ vhost_user_iotlb_rd_unlock(vq); + if (unlikely(!idesc)) + return -1; + +@@ -203,9 +208,12 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) + if (unlikely(nr_descs-- == 0)) + goto fail; + desc = desc_ring[desc_id]; +- if (desc.flags & VRING_DESC_F_WRITE) ++ if (desc.flags & VRING_DESC_F_WRITE) { ++ vhost_user_iotlb_rd_lock(vq); + vhost_log_write_iova(dev, vq, desc.addr, + desc.len); ++ vhost_user_iotlb_rd_unlock(vq); ++ } + desc_id = desc.next; + } while (desc.flags & VRING_DESC_F_NEXT); + diff --git a/dpdk/lib/vhost/vdpa_driver.h b/dpdk/lib/vhost/vdpa_driver.h index fc2d6acedd..7ba9e28e57 100644 --- a/dpdk/lib/vhost/vdpa_driver.h @@ -94099,7 +105002,7 @@ index 926b5c0bd9..7d1d6a1861 100644 goto error_exit; } diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c -index a781346c4d..bf489cb13b 100644 +index a781346c4d..ca0618f627 100644 --- a/dpdk/lib/vhost/vhost_user.c +++ b/dpdk/lib/vhost/vhost_user.c @@ -143,57 +143,59 @@ get_blk_size(int fd) @@ -94379,7 +105282,15 @@ index a781346c4d..bf489cb13b 100644 fd = msg->fds[0]; if (msg->size != sizeof(msg->payload.inflight) || fd < 0) { VHOST_LOG_CONFIG(ERR, -@@ -1841,7 +1847,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused, +@@ -1780,6 +1786,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg, + if (!vq) + continue; + ++ cleanup_vq_inflight(dev, vq); + if (vq_is_packed(dev)) { + vq->inflight_packed = addr; + vq->inflight_packed->desc_num = queue_size; +@@ -1841,7 +1848,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused, if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) close(msg->fds[0]); @@ -94388,7 +105299,17 @@ index a781346c4d..bf489cb13b 100644 return RTE_VHOST_MSG_RESULT_OK; } -@@ -2372,7 +2378,7 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused, +@@ -2183,7 +2190,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev, + + vhost_user_iotlb_flush_all(vq); + ++ rte_spinlock_lock(&vq->access_lock); + vring_invalidate(dev, vq); ++ rte_spinlock_unlock(&vq->access_lock); + + return RTE_VHOST_MSG_RESULT_REPLY; + } +@@ -2372,7 +2381,7 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused, return RTE_VHOST_MSG_RESULT_ERR; close(msg->fds[0]); @@ -94397,7 +105318,7 @@ index a781346c4d..bf489cb13b 100644 return RTE_VHOST_MSG_RESULT_OK; } -@@ -2566,8 +2572,12 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg, +@@ -2566,8 +2575,12 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg, vhost_user_iotlb_cache_insert(vq, imsg->iova, vva, len, imsg->perm); @@ -94411,7 +105332,7 @@ index a781346c4d..bf489cb13b 100644 } break; case VHOST_IOTLB_INVALIDATE: -@@ -2580,8 +2590,11 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg, +@@ -2580,8 +2593,11 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg, 
vhost_user_iotlb_cache_remove(vq, imsg->iova, imsg->size); @@ -94424,7 +105345,7 @@ index a781346c4d..bf489cb13b 100644 } break; default: -@@ -2783,30 +2796,37 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg) +@@ -2783,30 +2799,37 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg) ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE, msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num); @@ -94470,7 +105391,7 @@ index a781346c4d..bf489cb13b 100644 return ret; } -@@ -2873,6 +2893,9 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, +@@ -2873,6 +2896,9 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, case VHOST_USER_SET_VRING_ADDR: vring_idx = msg->payload.addr.index; break; @@ -94480,7 +105401,7 @@ index a781346c4d..bf489cb13b 100644 default: return 0; } -@@ -2961,7 +2984,6 @@ vhost_user_msg_handler(int vid, int fd) +@@ -2961,7 +2987,6 @@ vhost_user_msg_handler(int vid, int fd) return -1; } @@ -94488,7 +105409,7 @@ index a781346c4d..bf489cb13b 100644 request = msg.request.master; if (request > VHOST_USER_NONE && request < VHOST_USER_MAX && vhost_message_str[request]) { -@@ -3103,9 +3125,11 @@ vhost_user_msg_handler(int vid, int fd) +@@ -3103,9 +3128,11 @@ vhost_user_msg_handler(int vid, int fd) } else if (ret == RTE_VHOST_MSG_RESULT_ERR) { VHOST_LOG_CONFIG(ERR, "vhost message handling failed.\n"); @@ -94501,7 +105422,7 @@ index a781346c4d..bf489cb13b 100644 for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; bool cur_ready = vq_is_ready(dev, vq); -@@ -3116,10 +3140,11 @@ vhost_user_msg_handler(int vid, int fd) +@@ -3116,10 +3143,11 @@ vhost_user_msg_handler(int vid, int fd) } } @@ -94514,7 +105435,7 @@ index a781346c4d..bf489cb13b 100644 goto out; /* -@@ -3146,7 +3171,7 @@ vhost_user_msg_handler(int vid, int fd) +@@ -3146,7 +3174,7 @@ vhost_user_msg_handler(int vid, int fd) } out: @@ -94524,7 +105445,7 @@ index a781346c4d..bf489cb13b 100644 static int process_slave_message_reply(struct virtio_net *dev, diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c -index b3d954aab4..b211799687 100644 +index b3d954aab4..9f74a3c997 100644 --- a/dpdk/lib/vhost/virtio_net.c +++ b/dpdk/lib/vhost/virtio_net.c @@ -415,6 +415,16 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) @@ -94629,6 +105550,24 @@ index b3d954aab4..b211799687 100644 uint32_t buffer_len[vq->size]; uint16_t buffer_buf_id[vq->size]; uint16_t buffer_desc_count[vq->size]; +@@ -1574,7 +1591,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev, + else + max_tries = 1; + +- while (size > 0) { ++ do { + /* + * if we tried all available ring items, and still + * can't get enough buf, it means something abnormal +@@ -1601,7 +1618,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev, + avail_idx += desc_count; + if (avail_idx >= vq->size) + avail_idx -= vq->size; +- } ++ } while (size > 0); + + if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0)) + return -1; @@ -1661,7 +1678,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count) { @@ -94749,22 +105688,68 @@ index b3d954aab4..b211799687 100644 PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset), (uint32_t)buf_avail, 0); -@@ -2551,6 +2557,14 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2511,7 +2517,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + { + uint16_t i; + uint16_t free_entries; +- uint16_t dropped = 0; + 
static bool allocerr_warned; + + /* +@@ -2551,6 +2556,11 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, update_shadow_used_ring_split(vq, head_idx, 0); -+ if (unlikely(buf_len <= dev->vhost_hlen)) { -+ dropped += 1; -+ i++; ++ if (unlikely(buf_len <= dev->vhost_hlen)) + break; -+ } + + buf_len -= dev->vhost_hlen; + err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len); if (unlikely(err)) { /* -@@ -2754,6 +2768,11 @@ vhost_dequeue_single_packed(struct virtio_net *dev, +@@ -2564,8 +2574,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + buf_len, mbuf_pool->name, dev->ifname); + allocerr_warned = true; + } +- dropped += 1; +- i++; + break; + } + +@@ -2578,26 +2586,21 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + dev->ifname); + allocerr_warned = true; + } +- dropped += 1; +- i++; + break; + } + } + +- if (dropped) +- rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1); +- +- vq->last_avail_idx += i; ++ if (unlikely(count != i)) ++ rte_pktmbuf_free_bulk(&pkts[i], count - i); + +- do_data_copy_dequeue(vq); +- if (unlikely(i < count)) +- vq->shadow_used_idx = i; + if (likely(vq->shadow_used_idx)) { ++ vq->last_avail_idx += vq->shadow_used_idx; ++ do_data_copy_dequeue(vq); + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } + +- return (i - dropped); ++ return i; + } + + __rte_noinline +@@ -2754,6 +2757,11 @@ vhost_dequeue_single_packed(struct virtio_net *dev, VHOST_ACCESS_RO) < 0)) return -1; diff --git a/SPECS/openvswitch2.17.spec b/SPECS/openvswitch2.17.spec index 2704aba..ed33f43 100644 --- a/SPECS/openvswitch2.17.spec +++ b/SPECS/openvswitch2.17.spec @@ -63,7 +63,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 2.17.0 -Release: 190%{?dist} +Release: 191%{?dist} # Nearly all of openvswitch is ASL 2.0. 
The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -749,6 +749,296 @@ exit 0 %endif %changelog +* Thu Oct 24 2024 Open vSwitch CI - 2.17.0-191 +- Merging dpdk subtree [RH git: e08be57038] + Commit list: + 551d7da597 Merge tag 'v21.11.8' into 21.11 + d20fd65f25 version: 21.11.8 + ced6fe6d45 kni: use strscpy + 617f9462c1 net/softnic: fix maybe-uninitialized warning + 680818068d version: 21.11.8-rc1 + 06a1fb9ff4 app/testpmd: fix interactive mode on Windows + 37408e6b7a app/testpmd: fix early exit from signal + 8211aee58f app/testpmd: cleanup cleanly from signal + 678229914e doc: fix mbuf flags + 764432d99c net/ena: fix checksum handling + 6059b6a729 dma/idxd: fix setup with Ubuntu 24.04 + 24e7731981 dma/idxd: add verbose option to config script + 699d2b93fa dma/idxd: fix default for workqueue options + 07a19a205d dma/idxd: add generic option for queue config + 82c2a85726 net: fix outer UDP checksum in Intel prepare helper + 9290e6ebc7 ethdev: fix device init without socket-local memory + 77d08c98e7 test/crypto: remove unused stats in setup + a46619d81e config: fix warning for cross build with meson >= 1.3.0 + 9234d31785 doc: add baseline mode in l3fwd-power guide + bee1b4cf67 doc: remove reference to mbuf pkt field + b1b0e806ec net/ice/base: fix temporary failures reading NVM + 2a885993dc net/hns3: fix uninitialized variable in FEC query + 7da32cce88 bus/vdev: fix device reinitialization + cb6ae8135d malloc: fix multi-process wait condition handling + 502c8f0344 app/pdump: handle SIGTERM and SIGHUP + d2c2fc4709 app/dumpcap: handle SIGTERM and SIGHUP + b898d9edce bus/pci: fix FD in secondary process + 4231013b08 bus/pci: fix UIO resource mapping in secondary process + e3c5d1f39c app/testpmd: fix build on signed comparison + a76b81f212 ethdev: fix GENEVE option item conversion + 8ef004bdde net/ark: fix index arithmetic + df1ab1523a net/hns3: check Rx DMA address alignmnent + 791fb23625 common/mlx5: remove unneeded field when modify RQ table + f40768049f net/mlx5: fix uplink port probing in bonding mode + b0ab250b5c net/mlx5: fix end condition of reading xstats + 4032e7572a net/mlx5: fix MTU configuration + 24fc9686af net/mlx5: fix Arm build with GCC 9.1 + 525eee1f7e net/ice: fix return value for raw pattern parsing + 62d214cef7 net/ice: fix memory leaks in raw pattern parsing + f7da9791b7 test/crypto: fix asymmetric capability test + 00d901b2fc doc: fix typo in l2fwd-crypto guide + 02bda04ec3 test/crypto: fix allocation comment + 44b500589c net/nfp: fix disabling 32-bit build + e4e2cbbdbd net/ena: fix return value check + b4b2b2dbc8 net/ena: fix bad checksum handling + f7072b7cac net/vmxnet3: fix init logs + 5cdda1dc0b net/txgbe: fix Rx interrupt + e2b706371f net/ngbe: fix memory leaks + 45e7c92c6f net/txgbe: fix memory leaks + adffa231be net/ngbe: fix MTU range + d5ca03a046 net/txgbe: fix MTU range + 71944f455d net/ngbe: fix hotplug remove + a6e71f39cc net/txgbe: fix hotplug remove + c7640d6f91 net/ngbe: keep PHY power down while device probing + d7774d77a5 net/txgbe: fix VF promiscuous and allmulticast + 06c7322fce net/txgbe: reconfigure more MAC Rx registers + 5d3adce6c8 net/txgbe: restrict configuration of VLAN strip offload + ee5d65c322 net/txgbe: fix Tx hang on queue disable + b1e9d2e692 net/txgbe: fix flow filters in VT mode + 6d3c1dfd5f net/txgbe: fix tunnel packet parsing + 5d16a5c0b0 app/testpmd: fix parsing for connection tracking item + 3fc3956874 app/testpmd: handle IEEE1588 init failure + 7cf71d7715 net/ice/base: fix masking when reading context + f93aad91e0 
net/ice/base: fix board type definition + 8452229f93 net/ice/base: fix potential TLV length overflow + 041079fd49 net/ice/base: fix check for existing switch rule + ad10a87ffd net/ice/base: fix return type of bitmap hamming weight + dd51a93ab6 net/ice/base: fix GCS descriptor field offsets + bd46ca68c1 net/ice/base: fix size when allocating children arrays + ffa6d84d02 net/ice/base: fix sign extension + 1b20f194b2 net/ice/base: fix pointer to variable outside scope + e6b69e13c8 buildtools: fix build with clang 17 and ASan + 1e00bb1fb1 fbarray: fix finding for unaligned length + 728048cc94 bus/dpaa: remove redundant file descriptor check + 9fe819e421 common/dpaax: fix node array overrun + 5ab7860c51 common/dpaax: fix IOVA table cleanup + e46935eae4 bus/dpaa: fix memory leak in bus scan + 373efadc5f bus/dpaa: fix bus scan for DMA devices + 23be36e929 app/testpmd: fix help string of BPF load command + 2de908abe1 eal/linux: lower log level on allocation attempt failure + 33db9b4509 fbarray: fix lookbehind ignore mask handling + 85308d31b1 fbarray: fix lookahead ignore mask handling + 313de37f27 fbarray: fix incorrect lookbehind behavior + 16039f57ff fbarray: fix incorrect lookahead behavior + a16a1a7267 crypto/dpaa2_sec: fix event queue user context + 4d95152c91 common/dpaax/caamflib: fix PDCP AES-AES watchdog error + b3f493cc72 common/dpaax/caamflib: fix PDCP-SDAP watchdog error + 3e1ff62d6d crypto/openssl: optimize 3DES-CTR context init + 6a46407ad7 cryptodev: validate crypto callbacks from next node + a27e346a93 cryptodev: fix build without crypto callbacks + 87bdd9a583 crypto/cnxk: fix minimal input normalization + f7e28211b7 baseband/la12xx: forbid secondary process + 38c54ad34e telemetry: fix connection parameter parsing + 7598b5b537 bpf: fix load hangs with six IPv6 addresses + 89a1505c5a bpf: fix MOV instruction evaluation + e296365eb4 vdpa/sfc: remove dead code + 2c3f6129d9 dmadev: fix structure alignment + 967cb3d5bd app/bbdev: fix interrupt tests + 90859e988c telemetry: lower log level on socket error + 2a597e9f8b net/mlx5: fix hash Rx queue release in flow sample + ac5f04c92a net/mlx5: fix indexed pool with invalid index + 1940acf81d common/mlx5: fix unsigned/signed mismatch + f12baa45c9 hash: fix RCU reclamation size + 6fa9a2e32d net/cnxk: fix promiscuous state after MAC change + 9c45669c03 net/cnxk: fix outbound security with higher packet burst + d8a3470e22 net/cnxk: fix RSS config + 2070487977 net/ixgbe/base: fix PHY ID for X550 + ea2d868cc5 net/ixgbe/base: fix 5G link speed reported on VF + 2b53ce832b net/ixgbe/base: revert advertising for X550 2.5G/5G + f7f0ac1a36 net/e1000/base: fix link power down + f6e83fa8b3 net/ixgbe: do not create delayed interrupt handler twice + 1d72c06705 net/ixgbe: do not update link status in secondary process + 7a97a44a0a net/fm10k: fix cleanup during init failure + 1f57a72efc eal: fix logs for '--lcores' + 4722e89fcd event/sw: fix warning from useless snprintf + 8863a119d0 net/virtio: fix MAC table update + 1a7bda79d1 vhost: cleanup resubmit info before inflight setup + 2f6234f1d9 vhost: fix build with GCC 13 + 8a77d04eb3 hash: check name when creating a hash + bb01e07f1d hash: fix return code description in Doxygen + 8e20d5d780 app/testpmd: fix lcore ID restriction + d6ec78819d net/i40e: fix outer UDP checksum offload for X710 + ce87e585c5 app/testpmd: fix outer IP checksum offload + c5411472c3 net/ice: fix check for outer UDP checksum offload + c2baa4b8da net/axgbe: fix linkup in PHY status + 63539e77be net/axgbe: delay AN timeout during KR 
training + d168063444 net/axgbe: fix Tx flow on 30H HW + 1f85d756aa net/axgbe: check only minimum speed for cables + 3fe8271448 net/axgbe: fix connection for SFP+ active cables + 35429aa99d net/axgbe: fix SFP codes check for DAC cables + a14247758a net/axgbe: disable interrupts during device removal + ef8e4aa855 net/axgbe: update DMA coherency values + 8d53ecbbf6 net/axgbe: fix fluctuations for 1G Bel Fuse SFP + 4615048c8a net/axgbe: reset link when link never comes back + 8eeb4d4534 net/axgbe: fix MDIO access for non-zero ports and CL45 PHYs + a599aacf12 net/tap: fix file descriptor check in isolated flow + 6825397cf6 net/af_xdp: count mbuf allocation failures + 6d6630439d net/af_xdp: fix port ID in Rx mbuf + 9b8ca61893 doc: fix testpmd ring size command + 858c31a501 net/af_packet: align Rx/Tx structs to cache line + 8fb5749bc6 net/hns3: disable SCTP verification tag for RSS hash input + 2cd371c2b2 net/hns3: fix variable overflow + 46ac5a94ba net/hns3: fix double free for Rx/Tx queue + 7d93896c26 net/hns3: fix Rx timestamp flag + 4b362a6ca1 net/hns3: fix offload flag of IEEE 1588 + 024e9662e7 latencystats: fix literal float suffix + caed4d5e2d eal/windows: install sched.h file + bd0e650520 net/virtio-user: add memcpy check + d00c004916 pcapng: add memcpy check + 87e7322144 eal/unix: support ZSTD compression for firmware + a7fb1c9784 bus/pci: fix build with musl 1.2.4 / Alpine 3.19 + b40fd60a9d test/crypto: fix vector global buffer overflow + 49b2102bf9 version: 21.11.7 + e9b8b369de version: 21.11.7-rc1 + adfa4b4d0c net/hns3: refactor handle mailbox function + a38893de7d net/hns3: refactor send mailbox function + 465327ef11 net/nfp: fix Rx and Tx queue state + 8f8564ff2c net/axgbe: fix Rx and Tx queue state + b42960b1d7 net/af_xdp: fix leak on XSK configuration failure + d13f7b11d4 crypto/ipsec_mb: fix incorrectly setting cipher keys + 6be826afff net/nfp: fix resource leak for PF initialization + aa99de9993 examples/ipsec-secgw: fix typo in error message + c6ab7f7241 test/cfgfile: fix typo in error messages + 6e0fe10015 test/power: fix typo in error message + fe97b1e3ed doc: fix typo in packet framework guide + 13e54713ae doc: fix typo in profiling guide + 2ee1c55567 net/vmxnet3: ignore Rx queue interrupt setup on FreeBSD + 46f9124cae doc: fix default IP fragments maximum in programmer guide + 0bcf927db2 net/bnxt: fix number of Tx queues being created + 307a7cc705 net/mlx5: fix warning about copy length + 61aa88de54 net/mlx5: fix drop action release timing + 165b252b21 net/mlx5: fix age position in hairpin split + eadfb0fbb8 net/mlx5: prevent ioctl failure log flooding + 123f0bcf24 doc: update link to Windows DevX in mlx5 guide + 95826daab3 net/hns3: support new device + 300a10e4d3 app/testpmd: fix --stats-period option check + 4db5876a5e net/ena/base: restructure interrupt handling + cb98b1b26d net/ena/base: limit exponential backoff + b063b49cf4 doc: add link speeds configuration in features table + 4a74870ae4 doc: add traffic manager in features table + 5c731f2f53 net/hns3: enable PFC for all user priorities + 8f34999c24 examples/l3fwd: fix Rx over not ready port + 6241c4f334 examples/packet_ordering: fix Rx with reorder mode disabled + da59128c3f test: do not count skipped tests as executed + 30c9214352 test/mbuf: fix external mbuf case with assert enabled + 617bccf412 config: fix CPU instruction set for cross-build + bf8fb07ca7 test: fix probing in secondary process + 2481136319 net/mlx5: fix counters map in bonding mode + 03c7f0a177 net/mlx5: fix VLAN handling in meter split + 
c52e6e0ecd net/mlx5: fix error packets drop in regular Rx + 411422bb04 net/mlx5: fix use after free when releasing Tx queues + 2556c23922 common/cnxk: fix possible out-of-bounds access + 22c015d09d common/cnxk: fix mbox struct attributes + ed1fd02bd4 common/cnxk: fix RSS RETA configuration + 92f6f10e9d net/cnxk: fix MTU limit + 603e63f7a6 common/cnxk: fix Tx MTU configuration + 61ced40b0d net/bnx2x: fix warnings about memcpy lengths + d8f8df9c46 net/ice: remove incorrect 16B descriptor read block + dcf42c72bc net/iavf: remove incorrect 16B descriptor read block + 007b1f1ad7 net/i40e: remove incorrect 16B descriptor read block + 4a95397cc0 net/ixgbe: increase VF reset timeout + 22e7e531ce net/iavf: remove error logs for VLAN offloading + 153ce28fcf net/ixgbevf: fix RSS init for x550 NICs + 1de0e75b7b net/bnxt: fix null pointer dereference + 6131c863c8 net/tap: fix traffic control handle calculation + f87697c6ad net/tap: do not overwrite flow API errors + 4e761a47ab doc: fix typos in cryptodev overview + c5d16ff345 app/crypto-perf: add missing op resubmission + 51cdf41427 app/crypto-perf: fix out-of-place mbuf size + c735ad16ff eventdev: fix Doxygen processing of vector struct + 5730dc49ba eventdev: improve Doxygen comments on configure struct + 34342122cd test/event: fix crash in Tx adapter freeing + c87e76f66b event/dlb2: remove superfluous memcpy + c2671c9adf doc: fix configuration in baseband 5GNR driver guide + a5224aa517 telemetry: fix empty JSON dictionaries + 1af693e007 telemetry: fix connected clients count + 40625500ac net/mlx5: fix stats query crash in secondary process + 83738589f7 common/mlx5: fix duplicate read of general capabilities + 5c8bbc6b7d net/mlx5: fix GENEVE TLV option management + 9fe858d7b9 net/mlx5: fix jump action validation + 79c2b508d9 common/cnxk: fix mbox region copy + 5a59e070c0 net/cnxk: fix flow RSS configuration + 638141e6c5 net/bnxt: fix deadlock in ULP timer callback + 659e7c7b2d net/bnxt: modify locking for representor Tx + f7380ba809 net/bnxt: fix backward firmware compatibility + c208fb7ed9 net/bnxt: fix speed change from 200G to 25G on Thor + e3391de3f7 net/bnxt: fix 50G and 100G forced speed + f6e8248127 net/bnxt: fix array overflow + 7d331e7bd1 net/netvsc: fix VLAN metadata parsing + d100a5e796 net: add macros for VLAN metadata parsing + 18fb9b63a8 net/memif: fix extra mbuf refcnt update in zero copy Tx + 82aea5d9b4 common/sfc_efx/base: use C11 static assert + 3b7cc646a9 net/ionic: fix device close + bee2ac7906 net/ionic: fix RSS query + 04fb58b84f app/testpmd: fix crash in multi-process forwarding + 15d533ce94 drivers/net: fix buffer overflow for packet types list + be2dc69e86 net/vmxnet3: fix initialization on FreeBSD + 2597a7df7c app/testpmd: hide --bitrate-stats in help if disabled + e690cae685 doc: add --latencystats option in testpmd guide + d2d6bdae86 net/hns3: remove QinQ insert support for VF + 51fd05c6ad net/hns3: fix reset level comparison + a327482169 net/hns3: fix disable command with firmware + 72126b1d1c net/hns3: fix VF multiple count on one reset + 9b6c3117a1 net/hns3: refactor PF mailbox message struct + 8b31b47194 net/hns3: refactor VF mailbox message struct + 13696b1aa8 net/af_xdp: fix memzone leak on config failure + b886b2c079 ethdev: fix NVGRE encap flow action description + b1d48396f2 doc: fix commands in eventdev test tool guide + 15d001811d test/event: skip test if no driver is present + d84482b4ce event/cnxk: fix dequeue timeout configuration + 33266c1331 app/crypto-perf: fix encrypt operation verification + 4fe2036cef 
app/crypto-perf: fix data comparison + fa23c86e34 app/crypto-perf: fix next segment mbuf + aed510b37c common/cnxk: fix memory leak in CPT init + 7683a75096 cryptodev: remove unused extern variable + b3ec4db23d vhost: fix memory leak in Virtio Tx split path + 15afe0935f vhost: fix deadlock during vDPA SW live migration + 519dd4c280 net/virtio: remove duplicate queue xstats + 7293e69ddf vhost: fix virtqueue access check in vhost-user setup + d27dfc1a75 build: fix linker warnings about undefined symbols + 8fd1646a47 net/nfp: fix calloc parameters + 59160624f8 net/bnx2x: fix calloc parameters + bb4eb1f8fc common/mlx5: fix calloc parameters + f6975c91bc examples/qos_sched: fix memory leak in args parsing + f30f9a3c5e test: verify strdup return + 868c62ad59 app/crypto-perf: verify strdup return + 268ceff07f app/pdump: verify strdup return + 59b5633558 app/dumpcap: verify strdup return + b8422c5011 net/failsafe: fix memory leak in args parsing + bb7030a551 event/cnxk: verify strdup return + 93cc8c419f dma/idxd: verify strdup return + fa4c241ce7 bus/vdev: verify strdup return + 5a753bd6dd bus/fslmc: verify strdup return + f128469220 bus/dpaa: verify strdup return + 7fc3309c5d eal: verify strdup return + 83776248bc eal/x86: add AMD vendor check for TSC calibration + 100a4dc25e ci: update versions of actions in GHA + 7f994fccfe kernel/freebsd: fix module build on FreeBSD 14 + c354880dcf net/ice: fix tunnel TSO capabilities + 78c0d6dd0b net/ice: fix link update + 4f2f83c0a6 net/ixgbe: fix memoy leak after device init failure + 835b8ab53c net/iavf: fix memory leak on security context error + 3c1d8f321d net/i40e: remove redundant judgment in flow parsing + 570eff65fd regexdev: fix logtype register + 014409d9f3 hash: remove some dead code + + * Tue Oct 22 2024 Open vSwitch CI - 2.17.0-190 - Merging upstream branch-2.17 [RH git: 442fb52a7c] Commit list: