From cd14f6cf07296a00a61fdaf416bbcf1cb78fe452 Mon Sep 17 00:00:00 2001 From: Open vSwitch CI Date: May 12 2021 17:17:12 +0000 Subject: Import openvswitch2.15-2.15.0-18 from Fast DataPath --- diff --git a/SOURCES/openvswitch-2.15.0.patch b/SOURCES/openvswitch-2.15.0.patch index fb00e69..2172040 100644 --- a/SOURCES/openvswitch-2.15.0.patch +++ b/SOURCES/openvswitch-2.15.0.patch @@ -182,6 +182,18006 @@ index 1f2b7a3668..8b5d075840 100644 openvswitch (2.15.0-1) unstable; urgency=low * New upstream version +diff --git a/dpdk/VERSION b/dpdk/VERSION +index 8b0beab16a..2dbbe00e67 100644 +--- a/dpdk/VERSION ++++ b/dpdk/VERSION +@@ -1 +1 @@ +-20.11.0 ++20.11.1 +diff --git a/dpdk/app/meson.build b/dpdk/app/meson.build +index eb74f215a3..87fc195dbf 100644 +--- a/dpdk/app/meson.build ++++ b/dpdk/app/meson.build +@@ -25,6 +25,10 @@ apps = [ + lib_execinfo = cc.find_library('execinfo', required: false) + + default_cflags = machine_args + ['-DALLOW_EXPERIMENTAL_API'] ++default_ldflags = [] ++if get_option('default_library') == 'static' and not is_windows ++ default_ldflags += ['-Wl,--export-dynamic'] ++endif + + foreach app:apps + build = true +@@ -32,6 +36,7 @@ foreach app:apps + sources = [] + includes = [] + cflags = default_cflags ++ ldflags = default_ldflags + objs = [] # other object files to link against, used e.g. for + # instruction-set optimized versions of code + +@@ -58,8 +63,10 @@ foreach app:apps + executable('dpdk-' + name, + sources, + c_args: cflags, ++ link_args: ldflags, + link_whole: link_libs, + dependencies: dep_objs, ++ include_directories: includes, + install_rpath: join_paths(get_option('prefix'), + driver_install_path), + install: true) +diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c +index d743209f0d..b9587f7ded 100644 +--- a/dpdk/app/proc-info/main.c ++++ b/dpdk/app/proc-info/main.c +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -301,14 +302,13 @@ proc_info_parse_args(int argc, char **argv) + } else if (!strncmp(long_option[option_index].name, + "xstats-ids", + MAX_LONG_OPT_SZ)) { +- nb_xstats_ids = parse_xstats_ids(optarg, ++ int ret = parse_xstats_ids(optarg, + xstats_ids, MAX_NB_XSTATS_IDS); +- +- if (nb_xstats_ids <= 0) { ++ if (ret <= 0) { + printf("xstats-id list parse error.\n"); + return -1; + } +- ++ nb_xstats_ids = ret; + } + break; + default: +@@ -420,11 +420,9 @@ static void collectd_resolve_cnt_type(char *cnt_type, size_t cnt_type_len, + } else if ((type_end != NULL) && + (strncmp(cnt_name, "flow_", strlen("flow_"))) == 0) { + if (strncmp(type_end, "_filters", strlen("_filters")) == 0) +- strlcpy(cnt_type, "operations", cnt_type_len); ++ strlcpy(cnt_type, "filter_result", cnt_type_len); + else if (strncmp(type_end, "_errors", strlen("_errors")) == 0) + strlcpy(cnt_type, "errors", cnt_type_len); +- else if (strncmp(type_end, "_filters", strlen("_filters")) == 0) +- strlcpy(cnt_type, "filter_result", cnt_type_len); + } else if ((type_end != NULL) && + (strncmp(cnt_name, "mac_", strlen("mac_"))) == 0) { + if (strncmp(type_end, "_errors", strlen("_errors")) == 0) +@@ -648,11 +646,16 @@ metrics_display(int port_id) + } + + static void +-show_security_context(uint16_t portid) ++show_security_context(uint16_t portid, bool inline_offload) + { +- void *p_ctx = rte_eth_dev_get_sec_ctx(portid); ++ void *p_ctx; + const struct rte_security_capability *s_cap; + ++ if (inline_offload) ++ p_ctx = rte_eth_dev_get_sec_ctx(portid); ++ else ++ p_ctx = rte_cryptodev_get_sec_ctx(portid); ++ + if (p_ctx == NULL) + 
return; + +@@ -859,7 +862,7 @@ show_port(void) + } + + #ifdef RTE_LIB_SECURITY +- show_security_context(i); ++ show_security_context(i, true); + #endif + } + } +@@ -1210,7 +1213,6 @@ show_crypto(void) + + display_crypto_feature_info(dev_info.feature_flags); + +- memset(&stats, 0, sizeof(0)); + if (rte_cryptodev_stats_get(i, &stats) == 0) { + printf("\t -- stats\n"); + printf("\t\t + enqueue count (%"PRIu64")" +@@ -1224,7 +1226,7 @@ show_crypto(void) + } + + #ifdef RTE_LIB_SECURITY +- show_security_context(i); ++ show_security_context(i, false); + #endif + } + } +@@ -1268,8 +1270,6 @@ show_ring(char *name) + static void + show_mempool(char *name) + { +- uint64_t flags = 0; +- + snprintf(bdr_str, MAX_STRING_LEN, " show - MEMPOOL "); + STATS_BDR_STR(10, bdr_str); + +@@ -1277,8 +1277,8 @@ show_mempool(char *name) + struct rte_mempool *ptr = rte_mempool_lookup(name); + if (ptr != NULL) { + struct rte_mempool_ops *ops; ++ uint64_t flags = ptr->flags; + +- flags = ptr->flags; + ops = rte_mempool_get_ops(ptr->ops_index); + printf(" - Name: %s on socket %d\n" + " - flags:\n" +diff --git a/dpdk/app/test-crypto-perf/cperf_options_parsing.c b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +index 03ed6f5942..0466f7baf8 100644 +--- a/dpdk/app/test-crypto-perf/cperf_options_parsing.c ++++ b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +@@ -24,7 +24,7 @@ usage(char *progname) + { + printf("%s [EAL options] --\n" + " --silent: disable options dump\n" +- " --ptest throughput / latency / verify / pmd-cycleount :" ++ " --ptest throughput / latency / verify / pmd-cyclecount :" + " set test type\n" + " --pool_sz N: set the number of crypto ops/mbufs allocated\n" + " --total-ops N: set the number of total operations performed\n" +diff --git a/dpdk/app/test-crypto-perf/cperf_test_latency.c b/dpdk/app/test-crypto-perf/cperf_test_latency.c +index 0e4d0e1538..159fe8492b 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_latency.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_latency.c +@@ -310,11 +310,11 @@ cperf_latency_test_runner(void *arg) + if (ctx->options->csv) { + if (rte_atomic16_test_and_set(&display_once)) + printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, " +- "Packet Size, cycles, time (us)"); ++ "cycles, time (us)"); + + for (i = 0; i < ctx->options->total_ops; i++) { + +- printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f", ++ printf("\n%u,%u,%u,%"PRIu64",%"PRIu64",%.3f", + ctx->lcore_id, ctx->options->test_buffer_size, + test_burst_size, i + 1, + ctx->res[i].tsc_end - ctx->res[i].tsc_start, +diff --git a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +index 4e67d3aebd..844659aeca 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +@@ -16,7 +16,7 @@ + #define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n" + #define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n" + #define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" +-#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.3f;%.3f;%.3f\n" ++#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n" + + struct cperf_pmd_cyclecount_ctx { + uint8_t dev_id; +diff --git a/dpdk/app/test-crypto-perf/cperf_test_throughput.c b/dpdk/app/test-crypto-perf/cperf_test_throughput.c +index f30f7d5c2c..f6eb8cf259 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_throughput.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_throughput.c +@@ -299,8 +299,8 @@ cperf_throughput_test_runner(void 
*test_ctx) + "Failed Deq,Ops(Millions),Throughput(Gbps)," + "Cycles/Buf\n\n"); + +- printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";" +- "%.3f;%.3f;%.3f\n", ++ printf("%u,%u,%u,%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"," ++ "%.3f,%.3f,%.3f\n", + ctx->lcore_id, + ctx->options->test_buffer_size, + test_burst_size, +diff --git a/dpdk/app/test-crypto-perf/cperf_test_verify.c b/dpdk/app/test-crypto-perf/cperf_test_verify.c +index 833bc9a552..2939aeaa93 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_verify.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_verify.c +@@ -406,7 +406,7 @@ cperf_verify_test_runner(void *test_ctx) + "Burst Size,Enqueued,Dequeued,Failed Enq," + "Failed Deq,Failed Ops\n"); + +- printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";" ++ printf("%10u,%10u,%u,%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"," + "%"PRIu64"\n", + ctx->lcore_id, + ctx->options->max_buffer_size, +diff --git a/dpdk/app/test-crypto-perf/main.c b/dpdk/app/test-crypto-perf/main.c +index 5f035519c3..49af812d8b 100644 +--- a/dpdk/app/test-crypto-perf/main.c ++++ b/dpdk/app/test-crypto-perf/main.c +@@ -390,7 +390,7 @@ cperf_check_test_vector(struct cperf_options *opts, + if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { + if (test_vec->plaintext.data == NULL) + return -1; +- } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) { ++ } else { + if (test_vec->plaintext.data == NULL) + return -1; + if (test_vec->plaintext.length < opts->max_buffer_size) +@@ -440,7 +440,7 @@ cperf_check_test_vector(struct cperf_options *opts, + return -1; + if (test_vec->plaintext.length < opts->max_buffer_size) + return -1; +- } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) { ++ } else { + if (test_vec->plaintext.data == NULL) + return -1; + if (test_vec->plaintext.length < opts->max_buffer_size) +@@ -530,14 +530,14 @@ main(int argc, char **argv) + + ret = cperf_options_parse(&opts, argc, argv); + if (ret) { +- RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n"); ++ RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n"); + goto err; + } + + ret = cperf_options_check(&opts); + if (ret) { + RTE_LOG(ERR, USER1, +- "Checking on or more user options failed\n"); ++ "Checking one or more user options failed\n"); + goto err; + } + +diff --git a/dpdk/app/test-eventdev/test_perf_common.h b/dpdk/app/test-eventdev/test_perf_common.h +index ff9705df88..e7233e5a5b 100644 +--- a/dpdk/app/test-eventdev/test_perf_common.h ++++ b/dpdk/app/test-eventdev/test_perf_common.h +@@ -97,8 +97,13 @@ perf_process_last_stage(struct rte_mempool *const pool, + void *bufs[], int const buf_sz, uint8_t count) + { + bufs[count++] = ev->event_ptr; +- w->processed_pkts++; ++ ++ /* wmb here ensures event_prt is stored before ++ * updating the number of processed packets ++ * for worker lcores ++ */ + rte_smp_wmb(); ++ w->processed_pkts++; + + if (unlikely(count == buf_sz)) { + count = 0; +@@ -116,6 +121,12 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, + struct perf_elt *const m = ev->event_ptr; + + bufs[count++] = ev->event_ptr; ++ ++ /* wmb here ensures event_prt is stored before ++ * updating the number of processed packets ++ * for worker lcores ++ */ ++ rte_smp_wmb(); + w->processed_pkts++; + + if (unlikely(count == buf_sz)) { +@@ -127,7 +138,6 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, + } + + w->latency += latency; +- rte_smp_wmb(); + return count; + } + +diff --git a/dpdk/app/test-eventdev/test_pipeline_queue.c b/dpdk/app/test-eventdev/test_pipeline_queue.c +index 
7bebac34fc..9a9febb199 100644 +--- a/dpdk/app/test-eventdev/test_pipeline_queue.c ++++ b/dpdk/app/test-eventdev/test_pipeline_queue.c +@@ -83,16 +83,15 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg) + rte_prefetch0(ev[i + 1].mbuf); + if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) { + pipeline_event_tx(dev, port, &ev[i]); +- ev[i].op = RTE_EVENT_OP_RELEASE; + w->processed_pkts++; + } else { + ev[i].queue_id++; + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); ++ pipeline_event_enqueue_burst(dev, port, ev, ++ nb_rx); + } + } +- +- pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +@@ -180,13 +179,13 @@ pipeline_queue_worker_multi_stage_fwd(void *arg) + ev.queue_id = tx_queue[ev.mbuf->port]; + rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0); + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); ++ pipeline_event_enqueue(dev, port, &ev); + w->processed_pkts++; + } else { + ev.queue_id++; + pipeline_fwd_event(&ev, sched_type_list[cq_id]); ++ pipeline_event_enqueue(dev, port, &ev); + } +- +- pipeline_event_enqueue(dev, port, &ev); + } + + return 0; +@@ -213,7 +212,6 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg) + + if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) { + pipeline_event_tx(dev, port, &ev[i]); +- ev[i].op = RTE_EVENT_OP_RELEASE; + w->processed_pkts++; + continue; + } +@@ -222,9 +220,8 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg) + pipeline_fwd_event(&ev[i], cq_id != last_queue ? + sched_type_list[cq_id] : + RTE_SCHED_TYPE_ATOMIC); ++ pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } +- +- pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +@@ -237,6 +234,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg) + const uint8_t *tx_queue = t->tx_evqueue_id; + + while (t->done == false) { ++ uint16_t processed_pkts = 0; + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + +@@ -254,7 +252,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg) + rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0); + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); +- w->processed_pkts++; ++ processed_pkts++; + } else { + ev[i].queue_id++; + pipeline_fwd_event(&ev[i], +@@ -263,6 +261,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg) + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); ++ w->processed_pkts += processed_pkts; + } + + return 0; +diff --git a/dpdk/app/test-flow-perf/actions_gen.c b/dpdk/app/test-flow-perf/actions_gen.c +index ac525f6fdb..f265894247 100644 +--- a/dpdk/app/test-flow-perf/actions_gen.c ++++ b/dpdk/app/test-flow-perf/actions_gen.c +@@ -145,12 +145,10 @@ add_set_meta(struct rte_flow_action *actions, + uint8_t actions_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_action_set_meta meta_action; +- +- do { +- meta_action.data = RTE_BE32(META_DATA); +- meta_action.mask = RTE_BE32(0xffffffff); +- } while (0); ++ static struct rte_flow_action_set_meta meta_action = { ++ .data = RTE_BE32(META_DATA), ++ .mask = RTE_BE32(0xffffffff), ++ }; + + actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META; + actions[actions_counter].conf = &meta_action; +@@ -161,13 +159,11 @@ add_set_tag(struct rte_flow_action *actions, + uint8_t actions_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_action_set_tag tag_action; +- +- do { +- tag_action.data = RTE_BE32(META_DATA); +- tag_action.mask = RTE_BE32(0xffffffff); +- tag_action.index = TAG_INDEX; +- } while (0); ++ static struct rte_flow_action_set_tag 
tag_action = { ++ .data = RTE_BE32(META_DATA), ++ .mask = RTE_BE32(0xffffffff), ++ .index = TAG_INDEX, ++ }; + + actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG; + actions[actions_counter].conf = &tag_action; +@@ -178,11 +174,9 @@ add_port_id(struct rte_flow_action *actions, + uint8_t actions_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_action_port_id port_id; +- +- do { +- port_id.id = PORT_ID_DST; +- } while (0); ++ static struct rte_flow_action_port_id port_id = { ++ .id = PORT_ID_DST, ++ }; + + actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID; + actions[actions_counter].conf = &port_id; +diff --git a/dpdk/app/test-flow-perf/items_gen.c b/dpdk/app/test-flow-perf/items_gen.c +index 2b1ab41467..aaa243a7c4 100644 +--- a/dpdk/app/test-flow-perf/items_gen.c ++++ b/dpdk/app/test-flow-perf/items_gen.c +@@ -25,9 +25,6 @@ add_ether(struct rte_flow_item *items, + static struct rte_flow_item_eth eth_spec; + static struct rte_flow_item_eth eth_mask; + +- memset(ð_spec, 0, sizeof(struct rte_flow_item_eth)); +- memset(ð_mask, 0, sizeof(struct rte_flow_item_eth)); +- + items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH; + items[items_counter].spec = ð_spec; + items[items_counter].mask = ð_mask; +@@ -38,16 +35,12 @@ add_vlan(struct rte_flow_item *items, + uint8_t items_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_item_vlan vlan_spec; +- static struct rte_flow_item_vlan vlan_mask; +- +- uint16_t vlan_value = VLAN_VALUE; +- +- memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan)); +- memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan)); +- +- vlan_spec.tci = RTE_BE16(vlan_value); +- vlan_mask.tci = RTE_BE16(0xffff); ++ static struct rte_flow_item_vlan vlan_spec = { ++ .tci = RTE_BE16(VLAN_VALUE), ++ }; ++ static struct rte_flow_item_vlan vlan_mask = { ++ .tci = RTE_BE16(0xffff), ++ }; + + items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN; + items[items_counter].spec = &vlan_spec; +@@ -61,9 +54,6 @@ add_ipv4(struct rte_flow_item *items, + static struct rte_flow_item_ipv4 ipv4_spec; + static struct rte_flow_item_ipv4 ipv4_mask; + +- memset(&ipv4_spec, 0, sizeof(struct rte_flow_item_ipv4)); +- memset(&ipv4_mask, 0, sizeof(struct rte_flow_item_ipv4)); +- + ipv4_spec.hdr.src_addr = RTE_BE32(para.src_ip); + ipv4_mask.hdr.src_addr = RTE_BE32(0xffffffff); + +@@ -80,9 +70,6 @@ add_ipv6(struct rte_flow_item *items, + static struct rte_flow_item_ipv6 ipv6_spec; + static struct rte_flow_item_ipv6 ipv6_mask; + +- memset(&ipv6_spec, 0, sizeof(struct rte_flow_item_ipv6)); +- memset(&ipv6_mask, 0, sizeof(struct rte_flow_item_ipv6)); +- + /** Set ipv6 src **/ + memset(&ipv6_spec.hdr.src_addr, para.src_ip, + sizeof(ipv6_spec.hdr.src_addr) / 2); +@@ -104,9 +91,6 @@ add_tcp(struct rte_flow_item *items, + static struct rte_flow_item_tcp tcp_spec; + static struct rte_flow_item_tcp tcp_mask; + +- memset(&tcp_spec, 0, sizeof(struct rte_flow_item_tcp)); +- memset(&tcp_mask, 0, sizeof(struct rte_flow_item_tcp)); +- + items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP; + items[items_counter].spec = &tcp_spec; + items[items_counter].mask = &tcp_mask; +@@ -120,9 +104,6 @@ add_udp(struct rte_flow_item *items, + static struct rte_flow_item_udp udp_spec; + static struct rte_flow_item_udp udp_mask; + +- memset(&udp_spec, 0, sizeof(struct rte_flow_item_udp)); +- memset(&udp_mask, 0, sizeof(struct rte_flow_item_udp)); +- + items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP; + items[items_counter].spec = &udp_spec; + 
items[items_counter].mask = &udp_mask; +@@ -141,9 +122,6 @@ add_vxlan(struct rte_flow_item *items, + + vni_value = VNI_VALUE; + +- memset(&vxlan_spec, 0, sizeof(struct rte_flow_item_vxlan)); +- memset(&vxlan_mask, 0, sizeof(struct rte_flow_item_vxlan)); +- + /* Set standard vxlan vni */ + for (i = 0; i < 3; i++) { + vxlan_spec.vni[2 - i] = vni_value >> (i * 8); +@@ -171,9 +149,6 @@ add_vxlan_gpe(struct rte_flow_item *items, + + vni_value = VNI_VALUE; + +- memset(&vxlan_gpe_spec, 0, sizeof(struct rte_flow_item_vxlan_gpe)); +- memset(&vxlan_gpe_mask, 0, sizeof(struct rte_flow_item_vxlan_gpe)); +- + /* Set vxlan-gpe vni */ + for (i = 0; i < 3; i++) { + vxlan_gpe_spec.vni[2 - i] = vni_value >> (i * 8); +@@ -193,18 +168,12 @@ add_gre(struct rte_flow_item *items, + uint8_t items_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_item_gre gre_spec; +- static struct rte_flow_item_gre gre_mask; +- +- uint16_t proto; +- +- proto = RTE_ETHER_TYPE_TEB; +- +- memset(&gre_spec, 0, sizeof(struct rte_flow_item_gre)); +- memset(&gre_mask, 0, sizeof(struct rte_flow_item_gre)); +- +- gre_spec.protocol = RTE_BE16(proto); +- gre_mask.protocol = RTE_BE16(0xffff); ++ static struct rte_flow_item_gre gre_spec = { ++ .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB), ++ }; ++ static struct rte_flow_item_gre gre_mask = { ++ .protocol = RTE_BE16(0xffff), ++ }; + + items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE; + items[items_counter].spec = &gre_spec; +@@ -224,9 +193,6 @@ add_geneve(struct rte_flow_item *items, + + vni_value = VNI_VALUE; + +- memset(&geneve_spec, 0, sizeof(struct rte_flow_item_geneve)); +- memset(&geneve_mask, 0, sizeof(struct rte_flow_item_geneve)); +- + for (i = 0; i < 3; i++) { + geneve_spec.vni[2 - i] = vni_value >> (i * 8); + geneve_mask.vni[2 - i] = 0xff; +@@ -242,18 +208,12 @@ add_gtp(struct rte_flow_item *items, + uint8_t items_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_item_gtp gtp_spec; +- static struct rte_flow_item_gtp gtp_mask; +- +- uint32_t teid_value; +- +- teid_value = TEID_VALUE; +- +- memset(>p_spec, 0, sizeof(struct rte_flow_item_gtp)); +- memset(>p_mask, 0, sizeof(struct rte_flow_item_gtp)); +- +- gtp_spec.teid = RTE_BE32(teid_value); +- gtp_mask.teid = RTE_BE32(0xffffffff); ++ static struct rte_flow_item_gtp gtp_spec = { ++ .teid = RTE_BE32(TEID_VALUE), ++ }; ++ static struct rte_flow_item_gtp gtp_mask = { ++ .teid = RTE_BE32(0xffffffff), ++ }; + + items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP; + items[items_counter].spec = >p_spec; +@@ -265,18 +225,12 @@ add_meta_data(struct rte_flow_item *items, + uint8_t items_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_item_meta meta_spec; +- static struct rte_flow_item_meta meta_mask; +- +- uint32_t data; +- +- data = META_DATA; +- +- memset(&meta_spec, 0, sizeof(struct rte_flow_item_meta)); +- memset(&meta_mask, 0, sizeof(struct rte_flow_item_meta)); +- +- meta_spec.data = RTE_BE32(data); +- meta_mask.data = RTE_BE32(0xffffffff); ++ static struct rte_flow_item_meta meta_spec = { ++ .data = RTE_BE32(META_DATA), ++ }; ++ static struct rte_flow_item_meta meta_mask = { ++ .data = RTE_BE32(0xffffffff), ++ }; + + items[items_counter].type = RTE_FLOW_ITEM_TYPE_META; + items[items_counter].spec = &meta_spec; +@@ -289,21 +243,14 @@ add_meta_tag(struct rte_flow_item *items, + uint8_t items_counter, + __rte_unused struct additional_para para) + { +- static struct rte_flow_item_tag tag_spec; +- static struct rte_flow_item_tag tag_mask; +- 
uint32_t data; +- uint8_t index; +- +- data = META_DATA; +- index = TAG_INDEX; +- +- memset(&tag_spec, 0, sizeof(struct rte_flow_item_tag)); +- memset(&tag_mask, 0, sizeof(struct rte_flow_item_tag)); +- +- tag_spec.data = RTE_BE32(data); +- tag_mask.data = RTE_BE32(0xffffffff); +- tag_spec.index = index; +- tag_mask.index = 0xff; ++ static struct rte_flow_item_tag tag_spec = { ++ .data = RTE_BE32(META_DATA), ++ .index = TAG_INDEX, ++ }; ++ static struct rte_flow_item_tag tag_mask = { ++ .data = RTE_BE32(0xffffffff), ++ .index = 0xff, ++ }; + + items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG; + items[items_counter].spec = &tag_spec; +@@ -318,9 +265,6 @@ add_icmpv4(struct rte_flow_item *items, + static struct rte_flow_item_icmp icmpv4_spec; + static struct rte_flow_item_icmp icmpv4_mask; + +- memset(&icmpv4_spec, 0, sizeof(struct rte_flow_item_icmp)); +- memset(&icmpv4_mask, 0, sizeof(struct rte_flow_item_icmp)); +- + items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP; + items[items_counter].spec = &icmpv4_spec; + items[items_counter].mask = &icmpv4_mask; +@@ -334,9 +278,6 @@ add_icmpv6(struct rte_flow_item *items, + static struct rte_flow_item_icmp6 icmpv6_spec; + static struct rte_flow_item_icmp6 icmpv6_mask; + +- memset(&icmpv6_spec, 0, sizeof(struct rte_flow_item_icmp6)); +- memset(&icmpv6_mask, 0, sizeof(struct rte_flow_item_icmp6)); +- + items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6; + items[items_counter].spec = &icmpv6_spec; + items[items_counter].mask = &icmpv6_mask; +diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c +index 0d2d6aad05..2b9dd3e1f4 100644 +--- a/dpdk/app/test-pmd/cmdline.c ++++ b/dpdk/app/test-pmd/cmdline.c +@@ -163,7 +163,7 @@ static void cmd_help_long_parsed(void *parsed_result, + "Display:\n" + "--------\n\n" + +- "show port (info|stats|summary|xstats|fdir|stat_qmap|dcb_tc|cap) (port_id|all)\n" ++ "show port (info|stats|summary|xstats|fdir|dcb_tc|cap) (port_id|all)\n" + " Display information for port_id, or all.\n\n" + + "show port port_id (module_eeprom|eeprom)\n" +@@ -177,7 +177,7 @@ static void cmd_help_long_parsed(void *parsed_result, + "show port (port_id) rss-hash [key]\n" + " Display the RSS hash functions and RSS hash key of port\n\n" + +- "clear port (info|stats|xstats|fdir|stat_qmap) (port_id|all)\n" ++ "clear port (info|stats|xstats|fdir) (port_id|all)\n" + " Clear information for port_id, or all.\n\n" + + "show (rxq|txq) info (port_id) (queue_id)\n" +@@ -1877,7 +1877,9 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, + __rte_unused void *data) + { + struct cmd_config_max_pkt_len_result *res = parsed_result; ++ uint32_t max_rx_pkt_len_backup = 0; + portid_t pid; ++ int ret; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); +@@ -1886,7 +1888,6 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_port *port = &ports[pid]; +- uint64_t rx_offloads = port->dev_conf.rxmode.offloads; + + if (!strcmp(res->name, "max-pkt-len")) { + if (res->value < RTE_ETHER_MIN_LEN) { +@@ -1897,12 +1898,18 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, + if (res->value == port->dev_conf.rxmode.max_rx_pkt_len) + return; + ++ ret = eth_dev_info_get_print_err(pid, &port->dev_info); ++ if (ret != 0) { ++ printf("rte_eth_dev_info_get() failed for port %u\n", ++ pid); ++ return; ++ } ++ ++ max_rx_pkt_len_backup = port->dev_conf.rxmode.max_rx_pkt_len; ++ + port->dev_conf.rxmode.max_rx_pkt_len = res->value; +- if (res->value > RTE_ETHER_MAX_LEN) +- rx_offloads |= 
DEV_RX_OFFLOAD_JUMBO_FRAME; +- else +- rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +- port->dev_conf.rxmode.offloads = rx_offloads; ++ if (update_jumbo_frame_offload(pid) != 0) ++ port->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len_backup; + } else { + printf("Unknown parameter\n"); + return; +@@ -3782,6 +3789,7 @@ cmd_set_rxoffs_parsed(void *parsed_result, + MAX_SEGS_BUFFER_SPLIT, seg_offsets, 0); + if (nb_segs > 0) + set_rx_pkt_offsets(seg_offsets, nb_segs); ++ cmd_reconfig_device_queue(RTE_PORT_ALL, 0, 1); + } + + cmdline_parse_token_string_t cmd_set_rxoffs_keyword = +@@ -3828,6 +3836,7 @@ cmd_set_rxpkts_parsed(void *parsed_result, + MAX_SEGS_BUFFER_SPLIT, seg_lengths, 0); + if (nb_segs > 0) + set_rx_pkt_segments(seg_lengths, nb_segs); ++ cmd_reconfig_device_queue(RTE_PORT_ALL, 0, 1); + } + + cmdline_parse_token_string_t cmd_set_rxpkts_keyword = +@@ -7555,9 +7564,6 @@ static void cmd_showportall_parsed(void *parsed_result, + RTE_ETH_FOREACH_DEV(i) + fdir_get_infos(i); + #endif +- else if (!strcmp(res->what, "stat_qmap")) +- RTE_ETH_FOREACH_DEV(i) +- nic_stats_mapping_display(i); + else if (!strcmp(res->what, "dcb_tc")) + RTE_ETH_FOREACH_DEV(i) + port_dcb_info_display(i); +@@ -7573,14 +7579,14 @@ cmdline_parse_token_string_t cmd_showportall_port = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, port, "port"); + cmdline_parse_token_string_t cmd_showportall_what = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, what, +- "info#summary#stats#xstats#fdir#stat_qmap#dcb_tc#cap"); ++ "info#summary#stats#xstats#fdir#dcb_tc#cap"); + cmdline_parse_token_string_t cmd_showportall_all = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, all, "all"); + cmdline_parse_inst_t cmd_showportall = { + .f = cmd_showportall_parsed, + .data = NULL, + .help_str = "show|clear port " +- "info|summary|stats|xstats|fdir|stat_qmap|dcb_tc|cap all", ++ "info|summary|stats|xstats|fdir|dcb_tc|cap all", + .tokens = { + (void *)&cmd_showportall_show, + (void *)&cmd_showportall_port, +@@ -7622,8 +7628,6 @@ static void cmd_showport_parsed(void *parsed_result, + else if (!strcmp(res->what, "fdir")) + fdir_get_infos(res->portnum); + #endif +- else if (!strcmp(res->what, "stat_qmap")) +- nic_stats_mapping_display(res->portnum); + else if (!strcmp(res->what, "dcb_tc")) + port_dcb_info_display(res->portnum); + else if (!strcmp(res->what, "cap")) +@@ -7637,7 +7641,7 @@ cmdline_parse_token_string_t cmd_showport_port = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, port, "port"); + cmdline_parse_token_string_t cmd_showport_what = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what, +- "info#summary#stats#xstats#fdir#stat_qmap#dcb_tc#cap"); ++ "info#summary#stats#xstats#fdir#dcb_tc#cap"); + cmdline_parse_token_num_t cmd_showport_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, RTE_UINT16); + +@@ -7645,7 +7649,7 @@ cmdline_parse_inst_t cmd_showport = { + .f = cmd_showport_parsed, + .data = NULL, + .help_str = "show|clear port " +- "info|summary|stats|xstats|fdir|stat_qmap|dcb_tc|cap " ++ "info|summary|stats|xstats|fdir|dcb_tc|cap " + "", + .tokens = { + (void *)&cmd_showport_show, +@@ -17112,6 +17116,7 @@ cmdline_read_from_file(const char *filename) + void + prompt(void) + { ++ int ret; + /* initialize non-constant commands */ + cmd_set_fwd_mode_init(); + cmd_set_fwd_retry_mode_init(); +@@ -17119,15 +17124,23 @@ prompt(void) + testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> "); + if (testpmd_cl == NULL) + return; ++ ++ ret = atexit(prompt_exit); ++ if (ret != 0) ++ 
printf("Cannot set exit function for cmdline\n"); ++ + cmdline_interact(testpmd_cl); +- cmdline_stdin_exit(testpmd_cl); ++ if (ret != 0) ++ cmdline_stdin_exit(testpmd_cl); + } + + void + prompt_exit(void) + { +- if (testpmd_cl != NULL) ++ if (testpmd_cl != NULL) { + cmdline_quit(testpmd_cl); ++ cmdline_stdin_exit(testpmd_cl); ++ } + } + + static void +diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c +index 585cab98b4..de80924e7c 100644 +--- a/dpdk/app/test-pmd/cmdline_flow.c ++++ b/dpdk/app/test-pmd/cmdline_flow.c +@@ -3403,7 +3403,10 @@ static const struct token token_list[] = { + .name = "key", + .help = "RSS hash key", + .next = NEXT(action_rss, NEXT_ENTRY(HEX)), +- .args = ARGS(ARGS_ENTRY_ARB(0, 0), ++ .args = ARGS(ARGS_ENTRY_ARB ++ (offsetof(struct action_rss_data, conf) + ++ offsetof(struct rte_flow_action_rss, key), ++ sizeof(((struct rte_flow_action_rss *)0)->key)), + ARGS_ENTRY_ARB + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, key_len), +diff --git a/dpdk/app/test-pmd/cmdline_mtr.c b/dpdk/app/test-pmd/cmdline_mtr.c +index 399ee56e07..3982787d20 100644 +--- a/dpdk/app/test-pmd/cmdline_mtr.c ++++ b/dpdk/app/test-pmd/cmdline_mtr.c +@@ -312,7 +312,7 @@ static void cmd_show_port_meter_cap_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_show_port_meter_cap = { + .f = cmd_show_port_meter_cap_parsed, + .data = NULL, +- .help_str = "Show port meter cap", ++ .help_str = "show port meter cap ", + .tokens = { + (void *)&cmd_show_port_meter_cap_show, + (void *)&cmd_show_port_meter_cap_port, +@@ -408,7 +408,7 @@ static void cmd_add_port_meter_profile_srtcm_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm = { + .f = cmd_add_port_meter_profile_srtcm_parsed, + .data = NULL, +- .help_str = "Add port meter profile srtcm (rfc2697)", ++ .help_str = "add port meter profile srtcm_rfc2697 ", + .tokens = { + (void *)&cmd_add_port_meter_profile_srtcm_add, + (void *)&cmd_add_port_meter_profile_srtcm_port, +@@ -515,7 +515,7 @@ static void cmd_add_port_meter_profile_trtcm_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm = { + .f = cmd_add_port_meter_profile_trtcm_parsed, + .data = NULL, +- .help_str = "Add port meter profile trtcm (rfc2698)", ++ .help_str = "add port meter profile trtcm_rfc2698 ", + .tokens = { + (void *)&cmd_add_port_meter_profile_trtcm_add, + (void *)&cmd_add_port_meter_profile_trtcm_port, +@@ -627,7 +627,7 @@ static void cmd_add_port_meter_profile_trtcm_rfc4115_parsed( + cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115 = { + .f = cmd_add_port_meter_profile_trtcm_rfc4115_parsed, + .data = NULL, +- .help_str = "Add port meter profile trtcm (rfc4115)", ++ .help_str = "add port meter profile trtcm_rfc4115 ", + .tokens = { + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_add, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port, +@@ -702,7 +702,7 @@ static void cmd_del_port_meter_profile_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_del_port_meter_profile = { + .f = cmd_del_port_meter_profile_parsed, + .data = NULL, +- .help_str = "Delete port meter profile", ++ .help_str = "del port meter profile ", + .tokens = { + (void *)&cmd_del_port_meter_profile_del, + (void *)&cmd_del_port_meter_profile_port, +@@ -827,7 +827,10 @@ static void cmd_create_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_create_port_meter = { + .f = cmd_create_port_meter_parsed, + .data = NULL, +- .help_str = "Create port meter", ++ 
.help_str = "create port meter (yes|no) " ++ "(R|Y|G|D) (R|Y|G|D) (R|Y|G|D) " ++ " " ++ "[ ...]", + .tokens = { + (void *)&cmd_create_port_meter_create, + (void *)&cmd_create_port_meter_port, +@@ -896,7 +899,7 @@ static void cmd_enable_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_enable_port_meter = { + .f = cmd_enable_port_meter_parsed, + .data = NULL, +- .help_str = "Enable port meter", ++ .help_str = "enable port meter ", + .tokens = { + (void *)&cmd_enable_port_meter_enable, + (void *)&cmd_enable_port_meter_port, +@@ -957,7 +960,7 @@ static void cmd_disable_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_disable_port_meter = { + .f = cmd_disable_port_meter_parsed, + .data = NULL, +- .help_str = "Disable port meter", ++ .help_str = "disable port meter ", + .tokens = { + (void *)&cmd_disable_port_meter_disable, + (void *)&cmd_disable_port_meter_port, +@@ -1018,7 +1021,7 @@ static void cmd_del_port_meter_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_del_port_meter = { + .f = cmd_del_port_meter_parsed, + .data = NULL, +- .help_str = "Delete port meter", ++ .help_str = "del port meter ", + .tokens = { + (void *)&cmd_del_port_meter_del, + (void *)&cmd_del_port_meter_port, +@@ -1092,7 +1095,7 @@ static void cmd_set_port_meter_profile_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_profile = { + .f = cmd_set_port_meter_profile_parsed, + .data = NULL, +- .help_str = "Set port meter profile", ++ .help_str = "set port meter profile ", + .tokens = { + (void *)&cmd_set_port_meter_profile_set, + (void *)&cmd_set_port_meter_profile_port, +@@ -1166,7 +1169,8 @@ static void cmd_set_port_meter_dscp_table_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_dscp_table = { + .f = cmd_set_port_meter_dscp_table_parsed, + .data = NULL, +- .help_str = "Update port meter dscp table", ++ .help_str = "set port meter dscp table " ++ "[ ... 
]", + .tokens = { + (void *)&cmd_set_port_meter_dscp_table_set, + (void *)&cmd_set_port_meter_dscp_table_port, +@@ -1276,7 +1280,8 @@ static void cmd_set_port_meter_policer_action_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_policer_action = { + .f = cmd_set_port_meter_policer_action_parsed, + .data = NULL, +- .help_str = "Set port meter policer action", ++ .help_str = "set port meter policer action " ++ " [ ]", + .tokens = { + (void *)&cmd_set_port_meter_policer_action_set, + (void *)&cmd_set_port_meter_policer_action_port, +@@ -1355,7 +1360,7 @@ static void cmd_set_port_meter_stats_mask_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_set_port_meter_stats_mask = { + .f = cmd_set_port_meter_stats_mask_parsed, + .data = NULL, +- .help_str = "Set port meter stats mask", ++ .help_str = "set port meter stats mask ", + .tokens = { + (void *)&cmd_set_port_meter_stats_mask_set, + (void *)&cmd_set_port_meter_stats_mask_port, +@@ -1459,7 +1464,7 @@ static void cmd_show_port_meter_stats_parsed(void *parsed_result, + cmdline_parse_inst_t cmd_show_port_meter_stats = { + .f = cmd_show_port_meter_stats_parsed, + .data = NULL, +- .help_str = "Show port meter stats", ++ .help_str = "show port meter stats (yes|no)", + .tokens = { + (void *)&cmd_show_port_meter_stats_show, + (void *)&cmd_show_port_meter_stats_port, +diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c +index b51de59e1e..dab8afe5dd 100644 +--- a/dpdk/app/test-pmd/config.c ++++ b/dpdk/app/test-pmd/config.c +@@ -183,8 +183,6 @@ nic_stats_display(portid_t port_id) + diff_ns; + uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; + struct rte_eth_stats stats; +- struct rte_port *port = &ports[port_id]; +- uint8_t i; + + static const char *nic_stats_border = "########################"; + +@@ -196,46 +194,12 @@ nic_stats_display(portid_t port_id) + printf("\n %s NIC statistics for port %-2d %s\n", + nic_stats_border, port_id, nic_stats_border); + +- if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { +- printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " +- "%-"PRIu64"\n", +- stats.ipackets, stats.imissed, stats.ibytes); +- printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); +- printf(" RX-nombuf: %-10"PRIu64"\n", +- stats.rx_nombuf); +- printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " +- "%-"PRIu64"\n", +- stats.opackets, stats.oerrors, stats.obytes); +- } +- else { +- printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 +- " RX-bytes: %10"PRIu64"\n", +- stats.ipackets, stats.ierrors, stats.ibytes); +- printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); +- printf(" RX-nombuf: %10"PRIu64"\n", +- stats.rx_nombuf); +- printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 +- " TX-bytes: %10"PRIu64"\n", +- stats.opackets, stats.oerrors, stats.obytes); +- } +- +- if (port->rx_queue_stats_mapping_enabled) { +- printf("\n"); +- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { +- printf(" Stats reg %2d RX-packets: %10"PRIu64 +- " RX-errors: %10"PRIu64 +- " RX-bytes: %10"PRIu64"\n", +- i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); +- } +- } +- if (port->tx_queue_stats_mapping_enabled) { +- printf("\n"); +- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { +- printf(" Stats reg %2d TX-packets: %10"PRIu64 +- " TX-bytes: %10"PRIu64"\n", +- i, stats.q_opackets[i], stats.q_obytes[i]); +- } +- } ++ printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " ++ "%-"PRIu64"\n", stats.ipackets, stats.imissed, 
stats.ibytes); ++ printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); ++ printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); ++ printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " ++ "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); + + diff_ns = 0; + if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { +@@ -398,54 +362,6 @@ nic_xstats_clear(portid_t port_id) + } + } + +-void +-nic_stats_mapping_display(portid_t port_id) +-{ +- struct rte_port *port = &ports[port_id]; +- uint16_t i; +- +- static const char *nic_stats_mapping_border = "########################"; +- +- if (port_id_is_invalid(port_id, ENABLED_WARN)) { +- print_valid_ports(); +- return; +- } +- +- if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { +- printf("Port id %d - either does not support queue statistic mapping or" +- " no queue statistic mapping set\n", port_id); +- return; +- } +- +- printf("\n %s NIC statistics mapping for port %-2d %s\n", +- nic_stats_mapping_border, port_id, nic_stats_mapping_border); +- +- if (port->rx_queue_stats_mapping_enabled) { +- for (i = 0; i < nb_rx_queue_stats_mappings; i++) { +- if (rx_queue_stats_mappings[i].port_id == port_id) { +- printf(" RX-queue %2d mapped to Stats Reg %2d\n", +- rx_queue_stats_mappings[i].queue_id, +- rx_queue_stats_mappings[i].stats_counter_id); +- } +- } +- printf("\n"); +- } +- +- +- if (port->tx_queue_stats_mapping_enabled) { +- for (i = 0; i < nb_tx_queue_stats_mappings; i++) { +- if (tx_queue_stats_mappings[i].port_id == port_id) { +- printf(" TX-queue %2d mapped to Stats Reg %2d\n", +- tx_queue_stats_mappings[i].queue_id, +- tx_queue_stats_mappings[i].stats_counter_id); +- } +- } +- } +- +- printf(" %s####################################%s\n", +- nic_stats_mapping_border, nic_stats_mapping_border); +-} +- + void + rx_queue_infos_display(portid_t port_id, uint16_t queue_id) + { +@@ -1518,7 +1434,7 @@ port_mtu_set(portid_t port_id, uint16_t mtu) + * device supports jumbo frame. 
+ */ + eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; +- if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { ++ if (mtu > RTE_ETHER_MTU) { + rte_port->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rte_port->dev_conf.rxmode.max_rx_pkt_len = +@@ -1963,6 +1879,7 @@ port_shared_action_query(portid_t port_id, uint32_t id) + return -EINVAL; + switch (psa->type) { + case RTE_FLOW_ACTION_TYPE_RSS: ++ case RTE_FLOW_ACTION_TYPE_AGE: + data = &default_data; + break; + default: +@@ -1979,6 +1896,20 @@ port_shared_action_query(portid_t port_id, uint32_t id) + *((uint32_t *)data)); + data = NULL; + break; ++ case RTE_FLOW_ACTION_TYPE_AGE: ++ if (!ret) { ++ struct rte_flow_query_age *resp = data; ++ ++ printf("AGE:\n" ++ " aged: %u\n" ++ " sec_since_last_hit_valid: %u\n" ++ " sec_since_last_hit: %" PRIu32 "\n", ++ resp->aged, ++ resp->sec_since_last_hit_valid, ++ resp->sec_since_last_hit); ++ } ++ data = NULL; ++ break; + default: + printf("Shared action %u (type: %d) on port %u doesn't support" + " query\n", id, psa->type, port_id); +@@ -1986,6 +1917,7 @@ port_shared_action_query(portid_t port_id, uint32_t id) + } + return ret; + } ++ + static struct port_flow_tunnel * + port_flow_tunnel_offload_cmd_prep(portid_t port_id, + const struct rte_flow_item *pattern, +@@ -2573,7 +2505,7 @@ tx_queue_id_is_invalid(queueid_t txq_id) + { + if (txq_id < nb_txq) + return 0; +- printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); ++ printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq); + return 1; + } + +@@ -3785,7 +3717,7 @@ show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) + printf("%s : ", + rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); + +- for (j = RTE_ETH_FEC_AUTO; j < RTE_DIM(fec_mode_name); j++) { ++ for (j = 0; j < RTE_DIM(fec_mode_name); j++) { + if (RTE_ETH_FEC_MODE_TO_CAPA(j) & + speed_fec_capa[i].capa) + printf("%s ", fec_mode_name[j].name); +@@ -4528,8 +4460,7 @@ tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) + void + set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) + { +- uint16_t i; +- uint8_t existing_mapping_found = 0; ++ int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; +@@ -4539,40 +4470,23 @@ set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) + + if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { + printf("map_value not in required range 0..%d\n", +- RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); ++ RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + return; + } + +- if (!is_rx) { /*then tx*/ +- for (i = 0; i < nb_tx_queue_stats_mappings; i++) { +- if ((tx_queue_stats_mappings[i].port_id == port_id) && +- (tx_queue_stats_mappings[i].queue_id == queue_id)) { +- tx_queue_stats_mappings[i].stats_counter_id = map_value; +- existing_mapping_found = 1; +- break; +- } +- } +- if (!existing_mapping_found) { /* A new additional mapping... 
*/ +- tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; +- tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; +- tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; +- nb_tx_queue_stats_mappings++; +- } +- } +- else { /*rx*/ +- for (i = 0; i < nb_rx_queue_stats_mappings; i++) { +- if ((rx_queue_stats_mappings[i].port_id == port_id) && +- (rx_queue_stats_mappings[i].queue_id == queue_id)) { +- rx_queue_stats_mappings[i].stats_counter_id = map_value; +- existing_mapping_found = 1; +- break; +- } ++ if (!is_rx) { /* tx */ ++ ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, ++ map_value); ++ if (ret) { ++ printf("failed to set tx queue stats mapping.\n"); ++ return; + } +- if (!existing_mapping_found) { /* A new additional mapping... */ +- rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; +- rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; +- rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; +- nb_rx_queue_stats_mappings++; ++ } else { /* rx */ ++ ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, ++ map_value); ++ if (ret) { ++ printf("failed to set rx queue stats mapping.\n"); ++ return; + } + } + } +diff --git a/dpdk/app/test-pmd/flowgen.c b/dpdk/app/test-pmd/flowgen.c +index acf3e24605..cabfc688ff 100644 +--- a/dpdk/app/test-pmd/flowgen.c ++++ b/dpdk/app/test-pmd/flowgen.c +@@ -53,8 +53,11 @@ static struct rte_ether_addr cfg_ether_dst = + + #define IP_DEFTTL 64 /* from RFC 1340. */ + ++/* Use this type to inform GCC that ip_sum violates aliasing rules. */ ++typedef unaligned_uint16_t alias_int16_t __attribute__((__may_alias__)); ++ + static inline uint16_t +-ip_sum(const unaligned_uint16_t *hdr, int hdr_len) ++ip_sum(const alias_int16_t *hdr, int hdr_len) + { + uint32_t sum = 0; + +@@ -150,7 +153,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs) + next_flow); + ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_size - + sizeof(*eth_hdr)); +- ip_hdr->hdr_checksum = ip_sum((unaligned_uint16_t *)ip_hdr, ++ ip_hdr->hdr_checksum = ip_sum((const alias_int16_t *)ip_hdr, + sizeof(*ip_hdr)); + + /* Initialize UDP header. */ +diff --git a/dpdk/app/test-pmd/parameters.c b/dpdk/app/test-pmd/parameters.c +index bbb68a55ff..df5eb10d84 100644 +--- a/dpdk/app/test-pmd/parameters.c ++++ b/dpdk/app/test-pmd/parameters.c +@@ -176,12 +176,6 @@ usage(char* progname) + "(0 <= N <= value of txd).\n"); + printf(" --txrst=N: set the transmit RS bit threshold of TX rings to N " + "(0 <= N <= value of txd).\n"); +- printf(" --tx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping]: " +- "tx queues statistics counters mapping " +- "(0 <= mapping <= %d).\n", RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); +- printf(" --rx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping]: " +- "rx queues statistics counters mapping " +- "(0 <= mapping <= %d).\n", RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + printf(" --no-flush-rx: Don't flush RX streams before forwarding." 
+ " Used mainly with PCAP drivers.\n"); + printf(" --rxoffs=X[,Y]*: set RX segment offsets for split.\n"); +@@ -300,93 +294,6 @@ parse_fwd_portmask(const char *portmask) + set_fwd_ports_mask((uint64_t) pm); + } + +- +-static int +-parse_queue_stats_mapping_config(const char *q_arg, int is_rx) +-{ +- char s[256]; +- const char *p, *p0 = q_arg; +- char *end; +- enum fieldnames { +- FLD_PORT = 0, +- FLD_QUEUE, +- FLD_STATS_COUNTER, +- _NUM_FLD +- }; +- unsigned long int_fld[_NUM_FLD]; +- char *str_fld[_NUM_FLD]; +- int i; +- unsigned size; +- +- /* reset from value set at definition */ +- is_rx ? (nb_rx_queue_stats_mappings = 0) : (nb_tx_queue_stats_mappings = 0); +- +- while ((p = strchr(p0,'(')) != NULL) { +- ++p; +- if((p0 = strchr(p,')')) == NULL) +- return -1; +- +- size = p0 - p; +- if(size >= sizeof(s)) +- return -1; +- +- snprintf(s, sizeof(s), "%.*s", size, p); +- if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) +- return -1; +- for (i = 0; i < _NUM_FLD; i++){ +- errno = 0; +- int_fld[i] = strtoul(str_fld[i], &end, 0); +- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) +- return -1; +- } +- /* Check mapping field is in correct range (0..RTE_ETHDEV_QUEUE_STAT_CNTRS-1) */ +- if (int_fld[FLD_STATS_COUNTER] >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { +- printf("Stats counter not in the correct range 0..%d\n", +- RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); +- return -1; +- } +- +- if (!is_rx) { +- if ((nb_tx_queue_stats_mappings >= +- MAX_TX_QUEUE_STATS_MAPPINGS)) { +- printf("exceeded max number of TX queue " +- "statistics mappings: %hu\n", +- nb_tx_queue_stats_mappings); +- return -1; +- } +- tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].port_id = +- (uint8_t)int_fld[FLD_PORT]; +- tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].queue_id = +- (uint8_t)int_fld[FLD_QUEUE]; +- tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].stats_counter_id = +- (uint8_t)int_fld[FLD_STATS_COUNTER]; +- ++nb_tx_queue_stats_mappings; +- } +- else { +- if ((nb_rx_queue_stats_mappings >= +- MAX_RX_QUEUE_STATS_MAPPINGS)) { +- printf("exceeded max number of RX queue " +- "statistics mappings: %hu\n", +- nb_rx_queue_stats_mappings); +- return -1; +- } +- rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].port_id = +- (uint8_t)int_fld[FLD_PORT]; +- rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].queue_id = +- (uint8_t)int_fld[FLD_QUEUE]; +- rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].stats_counter_id = +- (uint8_t)int_fld[FLD_STATS_COUNTER]; +- ++nb_rx_queue_stats_mappings; +- } +- +- } +-/* Reassign the rx/tx_queue_stats_mappings pointer to point to this newly populated array rather */ +-/* than to the default array (that was set at its definition) */ +- is_rx ? 
(rx_queue_stats_mappings = rx_queue_stats_mappings_array) : +- (tx_queue_stats_mappings = tx_queue_stats_mappings_array); +- return 0; +-} +- + static void + print_invalid_socket_id_error(void) + { +@@ -664,8 +571,6 @@ launch_args_parse(int argc, char** argv) + { "rxht", 1, 0, 0 }, + { "rxwt", 1, 0, 0 }, + { "rxfreet", 1, 0, 0 }, +- { "tx-queue-stats-mapping", 1, 0, 0 }, +- { "rx-queue-stats-mapping", 1, 0, 0 }, + { "no-flush-rx", 0, 0, 0 }, + { "flow-isolate-all", 0, 0, 0 }, + { "rxoffs", 1, 0, 0 }, +@@ -929,12 +834,9 @@ launch_args_parse(int argc, char** argv) + } + if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) { + n = atoi(optarg); +- if (n >= RTE_ETHER_MIN_LEN) { ++ if (n >= RTE_ETHER_MIN_LEN) + rx_mode.max_rx_pkt_len = (uint32_t) n; +- if (n > RTE_ETHER_MAX_LEN) +- rx_offloads |= +- DEV_RX_OFFLOAD_JUMBO_FRAME; +- } else ++ else + rte_exit(EXIT_FAILURE, + "Invalid max-pkt-len=%d - should be > %d\n", + n, RTE_ETHER_MIN_LEN); +@@ -1279,18 +1181,6 @@ launch_args_parse(int argc, char** argv) + else + rte_exit(EXIT_FAILURE, "rxfreet must be >= 0\n"); + } +- if (!strcmp(lgopts[opt_idx].name, "tx-queue-stats-mapping")) { +- if (parse_queue_stats_mapping_config(optarg, TX)) { +- rte_exit(EXIT_FAILURE, +- "invalid TX queue statistics mapping config entered\n"); +- } +- } +- if (!strcmp(lgopts[opt_idx].name, "rx-queue-stats-mapping")) { +- if (parse_queue_stats_mapping_config(optarg, RX)) { +- rte_exit(EXIT_FAILURE, +- "invalid RX queue statistics mapping config entered\n"); +- } +- } + if (!strcmp(lgopts[opt_idx].name, "rxoffs")) { + unsigned int seg_off[MAX_SEGS_BUFFER_SPLIT]; + unsigned int nb_offs; +diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c +index 33fc0fddf5..555852ae5e 100644 +--- a/dpdk/app/test-pmd/testpmd.c ++++ b/dpdk/app/test-pmd/testpmd.c +@@ -443,8 +443,11 @@ lcoreid_t latencystats_lcore_id = -1; + * Ethernet device configuration. + */ + struct rte_eth_rxmode rx_mode = { +- .max_rx_pkt_len = RTE_ETHER_MAX_LEN, +- /**< Default maximum frame length. */ ++ /* Default maximum frame length. ++ * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead" ++ * in init_config(). ++ */ ++ .max_rx_pkt_len = 0, + }; + + struct rte_eth_txmode tx_mode = { +@@ -476,15 +479,6 @@ struct rte_fdir_conf fdir_conf = { + + volatile int test_done = 1; /* stop packet forwarding when set to 1. 
*/ + +-struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; +-struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; +- +-struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; +-struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; +- +-uint16_t nb_tx_queue_stats_mappings = 0; +-uint16_t nb_rx_queue_stats_mappings = 0; +- + /* + * Display zero values by default for xstats + */ +@@ -520,8 +514,6 @@ enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS; + + /* Forward function declarations */ + static void setup_attached_port(portid_t pi); +-static void map_port_queue_stats_mapping_registers(portid_t pi, +- struct rte_port *port); + static void check_all_ports_link_status(uint32_t port_mask); + static int eth_event_callback(portid_t port_id, + enum rte_eth_event_type type, +@@ -1457,6 +1449,11 @@ init_config(void) + rte_exit(EXIT_FAILURE, + "rte_eth_dev_info_get() failed\n"); + ++ ret = update_jumbo_frame_offload(pid); ++ if (ret != 0) ++ printf("Updating jumbo frame offload failed for port %u\n", ++ pid); ++ + if (!(port->dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + port->dev_conf.txmode.offloads &= +@@ -1857,8 +1854,6 @@ fwd_stats_display(void) + fwd_cycles += fs->core_cycles; + } + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { +- uint8_t j; +- + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + +@@ -1881,88 +1876,34 @@ fwd_stats_display(void) + printf("\n %s Forward statistics for port %-2d %s\n", + fwd_stats_border, pt_id, fwd_stats_border); + +- if (!port->rx_queue_stats_mapping_enabled && +- !port->tx_queue_stats_mapping_enabled) { +- printf(" RX-packets: %-14"PRIu64 +- " RX-dropped: %-14"PRIu64 +- "RX-total: %-"PRIu64"\n", +- stats.ipackets, stats.imissed, +- stats.ipackets + stats.imissed); +- +- if (cur_fwd_eng == &csum_fwd_engine) +- printf(" Bad-ipcsum: %-14"PRIu64 +- " Bad-l4csum: %-14"PRIu64 +- "Bad-outer-l4csum: %-14"PRIu64"\n", +- ports_stats[pt_id].rx_bad_ip_csum, +- ports_stats[pt_id].rx_bad_l4_csum, +- ports_stats[pt_id].rx_bad_outer_l4_csum); +- if (stats.ierrors + stats.rx_nombuf > 0) { +- printf(" RX-error: %-"PRIu64"\n", +- stats.ierrors); +- printf(" RX-nombufs: %-14"PRIu64"\n", +- stats.rx_nombuf); +- } +- +- printf(" TX-packets: %-14"PRIu64 +- " TX-dropped: %-14"PRIu64 +- "TX-total: %-"PRIu64"\n", +- stats.opackets, ports_stats[pt_id].tx_dropped, +- stats.opackets + ports_stats[pt_id].tx_dropped); +- } else { +- printf(" RX-packets: %14"PRIu64 +- " RX-dropped:%14"PRIu64 +- " RX-total:%14"PRIu64"\n", +- stats.ipackets, stats.imissed, +- stats.ipackets + stats.imissed); +- +- if (cur_fwd_eng == &csum_fwd_engine) +- printf(" Bad-ipcsum:%14"PRIu64 +- " Bad-l4csum:%14"PRIu64 +- " Bad-outer-l4csum: %-14"PRIu64"\n", +- ports_stats[pt_id].rx_bad_ip_csum, +- ports_stats[pt_id].rx_bad_l4_csum, +- ports_stats[pt_id].rx_bad_outer_l4_csum); +- if ((stats.ierrors + stats.rx_nombuf) > 0) { +- printf(" RX-error:%"PRIu64"\n", stats.ierrors); +- printf(" RX-nombufs: %14"PRIu64"\n", +- stats.rx_nombuf); +- } ++ printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 ++ "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, ++ stats.ipackets + stats.imissed); + +- printf(" TX-packets: %14"PRIu64 +- " TX-dropped:%14"PRIu64 +- " TX-total:%14"PRIu64"\n", +- stats.opackets, ports_stats[pt_id].tx_dropped, +- stats.opackets + ports_stats[pt_id].tx_dropped); ++ if (cur_fwd_eng == &csum_fwd_engine) ++ printf(" Bad-ipcsum: 
%-14"PRIu64 ++ " Bad-l4csum: %-14"PRIu64 ++ "Bad-outer-l4csum: %-14"PRIu64"\n", ++ ports_stats[pt_id].rx_bad_ip_csum, ++ ports_stats[pt_id].rx_bad_l4_csum, ++ ports_stats[pt_id].rx_bad_outer_l4_csum); ++ if (stats.ierrors + stats.rx_nombuf > 0) { ++ printf(" RX-error: %-"PRIu64"\n", stats.ierrors); ++ printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); + } + ++ printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 ++ "TX-total: %-"PRIu64"\n", ++ stats.opackets, ports_stats[pt_id].tx_dropped, ++ stats.opackets + ports_stats[pt_id].tx_dropped); ++ + if (record_burst_stats) { + if (ports_stats[pt_id].rx_stream) + pkt_burst_stats_display("RX", + &ports_stats[pt_id].rx_stream->rx_burst_stats); + if (ports_stats[pt_id].tx_stream) + pkt_burst_stats_display("TX", +- &ports_stats[pt_id].tx_stream->tx_burst_stats); +- } +- +- if (port->rx_queue_stats_mapping_enabled) { +- printf("\n"); +- for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { +- printf(" Stats reg %2d RX-packets:%14"PRIu64 +- " RX-errors:%14"PRIu64 +- " RX-bytes:%14"PRIu64"\n", +- j, stats.q_ipackets[j], +- stats.q_errors[j], stats.q_ibytes[j]); +- } +- printf("\n"); +- } +- if (port->tx_queue_stats_mapping_enabled) { +- for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { +- printf(" Stats reg %2d TX-packets:%14"PRIu64 +- " TX-bytes:%14" +- PRIu64"\n", +- j, stats.q_opackets[j], +- stats.q_obytes[j]); +- } ++ &ports_stats[pt_id].tx_stream->tx_burst_stats); + } + + printf(" %s--------------------------------%s\n", +@@ -2236,11 +2177,6 @@ start_packet_forwarding(int with_tx_first) + rxtx_config_display(); + + fwd_stats_reset(); +- for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { +- pt_id = fwd_ports_ids[i]; +- port = &ports[pt_id]; +- map_port_queue_stats_mapping_registers(pt_id, port); +- } + if (with_tx_first) { + port_fwd_begin = tx_only_engine.port_fwd_begin; + if (port_fwd_begin != NULL) { +@@ -2806,6 +2742,9 @@ stop_port(portid_t pid) + } + } + ++ if (port->flow_list) ++ port_flow_flush(pi); ++ + if (rte_eth_dev_stop(pi) != 0) + RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", + pi); +@@ -3352,84 +3291,6 @@ dev_event_callback(const char *device_name, enum rte_dev_event_type type, + } + } + +-static int +-set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) +-{ +- uint16_t i; +- int diag; +- uint8_t mapping_found = 0; +- +- for (i = 0; i < nb_tx_queue_stats_mappings; i++) { +- if ((tx_queue_stats_mappings[i].port_id == port_id) && +- (tx_queue_stats_mappings[i].queue_id < nb_txq )) { +- diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, +- tx_queue_stats_mappings[i].queue_id, +- tx_queue_stats_mappings[i].stats_counter_id); +- if (diag != 0) +- return diag; +- mapping_found = 1; +- } +- } +- if (mapping_found) +- port->tx_queue_stats_mapping_enabled = 1; +- return 0; +-} +- +-static int +-set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) +-{ +- uint16_t i; +- int diag; +- uint8_t mapping_found = 0; +- +- for (i = 0; i < nb_rx_queue_stats_mappings; i++) { +- if ((rx_queue_stats_mappings[i].port_id == port_id) && +- (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { +- diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, +- rx_queue_stats_mappings[i].queue_id, +- rx_queue_stats_mappings[i].stats_counter_id); +- if (diag != 0) +- return diag; +- mapping_found = 1; +- } +- } +- if (mapping_found) +- port->rx_queue_stats_mapping_enabled = 1; +- return 0; +-} +- +-static void +-map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) +-{ +- 
int diag = 0; +- +- diag = set_tx_queue_stats_mapping_registers(pi, port); +- if (diag != 0) { +- if (diag == -ENOTSUP) { +- port->tx_queue_stats_mapping_enabled = 0; +- printf("TX queue stats mapping not supported port id=%d\n", pi); +- } +- else +- rte_exit(EXIT_FAILURE, +- "set_tx_queue_stats_mapping_registers " +- "failed for port id=%d diag=%d\n", +- pi, diag); +- } +- +- diag = set_rx_queue_stats_mapping_registers(pi, port); +- if (diag != 0) { +- if (diag == -ENOTSUP) { +- port->rx_queue_stats_mapping_enabled = 0; +- printf("RX queue stats mapping not supported port id=%d\n", pi); +- } +- else +- rte_exit(EXIT_FAILURE, +- "set_rx_queue_stats_mapping_registers " +- "failed for port id=%d diag=%d\n", +- pi, diag); +- } +-} +- + static void + rxtx_port_config(struct rte_port *port) + { +@@ -3487,6 +3348,80 @@ rxtx_port_config(struct rte_port *port) + } + } + ++/* ++ * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload, ++ * MTU is also aligned if JUMBO_FRAME offload is not set. ++ * ++ * port->dev_info should be set before calling this function. ++ * ++ * return 0 on success, negative on error ++ */ ++int ++update_jumbo_frame_offload(portid_t portid) ++{ ++ struct rte_port *port = &ports[portid]; ++ uint32_t eth_overhead; ++ uint64_t rx_offloads; ++ int ret; ++ bool on; ++ ++ /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */ ++ if (port->dev_info.max_mtu != UINT16_MAX && ++ port->dev_info.max_rx_pktlen > port->dev_info.max_mtu) ++ eth_overhead = port->dev_info.max_rx_pktlen - ++ port->dev_info.max_mtu; ++ else ++ eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; ++ ++ rx_offloads = port->dev_conf.rxmode.offloads; ++ ++ /* Default config value is 0 to use PMD specific overhead */ ++ if (port->dev_conf.rxmode.max_rx_pkt_len == 0) ++ port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead; ++ ++ if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) { ++ rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; ++ on = false; ++ } else { ++ if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { ++ printf("Frame size (%u) is not supported by port %u\n", ++ port->dev_conf.rxmode.max_rx_pkt_len, ++ portid); ++ return -1; ++ } ++ rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; ++ on = true; ++ } ++ ++ if (rx_offloads != port->dev_conf.rxmode.offloads) { ++ uint16_t qid; ++ ++ port->dev_conf.rxmode.offloads = rx_offloads; ++ ++ /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */ ++ for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) { ++ if (on) ++ port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; ++ else ++ port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; ++ } ++ } ++ ++ /* If JUMBO_FRAME is set MTU conversion done by ethdev layer, ++ * if unset do it here ++ */ ++ if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { ++ ret = rte_eth_dev_set_mtu(portid, ++ port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead); ++ if (ret) ++ printf("Failed to set MTU to %u for port %u\n", ++ port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead, ++ portid); ++ } ++ ++ return 0; ++} ++ + void + init_port_config(void) + { +@@ -3526,7 +3461,6 @@ init_port_config(void) + if (ret != 0) + return; + +- map_port_queue_stats_mapping_registers(pid, port); + #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS + rte_pmd_ixgbe_bypass_init(pid); + #endif +@@ -3737,8 +3671,6 @@ init_port_dcb_config(portid_t pid, + if (retval != 0) + return retval; + +- map_port_queue_stats_mapping_registers(pid, rte_port); +- + 
rte_port->dcb_flag = 1; + + return 0; +diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h +index 6b901a894f..2f8f5a92e4 100644 +--- a/dpdk/app/test-pmd/testpmd.h ++++ b/dpdk/app/test-pmd/testpmd.h +@@ -206,8 +206,6 @@ struct rte_port { + uint16_t tunnel_tso_segsz; /**< Segmentation offload MSS for tunneled pkts. */ + uint16_t tx_vlan_id;/**< The tag ID */ + uint16_t tx_vlan_id_outer;/**< The outer tag ID */ +- uint8_t tx_queue_stats_mapping_enabled; +- uint8_t rx_queue_stats_mapping_enabled; + volatile uint16_t port_status; /**< port started or not */ + uint8_t need_setup; /**< port just attached */ + uint8_t need_reconfig; /**< need reconfiguring port or not */ +@@ -326,25 +324,6 @@ enum dcb_mode_enable + DCB_ENABLED + }; + +-#define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32 tx_queues/port */ +-#define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */ +- +-struct queue_stats_mappings { +- portid_t port_id; +- uint16_t queue_id; +- uint8_t stats_counter_id; +-} __rte_cache_aligned; +- +-extern struct queue_stats_mappings tx_queue_stats_mappings_array[]; +-extern struct queue_stats_mappings rx_queue_stats_mappings_array[]; +- +-/* Assign both tx and rx queue stats mappings to the same default values */ +-extern struct queue_stats_mappings *tx_queue_stats_mappings; +-extern struct queue_stats_mappings *rx_queue_stats_mappings; +- +-extern uint16_t nb_tx_queue_stats_mappings; +-extern uint16_t nb_rx_queue_stats_mappings; +- + extern uint8_t xstats_hide_zero; /**< Hide zero values for xstats display */ + + /* globals used for configuration */ +@@ -790,7 +769,6 @@ void nic_stats_display(portid_t port_id); + void nic_stats_clear(portid_t port_id); + void nic_xstats_display(portid_t port_id); + void nic_xstats_clear(portid_t port_id); +-void nic_stats_mapping_display(portid_t port_id); + void device_infos_display(const char *identifier); + void port_infos_display(portid_t port_id); + void port_summary_display(portid_t port_id); +@@ -1027,6 +1005,7 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue, + __rte_unused void *user_param); + void add_tx_dynf_callback(portid_t portid); + void remove_tx_dynf_callback(portid_t portid); ++int update_jumbo_frame_offload(portid_t portid); + + /* + * Work-around of a compilation error with ICC on invocations of the +diff --git a/dpdk/app/test-pmd/util.c b/dpdk/app/test-pmd/util.c +index 649bf8f53a..a9e431a8b2 100644 +--- a/dpdk/app/test-pmd/util.c ++++ b/dpdk/app/test-pmd/util.c +@@ -15,12 +15,23 @@ + + #include "testpmd.h" + ++#define MAX_STRING_LEN 8192 ++ ++#define MKDUMPSTR(buf, buf_size, cur_len, ...) 
\ ++do { \ ++ if (cur_len >= buf_size) \ ++ break; \ ++ cur_len += snprintf(buf + cur_len, buf_size - cur_len, __VA_ARGS__); \ ++} while (0) ++ + static inline void +-print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr) ++print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr, ++ char print_buf[], size_t buf_size, size_t *cur_len) + { + char buf[RTE_ETHER_ADDR_FMT_SIZE]; ++ + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); +- printf("%s%s", what, buf); ++ MKDUMPSTR(print_buf, buf_size, *cur_len, "%s%s", what, buf); + } + + static inline bool +@@ -74,13 +85,15 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + uint32_t vx_vni; + const char *reason; + int dynf_index; ++ char print_buf[MAX_STRING_LEN]; ++ size_t buf_size = MAX_STRING_LEN; ++ size_t cur_len = 0; + + if (!nb_pkts) + return; +- printf("port %u/queue %u: %s %u packets\n", +- port_id, queue, +- is_rx ? "received" : "sent", +- (unsigned int) nb_pkts); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "port %u/queue %u: %s %u packets\n", port_id, queue, ++ is_rx ? "received" : "sent", (unsigned int) nb_pkts); + for (i = 0; i < nb_pkts; i++) { + int ret; + struct rte_flow_error error; +@@ -93,95 +106,128 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type); + ret = rte_flow_get_restore_info(port_id, mb, &info, &error); + if (!ret) { +- printf("restore info:"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "restore info:"); + if (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) { + struct port_flow_tunnel *port_tunnel; + + port_tunnel = port_flow_locate_tunnel + (port_id, &info.tunnel); +- printf(" - tunnel"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - tunnel"); + if (port_tunnel) +- printf(" #%u", port_tunnel->id); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " #%u", port_tunnel->id); + else +- printf(" %s", "-none-"); +- printf(" type %s", +- port_flow_tunnel_type(&info.tunnel)); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " %s", "-none-"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " type %s", port_flow_tunnel_type ++ (&info.tunnel)); + } else { +- printf(" - no tunnel info"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - no tunnel info"); + } + if (info.flags & RTE_FLOW_RESTORE_INFO_ENCAPSULATED) +- printf(" - outer header present"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - outer header present"); + else +- printf(" - no outer header"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - no outer header"); + if (info.flags & RTE_FLOW_RESTORE_INFO_GROUP_ID) +- printf(" - miss group %u", info.group_id); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - miss group %u", info.group_id); + else +- printf(" - no miss group"); +- printf("\n"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - no miss group"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, "\n"); + } +- print_ether_addr(" src=", ð_hdr->s_addr); +- print_ether_addr(" - dst=", ð_hdr->d_addr); +- printf(" - type=0x%04x - length=%u - nb_segs=%d", +- eth_type, (unsigned int) mb->pkt_len, +- (int)mb->nb_segs); ++ print_ether_addr(" src=", ð_hdr->s_addr, ++ print_buf, buf_size, &cur_len); ++ print_ether_addr(" - dst=", ð_hdr->d_addr, ++ print_buf, buf_size, &cur_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - type=0x%04x - length=%u - nb_segs=%d", ++ eth_type, (unsigned int) mb->pkt_len, ++ (int)mb->nb_segs); + ol_flags = mb->ol_flags; + if (ol_flags & PKT_RX_RSS_HASH) { +- printf(" - RSS hash=0x%x", 
(unsigned int) mb->hash.rss); +- printf(" - RSS queue=0x%x", (unsigned int) queue); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - RSS hash=0x%x", ++ (unsigned int) mb->hash.rss); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - RSS queue=0x%x", (unsigned int) queue); + } + if (ol_flags & PKT_RX_FDIR) { +- printf(" - FDIR matched "); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - FDIR matched "); + if (ol_flags & PKT_RX_FDIR_ID) +- printf("ID=0x%x", +- mb->hash.fdir.hi); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "ID=0x%x", mb->hash.fdir.hi); + else if (ol_flags & PKT_RX_FDIR_FLX) +- printf("flex bytes=0x%08x %08x", +- mb->hash.fdir.hi, mb->hash.fdir.lo); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "flex bytes=0x%08x %08x", ++ mb->hash.fdir.hi, mb->hash.fdir.lo); + else +- printf("hash=0x%x ID=0x%x ", +- mb->hash.fdir.hash, mb->hash.fdir.id); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "hash=0x%x ID=0x%x ", ++ mb->hash.fdir.hash, mb->hash.fdir.id); + } + if (is_timestamp_enabled(mb)) +- printf(" - timestamp %"PRIu64" ", get_timestamp(mb)); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - timestamp %"PRIu64" ", get_timestamp(mb)); + if (ol_flags & PKT_RX_QINQ) +- printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x", +- mb->vlan_tci, mb->vlan_tci_outer); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x", ++ mb->vlan_tci, mb->vlan_tci_outer); + else if (ol_flags & PKT_RX_VLAN) +- printf(" - VLAN tci=0x%x", mb->vlan_tci); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - VLAN tci=0x%x", mb->vlan_tci); + if (!is_rx && (ol_flags & PKT_TX_DYNF_METADATA)) +- printf(" - Tx metadata: 0x%x", +- *RTE_FLOW_DYNF_METADATA(mb)); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - Tx metadata: 0x%x", ++ *RTE_FLOW_DYNF_METADATA(mb)); + if (is_rx && (ol_flags & PKT_RX_DYNF_METADATA)) +- printf(" - Rx metadata: 0x%x", +- *RTE_FLOW_DYNF_METADATA(mb)); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - Rx metadata: 0x%x", ++ *RTE_FLOW_DYNF_METADATA(mb)); + for (dynf_index = 0; dynf_index < 64; dynf_index++) { + if (dynf_names[dynf_index][0] != '\0') +- printf(" - dynf %s: %d", +- dynf_names[dynf_index], +- !!(ol_flags & (1UL << dynf_index))); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - dynf %s: %d", ++ dynf_names[dynf_index], ++ !!(ol_flags & (1UL << dynf_index))); + } + if (mb->packet_type) { + rte_get_ptype_name(mb->packet_type, buf, sizeof(buf)); +- printf(" - hw ptype: %s", buf); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - hw ptype: %s", buf); + } + sw_packet_type = rte_net_get_ptype(mb, &hdr_lens, + RTE_PTYPE_ALL_MASK); + rte_get_ptype_name(sw_packet_type, buf, sizeof(buf)); +- printf(" - sw ptype: %s", buf); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - sw ptype: %s", buf); + if (sw_packet_type & RTE_PTYPE_L2_MASK) +- printf(" - l2_len=%d", hdr_lens.l2_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - l2_len=%d", ++ hdr_lens.l2_len); + if (sw_packet_type & RTE_PTYPE_L3_MASK) +- printf(" - l3_len=%d", hdr_lens.l3_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - l3_len=%d", ++ hdr_lens.l3_len); + if (sw_packet_type & RTE_PTYPE_L4_MASK) +- printf(" - l4_len=%d", hdr_lens.l4_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, " - l4_len=%d", ++ hdr_lens.l4_len); + if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK) +- printf(" - tunnel_len=%d", hdr_lens.tunnel_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - tunnel_len=%d", hdr_lens.tunnel_len); + if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK) +- printf(" - inner_l2_len=%d", 
hdr_lens.inner_l2_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - inner_l2_len=%d", hdr_lens.inner_l2_len); + if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK) +- printf(" - inner_l3_len=%d", hdr_lens.inner_l3_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - inner_l3_len=%d", hdr_lens.inner_l3_len); + if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK) +- printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - inner_l4_len=%d", hdr_lens.inner_l4_len); + if (is_encapsulation) { + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; +@@ -218,18 +264,27 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + l2_len + l3_len + l4_len); + udp_port = RTE_BE_TO_CPU_16(udp_hdr->dst_port); + vx_vni = rte_be_to_cpu_32(vxlan_hdr->vx_vni); +- printf(" - VXLAN packet: packet type =%d, " +- "Destination UDP port =%d, VNI = %d", +- packet_type, udp_port, vx_vni >> 8); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - VXLAN packet: packet type =%d, " ++ "Destination UDP port =%d, VNI = %d", ++ packet_type, udp_port, vx_vni >> 8); + } + } +- printf(" - %s queue=0x%x", is_rx ? "Receive" : "Send", +- (unsigned int) queue); +- printf("\n"); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " - %s queue=0x%x", is_rx ? "Receive" : "Send", ++ (unsigned int) queue); ++ MKDUMPSTR(print_buf, buf_size, cur_len, "\n"); + rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf)); +- printf(" ol_flags: %s\n", buf); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ " ol_flags: %s\n", buf); + if (rte_mbuf_check(mb, 1, &reason) < 0) +- printf("INVALID mbuf: %s\n", reason); ++ MKDUMPSTR(print_buf, buf_size, cur_len, ++ "INVALID mbuf: %s\n", reason); ++ if (cur_len >= buf_size) ++ printf("%s ...\n", print_buf); ++ else ++ printf("%s", print_buf); ++ cur_len = 0; + } + } + +diff --git a/dpdk/app/test/meson.build b/dpdk/app/test/meson.build +index 94fd39fecb..bdbc619476 100644 +--- a/dpdk/app/test/meson.build ++++ b/dpdk/app/test/meson.build +@@ -406,7 +406,7 @@ cflags += ['-DALLOW_INTERNAL_API'] + + test_dep_objs = [] + if dpdk_conf.has('RTE_LIB_COMPRESSDEV') +- compress_test_dep = dependency('zlib', required: false) ++ compress_test_dep = dependency('zlib', required: false, method: 'pkg-config') + if compress_test_dep.found() + test_dep_objs += compress_test_dep + test_sources += 'test_compressdev.c' +diff --git a/dpdk/app/test/test.c b/dpdk/app/test/test.c +index ba0b0309b5..624dd48042 100644 +--- a/dpdk/app/test/test.c ++++ b/dpdk/app/test/test.c +@@ -164,29 +164,38 @@ main(int argc, char **argv) + + + #ifdef RTE_LIB_CMDLINE +- cl = cmdline_stdin_new(main_ctx, "RTE>>"); +- if (cl == NULL) { +- ret = -1; +- goto out; +- } +- + char *dpdk_test = getenv("DPDK_TEST"); + if (dpdk_test && strlen(dpdk_test)) { + char buf[1024]; ++ ++ cl = cmdline_new(main_ctx, "RTE>>", 0, 1); ++ if (cl == NULL) { ++ ret = -1; ++ goto out; ++ } ++ + snprintf(buf, sizeof(buf), "%s\n", dpdk_test); + if (cmdline_in(cl, buf, strlen(buf)) < 0) { + printf("error on cmdline input\n"); ++ ++ ret = -1; ++ } else { ++ ret = last_test_result; ++ } ++ cmdline_free(cl); ++ goto out; ++ } else { ++ /* if no DPDK_TEST env variable, go interactive */ ++ cl = cmdline_stdin_new(main_ctx, "RTE>>"); ++ if (cl == NULL) { + ret = -1; + goto out; + } + ++ cmdline_interact(cl); + cmdline_stdin_exit(cl); +- ret = last_test_result; +- goto out; ++ cmdline_free(cl); + } +- /* if no DPDK_TEST env variable, go interactive */ +- cmdline_interact(cl); +- cmdline_stdin_exit(cl); + #endif + ret = 0; + 
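Note: the app/test-pmd/util.c hunk above replaces the direct printf() calls in dump_pkt_burst() with MKDUMPSTR(), which appends every per-packet field into one buffer and emits it with a single printf() so dumps produced by different lcores no longer interleave ("fix packets dump overlapping" in the 20.11.1 fix list). Below is a minimal standalone sketch of that bounded-accumulation pattern; it does not depend on DPDK, and the APPENDSTR macro name and the sample field values are illustrative only, not part of the patch.

/* Minimal sketch (assumption: illustrative names, not patch content) of the
 * buffered-dump pattern adopted by testpmd's dump_pkt_burst(): append fields
 * with snprintf() into one buffer, then write it out with a single printf().
 */
#include <stdio.h>
#include <string.h>

#define DUMP_BUF_LEN 512

/* Same shape as MKDUMPSTR in the patch: stop appending once the buffer is
 * full; snprintf() still returns the would-be length, which is used later
 * to detect truncation.
 */
#define APPENDSTR(buf, buf_size, cur_len, ...)                            \
do {                                                                      \
	if ((cur_len) >= (buf_size))                                      \
		break;                                                    \
	(cur_len) += snprintf((buf) + (cur_len), (buf_size) - (cur_len), \
			      __VA_ARGS__);                               \
} while (0)

int
main(void)
{
	char print_buf[DUMP_BUF_LEN];
	size_t buf_size = sizeof(print_buf);
	size_t cur_len = 0;

	/* Sample fields only; real code appends MAC addresses, ptypes, etc. */
	APPENDSTR(print_buf, buf_size, cur_len,
		  "port %u/queue %u: %s %u packets\n", 0u, 0u, "received", 1u);
	APPENDSTR(print_buf, buf_size, cur_len,
		  "  - type=0x%04x - length=%u\n", 0x0800u, 64u);

	/* One write per packet; truncated output is marked the same way
	 * the patched util.c marks it.
	 */
	if (cur_len >= buf_size)
		printf("%s ...\n", print_buf);
	else
		printf("%s", print_buf);
	return 0;
}

As in the patch, snprintf() may report a would-be length larger than the remaining space, so the final length check is what decides whether the dump is emitted as-is or flagged as truncated with a trailing " ...".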
+diff --git a/dpdk/app/test/test_distributor.c b/dpdk/app/test/test_distributor.c +index f4c6229f16..961f326cd5 100644 +--- a/dpdk/app/test/test_distributor.c ++++ b/dpdk/app/test/test_distributor.c +@@ -217,6 +217,8 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p) + clear_packet_count(); + struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH]; + unsigned num_returned = 0; ++ unsigned int num_being_processed = 0; ++ unsigned int return_buffer_capacity = 127;/* RTE_DISTRIB_RETURNS_MASK */ + + /* flush out any remaining packets */ + rte_distributor_flush(db); +@@ -233,16 +235,16 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p) + for (i = 0; i < BIG_BATCH/BURST; i++) { + rte_distributor_process(db, + &many_bufs[i*BURST], BURST); +- count = rte_distributor_returned_pkts(db, +- &return_bufs[num_returned], +- BIG_BATCH - num_returned); +- num_returned += count; ++ num_being_processed += BURST; ++ do { ++ count = rte_distributor_returned_pkts(db, ++ &return_bufs[num_returned], ++ BIG_BATCH - num_returned); ++ num_being_processed -= count; ++ num_returned += count; ++ rte_distributor_flush(db); ++ } while (num_being_processed + BURST > return_buffer_capacity); + } +- rte_distributor_flush(db); +- count = rte_distributor_returned_pkts(db, +- &return_bufs[num_returned], +- BIG_BATCH - num_returned); +- num_returned += count; + retries = 0; + do { + rte_distributor_flush(db); +diff --git a/dpdk/app/test/test_event_crypto_adapter.c b/dpdk/app/test/test_event_crypto_adapter.c +index a0169aa6cf..335211cd8c 100644 +--- a/dpdk/app/test/test_event_crypto_adapter.c ++++ b/dpdk/app/test/test_event_crypto_adapter.c +@@ -183,6 +183,7 @@ test_op_forward_mode(uint8_t session_less) + cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + cipher_xform.next = NULL; + cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL; ++ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + + op = rte_crypto_op_alloc(params.op_mpool, + RTE_CRYPTO_OP_TYPE_SYMMETRIC); +@@ -382,6 +383,7 @@ test_op_new_mode(uint8_t session_less) + cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + cipher_xform.next = NULL; + cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL; ++ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + + op = rte_crypto_op_alloc(params.op_mpool, + RTE_CRYPTO_OP_TYPE_SYMMETRIC); +diff --git a/dpdk/app/test/test_ipsec.c b/dpdk/app/test/test_ipsec.c +index 9ad07a1790..d18220a885 100644 +--- a/dpdk/app/test/test_ipsec.c ++++ b/dpdk/app/test/test_ipsec.c +@@ -744,7 +744,7 @@ create_sa(enum rte_security_session_action_type action_type, + ut->ss[j].type = action_type; + rc = create_session(ut, &ts->qp_conf, ts->valid_dev, j); + if (rc != 0) +- return TEST_FAILED; ++ return rc; + + rc = rte_ipsec_sa_init(ut->ss[j].sa, &ut->sa_prm, sz); + rc = (rc > 0 && (uint32_t)rc <= sz) ? 
0 : -EINVAL; +@@ -1247,7 +1247,7 @@ test_ipsec_crypto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1349,7 +1349,7 @@ test_ipsec_crypto_outb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate input mbuf data */ +@@ -1458,7 +1458,7 @@ test_ipsec_inline_crypto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -1536,7 +1536,7 @@ test_ipsec_inline_proto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -1644,7 +1644,7 @@ test_ipsec_inline_crypto_outb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1722,7 +1722,7 @@ test_ipsec_inline_proto_outb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1798,7 +1798,7 @@ test_ipsec_lksd_proto_inb_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -1911,7 +1911,7 @@ test_ipsec_replay_inb_inside_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -2004,7 +2004,7 @@ test_ipsec_replay_inb_outside_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -2104,7 +2104,7 @@ test_ipsec_replay_inb_repeat_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -2205,7 +2205,7 @@ test_ipsec_replay_inb_inside_burst_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* Generate inbound mbuf data */ +@@ -2338,7 +2338,7 @@ test_ipsec_crypto_inb_burst_2sa_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 0 failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* create second rte_ipsec_sa */ +@@ -2348,7 +2348,7 @@ test_ipsec_crypto_inb_burst_2sa_null_null(int i) + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 1 failed, cfg %d\n", i); + destroy_sa(0); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +@@ -2424,7 +2424,7 
@@ test_ipsec_crypto_inb_burst_2sa_4grp_null_null(int i) + test_cfg[i].replay_win_sz, test_cfg[i].flags, 0); + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 0 failed, cfg %d\n", i); +- return TEST_FAILED; ++ return rc; + } + + /* create second rte_ipsec_sa */ +@@ -2434,7 +2434,7 @@ test_ipsec_crypto_inb_burst_2sa_4grp_null_null(int i) + if (rc != 0) { + RTE_LOG(ERR, USER1, "create_sa 1 failed, cfg %d\n", i); + destroy_sa(0); +- return TEST_FAILED; ++ return rc; + } + + /* Generate test mbuf data */ +diff --git a/dpdk/app/test/test_mcslock.c b/dpdk/app/test/test_mcslock.c +index fbca78707d..80eaecc90a 100644 +--- a/dpdk/app/test/test_mcslock.c ++++ b/dpdk/app/test/test_mcslock.c +@@ -37,10 +37,6 @@ + * lock multiple times. + */ + +-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me); +-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me); +-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me); +- + rte_mcslock_t *p_ml; + rte_mcslock_t *p_ml_try; + rte_mcslock_t *p_ml_perf; +@@ -53,7 +49,7 @@ static int + test_mcslock_per_core(__rte_unused void *arg) + { + /* Per core me node. */ +- rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me); ++ rte_mcslock_t ml_me; + + rte_mcslock_lock(&p_ml, &ml_me); + printf("MCS lock taken on core %u\n", rte_lcore_id()); +@@ -77,7 +73,7 @@ load_loop_fn(void *func_param) + const unsigned int lcore = rte_lcore_id(); + + /**< Per core me node. */ +- rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me); ++ rte_mcslock_t ml_perf_me; + + /* wait synchro */ + while (rte_atomic32_read(&synchro) == 0) +@@ -151,8 +147,8 @@ static int + test_mcslock_try(__rte_unused void *arg) + { + /**< Per core me node. */ +- rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me); +- rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me); ++ rte_mcslock_t ml_me; ++ rte_mcslock_t ml_try_me; + + /* Locked ml_try in the main lcore, so it should fail + * when trying to lock it in the worker lcore. +@@ -178,8 +174,8 @@ test_mcslock(void) + int i; + + /* Define per core me node. */ +- rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me); +- rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me); ++ rte_mcslock_t ml_me; ++ rte_mcslock_t ml_try_me; + + /* + * Test mcs lock & unlock on each core +diff --git a/dpdk/app/test/test_pmd_perf.c b/dpdk/app/test/test_pmd_perf.c +index 4db816a360..3a248d512c 100644 +--- a/dpdk/app/test/test_pmd_perf.c ++++ b/dpdk/app/test/test_pmd_perf.c +@@ -606,10 +606,10 @@ poll_burst(void *args) + static int + exec_burst(uint32_t flags, int lcore) + { +- unsigned i, portid, nb_tx = 0; ++ unsigned int portid, nb_tx = 0; + struct lcore_conf *conf; + uint32_t pkt_per_port; +- int num, idx = 0; ++ int num, i, idx = 0; + int diff_tsc; + + conf = &lcore_conf[lcore]; +@@ -628,16 +628,14 @@ exec_burst(uint32_t flags, int lcore) + rte_atomic64_set(&start, 1); + + /* start xmit */ ++ i = 0; + while (num) { + nb_tx = RTE_MIN(MAX_PKT_BURST, num); +- for (i = 0; i < conf->nb_ports; i++) { +- portid = conf->portlist[i]; +- nb_tx = rte_eth_tx_burst(portid, 0, +- &tx_burst[idx], nb_tx); +- idx += nb_tx; +- num -= nb_tx; +- } +- ++ portid = conf->portlist[i]; ++ nb_tx = rte_eth_tx_burst(portid, 0, &tx_burst[idx], nb_tx); ++ idx += nb_tx; ++ num -= nb_tx; ++ i = (i >= conf->nb_ports - 1) ? 
0 : (i + 1); + } + + sleep(5); +diff --git a/dpdk/app/test/test_ring_perf.c b/dpdk/app/test/test_ring_perf.c +index e63e25a867..fd82e20412 100644 +--- a/dpdk/app/test/test_ring_perf.c ++++ b/dpdk/app/test/test_ring_perf.c +@@ -178,7 +178,7 @@ enqueue_dequeue_bulk_helper(const unsigned int flag, const int esize, + struct thread_params *p) + { + int ret; +- const unsigned int iter_shift = 23; ++ const unsigned int iter_shift = 15; + const unsigned int iterations = 1 << iter_shift; + struct rte_ring *r = p->r; + unsigned int bsize = p->size; +diff --git a/dpdk/app/test/test_rwlock.c b/dpdk/app/test/test_rwlock.c +index 701187f398..b47150a86a 100644 +--- a/dpdk/app/test/test_rwlock.c ++++ b/dpdk/app/test/test_rwlock.c +@@ -46,6 +46,7 @@ enum { + static struct { + rte_rwlock_t lock; + uint64_t tick; ++ + volatile union { + uint8_t u8[RTE_CACHE_LINE_SIZE]; + uint64_t u64[RTE_CACHE_LINE_SIZE / sizeof(uint64_t)]; +@@ -182,7 +183,7 @@ rwlock_test1(void) + int i; + + rte_rwlock_init(&sl); +- for (i=0; i`_. ++ ``_. + + - Follow the DPDK :ref:`Getting Started Guide for Linux ` to setup the basic DPDK environment. + +diff --git a/dpdk/doc/guides/nics/i40e.rst b/dpdk/doc/guides/nics/i40e.rst +index 4e5c4679b8..64f20e7dab 100644 +--- a/dpdk/doc/guides/nics/i40e.rst ++++ b/dpdk/doc/guides/nics/i40e.rst +@@ -562,9 +562,9 @@ Generic flow API + - ``RSS Flow`` + + RSS Flow supports to set hash input set, hash function, enable hash +- and configure queue region. ++ and configure queues. + For example: +- Configure queue region as queue 0, 1, 2, 3. ++ Configure queues as queue 0, 1, 2, 3. + + .. code-block:: console + +diff --git a/dpdk/doc/guides/nics/ice.rst b/dpdk/doc/guides/nics/ice.rst +index a0887f129f..ccda26f82f 100644 +--- a/dpdk/doc/guides/nics/ice.rst ++++ b/dpdk/doc/guides/nics/ice.rst +@@ -211,9 +211,12 @@ are chosen based on 2 conditions. + - ``CPU`` + On the X86 platform, the driver checks if the CPU supports AVX2. + If it's supported, AVX2 paths will be chosen. If not, SSE is chosen. ++ If the CPU supports AVX512 and EAL argument ``--force-max-simd-bitwidth`` ++ is set to 512, AVX512 paths will be chosen. + + - ``Offload features`` +- The supported HW offload features are described in the document ice_vec.ini. ++ The supported HW offload features are described in the document ice.ini, ++ A value "P" means the offload feature is not supported by vector path. + If any not supported features are used, ICE vector PMD is disabled and the + normal paths are chosen. + +diff --git a/dpdk/doc/guides/nics/ixgbe.rst b/dpdk/doc/guides/nics/ixgbe.rst +index c801dbae81..4f4d3b1c2c 100644 +--- a/dpdk/doc/guides/nics/ixgbe.rst ++++ b/dpdk/doc/guides/nics/ixgbe.rst +@@ -252,6 +252,16 @@ Before binding ``vfio`` with legacy mode in X550 NICs, use ``modprobe vfio `` + ``nointxmask=1`` to load ``vfio`` module if the intx is not shared with other + devices. + ++UDP with zero checksum is reported as error ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++Intel 82599 10 Gigabit Ethernet Controller Specification Update (Revision 2.87) ++Errata: 44 Integrity Error Reported for IPv4/UDP Packets With Zero Checksum ++ ++To support UDP zero checksum, the zero and bad UDP checksum packet is marked as ++PKT_RX_L4_CKSUM_UNKNOWN, so the application needs to recompute the checksum to ++validate it. 
++ + Inline crypto processing support + -------------------------------- + +diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst +index 3bda0f8417..24d5a69227 100644 +--- a/dpdk/doc/guides/nics/mlx5.rst ++++ b/dpdk/doc/guides/nics/mlx5.rst +@@ -433,13 +433,17 @@ Driver options + A nonzero value enables the compression of CQE on RX side. This feature + allows to save PCI bandwidth and improve performance. Enabled by default. + Different compression formats are supported in order to achieve the best +- performance for different traffic patterns. Hash RSS format is the default. ++ performance for different traffic patterns. Default format depends on ++ Multi-Packet Rx queue configuration: Hash RSS format is used in case ++ MPRQ is disabled, Checksum format is used in case MPRQ is enabled. + + Specifying 2 as a ``rxq_cqe_comp_en`` value selects Flow Tag format for + better compression rate in case of RTE Flow Mark traffic. + Specifying 3 as a ``rxq_cqe_comp_en`` value selects Checksum format. + Specifying 4 as a ``rxq_cqe_comp_en`` value selects L3/L4 Header format for + better compression rate in case of mixed TCP/UDP and IPv4/IPv6 traffic. ++ CQE compression format selection requires DevX to be enabled. If there is ++ no DevX enabled/supported the value is reset to 1 by default. + + Supported on: + +@@ -448,24 +452,6 @@ Driver options + - POWER9 and ARMv8 with ConnectX-4 Lx, ConnectX-5, ConnectX-6, ConnectX-6 Dx, + ConnectX-6 Lx, BlueField and BlueField-2. + +-- ``rxq_cqe_pad_en`` parameter [int] +- +- A nonzero value enables 128B padding of CQE on RX side. The size of CQE +- is aligned with the size of a cacheline of the core. If cacheline size is +- 128B, the CQE size is configured to be 128B even though the device writes +- only 64B data on the cacheline. This is to avoid unnecessary cache +- invalidation by device's two consecutive writes on to one cacheline. +- However in some architecture, it is more beneficial to update entire +- cacheline with padding the rest 64B rather than striding because +- read-modify-write could drop performance a lot. On the other hand, +- writing extra data will consume more PCIe bandwidth and could also drop +- the maximum throughput. It is recommended to empirically set this +- parameter. Disabled by default. +- +- Supported on: +- +- - CPU having 128B cacheline with ConnectX-5 and BlueField. +- + - ``rxq_pkt_pad_en`` parameter [int] + + A nonzero value enables padding Rx packet to the size of cacheline on PCI +@@ -825,7 +811,7 @@ Driver options + +------+-----------+-----------+-------------+-------------+ + | 1 | 24 bits | vary 0-32 | 32 bits | yes | + +------+-----------+-----------+-------------+-------------+ +- | 2 | vary 0-32 | 32 bits | 32 bits | yes | ++ | 2 | vary 0-24 | 32 bits | 32 bits | yes | + +------+-----------+-----------+-------------+-------------+ + + If there is no E-Switch configuration the ``dv_xmeta_en`` parameter is +@@ -837,6 +823,17 @@ Driver options + of the extensive metadata features. The legacy Verbs supports FLAG and + MARK metadata actions over NIC Rx steering domain only. + ++ Setting META value to zero in flow action means there is no item provided ++ and receiving datapath will not report in mbufs the metadata are present. ++ Setting MARK value to zero in flow action means the zero FDIR ID value ++ will be reported on packet receiving. ++ ++ For the MARK action the last 16 values in the full range are reserved for ++ internal PMD purposes (to emulate FLAG action). 
The valid range for the ++ MARK action values is 0-0xFFEF for the 16-bit mode and 0-xFFFFEF ++ for the 24-bit mode, the flows with the MARK action value outside ++ the specified range will be rejected. ++ + - ``dv_flow_en`` parameter [int] + + A nonzero value enables the DV flow steering assuming it is supported +@@ -1390,7 +1387,8 @@ Supported hardware offloads + Rx timestamp 17.11 4.14 16 4.2-1 12.21.1000 ConnectX-4 + TSO 17.11 4.14 16 4.2-1 12.21.1000 ConnectX-4 + LRO 19.08 N/A N/A 4.6-4 16.25.6406 ConnectX-5 +- Buffer Split 20.11 N/A N/A 5.1-2 22.28.2006 ConnectX-6 Dx ++ Tx scheduling 20.08 N/A N/A 5.1-2 22.28.2006 ConnectX-6 Dx ++ Buffer Split 20.11 N/A N/A 5.1-2 16.28.2006 ConnectX-5 + ============== ===== ===== ========= ===== ========== ============= + + .. table:: Minimal SW/HW versions for rte_flow offloads +diff --git a/dpdk/doc/guides/prog_guide/graph_lib.rst b/dpdk/doc/guides/prog_guide/graph_lib.rst +index 5d183f86d7..fcff9c4286 100644 +--- a/dpdk/doc/guides/prog_guide/graph_lib.rst ++++ b/dpdk/doc/guides/prog_guide/graph_lib.rst +@@ -61,7 +61,7 @@ Anatomy of Node: + + .. figure:: img/anatomy_of_a_node.* + +-The :numref:`figure_anatomy_of_a_node` diagram depicts the anatomy of a node. ++ Anatomy of a node + + The node is the basic building block of the graph framework. + +@@ -138,8 +138,7 @@ Link the Nodes to create the graph topology + + .. figure:: img/link_the_nodes.* + +-The :numref:`figure_link_the_nodes` diagram shows a graph topology after +-linking the N nodes. ++ Topology after linking the nodes + + Once nodes are available to the program, Application or node public API + functions can links them together to create a complex packet processing graph. +@@ -322,8 +321,9 @@ Graph object memory layout + + .. figure:: img/graph_mem_layout.* + +-The :numref:`figure_graph_mem_layout` diagram shows ``rte_graph`` object memory +-layout. Understanding the memory layout helps to debug the graph library and ++ Memory layout ++ ++Understanding the memory layout helps to debug the graph library and + improve the performance if needed. + + Graph object consists of a header, circular buffer to store the pending +diff --git a/dpdk/doc/guides/prog_guide/img/anatomy_of_a_node.svg b/dpdk/doc/guides/prog_guide/img/anatomy_of_a_node.svg +index fa4b5b2d5a..d3bc742e77 100644 +--- a/dpdk/doc/guides/prog_guide/img/anatomy_of_a_node.svg ++++ b/dpdk/doc/guides/prog_guide/img/anatomy_of_a_node.svg +@@ -309,11 +309,6 @@ + id="path109" + inkscape:connector-curvature="0" + style="fill:#000000;fill-opacity:0;fill-rule:evenodd" /> +- + +- + + +diff --git a/dpdk/doc/guides/prog_guide/profile_app.rst b/dpdk/doc/guides/prog_guide/profile_app.rst +index 7093681983..52f85bb9e0 100644 +--- a/dpdk/doc/guides/prog_guide/profile_app.rst ++++ b/dpdk/doc/guides/prog_guide/profile_app.rst +@@ -33,6 +33,20 @@ Refer to the + for details about application profiling. + + ++Profiling with VTune ++~~~~~~~~~~~~~~~~~~~~ ++ ++To allow VTune attaching to the DPDK application, reconfigure a DPDK build ++folder by passing ``-Dc_args=-DRTE_ETHDEV_PROFILE_WITH_VTUNE`` meson option ++and recompile the DPDK: ++ ++.. 
code-block:: console ++ ++ meson build ++ meson configure build -Dc_args=-DRTE_ETHDEV_PROFILE_WITH_VTUNE ++ ninja -C build ++ ++ + Profiling on ARM64 + ------------------ + +diff --git a/dpdk/doc/guides/rel_notes/release_20_11.rst b/dpdk/doc/guides/rel_notes/release_20_11.rst +index 7405a9864f..e6a7f121c8 100644 +--- a/dpdk/doc/guides/rel_notes/release_20_11.rst ++++ b/dpdk/doc/guides/rel_notes/release_20_11.rst +@@ -1053,3 +1053,503 @@ Tested Platforms + * Broadcom Yocto Linux + * Kernel version: 4.14.174 + * DPDK application running on 8 Arm Cortex-A72 cores ++ ++20.11.1 Release Notes ++--------------------- ++ ++20.11.1 Fixes ++~~~~~~~~~~~~~ ++ ++* app/crypto-perf: fix CSV output format ++* app/crypto-perf: fix latency CSV output ++* app/crypto-perf: fix spelling in output ++* app/crypto-perf: remove always true condition ++* app/eventdev: adjust event count order for pipeline test ++* app/eventdev: fix SMP barrier in performance test ++* app/eventdev: remove redundant enqueue in burst Tx ++* app: fix build with extra include paths ++* app/flow-perf: simplify objects initialization ++* app/procinfo: fix check on xstats-ids ++* app/procinfo: fix _filters stats reporting ++* app/procinfo: fix security context info ++* app/procinfo: remove useless assignment ++* app/procinfo: remove useless memset ++* app/testpmd: avoid exit without terminal restore ++* app/testpmd: fix help of metering commands ++* app/testpmd: fix IP checksum calculation ++* app/testpmd: fix key for RSS flow rule ++* app/testpmd: fix max Rx packet length for VLAN packets ++* app/testpmd: fix packets dump overlapping ++* app/testpmd: fix queue reconfig request on Rx split update ++* app/testpmd: fix queue stats mapping configuration ++* app/testpmd: fix setting maximum packet length ++* app/testpmd: fix start index for showing FEC array ++* app/testpmd: release flows left before port stop ++* app/testpmd: support shared age action query ++* bitrate: fix missing header include ++* build: fix linker flags on Windows ++* build: fix plugin load on static build ++* build: force pkg-config for dependency detection ++* build: provide suitable error for "both" libraries option ++* bus/pci: fix build with MinGW-w64 8 ++* bus/pci: fix build with Windows SDK >= 10.0.20253 ++* bus/pci: fix hardware ID limit on Windows ++* bus/pci: ignore missing NUMA node on Windows ++* common/mlx5: fix completion queue entry size configuration ++* common/mlx5: fix pointer cast on Windows ++* common/mlx5: fix storing synced MAC to internal table ++* common/octeontx2: fix build with SVE ++* common/sfc_efx/base: apply mask to value on match field set ++* common/sfc_efx/base: check for MAE privilege ++* common/sfc_efx/base: enhance field ID check in field set API ++* common/sfc_efx/base: fix MAE match spec class comparison API ++* common/sfc_efx/base: fix MAE match spec validation helper ++* common/sfc_efx/base: fix MPORT related byte order handling ++* common/sfc_efx/base: fix signed/unsigned mismatch warnings ++* common/sfc_efx/base: remove warnings about inline specifiers ++* common/sfc_efx/base: support alternative MAE match fields ++* common/sfc_efx/base: update MCDI headers for MAE privilege ++* crypto/dpaa2_sec: fix memory allocation check ++* crypto/qat: fix access to uninitialized variable ++* crypto/qat: fix digest in buffer ++* doc: add FEC to NIC features ++* doc: add vtune profiling config to prog guide ++* doc: fix figure numbering in graph guide ++* doc: fix mark action zero value in mlx5 guide ++* doc: fix product link in hns3 guide ++* 
doc: fix QinQ flow rules in testpmd guide ++* doc: fix RSS flow description in i40e guide ++* doc: fix some statements for ice vector PMD ++* doc: fix supported feature table in mlx5 guide ++* doc: update flow mark action in mlx5 guide ++* eal/arm: fix debug build with gcc for 128-bit atomics ++* eal: fix automatic loading of drivers as shared libs ++* eal: fix internal ABI tag with clang ++* eal: fix MCS lock header include ++* eal: fix reciprocal header include ++* eal/linux: fix handling of error events from epoll ++* eal/windows: fix build with MinGW-w64 8 ++* eal/windows: fix C++ compatibility ++* eal/windows: fix debug build with MinGW ++* eal/windows: fix vfprintf warning with clang ++* ethdev: avoid blocking telemetry for link status ++* ethdev: fix close failure handling ++* ethdev: fix max Rx packet length check ++* ethdev: fix missing header include ++* eventdev: fix a return value comment ++* event/dlb: fix accessing uninitialized variables ++* examples/eventdev: add info output for main core ++* examples/eventdev: check CPU core enabling ++* examples/eventdev: move ethdev stop to the end ++* examples/l3fwd: remove limitation on Tx queue count ++* examples/pipeline: fix CLI parsing crash ++* examples/pipeline: fix VXLAN script permission ++* fbarray: fix overlap check ++* fib: fix missing header includes ++* ip_frag: remove padding length of fragment ++* ipsec: fix missing header include ++* lib: fix doxygen for parameters of function pointers ++* license: add licenses for exception cases ++* lpm: fix vector IPv4 lookup ++* mbuf: add C++ include guard for dynamic fields header ++* mbuf: fix missing header include ++* mbuf: remove unneeded atomic generic header include ++* mempool: fix panic on dump or audit ++* metrics: fix variable declaration in header ++* net/af_xdp: remove useless assignment ++* net/avp: remove always true condition ++* net/axgbe: fix jumbo frame flag condition for MTU set ++* net/bnxt: disable end of packet padding for Rx ++* net/bnxt: fix cleanup on mutex init failure ++* net/bnxt: fix doorbell write ordering ++* net/bnxt: fix error handling in device start ++* net/bnxt: fix fallback mbuf allocation logic ++* net/bnxt: fix format specifier for unsigned int ++* net/bnxt: fix freeing mbuf ++* net/bnxt: fix FW version log ++* net/bnxt: fix lock init and destroy ++* net/bnxt: fix max rings computation ++* net/bnxt: fix memory leak when mapping fails ++* net/bnxt: fix null termination of Rx mbuf chain ++* net/bnxt: fix outer UDP checksum Rx offload capability ++* net/bnxt: fix packet type index calculation ++* net/bnxt: fix PF resource query ++* net/bnxt: fix Rx completion ring size calculation ++* net/bnxt: fix Rx rings in RSS redirection table ++* net/bnxt: fix VNIC config on Rx queue stop ++* net/bnxt: fix VNIC RSS configure function ++* net/bnxt: limit Rx representor packets per poll ++* net/bnxt: make offload flags mapping per-ring ++* net/bnxt: propagate FW command failure to application ++* net/bnxt: refactor init/uninit ++* net/bnxt: release HWRM lock in error ++* net/bnxt: remove redundant return ++* net/bnxt: set correct checksum status in mbuf ++* net/bonding: fix PCI address comparison on non-PCI ports ++* net/bonding: fix port id validity check on parsing ++* net/bonding: remove local variable shadowing outer one ++* net/cxgbe: accept VLAN flow items without ethertype ++* net/cxgbe: fix jumbo frame flag condition ++* net/dpaa2: fix jumbo frame flag condition for MTU set ++* net/dpaa: fix jumbo frame flag condition for MTU set ++* net/e1000: fix 
flow control mode setting ++* net/e1000: fix jumbo frame flag condition for MTU set ++* net/ena: fix Tx doorbell statistics ++* net/ena: fix Tx SQ free space assessment ++* net/ena: flush Rx buffers memory pool cache ++* net/ena: prevent double doorbell ++* net/ena: validate Rx req ID upon acquiring descriptor ++* net/enetc: fix jumbo frame flag condition for MTU set ++* net/enic: fix filter log message ++* net/enic: fix filter type used for flow API ++* net: fix missing header include ++* net/hinic: fix jumbo frame flag condition for MTU set ++* net/hinic: restore vectorised code ++* net/hns3: adjust format specifier for enum ++* net/hns3: adjust some comments ++* net/hns3: fix build with SVE ++* net/hns3: fix crash with multi-process ++* net/hns3: fix data overwriting during register dump ++* net/hns3: fix dump register out of range ++* net/hns3: fix error code in xstats ++* net/hns3: fix FEC state query ++* net/hns3: fix firmware exceptions by concurrent commands ++* net/hns3: fix flow director rule residue on malloc failure ++* net/hns3: fix interception with flow director ++* net/hns3: fix interrupt resources in Rx interrupt mode ++* net/hns3: fix jumbo frame flag condition for MTU set ++* net/hns3: fix link status change from firmware ++* net/hns3: fix memory leak on secondary process exit ++* net/hns3: fix query order of link status and link info ++* net/hns3: fix register length when dumping registers ++* net/hns3: fix RSS indirection table size ++* net/hns3: fix Rx/Tx errors stats ++* net/hns3: fix stats flip overflow ++* net/hns3: fix VF query link status in dev init ++* net/hns3: fix VF reset on mailbox failure ++* net/hns3: fix xstats with id and names ++* net/hns3: remove MPLS from supported flow items ++* net/hns3: use new opcode for clearing hardware resource ++* net/hns3: validate requested maximum Rx frame length ++* net/i40e: add null input checks ++* net/i40e: fix flex payload rule conflict ++* net/i40e: fix global register recovery ++* net/i40e: fix jumbo frame flag condition ++* net/i40e: fix L4 checksum flag ++* net/i40e: fix returned code for RSS hardware failure ++* net/i40e: fix Rx bytes statistics ++* net/i40e: fix stats counters ++* net/i40e: fix VLAN stripping in VF ++* net/i40e: fix X722 for 802.1ad frames ability ++* net/iavf: fix conflicting RSS combination rules ++* net/iavf: fix GTPU UL and DL support for flow director ++* net/iavf: fix jumbo frame flag condition ++* net/iavf: fix memory leak in large VF ++* net/iavf: fix queue pairs configuration ++* net/iavf: fix symmetric flow rule creation ++* net/iavf: fix vector mapping with queue ++* net/ice/base: fix memory handling ++* net/ice/base: fix null pointer dereference ++* net/ice/base: fix tunnel destroy ++* net/ice: check Rx queue number on RSS init ++* net/ice: disable IPv4 checksum offload in vector Tx ++* net/ice: drain out DCF AdminQ command queue ++* net/ice: enlarge Rx queue rearm threshold to 64 ++* net/ice: fix jumbo frame flag condition ++* net/ice: fix outer checksum flags ++* net/ice: fix outer UDP Tx checksum offload ++* net/ice: fix RSS lookup table initialization ++* net/ionic: allow separate L3 and L4 checksum offload ++* net/ionic: do minor logging fixups ++* net/ionic: fix address handling in Tx ++* net/ionic: fix link speed and autonegotiation ++* net/ionic: fix up function attribute tags ++* net/ipn3ke: fix jumbo frame flag condition for MTU set ++* net/ixgbe: detect failed VF MTU set ++* net/ixgbe: disable NFS filtering ++* net/ixgbe: fix configuration of max frame size ++* net/ixgbe: 
fix flex bytes flow director rule ++* net/ixgbe: fix jumbo frame flag condition ++* net/ixgbe: fix UDP zero checksum on x86 ++* net/liquidio: fix jumbo frame flag condition for MTU set ++* net/mlx4: fix device detach ++* net/mlx4: fix handling of probing failure ++* net/mlx4: fix port attach in secondary process ++* net/mlx5: check FW miniCQE format capabilities ++* net/mlx5: fix buffer split offload advertising ++* net/mlx5: fix comparison sign in flow engine ++* net/mlx5: fix constant array size ++* net/mlx5: fix count actions query in sample flow ++* net/mlx5: fix counter and age flow action validation ++* net/mlx5: fix crash on secondary process port close ++* net/mlx5: fix device name size on Windows ++* net/mlx5: fix Direct Verbs flow descriptor allocation ++* net/mlx5: fix drop action in tunnel offload mode ++* net/mlx5: fix flow action destroy wrapper ++* net/mlx5: fix flow operation wrapper per OS ++* net/mlx5: fix flow split combined with age action ++* net/mlx5: fix flow split combined with counter ++* net/mlx5: fix flow tag decompression ++* net/mlx5: fix freeing packet pacing ++* net/mlx5: fix hairpin flow split decision ++* net/mlx5: fix leak on ASO SQ creation failure ++* net/mlx5: fix leak on Rx queue creation failure ++* net/mlx5: fix leak on Tx queue creation failure ++* net/mlx5: fix mark action in active tunnel offload ++* net/mlx5: fix mbuf freeing in vectorized MPRQ ++* net/mlx5: fix miniCQE configuration for Verbs ++* net/mlx5: fix multi-process port ID ++* net/mlx5: fix port attach in secondary process ++* net/mlx5: fix shared age action validation ++* net/mlx5: fix shared RSS and mark actions combination ++* net/mlx5: fix shared RSS capability check ++* net/mlx5: fix shared RSS translation and cleanup ++* net/mlx5: fix tunnel rules validation on VF representor ++* net/mlx5: fix Tx queue size created with DevX ++* net/mlx5: fix unnecessary checking for RSS action ++* net/mlx5: fix Verbs memory allocation callback ++* net/mlx5: fix VXLAN decap on non-VXLAN flow ++* net/mlx5: fix wire vport hint ++* net/mlx5: refuse empty VLAN in flow pattern ++* net/mlx5: remove CQE padding device argument ++* net/mlx5: unify operations for all OS ++* net/mlx5: validate hash Rx queue pointer ++* net/mvneta: check allocation in Rx queue flush ++* net/mvpp2: fix frame size checking ++* net/mvpp2: fix stack corruption ++* net/mvpp2: remove CRC length from MRU validation ++* net/mvpp2: remove debug log on fast-path ++* net/mvpp2: remove VLAN flush ++* net/netvsc: ignore unsupported packet on sync command ++* net/nfp: fix jumbo frame flag condition for MTU set ++* net/nfp: read chip model from PluDevice register ++* net/octeontx2: fix corruption in segments list ++* net/octeontx2: fix jumbo frame flag condition for MTU ++* net/octeontx2: fix PF flow action for Tx ++* net/octeontx: fix build with SVE ++* net/octeontx: fix jumbo frame flag condition for MTU set ++* net/octeontx: fix max Rx packet length ++* net/pcap: fix byte stats for drop Tx ++* net/pcap: fix infinite Rx with large files ++* net/pcap: remove local variable shadowing outer one ++* net/qede: fix jumbo frame flag condition for MTU set ++* net/qede: fix promiscuous enable ++* net/sfc: fix generic byte statistics to exclude FCS bytes ++* net/sfc: fix jumbo frame flag condition for MTU set ++* net/sfc: fix TSO and checksum offloads for EF10 ++* net/thunderx: fix jumbo frame flag condition for MTU set ++* net/virtio: add missing backend features negotiation ++* net/virtio: fix getting old status on reconnect ++* net/virtio: fix 
memory init with vDPA backend ++* net/virtio-user: fix protocol features advertising ++* net/virtio-user: fix run closing stdin and close callfd ++* node: fix missing header include ++* pipeline: fix missing header includes ++* power: clean up includes ++* power: create guest channel public header file ++* power: export guest channel header file ++* power: fix missing header includes ++* power: make channel message functions public ++* power: rename constants ++* power: rename public structs ++* regex/mlx5: fix memory rule alignment ++* regex/mlx5: fix number of supported queues ++* regex/mlx5: fix support for group id ++* regex/octeontx2: fix PCI table overflow ++* rib: fix insertion in some cases ++* rib: fix missing header include ++* rib: fix missing header includes ++* service: propagate init error in EAL ++* table: fix missing header include ++* telemetry: fix missing header include ++* test/distributor: fix return buffer queue overload ++* test/event_crypto: set cipher operation in transform ++* test: fix buffer overflow in Tx burst ++* test: fix terminal settings on exit ++* test/ipsec: fix result code for not supported ++* test/mcslock: remove unneeded per lcore copy ++* test/ring: reduce duration of performance tests ++* test/rwlock: fix spelling and missing whitespace ++* usertools: fix binding built-in kernel driver ++* vdpa/mlx5: fix configuration mutex cleanup ++* version: 20.11.1-rc1 ++* vhost: fix missing header includes ++* vhost: fix packed ring dequeue offloading ++* vhost: fix vid allocation race ++ ++20.11.1 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* Canonical(R) Testing ++ ++ * Build tests on Ubuntu 21.04 ++ * OVS-DPDK tests on x86_64 ++ * 1.0.0 (07:05:12): phys (BM) tests ++ * 1.1.0 (07:05:12): initialize environment ++ * 1.1.1 (07:09:32): testpmd => Pass ++ * 1.1.2 (07:11:12): check testpmd output => Pass ++ * 2.0.0 (07:11:12): prep virtual test environment ++ * 1.0.0 (07:14:14): virt tests ++ * 1.1.0 (07:14:14): initialize environment ++ * 3.0.0 (07:15:30): performance tests ++ * 3.1.0 (07:15:30): prep benchmarks ++ * 3.2.0 (07:15:51): performance tests ++ * 3.2.1 (07:16:01): test guest-openvswitch for OVS-5CPU => Pass ++ * 3.2.2 (07:35:44): test guest-dpdk-vhost-user-client-multiq for ++ * OVSDPDK-VUC => Pass ++ * 4.0.0 (07:57:11): VUC endurance checks ++ * 4.1.0 (07:57:11): prep VUC endurance tests ++ * 4.1.1 (08:12:38): start stop guests (client) => Pass ++ * 4.1.2 (09:25:59): add/remove ports (client) => Pass ++ * 4.2.0 (09:35:04): Final cleanup ++ ++ ++* Red Hat(R) Testing ++ ++ * Platform ++ ++ * RHEL 8 ++ * Kernel 4.18 ++ * Qemu 5.2 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q * cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live 
migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ ++* Broadcom(R) Testing ++ ++ * Functionality ++ ++ * Tx/Rx ++ * Link status ++ * RSS ++ * TSO ++ * VLAN filtering ++ * MAC filtering ++ * statistics ++ * Checksum offload ++ * MTU ++ * Promiscuous mode ++ ++ * Platform ++ ++ * BCM57414 NetXtreme-E 10Gb/25Gb Ethernet Controller, Firmware: 218.1.186.0 ++ * BCM57508 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb/200Gb Ethernet, Firmware : 219.0.0.74 ++ ++ ++* Intel(R) Testing ++ ++ * Basic Intel(R) NIC(ixgbe, i40e and ice) testing ++ * PF (i40e) ++ * PF (ixgbe) ++ * PF (ice) ++ * VF (i40e) ++ * VF (ixgbe) ++ * VF (ice) ++ * Compile Testing ++ * Intel NIC single core/NIC performance ++ * Power and IPsec ++ ++ * Basic cryptodev and virtio testing ++ ++ * vhost/virtio basic loopback, PVP and performance test ++ * cryptodev Function/Performance ++ ++ ++* Intel(R) Testing with Open vSwitch ++ ++ * OVS testing with OVS 2.15.0 ++ ++ * ICE Device ++ ++ * Jumbo frames, RSS, Kernel forwarding ++ ++ * i40e Device ++ ++ * Basic performance (RFC2544 P2P, PVP_CONT, RFC2544 PVP_TPUT, RFC2544 PVVP_TPUT, PVPV), Jumbo frames, RSS ++ ++ * Niantic Device ++ ++ * Basic performance tests (RFC2544 P2P, PVP_CONT, RFC2544 PVP_TPUT, RFC2544 PVVP_TPUT, PVPV), Jumbo frames, RSS ++ ++ * vhost ++ ++ * Port addition/deletion, Jumbo frames, RSS ++ ++ ++* Nvidia(R) Testing ++ ++ * Basic functionality with testpmd ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow and flow_director ++ * RSS ++ * VLAN stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ ++ * Build tests ++ ++ * Ubuntu 20.04.1 with MLNX_OFED_LINUX-5.2-2.2.0.0. ++ * Ubuntu 20.04.1 with rdma-core master (7f2d460). ++ * Ubuntu 20.04.1 with rdma-core v28.0. ++ * Ubuntu 18.04.5 with rdma-core v17.1. ++ * Ubuntu 18.04.5 with rdma-core master (7f2d460) (i386). ++ * Ubuntu 16.04.7 with rdma-core v22.7. ++ * Fedora 32 with rdma-core v33.0. ++ * CentOS 7 7.9.2009 with rdma-core master (7f2d460). ++ * CentOS 7 7.9.2009 with MLNX_OFED_LINUX-5.2-2.2.0.0. ++ * CentOS 8 8.3.2011 with rdma-core master (7f2d460). ++ * openSUSE Leap 15.2 with rdma-core v27.1. ++ ++ * ConnectX-5 ++ ++ * RHEL 7.4 ++ * Driver MLNX_OFED_LINUX-5.2-2.2.0.0 ++ * Kernel: 5.12.0-rc1 / Driver: rdma-core 34.0 ++ * fw 14.29.2002 ++ ++ * ConnectX-4 Lx ++ ++ * RHEL 7.4 ++ * Driver MLNX_OFED_LINUX-5.2-2.2.0.0 ++ * Kernel: 5.12.0-rc1 / Driver: rdma-core 34.0 ++ * fw 16.29.2002 ++ ++20.11.1 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* ICE ++ ++ * creating 512 acl rules after creating a full mask switch rule fails. ++ ++* vhost/virtio ++ ++ * udp-fragmentation-offload cannot be setup on Ubuntu 19.10 VMs. ++ https://bugzilla.kernel.org/show_bug.cgi?id=207075 ++ * vm2vm virtio-net connectivity between two vms randomly fails due ++ to lost connection after vhost reconnect. +diff --git a/dpdk/doc/guides/sample_app_ug/eventdev_pipeline.rst b/dpdk/doc/guides/sample_app_ug/eventdev_pipeline.rst +index 4508c3dcc8..19ff53803e 100644 +--- a/dpdk/doc/guides/sample_app_ug/eventdev_pipeline.rst ++++ b/dpdk/doc/guides/sample_app_ug/eventdev_pipeline.rst +@@ -34,6 +34,7 @@ options. 
+ An example eventdev pipeline running with the software eventdev PMD using + these settings is shown below: + ++ * ``-l 0,2,8-15``: lcore to use + * ``-r1``: core mask 0x1 for RX + * ``-t1``: core mask 0x1 for TX + * ``-e4``: core mask 0x4 for the software scheduler +@@ -46,8 +47,8 @@ these settings is shown below: + + .. code-block:: console + +- .//examples/dpdk-eventdev_pipeline --vdev event_sw0 -- -r1 -t1 \ +- -e4 -w FF00 -s4 -n0 -c32 -W1000 -D ++ .//examples/dpdk-eventdev_pipeline -l 0,2,8-15 --vdev event_sw0 \ ++ -- -r1 -t1 -e4 -w FF00 -s4 -n0 -c32 -W1000 -D + + The application has some sanity checking built-in, so if there is a function + (e.g.; the RX core) which doesn't have a cpu core mask assigned, the application +diff --git a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst +index 9be450066e..6a00245fc8 100644 +--- a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst ++++ b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst +@@ -4426,14 +4426,14 @@ Sample QinQ flow rules + Before creating QinQ rule(s) the following commands should be issued to enable QinQ:: + + testpmd> port stop 0 +- testpmd> vlan set qinq_strip on 0 ++ testpmd> vlan set extend on 0 + + The above command sets the inner and outer TPID's to 0x8100. + + To change the TPID's the following commands should be used:: + +- testpmd> vlan set outer tpid 0xa100 0 +- testpmd> vlan set inner tpid 0x9100 0 ++ testpmd> vlan set outer tpid 0x88A8 0 ++ testpmd> vlan set inner tpid 0x8100 0 + testpmd> port start 0 + + Validate and create a QinQ rule on port 0 to steer traffic to a VF queue in a VM. +diff --git a/dpdk/drivers/bus/pci/windows/pci.c b/dpdk/drivers/bus/pci/windows/pci.c +index b450346bdc..f662584528 100644 +--- a/dpdk/drivers/bus/pci/windows/pci.c ++++ b/dpdk/drivers/bus/pci/windows/pci.c +@@ -10,8 +10,9 @@ + #include "pci_netuio.h" + + #include ++#include + +-#ifdef RTE_TOOLCHAIN_GCC ++#if defined RTE_TOOLCHAIN_GCC && (__MINGW64_VERSION_MAJOR < 8) + #include + DEFINE_DEVPROPKEY(DEVPKEY_Device_Numa_Node, 0x540b947e, 0x8b40, 0x45bc, + 0xa8, 0xa2, 0x6a, 0x0b, 0x89, 0x4c, 0xbd, 0xa2, 3); +@@ -234,6 +235,12 @@ get_device_resource_info(HDEVINFO dev_info, + &DEVPKEY_Device_Numa_Node, &property_type, + (BYTE *)&numa_node, sizeof(numa_node), NULL, 0); + if (!res) { ++ DWORD error = GetLastError(); ++ if (error == ERROR_NOT_FOUND) { ++ /* On older CPUs, NUMA is not bound to PCIe locality. 
*/ ++ dev->device.numa_node = 0; ++ return ERROR_SUCCESS; ++ } + RTE_LOG_WIN32_ERR("SetupDiGetDevicePropertyW" + "(DEVPKEY_Device_Numa_Node)"); + return -1; +@@ -303,7 +310,7 @@ pci_scan_one(HDEVINFO dev_info, PSP_DEVINFO_DATA device_info_data) + { + struct rte_pci_device *dev; + int ret = -1; +- char pci_device_info[PATH_MAX]; ++ char pci_device_info[REGSTR_VAL_MAX_HCID_LEN]; + struct rte_pci_addr addr; + struct rte_pci_id pci_id; + +@@ -314,7 +321,7 @@ pci_scan_one(HDEVINFO dev_info, PSP_DEVINFO_DATA device_info_data) + memset(dev, 0, sizeof(*dev)); + + ret = get_pci_hardware_id(dev_info, device_info_data, +- pci_device_info, PATH_MAX); ++ pci_device_info, sizeof(pci_device_info)); + if (ret != 0) + goto end; + +diff --git a/dpdk/drivers/bus/pci/windows/pci_netuio.c b/dpdk/drivers/bus/pci/windows/pci_netuio.c +index 6701948392..1bf9133f71 100644 +--- a/dpdk/drivers/bus/pci/windows/pci_netuio.c ++++ b/dpdk/drivers/bus/pci/windows/pci_netuio.c +@@ -7,6 +7,12 @@ + #include + #include + ++#ifdef __MINGW32__ ++#include ++#else ++#include ++#endif ++ + #include "private.h" + #include "pci_netuio.h" + +diff --git a/dpdk/drivers/bus/pci/windows/pci_netuio.h b/dpdk/drivers/bus/pci/windows/pci_netuio.h +index 9a77806b57..2f6c97ea73 100644 +--- a/dpdk/drivers/bus/pci/windows/pci_netuio.h ++++ b/dpdk/drivers/bus/pci/windows/pci_netuio.h +@@ -5,6 +5,7 @@ + #ifndef _PCI_NETUIO_H_ + #define _PCI_NETUIO_H_ + ++#if !defined(NTDDI_WIN10_FE) || NTDDI_VERSION < NTDDI_WIN10_FE + /* GUID definition for device class netUIO */ + DEFINE_GUID(GUID_DEVCLASS_NETUIO, 0x78912bc1, 0xcb8e, 0x4b28, + 0xa3, 0x29, 0xf3, 0x22, 0xeb, 0xad, 0xbe, 0x0f); +@@ -12,6 +13,7 @@ DEFINE_GUID(GUID_DEVCLASS_NETUIO, 0x78912bc1, 0xcb8e, 0x4b28, + /* GUID definition for the netuio device interface */ + DEFINE_GUID(GUID_DEVINTERFACE_NETUIO, 0x08336f60, 0x0679, 0x4c6c, + 0x85, 0xd2, 0xae, 0x7c, 0xed, 0x65, 0xff, 0xf7); ++#endif + + /* IOCTL code definitions */ + #define IOCTL_NETUIO_MAP_HW_INTO_USERSPACE \ +diff --git a/dpdk/drivers/common/mlx5/linux/meson.build b/dpdk/drivers/common/mlx5/linux/meson.build +index 63b78e4bce..fa9686fdaf 100644 +--- a/dpdk/drivers/common/mlx5/linux/meson.build ++++ b/dpdk/drivers/common/mlx5/linux/meson.build +@@ -19,7 +19,8 @@ endif + libnames = [ 'mlx5', 'ibverbs' ] + libs = [] + foreach libname:libnames +- lib = dependency('lib' + libname, static:static_ibverbs, required:false) ++ lib = dependency('lib' + libname, static:static_ibverbs, ++ required:false, method: 'pkg-config') + if not lib.found() and not static_ibverbs + lib = cc.find_library(libname, required:false) + endif +diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_nl.c b/dpdk/drivers/common/mlx5/linux/mlx5_nl.c +index 40d8620300..ef7a521379 100644 +--- a/dpdk/drivers/common/mlx5/linux/mlx5_nl.c ++++ b/dpdk/drivers/common/mlx5/linux/mlx5_nl.c +@@ -758,11 +758,21 @@ mlx5_nl_mac_addr_sync(int nlsk_fd, unsigned int iface_idx, + break; + if (j != n) + continue; +- /* Find the first entry available. */ +- for (j = 0; j != n; ++j) { +- if (rte_is_zero_ether_addr(&mac_addrs[j])) { +- mac_addrs[j] = macs[i]; +- break; ++ if (rte_is_multicast_ether_addr(&macs[i])) { ++ /* Find the first entry available. */ ++ for (j = MLX5_MAX_UC_MAC_ADDRESSES; j != n; ++j) { ++ if (rte_is_zero_ether_addr(&mac_addrs[j])) { ++ mac_addrs[j] = macs[i]; ++ break; ++ } ++ } ++ } else { ++ /* Find the first entry available. 
*/ ++ for (j = 0; j != MLX5_MAX_UC_MAC_ADDRESSES; ++j) { ++ if (rte_is_zero_ether_addr(&mac_addrs[j])) { ++ mac_addrs[j] = macs[i]; ++ break; ++ } + } + } + } +diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c +index 9c1d1883ea..eafee65f22 100644 +--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c ++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c +@@ -720,6 +720,11 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + attr->flow_hit_aso = !!(MLX5_GET64(cmd_hca_cap, hcattr, + general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO); ++ attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression); ++ attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr, ++ mini_cqe_resp_flow_tag); ++ attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr, ++ mini_cqe_resp_l3_l4_tag); + if (attr->qos.sup) { + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP | +@@ -1558,7 +1563,8 @@ mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr) + } else { + MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr); + } +- MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size); ++ MLX5_SET(cqc, cqctx, cqe_sz, (RTE_CACHE_LINE_SIZE == 128) ? ++ MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B); + MLX5_SET(cqc, cqctx, cc, attr->use_first_only); + MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore); + MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size); +@@ -1571,7 +1577,6 @@ mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr) + attr->mini_cqe_res_format); + MLX5_SET(cqc, cqctx, mini_cqe_res_format_ext, + attr->mini_cqe_res_format_ext); +- MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size); + if (attr->q_umem_valid) { + MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid); + MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id); +diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h +index 726e9f5192..78202eba9d 100644 +--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h ++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h +@@ -115,6 +115,9 @@ struct mlx5_hca_attr { + uint32_t regex:1; + uint32_t regexp_num_of_engines; + uint32_t log_max_ft_sampler_num:8; ++ uint32_t cqe_compression:1; ++ uint32_t mini_cqe_resp_flow_tag:1; ++ uint32_t mini_cqe_resp_l3_l4_tag:1; + struct mlx5_hca_qos_attr qos; + struct mlx5_hca_vdpa_attr vdpa; + }; +@@ -267,7 +270,6 @@ struct mlx5_devx_cq_attr { + uint32_t cqe_comp_en:1; + uint32_t mini_cqe_res_format:2; + uint32_t mini_cqe_res_format_ext:2; +- uint32_t cqe_size:3; + uint32_t log_cq_size:5; + uint32_t log_page_size:5; + uint32_t uar_page_id; +diff --git a/dpdk/drivers/common/mlx5/mlx5_prm.h b/dpdk/drivers/common/mlx5/mlx5_prm.h +index 58d180486e..00b425ac85 100644 +--- a/dpdk/drivers/common/mlx5/mlx5_prm.h ++++ b/dpdk/drivers/common/mlx5/mlx5_prm.h +@@ -600,7 +600,7 @@ typedef uint8_t u8; + + #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) + #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) +-#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \ ++#define __mlx5_bit_off(typ, fld) ((unsigned int)(uintptr_t) \ + (&(__mlx5_nullp(typ)->fld))) + #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \ + (__mlx5_bit_off(typ, fld) & 0x1f)) +@@ -1364,7 +1364,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { + u8 num_of_uars_per_page[0x20]; + u8 flex_parser_protocols[0x20]; + u8 reserved_at_560[0x20]; +- u8 reserved_at_580[0x3c]; ++ u8 reserved_at_580[0x39]; ++ u8 mini_cqe_resp_l3_l4_tag[0x1]; ++ u8 mini_cqe_resp_flow_tag[0x1]; ++ u8 
enhanced_cqe_compression[0x1]; + u8 mini_cqe_resp_stride_index[0x1]; + u8 cqe_128_always[0x1]; + u8 cqe_compression_128[0x1]; +diff --git a/dpdk/drivers/common/octeontx2/otx2_io_arm64.h b/dpdk/drivers/common/octeontx2/otx2_io_arm64.h +index b5c85d9a6e..34268e3af3 100644 +--- a/dpdk/drivers/common/octeontx2/otx2_io_arm64.h ++++ b/dpdk/drivers/common/octeontx2/otx2_io_arm64.h +@@ -21,6 +21,12 @@ + #define otx2_prefetch_store_keep(ptr) ({\ + asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (ptr)); }) + ++#if defined(__ARM_FEATURE_SVE) ++#define __LSE_PREAMBLE " .cpu generic+lse+sve\n" ++#else ++#define __LSE_PREAMBLE " .cpu generic+lse\n" ++#endif ++ + static __rte_always_inline uint64_t + otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr) + { +@@ -28,7 +34,7 @@ otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr) + + /* Atomic add with no ordering */ + asm volatile ( +- ".cpu generic+lse\n" ++ __LSE_PREAMBLE + "ldadd %x[i], %x[r], [%[b]]" + : [r] "=r" (result), "+m" (*ptr) + : [i] "r" (incr), [b] "r" (ptr) +@@ -43,7 +49,7 @@ otx2_atomic64_add_sync(int64_t incr, int64_t *ptr) + + /* Atomic add with ordering */ + asm volatile ( +- ".cpu generic+lse\n" ++ __LSE_PREAMBLE + "ldadda %x[i], %x[r], [%[b]]" + : [r] "=r" (result), "+m" (*ptr) + : [i] "r" (incr), [b] "r" (ptr) +@@ -57,7 +63,7 @@ otx2_lmt_submit(rte_iova_t io_address) + uint64_t result; + + asm volatile ( +- ".cpu generic+lse\n" ++ __LSE_PREAMBLE + "ldeor xzr,%x[rf],[%[rs]]" : + [rf] "=r"(result): [rs] "r"(io_address)); + return result; +@@ -69,7 +75,7 @@ otx2_lmt_submit_release(rte_iova_t io_address) + uint64_t result; + + asm volatile ( +- ".cpu generic+lse\n" ++ __LSE_PREAMBLE + "ldeorl xzr,%x[rf],[%[rs]]" : + [rf] "=r"(result) : [rs] "r"(io_address)); + return result; +@@ -104,4 +110,5 @@ otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw) + dst128[i] = src128[i]; + } + ++#undef __LSE_PREAMBLE + #endif /* _OTX2_IO_ARM64_H_ */ +diff --git a/dpdk/drivers/common/qat/meson.build b/dpdk/drivers/common/qat/meson.build +index 29e1299f20..b2915c91fe 100644 +--- a/dpdk/drivers/common/qat/meson.build ++++ b/dpdk/drivers/common/qat/meson.build +@@ -23,7 +23,7 @@ if disabled_drivers.contains(qat_compress_path) + 'Explicitly disabled via build config') + endif + +-libcrypto = dependency('libcrypto', required: false) ++libcrypto = dependency('libcrypto', required: false, method: 'pkg-config') + if qat_crypto and not libcrypto.found() + qat_crypto = false + dpdk_drvs_disabled += qat_crypto_path +diff --git a/dpdk/drivers/common/sfc_efx/base/ef10_nic.c b/dpdk/drivers/common/sfc_efx/base/ef10_nic.c +index 68414d9fa9..9dccde9576 100644 +--- a/dpdk/drivers/common/sfc_efx/base/ef10_nic.c ++++ b/dpdk/drivers/common/sfc_efx/base/ef10_nic.c +@@ -1423,11 +1423,19 @@ ef10_get_datapath_caps( + + #if EFSYS_OPT_MAE + /* +- * Indicate support for MAE. +- * MAE is supported by Riverhead boards starting with R2, +- * and it is required that FW is built with MAE support, too. ++ * Check support for EF100 Match Action Engine (MAE). ++ * MAE hardware is present on Riverhead boards (from R2), ++ * and on Keystone, and requires support in firmware. ++ * ++ * MAE control operations require MAE control privilege, ++ * which is not available for VFs. ++ * ++ * Privileges can change dynamically at runtime: we assume ++ * MAE support requires the privilege is granted initially, ++ * and ignore later dynamic changes. 
+ */ +- if (CAP_FLAGS3(req, MAE_SUPPORTED)) ++ if (CAP_FLAGS3(req, MAE_SUPPORTED) && ++ EFX_MCDI_HAVE_PRIVILEGE(encp->enc_privilege_mask, MAE)) + encp->enc_mae_supported = B_TRUE; + else + encp->enc_mae_supported = B_FALSE; +@@ -1896,6 +1904,18 @@ efx_mcdi_nic_board_cfg( + + EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); + ++ /* ++ * Get the current privilege mask. Note that this may be modified ++ * dynamically, so for most cases the value is informational only. ++ * If the privilege being discovered can't be granted dynamically, ++ * it's fine to rely on the value. In all other cases, DO NOT use ++ * the privilege mask to check for sufficient privileges, as that ++ * can result in time-of-check/time-of-use bugs. ++ */ ++ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) ++ goto fail6; ++ encp->enc_privilege_mask = mask; ++ + /* Board configuration (legacy) */ + rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); + if (rc != 0) { +@@ -1903,14 +1923,14 @@ efx_mcdi_nic_board_cfg( + if (rc == EACCES) + board_type = 0; + else +- goto fail6; ++ goto fail7; + } + + encp->enc_board_type = board_type; + + /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ + if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) +- goto fail7; ++ goto fail8; + + /* + * Firmware with support for *_FEC capability bits does not +@@ -1929,18 +1949,18 @@ efx_mcdi_nic_board_cfg( + + /* Obtain the default PHY advertised capabilities */ + if ((rc = ef10_phy_get_link(enp, &els)) != 0) +- goto fail8; ++ goto fail9; + epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask; + epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask; + + /* Check capabilities of running datapath firmware */ + if ((rc = ef10_get_datapath_caps(enp)) != 0) +- goto fail9; ++ goto fail10; + + /* Get interrupt vector limits */ + if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { + if (EFX_PCI_FUNCTION_IS_PF(encp)) +- goto fail10; ++ goto fail11; + + /* Ignore error (cannot query vector limits from a VF). */ + base = 0; +@@ -1949,16 +1969,6 @@ efx_mcdi_nic_board_cfg( + encp->enc_intr_vec_base = base; + encp->enc_intr_limit = nvec; + +- /* +- * Get the current privilege mask. Note that this may be modified +- * dynamically, so this value is informational only. DO NOT use +- * the privilege mask to check for sufficient privileges, as that +- * can result in time-of-check/time-of-use bugs. +- */ +- if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) +- goto fail11; +- encp->enc_privilege_mask = mask; +- + return (0); + + fail11: +diff --git a/dpdk/drivers/common/sfc_efx/base/efx.h b/dpdk/drivers/common/sfc_efx/base/efx.h +index 3b40e28b4e..ccf9c7ab8a 100644 +--- a/dpdk/drivers/common/sfc_efx/base/efx.h ++++ b/dpdk/drivers/common/sfc_efx/base/efx.h +@@ -4283,6 +4283,11 @@ efx_mae_action_set_specs_equal( + * Conduct a comparison to check whether two match specifications + * of equal rule type (action / outer) and priority would map to + * the very same rule class from the firmware's standpoint. ++ * ++ * For match specification fields that are not supported by firmware, ++ * the rule class only matches if the mask/value pairs for that field ++ * are equal. Clients should use efx_mae_match_spec_is_valid() before ++ * calling this API to detect usage of unsupported fields. 
+ */ + LIBEFX_API + extern __checkReturn efx_rc_t +diff --git a/dpdk/drivers/common/sfc_efx/base/efx_mae.c b/dpdk/drivers/common/sfc_efx/base/efx_mae.c +index ee0a3d3196..338a0013f9 100644 +--- a/dpdk/drivers/common/sfc_efx/base/efx_mae.c ++++ b/dpdk/drivers/common/sfc_efx/base/efx_mae.c +@@ -463,6 +463,10 @@ typedef enum efx_mae_field_endianness_e { + * The information in it is meant to be used internally by + * APIs for addressing a given field in a mask-value pairs + * structure and for validation purposes. ++ * ++ * A field may have an alternative one. This structure ++ * has additional members to reference the alternative ++ * field's mask. See efx_mae_match_spec_is_valid(). + */ + typedef struct efx_mae_mv_desc_s { + efx_mae_field_cap_id_t emmd_field_cap_id; +@@ -472,6 +476,14 @@ typedef struct efx_mae_mv_desc_s { + size_t emmd_mask_size; + size_t emmd_mask_offset; + ++ /* ++ * Having the alternative field's mask size set to 0 ++ * means that there's no alternative field specified. ++ */ ++ size_t emmd_alt_mask_size; ++ size_t emmd_alt_mask_offset; ++ ++ /* Primary field and the alternative one are of the same endianness. */ + efx_mae_field_endianness_t emmd_endianness; + } efx_mae_mv_desc_t; + +@@ -485,6 +497,7 @@ static const efx_mae_mv_desc_t __efx_mae_action_rule_mv_desc_set[] = { + MAE_FIELD_MASK_VALUE_PAIRS_##_name##_OFST, \ + MAE_FIELD_MASK_VALUE_PAIRS_##_name##_MASK_LEN, \ + MAE_FIELD_MASK_VALUE_PAIRS_##_name##_MASK_OFST, \ ++ 0, 0 /* no alternative field */, \ + _endianness \ + } + +@@ -522,6 +535,21 @@ static const efx_mae_mv_desc_t __efx_mae_outer_rule_mv_desc_set[] = { + MAE_ENC_FIELD_PAIRS_##_name##_OFST, \ + MAE_ENC_FIELD_PAIRS_##_name##_MASK_LEN, \ + MAE_ENC_FIELD_PAIRS_##_name##_MASK_OFST, \ ++ 0, 0 /* no alternative field */, \ ++ _endianness \ ++ } ++ ++/* Same as EFX_MAE_MV_DESC(), but also indicates an alternative field. 
*/ ++#define EFX_MAE_MV_DESC_ALT(_name, _alt_name, _endianness) \ ++ [EFX_MAE_FIELD_##_name] = \ ++ { \ ++ EFX_MAE_FIELD_ID_##_name, \ ++ MAE_ENC_FIELD_PAIRS_##_name##_LEN, \ ++ MAE_ENC_FIELD_PAIRS_##_name##_OFST, \ ++ MAE_ENC_FIELD_PAIRS_##_name##_MASK_LEN, \ ++ MAE_ENC_FIELD_PAIRS_##_name##_MASK_OFST, \ ++ MAE_ENC_FIELD_PAIRS_##_alt_name##_MASK_LEN, \ ++ MAE_ENC_FIELD_PAIRS_##_alt_name##_MASK_OFST, \ + _endianness \ + } + +@@ -533,16 +561,17 @@ static const efx_mae_mv_desc_t __efx_mae_outer_rule_mv_desc_set[] = { + EFX_MAE_MV_DESC(ENC_VLAN0_PROTO_BE, EFX_MAE_FIELD_BE), + EFX_MAE_MV_DESC(ENC_VLAN1_TCI_BE, EFX_MAE_FIELD_BE), + EFX_MAE_MV_DESC(ENC_VLAN1_PROTO_BE, EFX_MAE_FIELD_BE), +- EFX_MAE_MV_DESC(ENC_SRC_IP4_BE, EFX_MAE_FIELD_BE), +- EFX_MAE_MV_DESC(ENC_DST_IP4_BE, EFX_MAE_FIELD_BE), ++ EFX_MAE_MV_DESC_ALT(ENC_SRC_IP4_BE, ENC_SRC_IP6_BE, EFX_MAE_FIELD_BE), ++ EFX_MAE_MV_DESC_ALT(ENC_DST_IP4_BE, ENC_DST_IP6_BE, EFX_MAE_FIELD_BE), + EFX_MAE_MV_DESC(ENC_IP_PROTO, EFX_MAE_FIELD_BE), + EFX_MAE_MV_DESC(ENC_IP_TOS, EFX_MAE_FIELD_BE), + EFX_MAE_MV_DESC(ENC_IP_TTL, EFX_MAE_FIELD_BE), +- EFX_MAE_MV_DESC(ENC_SRC_IP6_BE, EFX_MAE_FIELD_BE), +- EFX_MAE_MV_DESC(ENC_DST_IP6_BE, EFX_MAE_FIELD_BE), ++ EFX_MAE_MV_DESC_ALT(ENC_SRC_IP6_BE, ENC_SRC_IP4_BE, EFX_MAE_FIELD_BE), ++ EFX_MAE_MV_DESC_ALT(ENC_DST_IP6_BE, ENC_DST_IP4_BE, EFX_MAE_FIELD_BE), + EFX_MAE_MV_DESC(ENC_L4_SPORT_BE, EFX_MAE_FIELD_BE), + EFX_MAE_MV_DESC(ENC_L4_DPORT_BE, EFX_MAE_FIELD_BE), + ++#undef EFX_MAE_MV_DESC_ALT + #undef EFX_MAE_MV_DESC + }; + +@@ -564,7 +593,13 @@ efx_mae_mport_by_phy_port( + MAE_MPORT_SELECTOR_PPORT_ID, phy_port); + + memset(mportp, 0, sizeof (*mportp)); +- mportp->sel = dword.ed_u32[0]; ++ /* ++ * The constructed DWORD is little-endian, ++ * but the resulting value is meant to be ++ * passed to MCDIs, where it will undergo ++ * host-order to little endian conversion. ++ */ ++ mportp->sel = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + + return (0); + +@@ -601,7 +636,13 @@ efx_mae_mport_by_pcie_function( + MAE_MPORT_SELECTOR_FUNC_VF_ID, vf); + + memset(mportp, 0, sizeof (*mportp)); +- mportp->sel = dword.ed_u32[0]; ++ /* ++ * The constructed DWORD is little-endian, ++ * but the resulting value is meant to be ++ * passed to MCDIs, where it will undergo ++ * host-order to little endian conversion. ++ */ ++ mportp->sel = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + + return (0); + +@@ -644,28 +685,54 @@ efx_mae_match_spec_field_set( + goto fail1; + } + +- if (field_id >= desc_set_nentries) { ++ if ((unsigned int)field_id >= desc_set_nentries) { + rc = EINVAL; + goto fail2; + } + +- if (value_size != descp->emmd_value_size) { ++ if (descp->emmd_mask_size == 0) { ++ /* The ID points to a gap in the array of field descriptors. */ + rc = EINVAL; + goto fail3; + } + +- if (mask_size != descp->emmd_mask_size) { ++ if (value_size != descp->emmd_value_size) { + rc = EINVAL; + goto fail4; + } + ++ if (mask_size != descp->emmd_mask_size) { ++ rc = EINVAL; ++ goto fail5; ++ } ++ + if (descp->emmd_endianness == EFX_MAE_FIELD_BE) { ++ unsigned int i; ++ + /* + * The mask/value are in network (big endian) order. + * The MCDI request field is also big endian. 
+ */ +- memcpy(mvp + descp->emmd_value_offset, value, value_size); +- memcpy(mvp + descp->emmd_mask_offset, mask, mask_size); ++ ++ EFSYS_ASSERT3U(value_size, ==, mask_size); ++ ++ for (i = 0; i < value_size; ++i) { ++ uint8_t *v_bytep = mvp + descp->emmd_value_offset + i; ++ uint8_t *m_bytep = mvp + descp->emmd_mask_offset + i; ++ ++ /* ++ * Apply the mask (which may be all-zeros) to the value. ++ * ++ * If this API is provided with some value to set for a ++ * given field in one specification and with some other ++ * value to set for this field in another specification, ++ * then, if the two masks are all-zeros, the field will ++ * avoid being counted as a mismatch when comparing the ++ * specifications using efx_mae_match_specs_equal() API. ++ */ ++ *v_bytep = value[i] & mask[i]; ++ *m_bytep = mask[i]; ++ } + } else { + efx_dword_t dword; + +@@ -700,6 +767,8 @@ efx_mae_match_spec_field_set( + + return (0); + ++fail5: ++ EFSYS_PROBE(fail5); + fail4: + EFSYS_PROBE(fail4); + fail3: +@@ -760,7 +829,7 @@ efx_mae_match_specs_equal( + ((_mask)[(_bit) / (_mask_page_nbits)] & \ + (1ULL << ((_bit) & ((_mask_page_nbits) - 1)))) + +-static inline boolean_t ++static boolean_t + efx_mask_is_prefix( + __in size_t mask_nbytes, + __in_bcount(mask_nbytes) const uint8_t *maskp) +@@ -780,7 +849,7 @@ efx_mask_is_prefix( + return B_TRUE; + } + +-static inline boolean_t ++static boolean_t + efx_mask_is_all_ones( + __in size_t mask_nbytes, + __in_bcount(mask_nbytes) const uint8_t *maskp) +@@ -794,7 +863,7 @@ efx_mask_is_all_ones( + return (t == (uint8_t)(~0)); + } + +-static inline boolean_t ++static boolean_t + efx_mask_is_all_zeros( + __in size_t mask_nbytes, + __in_bcount(mask_nbytes) const uint8_t *maskp) +@@ -844,17 +913,29 @@ efx_mae_match_spec_is_valid( + if (field_caps == NULL) + return (B_FALSE); + +- for (field_id = 0; field_id < desc_set_nentries; ++field_id) { ++ for (field_id = 0; (unsigned int)field_id < desc_set_nentries; ++ ++field_id) { + const efx_mae_mv_desc_t *descp = &desc_setp[field_id]; + efx_mae_field_cap_id_t field_cap_id = descp->emmd_field_cap_id; ++ const uint8_t *alt_m_buf = mvp + descp->emmd_alt_mask_offset; + const uint8_t *m_buf = mvp + descp->emmd_mask_offset; ++ size_t alt_m_size = descp->emmd_alt_mask_size; + size_t m_size = descp->emmd_mask_size; + + if (m_size == 0) + continue; /* Skip array gap */ + +- if (field_cap_id >= field_ncaps) +- break; ++ if ((unsigned int)field_cap_id >= field_ncaps) { ++ /* ++ * The FW has not reported capability status for ++ * this field. Make sure that its mask is zeroed. ++ */ ++ is_valid = efx_mask_is_all_zeros(m_size, m_buf); ++ if (is_valid != B_FALSE) ++ continue; ++ else ++ break; ++ } + + switch (field_caps[field_cap_id].emfc_support) { + case MAE_FIELD_SUPPORTED_MATCH_MASK: +@@ -869,6 +950,19 @@ efx_mae_match_spec_is_valid( + break; + case MAE_FIELD_SUPPORTED_MATCH_ALWAYS: + is_valid = efx_mask_is_all_ones(m_size, m_buf); ++ ++ if ((is_valid == B_FALSE) && (alt_m_size != 0)) { ++ /* ++ * This field has an alternative one. The FW ++ * reports ALWAYS for both implying that one ++ * of them is required to have all-ones mask. ++ * ++ * The primary field's mask is incorrect; go ++ * on to check that of the alternative field. 
++ */ ++ is_valid = efx_mask_is_all_ones(alt_m_size, ++ alt_m_buf); ++ } + break; + case MAE_FIELD_SUPPORTED_MATCH_NEVER: + case MAE_FIELD_UNSUPPORTED: +@@ -1274,7 +1368,13 @@ efx_mae_action_set_populate_drop( + EFX_POPULATE_DWORD_1(dword, + MAE_MPORT_SELECTOR_FLAT, MAE_MPORT_SELECTOR_NULL); + +- mport.sel = dword.ed_u32[0]; ++ /* ++ * The constructed DWORD is little-endian, ++ * but the resulting value is meant to be ++ * passed to MCDIs, where it will undergo ++ * host-order to little endian conversion. ++ */ ++ mport.sel = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + + arg = (const uint8_t *)&mport.sel; + +@@ -1350,21 +1450,36 @@ efx_mae_match_specs_class_cmp( + return (0); + } + +- for (field_id = 0; field_id < desc_set_nentries; ++field_id) { ++ for (field_id = 0; (unsigned int)field_id < desc_set_nentries; ++ ++field_id) { + const efx_mae_mv_desc_t *descp = &desc_setp[field_id]; + efx_mae_field_cap_id_t field_cap_id = descp->emmd_field_cap_id; +- +- if (descp->emmd_mask_size == 0) ++ const uint8_t *lmaskp = mvpl + descp->emmd_mask_offset; ++ const uint8_t *rmaskp = mvpr + descp->emmd_mask_offset; ++ size_t mask_size = descp->emmd_mask_size; ++ const uint8_t *lvalp = mvpl + descp->emmd_value_offset; ++ const uint8_t *rvalp = mvpr + descp->emmd_value_offset; ++ size_t value_size = descp->emmd_value_size; ++ ++ if (mask_size == 0) + continue; /* Skip array gap */ + +- if (field_cap_id >= field_ncaps) +- break; ++ if ((unsigned int)field_cap_id >= field_ncaps) { ++ /* ++ * The FW has not reported capability status for this ++ * field. It's unknown whether any difference between ++ * the two masks / values affects the class. The only ++ * case when the class must be the same is when these ++ * mask-value pairs match. Otherwise, report mismatch. ++ */ ++ if ((memcmp(lmaskp, rmaskp, mask_size) == 0) && ++ (memcmp(lvalp, rvalp, value_size) == 0)) ++ continue; ++ else ++ break; ++ } + + if (field_caps[field_cap_id].emfc_mask_affects_class) { +- const uint8_t *lmaskp = mvpl + descp->emmd_mask_offset; +- const uint8_t *rmaskp = mvpr + descp->emmd_mask_offset; +- size_t mask_size = descp->emmd_mask_size; +- + if (memcmp(lmaskp, rmaskp, mask_size) != 0) { + have_same_class = B_FALSE; + break; +@@ -1372,10 +1487,6 @@ efx_mae_match_specs_class_cmp( + } + + if (field_caps[field_cap_id].emfc_match_affects_class) { +- const uint8_t *lvalp = mvpl + descp->emmd_value_offset; +- const uint8_t *rvalp = mvpr + descp->emmd_value_offset; +- size_t value_size = descp->emmd_value_size; +- + if (memcmp(lvalp, rvalp, value_size) != 0) { + have_same_class = B_FALSE; + break; +diff --git a/dpdk/drivers/common/sfc_efx/base/efx_regs_mcdi.h b/dpdk/drivers/common/sfc_efx/base/efx_regs_mcdi.h +index 0388acf723..689a491d05 100644 +--- a/dpdk/drivers/common/sfc_efx/base/efx_regs_mcdi.h ++++ b/dpdk/drivers/common/sfc_efx/base/efx_regs_mcdi.h +@@ -20349,6 +20349,8 @@ + * SF-117064-DG for background). + */ + #define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000 ++/* enum: Control the Match-Action Engine if present. See mcdi_mae.yml. */ ++#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE 0x10000 + /* enum: Set this bit to indicate that a new privilege mask is to be set, + * otherwise the command will only read the existing mask. 
+ */ +@@ -26823,7 +26825,7 @@ + #define MC_CMD_MAE_GET_AR_CAPS 0x141 + #undef MC_CMD_0x141_PRIVILEGE_CTG + +-#define MC_CMD_0x141_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x141_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_GET_AR_CAPS_IN msgrequest */ + #define MC_CMD_MAE_GET_AR_CAPS_IN_LEN 0 +@@ -26855,7 +26857,7 @@ + #define MC_CMD_MAE_GET_OR_CAPS 0x142 + #undef MC_CMD_0x142_PRIVILEGE_CTG + +-#define MC_CMD_0x142_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x142_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_GET_OR_CAPS_IN msgrequest */ + #define MC_CMD_MAE_GET_OR_CAPS_IN_LEN 0 +@@ -26885,7 +26887,7 @@ + #define MC_CMD_MAE_COUNTER_ALLOC 0x143 + #undef MC_CMD_0x143_PRIVILEGE_CTG + +-#define MC_CMD_0x143_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x143_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_COUNTER_ALLOC_IN msgrequest */ + #define MC_CMD_MAE_COUNTER_ALLOC_IN_LEN 4 +@@ -26928,7 +26930,7 @@ + #define MC_CMD_MAE_COUNTER_FREE 0x144 + #undef MC_CMD_0x144_PRIVILEGE_CTG + +-#define MC_CMD_0x144_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x144_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_COUNTER_FREE_IN msgrequest */ + #define MC_CMD_MAE_COUNTER_FREE_IN_LENMIN 8 +@@ -26993,6 +26995,9 @@ + * delivering packets to the current queue first. + */ + #define MC_CMD_MAE_COUNTERS_STREAM_START 0x151 ++#undef MC_CMD_0x151_PRIVILEGE_CTG ++ ++#define MC_CMD_0x151_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_COUNTERS_STREAM_START_IN msgrequest */ + #define MC_CMD_MAE_COUNTERS_STREAM_START_IN_LEN 8 +@@ -27026,6 +27031,9 @@ + * Stop streaming counter values to the specified RxQ. + */ + #define MC_CMD_MAE_COUNTERS_STREAM_STOP 0x152 ++#undef MC_CMD_0x152_PRIVILEGE_CTG ++ ++#define MC_CMD_0x152_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_COUNTERS_STREAM_STOP_IN msgrequest */ + #define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_LEN 2 +@@ -27052,6 +27060,9 @@ + * MAE_COUNTERS_PACKETISER_STREAM_START/PACKET_SIZE and rung the doorbell. 
+ */ + #define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS 0x153 ++#undef MC_CMD_0x153_PRIVILEGE_CTG ++ ++#define MC_CMD_0x153_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN msgrequest */ + #define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN 4 +@@ -27070,7 +27081,7 @@ + #define MC_CMD_MAE_ENCAP_HEADER_ALLOC 0x148 + #undef MC_CMD_0x148_PRIVILEGE_CTG + +-#define MC_CMD_0x148_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x148_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN msgrequest */ + #define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMIN 4 +@@ -27103,7 +27114,7 @@ + #define MC_CMD_MAE_ENCAP_HEADER_UPDATE 0x149 + #undef MC_CMD_0x149_PRIVILEGE_CTG + +-#define MC_CMD_0x149_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x149_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN msgrequest */ + #define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMIN 8 +@@ -27132,7 +27143,7 @@ + #define MC_CMD_MAE_ENCAP_HEADER_FREE 0x14a + #undef MC_CMD_0x14a_PRIVILEGE_CTG + +-#define MC_CMD_0x14a_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x14a_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ENCAP_HEADER_FREE_IN msgrequest */ + #define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMIN 4 +@@ -27170,7 +27181,7 @@ + #define MC_CMD_MAE_MAC_ADDR_ALLOC 0x15e + #undef MC_CMD_0x15e_PRIVILEGE_CTG + +-#define MC_CMD_0x15e_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x15e_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_MAC_ADDR_ALLOC_IN msgrequest */ + #define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN 6 +@@ -27195,7 +27206,7 @@ + #define MC_CMD_MAE_MAC_ADDR_FREE 0x15f + #undef MC_CMD_0x15f_PRIVILEGE_CTG + +-#define MC_CMD_0x15f_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x15f_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_MAC_ADDR_FREE_IN msgrequest */ + #define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMIN 4 +@@ -27232,7 +27243,7 @@ + #define MC_CMD_MAE_ACTION_SET_ALLOC 0x14d + #undef MC_CMD_0x14d_PRIVILEGE_CTG + +-#define MC_CMD_0x14d_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x14d_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ACTION_SET_ALLOC_IN msgrequest */ + #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN 44 +@@ -27317,7 +27328,7 @@ + #define MC_CMD_MAE_ACTION_SET_FREE 0x14e + #undef MC_CMD_0x14e_PRIVILEGE_CTG + +-#define MC_CMD_0x14e_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x14e_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ACTION_SET_FREE_IN msgrequest */ + #define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMIN 4 +@@ -27355,7 +27366,7 @@ + #define MC_CMD_MAE_ACTION_SET_LIST_ALLOC 0x14f + #undef MC_CMD_0x14f_PRIVILEGE_CTG + +-#define MC_CMD_0x14f_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x14f_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN msgrequest */ + #define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMIN 8 +@@ -27398,7 +27409,7 @@ + #define MC_CMD_MAE_ACTION_SET_LIST_FREE 0x150 + #undef MC_CMD_0x150_PRIVILEGE_CTG + +-#define MC_CMD_0x150_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x150_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ACTION_SET_LIST_FREE_IN msgrequest */ + #define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMIN 4 +@@ -27435,7 +27446,7 @@ + #define MC_CMD_MAE_OUTER_RULE_INSERT 0x15a + #undef MC_CMD_0x15a_PRIVILEGE_CTG + +-#define MC_CMD_0x15a_PRIVILEGE_CTG SRIOV_CTG_ADMIN ++#define MC_CMD_0x15a_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_OUTER_RULE_INSERT_IN msgrequest */ + #define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMIN 16 +@@ -27495,7 +27506,7 @@ + #define MC_CMD_MAE_OUTER_RULE_REMOVE 0x15b + #undef MC_CMD_0x15b_PRIVILEGE_CTG 
+ +-#define MC_CMD_0x15b_PRIVILEGE_CTG SRIOV_CTG_ADMIN ++#define MC_CMD_0x15b_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_OUTER_RULE_REMOVE_IN msgrequest */ + #define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMIN 4 +@@ -27577,7 +27588,7 @@ + #define MC_CMD_MAE_ACTION_RULE_INSERT 0x15c + #undef MC_CMD_0x15c_PRIVILEGE_CTG + +-#define MC_CMD_0x15c_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x15c_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ACTION_RULE_INSERT_IN msgrequest */ + #define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMIN 28 +@@ -27618,7 +27629,7 @@ + #define MC_CMD_MAE_ACTION_RULE_UPDATE 0x15d + #undef MC_CMD_0x15d_PRIVILEGE_CTG + +-#define MC_CMD_0x15d_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x15d_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ACTION_RULE_UPDATE_IN msgrequest */ + #define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_LEN 24 +@@ -27639,7 +27650,7 @@ + #define MC_CMD_MAE_ACTION_RULE_DELETE 0x155 + #undef MC_CMD_0x155_PRIVILEGE_CTG + +-#define MC_CMD_0x155_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x155_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_ACTION_RULE_DELETE_IN msgrequest */ + #define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMIN 4 +@@ -27696,7 +27707,7 @@ + #define MC_CMD_MAE_MPORT_ALLOC 0x163 + #undef MC_CMD_0x163_PRIVILEGE_CTG + +-#define MC_CMD_0x163_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x163_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_MPORT_ALLOC_IN msgrequest */ + #define MC_CMD_MAE_MPORT_ALLOC_IN_LEN 20 +@@ -27803,7 +27814,7 @@ + #define MC_CMD_MAE_MPORT_FREE 0x164 + #undef MC_CMD_0x164_PRIVILEGE_CTG + +-#define MC_CMD_0x164_PRIVILEGE_CTG SRIOV_CTG_GENERAL ++#define MC_CMD_0x164_PRIVILEGE_CTG SRIOV_CTG_MAE + + /* MC_CMD_MAE_MPORT_FREE_IN msgrequest */ + #define MC_CMD_MAE_MPORT_FREE_IN_LEN 4 +@@ -27907,6 +27918,9 @@ + /* MC_CMD_MAE_MPORT_ENUMERATE + */ + #define MC_CMD_MAE_MPORT_ENUMERATE 0x17c ++#undef MC_CMD_0x17c_PRIVILEGE_CTG ++ ++#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + + /* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */ + #define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0 +diff --git a/dpdk/drivers/compress/isal/meson.build b/dpdk/drivers/compress/isal/meson.build +index 5ee17e28f5..d847c2ea6f 100644 +--- a/dpdk/drivers/compress/isal/meson.build ++++ b/dpdk/drivers/compress/isal/meson.build +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright 2018 Intel Corporation + +-dep = dependency('libisal', required: false) ++dep = dependency('libisal', required: false, method: 'pkg-config') + if not dep.found() + build = false + reason = 'missing dependency, "libisal"' +diff --git a/dpdk/drivers/compress/zlib/meson.build b/dpdk/drivers/compress/zlib/meson.build +index b19a6d2b16..82cf0dddd6 100644 +--- a/dpdk/drivers/compress/zlib/meson.build ++++ b/dpdk/drivers/compress/zlib/meson.build +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2018 Cavium Networks + +-dep = dependency('zlib', required: false) ++dep = dependency('zlib', required: false, method: 'pkg-config') + if not dep.found() + build = false + reason = 'missing dependency, "zlib"' +diff --git a/dpdk/drivers/crypto/armv8/meson.build b/dpdk/drivers/crypto/armv8/meson.build +index 3289a2adca..027173bc1e 100644 +--- a/dpdk/drivers/crypto/armv8/meson.build ++++ b/dpdk/drivers/crypto/armv8/meson.build +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2019 Arm Limited + +-dep = dependency('libAArch64crypto', required: false) ++dep = dependency('libAArch64crypto', required: false, method: 'pkg-config') + if not dep.found() 
+ build = false + reason = 'missing dependency, "libAArch64crypto"' +diff --git a/dpdk/drivers/crypto/ccp/meson.build b/dpdk/drivers/crypto/ccp/meson.build +index a0e0b379eb..ff66427ae8 100644 +--- a/dpdk/drivers/crypto/ccp/meson.build ++++ b/dpdk/drivers/crypto/ccp/meson.build +@@ -5,7 +5,7 @@ if not is_linux + build = false + reason = 'only supported on Linux' + endif +-dep = dependency('libcrypto', required: false) ++dep = dependency('libcrypto', required: false, method: 'pkg-config') + if not dep.found() + build = false + reason = 'missing dependency, "libcrypto"' +diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +index 6ff0d833e9..5d91bf910e 100644 +--- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c ++++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +@@ -1842,7 +1842,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, + session->ctxt_type = DPAA2_SEC_CIPHER; + session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, + RTE_CACHE_LINE_SIZE); +- if (session->cipher_key.data == NULL) { ++ if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { + DPAA2_SEC_ERR("No Memory for cipher key"); + rte_free(priv); + return -ENOMEM; +diff --git a/dpdk/drivers/crypto/openssl/meson.build b/dpdk/drivers/crypto/openssl/meson.build +index d9ac698971..47fb2bb751 100644 +--- a/dpdk/drivers/crypto/openssl/meson.build ++++ b/dpdk/drivers/crypto/openssl/meson.build +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2017 Intel Corporation + +-dep = dependency('libcrypto', required: false) ++dep = dependency('libcrypto', required: false, method: 'pkg-config') + if not dep.found() + build = false + reason = 'missing dependency, "libcrypto"' +diff --git a/dpdk/drivers/crypto/qat/meson.build b/dpdk/drivers/crypto/qat/meson.build +index bc90ec44cc..92e0ed6565 100644 +--- a/dpdk/drivers/crypto/qat/meson.build ++++ b/dpdk/drivers/crypto/qat/meson.build +@@ -5,7 +5,7 @@ + # driver which comes later. 
Here we just add our sources files to the list + build = false + reason = '' # sentinal value to suppress printout +-dep = dependency('libcrypto', required: false) ++dep = dependency('libcrypto', required: false, method: 'pkg-config') + qat_includes += include_directories('.') + qat_deps += 'cryptodev' + qat_deps += 'net' +diff --git a/dpdk/drivers/crypto/qat/qat_asym_pmd.c b/dpdk/drivers/crypto/qat/qat_asym_pmd.c +index ed8a2a50b4..a2c8aca2c1 100644 +--- a/dpdk/drivers/crypto/qat/qat_asym_pmd.c ++++ b/dpdk/drivers/crypto/qat/qat_asym_pmd.c +@@ -251,6 +251,10 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev, + struct rte_cryptodev *cryptodev; + struct qat_asym_dev_private *internals; + ++ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", ++ qat_pci_dev->name, "asym"); ++ QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name); ++ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + qat_pci_dev->qat_asym_driver_id = + qat_asym_driver_id; +@@ -264,10 +268,6 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev, + } + } + +- snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", +- qat_pci_dev->name, "asym"); +- QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name); +- + /* Populate subset device to use in cryptodev device creation */ + qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver; + qat_dev_instance->asym_rte_dev.numa_node = +diff --git a/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c b/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c +index dfbbad59b6..01afb883e3 100644 +--- a/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c ++++ b/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c +@@ -558,55 +558,6 @@ enqueue_one_chain_job(struct qat_sym_session *ctx, + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + auth_param->u1.aad_adr = auth_iv->iova; +- +- if (unlikely(n_data_vecs > 1)) { +- int auth_end_get = 0, i = n_data_vecs - 1; +- struct rte_crypto_vec *cvec = &data[0]; +- uint32_t len; +- +- len = data_len - ofs.ofs.auth.tail; +- +- while (i >= 0 && len > 0) { +- if (cvec->len >= len) { +- auth_iova_end = cvec->iova + +- (cvec->len - len); +- len = 0; +- auth_end_get = 1; +- break; +- } +- len -= cvec->len; +- i--; +- cvec++; +- } +- +- if (unlikely(auth_end_get == 0)) +- return -1; +- } else +- auth_iova_end = data[0].iova + auth_param->auth_off + +- auth_param->auth_len; +- +- /* Then check if digest-encrypted conditions are met */ +- if ((auth_param->auth_off + auth_param->auth_len < +- cipher_param->cipher_offset + +- cipher_param->cipher_length) && +- (digest->iova == auth_iova_end)) { +- /* Handle partial digest encryption */ +- if (cipher_param->cipher_offset + +- cipher_param->cipher_length < +- auth_param->auth_off + +- auth_param->auth_len + +- ctx->digest_length) +- req->comn_mid.dst_length = +- req->comn_mid.src_length = +- auth_param->auth_off + +- auth_param->auth_len + +- ctx->digest_length; +- struct icp_qat_fw_comn_req_hdr *header = +- &req->comn_hdr; +- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( +- header->serv_specif_flags, +- ICP_QAT_FW_LA_DIGEST_IN_BUFFER); +- } + break; + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: +@@ -615,6 +566,54 @@ enqueue_one_chain_job(struct qat_sym_session *ctx, + break; + } + ++ if (unlikely(n_data_vecs > 1)) { ++ int auth_end_get = 0, i = n_data_vecs - 1; ++ struct rte_crypto_vec *cvec = &data[0]; ++ uint32_t len; ++ ++ len = data_len - ofs.ofs.auth.tail; ++ ++ while (i >= 0 && len > 0) { ++ if (cvec->len >= len) { ++ auth_iova_end = cvec->iova + len; ++ len = 0; ++ auth_end_get = 1; ++ 
break; ++ } ++ len -= cvec->len; ++ i--; ++ cvec++; ++ } ++ ++ if (unlikely(auth_end_get == 0)) ++ return -1; ++ } else ++ auth_iova_end = data[0].iova + auth_param->auth_off + ++ auth_param->auth_len; ++ ++ /* Then check if digest-encrypted conditions are met */ ++ if ((auth_param->auth_off + auth_param->auth_len < ++ cipher_param->cipher_offset + ++ cipher_param->cipher_length) && ++ (digest->iova == auth_iova_end)) { ++ /* Handle partial digest encryption */ ++ if (cipher_param->cipher_offset + ++ cipher_param->cipher_length < ++ auth_param->auth_off + ++ auth_param->auth_len + ++ ctx->digest_length) ++ req->comn_mid.dst_length = ++ req->comn_mid.src_length = ++ auth_param->auth_off + ++ auth_param->auth_len + ++ ctx->digest_length; ++ struct icp_qat_fw_comn_req_hdr *header = ++ &req->comn_hdr; ++ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( ++ header->serv_specif_flags, ++ ICP_QAT_FW_LA_DIGEST_IN_BUFFER); ++ } ++ + return 0; + } + +diff --git a/dpdk/drivers/crypto/qat/qat_sym_pmd.c b/dpdk/drivers/crypto/qat/qat_sym_pmd.c +index 6da9512fe4..93666fdade 100644 +--- a/dpdk/drivers/crypto/qat/qat_sym_pmd.c ++++ b/dpdk/drivers/crypto/qat/qat_sym_pmd.c +@@ -330,6 +330,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, + const struct rte_cryptodev_capabilities *capabilities; + uint64_t capa_size; + ++ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", ++ qat_pci_dev->name, "sym"); ++ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name); ++ + /* + * All processes must use same driver id so they can share sessions. + * Store driver_id so we can validate that all processes have the same +@@ -349,10 +353,6 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, + } + } + +- snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", +- qat_pci_dev->name, "sym"); +- QAT_LOG(DEBUG, "Creating QAT SYM device %s", name); +- + /* Populate subset device to use in cryptodev device creation */ + qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver; + qat_dev_instance->sym_rte_dev.numa_node = +diff --git a/dpdk/drivers/event/dlb/dlb.c b/dpdk/drivers/event/dlb/dlb.c +index 0c95c4793d..e2d5d43da7 100644 +--- a/dpdk/drivers/event/dlb/dlb.c ++++ b/dpdk/drivers/event/dlb/dlb.c +@@ -1847,7 +1847,7 @@ dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id) + { + struct dlb_hw_dev *handle = &dlb->qm_instance; + struct dlb_create_dir_queue_args cfg; +- struct dlb_cmd_response response; ++ struct dlb_cmd_response response = {0}; + int32_t ret; + + cfg.response = (uintptr_t)&response; +@@ -3569,7 +3569,7 @@ dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb, + { + struct dlb_hw_dev *handle = &dlb->qm_instance; + struct dlb_get_ldb_queue_depth_args cfg; +- struct dlb_cmd_response response; ++ struct dlb_cmd_response response = {0}; + int ret; + + cfg.queue_id = queue->qm_queue.id; +@@ -3591,7 +3591,7 @@ dlb_get_dir_queue_depth(struct dlb_eventdev *dlb, + { + struct dlb_hw_dev *handle = &dlb->qm_instance; + struct dlb_get_dir_queue_depth_args cfg; +- struct dlb_cmd_response response; ++ struct dlb_cmd_response response = {0}; + int ret; + + cfg.queue_id = queue->qm_queue.id; +diff --git a/dpdk/drivers/net/af_xdp/meson.build b/dpdk/drivers/net/af_xdp/meson.build +index fead8dd99f..ae7355c8c9 100644 +--- a/dpdk/drivers/net/af_xdp/meson.build ++++ b/dpdk/drivers/net/af_xdp/meson.build +@@ -3,14 +3,15 @@ + + sources = files('rte_eth_af_xdp.c') + +-bpf_dep = dependency('libbpf', required: false) ++bpf_dep = dependency('libbpf', required: false, method: 'pkg-config') + if not bpf_dep.found() + bpf_dep = 
cc.find_library('bpf', required: false) + endif + + if bpf_dep.found() and cc.has_header('bpf/xsk.h') and cc.has_header('linux/if_xdp.h') + ext_deps += bpf_dep +- bpf_ver_dep = dependency('libbpf', version : '>=0.2.0', required: false) ++ bpf_ver_dep = dependency('libbpf', version : '>=0.2.0', ++ required: false, method: 'pkg-config') + if bpf_ver_dep.found() + dpdk_conf.set('RTE_LIBRTE_AF_XDP_PMD_SHARED_UMEM', 1) + endif +diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +index 2c7892bd7e..7fc70df713 100644 +--- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c ++++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +@@ -840,7 +840,6 @@ xdp_umem_destroy(struct xsk_umem_info *umem) + #endif + + rte_free(umem); +- umem = NULL; + } + + static int +diff --git a/dpdk/drivers/net/avp/avp_ethdev.c b/dpdk/drivers/net/avp/avp_ethdev.c +index 5f8187b905..f531e03c02 100644 +--- a/dpdk/drivers/net/avp/avp_ethdev.c ++++ b/dpdk/drivers/net/avp/avp_ethdev.c +@@ -267,7 +267,7 @@ avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request) + break; + } + +- if ((count < 1) && (retry == 0)) { ++ if (retry == 0) { + PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n", + request->req_id); + ret = -ETIME; +diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c +index cfe6aba73a..9cd056d04a 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c ++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c +@@ -1439,7 +1439,7 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + dev->data->port_id); + return -EBUSY; + } +- if (frame_size > RTE_ETHER_MAX_LEN) { ++ if (frame_size > AXGBE_ETH_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + val = 1; +diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/dpdk/drivers/net/axgbe/axgbe_ethdev.h +index 35a8476466..ac9210f2c8 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.h ++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.h +@@ -125,6 +125,12 @@ + /* MDIO port types */ + #define AXGMAC_MAX_C22_PORT 3 + ++/* The max frame size with default MTU */ ++#define AXGBE_ETH_MAX_LEN ( \ ++ RTE_ETHER_MTU + \ ++ RTE_ETHER_HDR_LEN + \ ++ RTE_ETHER_CRC_LEN) ++ + /* Helper macro for descriptor handling + * Always use AXGBE_GET_DESC_DATA to access the descriptor data + * since the index is free-running and needs to be and-ed +diff --git a/dpdk/drivers/net/bnx2x/meson.build b/dpdk/drivers/net/bnx2x/meson.build +index 4892bb234c..9801697949 100644 +--- a/dpdk/drivers/net/bnx2x/meson.build ++++ b/dpdk/drivers/net/bnx2x/meson.build +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2018 Intel Corporation + +-dep = dependency('zlib', required: false) ++dep = dependency('zlib', required: false, method: 'pkg-config') + build = dep.found() + reason = 'missing dependency, "zlib"' + ext_deps += dep +diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h +index 90ced972c0..b912fd8564 100644 +--- a/dpdk/drivers/net/bnxt/bnxt.h ++++ b/dpdk/drivers/net/bnxt/bnxt.h +@@ -389,7 +389,7 @@ struct bnxt_coal { + #define DBR_TYPE_NQ (0xaULL << 60) + #define DBR_TYPE_NQ_ARM (0xbULL << 60) + +-#define BNXT_RSS_TBL_SIZE_THOR 512 ++#define BNXT_RSS_TBL_SIZE_THOR 512U + #define BNXT_RSS_ENTRIES_PER_CTX_THOR 64 + #define BNXT_MAX_RSS_CTXTS_THOR \ + (BNXT_RSS_TBL_SIZE_THOR / BNXT_RSS_ENTRIES_PER_CTX_THOR) +@@ -583,6 +583,7 @@ struct bnxt_rep_info { + DEV_RX_OFFLOAD_UDP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_CKSUM | \ + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ ++ 
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_KEEP_CRC | \ + DEV_RX_OFFLOAD_VLAN_EXTEND | \ +@@ -752,19 +753,6 @@ struct bnxt { + uint16_t max_tx_rings; + uint16_t max_rx_rings; + #define MAX_STINGRAY_RINGS 128U +-/* For sake of symmetry, max Tx rings == max Rx rings, one stat ctx for each */ +-#define BNXT_MAX_RX_RINGS(bp) \ +- (BNXT_STINGRAY(bp) ? RTE_MIN(RTE_MIN(bp->max_rx_rings / 2U, \ +- MAX_STINGRAY_RINGS), \ +- bp->max_stat_ctx / 2U) : \ +- RTE_MIN(bp->max_rx_rings / 2U, \ +- bp->max_stat_ctx / 2U)) +-#define BNXT_MAX_TX_RINGS(bp) \ +- (RTE_MIN((bp)->max_tx_rings, BNXT_MAX_RX_RINGS(bp))) +- +-#define BNXT_MAX_RINGS(bp) \ +- (RTE_MIN((((bp)->max_cp_rings - BNXT_NUM_ASYNC_CPR(bp)) / 2U), \ +- BNXT_MAX_TX_RINGS(bp))) + + #define BNXT_MAX_VF_REP_RINGS 8 + +@@ -823,6 +811,34 @@ struct bnxt { + uint16_t tx_cfa_action; + }; + ++static ++inline uint16_t bnxt_max_rings(struct bnxt *bp) ++{ ++ uint16_t max_tx_rings = bp->max_tx_rings; ++ uint16_t max_rx_rings = bp->max_rx_rings; ++ uint16_t max_cp_rings = bp->max_cp_rings; ++ uint16_t max_rings; ++ ++ /* For the sake of symmetry: ++ * max Tx rings == max Rx rings, one stat ctx for each. ++ */ ++ if (BNXT_STINGRAY(bp)) { ++ max_rx_rings = RTE_MIN(RTE_MIN(max_rx_rings / 2U, ++ MAX_STINGRAY_RINGS), ++ bp->max_stat_ctx / 2U); ++ } else { ++ max_rx_rings = RTE_MIN(max_rx_rings / 2U, ++ bp->max_stat_ctx / 2U); ++ } ++ ++ max_tx_rings = RTE_MIN(max_tx_rings, max_rx_rings); ++ if (max_cp_rings > BNXT_NUM_ASYNC_CPR(bp)) ++ max_cp_rings -= BNXT_NUM_ASYNC_CPR(bp); ++ max_rings = RTE_MIN(max_cp_rings / 2U, max_tx_rings); ++ ++ return max_rings; ++} ++ + #define BNXT_FC_TIMER 1 /* Timer freq in Sec Flow Counters */ + + /** +diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c +index 81c8f8d79d..3aa346d45c 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c ++++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c +@@ -207,12 +207,15 @@ int is_bnxt_in_error(struct bnxt *bp) + + static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) + { ++ unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings, ++ BNXT_RSS_TBL_SIZE_THOR); ++ + if (!BNXT_CHIP_THOR(bp)) + return 1; + +- return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings, ++ return RTE_ALIGN_MUL_CEIL(num_rss_rings, + BNXT_RSS_ENTRIES_PER_CTX_THOR) / +- BNXT_RSS_ENTRIES_PER_CTX_THOR; ++ BNXT_RSS_ENTRIES_PER_CTX_THOR; + } + + uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) +@@ -424,6 +427,14 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { + int j, nr_ctxs = bnxt_rss_ctxts(bp); + ++ if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_THOR) { ++ PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n", ++ bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_THOR); ++ PMD_DRV_LOG(ERR, ++ "Only queues 0-%d will be in RSS table\n", ++ BNXT_RSS_TBL_SIZE_THOR - 1); ++ } ++ + rc = 0; + for (j = 0; j < nr_ctxs; j++) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); +@@ -678,7 +689,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp) + return rc; + } + +-static int bnxt_init_chip(struct bnxt *bp) ++static int bnxt_start_nic(struct bnxt *bp) + { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; +@@ -909,7 +920,7 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, + if (BNXT_PF(bp)) + dev_info->max_vfs = pdev->max_vfs; + +- max_rx_rings = BNXT_MAX_RINGS(bp); ++ max_rx_rings = bnxt_max_rings(bp); + /* For the sake of symmetry, max_rx_queues = 
max_tx_queues */ + dev_info->max_rx_queues = max_rx_rings; + dev_info->max_tx_queues = max_rx_rings; +@@ -1060,13 +1071,6 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) + } + + pthread_mutex_unlock(&bp->def_cp_lock); +- } else { +- /* legacy driver needs to get updated values */ +- rc = bnxt_hwrm_func_qcaps(bp); +- if (rc) { +- PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc); +- return rc; +- } + } + + /* Inherit new configurations */ +@@ -1143,6 +1147,9 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) + if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) + return 1; + ++ if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) ++ return 1; ++ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; + +@@ -1175,6 +1182,7 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev) + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | ++ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | + DEV_RX_OFFLOAD_RSS_HASH | + DEV_RX_OFFLOAD_VLAN_FILTER)) && + !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) && +@@ -1248,81 +1256,6 @@ static int bnxt_handle_if_change_status(struct bnxt *bp) + return rc; + } + +-static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) +-{ +- struct bnxt *bp = eth_dev->data->dev_private; +- uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; +- int vlan_mask = 0; +- int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; +- +- if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { +- PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); +- return -EINVAL; +- } +- +- if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { +- PMD_DRV_LOG(ERR, +- "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", +- bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); +- } +- +- do { +- rc = bnxt_hwrm_if_change(bp, true); +- if (rc == 0 || rc != -EAGAIN) +- break; +- +- rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); +- } while (retry_cnt--); +- +- if (rc) +- return rc; +- +- if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { +- rc = bnxt_handle_if_change_status(bp); +- if (rc) +- return rc; +- } +- +- bnxt_enable_int(bp); +- +- rc = bnxt_init_chip(bp); +- if (rc) +- goto error; +- +- eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); +- eth_dev->data->dev_started = 1; +- +- bnxt_link_update_op(eth_dev, 1); +- +- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) +- vlan_mask |= ETH_VLAN_FILTER_MASK; +- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) +- vlan_mask |= ETH_VLAN_STRIP_MASK; +- rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); +- if (rc) +- goto error; +- +- /* Initialize bnxt ULP port details */ +- rc = bnxt_ulp_port_init(bp); +- if (rc) +- goto error; +- +- eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); +- eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); +- +- bnxt_schedule_fw_health_check(bp); +- +- return 0; +- +-error: +- bnxt_shutdown_nic(bp); +- bnxt_free_tx_mbufs(bp); +- bnxt_free_rx_mbufs(bp); +- bnxt_hwrm_if_change(bp, false); +- eth_dev->data->dev_started = 0; +- return rc; +-} +- + static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) + { + struct bnxt *bp = eth_dev->data->dev_private; +@@ -1429,31 +1362,98 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) + return 0; + } + +-static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) ++static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) + { + struct bnxt *bp = eth_dev->data->dev_private; +- int ret = 0; ++ uint64_t rx_offloads = 
eth_dev->data->dev_conf.rxmode.offloads; ++ int vlan_mask = 0; ++ int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return 0; ++ if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { ++ PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); ++ return -EINVAL; ++ } + +- /* cancel the recovery handler before remove dev */ +- rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); +- rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); +- bnxt_cancel_fc_thread(bp); ++ if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) ++ PMD_DRV_LOG(ERR, ++ "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", ++ bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); + +- if (eth_dev->data->dev_started) +- ret = bnxt_dev_stop_op(eth_dev); ++ do { ++ rc = bnxt_hwrm_if_change(bp, true); ++ if (rc == 0 || rc != -EAGAIN) ++ break; + +- bnxt_free_switch_domain(bp); ++ rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); ++ } while (retry_cnt--); + +- bnxt_uninit_resources(bp, false); ++ if (rc) ++ return rc; ++ ++ if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { ++ rc = bnxt_handle_if_change_status(bp); ++ if (rc) ++ return rc; ++ } ++ ++ bnxt_enable_int(bp); ++ ++ eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); ++ ++ rc = bnxt_start_nic(bp); ++ if (rc) ++ goto error; ++ ++ eth_dev->data->dev_started = 1; ++ ++ bnxt_link_update_op(eth_dev, 1); ++ ++ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ++ vlan_mask |= ETH_VLAN_FILTER_MASK; ++ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ++ vlan_mask |= ETH_VLAN_STRIP_MASK; ++ rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); ++ if (rc) ++ goto error; ++ ++ /* Initialize bnxt ULP port details */ ++ rc = bnxt_ulp_port_init(bp); ++ if (rc) ++ goto error; ++ ++ eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); ++ eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); ++ ++ bnxt_schedule_fw_health_check(bp); ++ ++ return 0; ++ ++error: ++ bnxt_dev_stop_op(eth_dev); ++ return rc; ++} + ++static void ++bnxt_uninit_locks(struct bnxt *bp) ++{ ++ pthread_mutex_destroy(&bp->flow_lock); ++ pthread_mutex_destroy(&bp->def_cp_lock); ++ pthread_mutex_destroy(&bp->health_check_lock); ++ if (bp->rep_info) { ++ pthread_mutex_destroy(&bp->rep_info->vfr_lock); ++ pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); ++ } ++} ++ ++static void bnxt_drv_uninit(struct bnxt *bp) ++{ ++ bnxt_free_switch_domain(bp); + bnxt_free_leds_info(bp); + bnxt_free_cos_queues(bp); + bnxt_free_link_info(bp); + bnxt_free_pf_info(bp); + bnxt_free_parent_info(bp); ++ bnxt_uninit_locks(bp); + + rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); + bp->tx_mem_zone = NULL; +@@ -1464,6 +1464,27 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) + + rte_free(bp->grp_info); + bp->grp_info = NULL; ++} ++ ++static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) ++{ ++ struct bnxt *bp = eth_dev->data->dev_private; ++ int ret = 0; ++ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ ++ /* cancel the recovery handler before remove dev */ ++ rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); ++ rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); ++ bnxt_cancel_fc_thread(bp); ++ ++ if (eth_dev->data->dev_started) ++ ret = bnxt_dev_stop_op(eth_dev); ++ ++ bnxt_uninit_resources(bp, false); ++ ++ bnxt_drv_uninit(bp); + + return ret; + } +@@ -1832,8 +1853,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, + } + } + +- bnxt_hwrm_vnic_rss_cfg(bp, vnic); +- return 0; ++ rc = 
bnxt_hwrm_vnic_rss_cfg(bp, vnic); ++ return rc; + } + + static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, +@@ -1938,8 +1959,8 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, + memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); + + rss_config: +- bnxt_hwrm_vnic_rss_cfg(bp, vnic); +- return 0; ++ rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); ++ return rc; + } + + static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, +@@ -4032,7 +4053,7 @@ bool bnxt_stratus_device(struct bnxt *bp) + } + } + +-static int bnxt_init_board(struct rte_eth_dev *eth_dev) ++static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) + { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct bnxt *bp = eth_dev->data->dev_private; +@@ -4657,7 +4678,11 @@ static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) + return 0; + } + +-static int bnxt_init_fw(struct bnxt *bp) ++/* This function gets the FW version along with the ++ * capabilities(MAX and current) of the function, vnic, ++ * error recovery, phy and other chip related info ++ */ ++static int bnxt_get_config(struct bnxt *bp) + { + uint16_t mtu; + int rc = 0; +@@ -4727,8 +4752,10 @@ bnxt_init_locks(struct bnxt *bp) + } + + err = pthread_mutex_init(&bp->def_cp_lock, NULL); +- if (err) ++ if (err) { + PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); ++ return err; ++ } + + err = pthread_mutex_init(&bp->health_check_lock, NULL); + if (err) +@@ -4740,7 +4767,7 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) + { + int rc = 0; + +- rc = bnxt_init_fw(bp); ++ rc = bnxt_get_config(bp); + if (rc) + return rc; + +@@ -4797,10 +4824,6 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) + return rc; + } + +- rc = bnxt_init_locks(bp); +- if (rc) +- return rc; +- + return 0; + } + +@@ -5191,38 +5214,14 @@ static int bnxt_alloc_switch_domain(struct bnxt *bp) + return rc; + } + +-static int +-bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) ++/* Allocate and initialize various fields in bnxt struct that ++ * need to be allocated/destroyed only once in the lifetime of the driver ++ */ ++static int bnxt_drv_init(struct rte_eth_dev *eth_dev) + { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); +- static int version_printed; +- struct bnxt *bp; +- int rc; +- +- if (version_printed++ == 0) +- PMD_DRV_LOG(INFO, "%s\n", bnxt_version); +- +- eth_dev->dev_ops = &bnxt_dev_ops; +- eth_dev->rx_queue_count = bnxt_rx_queue_count_op; +- eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; +- eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; +- eth_dev->rx_pkt_burst = &bnxt_recv_pkts; +- eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; +- +- /* +- * For secondary processes, we don't initialise any further +- * as primary has already done this work. +- */ +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return 0; +- +- rte_eth_copy_pci_info(eth_dev, pci_dev); +- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; +- +- bp = eth_dev->data->dev_private; +- +- /* Parse dev arguments passed on when starting the DPDK application. 
*/ +- bnxt_parse_dev_args(bp, pci_dev->device.devargs); ++ struct bnxt *bp = eth_dev->data->dev_private; ++ int rc = 0; + + bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; + +@@ -5254,7 +5253,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) + } + } + +- rc = bnxt_init_board(eth_dev); ++ rc = bnxt_map_pci_bars(eth_dev); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to initialize board rc: %x\n", rc); +@@ -5263,27 +5262,75 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) + + rc = bnxt_alloc_pf_info(bp); + if (rc) +- goto error_free; ++ return rc; + + rc = bnxt_alloc_link_info(bp); + if (rc) +- goto error_free; ++ return rc; + + rc = bnxt_alloc_parent_info(bp); + if (rc) +- goto error_free; ++ return rc; + + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to allocate hwrm resource rc: %x\n", rc); +- goto error_free; ++ return rc; + } + rc = bnxt_alloc_leds_info(bp); + if (rc) +- goto error_free; ++ return rc; + + rc = bnxt_alloc_cos_queues(bp); ++ if (rc) ++ return rc; ++ ++ rc = bnxt_init_locks(bp); ++ if (rc) ++ return rc; ++ ++ rc = bnxt_alloc_switch_domain(bp); ++ if (rc) ++ return rc; ++ ++ return rc; ++} ++ ++static int ++bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) ++{ ++ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); ++ static int version_printed; ++ struct bnxt *bp; ++ int rc; ++ ++ if (version_printed++ == 0) ++ PMD_DRV_LOG(INFO, "%s\n", bnxt_version); ++ ++ eth_dev->dev_ops = &bnxt_dev_ops; ++ eth_dev->rx_queue_count = bnxt_rx_queue_count_op; ++ eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; ++ eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; ++ eth_dev->rx_pkt_burst = &bnxt_recv_pkts; ++ eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; ++ ++ /* ++ * For secondary processes, we don't initialise any further ++ * as primary has already done this work. ++ */ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ ++ rte_eth_copy_pci_info(eth_dev, pci_dev); ++ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; ++ ++ bp = eth_dev->data->dev_private; ++ ++ /* Parse dev arguments passed on when starting the DPDK application. 
*/ ++ bnxt_parse_dev_args(bp, pci_dev->device.devargs); ++ ++ rc = bnxt_drv_init(eth_dev); + if (rc) + goto error_free; + +@@ -5295,8 +5342,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) + if (rc) + goto error_free; + +- bnxt_alloc_switch_domain(bp); +- + PMD_DRV_LOG(INFO, + DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n", + pci_dev->mem_resource[0].phys_addr, +@@ -5378,18 +5423,6 @@ bnxt_free_error_recovery_info(struct bnxt *bp) + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + } + +-static void +-bnxt_uninit_locks(struct bnxt *bp) +-{ +- pthread_mutex_destroy(&bp->flow_lock); +- pthread_mutex_destroy(&bp->def_cp_lock); +- pthread_mutex_destroy(&bp->health_check_lock); +- if (bp->rep_info) { +- pthread_mutex_destroy(&bp->rep_info->vfr_lock); +- pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); +- } +-} +- + static int + bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) + { +@@ -5411,7 +5444,6 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) + + bnxt_uninit_ctx_mem(bp); + +- bnxt_uninit_locks(bp); + bnxt_free_flow_stats_info(bp); + bnxt_free_rep_info(bp); + rte_free(bp->ptp_cfg); +diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c +index 24c33185b4..344895843b 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c ++++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c +@@ -718,6 +718,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) + sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0); + if (bp->pf->vf_info == NULL) { + PMD_DRV_LOG(ERR, "Alloc vf info fail\n"); ++ HWRM_UNLOCK(); + return -ENOMEM; + } + bp->pf->max_vfs = new_max_vfs; +@@ -1095,10 +1096,11 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) + else + HWRM_CHECK_RESULT(); + +- PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n", ++ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b, +- resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b); ++ resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b, ++ resp->hwrm_fw_rsvd_8b); + bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) | + (resp->hwrm_fw_min_8b << 16) | + (resp->hwrm_fw_bld_8b << 8) | +@@ -3455,6 +3457,35 @@ static int bnxt_update_max_resources(struct bnxt *bp, + return 0; + } + ++/* Update the PF resource values based on how many resources ++ * got allocated to it. 
++ */ ++static int bnxt_update_max_resources_pf_only(struct bnxt *bp) ++{ ++ struct hwrm_func_qcfg_input req = {0}; ++ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; ++ int rc; ++ ++ /* Get the actual allocated values now */ ++ HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); ++ req.fid = rte_cpu_to_le_16(0xffff); ++ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); ++ HWRM_CHECK_RESULT(); ++ ++ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx); ++ bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx); ++ bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings); ++ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings); ++ bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings); ++ bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx); ++ bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps); ++ bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics); ++ ++ HWRM_UNLOCK(); ++ ++ return 0; ++} ++ + int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) + { + struct hwrm_func_qcfg_input req = {0}; +@@ -3554,8 +3585,13 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) + HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE); + bp->pf->func_cfg_flags |= + HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE; ++ + rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc); +- rc = __bnxt_hwrm_func_qcaps(bp); ++ if (rc) ++ return rc; ++ ++ rc = bnxt_update_max_resources_pf_only(bp); ++ + return rc; + } + +@@ -4320,6 +4356,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) + return -ENOMEM; + dma_handle = rte_malloc_virt2iova(buf); + if (dma_handle == RTE_BAD_IOVA) { ++ rte_free(buf); + PMD_DRV_LOG(ERR, + "unable to map response address to physical memory\n"); + return -ENOMEM; +@@ -4354,6 +4391,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, + + dma_handle = rte_malloc_virt2iova(buf); + if (dma_handle == RTE_BAD_IOVA) { ++ rte_free(buf); + PMD_DRV_LOG(ERR, + "unable to map response address to physical memory\n"); + return -ENOMEM; +@@ -4407,6 +4445,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, + + dma_handle = rte_malloc_virt2iova(buf); + if (dma_handle == RTE_BAD_IOVA) { ++ rte_free(buf); + PMD_DRV_LOG(ERR, + "unable to map response address to physical memory\n"); + return -ENOMEM; +@@ -4892,37 +4931,35 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic) + { + unsigned int rss_idx, fw_idx, i; + ++ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) ++ return 0; ++ + if (!(vnic->rss_table && vnic->hash_type)) + return 0; + + if (BNXT_CHIP_THOR(bp)) + return bnxt_vnic_rss_configure_thor(bp, vnic); + +- if (vnic->fw_vnic_id == INVALID_HW_RING_ID) +- return 0; +- +- if (vnic->rss_table && vnic->hash_type) { +- /* +- * Fill the RSS hash & redirection table with +- * ring group ids for all VNICs +- */ +- for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; +- rss_idx++, fw_idx++) { +- for (i = 0; i < bp->rx_cp_nr_rings; i++) { +- fw_idx %= bp->rx_cp_nr_rings; +- if (vnic->fw_grp_ids[fw_idx] != +- INVALID_HW_RING_ID) +- break; +- fw_idx++; +- } +- if (i == bp->rx_cp_nr_rings) +- return 0; +- vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx]; ++ /* ++ * Fill the RSS hash & redirection table with ++ * ring group ids for all VNICs ++ */ ++ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ++ rss_idx++, fw_idx++) { ++ for (i = 0; i < bp->rx_cp_nr_rings; i++) { ++ fw_idx %= bp->rx_cp_nr_rings; ++ if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID) ++ 
break; ++ fw_idx++; + } +- return bnxt_hwrm_vnic_rss_cfg(bp, vnic); ++ ++ if (i == bp->rx_cp_nr_rings) ++ return 0; ++ ++ vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx]; + } + +- return 0; ++ return bnxt_hwrm_vnic_rss_cfg(bp, vnic); + } + + static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, +diff --git a/dpdk/drivers/net/bnxt/bnxt_reps.c b/dpdk/drivers/net/bnxt/bnxt_reps.c +index e5ba0909b9..167c46ad41 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_reps.c ++++ b/dpdk/drivers/net/bnxt/bnxt_reps.c +@@ -65,7 +65,7 @@ bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf) + /* Representor Rx ring full, drop pkt */ + vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len; + vfr_bp->rx_drop_pkts[que]++; +- rte_pktmbuf_free(mbuf); ++ rte_mbuf_raw_free(mbuf); + } + + return 0; +diff --git a/dpdk/drivers/net/bnxt/bnxt_ring.c b/dpdk/drivers/net/bnxt/bnxt_ring.c +index aeb6cb6150..94cf7d3de2 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_ring.c ++++ b/dpdk/drivers/net/bnxt/bnxt_ring.c +@@ -568,6 +568,17 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index) + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + int rc; + ++ /* ++ * Storage for the cp ring is allocated based on worst-case ++ * usage, the actual size to be used by hw is computed here. ++ */ ++ cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2; ++ ++ if (bp->eth_dev->data->scattered_rx) ++ cp_ring->ring_size *= AGG_RING_SIZE_FACTOR; ++ ++ cp_ring->ring_mask = cp_ring->ring_size - 1; ++ + rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr); + if (rc) + goto err_out; +@@ -679,6 +690,17 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + ++ /* ++ * Storage for the cp ring is allocated based on worst-case ++ * usage, the actual size to be used by hw is computed here. ++ */ ++ cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2; ++ ++ if (bp->eth_dev->data->scattered_rx) ++ cp_ring->ring_size *= AGG_RING_SIZE_FACTOR; ++ ++ cp_ring->ring_mask = cp_ring->ring_size - 1; ++ + if (bnxt_alloc_cmpl_ring(bp, i, cpr)) + goto err_out; + +diff --git a/dpdk/drivers/net/bnxt/bnxt_rxq.c b/dpdk/drivers/net/bnxt/bnxt_rxq.c +index e0ec342162..8637559370 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_rxq.c ++++ b/dpdk/drivers/net/bnxt/bnxt_rxq.c +@@ -311,7 +311,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, + if (rc) + return rc; + +- if (queue_idx >= BNXT_MAX_RINGS(bp)) { ++ if (queue_idx >= bnxt_max_rings(bp)) { + PMD_DRV_LOG(ERR, + "Cannot create Rx ring %d. Only %d rings available\n", + queue_idx, bp->max_rx_rings); +@@ -364,8 +364,9 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, + + eth_dev->data->rx_queues[queue_idx] = rxq; + /* Allocate RX ring hardware descriptors */ +- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL, +- "rxr")) { ++ rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL, ++ "rxr"); ++ if (rc) { + PMD_DRV_LOG(ERR, + "ring_dma_zone_reserve for rx_ring failed!\n"); + goto err; +@@ -557,12 +558,12 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + rc = bnxt_vnic_rss_configure(bp, vnic); + } + +- if (BNXT_CHIP_THOR(bp)) { +- /* Compute current number of active receive queues. */ +- for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) +- if (bp->rx_queues[i]->rx_started) +- active_queue_cnt++; ++ /* Compute current number of active receive queues. 
*/ ++ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) ++ if (bp->rx_queues[i]->rx_started) ++ active_queue_cnt++; + ++ if (BNXT_CHIP_THOR(bp)) { + /* + * For Thor, we need to ensure that the VNIC default receive + * ring corresponds to an active receive queue. When no queue +@@ -582,6 +583,22 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + /* Reconfigure default receive ring. */ + bnxt_hwrm_vnic_cfg(bp, vnic); + } ++ } else if (active_queue_cnt) { ++ /* ++ * If the queue being stopped is the current default queue and ++ * there are other active queues, pick one of them as the ++ * default and reconfigure the vnic. ++ */ ++ if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) { ++ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) { ++ if (bp->rx_queues[i]->rx_started) { ++ vnic->dflt_ring_grp = ++ bp->grp_info[i].fw_grp_id; ++ bnxt_hwrm_vnic_cfg(bp, vnic); ++ break; ++ } ++ } ++ } + } + + if (rc == 0) +diff --git a/dpdk/drivers/net/bnxt/bnxt_rxr.c b/dpdk/drivers/net/bnxt/bnxt_rxr.c +index fdbe6f71ea..b28b7fb561 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_rxr.c ++++ b/dpdk/drivers/net/bnxt/bnxt_rxr.c +@@ -267,6 +267,7 @@ static int bnxt_rx_pages(struct bnxt_rx_queue *rxq, + */ + rte_bitmap_set(rxr->ag_bitmap, ag_cons); + } ++ last->next = NULL; + bnxt_prod_ag_mbuf(rxq); + return 0; + } +@@ -344,7 +345,7 @@ bnxt_init_ptype_table(void) + + ip6 = i & (RX_PKT_CMPL_FLAGS2_IP_TYPE >> 7); + tun = i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC >> 2); +- type = (i & 0x38) << 9; ++ type = (i & 0x78) << 9; + + if (!tun && !ip6) + l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; +@@ -406,62 +407,98 @@ bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1) + return bnxt_ptype_table[index]; + } + +-uint32_t +-bnxt_ol_flags_table[BNXT_OL_FLAGS_TBL_DIM] __rte_cache_aligned; +- +-uint32_t +-bnxt_ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM] __rte_cache_aligned; +- + static void __rte_cold +-bnxt_init_ol_flags_tables(void) ++bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq) + { +- static bool initialized; ++ struct bnxt_rx_ring_info *rxr = rxq->rx_ring; ++ struct rte_eth_conf *dev_conf; ++ bool outer_cksum_enabled; ++ uint64_t offloads; + uint32_t *pt; + int i; + +- if (initialized) +- return; ++ dev_conf = &rxq->bp->eth_dev->data->dev_conf; ++ offloads = dev_conf->rxmode.offloads; ++ ++ outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | ++ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)); + + /* Initialize ol_flags table. */ +- pt = bnxt_ol_flags_table; ++ pt = rxr->ol_flags_table; + for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) { + pt[i] = 0; ++ + if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) + pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + +- if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) +- pt[i] |= PKT_RX_IP_CKSUM_GOOD; ++ if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) { ++ /* Tunnel case. */ ++ if (outer_cksum_enabled) { ++ if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) ++ pt[i] |= PKT_RX_IP_CKSUM_GOOD; + +- if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) +- pt[i] |= PKT_RX_L4_CKSUM_GOOD; ++ if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) ++ pt[i] |= PKT_RX_L4_CKSUM_GOOD; ++ ++ if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) ++ pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD; ++ } else { ++ if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) ++ pt[i] |= PKT_RX_IP_CKSUM_GOOD; + +- if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) +- pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD; ++ if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) ++ pt[i] |= PKT_RX_L4_CKSUM_GOOD; ++ } ++ } else { ++ /* Non-tunnel case. 
*/ ++ if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) ++ pt[i] |= PKT_RX_IP_CKSUM_GOOD; ++ ++ if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) ++ pt[i] |= PKT_RX_L4_CKSUM_GOOD; ++ } + } + + /* Initialize checksum error table. */ +- pt = bnxt_ol_flags_err_table; ++ pt = rxr->ol_flags_err_table; + for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) { + pt[i] = 0; +- if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4)) +- pt[i] |= PKT_RX_IP_CKSUM_BAD; + +- if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4)) +- pt[i] |= PKT_RX_L4_CKSUM_BAD; ++ if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 2)) { ++ /* Tunnel case. */ ++ if (outer_cksum_enabled) { ++ if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_IP_CKSUM_BAD; + +- if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4)) +- pt[i] |= PKT_RX_EIP_CKSUM_BAD; ++ if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_EIP_CKSUM_BAD; + +- if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4)) +- pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD; +- } ++ if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_L4_CKSUM_BAD; + +- initialized = true; ++ if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD; ++ } else { ++ if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_IP_CKSUM_BAD; ++ ++ if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_L4_CKSUM_BAD; ++ } ++ } else { ++ /* Non-tunnel case. */ ++ if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_IP_CKSUM_BAD; ++ ++ if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4)) ++ pt[i] |= PKT_RX_L4_CKSUM_BAD; ++ } ++ } + } + + static void +-bnxt_set_ol_flags(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1, +- struct rte_mbuf *mbuf) ++bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp, ++ struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf) + { + uint16_t flags_type, errors, flags; + uint64_t ol_flags; +@@ -475,6 +512,7 @@ bnxt_set_ol_flags(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1, + RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC | + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN); + ++ flags |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 3; + errors = rte_le_to_cpu_16(rxcmp1->errors_v2) & + (RX_PKT_CMPL_ERRORS_IP_CS_ERROR | + RX_PKT_CMPL_ERRORS_L4_CS_ERROR | +@@ -482,10 +520,12 @@ bnxt_set_ol_flags(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1, + RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR); + errors = (errors >> 4) & flags; + +- ol_flags = bnxt_ol_flags_table[flags & ~errors]; ++ ol_flags = rxr->ol_flags_table[flags & ~errors]; + +- if (errors) +- ol_flags |= bnxt_ol_flags_err_table[errors]; ++ if (unlikely(errors)) { ++ errors |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 2; ++ ol_flags |= rxr->ol_flags_err_table[errors]; ++ } + + if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) { + mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash); +@@ -740,7 +780,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, + mbuf->data_len = mbuf->pkt_len; + mbuf->port = rxq->port_id; + +- bnxt_set_ol_flags(rxcmp, rxcmp1, mbuf); ++ bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf); + + #ifdef RTE_LIBRTE_IEEE1588 + if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) & +@@ -827,6 +867,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint32_t raw_cons = cpr->cp_raw_cons; ++ bool alloc_failed = false; + uint32_t cons; + int nb_rx_pkts = 0; + int nb_rep_rx_pkts = 0; +@@ -875,12 +916,16 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts, + /* TODO: Avoid magic numbers... */ + if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) { + rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons); +- if (likely(!rc) || rc == -ENOMEM) ++ if (!rc) + nb_rx_pkts++; +- if (rc == -EBUSY) /* partial completion */ ++ else if (rc == -EBUSY) /* partial completion */ + break; +- if (rc == -ENODEV) /* completion for representor */ ++ else if (rc == -ENODEV) /* completion for representor */ + nb_rep_rx_pkts++; ++ else if (rc == -ENOMEM) { ++ nb_rx_pkts++; ++ alloc_failed = true; ++ } + } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) { + evt = + bnxt_event_hwrm_resp_handler(rxq->bp, +@@ -891,7 +936,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + } + + raw_cons = NEXT_RAW_CMP(raw_cons); +- if (nb_rx_pkts == nb_pkts || evt) ++ if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt) + break; + /* Post some Rx buf early in case of larger burst processing */ + if (nb_rx_pkts == BNXT_RX_POST_THRESH) +@@ -907,6 +952,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + goto done; + } + ++ /* Ring the completion queue doorbell. */ ++ bnxt_db_cq(cpr); ++ ++ /* Ring the receive descriptor doorbell. */ + if (prod != rxr->rx_prod) + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + +@@ -914,24 +963,23 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + if (ag_prod != rxr->ag_prod) + bnxt_db_write(&rxr->ag_db, rxr->ag_prod); + +- bnxt_db_cq(cpr); +- + /* Attempt to alloc Rx buf in case of a previous allocation failure. */ +- if (rc == -ENOMEM) { +- int i = RING_NEXT(rxr->rx_ring_struct, prod); +- int cnt = nb_rx_pkts; ++ if (alloc_failed) { ++ uint16_t cnt; ++ ++ for (cnt = 0; cnt < nb_rx_pkts + nb_rep_rx_pkts; cnt++) { ++ struct rte_mbuf **rx_buf; + +- for (; cnt; +- i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) { +- struct rte_mbuf **rx_buf = &rxr->rx_buf_ring[i]; ++ prod = RING_NEXT(rxr->rx_ring_struct, prod); ++ rx_buf = &rxr->rx_buf_ring[prod]; + + /* Buffer already allocated for this index. */ + if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf) + continue; + + /* This slot is empty. Alloc buffer for Rx */ +- if (!bnxt_alloc_rx_data(rxq, rxr, i)) { +- rxr->rx_prod = i; ++ if (!bnxt_alloc_rx_data(rxq, rxr, prod)) { ++ rxr->rx_prod = prod; + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + } else { + PMD_DRV_LOG(ERR, "Alloc mbuf failed\n"); +@@ -992,12 +1040,9 @@ void bnxt_free_rx_rings(struct bnxt *bp) + + int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id) + { +- struct rte_eth_dev *eth_dev = rxq->bp->eth_dev; +- struct rte_eth_rxmode *rxmode; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring *ring; +- bool use_agg_ring; + + rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf); + +@@ -1040,19 +1085,9 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id) + return -ENOMEM; + cpr->cp_ring_struct = ring; + +- rxmode = ð_dev->data->dev_conf.rxmode; +- use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) || +- (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) || +- (rxmode->max_rx_pkt_len > +- (uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - +- RTE_PKTMBUF_HEADROOM)); +- + /* Allocate two completion slots per entry in desc ring. */ + ring->ring_size = rxr->rx_ring_struct->ring_size * 2; +- +- /* Allocate additional slots if aggregation ring is in use. 
*/ +- if (use_agg_ring) +- ring->ring_size *= AGG_RING_SIZE_FACTOR; ++ ring->ring_size *= AGG_RING_SIZE_FACTOR; + + ring->ring_size = rte_align32pow2(ring->ring_size); + ring->ring_mask = ring->ring_size - 1; +@@ -1107,18 +1142,18 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) + /* Initialize packet type table. */ + bnxt_init_ptype_table(); + +- /* Initialize offload flags parsing table. */ +- bnxt_init_ol_flags_tables(); +- + size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; + size = RTE_MIN(BNXT_MAX_PKT_LEN, size); + +- type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD; ++ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; + + rxr = rxq->rx_ring; + ring = rxr->rx_ring_struct; + bnxt_init_rxbds(ring, type, size); + ++ /* Initialize offload flags parsing table. */ ++ bnxt_init_ol_flags_tables(rxq); ++ + prod = rxr->rx_prod; + for (i = 0; i < ring->ring_size; i++) { + if (unlikely(!rxr->rx_buf_ring[i])) { +diff --git a/dpdk/drivers/net/bnxt/bnxt_rxr.h b/dpdk/drivers/net/bnxt/bnxt_rxr.h +index 3fc901fdf0..46c34e6e16 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_rxr.h ++++ b/dpdk/drivers/net/bnxt/bnxt_rxr.h +@@ -42,6 +42,9 @@ static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp, + /* Number of descriptors to process per inner loop in vector mode. */ + #define RTE_BNXT_DESCS_PER_LOOP 4U + ++#define BNXT_OL_FLAGS_TBL_DIM 64 ++#define BNXT_OL_FLAGS_ERR_TBL_DIM 32 ++ + struct bnxt_tpa_info { + struct rte_mbuf *mbuf; + uint16_t len; +@@ -73,6 +76,9 @@ struct bnxt_rx_ring_info { + struct rte_bitmap *ag_bitmap; + + struct bnxt_tpa_info *tpa_info; ++ ++ uint32_t ol_flags_table[BNXT_OL_FLAGS_TBL_DIM]; ++ uint32_t ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM]; + }; + + uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -116,10 +122,4 @@ bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf) + + #define BNXT_PTYPE_TBL_DIM 128 + extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM]; +- +-#define BNXT_OL_FLAGS_TBL_DIM 32 +-extern uint32_t bnxt_ol_flags_table[BNXT_OL_FLAGS_TBL_DIM]; +- +-#define BNXT_OL_FLAGS_ERR_TBL_DIM 16 +-extern uint32_t bnxt_ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM]; + #endif +diff --git a/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c +index de1d96570c..54f47a3fe1 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c ++++ b/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c +@@ -27,11 +27,11 @@ + uint32_t tmp, of; \ + \ + of = vgetq_lane_u32((rss_flags), (pi)) | \ +- bnxt_ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \ ++ rxr->ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \ + \ + tmp = vgetq_lane_u32((errors), (pi)); \ + if (tmp) \ +- of |= bnxt_ol_flags_err_table[tmp]; \ ++ of |= rxr->ol_flags_err_table[tmp]; \ + (ol_flags) = of; \ + } + +@@ -58,7 +58,8 @@ + + static void + descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4], +- uint64x2_t mb_init, struct rte_mbuf **mbuf) ++ uint64x2_t mb_init, struct rte_mbuf **mbuf, ++ struct bnxt_rx_ring_info *rxr) + { + const uint8x16_t shuf_msk = { + 0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */ +@@ -79,7 +80,7 @@ descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4], + const uint32x4_t flags2_index_mask = vdupq_n_u32(0x1F); + const uint32x4_t flags2_error_mask = vdupq_n_u32(0x0F); + uint32x4_t flags_type, flags2, index, errors, rss_flags; +- uint32x4_t tmp, ptype_idx; ++ uint32x4_t tmp, ptype_idx, is_tunnel; + uint64x2_t t0, t1; + uint32_t ol_flags; + +@@ -116,10 +117,14 @@ descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t 
mm_rxcmp1[4], + vget_low_u64(t1))); + + /* Compute ol_flags and checksum error indexes for four packets. */ ++ is_tunnel = vandq_u32(flags2, vdupq_n_u32(4)); ++ is_tunnel = vshlq_n_u32(is_tunnel, 3); + errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask); + errors = vandq_u32(errors, flags2); + + index = vbicq_u32(flags2, errors); ++ errors = vorrq_u32(errors, vshrq_n_u32(is_tunnel, 1)); ++ index = vorrq_u32(index, is_tunnel); + + /* Update mbuf rearm_data for four packets. */ + GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags); +@@ -286,7 +291,8 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + goto out; + } + +- descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts]); ++ descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts], ++ rxr); + nb_rx_pkts += num_valid; + + if (num_valid < RTE_BNXT_DESCS_PER_LOOP) +diff --git a/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c +index e12bf8bb76..621f567890 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c ++++ b/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c +@@ -27,11 +27,11 @@ + uint32_t tmp, of; \ + \ + of = _mm_extract_epi32((rss_flags), (pi)) | \ +- bnxt_ol_flags_table[_mm_extract_epi32((ol_index), (pi))]; \ ++ rxr->ol_flags_table[_mm_extract_epi32((ol_index), (pi))]; \ + \ + tmp = _mm_extract_epi32((errors), (pi)); \ + if (tmp) \ +- of |= bnxt_ol_flags_err_table[tmp]; \ ++ of |= rxr->ol_flags_err_table[tmp]; \ + (ol_flags) = of; \ + } + +@@ -54,7 +54,8 @@ + + static inline void + descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4], +- __m128i mbuf_init, struct rte_mbuf **mbuf) ++ __m128i mbuf_init, struct rte_mbuf **mbuf, ++ struct bnxt_rx_ring_info *rxr) + { + const __m128i shuf_msk = + _mm_set_epi8(15, 14, 13, 12, /* rss */ +@@ -72,7 +73,7 @@ descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4], + const __m128i rss_mask = + _mm_set1_epi32(RX_PKT_CMPL_FLAGS_RSS_VALID); + __m128i t0, t1, flags_type, flags2, index, errors, rss_flags; +- __m128i ptype_idx; ++ __m128i ptype_idx, is_tunnel; + uint32_t ol_flags; + + /* Compute packet type table indexes for four packets */ +@@ -99,6 +100,8 @@ descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4], + t1 = _mm_unpackhi_epi32(mm_rxcmp1[2], mm_rxcmp1[3]); + + /* Compute ol_flags and checksum error indexes for four packets. */ ++ is_tunnel = _mm_and_si128(flags2, _mm_set1_epi32(4)); ++ is_tunnel = _mm_slli_epi32(is_tunnel, 3); + flags2 = _mm_and_si128(flags2, _mm_set1_epi32(0x1F)); + + errors = _mm_srli_epi32(_mm_unpacklo_epi64(t0, t1), 4); +@@ -106,6 +109,8 @@ descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4], + errors = _mm_and_si128(errors, flags2); + + index = _mm_andnot_si128(errors, flags2); ++ errors = _mm_or_si128(errors, _mm_srli_epi32(is_tunnel, 1)); ++ index = _mm_or_si128(index, is_tunnel); + + /* Update mbuf rearm_data for four packets. 
*/ + GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags); +@@ -268,7 +273,8 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + goto out; + } + +- descs_to_mbufs(rxcmp, rxcmp1, mbuf_init, &rx_pkts[nb_rx_pkts]); ++ descs_to_mbufs(rxcmp, rxcmp1, mbuf_init, &rx_pkts[nb_rx_pkts], ++ rxr); + nb_rx_pkts += num_valid; + + if (num_valid < RTE_BNXT_DESCS_PER_LOOP) +diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c +index c9792a2af2..99a31cef28 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txq.c ++++ b/dpdk/drivers/net/bnxt/bnxt_txq.c +@@ -98,7 +98,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + if (rc) + return rc; + +- if (queue_idx >= BNXT_MAX_RINGS(bp)) { ++ if (queue_idx >= bnxt_max_rings(bp)) { + PMD_DRV_LOG(ERR, + "Cannot create Tx ring %d. Only %d rings available\n", + queue_idx, bp->max_tx_rings); +diff --git a/dpdk/drivers/net/bnxt/tf_core/tf_core.c b/dpdk/drivers/net/bnxt/tf_core/tf_core.c +index 24d49096a7..3409cbbcec 100644 +--- a/dpdk/drivers/net/bnxt/tf_core/tf_core.c ++++ b/dpdk/drivers/net/bnxt/tf_core/tf_core.c +@@ -82,7 +82,7 @@ tf_open_session(struct tf *tfp, + return rc; + + TFP_DRV_LOG(INFO, +- "domain:%d, bus:%d, device:%d\n", ++ "domain:%d, bus:%d, device:%u\n", + parms->session_id.internal.domain, + parms->session_id.internal.bus, + parms->session_id.internal.device); +diff --git a/dpdk/drivers/net/bnxt/tf_core/tf_em_common.c b/dpdk/drivers/net/bnxt/tf_core/tf_em_common.c +index ad92cbdc75..c96c21c2e9 100644 +--- a/dpdk/drivers/net/bnxt/tf_core/tf_em_common.c ++++ b/dpdk/drivers/net/bnxt/tf_core/tf_em_common.c +@@ -307,7 +307,6 @@ tf_em_page_tbl_pgcnt(uint32_t num_pages, + { + return roundup(num_pages, MAX_PAGE_PTRS(page_size)) / + MAX_PAGE_PTRS(page_size); +- return 0; + } + + /** +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +index 67ca0730fa..5fe004e551 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +@@ -1334,8 +1334,7 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, + rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr); + + if (internals->mode4.dedicated_queues.enabled == 0) { +- int retval = rte_ring_enqueue(port->tx_ring, pkt); +- if (retval != 0) { ++ if (rte_ring_enqueue(port->tx_ring, pkt) != 0) { + /* reset timer */ + port->rx_marker_timer = 0; + wrn = WRN_TX_QUEUE_FULL; +@@ -1355,8 +1354,7 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, + } + } else if (likely(subtype == SLOW_SUBTYPE_LACP)) { + if (internals->mode4.dedicated_queues.enabled == 0) { +- int retval = rte_ring_enqueue(port->rx_ring, pkt); +- if (retval != 0) { ++ if (rte_ring_enqueue(port->rx_ring, pkt) != 0) { + /* If RX fing full free lacpdu message and drop packet */ + wrn = WRN_RX_QUEUE_FULL; + goto free_out; +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_args.c b/dpdk/drivers/net/bonding/rte_eth_bond_args.c +index abdf552610..8c5f90dc63 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_args.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_args.c +@@ -22,23 +22,37 @@ const char *pmd_bond_init_valid_arguments[] = { + NULL + }; + ++static inline int ++bond_pci_addr_cmp(const struct rte_device *dev, const void *_pci_addr) ++{ ++ const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev); ++ const struct rte_pci_addr *paddr = _pci_addr; ++ ++ return rte_pci_addr_cmp(&pdev->addr, paddr); ++} ++ + static inline int + find_port_id_by_pci_addr(const struct 
rte_pci_addr *pci_addr) + { +- struct rte_pci_device *pci_dev; +- struct rte_pci_addr *eth_pci_addr; ++ struct rte_bus *pci_bus; ++ struct rte_device *dev; + unsigned i; + +- RTE_ETH_FOREACH_DEV(i) { +- pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]); +- eth_pci_addr = &pci_dev->addr; ++ pci_bus = rte_bus_find_by_name("pci"); ++ if (pci_bus == NULL) { ++ RTE_BOND_LOG(ERR, "No PCI bus found"); ++ return -1; ++ } + +- if (pci_addr->bus == eth_pci_addr->bus && +- pci_addr->devid == eth_pci_addr->devid && +- pci_addr->domain == eth_pci_addr->domain && +- pci_addr->function == eth_pci_addr->function) +- return i; ++ dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, pci_addr); ++ if (dev == NULL) { ++ RTE_BOND_LOG(ERR, "unable to find PCI device"); ++ return -1; + } ++ ++ RTE_ETH_FOREACH_DEV(i) ++ if (rte_eth_devices[i].device == dev) ++ return i; + return -1; + } + +@@ -57,15 +71,6 @@ find_port_id_by_dev_name(const char *name) + return -1; + } + +-static inline int +-bond_pci_addr_cmp(const struct rte_device *dev, const void *_pci_addr) +-{ +- const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev); +- const struct rte_pci_addr *paddr = _pci_addr; +- +- return rte_pci_addr_cmp(&pdev->addr, paddr); +-} +- + /** + * Parses a port identifier string to a port id by pci address, then by name, + * and finally port id. +@@ -74,23 +79,10 @@ static inline int + parse_port_id(const char *port_str) + { + struct rte_pci_addr dev_addr; +- struct rte_bus *pci_bus; +- struct rte_device *dev; + int port_id; + +- pci_bus = rte_bus_find_by_name("pci"); +- if (pci_bus == NULL) { +- RTE_BOND_LOG(ERR, "unable to find PCI bus\n"); +- return -1; +- } +- + /* try parsing as pci address, physical devices */ +- if (pci_bus->parse(port_str, &dev_addr) == 0) { +- dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr); +- if (dev == NULL) { +- RTE_BOND_LOG(ERR, "unable to find PCI device"); +- return -1; +- } ++ if (rte_pci_addr_parse(port_str, &dev_addr) == 0) { + port_id = find_port_id_by_pci_addr(&dev_addr); + if (port_id < 0) + return -1; +@@ -108,9 +100,8 @@ parse_port_id(const char *port_str) + } + } + +- if (port_id < 0 || port_id > RTE_MAX_ETHPORTS) { +- RTE_BOND_LOG(ERR, "Slave port specified (%s) outside expected range", +- port_str); ++ if (!rte_eth_dev_is_valid_port(port_id)) { ++ RTE_BOND_LOG(ERR, "Specified port (%s) is invalid", port_str); + return -1; + } + return port_id; +diff --git a/dpdk/drivers/net/cxgbe/cxgbe.h b/dpdk/drivers/net/cxgbe/cxgbe.h +index ef62af1c3f..7c89a028bf 100644 +--- a/dpdk/drivers/net/cxgbe/cxgbe.h ++++ b/dpdk/drivers/net/cxgbe/cxgbe.h +@@ -19,6 +19,10 @@ + #define CXGBE_MAX_RX_PKTLEN (9000 + RTE_ETHER_HDR_LEN + \ + RTE_ETHER_CRC_LEN) /* max pkt */ + ++/* The max frame size with default MTU */ ++#define CXGBE_ETH_MAX_LEN (RTE_ETHER_MTU + \ ++ RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) ++ + /* Max poll time is 100 * 100msec = 10 sec */ + #define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */ + #define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */ +diff --git a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +index 98d0362fa3..480d6f58a8 100644 +--- a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c ++++ b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +@@ -300,7 +300,7 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + return -EINVAL; + + /* set to jumbo mode if needed */ +- if (new_mtu > RTE_ETHER_MAX_LEN) ++ if (new_mtu > CXGBE_ETH_MAX_LEN) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +@@ 
-669,7 +669,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + rxq->fl.size = temp_nb_desc; + + /* Set to jumbo mode if necessary */ +- if (pkt_len > RTE_ETHER_MAX_LEN) ++ if (pkt_len > CXGBE_ETH_MAX_LEN) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/cxgbe/cxgbe_flow.c b/dpdk/drivers/net/cxgbe/cxgbe_flow.c +index f7c4f36962..520a5a5c9a 100644 +--- a/dpdk/drivers/net/cxgbe/cxgbe_flow.c ++++ b/dpdk/drivers/net/cxgbe/cxgbe_flow.c +@@ -245,11 +245,6 @@ ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item, + /* If user has not given any mask, then use chelsio supported mask. */ + mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask; + +- if (!fs->mask.ethtype) +- return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, +- item, +- "Can't parse VLAN item without knowing ethertype"); +- + /* If ethertype is already set and is not VLAN (0x8100) or + * QINQ(0x88A8), then don't proceed further. Otherwise, + * reset the outer ethertype, so that it can be replaced by +@@ -275,7 +270,7 @@ ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item, + fs->mask.ethtype = 0; + fs->val.ethtype = 0; + } +- } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) { ++ } else { + CXGBE_FILL_FS(1, 1, ivlan_vld); + if (spec) { + if (spec->tci || (umask && umask->tci)) +diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c +index f00279e004..0c87c136d7 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c ++++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c +@@ -184,7 +184,7 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EINVAL; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > DPAA_ETH_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.h b/dpdk/drivers/net/dpaa/dpaa_ethdev.h +index 659bceb467..a858b1372c 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_ethdev.h ++++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.h +@@ -51,6 +51,10 @@ + #define VLAN_TAG_SIZE 4 /** < Vlan Header Length */ + #endif + ++#define DPAA_ETH_MAX_LEN (RTE_ETHER_MTU + \ ++ RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ ++ VLAN_TAG_SIZE) ++ + /* PCD frame queues */ + #define DPAA_DEFAULT_NUM_PCD_QUEUES 1 + #define DPAA_VSP_PROFILE_MAX_NUM 8 +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +index ab6863300e..6f38da3cce 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +@@ -1420,7 +1420,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN) + return -EINVAL; + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > DPAA2_ETH_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h +index 8d82f74684..cacb11bd3e 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h ++++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h +@@ -26,6 +26,10 @@ + + #define DPAA2_RX_DEFAULT_NBDESC 512 + ++#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \ ++ RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ ++ VLAN_TAG_SIZE) ++ + /*default tc to be used for ,congestion, distribution etc configuration. 
*/ + #define DPAA2_DEF_TC 0 + +diff --git a/dpdk/drivers/net/e1000/e1000_ethdev.h b/dpdk/drivers/net/e1000/e1000_ethdev.h +index 4755a5f333..3b4d9c3ee6 100644 +--- a/dpdk/drivers/net/e1000/e1000_ethdev.h ++++ b/dpdk/drivers/net/e1000/e1000_ethdev.h +@@ -97,7 +97,7 @@ + */ + #define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ + VLAN_TAG_SIZE) +- ++#define E1000_ETH_MAX_LEN (RTE_ETHER_MTU + E1000_ETH_OVERHEAD) + /* + * Maximum number of Ring Descriptors. + * +diff --git a/dpdk/drivers/net/e1000/em_ethdev.c b/dpdk/drivers/net/e1000/em_ethdev.c +index 8ee9422bf4..2036c6e917 100644 +--- a/dpdk/drivers/net/e1000/em_ethdev.c ++++ b/dpdk/drivers/net/e1000/em_ethdev.c +@@ -1799,8 +1799,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + if (ret != 0) + return ret; + +- frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + +- VLAN_TAG_SIZE; ++ frame_size = mtu + E1000_ETH_OVERHEAD; + + /* check that mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) +@@ -1816,7 +1815,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* switch to jumbo mode if needed */ +- if (frame_size > RTE_ETHER_MAX_LEN) { ++ if (frame_size > E1000_ETH_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl |= E1000_RCTL_LPE; +diff --git a/dpdk/drivers/net/e1000/igb_ethdev.c b/dpdk/drivers/net/e1000/igb_ethdev.c +index 647aa8d995..5bcc67d75f 100644 +--- a/dpdk/drivers/net/e1000/igb_ethdev.c ++++ b/dpdk/drivers/net/e1000/igb_ethdev.c +@@ -3064,6 +3064,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + uint32_t rx_buf_size; + uint32_t max_high_water; + uint32_t rctl; ++ uint32_t ctrl; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (fc_conf->autoneg != hw->mac.autoneg) +@@ -3101,6 +3102,39 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + rctl &= ~E1000_RCTL_PMCF; + + E1000_WRITE_REG(hw, E1000_RCTL, rctl); ++ ++ /* ++ * check if we want to change flow control mode - driver doesn't have native ++ * capability to do that, so we'll write the registers ourselves ++ */ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ++ /* ++ * set or clear E1000_CTRL_RFCE and E1000_CTRL_TFCE bits depending ++ * on configuration ++ */ ++ switch (fc_conf->mode) { ++ case RTE_FC_NONE: ++ ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE; ++ break; ++ case RTE_FC_RX_PAUSE: ++ ctrl |= E1000_CTRL_RFCE; ++ ctrl &= ~E1000_CTRL_TFCE; ++ break; ++ case RTE_FC_TX_PAUSE: ++ ctrl |= E1000_CTRL_TFCE; ++ ctrl &= ~E1000_CTRL_RFCE; ++ break; ++ case RTE_FC_FULL: ++ ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE; ++ break; ++ default: ++ PMD_INIT_LOG(ERR, "invalid flow control mode"); ++ return -EINVAL; ++ } ++ ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ + E1000_WRITE_FLUSH(hw); + + return 0; +@@ -4369,7 +4403,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* switch to jumbo mode if needed */ +- if (frame_size > RTE_ETHER_MAX_LEN) { ++ if (frame_size > E1000_ETH_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl |= E1000_RCTL_LPE; +diff --git a/dpdk/drivers/net/ena/base/ena_eth_com.c b/dpdk/drivers/net/ena/base/ena_eth_com.c +index a35d92fbd3..5583a310a1 100644 +--- a/dpdk/drivers/net/ena/base/ena_eth_com.c ++++ b/dpdk/drivers/net/ena/base/ena_eth_com.c +@@ -531,6 +531,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + { + struct 
ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0]; + struct ena_eth_io_rx_cdesc_base *cdesc = NULL; ++ u16 q_depth = io_cq->q_depth; + u16 cdesc_idx = 0; + u16 nb_hw_desc; + u16 i = 0; +@@ -559,6 +560,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + do { + ena_buf[i].len = cdesc->length; + ena_buf[i].req_id = cdesc->req_id; ++ if (unlikely(ena_buf[i].req_id >= q_depth)) ++ return ENA_COM_EIO; + + if (++i >= nb_hw_desc) + break; +diff --git a/dpdk/drivers/net/ena/base/ena_plat_dpdk.h b/dpdk/drivers/net/ena/base/ena_plat_dpdk.h +index 48c77f0c19..a1d749f83f 100644 +--- a/dpdk/drivers/net/ena/base/ena_plat_dpdk.h ++++ b/dpdk/drivers/net/ena/base/ena_plat_dpdk.h +@@ -51,6 +51,7 @@ typedef uint64_t dma_addr_t; + #define ENA_COM_FAULT -EFAULT + #define ENA_COM_TRY_AGAIN -EAGAIN + #define ENA_COM_UNSUPPORTED -EOPNOTSUPP ++#define ENA_COM_EIO -EIO + + #define ____cacheline_aligned __rte_cache_aligned + +diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c +index 20ff3653c6..8baec80040 100644 +--- a/dpdk/drivers/net/ena/ena_ethdev.c ++++ b/dpdk/drivers/net/ena/ena_ethdev.c +@@ -28,7 +28,7 @@ + + #define DRV_MODULE_VER_MAJOR 2 + #define DRV_MODULE_VER_MINOR 2 +-#define DRV_MODULE_VER_SUBMINOR 0 ++#define DRV_MODULE_VER_SUBMINOR 1 + + #define ENA_IO_TXQ_IDX(q) (2 * (q)) + #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) +@@ -380,20 +380,6 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, + } + } + +-static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) +-{ +- if (likely(req_id < rx_ring->ring_size)) +- return 0; +- +- PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id); +- +- rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; +- rx_ring->adapter->trigger_reset = true; +- ++rx_ring->rx_stats.bad_req_id; +- +- return -EFAULT; +-} +- + static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) + { + struct ena_tx_buffer *tx_info = NULL; +@@ -1246,6 +1232,10 @@ static int ena_queue_start(struct ena_ring *ring) + PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); + return ENA_COM_FAULT; + } ++ /* Flush per-core RX buffers pools cache as they can be used on other ++ * cores as well. 
++ */ ++ rte_mempool_cache_flush(NULL, ring->mb_pool); + + return 0; + } +@@ -1292,6 +1282,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, + txq->ring_size = nb_desc; + txq->size_mask = nb_desc - 1; + txq->numa_socket_id = socket_id; ++ txq->pkts_without_db = false; + + txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", + sizeof(struct ena_tx_buffer) * +@@ -1482,10 +1473,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) + rte_prefetch0(mbufs[i + 4]); + + req_id = rxq->empty_rx_reqs[next_to_use]; +- rc = validate_rx_req_id(rxq, req_id); +- if (unlikely(rc)) +- break; +- + rx_info = &rxq->rx_buffer_info[req_id]; + + rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); +@@ -2110,8 +2097,6 @@ static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, + + len = ena_bufs[buf].len; + req_id = ena_bufs[buf].req_id; +- if (unlikely(validate_rx_req_id(rx_ring, req_id))) +- return NULL; + + rx_info = &rx_ring->rx_buffer_info[req_id]; + +@@ -2135,10 +2120,6 @@ static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, + ++buf; + len = ena_bufs[buf].len; + req_id = ena_bufs[buf].req_id; +- if (unlikely(validate_rx_req_id(rx_ring, req_id))) { +- rte_mbuf_raw_free(mbuf_head); +- return NULL; +- } + + rx_info = &rx_ring->rx_buffer_info[req_id]; + RTE_ASSERT(rx_info->mbuf != NULL); +@@ -2226,10 +2207,16 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + &ena_rx_ctx); + if (unlikely(rc)) { + PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc); +- rx_ring->adapter->reset_reason = +- ENA_REGS_RESET_TOO_MANY_RX_DESCS; ++ if (rc == ENA_COM_NO_SPACE) { ++ ++rx_ring->rx_stats.bad_desc_num; ++ rx_ring->adapter->reset_reason = ++ ENA_REGS_RESET_TOO_MANY_RX_DESCS; ++ } else { ++ ++rx_ring->rx_stats.bad_req_id; ++ rx_ring->adapter->reset_reason = ++ ENA_REGS_RESET_INV_RX_REQ_ID; ++ } + rx_ring->adapter->trigger_reset = true; +- ++rx_ring->rx_stats.bad_desc_num; + return 0; + } + +@@ -2373,8 +2360,8 @@ static void ena_update_hints(struct ena_adapter *adapter, + } + } + +-static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, +- struct rte_mbuf *mbuf) ++static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring, ++ struct rte_mbuf *mbuf) + { + struct ena_com_dev *ena_dev; + int num_segments, header_len, rc; +@@ -2384,13 +2371,21 @@ static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, + header_len = mbuf->data_len; + + if (likely(num_segments < tx_ring->sgl_size)) +- return 0; ++ goto checkspace; + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && + (num_segments == tx_ring->sgl_size) && + (header_len < tx_ring->tx_max_header_size)) +- return 0; ++ goto checkspace; + ++ /* Checking for space for 2 additional metadata descriptors due to ++ * possible header split and metadata descriptor. 
Linearization will ++ * be needed so we reduce the segments number from num_segments to 1 ++ */ ++ if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) { ++ PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); ++ return ENA_COM_NO_MEM; ++ } + ++tx_ring->tx_stats.linearize; + rc = rte_pktmbuf_linearize(mbuf); + if (unlikely(rc)) { +@@ -2400,7 +2395,19 @@ static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, + return rc; + } + +- return rc; ++ return 0; ++ ++checkspace: ++ /* Checking for space for 2 additional metadata descriptors due to ++ * possible header split and metadata descriptor ++ */ ++ if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, ++ num_segments + 2)) { ++ PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); ++ return ENA_COM_NO_MEM; ++ } ++ ++ return 0; + } + + static void ena_tx_map_mbuf(struct ena_ring *tx_ring, +@@ -2487,7 +2494,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) + int nb_hw_desc; + int rc; + +- rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); ++ rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf); + if (unlikely(rc)) + return rc; + +@@ -2515,6 +2522,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) + "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", + tx_ring->id); + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); ++ tx_ring->tx_stats.doorbells++; ++ tx_ring->pkts_without_db = false; + } + + /* prepare the packet's descriptors to dma engine */ +@@ -2593,13 +2602,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + return 0; + } + +- nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq), +- nb_pkts); +- + for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { + if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) + break; +- ++ tx_ring->pkts_without_db = true; + rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, + tx_ring->size_mask)]); + } +@@ -2608,10 +2614,11 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + ena_com_free_q_entries(tx_ring->ena_com_io_sq); + + /* If there are ready packets to be xmitted... 
*/ +- if (sent_idx > 0) { ++ if (likely(tx_ring->pkts_without_db)) { + /* ...let HW do its best :-) */ + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + tx_ring->tx_stats.doorbells++; ++ tx_ring->pkts_without_db = false; + } + + ena_tx_cleanup(tx_ring); +diff --git a/dpdk/drivers/net/ena/ena_ethdev.h b/dpdk/drivers/net/ena/ena_ethdev.h +index 7bb74a1d06..ae235897ee 100644 +--- a/dpdk/drivers/net/ena/ena_ethdev.h ++++ b/dpdk/drivers/net/ena/ena_ethdev.h +@@ -100,6 +100,10 @@ struct ena_ring { + + enum ena_ring_type type; + enum ena_admin_placement_policy_type tx_mem_queue_type; ++ ++ /* Indicate there are Tx packets pushed to the device and wait for db */ ++ bool pkts_without_db; ++ + /* Holds the empty requests for TX/RX OOO completions */ + union { + uint16_t *empty_tx_reqs; +diff --git a/dpdk/drivers/net/enetc/enetc.h b/dpdk/drivers/net/enetc/enetc.h +index 14ef3bc18b..7163633bce 100644 +--- a/dpdk/drivers/net/enetc/enetc.h ++++ b/dpdk/drivers/net/enetc/enetc.h +@@ -29,6 +29,10 @@ + /* maximum frame size supported */ + #define ENETC_MAC_MAXFRM_SIZE 9600 + ++/* The max frame size with default MTU */ ++#define ENETC_ETH_MAX_LEN (RTE_ETHER_MTU + \ ++ RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) ++ + /* + * upper_32_bits - return bits 32-63 of a number + * @n: the number we're accessing +diff --git a/dpdk/drivers/net/enetc/enetc_ethdev.c b/dpdk/drivers/net/enetc/enetc_ethdev.c +index 6ff3022874..4d2c9c0474 100644 +--- a/dpdk/drivers/net/enetc/enetc_ethdev.c ++++ b/dpdk/drivers/net/enetc/enetc_ethdev.c +@@ -677,7 +677,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EINVAL; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > ENETC_ETH_MAX_LEN) + dev->data->dev_conf.rxmode.offloads &= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/enic/enic_flow.c b/dpdk/drivers/net/enic/enic_flow.c +index cebca7d55a..92b1c9eda6 100644 +--- a/dpdk/drivers/net/enic/enic_flow.c ++++ b/dpdk/drivers/net/enic/enic_flow.c +@@ -1389,7 +1389,7 @@ enic_dump_filter(const struct filter_v2 *filt) + + if (gp->mask_flags & FILTER_GENERIC_1_IPV6) + sprintf(ip6, "%s ", +- (gp->val_flags & FILTER_GENERIC_1_IPV4) ++ (gp->val_flags & FILTER_GENERIC_1_IPV6) + ? 
"ip6(y)" : "ip6(n)"); + else + sprintf(ip6, "%s ", "ip6(x)"); +@@ -1595,6 +1595,8 @@ enic_flow_parse(struct rte_eth_dev *dev, + return -rte_errno; + } + enic_filter->type = enic->flow_filter_mode; ++ if (enic->adv_filters) ++ enic_filter->type = FILTER_DPDK_1; + ret = enic_copy_filter(pattern, enic_filter_cap, enic, + enic_filter, error); + return ret; +diff --git a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c +index 62642354cf..5a2c171099 100644 +--- a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c ++++ b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c +@@ -75,6 +75,9 @@ + #define HINIC_PKTLEN_TO_MTU(pktlen) \ + ((pktlen) - (ETH_HLEN + ETH_CRC_LEN)) + ++/* The max frame size with default MTU */ ++#define HINIC_ETH_MAX_LEN (RTE_ETHER_MTU + ETH_HLEN + ETH_CRC_LEN) ++ + /* lro numer limit for one packet */ + #define HINIC_LRO_WQE_NUM_DEFAULT 8 + +@@ -1556,7 +1559,7 @@ static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + + /* update max frame size */ + frame_size = HINIC_MTU_TO_PKTLEN(mtu); +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > HINIC_ETH_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/hinic/hinic_pmd_rx.c b/dpdk/drivers/net/hinic/hinic_pmd_rx.c +index a49769a863..842399cc4c 100644 +--- a/dpdk/drivers/net/hinic/hinic_pmd_rx.c ++++ b/dpdk/drivers/net/hinic/hinic_pmd_rx.c +@@ -4,7 +4,7 @@ + + #include + #include +-#ifdef __ARM64_NEON__ ++#ifdef RTE_ARCH_ARM64 + #include + #endif + +@@ -762,7 +762,7 @@ void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq) + static inline void hinic_rq_cqe_be_to_cpu32(void *dst_le32, + volatile void *src_be32) + { +-#if defined(__X86_64_SSE__) ++#if defined(RTE_ARCH_X86_64) + volatile __m128i *wqe_be = (volatile __m128i *)src_be32; + __m128i *wqe_le = (__m128i *)dst_le32; + __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, +@@ -770,7 +770,7 @@ static inline void hinic_rq_cqe_be_to_cpu32(void *dst_le32, + + /* l2nic just use first 128 bits */ + wqe_le[0] = _mm_shuffle_epi8(wqe_be[0], shuf_mask); +-#elif defined(__ARM64_NEON__) ++#elif defined(RTE_ARCH_ARM64) + volatile uint8x16_t *wqe_be = (volatile uint8x16_t *)src_be32; + uint8x16_t *wqe_le = (uint8x16_t *)dst_le32; + const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, +diff --git a/dpdk/drivers/net/hinic/hinic_pmd_tx.c b/dpdk/drivers/net/hinic/hinic_pmd_tx.c +index 9d0264e67a..669f82389c 100644 +--- a/dpdk/drivers/net/hinic/hinic_pmd_tx.c ++++ b/dpdk/drivers/net/hinic/hinic_pmd_tx.c +@@ -7,7 +7,7 @@ + #include + #include + #include +-#ifdef __ARM64_NEON__ ++#ifdef RTE_ARCH_ARM64 + #include + #endif + +@@ -203,7 +203,7 @@ + + static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb) + { +-#if defined(__X86_64_SSE__) ++#if defined(RTE_ARCH_X86_64) + int i; + __m128i *wqe_line = (__m128i *)data; + __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, +@@ -217,7 +217,7 @@ static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb) + wqe_line[3] = _mm_shuffle_epi8(wqe_line[3], shuf_mask); + wqe_line += 4; + } +-#elif defined(__ARM64_NEON__) ++#elif defined(RTE_ARCH_ARM64) + int i; + uint8x16_t *wqe_line = (uint8x16_t *)data; + const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, +@@ -237,7 +237,7 @@ static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb) + + static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge) + { +-#if defined(__X86_64_SSE__) ++#if defined(RTE_ARCH_X86_64) + int i; + __m128i *sge_line = 
(__m128i *)data; + __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, +@@ -248,7 +248,7 @@ static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge) + *sge_line = _mm_shuffle_epi8(*sge_line, shuf_mask); + sge_line++; + } +-#elif defined(__ARM64_NEON__) ++#elif defined(RTE_ARCH_ARM64) + int i; + uint8x16_t *sge_line = (uint8x16_t *)data; + const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, +diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c +index f58f4f7adc..76d16a5a92 100644 +--- a/dpdk/drivers/net/hns3/hns3_cmd.c ++++ b/dpdk/drivers/net/hns3/hns3_cmd.c +@@ -432,6 +432,16 @@ static void hns3_parse_capability(struct hns3_hw *hw, + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1); + } + ++static uint32_t ++hns3_build_api_caps(void) ++{ ++ uint32_t api_caps = 0; ++ ++ hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1); ++ ++ return rte_cpu_to_le_32(api_caps); ++} ++ + static enum hns3_cmd_status + hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) + { +@@ -441,6 +451,7 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1); + resp = (struct hns3_query_version_cmd *)desc.data; ++ resp->api_caps = hns3_build_api_caps(); + + /* Initialize the cmd function */ + ret = hns3_cmd_send(hw, &desc, 1); +@@ -572,9 +583,21 @@ hns3_cmd_destroy_queue(struct hns3_hw *hw) + void + hns3_cmd_uninit(struct hns3_hw *hw) + { ++ rte_atomic16_set(&hw->reset.disable_cmd, 1); ++ ++ /* ++ * A delay is added to ensure that the register cleanup operations ++ * will not be performed concurrently with the firmware command and ++ * ensure that all the reserved commands are executed. ++ * Concurrency may occur in two scenarios: asynchronous command and ++ * timeout command. If the command fails to be executed due to busy ++ * scheduling, the command will be processed in the next scheduling ++ * of the firmware. 
++ */ ++ rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME); ++ + rte_spinlock_lock(&hw->cmq.csq.lock); + rte_spinlock_lock(&hw->cmq.crq.lock); +- rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_cmd_clear_regs(hw); + rte_spinlock_unlock(&hw->cmq.crq.lock); + rte_spinlock_unlock(&hw->cmq.csq.lock); +diff --git a/dpdk/drivers/net/hns3/hns3_cmd.h b/dpdk/drivers/net/hns3/hns3_cmd.h +index 194c3a731b..20c373590f 100644 +--- a/dpdk/drivers/net/hns3/hns3_cmd.h ++++ b/dpdk/drivers/net/hns3/hns3_cmd.h +@@ -8,6 +8,7 @@ + #include + + #define HNS3_CMDQ_TX_TIMEOUT 30000 ++#define HNS3_CMDQ_CLEAR_WAIT_TIME 200 + #define HNS3_CMDQ_RX_INVLD_B 0 + #define HNS3_CMDQ_RX_OUTVLD_B 1 + #define HNS3_CMD_DESC_ALIGNMENT 4096 +@@ -203,7 +204,10 @@ enum hns3_opcode_type { + HNS3_OPC_FD_COUNTER_OP = 0x1205, + + /* Clear hardware state command */ +- HNS3_OPC_CLEAR_HW_STATE = 0x700A, ++ HNS3_OPC_CLEAR_HW_STATE = 0x700B, ++ ++ /* Firmware stats command */ ++ HNS3_OPC_FIRMWARE_COMPAT_CFG = 0x701A, + + /* SFP command */ + HNS3_OPC_SFP_GET_SPEED = 0x7104, +@@ -291,11 +295,16 @@ enum HNS3_CAPS_BITS { + HNS3_CAPS_HW_PAD_B, + HNS3_CAPS_STASH_B, + }; ++ ++enum HNS3_API_CAP_BITS { ++ HNS3_API_CAP_FLEX_RSS_TBL_B, ++}; ++ + #define HNS3_QUERY_CAP_LENGTH 3 + struct hns3_query_version_cmd { + uint32_t firmware; + uint32_t hardware; +- uint32_t rsv; ++ uint32_t api_caps; + uint32_t caps[HNS3_QUERY_CAP_LENGTH]; /* capabilities of device */ + }; + +@@ -632,6 +641,13 @@ enum hns3_promisc_type { + HNS3_BROADCAST = 3, + }; + ++#define HNS3_LINK_EVENT_REPORT_EN_B 0 ++#define HNS3_NCSI_ERROR_REPORT_EN_B 1 ++struct hns3_firmware_compat_cmd { ++ uint32_t compat; ++ uint8_t rsv[20]; ++}; ++ + #define HNS3_MAC_TX_EN_B 6 + #define HNS3_MAC_RX_EN_B 7 + #define HNS3_MAC_PAD_TX_B 11 +@@ -775,12 +791,16 @@ enum hns3_int_gl_idx { + #define HNS3_TQP_ID_M GENMASK(12, 2) + #define HNS3_INT_GL_IDX_S 13 + #define HNS3_INT_GL_IDX_M GENMASK(14, 13) ++#define HNS3_TQP_INT_ID_L_S 0 ++#define HNS3_TQP_INT_ID_L_M GENMASK(7, 0) ++#define HNS3_TQP_INT_ID_H_S 8 ++#define HNS3_TQP_INT_ID_H_M GENMASK(15, 8) + struct hns3_ctrl_vector_chain_cmd { +- uint8_t int_vector_id; ++ uint8_t int_vector_id; /* the low order of the interrupt id */ + uint8_t int_cause_num; + uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD]; + uint8_t vfid; +- uint8_t rsv; ++ uint8_t int_vector_id_h; /* the high order of the interrupt id */ + }; + + struct hns3_config_max_frm_size_cmd { +diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c +index fb501795f0..ab77acd948 100644 +--- a/dpdk/drivers/net/hns3/hns3_dcb.c ++++ b/dpdk/drivers/net/hns3/hns3_dcb.c +@@ -634,7 +634,7 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q) + * stage of the reset process. 
+ */ + if (rte_atomic16_read(&hw->reset.resetting) == 0) { +- for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++) ++ for (i = 0; i < hw->rss_ind_tbl_size; i++) + rss_cfg->rss_indirection_tbl[i] = + i % hw->alloc_rss_size; + } +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c +index 2011378879..ba7d6e38a2 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev.c ++++ b/dpdk/drivers/net/hns3/hns3_ethdev.c +@@ -93,14 +93,14 @@ static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns, + static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, + int on); +-static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev); ++static int hns3_update_link_info(struct rte_eth_dev *eth_dev); + + static int hns3_add_mc_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); + static int hns3_remove_mc_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); + static int hns3_restore_fec(struct hns3_hw *hw); +-static int hns3_query_dev_fec_info(struct rte_eth_dev *dev); ++static int hns3_query_dev_fec_info(struct hns3_hw *hw); + + static void + hns3_pf_disable_irq0(struct hns3_hw *hw) +@@ -2203,7 +2203,7 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev) + } + + static int +-hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, ++hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, + enum hns3_ring_type queue_type, uint16_t queue_id) + { + struct hns3_cmd_desc desc; +@@ -2212,13 +2212,15 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, + enum hns3_cmd_status status; + enum hns3_opcode_type op; + uint16_t tqp_type_and_id = 0; +- const char *op_str; + uint16_t type; + uint16_t gl; + +- op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; ++ op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; + hns3_cmd_setup_basic_desc(&desc, op, false); +- req->int_vector_id = vector_id; ++ req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, ++ HNS3_TQP_INT_ID_L_S); ++ req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, ++ HNS3_TQP_INT_ID_H_S); + + if (queue_type == HNS3_RING_TYPE_RX) + gl = HNS3_RING_GL_RX; +@@ -2234,11 +2236,10 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, + gl); + req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); + req->int_cause_num = 1; +- op_str = mmap ? "Map" : "Unmap"; + status = hns3_cmd_send(hw, &desc, 1); + if (status) { + hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.", +- op_str, queue_id, req->int_vector_id, status); ++ en ? "Map" : "Unmap", queue_id, vector_id, status); + return status; + } + +@@ -2312,6 +2313,7 @@ hns3_dev_configure(struct rte_eth_dev *dev) + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + struct rte_eth_rss_conf rss_conf; ++ uint32_t max_rx_pkt_len; + uint16_t mtu; + bool gro_en; + int ret; +@@ -2370,12 +2372,18 @@ hns3_dev_configure(struct rte_eth_dev *dev) + * according to the maximum RX packet length. + */ + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- /* +- * Security of max_rx_pkt_len is guaranteed in dpdk frame. +- * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it +- * can safely assign to "uint16_t" type variable. 
+- */ +- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); ++ max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; ++ if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || ++ max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { ++ hns3_err(hw, "maximum Rx packet length must be greater " ++ "than %u and less than %u when jumbo frame enabled.", ++ (uint16_t)HNS3_DEFAULT_FRAME_LEN, ++ (uint16_t)HNS3_MAX_FRAME_LEN); ++ ret = -EINVAL; ++ goto cfg_err; ++ } ++ ++ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); + ret = hns3_dev_mtu_set(dev, mtu); + if (ret) + goto cfg_err; +@@ -2458,7 +2466,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + } + + rte_spinlock_lock(&hw->lock); +- is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false; ++ is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false; + frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); + + /* +@@ -2567,7 +2575,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + + info->vmdq_queue_num = 0; + +- info->reta_size = HNS3_RSS_IND_TBL_SIZE; ++ info->reta_size = hw->rss_ind_tbl_size; + info->hash_key_size = HNS3_RSS_KEY_SIZE; + info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + +@@ -2616,8 +2624,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, + struct rte_eth_link new_link; + + if (!hns3_is_reset_pending(hns)) { +- hns3_update_speed_duplex(eth_dev); + hns3_update_link_status(hw); ++ hns3_update_link_info(eth_dev); + } + + memset(&new_link, 0, sizeof(new_link)); +@@ -2957,6 +2965,20 @@ hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) + hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); + } + ++static int ++hns3_check_dev_specifications(struct hns3_hw *hw) ++{ ++ if (hw->rss_ind_tbl_size == 0 || ++ hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { ++ hns3_err(hw, "the size of hash lookup table configured (%u)" ++ " exceeds the maximum(%u)", hw->rss_ind_tbl_size, ++ HNS3_RSS_IND_TBL_SIZE_MAX); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + static int + hns3_query_dev_specifications(struct hns3_hw *hw) + { +@@ -2977,7 +2999,7 @@ hns3_query_dev_specifications(struct hns3_hw *hw) + + hns3_parse_dev_specifications(hw, desc); + +- return 0; ++ return hns3_check_dev_specifications(hw); + } + + static int +@@ -3001,13 +3023,6 @@ hns3_get_capability(struct hns3_hw *hw) + device_id == HNS3_DEV_ID_200G_RDMA) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); + +- ret = hns3_query_dev_fec_info(eth_dev); +- if (ret) { +- PMD_INIT_LOG(ERR, +- "failed to query FEC information, ret = %d", ret); +- return ret; +- } +- + /* Get PCI revision id */ + ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, + HNS3_PCI_REVISION_ID); +@@ -3139,8 +3154,15 @@ hns3_get_configuration(struct hns3_hw *hw) + } + + ret = hns3_get_board_configuration(hw); +- if (ret) ++ if (ret) { + PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); ++ return ret; ++ } ++ ++ ret = hns3_query_dev_fec_info(hw); ++ if (ret) ++ PMD_INIT_LOG(ERR, ++ "failed to query FEC information, ret = %d", ret); + + return ret; + } +@@ -3892,6 +3914,26 @@ hns3_buffer_alloc(struct hns3_hw *hw) + return ret; + } + ++static int ++hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) ++{ ++ struct hns3_firmware_compat_cmd *req; ++ struct hns3_cmd_desc desc; ++ uint32_t compat = 0; ++ ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false); ++ req = (struct hns3_firmware_compat_cmd *)desc.data; ++ ++ if (is_init) { ++ hns3_set_bit(compat, 
HNS3_LINK_EVENT_REPORT_EN_B, 1); ++ hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0); ++ } ++ ++ req->compat = rte_cpu_to_le_32(compat); ++ ++ return hns3_cmd_send(hw, &desc, 1); ++} ++ + static int + hns3_mac_init(struct hns3_hw *hw) + { +@@ -4342,10 +4384,9 @@ hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) + } + + static int +-hns3_update_speed_duplex(struct rte_eth_dev *eth_dev) ++hns3_update_fiber_link_info(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = eth_dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint32_t speed; + int ret; +@@ -4368,6 +4409,21 @@ hns3_update_speed_duplex(struct rte_eth_dev *eth_dev) + return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX); + } + ++static int ++hns3_update_link_info(struct rte_eth_dev *eth_dev) ++{ ++ struct hns3_adapter *hns = eth_dev->data->dev_private; ++ struct hns3_hw *hw = &hns->hw; ++ int ret = 0; ++ ++ if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) ++ return 0; ++ else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) ++ ret = hns3_update_fiber_link_info(hw); ++ ++ return ret; ++} ++ + static int + hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) + { +@@ -4454,8 +4510,8 @@ hns3_service_handler(void *param) + struct hns3_hw *hw = &hns->hw; + + if (!hns3_is_reset_pending(hns)) { +- hns3_update_speed_duplex(eth_dev); + hns3_update_link_status(hw); ++ hns3_update_link_info(eth_dev); + } else + hns3_warn(hw, "Cancel the query when reset is pending"); + +@@ -4541,6 +4597,15 @@ hns3_init_hardware(struct hns3_adapter *hns) + goto err_mac_init; + } + ++ /* ++ * Requiring firmware to enable some features, driver can ++ * still work without it. ++ */ ++ ret = hns3_firmware_compat_config(hw, true); ++ if (ret) ++ PMD_INIT_LOG(WARNING, "firmware compatible features not " ++ "supported, ret = %d.", ret); ++ + return 0; + + err_mac_init: +@@ -4675,6 +4740,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) + err_enable_intr: + hns3_fdir_filter_uninit(hns); + err_fdir: ++ (void)hns3_firmware_compat_config(hw, false); + hns3_uninit_umv_space(hw); + err_init_hw: + hns3_tqp_stats_uninit(hw); +@@ -4708,6 +4774,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) + (void)hns3_config_gro(hw, false); + hns3_promisc_uninit(hw); + hns3_fdir_filter_uninit(hns); ++ (void)hns3_firmware_compat_config(hw, false); + hns3_uninit_umv_space(hw); + hns3_tqp_stats_uninit(hw); + hns3_pf_disable_irq0(hw); +@@ -4761,8 +4828,8 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev) + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; +- uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; ++ uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; ++ uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; + uint32_t intr_vector; + uint16_t q_id; + int ret; +@@ -5788,6 +5855,16 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) + struct hns3_cmd_desc desc; + int ret; + ++ /* ++ * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported ++ * in device of link speed ++ * below 10 Gbps. 
++ */ ++ if (hw->mac.link_speed < ETH_SPEED_NUM_10G) { ++ *state = 0; ++ return 0; ++ } ++ + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true); + req = (struct hns3_config_fec_cmd *)desc.data; + ret = hns3_cmd_send(hw, &desc, 1); +@@ -5802,10 +5879,9 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) + } + + static int +-hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) ++hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) + { + #define QUERY_ACTIVE_SPEED 1 +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_sfp_speed_cmd *resp; + uint32_t tmp_fec_capa; + uint8_t auto_state; +@@ -5865,6 +5941,14 @@ hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) + return 0; + } + ++static int ++hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ ++ return hns3_fec_get_internal(hw, fec_capa); ++} ++ + static int + hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) + { +@@ -5994,14 +6078,13 @@ hns3_restore_fec(struct hns3_hw *hw) + } + + static int +-hns3_query_dev_fec_info(struct rte_eth_dev *dev) ++hns3_query_dev_fec_info(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); +- struct hns3_pf *pf = &hns->pf; ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns); + int ret; + +- ret = hns3_fec_get(dev, &pf->fec_mode); ++ ret = hns3_fec_get_internal(hw, &pf->fec_mode); + if (ret) + hns3_err(hw, "query device FEC info failed, ret = %d", ret); + +@@ -6210,8 +6293,11 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev) + + PMD_INIT_FUNC_TRACE(); + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return -EPERM; ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ rte_free(eth_dev->process_private); ++ eth_dev->process_private = NULL; ++ return 0; ++ } + + if (hw->adapter_state < HNS3_NIC_CLOSING) + hns3_dev_close(eth_dev); +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +index 0366b9d4dc..9c84740d7b 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +@@ -779,6 +779,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + struct rte_eth_rss_conf rss_conf; ++ uint32_t max_rx_pkt_len; + uint16_t mtu; + bool gro_en; + int ret; +@@ -831,12 +832,18 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) + * according to the maximum RX packet length. + */ + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- /* +- * Security of max_rx_pkt_len is guaranteed in dpdk frame. +- * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it +- * can safely assign to "uint16_t" type variable. 
+- */ +- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); ++ max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; ++ if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || ++ max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { ++ hns3_err(hw, "maximum Rx packet length must be greater " ++ "than %u and less than %u when jumbo frame enabled.", ++ (uint16_t)HNS3_DEFAULT_FRAME_LEN, ++ (uint16_t)HNS3_MAX_FRAME_LEN); ++ ret = -EINVAL; ++ goto cfg_err; ++ } ++ ++ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); + ret = hns3vf_dev_mtu_set(dev, mtu); + if (ret) + goto cfg_err; +@@ -928,7 +935,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + rte_spinlock_unlock(&hw->lock); + return ret; + } +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (mtu > RTE_ETHER_MTU) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +@@ -1022,7 +1029,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + + info->vmdq_queue_num = 0; + +- info->reta_size = HNS3_RSS_IND_TBL_SIZE; ++ info->reta_size = hw->rss_ind_tbl_size; + info->hash_key_size = HNS3_RSS_KEY_SIZE; + info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; +@@ -1154,6 +1161,20 @@ hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) + hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); + } + ++static int ++hns3vf_check_dev_specifications(struct hns3_hw *hw) ++{ ++ if (hw->rss_ind_tbl_size == 0 || ++ hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { ++ hns3_warn(hw, "the size of hash lookup table configured (%u)" ++ " exceeds the maximum(%u)", hw->rss_ind_tbl_size, ++ HNS3_RSS_IND_TBL_SIZE_MAX); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + static int + hns3vf_query_dev_specifications(struct hns3_hw *hw) + { +@@ -1174,7 +1195,7 @@ hns3vf_query_dev_specifications(struct hns3_hw *hw) + + hns3vf_parse_dev_specifications(hw, desc); + +- return 0; ++ return hns3vf_check_dev_specifications(hw); + } + + static int +@@ -1749,7 +1770,6 @@ hns3vf_init_hardware(struct hns3_adapter *hns) + goto err_init_hardware; + } + +- hns3vf_request_link_info(hw); + return 0; + + err_init_hardware: +@@ -1978,8 +1998,11 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + struct hns3_hw *hw = &hns->hw; + int ret = 0; + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ rte_free(eth_dev->process_private); ++ eth_dev->process_private = NULL; + return 0; ++ } + + if (hw->adapter_state == HNS3_NIC_STARTED) + ret = hns3vf_dev_stop(eth_dev); +@@ -2238,7 +2261,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) + hns3_rx_scattered_calc(dev); + hns3_set_rxtx_function(dev); + hns3_mp_req_start_rxtx(dev); +- rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev); ++ hns3vf_service_handler(dev); + + hns3vf_restore_filter(dev); + +@@ -2360,15 +2383,17 @@ static int + hns3vf_prepare_reset(struct hns3_adapter *hns) + { + struct hns3_hw *hw = &hns->hw; +- int ret = 0; ++ int ret; + + if (hw->reset.level == HNS3_VF_FUNC_RESET) { + ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, + 0, true, NULL, 0); ++ if (ret) ++ return ret; + } + rte_atomic16_set(&hw->reset.disable_cmd, 1); + +- return ret; ++ return 0; + } + + static int +@@ -2849,8 +2874,11 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) + + PMD_INIT_FUNC_TRACE(); + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return -EPERM; ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ 
rte_free(eth_dev->process_private); ++ eth_dev->process_private = NULL; ++ return 0; ++ } + + if (hw->adapter_state < HNS3_NIC_CLOSING) + hns3vf_dev_close(eth_dev); +diff --git a/dpdk/drivers/net/hns3/hns3_flow.c b/dpdk/drivers/net/hns3/hns3_flow.c +index ee6ec15498..8e4519a425 100644 +--- a/dpdk/drivers/net/hns3/hns3_flow.c ++++ b/dpdk/drivers/net/hns3/hns3_flow.c +@@ -44,8 +44,7 @@ static enum rte_flow_item_type first_items[] = { + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_GENEVE, +- RTE_FLOW_ITEM_TYPE_VXLAN_GPE, +- RTE_FLOW_ITEM_TYPE_MPLS ++ RTE_FLOW_ITEM_TYPE_VXLAN_GPE + }; + + static enum rte_flow_item_type L2_next_items[] = { +@@ -65,8 +64,7 @@ static enum rte_flow_item_type L3_next_items[] = { + static enum rte_flow_item_type L4_next_items[] = { + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_GENEVE, +- RTE_FLOW_ITEM_TYPE_VXLAN_GPE, +- RTE_FLOW_ITEM_TYPE_MPLS ++ RTE_FLOW_ITEM_TYPE_VXLAN_GPE + }; + + static enum rte_flow_item_type tunnel_next_items[] = { +@@ -91,9 +89,9 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) + /* + * This function is used to find rss general action. + * 1. As we know RSS is used to spread packets among several queues, the flow +- * API provide the struct rte_flow_action_rss, user could config it's field ++ * API provide the struct rte_flow_action_rss, user could config its field + * sush as: func/level/types/key/queue to control RSS function. +- * 2. The flow API also support queue region configuration for hns3. It was ++ * 2. The flow API also supports queue region configuration for hns3. It was + * implemented by FDIR + RSS in hns3 hardware, user can create one FDIR rule + * which action is RSS queues region. + * 3. When action is RSS, we use the following rule to distinguish: +@@ -128,11 +126,11 @@ hns3_find_rss_general_action(const struct rte_flow_item pattern[], + rss = act->conf; + if (have_eth && rss->conf.queue_num) { + /* +- * Patter have ETH and action's queue_num > 0, indicate this is ++ * Pattern have ETH and action's queue_num > 0, indicate this is + * queue region configuration. + * Because queue region is implemented by FDIR + RSS in hns3 +- * hardware, it need enter FDIR process, so here return NULL to +- * avoid enter RSS process. ++ * hardware, it needs to enter FDIR process, so here return NULL ++ * to avoid enter RSS process. + */ + return NULL; + } +@@ -405,7 +403,6 @@ hns3_handle_actions(struct rte_eth_dev *dev, + return 0; + } + +-/* Parse to get the attr and action info of flow director rule. */ + static int + hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error) + { +@@ -800,7 +797,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + } + + /* +- * Check items before tunnel, save inner configs to outer configs,and clear ++ * Check items before tunnel, save inner configs to outer configs, and clear + * inner configs. + * The key consists of two parts: meta_data and tuple keys. 
+ * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel +@@ -1146,8 +1143,7 @@ is_tunnel_packet(enum rte_flow_item_type type) + if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || + type == RTE_FLOW_ITEM_TYPE_VXLAN || + type == RTE_FLOW_ITEM_TYPE_NVGRE || +- type == RTE_FLOW_ITEM_TYPE_GENEVE || +- type == RTE_FLOW_ITEM_TYPE_MPLS) ++ type == RTE_FLOW_ITEM_TYPE_GENEVE) + return true; + return false; + } +@@ -1208,11 +1204,6 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Fdir not supported in VF"); + +- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) +- return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_HANDLE, NULL, +- "fdir_conf.mode isn't perfect"); +- + step_mngr.items = first_items; + step_mngr.count = ARRAY_SIZE(first_items); + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { +@@ -1469,7 +1460,7 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func, + *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP; + break; + default: +- hns3_err(hw, "Invalid RSS algorithm configuration(%u)", ++ hns3_err(hw, "Invalid RSS algorithm configuration(%d)", + algo_func); + return -EINVAL; + } +@@ -1495,10 +1486,8 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) + if (ret) + return ret; + +- /* Update algorithm of hw */ + hw->rss_info.conf.func = rss_config->func; + +- /* Set flow type supported */ + tuple = &hw->rss_info.rss_tuple_sets; + ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types); + if (ret) +@@ -1513,14 +1502,14 @@ hns3_update_indir_table(struct rte_eth_dev *dev, + { + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; +- uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE]; ++ uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; + uint16_t j; + uint32_t i; + + /* Fill in redirection table */ + memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl, + sizeof(hw->rss_info.rss_indirection_tbl)); +- for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) { ++ for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) { + j %= num; + if (conf->queue[j] >= hw->alloc_rss_size) { + hns3_err(hw, "queue id(%u) set to redirection table " +@@ -1531,7 +1520,7 @@ hns3_update_indir_table(struct rte_eth_dev *dev, + indir_tbl[i] = conf->queue[j]; + } + +- return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE); ++ return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size); + } + + static int +@@ -1583,7 +1572,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev, + if (rss_flow_conf.queue_num) { + /* + * Due the content of queue pointer have been reset to +- * 0, the rss_info->conf.queue should be set NULL ++ * 0, the rss_info->conf.queue should be set to NULL + */ + rss_info->conf.queue = NULL; + rss_info->conf.queue_num = 0; +@@ -1749,7 +1738,7 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + /* + * Create or destroy a flow rule. + * Theorically one rule can match more than one filters. +- * We will let it use the filter which it hitt first. ++ * We will let it use the filter which it hit first. + * So, the sequence matters. 
+ */ + static struct rte_flow * +@@ -1833,17 +1822,18 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + + flow->counter_id = fdir_rule.act_cnt.id; + } ++ ++ fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", ++ sizeof(struct hns3_fdir_rule_ele), ++ 0); ++ if (fdir_rule_ptr == NULL) { ++ hns3_err(hw, "failed to allocate fdir_rule memory."); ++ ret = -ENOMEM; ++ goto err_fdir; ++ } ++ + ret = hns3_fdir_filter_program(hns, &fdir_rule, false); + if (!ret) { +- fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", +- sizeof(struct hns3_fdir_rule_ele), +- 0); +- if (fdir_rule_ptr == NULL) { +- hns3_err(hw, "Failed to allocate fdir_rule memory"); +- ret = -ENOMEM; +- goto err_fdir; +- } +- + memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, + sizeof(struct hns3_fdir_rule)); + TAILQ_INSERT_TAIL(&process_list->fdir_list, +@@ -1854,10 +1844,10 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return flow; + } + ++ rte_free(fdir_rule_ptr); + err_fdir: + if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) + hns3_counter_release(dev, fdir_rule.act_cnt.id); +- + err: + rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow"); +diff --git a/dpdk/drivers/net/hns3/hns3_regs.c b/dpdk/drivers/net/hns3/hns3_regs.c +index b2cc599f12..8afe132585 100644 +--- a/dpdk/drivers/net/hns3/hns3_regs.c ++++ b/dpdk/drivers/net/hns3/hns3_regs.c +@@ -104,6 +104,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length) + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + uint32_t cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + uint32_t regs_num_32_bit, regs_num_64_bit; ++ uint32_t dfx_reg_lines; + uint32_t len; + int ret; + +@@ -117,7 +118,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length) + tqp_intr_lines = sizeof(tqp_intr_reg_addrs) / REG_LEN_PER_LINE + 1; + + len = (cmdq_lines + common_lines + ring_lines * hw->tqps_num + +- tqp_intr_lines * hw->num_msi) * REG_LEN_PER_LINE; ++ tqp_intr_lines * hw->num_msi) * REG_NUM_PER_LINE; + + if (!hns->is_vf) { + ret = hns3_get_regs_num(hw, ®s_num_32_bit, ®s_num_64_bit); +@@ -126,8 +127,11 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length) + ret); + return -ENOTSUP; + } +- len += regs_num_32_bit * sizeof(uint32_t) + +- regs_num_64_bit * sizeof(uint64_t); ++ dfx_reg_lines = regs_num_32_bit * sizeof(uint32_t) / ++ REG_LEN_PER_LINE + 1; ++ dfx_reg_lines += regs_num_64_bit * sizeof(uint64_t) / ++ REG_LEN_PER_LINE + 1; ++ len += dfx_reg_lines * REG_NUM_PER_LINE; + } + + *length = len; +@@ -248,63 +252,68 @@ hns3_get_64_bit_regs(struct hns3_hw *hw, uint32_t regs_num, void *data) + return 0; + } + +-static void ++static int ++hns3_insert_reg_separator(int reg_num, uint32_t *data) ++{ ++ int separator_num; ++ int i; ++ ++ separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; ++ for (i = 0; i < separator_num; i++) ++ *data++ = SEPARATOR_VALUE; ++ return separator_num; ++} ++ ++static int + hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data) + { + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ uint32_t *origin_data_ptr = data; + uint32_t reg_offset; +- int separator_num; +- int reg_um; ++ int reg_num; + int i, j; + + /* fetching per-PF registers values from PF PCIe register space */ +- reg_um = sizeof(cmdq_reg_addrs) / sizeof(uint32_t); +- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; +- for (i = 0; i < reg_um; i++) ++ reg_num = sizeof(cmdq_reg_addrs) / sizeof(uint32_t); ++ for (i = 0; i < reg_num; i++) + *data++ = hns3_read_dev(hw, 
cmdq_reg_addrs[i]); +- for (i = 0; i < separator_num; i++) +- *data++ = SEPARATOR_VALUE; ++ data += hns3_insert_reg_separator(reg_num, data); + + if (hns->is_vf) +- reg_um = sizeof(common_vf_reg_addrs) / sizeof(uint32_t); ++ reg_num = sizeof(common_vf_reg_addrs) / sizeof(uint32_t); + else +- reg_um = sizeof(common_reg_addrs) / sizeof(uint32_t); +- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; +- for (i = 0; i < reg_um; i++) ++ reg_num = sizeof(common_reg_addrs) / sizeof(uint32_t); ++ for (i = 0; i < reg_num; i++) + if (hns->is_vf) + *data++ = hns3_read_dev(hw, common_vf_reg_addrs[i]); + else + *data++ = hns3_read_dev(hw, common_reg_addrs[i]); +- for (i = 0; i < separator_num; i++) +- *data++ = SEPARATOR_VALUE; ++ data += hns3_insert_reg_separator(reg_num, data); + +- reg_um = sizeof(ring_reg_addrs) / sizeof(uint32_t); +- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; ++ reg_num = sizeof(ring_reg_addrs) / sizeof(uint32_t); + for (j = 0; j < hw->tqps_num; j++) { + reg_offset = hns3_get_tqp_reg_offset(j); +- for (i = 0; i < reg_um; i++) ++ for (i = 0; i < reg_num; i++) + *data++ = hns3_read_dev(hw, + ring_reg_addrs[i] + reg_offset); +- for (i = 0; i < separator_num; i++) +- *data++ = SEPARATOR_VALUE; ++ data += hns3_insert_reg_separator(reg_num, data); + } + +- reg_um = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t); +- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; +- for (j = 0; j < hw->num_msi; j++) { +- reg_offset = HNS3_TQP_INTR_REG_SIZE * j; +- for (i = 0; i < reg_um; i++) +- *data++ = hns3_read_dev(hw, +- tqp_intr_reg_addrs[i] + ++ reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t); ++ for (j = 0; j < hw->intr_tqps_num; j++) { ++ reg_offset = hns3_get_tqp_intr_reg_offset(j); ++ for (i = 0; i < reg_num; i++) ++ *data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] + + reg_offset); +- for (i = 0; i < separator_num; i++) +- *data++ = SEPARATOR_VALUE; ++ data += hns3_insert_reg_separator(reg_num, data); + } ++ return data - origin_data_ptr; + } + + int + hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) + { ++#define HNS3_64_BIT_REG_SIZE (sizeof(uint64_t) / sizeof(uint32_t)) + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint32_t regs_num_32_bit; +@@ -334,7 +343,7 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) + return -ENOTSUP; + + /* fetching per-PF registers values from PF PCIe register space */ +- hns3_direct_access_regs(hw, data); ++ data += hns3_direct_access_regs(hw, data); + + if (hns->is_vf) + return 0; +@@ -351,11 +360,16 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) + hns3_err(hw, "Get 32 bit register failed, ret = %d", ret); + return ret; + } +- + data += regs_num_32_bit; ++ data += hns3_insert_reg_separator(regs_num_32_bit, data); ++ + ret = hns3_get_64_bit_regs(hw, regs_num_64_bit, data); +- if (ret) ++ if (ret) { + hns3_err(hw, "Get 64 bit register failed, ret = %d", ret); +- ++ return ret; ++ } ++ data += regs_num_64_bit * HNS3_64_BIT_REG_SIZE; ++ data += hns3_insert_reg_separator(regs_num_64_bit * ++ HNS3_64_BIT_REG_SIZE, data); + return ret; + } +diff --git a/dpdk/drivers/net/hns3/hns3_regs.h b/dpdk/drivers/net/hns3/hns3_regs.h +index 81a0af59e4..39fc5d1b18 100644 +--- a/dpdk/drivers/net/hns3/hns3_regs.h ++++ b/dpdk/drivers/net/hns3/hns3_regs.h +@@ -95,15 +95,21 @@ + #define HNS3_MIN_EXTEND_QUEUE_ID 1024 + + /* bar registers for tqp interrupt */ +-#define HNS3_TQP_INTR_CTRL_REG 0x20000 +-#define 
HNS3_TQP_INTR_GL0_REG 0x20100 +-#define HNS3_TQP_INTR_GL1_REG 0x20200 +-#define HNS3_TQP_INTR_GL2_REG 0x20300 +-#define HNS3_TQP_INTR_RL_REG 0x20900 +-#define HNS3_TQP_INTR_TX_QL_REG 0x20e00 +-#define HNS3_TQP_INTR_RX_QL_REG 0x20f00 +- +-#define HNS3_TQP_INTR_REG_SIZE 4 ++#define HNS3_TQP_INTR_REG_BASE 0x20000 ++#define HNS3_TQP_INTR_EXT_REG_BASE 0x30000 ++#define HNS3_TQP_INTR_CTRL_REG 0 ++#define HNS3_TQP_INTR_GL0_REG 0x100 ++#define HNS3_TQP_INTR_GL1_REG 0x200 ++#define HNS3_TQP_INTR_GL2_REG 0x300 ++#define HNS3_TQP_INTR_RL_REG 0x900 ++#define HNS3_TQP_INTR_TX_QL_REG 0xe00 ++#define HNS3_TQP_INTR_RX_QL_REG 0xf00 ++#define HNS3_TQP_INTR_RL_EN_B 6 ++ ++#define HNS3_MIN_EXT_TQP_INTR_ID 64 ++#define HNS3_TQP_INTR_LOW_ORDER_OFFSET 0x4 ++#define HNS3_TQP_INTR_HIGH_ORDER_OFFSET 0x1000 ++ + #define HNS3_TQP_INTR_GL_MAX 0x1FE0 + #define HNS3_TQP_INTR_GL_DEFAULT 20 + #define HNS3_TQP_INTR_GL_UNIT_1US BIT(31) +diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c +index e2f04687b2..7bd7745859 100644 +--- a/dpdk/drivers/net/hns3/hns3_rss.c ++++ b/dpdk/drivers/net/hns3/hns3_rss.c +@@ -312,7 +312,7 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) + + /* Update redirection table of hw */ + memcpy(hw->rss_info.rss_indirection_tbl, indir, +- sizeof(hw->rss_info.rss_indirection_tbl)); ++ sizeof(uint16_t) * size); + + return 0; + } +@@ -324,13 +324,13 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) + int ret; + + lut = rte_zmalloc("hns3_rss_lut", +- HNS3_RSS_IND_TBL_SIZE * sizeof(uint16_t), 0); ++ hw->rss_ind_tbl_size * sizeof(uint16_t), 0); + if (lut == NULL) { + hns3_err(hw, "No hns3_rss_lut memory can be allocated"); + return -ENOMEM; + } + +- ret = hns3_set_rss_indir_table(hw, lut, HNS3_RSS_IND_TBL_SIZE); ++ ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size); + if (ret) + hns3_err(hw, "RSS uninit indir table failed: %d", ret); + rte_free(lut); +@@ -428,7 +428,7 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev, + } else if (rss_hf && rss_cfg->conf.types == 0) { + /* Enable RSS, restore indirection table by hw's config */ + ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl, +- HNS3_RSS_IND_TBL_SIZE); ++ hw->rss_ind_tbl_size); + if (ret) + goto conf_err; + } +@@ -505,15 +505,15 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; +- uint16_t i, indir_size = HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */ +- uint16_t indirection_tbl[HNS3_RSS_IND_TBL_SIZE]; ++ uint16_t indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; + uint16_t idx, shift; ++ uint16_t i; + int ret; + +- if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) { ++ if (reta_size != hw->rss_ind_tbl_size) { + hns3_err(hw, "The size of hash lookup table configured (%u)" + "doesn't match the number hardware can supported" +- "(%u)", reta_size, indir_size); ++ "(%u)", reta_size, hw->rss_ind_tbl_size); + return -EINVAL; + } + rte_spinlock_lock(&hw->lock); +@@ -536,7 +536,7 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + } + + ret = hns3_set_rss_indir_table(hw, indirection_tbl, +- HNS3_RSS_IND_TBL_SIZE); ++ hw->rss_ind_tbl_size); + + rte_spinlock_unlock(&hw->lock); + return ret; +@@ -561,13 +561,13 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; +- uint16_t i, indir_size = 
HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */ + uint16_t idx, shift; ++ uint16_t i; + +- if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) { ++ if (reta_size != hw->rss_ind_tbl_size) { + hns3_err(hw, "The size of hash lookup table configured (%u)" + " doesn't match the number hardware can supported" +- "(%u)", reta_size, indir_size); ++ "(%u)", reta_size, hw->rss_ind_tbl_size); + return -EINVAL; + } + rte_spinlock_lock(&hw->lock); +@@ -667,7 +667,7 @@ hns3_set_default_rss_args(struct hns3_hw *hw) + memcpy(rss_cfg->key, hns3_hash_key, HNS3_RSS_KEY_SIZE); + + /* Initialize RSS indirection table */ +- for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++) ++ for (i = 0; i < hw->rss_ind_tbl_size; i++) + rss_cfg->rss_indirection_tbl[i] = i % queue_num; + } + +@@ -716,7 +716,7 @@ hns3_config_rss(struct hns3_adapter *hns) + */ + if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) { + ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl, +- HNS3_RSS_IND_TBL_SIZE); ++ hw->rss_ind_tbl_size); + if (ret) + goto rss_tuple_uninit; + } +diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h +index 6d1d25f227..798c5c62df 100644 +--- a/dpdk/drivers/net/hns3/hns3_rss.h ++++ b/dpdk/drivers/net/hns3/hns3_rss.h +@@ -24,9 +24,8 @@ + ETH_RSS_L4_DST_ONLY) + + #define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */ ++#define HNS3_RSS_IND_TBL_SIZE_MAX 2048 + #define HNS3_RSS_KEY_SIZE 40 +-#define HNS3_RSS_CFG_TBL_NUM \ +- (HNS3_RSS_IND_TBL_SIZE / HNS3_RSS_CFG_TBL_SIZE) + #define HNS3_RSS_SET_BITMAP_MSK 0xffff + + #define HNS3_RSS_HASH_ALGO_TOEPLITZ 0 +@@ -45,7 +44,7 @@ struct hns3_rss_conf { + uint8_t hash_algo; /* hash function type definited by hardware */ + uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */ + struct hns3_rss_tuple_cfg rss_tuple_sets; +- uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE]; /* Shadow table */ ++ uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; + uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ + bool valid; /* check if RSS rule is valid */ + /* +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c +index 88d3baba4a..896567c791 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx.c ++++ b/dpdk/drivers/net/hns3/hns3_rxtx.c +@@ -10,7 +10,7 @@ + #include + #include + #include +-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT) ++#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE) + #include + #endif + +@@ -834,6 +834,24 @@ hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id, + return ret; + } + ++uint32_t ++hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id) ++{ ++ uint32_t reg_offset; ++ ++ /* Need an extend offset to config queues > 64 */ ++ if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID) ++ reg_offset = HNS3_TQP_INTR_REG_BASE + ++ tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET; ++ else ++ reg_offset = HNS3_TQP_INTR_EXT_REG_BASE + ++ tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID * ++ HNS3_TQP_INTR_HIGH_ORDER_OFFSET + ++ tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID * ++ HNS3_TQP_INTR_LOW_ORDER_OFFSET; ++ ++ return reg_offset; ++} + + void + hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, +@@ -847,7 +865,7 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, + if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX) + return; + +- addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE; ++ addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id); + if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US) + value = gl_value | 
HNS3_TQP_INTR_GL_UNIT_1US; + else +@@ -864,7 +882,7 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value) + if (rl_value > HNS3_TQP_INTR_RL_MAX) + return; + +- addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; ++ addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id); + value = HNS3_RL_USEC_TO_REG(rl_value); + if (value > 0) + value |= HNS3_TQP_INTR_RL_ENABLE_MASK; +@@ -885,10 +903,10 @@ hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value) + if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE) + return; + +- addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; ++ addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id); + hns3_write_dev(hw, addr, ql_value); + +- addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; ++ addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id); + hns3_write_dev(hw, addr, ql_value); + } + +@@ -897,7 +915,7 @@ hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en) + { + uint32_t addr, value; + +- addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; ++ addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id); + value = en ? 1 : 0; + + hns3_write_dev(hw, addr, value); +@@ -2467,7 +2485,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + static bool + hns3_check_sve_support(void) + { +-#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT) ++#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE) + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE)) + return true; + #endif +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.h b/dpdk/drivers/net/hns3/hns3_rxtx.h +index 6538848fee..5650a97c3a 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx.h ++++ b/dpdk/drivers/net/hns3/hns3_rxtx.h +@@ -653,6 +653,7 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, + const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev); + void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev); + void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev); ++uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id); + void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, + uint8_t gl_idx, uint16_t gl_value); + void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, +diff --git a/dpdk/drivers/net/hns3/hns3_stats.c b/dpdk/drivers/net/hns3/hns3_stats.c +index 91168ac95a..48ab6a38bb 100644 +--- a/dpdk/drivers/net/hns3/hns3_stats.c ++++ b/dpdk/drivers/net/hns3/hns3_stats.c +@@ -521,8 +521,15 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) + if (rxq) { + cnt = rxq->l2_errors + rxq->pkt_len_errors; + rte_stats->q_errors[i] = cnt; ++ /* ++ * If HW statistics are reset by stats_reset, but ++ * a lot of residual packets exist in the hardware ++ * queue and these packets are error packets, flip ++ * overflow may occurred. So return 0 in this case. ++ */ + rte_stats->q_ipackets[i] = +- stats->rcb_rx_ring_pktnum[i] - cnt; ++ stats->rcb_rx_ring_pktnum[i] > cnt ? ++ stats->rcb_rx_ring_pktnum[i] - cnt : 0; + rte_stats->ierrors += cnt; + } + } +@@ -535,8 +542,9 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) + } + + rte_stats->oerrors = 0; +- rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd - +- rte_stats->ierrors; ++ rte_stats->ipackets = ++ stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ? 
++ stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0; + rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd - + rte_stats->oerrors; + rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed; +@@ -551,7 +559,6 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc_reset; + struct hns3_rx_queue *rxq; +- struct hns3_tx_queue *txq; + uint16_t i; + int ret; + +@@ -581,29 +588,15 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) + } + } + +- /* Clear the Rx BD errors stats */ +- for (i = 0; i != eth_dev->data->nb_rx_queues; ++i) { ++ /* ++ * Clear soft stats of rx error packet which will be dropped ++ * in driver. ++ */ ++ for (i = 0; i < eth_dev->data->nb_rx_queues; ++i) { + rxq = eth_dev->data->rx_queues[i]; + if (rxq) { + rxq->pkt_len_errors = 0; + rxq->l2_errors = 0; +- rxq->l3_csum_errors = 0; +- rxq->l4_csum_errors = 0; +- rxq->ol3_csum_errors = 0; +- rxq->ol4_csum_errors = 0; +- } +- } +- +- /* Clear the Tx errors stats */ +- for (i = 0; i != eth_dev->data->nb_tx_queues; ++i) { +- txq = eth_dev->data->tx_queues[i]; +- if (txq) { +- txq->over_length_pkt_cnt = 0; +- txq->exceed_limit_bd_pkt_cnt = 0; +- txq->exceed_limit_bd_reassem_fail = 0; +- txq->unsupported_tunnel_pkt_cnt = 0; +- txq->queue_full_cnt = 0; +- txq->pkt_padding_fail_cnt = 0; + } + } + +@@ -739,9 +732,9 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + if (!hns->is_vf) { + /* Update Mac stats */ + ret = hns3_query_update_mac_stats(dev); +- if (ret) { ++ if (ret < 0) { + hns3_err(hw, "Update Mac stats fail : %d", ret); +- return 0; ++ return ret; + } + + /* Get MAC stats from hw->hw_xstats.mac_stats struct */ +@@ -933,9 +926,13 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint32_t i; + int ret; + +- if (ids == NULL || size < cnt_stats) ++ if (ids == NULL && values == NULL) + return cnt_stats; + ++ if (ids == NULL) ++ if (size < cnt_stats) ++ return cnt_stats; ++ + /* Update tqp stats by read register */ + ret = hns3_update_tqp_stats(hw); + if (ret) { +@@ -957,6 +954,15 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + return -EINVAL; + } + ++ if (ids == NULL && values != NULL) { ++ for (i = 0; i < cnt_stats; i++) ++ memcpy(&values[i], &values_copy[i].value, ++ sizeof(values[i])); ++ ++ rte_free(values_copy); ++ return cnt_stats; ++ } ++ + for (i = 0; i < size; i++) { + if (ids[i] >= cnt_stats) { + hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, " +@@ -1005,9 +1011,16 @@ hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + uint64_t len; + uint32_t i; + +- if (ids == NULL || xstats_names == NULL) ++ if (xstats_names == NULL) + return cnt_stats; + ++ if (ids == NULL) { ++ if (size < cnt_stats) ++ return cnt_stats; ++ ++ return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats); ++ } ++ + len = cnt_stats * sizeof(struct rte_eth_xstat_name); + names_copy = rte_zmalloc("hns3_xstats_names", len, 0); + if (names_copy == NULL) { +@@ -1033,6 +1046,38 @@ hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + return size; + } + ++static void ++hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev) ++{ ++ struct hns3_rx_queue *rxq; ++ struct hns3_tx_queue *txq; ++ int i; ++ ++ /* Clear Rx dfx stats */ ++ for (i = 0; i < dev->data->nb_rx_queues; ++i) { ++ rxq = dev->data->rx_queues[i]; ++ if (rxq) { ++ rxq->l3_csum_errors = 0; ++ rxq->l4_csum_errors = 0; ++ rxq->ol3_csum_errors = 0; ++ rxq->ol4_csum_errors = 0; ++ } ++ } ++ ++ /* Clear Tx dfx stats */ ++ for (i = 0; i < 
dev->data->nb_tx_queues; ++i) { ++ txq = dev->data->tx_queues[i]; ++ if (txq) { ++ txq->over_length_pkt_cnt = 0; ++ txq->exceed_limit_bd_pkt_cnt = 0; ++ txq->exceed_limit_bd_reassem_fail = 0; ++ txq->unsupported_tunnel_pkt_cnt = 0; ++ txq->queue_full_cnt = 0; ++ txq->pkt_padding_fail_cnt = 0; ++ } ++ } ++} ++ + int + hns3_dev_xstats_reset(struct rte_eth_dev *dev) + { +@@ -1048,6 +1093,8 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev) + /* Clear reset stats */ + memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats)); + ++ hns3_tqp_dfx_stats_clear(dev); ++ + if (hns->is_vf) + return 0; + +diff --git a/dpdk/drivers/net/hns3/meson.build b/dpdk/drivers/net/hns3/meson.build +index 45cee34d9d..5674d986ba 100644 +--- a/dpdk/drivers/net/hns3/meson.build ++++ b/dpdk/drivers/net/hns3/meson.build +@@ -32,7 +32,6 @@ deps += ['hash'] + if arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64') + sources += files('hns3_rxtx_vec.c') + if cc.get_define('__ARM_FEATURE_SVE', args: machine_args) != '' +- cflags = ['-DCC_SVE_SUPPORT'] + sources += files('hns3_rxtx_vec_sve.c') + endif + endif +diff --git a/dpdk/drivers/net/i40e/base/i40e_osdep.h b/dpdk/drivers/net/i40e/base/i40e_osdep.h +index 9b5033024f..c9287ff255 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_osdep.h ++++ b/dpdk/drivers/net/i40e/base/i40e_osdep.h +@@ -133,6 +133,14 @@ static inline uint32_t i40e_read_addr(volatile void *addr) + return rte_le_to_cpu_32(I40E_PCI_REG(addr)); + } + ++#define I40E_PCI_REG64(reg) rte_read64(reg) ++#define I40E_PCI_REG64_ADDR(a, reg) \ ++ ((volatile uint64_t *)((char *)(a)->hw_addr + (reg))) ++static inline uint64_t i40e_read64_addr(volatile void *addr) ++{ ++ return rte_le_to_cpu_64(I40E_PCI_REG64(addr)); ++} ++ + #define I40E_PCI_REG_WRITE(reg, value) \ + rte_write32((rte_cpu_to_le_32(value)), reg) + #define I40E_PCI_REG_WRITE_RELAXED(reg, value) \ +@@ -150,6 +158,8 @@ static inline uint32_t i40e_read_addr(volatile void *addr) + #define I40E_WRITE_REG(hw, reg, value) \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), (reg)), (value)) + ++#define I40E_READ_REG64(hw, reg) i40e_read64_addr(I40E_PCI_REG64_ADDR((hw), (reg))) ++ + #define rd32(a, reg) i40e_read_addr(I40E_PCI_REG_ADDR((a), (reg))) + #define wr32(a, reg, value) \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value)) +diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c +index f54769c29d..ef4f28fe53 100644 +--- a/dpdk/drivers/net/i40e/i40e_ethdev.c ++++ b/dpdk/drivers/net/i40e/i40e_ethdev.c +@@ -763,6 +763,21 @@ static inline void i40e_config_automask(struct i40e_pf *pf) + I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); + } + ++static inline void i40e_clear_automask(struct i40e_pf *pf) ++{ ++ struct i40e_hw *hw = I40E_PF_TO_HW(pf); ++ uint32_t val; ++ ++ val = I40E_READ_REG(hw, I40E_GLINT_CTL); ++ val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | ++ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK); ++ ++ if (!pf->support_multi_driver) ++ val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK; ++ ++ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); ++} ++ + #define I40E_FLOW_CONTROL_ETHERTYPE 0x8808 + + /* +@@ -1534,8 +1549,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) + PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); + return -EIO; + } +- /* Firmware of SFP x722 does not support adminq option */ +- if (hw->device_id == I40E_DEV_ID_SFP_X722) ++ /* Firmware of SFP x722 does not support 802.1ad frames ability */ ++ if (hw->device_id == I40E_DEV_ID_SFP_X722 || ++ hw->device_id == I40E_DEV_ID_SFP_I_X722) + hw->flags 
&= ~I40E_HW_FLAG_802_1AD_CAPABLE; + + PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", +@@ -2741,6 +2757,8 @@ i40e_dev_close(struct rte_eth_dev *dev) + /* Remove all Traffic Manager configuration */ + i40e_tm_conf_uninit(dev); + ++ i40e_clear_automask(pf); ++ + hw->adapter_closed = 1; + return ret; + } +@@ -4426,7 +4444,6 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) + { + struct i40e_pf *pf; + struct i40e_hw *hw; +- int ret; + + if (!vsi || !lut) + return -EINVAL; +@@ -4435,12 +4452,16 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) + hw = I40E_VSI_TO_HW(vsi); + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { +- ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, +- vsi->type != I40E_VSI_SRIOV, +- lut, lut_size); +- if (ret) { +- PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); +- return ret; ++ enum i40e_status_code status; ++ ++ status = i40e_aq_set_rss_lut(hw, vsi->vsi_id, ++ vsi->type != I40E_VSI_SRIOV, ++ lut, lut_size); ++ if (status) { ++ PMD_DRV_LOG(ERR, ++ "Failed to update RSS lookup table, error status: %d", ++ status); ++ return -EIO; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; +@@ -6603,9 +6624,13 @@ i40e_stat_update_48(struct i40e_hw *hw, + { + uint64_t new_data; + +- new_data = (uint64_t)I40E_READ_REG(hw, loreg); +- new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) & +- I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH; ++ if (hw->device_id == I40E_DEV_ID_QEMU) { ++ new_data = (uint64_t)I40E_READ_REG(hw, loreg); ++ new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) & ++ I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH; ++ } else { ++ new_data = I40E_READ_REG64(hw, loreg); ++ } + + if (!offset_loaded) + *offset = new_data; +@@ -7591,7 +7616,6 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) + uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ? 
+ I40E_VFQF_HKEY_MAX_INDEX : + I40E_PFQF_HKEY_MAX_INDEX; +- int ret = 0; + + if (!key || key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); +@@ -7604,11 +7628,16 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + struct i40e_aqc_get_set_rss_key_data *key_dw = +- (struct i40e_aqc_get_set_rss_key_data *)key; ++ (struct i40e_aqc_get_set_rss_key_data *)key; ++ enum i40e_status_code status = ++ i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); + +- ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); +- if (ret) +- PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ"); ++ if (status) { ++ PMD_DRV_LOG(ERR, ++ "Failed to configure RSS key via AQ, error status: %d", ++ status); ++ return -EIO; ++ } + } else { + uint32_t *hash_key = (uint32_t *)key; + uint16_t i; +@@ -7628,7 +7657,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) + I40E_WRITE_FLUSH(hw); + } + +- return ret; ++ return 0; + } + + static int +@@ -11753,7 +11782,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EBUSY; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > I40E_ETH_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.h b/dpdk/drivers/net/i40e/i40e_ethdev.h +index 696c5aaf7e..20d051db8b 100644 +--- a/dpdk/drivers/net/i40e/i40e_ethdev.h ++++ b/dpdk/drivers/net/i40e/i40e_ethdev.h +@@ -281,6 +281,7 @@ struct rte_flow { + */ + #define I40E_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2) ++#define I40E_ETH_MAX_LEN (RTE_ETHER_MTU + I40E_ETH_OVERHEAD) + + #define I40E_RXTX_BYTES_H_16_BIT(bytes) ((bytes) & ~I40E_48_BIT_MASK) + #define I40E_RXTX_BYTES_L_48_BIT(bytes) ((bytes) & I40E_48_BIT_MASK) +@@ -636,6 +637,7 @@ struct i40e_fdir_flow_ext { + bool is_udp; /* ipv4|ipv6 udp flow */ + enum i40e_flxpld_layer_idx layer_idx; + struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED]; ++ bool is_flex_flow; + }; + + /* A structure used to define the input for a flow director filter entry */ +@@ -784,6 +786,8 @@ struct i40e_fdir_info { + bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX]; + + bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */ ++ ++ uint32_t flex_flow_count[I40E_MAX_FLXPLD_LAYER]; + }; + + /* Ethertype filter number HW supports */ +diff --git a/dpdk/drivers/net/i40e/i40e_ethdev_vf.c b/dpdk/drivers/net/i40e/i40e_ethdev_vf.c +index c26b036b85..bca8cb80e4 100644 +--- a/dpdk/drivers/net/i40e/i40e_ethdev_vf.c ++++ b/dpdk/drivers/net/i40e/i40e_ethdev_vf.c +@@ -1078,8 +1078,18 @@ i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid) + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); +- if (err) ++ if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN"); ++ return err; ++ } ++ /** ++ * In linux kernel driver on receiving ADD_VLAN it enables ++ * VLAN_STRIP by default. So reconfigure the vlan_offload ++ * as it was done by the app earlier. 
++ */ ++ err = i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); ++ if (err) ++ PMD_DRV_LOG(ERR, "fail to set vlan_strip"); + + return err; + } +@@ -1889,22 +1899,22 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq) + * Check if the jumbo frame and maximum packet length are set correctly + */ + if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN || ++ if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN || + rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, as jumbo " +- "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN, ++ "frame is enabled", (uint32_t)I40E_ETH_MAX_LEN, + (uint32_t)I40E_FRAME_SIZE_MAX); + return I40E_ERR_CONFIG; + } + } else { + if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN || +- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) { ++ rxq->max_pkt_len > I40E_ETH_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, as jumbo " + "frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, +- (uint32_t)RTE_ETHER_MAX_LEN); ++ (uint32_t)I40E_ETH_MAX_LEN); + return I40E_ERR_CONFIG; + } + } +@@ -2406,6 +2416,7 @@ i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + stats->imissed = pstats->rx_discards; + stats->oerrors = pstats->tx_errors + pstats->tx_discards; + stats->ibytes = pstats->rx_bytes; ++ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN; + stats->obytes = pstats->tx_bytes; + } else { + PMD_DRV_LOG(ERR, "Get statistics failed"); +@@ -2825,7 +2836,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EBUSY; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > I40E_ETH_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/i40e/i40e_fdir.c b/dpdk/drivers/net/i40e/i40e_fdir.c +index 50c0eee9f2..f5defcf585 100644 +--- a/dpdk/drivers/net/i40e/i40e_fdir.c ++++ b/dpdk/drivers/net/i40e/i40e_fdir.c +@@ -116,7 +116,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) + #endif + rx_ctx.dtype = i40e_header_split_none; + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE; +- rx_ctx.rxmax = RTE_ETHER_MAX_LEN; ++ rx_ctx.rxmax = I40E_ETH_MAX_LEN; + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; +@@ -355,6 +355,7 @@ i40e_init_flx_pld(struct i40e_pf *pf) + I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/ + I40E_WRITE_REG(hw, + I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/ ++ pf->fdir.flex_pit_flag[i] = 0; + } + + /* initialize the masks */ +@@ -1513,8 +1514,6 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf, + I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit); + min_next_off++; + } +- +- pf->fdir.flex_pit_flag[layer_idx] = 1; + } + + static int +@@ -1686,7 +1685,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, + i40e_fdir_filter_convert(filter, &check_filter); + + if (add) { +- if (!filter->input.flow_ext.customized_pctype) { ++ if (filter->input.flow_ext.is_flex_flow) { + for (i = 0; i < filter->input.flow_ext.raw_id; i++) { + layer_idx = filter->input.flow_ext.layer_idx; + field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; +@@ -1738,6 +1737,9 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, + fdir_info->fdir_guarantee_free_space > 0) + wait_status = false; + } else { ++ if (filter->input.flow_ext.is_flex_flow) ++ layer_idx = filter->input.flow_ext.layer_idx; ++ + node = i40e_sw_fdir_filter_lookup(fdir_info, + 
&check_filter.fdir.input); + if (!node) { +@@ -1785,6 +1787,17 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, + goto error_op; + } + ++ if (filter->input.flow_ext.is_flex_flow) { ++ if (add) { ++ fdir_info->flex_flow_count[layer_idx]++; ++ pf->fdir.flex_pit_flag[layer_idx] = 1; ++ } else { ++ fdir_info->flex_flow_count[layer_idx]--; ++ if (!fdir_info->flex_flow_count[layer_idx]) ++ pf->fdir.flex_pit_flag[layer_idx] = 0; ++ } ++ } ++ + if (add) { + fdir_info->fdir_actual_cnt++; + if (fdir_info->fdir_invalprio == 1 && +diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c +index b09ff6590d..bbd666b7a0 100644 +--- a/dpdk/drivers/net/i40e/i40e_flow.c ++++ b/dpdk/drivers/net/i40e/i40e_flow.c +@@ -3069,6 +3069,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + &flex_pit, sizeof(struct i40e_fdir_flex_pit)); + filter->input.flow_ext.layer_idx = layer_idx; + filter->input.flow_ext.raw_id = raw_id; ++ filter->input.flow_ext.is_flex_flow = true; + break; + case RTE_FLOW_ITEM_TYPE_VF: + vf_spec = item->spec; +@@ -5515,6 +5516,9 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf) + pf->fdir.flex_mask_flag[pctype] = 0; + } + ++ for (i = 0; i < I40E_MAX_FLXPLD_LAYER; i++) ++ pf->fdir.flex_pit_flag[i] = 0; ++ + /* Disable FDIR processing as all FDIR rules are now flushed */ + i40e_fdir_rx_proc_enable(dev, 0); + } +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c +index 5df9a9df56..b8859bbff2 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx.c +@@ -2797,23 +2797,23 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) + RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len * + rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len); + if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN || ++ if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN || + rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must " + "be larger than %u and smaller than %u," + "as jumbo frame is enabled", +- (uint32_t)RTE_ETHER_MAX_LEN, ++ (uint32_t)I40E_ETH_MAX_LEN, + (uint32_t)I40E_FRAME_SIZE_MAX); + return I40E_ERR_CONFIG; + } + } else { + if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN || +- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) { ++ rxq->max_pkt_len > I40E_ETH_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, +- (uint32_t)RTE_ETHER_MAX_LEN); ++ (uint32_t)I40E_ETH_MAX_LEN); + return I40E_ERR_CONFIG; + } + } +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +index 7a558fc73a..fe6ec7deef 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +@@ -342,24 +342,32 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, +- PKT_RX_IP_CKSUM_BAD >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1, ++ 
(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /* second 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, +- PKT_RX_IP_CKSUM_BAD >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1); ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + + const __m256i cksum_mask = _mm256_set1_epi32( + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c +index 4b2b6a28fc..0bcb48e24e 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c +@@ -254,16 +254,18 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp, + + const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | +- PKT_RX_L4_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, +- PKT_RX_IP_CKSUM_BAD >> 1, +- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1); ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + + /* Unpack "status" from quadword 1, bits 0:32 */ + vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); +diff --git a/dpdk/drivers/net/i40e/rte_pmd_i40e.c b/dpdk/drivers/net/i40e/rte_pmd_i40e.c +index 790d042002..2e34140c5b 100644 +--- a/dpdk/drivers/net/i40e/rte_pmd_i40e.c ++++ b/dpdk/drivers/net/i40e/rte_pmd_i40e.c +@@ -2366,6 +2366,9 @@ rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct i40e_mac_filter_info mac_filter; + int 
ret; + ++ if (mac_addr == NULL) ++ return -EINVAL; ++ + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + +@@ -3042,6 +3045,9 @@ int rte_pmd_i40e_flow_add_del_packet_template( + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + ++ if (conf == NULL) ++ return -EINVAL; ++ + if (!is_i40e_supported(dev)) + return -ENOTSUP; + +diff --git a/dpdk/drivers/net/iavf/iavf.h b/dpdk/drivers/net/iavf/iavf.h +index 6d5912d8c1..3328bd9327 100644 +--- a/dpdk/drivers/net/iavf/iavf.h ++++ b/dpdk/drivers/net/iavf/iavf.h +@@ -66,6 +66,7 @@ + #define IAVF_VLAN_TAG_SIZE 4 + #define IAVF_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2) ++#define IAVF_ETH_MAX_LEN (RTE_ETHER_MTU + IAVF_ETH_OVERHEAD) + + #define IAVF_32_BIT_WIDTH (CHAR_BIT * 4) + #define IAVF_48_BIT_WIDTH (CHAR_BIT * 6) +diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c +index 7e3c26a94e..ed69ba483e 100644 +--- a/dpdk/drivers/net/iavf/iavf_ethdev.c ++++ b/dpdk/drivers/net/iavf/iavf_ethdev.c +@@ -372,8 +372,10 @@ iavf_dev_configure(struct rte_eth_dev *dev) + } else { + /* Check if large VF is already enabled. If so, disable and + * release redundant queue resource. ++ * Or check if enough queue pairs. If not, request them from PF. + */ +- if (vf->lv_enabled) { ++ if (vf->lv_enabled || ++ num_queue_pairs > vf->vsi_res->num_queue_pairs) { + ret = iavf_queues_req_reset(dev, num_queue_pairs); + if (ret) + return ret; +@@ -418,23 +420,23 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq) + * correctly. + */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- if (max_pkt_len <= RTE_ETHER_MAX_LEN || ++ if (max_pkt_len <= IAVF_ETH_MAX_LEN || + max_pkt_len > IAVF_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is enabled", +- (uint32_t)RTE_ETHER_MAX_LEN, ++ (uint32_t)IAVF_ETH_MAX_LEN, + (uint32_t)IAVF_FRAME_SIZE_MAX); + return -EINVAL; + } + } else { + if (max_pkt_len < RTE_ETHER_MIN_LEN || +- max_pkt_len > RTE_ETHER_MAX_LEN) { ++ max_pkt_len > IAVF_ETH_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, +- (uint32_t)RTE_ETHER_MAX_LEN); ++ (uint32_t)IAVF_ETH_MAX_LEN); + return -EINVAL; + } + } +@@ -570,15 +572,15 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, + /* If Rx interrupt is reuquired, and we can use + * multi interrupts, then the vec is from 1 + */ +- vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors, +- intr_handle->nb_efd); ++ vf->nb_msix = RTE_MIN(intr_handle->nb_efd, ++ (uint16_t)(vf->vf_res->max_vectors - 1)); + vf->msix_base = IAVF_RX_VEC_START; + vec = IAVF_RX_VEC_START; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + qv_map[i].queue_id = i; + qv_map[i].vector_id = vec; + intr_handle->intr_vec[i] = vec++; +- if (vec >= vf->nb_msix) ++ if (vec >= vf->nb_msix + IAVF_RX_VEC_START) + vec = IAVF_RX_VEC_START; + } + vf->qv_map = qv_map; +@@ -1167,7 +1169,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EBUSY; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > IAVF_ETH_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/iavf/iavf_fdir.c b/dpdk/drivers/net/iavf/iavf_fdir.c +index 7054bde0b9..253213f8b5 100644 +--- a/dpdk/drivers/net/iavf/iavf_fdir.c ++++ b/dpdk/drivers/net/iavf/iavf_fdir.c +@@ 
-25,6 +25,9 @@ + #define IAVF_FDIR_IPV6_TC_OFFSET 20 + #define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET) + ++#define IAVF_GTPU_EH_DWLINK 0 ++#define IAVF_GTPU_EH_UPLINK 1 ++ + #define IAVF_FDIR_INSET_ETH (\ + IAVF_INSET_ETHERTYPE) + +@@ -807,7 +810,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + +- VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH); ++ if (!gtp_psc_spec) ++ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH); ++ else if ((gtp_psc_mask->qfi) && !(gtp_psc_mask->pdu_type)) ++ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH); ++ else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK) ++ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP); ++ else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK) ++ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN); + + if (gtp_psc_spec && gtp_psc_mask) { + if (gtp_psc_mask->qfi == UINT8_MAX) { +diff --git a/dpdk/drivers/net/iavf/iavf_hash.c b/dpdk/drivers/net/iavf/iavf_hash.c +index c4c73e6644..72b0117230 100644 +--- a/dpdk/drivers/net/iavf/iavf_hash.c ++++ b/dpdk/drivers/net/iavf/iavf_hash.c +@@ -806,7 +806,9 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs, + + static uint64_t invalid_rss_comb[] = { + ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP, ++ ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP, + ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP, ++ ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP, + RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 | + RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 | + RTE_ETH_RSS_L3_PRE96 +@@ -867,6 +869,13 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func, + if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | + ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) + return true; ++ ++ if (!(rss_type & ++ (ETH_RSS_IPV4 | ETH_RSS_IPV6 | ++ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP | ++ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP | ++ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP))) ++ return true; + } + + /* check invalid combination */ +diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c +index 33d03af653..c17ae06227 100644 +--- a/dpdk/drivers/net/iavf/iavf_vchnl.c ++++ b/dpdk/drivers/net/iavf/iavf_vchnl.c +@@ -644,12 +644,12 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); +- if (err) { ++ if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_QUEUES_V2"); +- return err; +- } +- return 0; ++ ++ rte_free(queue_select); ++ return err; + } + + int +@@ -688,12 +688,12 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); +- if (err) { ++ if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES_V2"); +- return err; +- } +- return 0; ++ ++ rte_free(queue_select); ++ return err; + } + + int +@@ -737,6 +737,8 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? 
"OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2"); ++ ++ rte_free(queue_select); + return err; + } + +diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c +index 7594df1696..d74fecbf5b 100644 +--- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c ++++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.c +@@ -2156,7 +2156,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) + u16 count = 0; + u16 index; + u16 size; +- u16 i; ++ u16 i, j; + + ice_acquire_lock(&hw->tnl_lock); + +@@ -2196,30 +2196,31 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) + size); + if (!sect_rx) + goto ice_destroy_tunnel_err; +- sect_rx->count = CPU_TO_LE16(1); ++ sect_rx->count = CPU_TO_LE16(count); + + sect_tx = (struct ice_boost_tcam_section *) + ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, + size); + if (!sect_tx) + goto ice_destroy_tunnel_err; +- sect_tx->count = CPU_TO_LE16(1); ++ sect_tx->count = CPU_TO_LE16(count); + + /* copy original boost entry to update package buffer, one copy to Rx + * section, another copy to the Tx section + */ +- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) ++ for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && + (all || hw->tnl.tbl[i].port == port)) { +- ice_memcpy(sect_rx->tcam + i, ++ ice_memcpy(sect_rx->tcam + j, + hw->tnl.tbl[i].boost_entry, + sizeof(*sect_rx->tcam), + ICE_NONDMA_TO_NONDMA); +- ice_memcpy(sect_tx->tcam + i, ++ ice_memcpy(sect_tx->tcam + j, + hw->tnl.tbl[i].boost_entry, + sizeof(*sect_tx->tcam), + ICE_NONDMA_TO_NONDMA); + hw->tnl.tbl[i].marked = true; ++ j++; + } + + status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); +diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c +index ac48bbe279..882448671e 100644 +--- a/dpdk/drivers/net/ice/base/ice_sched.c ++++ b/dpdk/drivers/net/ice/base/ice_sched.c +@@ -1345,7 +1345,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) + ice_memdup(hw, buf->layer_props, + (hw->num_tx_sched_layers * + sizeof(*hw->layer_info)), +- ICE_DMA_TO_DMA); ++ ICE_NONDMA_TO_NONDMA); + if (!hw->layer_info) { + status = ICE_ERR_NO_MEMORY; + goto sched_query_out; +diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c +index dc55d7e3ce..247c3acb67 100644 +--- a/dpdk/drivers/net/ice/base/ice_switch.c ++++ b/dpdk/drivers/net/ice/base/ice_switch.c +@@ -3683,6 +3683,9 @@ ice_add_update_vsi_list(struct ice_hw *hw, + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + ++ if (!m_entry->vsi_list_info) ++ return ICE_ERR_NO_MEMORY; ++ + /* If this entry was large action then the large action needs + * to be updated to point to FWD to VSI list + */ +@@ -5016,6 +5019,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) + return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && + fm_entry->fltr_info.vsi_handle == vsi_handle) || + (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && ++ fm_entry->vsi_list_info && + (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map, + vsi_handle)))); + } +@@ -5090,14 +5094,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, + + LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head, + ice_fltr_mgmt_list_entry, list_entry) { +- struct ice_fltr_info *fi; +- +- fi = &fm_entry->fltr_info; +- if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) ++ if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) + continue; 
+ + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, +- vsi_list_head, fi); ++ vsi_list_head, ++ &fm_entry->fltr_info); + if (status) + return status; + } +diff --git a/dpdk/drivers/net/ice/ice_dcf.c b/dpdk/drivers/net/ice/ice_dcf.c +index 44dbd3bb84..294ddcd2e1 100644 +--- a/dpdk/drivers/net/ice/ice_dcf.c ++++ b/dpdk/drivers/net/ice/ice_dcf.c +@@ -504,9 +504,7 @@ ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc, + } + + do { +- if ((!desc_cmd.pending && !buff_cmd.pending) || +- (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) || +- (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS)) ++ if (!desc_cmd.pending && !buff_cmd.pending) + break; + + rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME); +diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.c b/dpdk/drivers/net/ice/ice_dcf_ethdev.c +index b0b2ecb0d6..e5c877805f 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_ethdev.c ++++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.c +@@ -60,23 +60,23 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq) + * correctly. + */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- if (max_pkt_len <= RTE_ETHER_MAX_LEN || ++ if (max_pkt_len <= ICE_ETH_MAX_LEN || + max_pkt_len > ICE_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is enabled", +- (uint32_t)RTE_ETHER_MAX_LEN, ++ (uint32_t)ICE_ETH_MAX_LEN, + (uint32_t)ICE_FRAME_SIZE_MAX); + return -EINVAL; + } + } else { + if (max_pkt_len < RTE_ETHER_MIN_LEN || +- max_pkt_len > RTE_ETHER_MAX_LEN) { ++ max_pkt_len > ICE_ETH_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, +- (uint32_t)RTE_ETHER_MAX_LEN); ++ (uint32_t)ICE_ETH_MAX_LEN); + return -EINVAL; + } + } +diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c +index 9a5d6a559f..70e5f74b2f 100644 +--- a/dpdk/drivers/net/ice/ice_ethdev.c ++++ b/dpdk/drivers/net/ice/ice_ethdev.c +@@ -3182,6 +3182,12 @@ static int ice_init_rss(struct ice_pf *pf) + vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE; + vsi->rss_lut_size = pf->hash_lut_size; + ++ if (nb_q == 0) { ++ PMD_DRV_LOG(WARNING, ++ "RSS is not supported as rx queues number is zero\n"); ++ return 0; ++ } ++ + if (is_safe_mode) { + PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n"); + return 0; +@@ -3268,10 +3274,12 @@ ice_dev_configure(struct rte_eth_dev *dev) + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + +- ret = ice_init_rss(pf); +- if (ret) { +- PMD_DRV_LOG(ERR, "Failed to enable rss for PF"); +- return ret; ++ if (dev->data->nb_rx_queues) { ++ ret = ice_init_rss(pf); ++ if (ret) { ++ PMD_DRV_LOG(ERR, "Failed to enable rss for PF"); ++ return ret; ++ } + } + + return 0; +@@ -3904,7 +3912,7 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EBUSY; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > ICE_ETH_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/ice/ice_ethdev.h b/dpdk/drivers/net/ice/ice_ethdev.h +index 899f446cde..2b03c59671 100644 +--- a/dpdk/drivers/net/ice/ice_ethdev.h ++++ b/dpdk/drivers/net/ice/ice_ethdev.h +@@ -135,6 +135,7 @@ + */ + #define ICE_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2) ++#define ICE_ETH_MAX_LEN (RTE_ETHER_MTU + 
ICE_ETH_OVERHEAD) + + #define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK) + #define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK) +diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c +index 5fbd68eafc..c98328ce0b 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx.c ++++ b/dpdk/drivers/net/ice/ice_rxtx.c +@@ -246,23 +246,23 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) + dev->data->dev_conf.rxmode.max_rx_pkt_len); + + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { +- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN || ++ if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN || + rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must " + "be larger than %u and smaller than %u," + "as jumbo frame is enabled", +- (uint32_t)RTE_ETHER_MAX_LEN, ++ (uint32_t)ICE_ETH_MAX_LEN, + (uint32_t)ICE_FRAME_SIZE_MAX); + return -EINVAL; + } + } else { + if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN || +- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) { ++ rxq->max_pkt_len > ICE_ETH_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, +- (uint32_t)RTE_ETHER_MAX_LEN); ++ (uint32_t)ICE_ETH_MAX_LEN); + return -EINVAL; + } + } +@@ -701,7 +701,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) + rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; + rx_ctx.dtype = 0; /* No Header Split mode */ + rx_ctx.dsize = 1; /* 32B descriptors */ +- rx_ctx.rxmax = RTE_ETHER_MAX_LEN; ++ rx_ctx.rxmax = ICE_ETH_MAX_LEN; + /* TPH: Transaction Layer Packet (TLP) processing hints */ + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; +@@ -1451,6 +1451,11 @@ ice_rxd_error_to_pkt_flags(uint16_t stat_err0) + if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) + flags |= PKT_RX_EIP_CKSUM_BAD; + ++ if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) ++ flags |= PKT_RX_OUTER_L4_CKSUM_BAD; ++ else ++ flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; ++ + return flags; + } + +@@ -2319,8 +2324,11 @@ ice_parse_tunneling_params(uint64_t ol_flags, + *cd_tunneling |= (tx_offload.l2_len >> 1) << + ICE_TXD_CTX_QW0_NATLEN_S; + +- if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) && +- (ol_flags & PKT_TX_OUTER_IP_CKSUM) && ++ /** ++ * Calculate the tunneling UDP checksum. ++ * Shall be set only if L4TUNT = 01b and EIPT is not zero ++ */ ++ if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && + (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING)) + *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; + } +diff --git a/dpdk/drivers/net/ice/ice_rxtx.h b/dpdk/drivers/net/ice/ice_rxtx.h +index 6b16716063..adfae016a9 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx.h ++++ b/dpdk/drivers/net/ice/ice_rxtx.h +@@ -31,7 +31,7 @@ + + #define ICE_VPMD_RX_BURST 32 + #define ICE_VPMD_TX_BURST 32 +-#define ICE_RXQ_REARM_THRESH 32 ++#define ICE_RXQ_REARM_THRESH 64 + #define ICE_MAX_RX_BURST ICE_RXQ_REARM_THRESH + #define ICE_TX_MAX_FREE_BUF_SZ 64 + #define ICE_DESCS_PER_LOOP 4 +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +index b72a9e7025..7838e17787 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +@@ -251,43 +251,88 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + * bit13 is for VLAN indication. 
+ */ + const __m256i flags_mask = +- _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); ++ _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. + */ +- const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- /* second 128-bits */ +- 0, 0, 0, 0, 0, 0, 0, 0, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); ++ const __m256i l3_l4_flags_shuf = ++ _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * second 128-bits ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 
20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = +- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | +- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_EIP_CKSUM_BAD); ++ _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK | ++ PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_OUTER_L4_CKSUM_MASK); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, +@@ -469,6 +514,15 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); ++ ++ __m256i l4_outer_mask = _mm256_set1_epi32(0x6); ++ __m256i l4_outer_flags = ++ _mm256_and_si256(l3_l4_flags, l4_outer_mask); ++ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); ++ ++ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); ++ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); ++ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c +index df5d2be1e6..fd5d724329 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c +@@ -230,43 +230,88 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = +- _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); ++ _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ +- const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- /* 2nd 128-bits */ +- 0, 0, 0, 0, 0, 0, 0, 0, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); ++ const __m256i l3_l4_flags_shuf = ++ _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * second 128-bits ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ 
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = +- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | +- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_EIP_CKSUM_BAD); ++ _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK | ++ PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_OUTER_L4_CKSUM_MASK); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, +@@ -451,6 +496,14 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); ++ __m256i l4_outer_mask = _mm256_set1_epi32(0x6); ++ __m256i l4_outer_flags = ++ _mm256_and_si256(l3_l4_flags, l4_outer_mask); ++ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); ++ ++ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); ++ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); ++ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +index ae2ac29f2a..c09ac7f667 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +@@ -266,6 +266,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq) + #define ICE_NO_VECTOR_FLAGS ( \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ + DEV_TX_OFFLOAD_VLAN_INSERT | \ ++ DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c b/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c +index 626364719b..87e0c3db2e 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c +@@ -114,39 +114,67 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4], + * bit12 for RSS indication. + * bit13 for VLAN indication. 
+ */ +- const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070, +- 0x3070, 0x3070); +- ++ const __m128i desc_mask = _mm_set_epi32(0x30f0, 0x30f0, ++ 0x30f0, 0x30f0); + const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | ++ PKT_RX_OUTER_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD); + + /* map the checksum, rss and vlan fields to the checksum, rss + * and vlan flag + */ +- const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | +- PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, +- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); ++ const __m128i cksum_flags = ++ _mm_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | ++ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_BAD) >> 1, ++ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | ++ PKT_RX_IP_CKSUM_GOOD) >> 1); + + const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, +@@ -166,6 +194,14 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4], + flags = _mm_shuffle_epi8(cksum_flags, tmp_desc); + /* then we shift left 1 bit */ + flags = _mm_slli_epi32(flags, 1); ++ ++ __m128i l4_outer_mask = 
_mm_set_epi32(0x6, 0x6, 0x6, 0x6); ++ __m128i l4_outer_flags = _mm_and_si128(flags, l4_outer_mask); ++ l4_outer_flags = _mm_slli_epi32(l4_outer_flags, 20); ++ ++ __m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6); ++ __m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask); ++ flags = _mm_or_si128(l3_l4_flags, l4_outer_flags); + /* we need to mask out the reduntant bits introduced by RSS or + * VLAN fields. + */ +@@ -217,10 +253,10 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4], + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. + */ +- rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10); +- rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10); +- rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10); +- rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10); ++ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x30); ++ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x30); ++ rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x30); ++ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x30); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != +diff --git a/dpdk/drivers/net/ionic/ionic.h b/dpdk/drivers/net/ionic/ionic.h +index 1538df3092..a6d84036e8 100644 +--- a/dpdk/drivers/net/ionic/ionic.h ++++ b/dpdk/drivers/net/ionic/ionic.h +@@ -48,6 +48,7 @@ struct ionic_hw { + struct ionic_adapter { + struct ionic_hw hw; + struct ionic_dev idev; ++ const char *name; + struct ionic_dev_bar bars[IONIC_BARS_MAX]; + struct ionic_identity ident; + struct ionic_lif *lifs[IONIC_LIFS_MAX]; +diff --git a/dpdk/drivers/net/ionic/ionic_dev.c b/dpdk/drivers/net/ionic/ionic_dev.c +index 5c2820b7a1..632ca10cf2 100644 +--- a/dpdk/drivers/net/ionic/ionic_dev.c ++++ b/dpdk/drivers/net/ionic/ionic_dev.c +@@ -103,6 +103,9 @@ ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) + uint32_t cmd_size = sizeof(cmd->words) / + sizeof(cmd->words[0]); + ++ IONIC_PRINT(DEBUG, "Sending %s (%d) via dev_cmd", ++ ionic_opcode_to_str(cmd->cmd.opcode), cmd->cmd.opcode); ++ + for (i = 0; i < cmd_size; i++) + iowrite32(cmd->words[i], &idev->dev_cmd->cmd.words[i]); + +@@ -350,6 +353,8 @@ ionic_dev_cmd_adminq_init(struct ionic_dev *idev, + .q_init.cq_ring_base = cq->base_pa, + }; + ++ IONIC_PRINT(DEBUG, "adminq.q_init.ver %u", cmd.q_init.ver); ++ + ionic_dev_cmd_go(idev, &cmd); + } + +diff --git a/dpdk/drivers/net/ionic/ionic_dev.h b/dpdk/drivers/net/ionic/ionic_dev.h +index 532255a603..6bac96072d 100644 +--- a/dpdk/drivers/net/ionic/ionic_dev.h ++++ b/dpdk/drivers/net/ionic/ionic_dev.h +@@ -208,6 +208,8 @@ struct ionic_qcq; + void ionic_intr_init(struct ionic_dev *idev, struct ionic_intr_info *intr, + unsigned long index); + ++const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode); ++ + int ionic_dev_setup(struct ionic_adapter *adapter); + + void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd); +diff --git a/dpdk/drivers/net/ionic/ionic_ethdev.c b/dpdk/drivers/net/ionic/ionic_ethdev.c +index 600333e20f..fe778043eb 100644 +--- a/dpdk/drivers/net/ionic/ionic_ethdev.c ++++ b/dpdk/drivers/net/ionic/ionic_ethdev.c +@@ -289,7 +289,10 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev, + + /* Initialize */ + memset(&link, 0, sizeof(link)); +- link.link_autoneg = ETH_LINK_AUTONEG; ++ ++ if (adapter->idev.port_info->config.an_enable) { ++ link.link_autoneg = ETH_LINK_AUTONEG; ++ } + + 
if (!adapter->link_up) { + /* Interface is down */ +@@ -571,7 +574,7 @@ ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev, + + if (reta_size != ident->lif.eth.rss_ind_tbl_sz) { + IONIC_PRINT(ERR, "The size of hash lookup table configured " +- "(%d) doesn't match the number hardware can supported " ++ "(%d) does not match the number hardware can support " + "(%d)", + reta_size, ident->lif.eth.rss_ind_tbl_sz); + return -EINVAL; +@@ -605,7 +608,7 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + + if (reta_size != ident->lif.eth.rss_ind_tbl_sz) { + IONIC_PRINT(ERR, "The size of hash lookup table configured " +- "(%d) doesn't match the number hardware can supported " ++ "(%d) does not match the number hardware can support " + "(%d)", + reta_size, ident->lif.eth.rss_ind_tbl_sz); + return -EINVAL; +@@ -901,7 +904,8 @@ ionic_dev_start(struct rte_eth_dev *eth_dev) + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; +- uint32_t allowed_speeds; ++ uint32_t speed = 0, allowed_speeds; ++ uint8_t an_enable; + int err; + + IONIC_PRINT_CALL(); +@@ -925,11 +929,23 @@ ionic_dev_start(struct rte_eth_dev *eth_dev) + return err; + } + +- if (eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { +- uint32_t speed = ionic_parse_link_speeds(dev_conf->link_speeds); ++ /* Configure link */ ++ an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0; + +- if (speed) +- ionic_dev_cmd_port_speed(idev, speed); ++ ionic_dev_cmd_port_autoneg(idev, an_enable); ++ err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); ++ if (err) ++ IONIC_PRINT(WARNING, "Failed to %s autonegotiation", ++ an_enable ? "enable" : "disable"); ++ ++ if (!an_enable) ++ speed = ionic_parse_link_speeds(dev_conf->link_speeds); ++ if (speed) { ++ ionic_dev_cmd_port_speed(idev, speed); ++ err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); ++ if (err) ++ IONIC_PRINT(WARNING, "Failed to set link speed %u", ++ speed); + } + + ionic_dev_link_update(eth_dev, 0); +diff --git a/dpdk/drivers/net/ionic/ionic_lif.c b/dpdk/drivers/net/ionic/ionic_lif.c +index 60a5f3d537..5894f3505a 100644 +--- a/dpdk/drivers/net/ionic/ionic_lif.c ++++ b/dpdk/drivers/net/ionic/ionic_lif.c +@@ -551,7 +551,7 @@ ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) + /* + * Note: interrupt handler is called for index = 0 only + * (we use interrupts for the notifyq only anyway, +- * which hash index = 0) ++ * which has index = 0) + */ + + for (index = 0; index < adapter->nintrs; index++) +@@ -684,8 +684,8 @@ ionic_qcq_alloc(struct ionic_lif *lif, uint8_t type, + ionic_q_sg_map(&new->q, sg_base, sg_base_pa); + } + +- IONIC_PRINT(DEBUG, "Q-Base-PA = %ju CQ-Base-PA = %ju " +- "SG-base-PA = %ju", ++ IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx " ++ "SG-base-PA = %#jx", + q_base_pa, cq_base_pa, sg_base_pa); + + ionic_q_map(&new->q, q_base, q_base_pa); +@@ -824,7 +824,13 @@ ionic_lif_alloc(struct ionic_lif *lif) + int dbpage_num; + int err; + +- snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); ++ /* ++ * lif->name was zeroed on allocation. ++ * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated. 
++ */ ++ memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1); ++ ++ IONIC_PRINT(DEBUG, "LIF: %s", lif->name); + + IONIC_PRINT(DEBUG, "Allocating Lif Info"); + +@@ -867,8 +873,6 @@ ionic_lif_alloc(struct ionic_lif *lif) + + IONIC_PRINT(DEBUG, "Allocating Admin Queue"); + +- IONIC_PRINT(DEBUG, "Allocating Admin Queue"); +- + err = ionic_admin_qcq_alloc(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot allocate admin queue"); +@@ -1224,6 +1228,7 @@ ionic_lif_notifyq_init(struct ionic_lif *lif) + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d", + ctx.cmd.q_init.ring_size); ++ IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) +@@ -1335,6 +1340,7 @@ ionic_lif_txq_init(struct ionic_qcq *qcq) + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "txq_init.ring_size %d", + ctx.cmd.q_init.ring_size); ++ IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver); + + err = ionic_adminq_post_wait(qcq->lif, &ctx); + if (err) +@@ -1383,6 +1389,7 @@ ionic_lif_rxq_init(struct ionic_qcq *qcq) + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "rxq_init.ring_size %d", + ctx.cmd.q_init.ring_size); ++ IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver); + + err = ionic_adminq_post_wait(qcq->lif, &ctx); + if (err) +@@ -1453,8 +1460,8 @@ ionic_lif_set_name(struct ionic_lif *lif) + }, + }; + +- snprintf(ctx.cmd.lif_setattr.name, sizeof(ctx.cmd.lif_setattr.name), +- "%d", lif->port_id); ++ memcpy(ctx.cmd.lif_setattr.name, lif->name, ++ sizeof(ctx.cmd.lif_setattr.name) - 1); + + ionic_adminq_post_wait(lif, &ctx); + } +@@ -1685,7 +1692,8 @@ ionic_lifs_size(struct ionic_adapter *adapter) + nintrs = nlifs * 1 /* notifyq */; + + if (nintrs > dev_nintrs) { +- IONIC_PRINT(ERR, "At most %d intr queues supported, minimum required is %u", ++ IONIC_PRINT(ERR, ++ "At most %d intr supported, minimum req'd is %u", + dev_nintrs, nintrs); + return -ENOSPC; + } +diff --git a/dpdk/drivers/net/ionic/ionic_main.c b/dpdk/drivers/net/ionic/ionic_main.c +index 2ade213d2d..b963898db0 100644 +--- a/dpdk/drivers/net/ionic/ionic_main.c ++++ b/dpdk/drivers/net/ionic/ionic_main.c +@@ -61,7 +61,7 @@ ionic_error_to_str(enum ionic_status_code code) + } + } + +-static const char * ++const char * + ionic_opcode_to_str(enum ionic_cmd_opcode opcode) + { + switch (opcode) { +@@ -107,6 +107,8 @@ ionic_opcode_to_str(enum ionic_cmd_opcode opcode) + return "IONIC_CMD_Q_INIT"; + case IONIC_CMD_Q_CONTROL: + return "IONIC_CMD_Q_CONTROL"; ++ case IONIC_CMD_Q_IDENTIFY: ++ return "IONIC_CMD_Q_IDENTIFY"; + case IONIC_CMD_RDMA_RESET_LIF: + return "IONIC_CMD_RDMA_RESET_LIF"; + case IONIC_CMD_RDMA_CREATE_EQ: +@@ -126,8 +128,9 @@ ionic_adminq_check_err(struct ionic_admin_ctx *ctx, bool timeout) + const char *name; + const char *status; + ++ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); ++ + if (ctx->comp.comp.status || timeout) { +- name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + status = ionic_error_to_str(ctx->comp.comp.status); + IONIC_PRINT(ERR, "%s (%d) failed: %s (%d)", + name, +@@ -137,6 +140,8 @@ ionic_adminq_check_err(struct ionic_admin_ctx *ctx, bool timeout) + return -EIO; + } + ++ IONIC_PRINT(DEBUG, "%s (%d) succeeded", name, ctx->cmd.cmd.opcode); ++ + return 0; + } + +@@ -174,14 +179,13 @@ ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) + bool done; + int err; + +- IONIC_PRINT(DEBUG, "Sending %s to the admin queue", +- ionic_opcode_to_str(ctx->cmd.cmd.opcode)); ++ IONIC_PRINT(DEBUG, "Sending %s (%d) via the admin 
queue", ++ ionic_opcode_to_str(ctx->cmd.cmd.opcode), ctx->cmd.cmd.opcode); + + err = ionic_adminq_post(lif, ctx); + if (err) { +- IONIC_PRINT(ERR, "Failure posting to the admin queue %d (%d)", ++ IONIC_PRINT(ERR, "Failure posting %d to the admin queue (%d)", + ctx->cmd.cmd.opcode, err); +- + return err; + } + +@@ -339,12 +343,12 @@ ionic_port_identify(struct ionic_adapter *adapter) + ioread32(&idev->dev_cmd->data[i]); + } + +- IONIC_PRINT(INFO, "speed %d ", ident->port.config.speed); +- IONIC_PRINT(INFO, "mtu %d ", ident->port.config.mtu); +- IONIC_PRINT(INFO, "state %d ", ident->port.config.state); +- IONIC_PRINT(INFO, "an_enable %d ", ident->port.config.an_enable); +- IONIC_PRINT(INFO, "fec_type %d ", ident->port.config.fec_type); +- IONIC_PRINT(INFO, "pause_type %d ", ident->port.config.pause_type); ++ IONIC_PRINT(INFO, "speed %d", ident->port.config.speed); ++ IONIC_PRINT(INFO, "mtu %d", ident->port.config.mtu); ++ IONIC_PRINT(INFO, "state %d", ident->port.config.state); ++ IONIC_PRINT(INFO, "an_enable %d", ident->port.config.an_enable); ++ IONIC_PRINT(INFO, "fec_type %d", ident->port.config.fec_type); ++ IONIC_PRINT(INFO, "pause_type %d", ident->port.config.pause_type); + IONIC_PRINT(INFO, "loopback_mode %d", + ident->port.config.loopback_mode); + +@@ -385,8 +389,7 @@ ionic_port_init(struct ionic_adapter *adapter) + idev->port_info_sz = RTE_ALIGN(sizeof(*idev->port_info), PAGE_SIZE); + + snprintf(z_name, sizeof(z_name), "%s_port_%s_info", +- IONIC_DRV_NAME, +- adapter->pci_dev->device.name); ++ IONIC_DRV_NAME, adapter->name); + + idev->port_info_z = ionic_memzone_reserve(z_name, idev->port_info_sz, + SOCKET_ID_ANY); +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx.c b/dpdk/drivers/net/ionic/ionic_rxtx.c +index 2592f5cab6..9466099352 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx.c +@@ -67,7 +67,7 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + qinfo->conf.tx_deferred_start = txq->deferred_start; + } + +-static inline void __rte_cold ++static __rte_always_inline void + ionic_tx_flush(struct ionic_cq *cq) + { + struct ionic_queue *q = cq->bound_q; +@@ -133,7 +133,7 @@ ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) + { + struct ionic_qcq *txq; + +- IONIC_PRINT_CALL(); ++ IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id); + + txq = eth_dev->data->tx_queues[tx_queue_id]; + +@@ -156,7 +156,7 @@ ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) + + int __rte_cold + ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, +- uint16_t nb_desc, uint32_t socket_id __rte_unused, ++ uint16_t nb_desc, uint32_t socket_id, + const struct rte_eth_txconf *tx_conf) + { + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); +@@ -164,11 +164,6 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, + uint64_t offloads; + int err; + +- IONIC_PRINT_CALL(); +- +- IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers", +- tx_queue_id, nb_desc); +- + if (tx_queue_id >= lif->ntxqcqs) { + IONIC_PRINT(DEBUG, "Queue index %u not available " + "(max %u queues)", +@@ -177,6 +172,9 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, + } + + offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads; ++ IONIC_PRINT(DEBUG, ++ "Configuring skt %u TX queue %u with %u buffers, offloads %jx", ++ socket_id, tx_queue_id, nb_desc, offloads); + + /* Validate number of receive descriptors */ + if 
(!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC) +@@ -214,10 +212,11 @@ ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) + struct ionic_qcq *txq; + int err; + +- IONIC_PRINT_CALL(); +- + txq = eth_dev->data->tx_queues[tx_queue_id]; + ++ IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs", ++ tx_queue_id, txq->q.num_descs); ++ + err = ionic_lif_txq_init(txq); + if (err) + return err; +@@ -316,7 +315,8 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + struct ionic_txq_desc *desc; + struct ionic_txq_sg_elem *elem; + struct rte_mbuf *txm_seg; +- uint64_t desc_addr = 0; ++ rte_iova_t data_iova; ++ uint64_t desc_addr = 0, next_addr; + uint16_t desc_len = 0; + uint8_t desc_nsge; + uint32_t hdrlen; +@@ -353,6 +353,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + + seglen = hdrlen + mss; + left = txm->data_len; ++ data_iova = rte_mbuf_data_iova(txm); + + desc = ionic_tx_tso_next(q, &elem); + start = true; +@@ -362,7 +363,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + while (left > 0) { + len = RTE_MIN(seglen, left); + frag_left = seglen - len; +- desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm)); ++ desc_addr = rte_cpu_to_le_64(data_iova + offset); + desc_len = len; + desc_nsge = 0; + left -= len; +@@ -386,24 +387,23 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + txm_seg = txm->next; + while (txm_seg != NULL) { + offset = 0; ++ data_iova = rte_mbuf_data_iova(txm_seg); + left = txm_seg->data_len; + stats->frags++; + + while (left > 0) { +- rte_iova_t data_iova; +- data_iova = rte_mbuf_data_iova(txm_seg); +- elem->addr = rte_cpu_to_le_64(data_iova) + offset; ++ next_addr = rte_cpu_to_le_64(data_iova + offset); + if (frag_left > 0) { + len = RTE_MIN(frag_left, left); + frag_left -= len; ++ elem->addr = next_addr; + elem->len = len; + elem++; + desc_nsge++; + } else { + len = RTE_MIN(mss, left); + frag_left = mss - len; +- data_iova = rte_mbuf_data_iova(txm_seg); +- desc_addr = rte_cpu_to_le_64(data_iova); ++ desc_addr = next_addr; + desc_len = len; + desc_nsge = 0; + } +@@ -411,6 +411,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + offset += len; + if (txm_seg->next != NULL && frag_left > 0) + continue; ++ + done = (txm_seg->next == NULL && left == 0); + ionic_tx_tso_post(q, desc, txm_seg, + desc_addr, desc_nsge, desc_len, +@@ -430,7 +431,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + return 0; + } + +-static int ++static __rte_always_inline int + ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm, + uint64_t offloads, bool not_xmit_more) + { +@@ -444,23 +445,27 @@ ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm, + bool encap; + bool has_vlan; + uint64_t ol_flags = txm->ol_flags; +- uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm)); ++ uint64_t addr; + uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE; + uint8_t flags = 0; + + if ((ol_flags & PKT_TX_IP_CKSUM) && +- (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) { ++ (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) { + opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; + flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3; +- if (((ol_flags & PKT_TX_TCP_CKSUM) && +- (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) || +- ((ol_flags & PKT_TX_UDP_CKSUM) && +- (offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) +- flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4; +- } else { +- stats->no_csum++; + } + ++ if (((ol_flags & PKT_TX_TCP_CKSUM) && ++ (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) || ++ ((ol_flags & PKT_TX_UDP_CKSUM) && ++ (offloads & 
DEV_TX_OFFLOAD_UDP_CKSUM))) { ++ opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; ++ flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4; ++ } ++ ++ if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE) ++ stats->no_csum++; ++ + has_vlan = (ol_flags & PKT_TX_VLAN_PKT); + encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) && +@@ -470,6 +475,8 @@ ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm, + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + ++ addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm)); ++ + desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); + desc->len = txm->data_len; + desc->vlan_tci = txm->vlan_tci; +@@ -641,7 +648,7 @@ int __rte_cold + ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id, + uint16_t nb_desc, +- uint32_t socket_id __rte_unused, ++ uint32_t socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) + { +@@ -650,11 +657,6 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint64_t offloads; + int err; + +- IONIC_PRINT_CALL(); +- +- IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers", +- rx_queue_id, nb_desc); +- + if (rx_queue_id >= lif->nrxqcqs) { + IONIC_PRINT(ERR, + "Queue index %u not available (max %u queues)", +@@ -663,13 +665,16 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + } + + offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads; ++ IONIC_PRINT(DEBUG, ++ "Configuring skt %u RX queue %u with %u buffers, offloads %jx", ++ socket_id, rx_queue_id, nb_desc, offloads); + + /* Validate number of receive descriptors */ + if (!rte_is_power_of_2(nb_desc) || + nb_desc < IONIC_MIN_RING_DESC || + nb_desc > IONIC_MAX_RING_DESC) { + IONIC_PRINT(ERR, +- "Bad number of descriptors (%u) for queue %u (min: %u)", ++ "Bad descriptor count (%u) for queue %u (min: %u)", + nb_desc, rx_queue_id, IONIC_MIN_RING_DESC); + return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ + } +@@ -686,7 +691,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + + err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq); + if (err) { +- IONIC_PRINT(ERR, "Queue allocation failure"); ++ IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id); + return -EINVAL; + } + +@@ -712,7 +717,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + return 0; + } + +-static void ++static __rte_always_inline void + ionic_rx_clean(struct ionic_queue *q, + uint32_t q_desc_index, uint32_t cq_desc_index, + void *cb_arg, void *service_cb_arg) +@@ -873,7 +878,7 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, + ionic_q_post(q, true, ionic_rx_clean, mbuf); + } + +-static int __rte_cold ++static __rte_always_inline int + ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len) + { + struct ionic_queue *q = &rxq->q; +@@ -957,13 +962,11 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) + struct ionic_qcq *rxq; + int err; + +- IONIC_PRINT_CALL(); +- +- IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)", +- frame_size); +- + rxq = eth_dev->data->rx_queues[rx_queue_id]; + ++ IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)", ++ rx_queue_id, rxq->q.num_descs, frame_size); ++ + err = ionic_lif_rxq_init(rxq); + if (err) + return err; +@@ -983,7 +986,7 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) + return 0; + } + +-static inline void __rte_cold ++static __rte_always_inline void + ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do, + void 
*service_cb_arg) + { +@@ -1043,7 +1046,7 @@ ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) + { + struct ionic_qcq *rxq; + +- IONIC_PRINT_CALL(); ++ IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id); + + rxq = eth_dev->data->rx_queues[rx_queue_id]; + +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h +index 9b0cf309c8..a6815a9cca 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h +@@ -640,6 +640,7 @@ ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev, + */ + #define IPN3KE_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2) ++#define IPN3KE_ETH_MAX_LEN (RTE_ETHER_MTU + IPN3KE_ETH_OVERHEAD) + + #define IPN3KE_MAC_FRAME_SIZE_MAX 9728 + #define IPN3KE_MAC_RX_FRAME_MAXLENGTH 0x00AE +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +index 8a53602576..9e15cce34f 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +@@ -2801,7 +2801,7 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu) + return -EBUSY; + } + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > IPN3KE_ETH_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME); + else +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +index 9a47a8b262..fa0f5afd03 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +@@ -5173,7 +5173,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + + /* switch to jumbo mode if needed */ +- if (frame_size > RTE_ETHER_MAX_LEN) { ++ if (frame_size > IXGBE_ETH_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + hlreg0 |= IXGBE_HLREG0_JUMBOEN; +@@ -6555,7 +6555,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + * prior to 3.11.33 which contains the following change: + * "ixgbe: Enable jumbo frames support w/ SR-IOV" + */ +- ixgbevf_rlpml_set_vf(hw, max_frame); ++ if (ixgbevf_rlpml_set_vf(hw, max_frame)) ++ return -EINVAL; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h +index 3d35ea791b..a0ce18ca24 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h ++++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h +@@ -104,6 +104,9 @@ + /* The overhead from MTU to max frame size. 
*/ + #define IXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + ++/* The max frame size with default MTU */ ++#define IXGBE_ETH_MAX_LEN (RTE_ETHER_MTU + IXGBE_ETH_OVERHEAD) ++ + /* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/ + #define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000 + /* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/ +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_fdir.c b/dpdk/drivers/net/ixgbe/ixgbe_fdir.c +index a0fab5070d..11b9effeba 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_fdir.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_fdir.c +@@ -503,9 +503,30 @@ ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset) + { + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct ixgbe_hw_fdir_info *fdir_info = ++ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint32_t fdirctrl; + int i; + ++ if (fdir_info->flex_bytes_offset == offset) ++ return 0; ++ ++ /** ++ * 82599 adapters flow director init flow cannot be restarted, ++ * Workaround 82599 silicon errata by performing the following steps ++ * before re-writing the FDIRCTRL control register with the same value. ++ * - write 1 to bit 8 of FDIRCMD register & ++ * - write 0 to bit 8 of FDIRCMD register ++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | ++ IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ++ ~IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + + fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK; +@@ -520,6 +541,14 @@ ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + break; + msec_delay(1); + } ++ ++ if (i >= IXGBE_FDIR_INIT_DONE_POLL) { ++ PMD_DRV_LOG(ERR, "Flow Director poll time exceeded!"); ++ return -ETIMEDOUT; ++ } ++ ++ fdir_info->flex_bytes_offset = offset; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/dpdk/drivers/net/ixgbe/ixgbe_flow.c +index 39f6ed73f6..9aeb2e4a49 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_flow.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_flow.c +@@ -3137,13 +3137,13 @@ ixgbe_flow_create(struct rte_eth_dev *dev, + rte_memcpy(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct ixgbe_hw_fdir_mask)); +- fdir_info->flex_bytes_offset = +- fdir_rule.flex_bytes_offset; + +- if (fdir_rule.mask.flex_bytes_mask) +- ixgbe_fdir_set_flexbytes_offset(dev, ++ if (fdir_rule.mask.flex_bytes_mask) { ++ ret = ixgbe_fdir_set_flexbytes_offset(dev, + fdir_rule.flex_bytes_offset); +- ++ if (ret) ++ goto out; ++ } + ret = ixgbe_fdir_set_input_mask(dev); + if (ret) + goto out; +@@ -3161,8 +3161,9 @@ ixgbe_flow_create(struct rte_eth_dev *dev, + if (ret) + goto out; + +- if (fdir_info->flex_bytes_offset != +- fdir_rule.flex_bytes_offset) ++ if (fdir_rule.mask.flex_bytes_mask && ++ fdir_info->flex_bytes_offset != ++ fdir_rule.flex_bytes_offset) + goto out; + } + } +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_pf.c b/dpdk/drivers/net/ixgbe/ixgbe_pf.c +index 833863af5a..15982af8da 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_pf.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_pf.c +@@ -552,20 +552,47 @@ ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + } + + static int +-ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) ++ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + { + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint32_t new_mtu = msgbuf[1]; ++ uint32_t 
max_frame = msgbuf[1]; + uint32_t max_frs; + uint32_t hlreg0; +- int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + /* X540 and X550 support jumbo frames in IOV mode */ + if (hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && +- hw->mac.type != ixgbe_mac_X550EM_a) +- return -1; ++ hw->mac.type != ixgbe_mac_X550EM_a) { ++ struct ixgbe_vf_info *vfinfo = ++ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); ++ ++ switch (vfinfo[vf].api_version) { ++ case ixgbe_mbox_api_11: ++ case ixgbe_mbox_api_12: ++ case ixgbe_mbox_api_13: ++ /** ++ * Version 1.1&1.2&1.3 supports jumbo frames on VFs ++ * if PF has jumbo frames enabled which means legacy ++ * VFs are disabled. ++ */ ++ if (dev->data->dev_conf.rxmode.max_rx_pkt_len > ++ IXGBE_ETH_MAX_LEN) ++ break; ++ /* fall through */ ++ default: ++ /** ++ * If the PF or VF are running w/ jumbo frames enabled, ++ * we return -1 as we cannot support jumbo frames on ++ * legacy VFs. ++ */ ++ if (max_frame > IXGBE_ETH_MAX_LEN || ++ dev->data->dev_conf.rxmode.max_rx_pkt_len > ++ IXGBE_ETH_MAX_LEN) ++ return -1; ++ break; ++ } ++ } + + if (max_frame < RTE_ETHER_MIN_LEN || + max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) +@@ -573,9 +600,9 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms + + max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & + IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; +- if (max_frs < new_mtu) { ++ if (max_frs < max_frame) { + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); +- if (new_mtu > RTE_ETHER_MAX_LEN) { ++ if (max_frame > IXGBE_ETH_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + hlreg0 |= IXGBE_HLREG0_JUMBOEN; +@@ -586,7 +613,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms + } + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + +- max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; ++ max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +index 6cfbb582e2..3b893b0df0 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +@@ -1441,7 +1441,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) + } + + static inline uint64_t +-rx_desc_error_to_pkt_flags(uint32_t rx_status) ++rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info, ++ uint8_t rx_udp_csum_zero_err) + { + uint64_t pkt_flags; + +@@ -1458,6 +1459,15 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) + pkt_flags = error_to_pkt_flags_map[(rx_status >> + IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; + ++ /* Mask out the bad UDP checksum error if the hardware has UDP zero ++ * checksum error issue, so that the software application will then ++ * have to recompute the checksum itself if needed. 
++ */ ++ if ((rx_status & IXGBE_RXDADV_ERR_TCPE) && ++ (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && ++ rx_udp_csum_zero_err) ++ pkt_flags &= ~PKT_RX_L4_CKSUM_BAD; ++ + if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) && + (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) { + pkt_flags |= PKT_RX_EIP_CKSUM_BAD; +@@ -1544,7 +1554,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) + /* convert descriptor fields to rte mbuf flags */ + pkt_flags = rx_desc_status_to_pkt_flags(s[j], + vlan_flags); +- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); ++ pkt_flags |= rx_desc_error_to_pkt_flags(s[j], ++ (uint16_t)pkt_info[j], ++ rxq->rx_udp_csum_zero_err); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags + ((uint16_t)pkt_info[j]); + mb->ol_flags = pkt_flags; +@@ -1877,7 +1889,9 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + + pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); +- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); ++ pkt_flags = pkt_flags | ++ rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, ++ rxq->rx_udp_csum_zero_err); + pkt_flags = pkt_flags | + ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + rxm->ol_flags = pkt_flags; +@@ -1970,7 +1984,8 @@ ixgbe_fill_cluster_head_buf( + head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); + pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); + pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags); +- pkt_flags |= rx_desc_error_to_pkt_flags(staterr); ++ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, ++ rxq->rx_udp_csum_zero_err); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + head->ol_flags = pkt_flags; + head->packet_type = +@@ -3091,6 +3106,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + else + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599; + ++ /* ++ * 82599 errata, UDP frames with a 0 checksum can be marked as checksum ++ * errors. ++ */ ++ if (hw->mac.type == ixgbe_mac_82599EB) ++ rxq->rx_udp_csum_zero_err = 1; ++ + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for +@@ -4898,15 +4920,11 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) + /* RFCTL configuration */ + rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); + if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) +- /* +- * Since NFS packets coalescing is not supported - clear +- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is +- * enabled. +- */ +- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS | +- IXGBE_RFCTL_NFSR_DIS); ++ rfctl &= ~IXGBE_RFCTL_RSC_DIS; + else + rfctl |= IXGBE_RFCTL_RSC_DIS; ++ /* disable NFS filtering */ ++ rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS; + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); + + /* If LRO hasn't been requested - we are done here. */ +@@ -5634,8 +5652,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) + * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way, + * VF packets received can work in all cases. 
+ */ +- ixgbevf_rlpml_set_vf(hw, +- (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); ++ if (ixgbevf_rlpml_set_vf(hw, ++ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) { ++ PMD_INIT_LOG(ERR, "Set max packet length to %d failed.", ++ dev->data->dev_conf.rxmode.max_rx_pkt_len); ++ return -EINVAL; ++ } + + /* + * Assume no header split and no VLAN strip support +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h +index 6d2f7c9da3..bcadaf79ce 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h ++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h +@@ -129,6 +129,8 @@ struct ixgbe_rx_queue { + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ + uint8_t rx_deferred_start; /**< not in global dev start. */ ++ /** UDP frames with a 0 checksum can be marked as checksum errors. */ ++ uint8_t rx_udp_csum_zero_err; + /** flags to set in mbuf when a vlan is detected. */ + uint64_t vlan_flags; + uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */ +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +index 90c076825a..52add17b5d 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +@@ -132,9 +132,9 @@ desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts) + + static inline void + desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, +- struct rte_mbuf **rx_pkts) ++ uint16_t udp_p_flag, struct rte_mbuf **rx_pkts) + { +- __m128i ptype0, ptype1, vtag0, vtag1, csum; ++ __m128i ptype0, ptype1, vtag0, vtag1, csum, udp_csum_skip; + __m128i rearm0, rearm1, rearm2, rearm3; + + /* mask everything except rss type */ +@@ -161,6 +161,7 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, + (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP); ++ + /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */ + const __m128i vlan_csum_map_lo = _mm_set_epi8( + 0, 0, 0, 0, +@@ -182,12 +183,23 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, + 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, + PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t)); + ++ /* mask everything except UDP header present if specified */ ++ const __m128i udp_hdr_p_msk = _mm_set_epi16 ++ (0, 0, 0, 0, ++ udp_p_flag, udp_p_flag, udp_p_flag, udp_p_flag); ++ ++ const __m128i udp_csum_bad_shuf = _mm_set_epi8 ++ (0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, ~(uint8_t)PKT_RX_L4_CKSUM_BAD, 0xFF); ++ + ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]); + ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]); + vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]); + vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]); + + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); ++ /* save the UDP header present information */ ++ udp_csum_skip = _mm_and_si128(ptype0, udp_hdr_p_msk); + ptype0 = _mm_and_si128(ptype0, rsstype_msk); + ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); + +@@ -215,6 +227,15 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, + + vtag1 = _mm_or_si128(ptype0, vtag1); + ++ /* convert the UDP header present 0x200 to 0x1 for aligning with each ++ * PKT_RX_L4_CKSUM_BAD value in low byte of 16 bits word ol_flag in ++ * vtag1 (4x16). Then mask out the bad checksum value by shuffle and ++ * bit-mask. 
++ */ ++ udp_csum_skip = _mm_srli_epi16(udp_csum_skip, 9); ++ udp_csum_skip = _mm_shuffle_epi8(udp_csum_bad_shuf, udp_csum_skip); ++ vtag1 = _mm_and_si128(vtag1, udp_csum_skip); ++ + /* + * At this point, we have the 4 sets of flags in the low 64-bits + * of vtag1 (4x16). +@@ -341,6 +362,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + __m128i dd_check, eop_check; + __m128i mbuf_init; + uint8_t vlan_flags; ++ uint16_t udp_p_flag = 0; /* Rx Descriptor UDP header present */ + + /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); +@@ -365,6 +387,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + ++ if (rxq->rx_udp_csum_zero_err) ++ udp_p_flag = IXGBE_RXDADV_PKTTYPE_UDP; ++ + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + +@@ -477,7 +502,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + /* set ol_flags with vlan packet type */ +- desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]); ++ desc_to_olflags_v(descs, mbuf_init, vlan_flags, udp_p_flag, ++ &rx_pkts[pos]); + + #ifdef RTE_LIB_SECURITY + if (unlikely(use_ipsec)) +diff --git a/dpdk/drivers/net/liquidio/lio_ethdev.c b/dpdk/drivers/net/liquidio/lio_ethdev.c +index d4dd3768cd..eb0fdab45a 100644 +--- a/dpdk/drivers/net/liquidio/lio_ethdev.c ++++ b/dpdk/drivers/net/liquidio/lio_ethdev.c +@@ -481,7 +481,7 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + return -1; + } + +- if (frame_len > RTE_ETHER_MAX_LEN) ++ if (frame_len > LIO_ETH_MAX_LEN) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else +diff --git a/dpdk/drivers/net/liquidio/lio_ethdev.h b/dpdk/drivers/net/liquidio/lio_ethdev.h +index 74cd2fb6c6..d33be1c44d 100644 +--- a/dpdk/drivers/net/liquidio/lio_ethdev.h ++++ b/dpdk/drivers/net/liquidio/lio_ethdev.h +@@ -13,6 +13,9 @@ + #define LIO_LSC_TIMEOUT 100000 /* 100000us (100ms) */ + #define LIO_MAX_CMD_TIMEOUT 10000 /* 10000ms (10s) */ + ++/* The max frame size with default MTU */ ++#define LIO_ETH_MAX_LEN (RTE_ETHER_MTU + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) ++ + #define LIO_DEV(_eth_dev) ((_eth_dev)->data->dev_private) + + /* LIO Response condition variable */ +diff --git a/dpdk/drivers/net/mlx4/meson.build b/dpdk/drivers/net/mlx4/meson.build +index 0cf9938a88..d7602b748e 100644 +--- a/dpdk/drivers/net/mlx4/meson.build ++++ b/dpdk/drivers/net/mlx4/meson.build +@@ -24,7 +24,8 @@ endif + libnames = [ 'mlx4', 'ibverbs' ] + libs = [] + foreach libname:libnames +- lib = dependency('lib' + libname, static:static_ibverbs, required:false) ++ lib = dependency('lib' + libname, static:static_ibverbs, ++ required:false, method: 'pkg-config') + if not lib.found() and not static_ibverbs + lib = cc.find_library(libname, required:false) + endif +diff --git a/dpdk/drivers/net/mlx4/mlx4.c b/dpdk/drivers/net/mlx4/mlx4.c +index d5d8c96351..919a9347f9 100644 +--- a/dpdk/drivers/net/mlx4/mlx4.c ++++ b/dpdk/drivers/net/mlx4/mlx4.c +@@ -195,7 +195,7 @@ mlx4_free_verbs_buf(void *ptr, void *data __rte_unused) + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +-static int ++int + mlx4_proc_priv_init(struct rte_eth_dev *dev) + { + struct mlx4_proc_priv *ppriv; +@@ -207,13 +207,13 @@ mlx4_proc_priv_init(struct rte_eth_dev *dev) + */ + ppriv_size = sizeof(struct mlx4_proc_priv) + + dev->data->nb_tx_queues * sizeof(void *); +- ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size, +- RTE_CACHE_LINE_SIZE, dev->device->numa_node); ++ ppriv = rte_zmalloc_socket("mlx4_proc_priv", ppriv_size, ++ RTE_CACHE_LINE_SIZE, dev->device->numa_node); + if (!ppriv) { + rte_errno = ENOMEM; + return -rte_errno; + } +- ppriv->uar_table_sz = ppriv_size; ++ ppriv->uar_table_sz = dev->data->nb_tx_queues; + dev->process_private = ppriv; + return 0; + } +@@ -224,7 +224,7 @@ mlx4_proc_priv_init(struct rte_eth_dev *dev) + * @param dev + * Pointer to Ethernet device structure. + */ +-static void ++void + mlx4_proc_priv_uninit(struct rte_eth_dev *dev) + { + if (!dev->process_private) +@@ -375,8 +375,10 @@ mlx4_dev_close(struct rte_eth_dev *dev) + struct mlx4_priv *priv = dev->data->dev_private; + unsigned int i; + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ rte_eth_dev_release_port(dev); + return 0; ++ } + DEBUG("%p: closing device \"%s\"", + (void *)dev, + ((priv->ctx != NULL) ? priv->ctx->device->name : "")); +@@ -764,6 +766,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + struct ibv_context *attr_ctx = NULL; + struct ibv_device_attr device_attr; + struct ibv_device_attr_ex device_attr_ex; ++ struct rte_eth_dev *prev_dev = NULL; + struct mlx4_conf conf = { + .ports.present = 0, + .mr_ext_memseg_en = 1, +@@ -878,7 +881,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + ERROR("can not attach rte ethdev"); + rte_errno = ENOMEM; + err = rte_errno; +- goto error; ++ goto err_secondary; + } + priv = eth_dev->data->dev_private; + if (!priv->verbs_alloc_ctx.enabled) { +@@ -887,24 +890,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + " from Verbs"); + rte_errno = ENOTSUP; + err = rte_errno; +- goto error; ++ goto err_secondary; + } + eth_dev->device = &pci_dev->device; + eth_dev->dev_ops = &mlx4_dev_sec_ops; + err = mlx4_proc_priv_init(eth_dev); + if (err) +- goto error; ++ goto err_secondary; + /* Receive command fd from primary process. */ + err = mlx4_mp_req_verbs_cmd_fd(eth_dev); + if (err < 0) { + err = rte_errno; +- goto error; ++ goto err_secondary; + } + /* Remap UAR for Tx queues. */ + err = mlx4_tx_uar_init_secondary(eth_dev, err); + if (err) { + err = rte_errno; +- goto error; ++ goto err_secondary; + } + /* + * Ethdev pointer is still required as input since +@@ -916,7 +919,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + claim_zero(mlx4_glue->close_device(ctx)); + rte_eth_copy_pci_info(eth_dev, pci_dev); + rte_eth_dev_probing_finish(eth_dev); ++ prev_dev = eth_dev; + continue; ++err_secondary: ++ claim_zero(mlx4_glue->close_device(ctx)); ++ rte_eth_dev_release_port(eth_dev); ++ if (prev_dev) ++ rte_eth_dev_release_port(prev_dev); ++ break; + } + /* Check port status. 
*/ + err = mlx4_glue->query_port(ctx, port, &port_attr); +@@ -1091,6 +1101,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock); + rte_eth_dev_probing_finish(eth_dev); ++ prev_dev = eth_dev; + continue; + port_error: + rte_free(priv); +@@ -1105,14 +1116,10 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + eth_dev->data->mac_addrs = NULL; + rte_eth_dev_release_port(eth_dev); + } ++ if (prev_dev) ++ mlx4_dev_close(prev_dev); + break; + } +- /* +- * XXX if something went wrong in the loop above, there is a resource +- * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as +- * long as the dpdk does not provide a way to deallocate a ethdev and a +- * way to enumerate the registered ethdevs to free the previous ones. +- */ + error: + if (attr_ctx) + claim_zero(mlx4_glue->close_device(attr_ctx)); +@@ -1123,6 +1130,36 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + return -err; + } + ++/** ++ * DPDK callback to remove a PCI device. ++ * ++ * This function removes all Ethernet devices belong to a given PCI device. ++ * ++ * @param[in] pci_dev ++ * Pointer to the PCI device. ++ * ++ * @return ++ * 0 on success, the function cannot fail. ++ */ ++static int ++mlx4_pci_remove(struct rte_pci_device *pci_dev) ++{ ++ uint16_t port_id; ++ int ret = 0; ++ ++ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) { ++ /* ++ * mlx4_dev_close() is not registered to secondary process, ++ * call the close function explicitly for secondary process. ++ */ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ ret |= mlx4_dev_close(&rte_eth_devices[port_id]); ++ else ++ ret |= rte_eth_dev_close(port_id); ++ } ++ return ret == 0 ? 
0 : -EIO; ++} ++ + static const struct rte_pci_id mlx4_pci_id_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, +@@ -1147,6 +1184,7 @@ static struct rte_pci_driver mlx4_driver = { + }, + .id_table = mlx4_pci_id_map, + .probe = mlx4_pci_probe, ++ .remove = mlx4_pci_remove, + .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV, + }; + +diff --git a/dpdk/drivers/net/mlx4/mlx4.h b/dpdk/drivers/net/mlx4/mlx4.h +index c6cb29493e..87710d3996 100644 +--- a/dpdk/drivers/net/mlx4/mlx4.h ++++ b/dpdk/drivers/net/mlx4/mlx4.h +@@ -197,6 +197,10 @@ struct mlx4_priv { + #define PORT_ID(priv) ((priv)->dev_data->port_id) + #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)]) + ++int mlx4_proc_priv_init(struct rte_eth_dev *dev); ++void mlx4_proc_priv_uninit(struct rte_eth_dev *dev); ++ ++ + /* mlx4_ethdev.c */ + + int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]); +diff --git a/dpdk/drivers/net/mlx4/mlx4_mp.c b/dpdk/drivers/net/mlx4/mlx4_mp.c +index eca0c20a8a..3622d61075 100644 +--- a/dpdk/drivers/net/mlx4/mlx4_mp.c ++++ b/dpdk/drivers/net/mlx4/mlx4_mp.c +@@ -111,6 +111,9 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + const struct mlx4_mp_param *param = + (const struct mlx4_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; ++#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET ++ struct mlx4_proc_priv *ppriv; ++#endif + int ret; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); +@@ -126,6 +129,21 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + rte_mb(); + dev->tx_pkt_burst = mlx4_tx_burst; + dev->rx_pkt_burst = mlx4_rx_burst; ++#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET ++ ppriv = (struct mlx4_proc_priv *)dev->process_private; ++ if (ppriv->uar_table_sz != dev->data->nb_tx_queues) { ++ mlx4_tx_uar_uninit_secondary(dev); ++ mlx4_proc_priv_uninit(dev); ++ ret = mlx4_proc_priv_init(dev); ++ if (ret) ++ return -rte_errno; ++ ret = mlx4_tx_uar_init_secondary(dev, mp_msg->fds[0]); ++ if (ret) { ++ mlx4_proc_priv_uninit(dev); ++ return -rte_errno; ++ } ++ } ++#endif + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); +@@ -163,6 +181,7 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx4_mp_req_type type) + struct rte_mp_reply mp_rep; + struct mlx4_mp_param *res __rte_unused; + struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0}; ++ struct mlx4_priv *priv; + int ret; + int i; + +@@ -175,6 +194,11 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx4_mp_req_type type) + return; + } + mp_init_msg(dev, &mp_req, type); ++ if (type == MLX4_MP_REQ_START_RXTX) { ++ priv = dev->data->dev_private; ++ mp_req.num_fds = 1; ++ mp_req.fds[0] = priv->ctx->cmd_fd; ++ } + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + if (rte_errno != ENOTSUP) +diff --git a/dpdk/drivers/net/mlx4/mlx4_rxtx.h b/dpdk/drivers/net/mlx4/mlx4_rxtx.h +index 9de6c59411..136ca56ca4 100644 +--- a/dpdk/drivers/net/mlx4/mlx4_rxtx.h ++++ b/dpdk/drivers/net/mlx4/mlx4_rxtx.h +@@ -157,6 +157,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, + /* mlx4_txq.c */ + + int mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd); ++void mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev); + uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv); + int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, +diff --git a/dpdk/drivers/net/mlx4/mlx4_txq.c b/dpdk/drivers/net/mlx4/mlx4_txq.c +index 37b84413fb..60560d9545 100644 
+--- a/dpdk/drivers/net/mlx4/mlx4_txq.c ++++ b/dpdk/drivers/net/mlx4/mlx4_txq.c +@@ -157,6 +157,27 @@ mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd) + } while (i--); + return -rte_errno; + } ++ ++void ++mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev) ++{ ++ struct mlx4_proc_priv *ppriv = ++ (struct mlx4_proc_priv *)dev->process_private; ++ const size_t page_size = sysconf(_SC_PAGESIZE); ++ void *addr; ++ size_t i; ++ ++ if (page_size == (size_t)-1) { ++ ERROR("Failed to get mem page size"); ++ return; ++ } ++ for (i = 0; i < ppriv->uar_table_sz; i++) { ++ addr = ppriv->uar_table[i]; ++ if (addr) ++ munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size); ++ } ++} ++ + #else + int + mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused, +@@ -167,6 +188,13 @@ mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused, + rte_errno = ENOTSUP; + return -rte_errno; + } ++ ++void ++mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev __rte_unused) ++{ ++ assert(rte_eal_process_type() == RTE_PROC_SECONDARY); ++ ERROR("UAR remap is not supported"); ++} + #endif + + /** +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +index 128845cb52..e36a78091c 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +@@ -143,7 +143,7 @@ struct ethtool_link_settings { + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) ++mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE]) + { + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int ifindex; +@@ -151,7 +151,7 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) + MLX5_ASSERT(priv); + MLX5_ASSERT(priv->sh); + if (priv->bond_ifindex > 0) { +- memcpy(ifname, priv->bond_name, IF_NAMESIZE); ++ memcpy(ifname, priv->bond_name, MLX5_NAMESIZE); + return 0; + } + ifindex = mlx5_ifindex(dev); +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h +index a6bd2c01e1..73ed62655e 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h +@@ -350,6 +350,32 @@ mlx5_flow_os_create_flow_action_drop(void **action) + return (*action) ? 0 : -1; + } + ++/** ++ * Create flow action: dest_devx_tir ++ * ++ * @param[in] tir ++ * Pointer to DevX tir object ++ * @param[out] action ++ * Pointer to a valid action on success, NULL otherwise. ++ * ++ * @return ++ * 0 on success, or -1 on failure and errno is set. ++ */ ++static inline int ++mlx5_flow_os_create_flow_action_dest_devx_tir(struct mlx5_devx_obj *tir, ++ void **action) ++{ ++#ifdef HAVE_IBV_FLOW_DV_SUPPORT ++ *action = mlx5_glue->dv_create_flow_action_dest_devx_tir(tir->obj); ++ return (*action) ? 0 : -1; ++#else ++ /* If no DV support - skip the operation and return success */ ++ RTE_SET_USED(tir); ++ *action = 0; ++ return 0; ++#endif ++} ++ + /** + * Destroy flow action. 
+ * +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c +index 08ade75799..95372e2084 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c +@@ -115,6 +115,7 @@ struct rte_mp_msg mp_res; + const struct mlx5_mp_param *param = + (const struct mlx5_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; ++ struct mlx5_proc_priv *ppriv; + struct mlx5_priv *priv; + int ret; + +@@ -132,6 +133,20 @@ struct rte_mp_msg mp_res; + rte_mb(); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); ++ ppriv = (struct mlx5_proc_priv *)dev->process_private; ++ /* If Tx queue number changes, re-initialize UAR. */ ++ if (ppriv->uar_table_sz != priv->txqs_n) { ++ mlx5_tx_uar_uninit_secondary(dev); ++ mlx5_proc_priv_uninit(dev); ++ ret = mlx5_proc_priv_init(dev); ++ if (ret) ++ return -rte_errno; ++ ret = mlx5_tx_uar_init_secondary(dev, mp_msg->fds[0]); ++ if (ret) { ++ mlx5_proc_priv_uninit(dev); ++ return -rte_errno; ++ } ++ } + mp_init_msg(&priv->mp_id, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); +@@ -183,6 +198,10 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type) + return; + } + mp_init_msg(&priv->mp_id, &mp_req, type); ++ if (type == MLX5_MP_REQ_START_RXTX) { ++ mp_req.num_fds = 1; ++ mp_req.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd; ++ } + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + if (rte_errno != ENOTSUP) +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c +index 4c863db1a7..91001473b0 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c +@@ -168,9 +168,8 @@ mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr) + static void * + mlx5_alloc_verbs_buf(size_t size, void *data) + { +- struct mlx5_priv *priv = data; ++ struct mlx5_dev_ctx_shared *sh = data; + void *ret; +- unsigned int socket = SOCKET_ID_ANY; + size_t alignment = rte_mem_page_size(); + if (alignment == (size_t)-1) { + DRV_LOG(ERR, "Failed to get mem page size"); +@@ -178,18 +177,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data) + return NULL; + } + +- if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) { +- const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; +- +- socket = ctrl->socket; +- } else if (priv->verbs_alloc_ctx.type == +- MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) { +- const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; +- +- socket = ctrl->socket; +- } + MLX5_ASSERT(data != NULL); +- ret = mlx5_malloc(0, size, alignment, socket); ++ ret = mlx5_malloc(0, size, alignment, sh->numa_node); + if (!ret && size) + rte_errno = ENOMEM; + return ret; +@@ -681,8 +670,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + int err = 0; + unsigned int hw_padding = 0; + unsigned int mps; +- unsigned int cqe_comp; +- unsigned int cqe_pad = 0; + unsigned int tunnel_en = 0; + unsigned int mpls_en = 0; + unsigned int swp = 0; +@@ -762,7 +749,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + rte_eth_devices[priv->sh->bond_dev].device; + else + eth_dev->device = dpdk_dev; +- eth_dev->dev_ops = &mlx5_os_dev_sec_ops; ++ eth_dev->dev_ops = &mlx5_dev_sec_ops; + eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; + eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; + err = mlx5_proc_priv_init(eth_dev); +@@ -874,17 +861,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + 
mprq_caps.max_single_wqe_log_num_of_strides; + } + #endif +- if (RTE_CACHE_LINE_SIZE == 128 && +- !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) +- cqe_comp = 0; +- else +- cqe_comp = 1; +- config->cqe_comp = cqe_comp; +-#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD +- /* Whether device supports 128B Rx CQE padding. */ +- cqe_pad = RTE_CACHE_LINE_SIZE == 128 && +- (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); +-#endif ++ /* Rx CQE compression is enabled by default. */ ++ config->cqe_comp = 1; + #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { + tunnel_en = ((dv_attr.tunnel_offloads_caps & +@@ -941,8 +919,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + priv->dev_port = spawn->phys_port; + priv->pci_dev = spawn->pci_dev; + priv->mtu = RTE_ETHER_MTU; +- priv->mp_id.port_id = port_id; +- strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); + /* Some internal functions rely on Netlink sockets, open them now. */ + priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); + priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); +@@ -1117,16 +1093,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + config->mps == MLX5_MPW_ENHANCED ? "enhanced " : + config->mps == MLX5_MPW ? "legacy " : "", + config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); +- if (config->cqe_comp && !cqe_comp) { +- DRV_LOG(WARNING, "Rx CQE compression isn't supported"); +- config->cqe_comp = 0; +- } +- if (config->cqe_pad && !cqe_pad) { +- DRV_LOG(WARNING, "Rx CQE padding isn't supported"); +- config->cqe_pad = 0; +- } else if (config->cqe_pad) { +- DRV_LOG(INFO, "Rx CQE padding is enabled"); +- } + if (config->devx) { + err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr); + if (err) { +@@ -1225,6 +1191,25 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + } + #endif + } ++ if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 && ++ !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) { ++ DRV_LOG(WARNING, "Rx CQE 128B compression is not supported"); ++ config->cqe_comp = 0; ++ } ++ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX && ++ (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) { ++ DRV_LOG(WARNING, "Flow Tag CQE compression" ++ " format isn't supported."); ++ config->cqe_comp = 0; ++ } ++ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX && ++ (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) { ++ DRV_LOG(WARNING, "L3/L4 Header CQE compression" ++ " format isn't supported."); ++ config->cqe_comp = 0; ++ } ++ DRV_LOG(DEBUG, "Rx CQE compression is %ssupported", ++ config->cqe_comp ? "" : "not "); + if (config->tx_pp) { + DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz", + config->hca_attr.dev_freq_khz); +@@ -1364,6 +1349,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + eth_dev->data->representor_id = priv->representor_id; + } ++ priv->mp_id.port_id = eth_dev->data->port_id; ++ strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); + /* + * Store associated network device interface index. This index + * is permanent throughout the lifetime of device. 
So, we may store +@@ -1416,7 +1403,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + mac.addr_bytes[4], mac.addr_bytes[5]); + #ifdef RTE_LIBRTE_MLX5_DEBUG + { +- char ifname[IF_NAMESIZE]; ++ char ifname[MLX5_NAMESIZE]; + + if (mlx5_get_ifname(eth_dev, &ifname) == 0) + DRV_LOG(DEBUG, "port %u ifname is \"%s\"", +@@ -1437,7 +1424,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + /* Initialize burst functions to prevent crashes before link-up. */ + eth_dev->rx_pkt_burst = removed_rx_burst; + eth_dev->tx_pkt_burst = removed_tx_burst; +- eth_dev->dev_ops = &mlx5_os_dev_ops; ++ eth_dev->dev_ops = &mlx5_dev_ops; + eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; + eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; + eth_dev->rx_queue_count = mlx5_rx_queue_count; +@@ -1459,7 +1446,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ + .alloc = &mlx5_alloc_verbs_buf, + .free = &mlx5_free_verbs_buf, +- .data = priv, ++ .data = sh, + })); + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", +@@ -2324,6 +2311,16 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, + DRV_LOG(DEBUG, "DevX is NOT supported"); + err = 0; + } ++ if (!err && sh->ctx) { ++ /* Hint libmlx5 to use PMD allocator for data plane resources */ ++ mlx5_glue->dv_set_context_attr(sh->ctx, ++ MLX5DV_CTX_ATTR_BUF_ALLOCATORS, ++ (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ ++ .alloc = &mlx5_alloc_verbs_buf, ++ .free = &mlx5_free_verbs_buf, ++ .data = sh, ++ })); ++ } + return err; + } + +@@ -2606,153 +2603,3 @@ mlx5_os_mac_addr_flush(struct rte_eth_dev *dev) + dev->data->mac_addrs, + MLX5_MAX_MAC_ADDRESSES, priv->mac_own); + } +- +-const struct eth_dev_ops mlx5_os_dev_ops = { +- .dev_configure = mlx5_dev_configure, +- .dev_start = mlx5_dev_start, +- .dev_stop = mlx5_dev_stop, +- .dev_set_link_down = mlx5_set_link_down, +- .dev_set_link_up = mlx5_set_link_up, +- .dev_close = mlx5_dev_close, +- .promiscuous_enable = mlx5_promiscuous_enable, +- .promiscuous_disable = mlx5_promiscuous_disable, +- .allmulticast_enable = mlx5_allmulticast_enable, +- .allmulticast_disable = mlx5_allmulticast_disable, +- .link_update = mlx5_link_update, +- .stats_get = mlx5_stats_get, +- .stats_reset = mlx5_stats_reset, +- .xstats_get = mlx5_xstats_get, +- .xstats_reset = mlx5_xstats_reset, +- .xstats_get_names = mlx5_xstats_get_names, +- .fw_version_get = mlx5_fw_version_get, +- .dev_infos_get = mlx5_dev_infos_get, +- .read_clock = mlx5_txpp_read_clock, +- .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, +- .vlan_filter_set = mlx5_vlan_filter_set, +- .rx_queue_setup = mlx5_rx_queue_setup, +- .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, +- .tx_queue_setup = mlx5_tx_queue_setup, +- .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, +- .rx_queue_release = mlx5_rx_queue_release, +- .tx_queue_release = mlx5_tx_queue_release, +- .rx_queue_start = mlx5_rx_queue_start, +- .rx_queue_stop = mlx5_rx_queue_stop, +- .tx_queue_start = mlx5_tx_queue_start, +- .tx_queue_stop = mlx5_tx_queue_stop, +- .flow_ctrl_get = mlx5_dev_get_flow_ctrl, +- .flow_ctrl_set = mlx5_dev_set_flow_ctrl, +- .mac_addr_remove = mlx5_mac_addr_remove, +- .mac_addr_add = mlx5_mac_addr_add, +- .mac_addr_set = mlx5_mac_addr_set, +- .set_mc_addr_list = mlx5_set_mc_addr_list, +- .mtu_set = mlx5_dev_set_mtu, +- .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, +- .vlan_offload_set = mlx5_vlan_offload_set, +- .reta_update = mlx5_dev_rss_reta_update, +- 
.reta_query = mlx5_dev_rss_reta_query, +- .rss_hash_update = mlx5_rss_hash_update, +- .rss_hash_conf_get = mlx5_rss_hash_conf_get, +- .filter_ctrl = mlx5_dev_filter_ctrl, +- .rxq_info_get = mlx5_rxq_info_get, +- .txq_info_get = mlx5_txq_info_get, +- .rx_burst_mode_get = mlx5_rx_burst_mode_get, +- .tx_burst_mode_get = mlx5_tx_burst_mode_get, +- .rx_queue_intr_enable = mlx5_rx_intr_enable, +- .rx_queue_intr_disable = mlx5_rx_intr_disable, +- .is_removed = mlx5_is_removed, +- .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, +- .get_module_info = mlx5_get_module_info, +- .get_module_eeprom = mlx5_get_module_eeprom, +- .hairpin_cap_get = mlx5_hairpin_cap_get, +- .mtr_ops_get = mlx5_flow_meter_ops_get, +- .hairpin_bind = mlx5_hairpin_bind, +- .hairpin_unbind = mlx5_hairpin_unbind, +- .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports, +- .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, +- .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, +- .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, +-}; +- +-/* Available operations from secondary process. */ +-const struct eth_dev_ops mlx5_os_dev_sec_ops = { +- .stats_get = mlx5_stats_get, +- .stats_reset = mlx5_stats_reset, +- .xstats_get = mlx5_xstats_get, +- .xstats_reset = mlx5_xstats_reset, +- .xstats_get_names = mlx5_xstats_get_names, +- .fw_version_get = mlx5_fw_version_get, +- .dev_infos_get = mlx5_dev_infos_get, +- .read_clock = mlx5_txpp_read_clock, +- .rx_queue_start = mlx5_rx_queue_start, +- .rx_queue_stop = mlx5_rx_queue_stop, +- .tx_queue_start = mlx5_tx_queue_start, +- .tx_queue_stop = mlx5_tx_queue_stop, +- .rxq_info_get = mlx5_rxq_info_get, +- .txq_info_get = mlx5_txq_info_get, +- .rx_burst_mode_get = mlx5_rx_burst_mode_get, +- .tx_burst_mode_get = mlx5_tx_burst_mode_get, +- .get_module_info = mlx5_get_module_info, +- .get_module_eeprom = mlx5_get_module_eeprom, +-}; +- +-/* Available operations in flow isolated mode. 
*/ +-const struct eth_dev_ops mlx5_os_dev_ops_isolate = { +- .dev_configure = mlx5_dev_configure, +- .dev_start = mlx5_dev_start, +- .dev_stop = mlx5_dev_stop, +- .dev_set_link_down = mlx5_set_link_down, +- .dev_set_link_up = mlx5_set_link_up, +- .dev_close = mlx5_dev_close, +- .promiscuous_enable = mlx5_promiscuous_enable, +- .promiscuous_disable = mlx5_promiscuous_disable, +- .allmulticast_enable = mlx5_allmulticast_enable, +- .allmulticast_disable = mlx5_allmulticast_disable, +- .link_update = mlx5_link_update, +- .stats_get = mlx5_stats_get, +- .stats_reset = mlx5_stats_reset, +- .xstats_get = mlx5_xstats_get, +- .xstats_reset = mlx5_xstats_reset, +- .xstats_get_names = mlx5_xstats_get_names, +- .fw_version_get = mlx5_fw_version_get, +- .dev_infos_get = mlx5_dev_infos_get, +- .read_clock = mlx5_txpp_read_clock, +- .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, +- .vlan_filter_set = mlx5_vlan_filter_set, +- .rx_queue_setup = mlx5_rx_queue_setup, +- .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, +- .tx_queue_setup = mlx5_tx_queue_setup, +- .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, +- .rx_queue_release = mlx5_rx_queue_release, +- .tx_queue_release = mlx5_tx_queue_release, +- .rx_queue_start = mlx5_rx_queue_start, +- .rx_queue_stop = mlx5_rx_queue_stop, +- .tx_queue_start = mlx5_tx_queue_start, +- .tx_queue_stop = mlx5_tx_queue_stop, +- .flow_ctrl_get = mlx5_dev_get_flow_ctrl, +- .flow_ctrl_set = mlx5_dev_set_flow_ctrl, +- .mac_addr_remove = mlx5_mac_addr_remove, +- .mac_addr_add = mlx5_mac_addr_add, +- .mac_addr_set = mlx5_mac_addr_set, +- .set_mc_addr_list = mlx5_set_mc_addr_list, +- .mtu_set = mlx5_dev_set_mtu, +- .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, +- .vlan_offload_set = mlx5_vlan_offload_set, +- .filter_ctrl = mlx5_dev_filter_ctrl, +- .rxq_info_get = mlx5_rxq_info_get, +- .txq_info_get = mlx5_txq_info_get, +- .rx_burst_mode_get = mlx5_rx_burst_mode_get, +- .tx_burst_mode_get = mlx5_tx_burst_mode_get, +- .rx_queue_intr_enable = mlx5_rx_intr_enable, +- .rx_queue_intr_disable = mlx5_rx_intr_disable, +- .is_removed = mlx5_is_removed, +- .get_module_info = mlx5_get_module_info, +- .get_module_eeprom = mlx5_get_module_eeprom, +- .hairpin_cap_get = mlx5_hairpin_cap_get, +- .mtr_ops_get = mlx5_flow_meter_ops_get, +- .hairpin_bind = mlx5_hairpin_bind, +- .hairpin_unbind = mlx5_hairpin_unbind, +- .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports, +- .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, +- .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, +- .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, +-}; +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.h b/dpdk/drivers/net/mlx5/linux/mlx5_os.h +index 759def2f4b..7dbacceabe 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_os.h ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.h +@@ -14,11 +14,9 @@ enum { + DEV_SYSFS_PATH_MAX = IBV_SYSFS_PATH_MAX + 1 + }; + ++#define MLX5_NAMESIZE IF_NAMESIZE ++ + #define PCI_DRV_FLAGS (RTE_PCI_DRV_INTR_LSC | \ + RTE_PCI_DRV_INTR_RMV | \ + RTE_PCI_DRV_PROBE_AGAIN) +- +-/* mlx5_ethdev_os.c */ +- +-int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]); + #endif /* RTE_PMD_MLX5_OS_H_ */ +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c b/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c +index 540ce32990..6b98a4c166 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c +@@ -213,13 +213,22 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx) + if 
(priv->config.cqe_comp && !rxq_data->hw_timestamp) { + cq_attr.mlx5.comp_mask |= + MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; ++ rxq_data->byte_mask = UINT32_MAX; + #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT +- cq_attr.mlx5.cqe_comp_res_format = +- mlx5_rxq_mprq_enabled(rxq_data) ? +- MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX : +- MLX5DV_CQE_RES_FORMAT_HASH; ++ if (mlx5_rxq_mprq_enabled(rxq_data)) { ++ cq_attr.mlx5.cqe_comp_res_format = ++ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX; ++ rxq_data->mcqe_format = ++ MLX5_CQE_RESP_FORMAT_CSUM_STRIDX; ++ } else { ++ cq_attr.mlx5.cqe_comp_res_format = ++ MLX5DV_CQE_RES_FORMAT_HASH; ++ rxq_data->mcqe_format = ++ MLX5_CQE_RESP_FORMAT_HASH; ++ } + #else + cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; ++ rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH; + #endif + /* + * For vectorized Rx, it must not be doubled in order to +@@ -234,7 +243,7 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx) + dev->data->port_id); + } + #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD +- if (priv->config.cqe_pad) { ++ if (RTE_CACHE_LINE_SIZE == 128) { + cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS; + cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD; + } +@@ -366,8 +375,6 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx) + + MLX5_ASSERT(rxq_data); + MLX5_ASSERT(tmpl); +- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE; +- priv->verbs_alloc_ctx.obj = rxq_ctrl; + tmpl->rxq_ctrl = rxq_ctrl; + if (rxq_ctrl->irq) { + tmpl->ibv_channel = +@@ -438,7 +445,6 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx) + rxq_data->cq_arm_sn = 0; + mlx5_rxq_initialize(rxq_data); + rxq_data->cq_ci = 0; +- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; + rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num; + return 0; +@@ -451,7 +457,6 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx) + if (tmpl->ibv_channel) + claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel)); + rte_errno = ret; /* Restore rte_errno. */ +- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return -rte_errno; + } + +@@ -932,8 +937,6 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx) + MLX5_ASSERT(txq_data); + MLX5_ASSERT(txq_obj); + txq_obj->txq_ctrl = txq_ctrl; +- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE; +- priv->verbs_alloc_ctx.obj = txq_ctrl; + if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { + DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION " + "must never be set.", dev->data->port_id); +@@ -1039,7 +1042,6 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx) + } + txq_uar_init(txq_ctrl); + dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; +- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return 0; + error: + ret = rte_errno; /* Save rte_errno before cleanup. */ +@@ -1047,7 +1049,6 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx) + claim_zero(mlx5_glue->destroy_cq(txq_obj->cq)); + if (txq_obj->qp) + claim_zero(mlx5_glue->destroy_qp(txq_obj->qp)); +- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. 
*/ + return -rte_errno; + } +diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c +index ca3667a469..bdb446d2d2 100644 +--- a/dpdk/drivers/net/mlx5/mlx5.c ++++ b/dpdk/drivers/net/mlx5/mlx5.c +@@ -37,14 +37,12 @@ + #include "mlx5_autoconf.h" + #include "mlx5_mr.h" + #include "mlx5_flow.h" ++#include "mlx5_flow_os.h" + #include "rte_pmd_mlx5.h" + + /* Device parameter to enable RX completion queue compression. */ + #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" + +-/* Device parameter to enable RX completion entry padding to 128B. */ +-#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" +- + /* Device parameter to enable padding Rx packet to cacheline size. */ + #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" + +@@ -413,8 +411,8 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh) + for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) + if (pool->actions[j].dr_action) + claim_zero +- (mlx5_glue->destroy_flow_action +- (pool->actions[j].dr_action)); ++ (mlx5_flow_os_destroy_flow_action ++ (pool->actions[j].dr_action)); + mlx5_free(pool); + } + mlx5_free(sh->aso_age_mng->pools); +@@ -521,7 +519,7 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh) + + if (cnt->action) + claim_zero +- (mlx5_glue->destroy_flow_action ++ (mlx5_flow_os_destroy_flow_action + (cnt->action)); + if (fallback && MLX5_POOL_GET_CNT + (pool, j)->dcs_when_free) +@@ -1249,13 +1247,13 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + */ + ppriv_size = + sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); +- ppriv = mlx5_malloc(MLX5_MEM_RTE, ppriv_size, RTE_CACHE_LINE_SIZE, +- dev->device->numa_node); ++ ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size, ++ RTE_CACHE_LINE_SIZE, dev->device->numa_node); + if (!ppriv) { + rte_errno = ENOMEM; + return -rte_errno; + } +- ppriv->uar_table_sz = ppriv_size; ++ ppriv->uar_table_sz = priv->txqs_n; + dev->process_private = ppriv; + return 0; + } +@@ -1266,7 +1264,7 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + * @param dev + * Pointer to Ethernet device structure. 
+ */ +-static void ++void + mlx5_proc_priv_uninit(struct rte_eth_dev *dev) + { + if (!dev->process_private) +@@ -1426,6 +1424,156 @@ mlx5_dev_close(struct rte_eth_dev *dev) + return 0; + } + ++const struct eth_dev_ops mlx5_dev_ops = { ++ .dev_configure = mlx5_dev_configure, ++ .dev_start = mlx5_dev_start, ++ .dev_stop = mlx5_dev_stop, ++ .dev_set_link_down = mlx5_set_link_down, ++ .dev_set_link_up = mlx5_set_link_up, ++ .dev_close = mlx5_dev_close, ++ .promiscuous_enable = mlx5_promiscuous_enable, ++ .promiscuous_disable = mlx5_promiscuous_disable, ++ .allmulticast_enable = mlx5_allmulticast_enable, ++ .allmulticast_disable = mlx5_allmulticast_disable, ++ .link_update = mlx5_link_update, ++ .stats_get = mlx5_stats_get, ++ .stats_reset = mlx5_stats_reset, ++ .xstats_get = mlx5_xstats_get, ++ .xstats_reset = mlx5_xstats_reset, ++ .xstats_get_names = mlx5_xstats_get_names, ++ .fw_version_get = mlx5_fw_version_get, ++ .dev_infos_get = mlx5_dev_infos_get, ++ .read_clock = mlx5_txpp_read_clock, ++ .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, ++ .vlan_filter_set = mlx5_vlan_filter_set, ++ .rx_queue_setup = mlx5_rx_queue_setup, ++ .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, ++ .tx_queue_setup = mlx5_tx_queue_setup, ++ .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, ++ .rx_queue_release = mlx5_rx_queue_release, ++ .tx_queue_release = mlx5_tx_queue_release, ++ .rx_queue_start = mlx5_rx_queue_start, ++ .rx_queue_stop = mlx5_rx_queue_stop, ++ .tx_queue_start = mlx5_tx_queue_start, ++ .tx_queue_stop = mlx5_tx_queue_stop, ++ .flow_ctrl_get = mlx5_dev_get_flow_ctrl, ++ .flow_ctrl_set = mlx5_dev_set_flow_ctrl, ++ .mac_addr_remove = mlx5_mac_addr_remove, ++ .mac_addr_add = mlx5_mac_addr_add, ++ .mac_addr_set = mlx5_mac_addr_set, ++ .set_mc_addr_list = mlx5_set_mc_addr_list, ++ .mtu_set = mlx5_dev_set_mtu, ++ .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, ++ .vlan_offload_set = mlx5_vlan_offload_set, ++ .reta_update = mlx5_dev_rss_reta_update, ++ .reta_query = mlx5_dev_rss_reta_query, ++ .rss_hash_update = mlx5_rss_hash_update, ++ .rss_hash_conf_get = mlx5_rss_hash_conf_get, ++ .filter_ctrl = mlx5_dev_filter_ctrl, ++ .rxq_info_get = mlx5_rxq_info_get, ++ .txq_info_get = mlx5_txq_info_get, ++ .rx_burst_mode_get = mlx5_rx_burst_mode_get, ++ .tx_burst_mode_get = mlx5_tx_burst_mode_get, ++ .rx_queue_intr_enable = mlx5_rx_intr_enable, ++ .rx_queue_intr_disable = mlx5_rx_intr_disable, ++ .is_removed = mlx5_is_removed, ++ .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, ++ .get_module_info = mlx5_get_module_info, ++ .get_module_eeprom = mlx5_get_module_eeprom, ++ .hairpin_cap_get = mlx5_hairpin_cap_get, ++ .mtr_ops_get = mlx5_flow_meter_ops_get, ++ .hairpin_bind = mlx5_hairpin_bind, ++ .hairpin_unbind = mlx5_hairpin_unbind, ++ .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports, ++ .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, ++ .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, ++ .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, ++}; ++ ++/* Available operations from secondary process. 
*/ ++const struct eth_dev_ops mlx5_dev_sec_ops = { ++ .stats_get = mlx5_stats_get, ++ .stats_reset = mlx5_stats_reset, ++ .xstats_get = mlx5_xstats_get, ++ .xstats_reset = mlx5_xstats_reset, ++ .xstats_get_names = mlx5_xstats_get_names, ++ .fw_version_get = mlx5_fw_version_get, ++ .dev_infos_get = mlx5_dev_infos_get, ++ .read_clock = mlx5_txpp_read_clock, ++ .rx_queue_start = mlx5_rx_queue_start, ++ .rx_queue_stop = mlx5_rx_queue_stop, ++ .tx_queue_start = mlx5_tx_queue_start, ++ .tx_queue_stop = mlx5_tx_queue_stop, ++ .rxq_info_get = mlx5_rxq_info_get, ++ .txq_info_get = mlx5_txq_info_get, ++ .rx_burst_mode_get = mlx5_rx_burst_mode_get, ++ .tx_burst_mode_get = mlx5_tx_burst_mode_get, ++ .get_module_info = mlx5_get_module_info, ++ .get_module_eeprom = mlx5_get_module_eeprom, ++}; ++ ++/* Available operations in flow isolated mode. */ ++const struct eth_dev_ops mlx5_dev_ops_isolate = { ++ .dev_configure = mlx5_dev_configure, ++ .dev_start = mlx5_dev_start, ++ .dev_stop = mlx5_dev_stop, ++ .dev_set_link_down = mlx5_set_link_down, ++ .dev_set_link_up = mlx5_set_link_up, ++ .dev_close = mlx5_dev_close, ++ .promiscuous_enable = mlx5_promiscuous_enable, ++ .promiscuous_disable = mlx5_promiscuous_disable, ++ .allmulticast_enable = mlx5_allmulticast_enable, ++ .allmulticast_disable = mlx5_allmulticast_disable, ++ .link_update = mlx5_link_update, ++ .stats_get = mlx5_stats_get, ++ .stats_reset = mlx5_stats_reset, ++ .xstats_get = mlx5_xstats_get, ++ .xstats_reset = mlx5_xstats_reset, ++ .xstats_get_names = mlx5_xstats_get_names, ++ .fw_version_get = mlx5_fw_version_get, ++ .dev_infos_get = mlx5_dev_infos_get, ++ .read_clock = mlx5_txpp_read_clock, ++ .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, ++ .vlan_filter_set = mlx5_vlan_filter_set, ++ .rx_queue_setup = mlx5_rx_queue_setup, ++ .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, ++ .tx_queue_setup = mlx5_tx_queue_setup, ++ .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, ++ .rx_queue_release = mlx5_rx_queue_release, ++ .tx_queue_release = mlx5_tx_queue_release, ++ .rx_queue_start = mlx5_rx_queue_start, ++ .rx_queue_stop = mlx5_rx_queue_stop, ++ .tx_queue_start = mlx5_tx_queue_start, ++ .tx_queue_stop = mlx5_tx_queue_stop, ++ .flow_ctrl_get = mlx5_dev_get_flow_ctrl, ++ .flow_ctrl_set = mlx5_dev_set_flow_ctrl, ++ .mac_addr_remove = mlx5_mac_addr_remove, ++ .mac_addr_add = mlx5_mac_addr_add, ++ .mac_addr_set = mlx5_mac_addr_set, ++ .set_mc_addr_list = mlx5_set_mc_addr_list, ++ .mtu_set = mlx5_dev_set_mtu, ++ .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, ++ .vlan_offload_set = mlx5_vlan_offload_set, ++ .filter_ctrl = mlx5_dev_filter_ctrl, ++ .rxq_info_get = mlx5_rxq_info_get, ++ .txq_info_get = mlx5_txq_info_get, ++ .rx_burst_mode_get = mlx5_rx_burst_mode_get, ++ .tx_burst_mode_get = mlx5_tx_burst_mode_get, ++ .rx_queue_intr_enable = mlx5_rx_intr_enable, ++ .rx_queue_intr_disable = mlx5_rx_intr_disable, ++ .is_removed = mlx5_is_removed, ++ .get_module_info = mlx5_get_module_info, ++ .get_module_eeprom = mlx5_get_module_eeprom, ++ .hairpin_cap_get = mlx5_hairpin_cap_get, ++ .mtr_ops_get = mlx5_flow_meter_ops_get, ++ .hairpin_bind = mlx5_hairpin_bind, ++ .hairpin_unbind = mlx5_hairpin_unbind, ++ .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports, ++ .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, ++ .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, ++ .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, ++}; ++ + /** + * Verify and store value for device argument. 
+ * +@@ -1472,8 +1620,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque) + } + config->cqe_comp = !!tmp; + config->cqe_comp_fmt = tmp; +- } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) { +- config->cqe_pad = !!tmp; + } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { + config->hw_padding = !!tmp; + } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { +@@ -1602,7 +1748,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) + { + const char **params = (const char *[]){ + MLX5_RXQ_CQE_COMP_EN, +- MLX5_RXQ_CQE_PAD_EN, + MLX5_RXQ_PKT_PAD_EN, + MLX5_RX_MPRQ_EN, + MLX5_RX_MPRQ_LOG_STRIDE_NUM, +diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h +index 041240e6fe..9bf1bf3146 100644 +--- a/dpdk/drivers/net/mlx5/mlx5.h ++++ b/dpdk/drivers/net/mlx5/mlx5.h +@@ -135,9 +135,9 @@ struct mlx5_local_data { + extern struct mlx5_shared_data *mlx5_shared_data; + + /* Dev ops structs */ +-extern const struct eth_dev_ops mlx5_os_dev_ops; +-extern const struct eth_dev_ops mlx5_os_dev_sec_ops; +-extern const struct eth_dev_ops mlx5_os_dev_ops_isolate; ++extern const struct eth_dev_ops mlx5_dev_ops; ++extern const struct eth_dev_ops mlx5_dev_sec_ops; ++extern const struct eth_dev_ops mlx5_dev_ops_isolate; + + struct mlx5_counter_ctrl { + /* Name of the counter. */ +@@ -207,7 +207,6 @@ struct mlx5_dev_config { + unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */ + unsigned int cqe_comp:1; /* CQE compression is enabled. */ + unsigned int cqe_comp_fmt:3; /* CQE compression format. */ +- unsigned int cqe_pad:1; /* CQE padding is enabled. */ + unsigned int tso:1; /* Whether TSO is supported. */ + unsigned int rx_vec_en:1; /* Rx vector is enabled. */ + unsigned int mr_ext_memseg_en:1; +@@ -258,30 +257,12 @@ struct mlx5_dev_config { + }; + + +-/** +- * Type of object being allocated. +- */ +-enum mlx5_verbs_alloc_type { +- MLX5_VERBS_ALLOC_TYPE_NONE, +- MLX5_VERBS_ALLOC_TYPE_TX_QUEUE, +- MLX5_VERBS_ALLOC_TYPE_RX_QUEUE, +-}; +- + /* Structure for VF VLAN workaround. */ + struct mlx5_vf_vlan { + uint32_t tag:12; + uint32_t created:1; + }; + +-/** +- * Verbs allocator needs a context to know in the callback which kind of +- * resources it is allocating. +- */ +-struct mlx5_verbs_alloc_ctx { +- enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */ +- const void *obj; /* Pointer to the DPDK object. */ +-}; +- + /* Flow drop context necessary due to Verbs API. */ + struct mlx5_drop { + struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */ +@@ -768,7 +749,10 @@ struct mlx5_dev_ctx_shared { + struct mlx5_dev_shared_port port[]; /* per device port data array. */ + }; + +-/* Per-process private structure. */ ++/* ++ * Per-process private structure. ++ * Caution, secondary process may rebuild the struct during port start. ++ */ + struct mlx5_proc_priv { + size_t uar_table_sz; + /* Size of UAR register table. */ +@@ -957,7 +941,7 @@ struct mlx5_priv { + int32_t pf_bond; /* >=0 means PF index in bonding configuration. */ + unsigned int if_index; /* Associated kernel network device index. */ + uint32_t bond_ifindex; /**< Bond interface index. */ +- char bond_name[IF_NAMESIZE]; /**< Bond interface name. */ ++ char bond_name[MLX5_NAMESIZE]; /**< Bond interface name. */ + /* RX/TX queues. */ + unsigned int rxqs_n; /* RX queues array size. */ + unsigned int txqs_n; /* TX queues array size. */ +@@ -989,7 +973,6 @@ struct mlx5_priv { + struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ + struct mlx5_stats_ctrl stats_ctrl; /* Stats control. 
*/ + struct mlx5_dev_config config; /* Device configuration. */ +- struct mlx5_verbs_alloc_ctx verbs_alloc_ctx; + /* Context for Verbs allocator. */ + int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */ + int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ +@@ -1024,6 +1007,7 @@ struct rte_hairpin_peer_info { + + int mlx5_getenv_int(const char *); + int mlx5_proc_priv_init(struct rte_eth_dev *dev); ++void mlx5_proc_priv_uninit(struct rte_eth_dev *dev); + int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); + uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev); +@@ -1075,6 +1059,8 @@ int mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev); + + /* mlx5_ethdev_os.c */ + ++int mlx5_get_ifname(const struct rte_eth_dev *dev, ++ char (*ifname)[MLX5_NAMESIZE]); + unsigned int mlx5_ifindex(const struct rte_eth_dev *dev); + int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]); + int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu); +diff --git a/dpdk/drivers/net/mlx5/mlx5_devx.c b/dpdk/drivers/net/mlx5/mlx5_devx.c +index de9b204075..9970a58156 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_devx.c ++++ b/dpdk/drivers/net/mlx5/mlx5_devx.c +@@ -23,7 +23,7 @@ + #include "mlx5_utils.h" + #include "mlx5_devx.h" + #include "mlx5_flow.h" +- ++#include "mlx5_flow_os.h" + + /** + * Modify RQ vlan stripping offload +@@ -486,8 +486,6 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx) + "Port %u Rx CQE compression is disabled for LRO.", + dev->data->port_id); + } +- if (priv->config.cqe_pad) +- cq_attr.cqe_size = MLX5_CQE_SIZE_128B; + log_cqe_n = log2above(cqe_n); + cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n); + buf = rte_calloc_socket(__func__, 1, cq_size, page_size, +@@ -942,9 +940,8 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq, + goto error; + } + #ifdef HAVE_IBV_FLOW_DV_SUPPORT +- hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir +- (hrxq->tir->obj); +- if (!hrxq->action) { ++ if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir, ++ &hrxq->action)) { + rte_errno = errno; + goto error; + } +@@ -1263,8 +1260,6 @@ mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx) + DRV_LOG(ERR, "Failed to allocate CQ door-bell."); + goto error; + } +- cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ? +- MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B; + cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar); + cq_attr.eqn = priv->sh->eqn; + cq_attr.q_umem_valid = 1; +@@ -1304,12 +1299,15 @@ mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx) + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Tx queue array. ++ * @param[in] log_desc_n ++ * Log of number of descriptors in queue. + * + * @return + * Number of WQEs in SQ, 0 otherwise and rte_errno is set. 
+ */ + static uint32_t +-mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx) ++mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx, ++ uint16_t log_desc_n) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; +@@ -1329,7 +1327,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx) + rte_errno = ENOMEM; + return 0; + } +- wqe_n = RTE_MIN(1UL << txq_data->elts_n, ++ wqe_n = RTE_MIN(1UL << log_desc_n, + (uint32_t)priv->sh->device_attr.max_qp_wr); + txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, + wqe_n * sizeof(struct mlx5_wqe), +@@ -1431,8 +1429,8 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_txq_obj *txq_obj = txq_ctrl->obj; + void *reg_addr; +- uint32_t cqe_n; +- uint32_t wqe_n; ++ uint32_t cqe_n, log_desc_n; ++ uint32_t wqe_n, wqe_size; + int ret = 0; + + MLX5_ASSERT(txq_data); +@@ -1453,8 +1451,29 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) + txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs + + txq_obj->cq_dbrec_offset); + *txq_data->cq_db = 0; ++ /* ++ * Adjust the amount of WQEs depending on inline settings. ++ * The number of descriptors should be enough to handle ++ * the specified number of packets. If queue is being created ++ * with Verbs the rdma-core does queue size adjustment ++ * internally in the mlx5_calc_sq_size(), we do the same ++ * for the queue being created with DevX at this point. ++ */ ++ wqe_size = txq_data->tso_en ? ++ RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0; ++ wqe_size += sizeof(struct mlx5_wqe_cseg) + ++ sizeof(struct mlx5_wqe_eseg) + ++ sizeof(struct mlx5_wqe_dseg); ++ if (txq_data->inlen_send) ++ wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) + ++ sizeof(struct mlx5_wqe_eseg) + ++ RTE_ALIGN(txq_data->inlen_send + ++ sizeof(uint32_t), ++ MLX5_WSEG_SIZE)); ++ wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE; + /* Create Send Queue object with DevX. 
*/ +- wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx); ++ log_desc_n = log2above((1UL << txq_data->elts_n) * wqe_size); ++ wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n); + if (!wqe_n) { + rte_errno = errno; + goto error; +diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c +index a3910cf922..45ee7e4488 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c ++++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c +@@ -310,8 +310,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) + info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES; + info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev); + info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG; +- info->rx_seg_capa.multi_pools = 1; +- info->rx_seg_capa.offset_allowed = 1; ++ info->rx_seg_capa.multi_pools = !config->mprq.enabled; ++ info->rx_seg_capa.offset_allowed = !config->mprq.enabled; + info->rx_seg_capa.offset_align_log2 = 0; + info->rx_offload_capa = (mlx5_get_rx_port_offloads() | + info->rx_queue_offload_capa); +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c +index 52ade39a42..cda3ca557c 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow.c +@@ -212,6 +212,8 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) + return ret; + } + ++#define MLX5_RSS_EXP_ELT_N 8 ++ + /** + * Expand RSS flows into several possible flows according to the RSS hash + * fields requested and the driver capabilities. +@@ -242,13 +244,12 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, + const struct mlx5_flow_expand_node graph[], + int graph_root_index) + { +- const int elt_n = 8; + const struct rte_flow_item *item; + const struct mlx5_flow_expand_node *node = &graph[graph_root_index]; + const int *next_node; +- const int *stack[elt_n]; ++ const int *stack[MLX5_RSS_EXP_ELT_N]; + int stack_pos = 0; +- struct rte_flow_item flow_items[elt_n]; ++ struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N]; + unsigned int i; + size_t lsize; + size_t user_pattern_size = 0; +@@ -261,10 +262,10 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, + + memset(&missed_item, 0, sizeof(missed_item)); + lsize = offsetof(struct mlx5_flow_expand_rss, entry) + +- elt_n * sizeof(buf->entry[0]); ++ MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]); + if (lsize <= size) { + buf->entry[0].priority = 0; +- buf->entry[0].pattern = (void *)&buf->entry[elt_n]; ++ buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; + buf->entries = 0; + addr = buf->entry[0].pattern; + } +@@ -367,7 +368,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, + /* Go deeper. */ + if (node->next) { + next_node = node->next; +- if (stack_pos++ == elt_n) { ++ if (stack_pos++ == MLX5_RSS_EXP_ELT_N) { + rte_errno = E2BIG; + return -rte_errno; + } +@@ -797,7 +798,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, + start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 : + (priv->mtr_reg_share ? 
REG_C_3 : REG_C_4); + skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2); +- if (id > (REG_C_7 - start_reg)) ++ if (id > (uint32_t)(REG_C_7 - start_reg)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); +@@ -813,7 +814,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, + */ + if (skip_mtr_reg && config->flow_mreg_c + [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { +- if (id >= (REG_C_7 - start_reg)) ++ if (id >= (uint32_t)(REG_C_7 - start_reg)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); +@@ -1001,17 +1002,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, + struct mlx5_priv *priv = dev->data->dev_private; + const int mark = dev_handle->mark; + const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); +- struct mlx5_hrxq *hrxq; ++ struct mlx5_ind_table_obj *ind_tbl = NULL; + unsigned int i; + +- if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) +- return; +- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], ++ if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) { ++ struct mlx5_hrxq *hrxq; ++ ++ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + dev_handle->rix_hrxq); +- if (!hrxq) ++ if (hrxq) ++ ind_tbl = hrxq->ind_table; ++ } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { ++ struct mlx5_shared_action_rss *shared_rss; ++ ++ shared_rss = mlx5_ipool_get ++ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], ++ dev_handle->rix_srss); ++ if (shared_rss) ++ ind_tbl = shared_rss->ind_tbl; ++ } ++ if (!ind_tbl) + return; +- for (i = 0; i != hrxq->ind_table->queues_n; ++i) { +- int idx = hrxq->ind_table->queues[i]; ++ for (i = 0; i != ind_tbl->queues_n; ++i) { ++ int idx = ind_tbl->queues[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); +@@ -1083,18 +1096,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, + struct mlx5_priv *priv = dev->data->dev_private; + const int mark = dev_handle->mark; + const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); +- struct mlx5_hrxq *hrxq; ++ struct mlx5_ind_table_obj *ind_tbl = NULL; + unsigned int i; + +- if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) +- return; +- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], ++ if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) { ++ struct mlx5_hrxq *hrxq; ++ ++ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + dev_handle->rix_hrxq); +- if (!hrxq) ++ if (hrxq) ++ ind_tbl = hrxq->ind_table; ++ } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { ++ struct mlx5_shared_action_rss *shared_rss; ++ ++ shared_rss = mlx5_ipool_get ++ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], ++ dev_handle->rix_srss); ++ if (shared_rss) ++ ind_tbl = shared_rss->ind_tbl; ++ } ++ if (!ind_tbl) + return; + MLX5_ASSERT(dev->data->dev_started); +- for (i = 0; i != hrxq->ind_table->queues_n; ++i) { +- int idx = hrxq->ind_table->queues[i]; ++ for (i = 0; i != ind_tbl->queues_n; ++i) { ++ int idx = ind_tbl->queues[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); +@@ -3523,7 +3548,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + if (queue == NULL) + return 0; + conf = mlx5_rxq_get_hairpin_conf(dev, queue->index); +- if (conf != NULL && !!conf->tx_explicit) ++ if (conf == NULL || conf->tx_explicit != 0) + return 0; + queue_action = 1; + action_n++; +@@ -3533,7 +3558,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + if 
(rss == NULL || rss->queue_num == 0) + return 0; + conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]); +- if (conf != NULL && !!conf->tx_explicit) ++ if (conf == NULL || conf->tx_explicit != 0) + return 0; + queue_action = 1; + action_n++; +@@ -5243,7 +5268,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + struct mlx5_flow *dev_flow; +- const struct rte_flow_action_rss *rss; ++ const struct rte_flow_action_rss *rss = NULL; + struct mlx5_translated_shared_action + shared_actions[MLX5_MAX_SHARED_ACTIONS]; + int shared_actions_n = MLX5_MAX_SHARED_ACTIONS; +@@ -5321,7 +5346,9 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, + MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && + flow->drv_type < MLX5_FLOW_TYPE_MAX); + memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); +- rss = flow_get_rss_action(p_actions_rx); ++ /* RSS Action only works on NIC RX domain */ ++ if (attr->ingress && !attr->transfer) ++ rss = flow_get_rss_action(p_actions_rx); + if (rss) { + if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) + return 0; +@@ -6124,9 +6151,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, + } + priv->isolated = !!enable; + if (enable) +- dev->dev_ops = &mlx5_os_dev_ops_isolate; ++ dev->dev_ops = &mlx5_dev_ops_isolate; + else +- dev->dev_ops = &mlx5_os_dev_ops; ++ dev->dev_ops = &mlx5_dev_ops; + + dev->rx_descriptor_status = mlx5_rx_descriptor_status; + dev->tx_descriptor_status = mlx5_tx_descriptor_status; +@@ -7150,12 +7177,12 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev) + { + struct rte_flow_error error; + struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_shared_action_rss *action; ++ struct mlx5_shared_action_rss *shared_rss; + int ret = 0; + uint32_t idx; + + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], +- priv->rss_shared_actions, idx, action, next) { ++ priv->rss_shared_actions, idx, shared_rss, next) { + ret |= mlx5_shared_action_destroy(dev, + (struct rte_flow_shared_action *)(uintptr_t)idx, &error); + } +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h +index a249c292e3..91f48923c0 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow.h ++++ b/dpdk/drivers/net/mlx5/mlx5_flow.h +@@ -552,7 +552,6 @@ struct mlx5_flow_sub_actions_list { + struct mlx5_flow_sub_actions_idx { + uint32_t rix_hrxq; /**< Hash Rx queue object index. */ + uint32_t rix_tag; /**< Index to the tag action. */ +- uint32_t cnt; + uint32_t rix_port_id_action; /**< Index to port ID action resource. */ + uint32_t rix_encap_decap; /**< Index to encap/decap resource. 
*/ + }; +@@ -1049,17 +1048,17 @@ struct rte_flow { + #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4) + #define MLX5_RSS_HASH_IPV4_TCP \ + (MLX5_RSS_HASH_IPV4 | \ +- IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_SRC_PORT_TCP) ++ IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP) + #define MLX5_RSS_HASH_IPV4_UDP \ + (MLX5_RSS_HASH_IPV4 | \ +- IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_SRC_PORT_UDP) ++ IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP) + #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6) + #define MLX5_RSS_HASH_IPV6_TCP \ + (MLX5_RSS_HASH_IPV6 | \ +- IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_SRC_PORT_TCP) ++ IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP) + #define MLX5_RSS_HASH_IPV6_UDP \ + (MLX5_RSS_HASH_IPV6 | \ +- IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_SRC_PORT_UDP) ++ IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP) + #define MLX5_RSS_HASH_NONE 0ULL + + /* array of valid combinations of RX Hash fields for RSS */ +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_age.c b/dpdk/drivers/net/mlx5/mlx5_flow_age.c +index cea2cf769d..0ea61be4eb 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_age.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_age.c +@@ -278,7 +278,8 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket, + sizeof(*sq->db_rec) * 2, 4096, socket); + if (!sq->umem_buf) { + DRV_LOG(ERR, "Can't allocate wqe buffer."); +- return -ENOMEM; ++ rte_errno = ENOMEM; ++ goto error; + } + sq->wqe_umem = mlx5_glue->devx_umem_reg(ctx, + (void *)(uintptr_t)sq->umem_buf, +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +index aa21ff9613..3fdc3ffe16 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +@@ -955,7 +955,7 @@ flow_dv_convert_action_set_reg + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "too many items to modify"); + MLX5_ASSERT(conf->id != REG_NON); +- MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field)); ++ MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field)); + actions[i] = (struct mlx5_modification_cmd) { + .action_type = MLX5_MODIFICATION_TYPE_SET, + .field = reg_to_field[conf->id], +@@ -2375,6 +2375,11 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev, + const struct rte_flow_action_mark *mark = action->conf; + int ret; + ++ if (is_tunnel_offload_active(dev)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "no mark action " ++ "if tunnel offload active"); + /* Fall back if no extended metadata register support. */ + if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) + return mlx5_flow_validate_action_mark(action, action_flags, +@@ -2537,6 +2542,10 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, + * + * @param[in] dev + * Pointer to rte_eth_dev structure. ++ * @param[in] action ++ * Pointer to the action structure. ++ * @param[in] action_flags ++ * Holds the actions detected until now. + * @param[out] error + * Pointer to error structure. 
+ * +@@ -2545,12 +2554,25 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, + */ + static int + flow_dv_validate_action_count(struct rte_eth_dev *dev, ++ const struct rte_flow_action *action, ++ uint64_t action_flags, + struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; ++ const struct rte_flow_action_count *count; + + if (!priv->config.devx) + goto notsup_err; ++ if (action_flags & MLX5_FLOW_ACTION_COUNT) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "duplicate count actions set"); ++ count = (const struct rte_flow_action_count *)action->conf; ++ if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) && ++ !priv->sh->flow_hit_aso_en) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "old age and shared count combination is not supported"); + #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS + return 0; + #endif +@@ -2612,6 +2634,10 @@ flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, + * Pointer to the rte_eth_dev structure. + * @param[in] action_flags + * Holds the actions detected until now. ++ * @param[in] action ++ * Pointer to the action structure. ++ * @param[in] item_flags ++ * Holds the items detected. + * @param[in] attr + * Pointer to flow attributes + * @param[out] error +@@ -2623,6 +2649,8 @@ flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, + static int + flow_dv_validate_action_decap(struct rte_eth_dev *dev, + uint64_t action_flags, ++ const struct rte_flow_action *action, ++ const uint64_t item_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) + { +@@ -2656,6 +2684,11 @@ flow_dv_validate_action_decap(struct rte_eth_dev *dev, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "decap action for VF representor " + "not supported on NIC table"); ++ if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP && ++ !(item_flags & MLX5_FLOW_LAYER_VXLAN)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "VXLAN item should be present for VXLAN decap"); + return 0; + } + +@@ -2676,6 +2709,10 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; + * Holds the actions detected until now. + * @param[out] actions_n + * pointer to the number of actions counter. ++ * @param[in] action ++ * Pointer to the action structure. ++ * @param[in] item_flags ++ * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * +@@ -2688,7 +2725,8 @@ flow_dv_validate_action_raw_encap_decap + const struct rte_flow_action_raw_decap *decap, + const struct rte_flow_action_raw_encap *encap, + const struct rte_flow_attr *attr, uint64_t *action_flags, +- int *actions_n, struct rte_flow_error *error) ++ int *actions_n, const struct rte_flow_action *action, ++ uint64_t item_flags, struct rte_flow_error *error) + { + const struct mlx5_priv *priv = dev->data->dev_private; + int ret; +@@ -2723,8 +2761,8 @@ flow_dv_validate_action_raw_encap_decap + "encap combination"); + } + if (decap) { +- ret = flow_dv_validate_action_decap(dev, *action_flags, attr, +- error); ++ ret = flow_dv_validate_action_decap(dev, *action_flags, action, ++ item_flags, attr, error); + if (ret < 0) + return ret; + *action_flags |= MLX5_FLOW_ACTION_DECAP; +@@ -4321,7 +4359,7 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused, + /** + * Validate the sample action. + * +- * @param[in] action_flags ++ * @param[in, out] action_flags + * Holds the actions detected until now. 
+ * @param[in] action + * Pointer to the sample action. +@@ -4329,6 +4367,10 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused, + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. ++ * @param[in] item_flags ++ * Holds the items detected. ++ * @param[out] count ++ * Pointer to the COUNT action in sample action list. + * @param[out] error + * Pointer to error structure. + * +@@ -4336,10 +4378,12 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused, + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ + static int +-flow_dv_validate_action_sample(uint64_t action_flags, ++flow_dv_validate_action_sample(uint64_t *action_flags, + const struct rte_flow_action *action, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, ++ const uint64_t item_flags, ++ const struct rte_flow_action_count **count, + struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; +@@ -4365,17 +4409,17 @@ flow_dv_validate_action_sample(uint64_t action_flags, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "sample action not supported"); +- if (action_flags & MLX5_FLOW_ACTION_SAMPLE) ++ if (*action_flags & MLX5_FLOW_ACTION_SAMPLE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Multiple sample actions not " + "supported"); +- if (action_flags & MLX5_FLOW_ACTION_METER) ++ if (*action_flags & MLX5_FLOW_ACTION_METER) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, meter should " + "be after sample action"); +- if (action_flags & MLX5_FLOW_ACTION_JUMP) ++ if (*action_flags & MLX5_FLOW_ACTION_JUMP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, jump should " +@@ -4413,10 +4457,15 @@ flow_dv_validate_action_sample(uint64_t action_flags, + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: +- ret = flow_dv_validate_action_count(dev, error); ++ ret = flow_dv_validate_action_count ++ (dev, act, ++ *action_flags | sub_action_flags, ++ error); + if (ret < 0) + return ret; ++ *count = act->conf; + sub_action_flags |= MLX5_FLOW_ACTION_COUNT; ++ *action_flags |= MLX5_FLOW_ACTION_COUNT; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: +@@ -4433,7 +4482,7 @@ flow_dv_validate_action_sample(uint64_t action_flags, + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + ret = flow_dv_validate_action_raw_encap_decap + (dev, NULL, act->conf, attr, &sub_action_flags, +- &actions_n, error); ++ &actions_n, action, item_flags, error); + if (ret < 0) + return ret; + ++actions_n; +@@ -5224,6 +5273,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_action_raw_decap *decap; + const struct rte_flow_action_raw_encap *encap; + const struct rte_flow_action_rss *rss; ++ const struct rte_flow_action_count *count = NULL; ++ const struct rte_flow_action_count *sample_count = NULL; + const struct rte_flow_item_tcp nic_tcp_mask = { + .hdr = { + .tcp_flags = 0xFF, +@@ -5282,6 +5333,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + } else { + tunnel = NULL; + } ++ if (tunnel && priv->representor) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "decap not supported " ++ "for VF representor"); + grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate + (dev, tunnel, attr, items, actions); + ret = 
flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error); +@@ -5702,9 +5758,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: +- ret = flow_dv_validate_action_count(dev, error); ++ ret = flow_dv_validate_action_count(dev, actions, ++ action_flags, ++ error); + if (ret < 0) + return ret; ++ count = actions->conf; + action_flags |= MLX5_FLOW_ACTION_COUNT; + ++actions_n; + break; +@@ -5761,6 +5820,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: + ret = flow_dv_validate_action_decap(dev, action_flags, ++ actions, item_flags, + attr, error); + if (ret < 0) + return ret; +@@ -5770,7 +5830,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + ret = flow_dv_validate_action_raw_encap_decap + (dev, NULL, actions->conf, attr, &action_flags, +- &actions_n, error); ++ &actions_n, actions, item_flags, error); + if (ret < 0) + return ret; + break; +@@ -5788,7 +5848,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + (dev, + decap ? decap : &empty_decap, encap, + attr, &action_flags, &actions_n, +- error); ++ actions, item_flags, error); + if (ret < 0) + return ret; + break; +@@ -5955,7 +6015,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + rw_act_num += MLX5_ACT_NUM_SET_TAG; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_AGE: +- if (!attr->group) ++ if (!attr->transfer && !attr->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +@@ -5969,6 +6029,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + error); + if (ret < 0) + return ret; ++ /* ++ * Validate the regular AGE action (using counter) ++ * mutual exclusion with share counter actions. ++ */ ++ if (!priv->sh->flow_hit_aso_en) { ++ if (count && count->shared) ++ return rte_flow_error_set ++ (error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, ++ "old age and shared count combination is not supported"); ++ if (sample_count) ++ return rte_flow_error_set ++ (error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, ++ "old age action and count must be in the same sub flow"); ++ } + action_flags |= MLX5_FLOW_ACTION_AGE; + ++actions_n; + break; +@@ -6001,9 +6079,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + rw_act_num += MLX5_ACT_NUM_SET_DSCP; + break; + case RTE_FLOW_ACTION_TYPE_SAMPLE: +- ret = flow_dv_validate_action_sample(action_flags, ++ ret = flow_dv_validate_action_sample(&action_flags, + actions, dev, +- attr, error); ++ attr, item_flags, ++ &sample_count, ++ error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_SAMPLE; +@@ -6079,8 +6159,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + * Validate the drop action mutual exclusion with other actions. + * Drop action is mutually-exclusive with any other action, except for + * Count action. ++ * Drop action compatibility with tunnel offload was already validated. 
+ */ +- if ((action_flags & MLX5_FLOW_ACTION_DROP) && ++ if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH | ++ MLX5_FLOW_ACTION_TUNNEL_MATCH)); ++ else if ((action_flags & MLX5_FLOW_ACTION_DROP) && + (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, +@@ -6232,8 +6315,9 @@ flow_dv_prepare(struct rte_eth_dev *dev, + "not enough memory to create flow handle"); + return NULL; + } +- MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); ++ MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; ++ memset(dev_flow, 0, sizeof(*dev_flow)); + dev_flow->handle = dev_handle; + dev_flow->handle_idx = handle_idx; + /* +@@ -6245,12 +6329,6 @@ flow_dv_prepare(struct rte_eth_dev *dev, + */ + dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) - + MLX5_ST_SZ_BYTES(fte_match_set_misc4); +- /* +- * The matching value needs to be cleared to 0 before using. In the +- * past, it will be automatically cleared when using rte_*alloc +- * API. The time consumption will be almost the same as before. +- */ +- memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + dev_flow->ingress = attr->ingress; + dev_flow->dv.transfer = attr->transfer; + return dev_flow; +@@ -7659,11 +7737,15 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, + priv->pf_bond < 0 && attr->transfer) + flow_dv_translate_item_source_vport + (matcher, key, priv->vport_id, mask); +- else +- flow_dv_translate_item_meta_vport +- (matcher, key, +- priv->vport_meta_tag, +- priv->vport_meta_mask); ++ /* ++ * We should always set the vport metadata register, ++ * otherwise the SW steering library can drop ++ * the rule if wire vport metadata value is not zero, ++ * it depends on kernel configuration. 
++ */ ++ flow_dv_translate_item_meta_vport(matcher, key, ++ priv->vport_meta_tag, ++ priv->vport_meta_mask); + } else { + flow_dv_translate_item_source_vport(matcher, key, + priv->vport_id, mask); +@@ -8656,10 +8738,6 @@ flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev, + flow_dv_tag_release(dev, act_res->rix_tag); + act_res->rix_tag = 0; + } +- if (act_res->cnt) { +- flow_dv_counter_free(dev, act_res->cnt); +- act_res->cnt = 0; +- } + } + + int +@@ -9038,6 +9116,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, + struct mlx5_flow_sub_actions_list *sample_act; + struct mlx5_flow_sub_actions_idx *sample_idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); ++ struct rte_flow *flow = dev_flow->flow; + struct mlx5_flow_rss_desc *rss_desc; + uint64_t action_flags = 0; + +@@ -9109,21 +9188,22 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, + } + case RTE_FLOW_ACTION_TYPE_COUNT: + { +- uint32_t counter; +- +- counter = flow_dv_translate_create_counter(dev, +- dev_flow, sub_actions->conf, 0); +- if (!counter) +- return rte_flow_error_set ++ if (!flow->counter) { ++ flow->counter = ++ flow_dv_translate_create_counter(dev, ++ dev_flow, sub_actions->conf, ++ 0); ++ if (!flow->counter) ++ return rte_flow_error_set + (error, rte_errno, +- RTE_FLOW_ERROR_TYPE_ACTION, +- NULL, +- "cannot create counter" +- " object."); +- sample_idx->cnt = counter; ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, ++ "cannot create counter" ++ " object."); ++ } + sample_act->dr_cnt_action = + (flow_dv_counter_get_by_idx(dev, +- counter, NULL))->action; ++ flow->counter, NULL))->action; + sample_actions[sample_act->actions_num++] = + sample_act->dr_cnt_action; + action_flags |= MLX5_FLOW_ACTION_COUNT; +@@ -9876,14 +9956,22 @@ flow_dv_translate(struct rte_eth_dev *dev, + break; + case RTE_FLOW_ACTION_TYPE_AGE: + if (priv->sh->flow_hit_aso_en && attr->group) { +- flow->age = flow_dv_translate_create_aso_age +- (dev, action->conf, error); +- if (!flow->age) +- return rte_flow_error_set ++ /* ++ * Create one shared age action, to be used ++ * by all sub-flows. ++ */ ++ if (!flow->age) { ++ flow->age = ++ flow_dv_translate_create_aso_age ++ (dev, action->conf, ++ error); ++ if (!flow->age) ++ return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "can't create ASO age action"); ++ } + dev_flow->dv.actions[actions_n++] = + (flow_aso_age_get_by_idx + (dev, flow->age))->dr_action; +@@ -10214,17 +10302,22 @@ flow_dv_translate(struct rte_eth_dev *dev, + handle->dvh.modify_hdr->action; + } + if (action_flags & MLX5_FLOW_ACTION_COUNT) { +- flow->counter = +- flow_dv_translate_create_counter(dev, +- dev_flow, count, age); +- +- if (!flow->counter) +- return rte_flow_error_set ++ /* ++ * Create one count action, to be used ++ * by all sub-flows. ++ */ ++ if (!flow->counter) { ++ flow->counter = ++ flow_dv_translate_create_counter ++ (dev, dev_flow, count, ++ age); ++ if (!flow->counter) ++ return rte_flow_error_set + (error, rte_errno, +- RTE_FLOW_ERROR_TYPE_ACTION, +- NULL, +- "cannot create counter" +- " object."); ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, "cannot create counter" ++ " object."); ++ } + dev_flow->dv.actions[actions_n] = + (flow_dv_counter_get_by_idx(dev, + flow->counter, NULL))->action; +@@ -10652,47 +10745,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, + } + } + +-/** +- * Retrieves hash RX queue suitable for the *flow*. 
+- * If shared action configured for *flow* suitable hash RX queue will be +- * retrieved from attached shared action. +- * +- * @param[in] dev +- * Pointer to the Ethernet device structure. +- * @param[in] dev_flow +- * Pointer to the sub flow. +- * @param[in] rss_desc +- * Pointer to the RSS descriptor. +- * @param[out] hrxq +- * Pointer to retrieved hash RX queue object. +- * +- * @return +- * Valid hash RX queue index, otherwise 0 and rte_errno is set. +- */ +-static uint32_t +-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, +- struct mlx5_flow_rss_desc *rss_desc, +- struct mlx5_hrxq **hrxq) +-{ +- struct mlx5_priv *priv = dev->data->dev_private; +- uint32_t hrxq_idx; +- +- if (rss_desc->shared_rss) { +- hrxq_idx = __flow_dv_action_rss_hrxq_lookup +- (dev, rss_desc->shared_rss, +- dev_flow->hash_fields, +- !!(dev_flow->handle->layers & +- MLX5_FLOW_LAYER_TUNNEL)); +- if (hrxq_idx) +- *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], +- hrxq_idx); +- } else { +- *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, +- &hrxq_idx); +- } +- return hrxq_idx; +-} +- + /** + * Apply the flow to the NIC, lock free, + * (mutex should be acquired by caller). +@@ -10724,11 +10776,6 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc; + + MLX5_ASSERT(wks); +- if (rss_desc->shared_rss) { +- dh = wks->flows[wks->flow_idx - 1].handle; +- MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS); +- dh->rix_srss = rss_desc->shared_rss; +- } + for (idx = wks->flow_idx - 1; idx >= 0; idx--) { + dev_flow = &wks->flows[idx]; + dv = &dev_flow->dv; +@@ -10744,11 +10791,34 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + priv->drop_queue.hrxq->action; + } + } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE && +- !dv_h->rix_sample && !dv_h->rix_dest_array) || +- (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) { ++ !dv_h->rix_sample && !dv_h->rix_dest_array)) { ++ struct mlx5_hrxq *hrxq; ++ uint32_t hrxq_idx; ++ ++ hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, ++ &hrxq_idx); ++ if (!hrxq) { ++ rte_flow_error_set ++ (error, rte_errno, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "cannot get hash queue"); ++ goto error; ++ } ++ dh->rix_hrxq = hrxq_idx; ++ dv->actions[n++] = hrxq->action; ++ } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { + struct mlx5_hrxq *hrxq = NULL; +- uint32_t hrxq_idx = __flow_dv_rss_get_hrxq +- (dev, dev_flow, rss_desc, &hrxq); ++ uint32_t hrxq_idx; ++ ++ hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, ++ rss_desc->shared_rss, ++ dev_flow->hash_fields, ++ !!(dh->layers & ++ MLX5_FLOW_LAYER_TUNNEL)); ++ if (hrxq_idx) ++ hrxq = mlx5_ipool_get ++ (priv->sh->ipool[MLX5_IPOOL_HRXQ], ++ hrxq_idx); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, +@@ -10756,8 +10826,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + "cannot get hash queue"); + goto error; + } +- if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) +- dh->rix_hrxq = hrxq_idx; ++ dh->rix_srss = rss_desc->shared_rss; + dv->actions[n++] = hrxq->action; + } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { + if (!priv->sh->default_miss_action) { +@@ -10799,12 +10868,12 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { + mlx5_hrxq_release(dev, dh->rix_hrxq); + dh->rix_hrxq = 0; ++ } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { ++ dh->rix_srss = 0; + } + if (dh->vf_vlan.tag && 
dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); + } +- if (rss_desc->shared_rss) +- wks->flows[wks->flow_idx - 1].handle->rix_srss = 0; + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; + } +@@ -11072,9 +11141,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev, + flow_dv_port_id_action_resource_release(dev, + handle->rix_port_id_action); + break; +- case MLX5_FLOW_FATE_SHARED_RSS: +- flow_dv_shared_rss_action_release(dev, handle->rix_srss); +- break; + default: + DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); + break; +@@ -11092,11 +11158,11 @@ flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_priv *priv = dev->data->dev_private; + + if (cache_resource->verbs_action) +- claim_zero(mlx5_glue->destroy_flow_action ++ claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->verbs_action)); + if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { + if (cache_resource->default_miss) +- claim_zero(mlx5_glue->destroy_flow_action ++ claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->default_miss)); + } + if (cache_resource->normal_path_tbl) +@@ -11149,7 +11215,7 @@ flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused, + + MLX5_ASSERT(cache_resource->action); + if (cache_resource->action) +- claim_zero(mlx5_glue->destroy_flow_action ++ claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); + for (; i < cache_resource->num_of_dest; i++) + flow_dv_sample_sub_actions_release(dev, +@@ -11237,6 +11303,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) + { + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; ++ uint32_t srss = 0; + + if (!flow) + return; +@@ -11281,10 +11348,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) + if (dev_handle->dvh.rix_tag) + flow_dv_tag_release(dev, + dev_handle->dvh.rix_tag); +- flow_dv_fate_resource_release(dev, dev_handle); ++ if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS) ++ flow_dv_fate_resource_release(dev, dev_handle); ++ else if (!srss) ++ srss = dev_handle->rix_srss; + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); + } ++ if (srss) ++ flow_dv_shared_rss_action_release(dev, srss); + } + + /** +@@ -11332,10 +11404,10 @@ __flow_dv_hrxqs_release(struct rte_eth_dev *dev, + */ + static int + __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, +- struct mlx5_shared_action_rss *action) ++ struct mlx5_shared_action_rss *shared_rss) + { +- return __flow_dv_hrxqs_release(dev, &action->hrxq) + +- __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel); ++ return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) + ++ __flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel); + } + + /** +@@ -11359,25 +11431,25 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, + static int + __flow_dv_action_rss_setup(struct rte_eth_dev *dev, + uint32_t action_idx, +- struct mlx5_shared_action_rss *action, ++ struct mlx5_shared_action_rss *shared_rss, + struct rte_flow_error *error) + { + struct mlx5_flow_rss_desc rss_desc = { 0 }; + size_t i; + int err; + +- if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) { ++ if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) { + return rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot setup indirection table"); + } +- memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN); ++ memcpy(rss_desc.key, shared_rss->origin.key, 
MLX5_RSS_HASH_KEY_LEN); + rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN; +- rss_desc.const_q = action->origin.queue; +- rss_desc.queue_num = action->origin.queue_num; ++ rss_desc.const_q = shared_rss->origin.queue; ++ rss_desc.queue_num = shared_rss->origin.queue_num; + /* Set non-zero value to indicate a shared RSS. */ + rss_desc.shared_rss = action_idx; +- rss_desc.ind_tbl = action->ind_tbl; ++ rss_desc.ind_tbl = shared_rss->ind_tbl; + for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { + uint32_t hrxq_idx; + uint64_t hash_fields = mlx5_rss_hash_fields[i]; +@@ -11395,16 +11467,16 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, + goto error_hrxq_new; + } + err = __flow_dv_action_rss_hrxq_set +- (action, hash_fields, tunnel, hrxq_idx); ++ (shared_rss, hash_fields, tunnel, hrxq_idx); + MLX5_ASSERT(!err); + } + } + return 0; + error_hrxq_new: + err = rte_errno; +- __flow_dv_action_rss_hrxqs_release(dev, action); +- if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true)) +- action->ind_tbl = NULL; ++ __flow_dv_action_rss_hrxqs_release(dev, shared_rss); ++ if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true)) ++ shared_rss->ind_tbl = NULL; + rte_errno = err; + return -rte_errno; + } +@@ -11433,7 +11505,7 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, + struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_shared_action_rss *shared_action = NULL; ++ struct mlx5_shared_action_rss *shared_rss = NULL; + void *queue = NULL; + struct rte_flow_action_rss *origin; + const uint8_t *rss_key; +@@ -11443,9 +11515,9 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, + RTE_SET_USED(conf); + queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), + 0, SOCKET_ID_ANY); +- shared_action = mlx5_ipool_zmalloc ++ shared_rss = mlx5_ipool_zmalloc + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx); +- if (!shared_action || !queue) { ++ if (!shared_rss || !queue) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); +@@ -11457,43 +11529,43 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, + "rss action number out of range"); + goto error_rss_init; + } +- shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, +- sizeof(*shared_action->ind_tbl), +- 0, SOCKET_ID_ANY); +- if (!shared_action->ind_tbl) { ++ shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, ++ sizeof(*shared_rss->ind_tbl), ++ 0, SOCKET_ID_ANY); ++ if (!shared_rss->ind_tbl) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + goto error_rss_init; + } + memcpy(queue, rss->queue, queue_size); +- shared_action->ind_tbl->queues = queue; +- shared_action->ind_tbl->queues_n = rss->queue_num; +- origin = &shared_action->origin; ++ shared_rss->ind_tbl->queues = queue; ++ shared_rss->ind_tbl->queues_n = rss->queue_num; ++ origin = &shared_rss->origin; + origin->func = rss->func; + origin->level = rss->level; + /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ + origin->types = !rss->types ? ETH_RSS_IP : rss->types; + /* NULL RSS key indicates default RSS key. */ + rss_key = !rss->key ? 
rss_hash_default_key : rss->key; +- memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN); +- origin->key = &shared_action->key[0]; ++ memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN); ++ origin->key = &shared_rss->key[0]; + origin->key_len = MLX5_RSS_HASH_KEY_LEN; + origin->queue = queue; + origin->queue_num = rss->queue_num; +- if (__flow_dv_action_rss_setup(dev, idx, shared_action, error)) ++ if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error)) + goto error_rss_init; +- rte_spinlock_init(&shared_action->action_rss_sl); +- __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED); ++ rte_spinlock_init(&shared_rss->action_rss_sl); ++ __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED); + rte_spinlock_lock(&priv->shared_act_sl); + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], +- &priv->rss_shared_actions, idx, shared_action, next); ++ &priv->rss_shared_actions, idx, shared_rss, next); + rte_spinlock_unlock(&priv->shared_act_sl); + return idx; + error_rss_init: +- if (shared_action) { +- if (shared_action->ind_tbl) +- mlx5_free(shared_action->ind_tbl); ++ if (shared_rss) { ++ if (shared_rss->ind_tbl) ++ mlx5_free(shared_rss->ind_tbl); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + idx); + } +@@ -11538,6 +11610,13 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "shared rss hrxq has references"); ++ if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt, ++ 0, 0, __ATOMIC_ACQUIRE, ++ __ATOMIC_RELAXED)) ++ return rte_flow_error_set(error, EBUSY, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, ++ "shared rss has references"); + queue = shared_rss->ind_tbl->queues; + remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true); + if (remaining) +@@ -11546,13 +11625,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, + NULL, + "shared rss indirection table has" + " references"); +- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt, +- 0, 0, __ATOMIC_ACQUIRE, +- __ATOMIC_RELAXED)) +- return rte_flow_error_set(error, EBUSY, +- RTE_FLOW_ERROR_TYPE_ACTION, +- NULL, +- "shared rss has references"); + mlx5_free(queue); + rte_spinlock_lock(&priv->shared_act_sl); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], +@@ -11700,6 +11772,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "invalid shared action to update"); ++ if (priv->obj_ops.ind_table_modify == NULL) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "cannot modify indirection table"); + queue = mlx5_malloc(MLX5_MEM_ZERO, + RTE_ALIGN_CEIL(queue_size, sizeof(void *)), + 0, SOCKET_ID_ANY); +@@ -12582,6 +12658,20 @@ flow_dv_action_validate(struct rte_eth_dev *dev, + RTE_SET_USED(conf); + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_RSS: ++ /* ++ * priv->obj_ops is set according to driver capabilities. ++ * When DevX capabilities are ++ * sufficient, it is set to devx_obj_ops. ++ * Otherwise, it is set to ibv_obj_ops. ++ * ibv_obj_ops doesn't support ind_table_modify operation. ++ * In this case the shared RSS action can't be used. 
++ */ ++ if (priv->obj_ops.ind_table_modify == NULL) ++ return rte_flow_error_set ++ (err, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ NULL, ++ "shared RSS action not supported"); + return mlx5_validate_action_rss(dev, action, err); + case RTE_FLOW_ACTION_TYPE_AGE: + if (!priv->sh->aso_age_mng) +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c +index 59291fbd09..bd060e9d44 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c +@@ -1247,6 +1247,7 @@ flow_verbs_validate(struct rte_eth_dev *dev, + uint64_t last_item = 0; + uint8_t next_protocol = 0xff; + uint16_t ether_type = 0; ++ bool is_empty_vlan = false; + + if (items == NULL) + return -1; +@@ -1274,6 +1275,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, + ether_type &= + ((const struct rte_flow_item_eth *) + items->mask)->type; ++ if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN)) ++ is_empty_vlan = true; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; +@@ -1299,6 +1302,7 @@ flow_verbs_validate(struct rte_eth_dev *dev, + } else { + ether_type = 0; + } ++ is_empty_vlan = false; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = mlx5_flow_validate_item_ipv4 +@@ -1410,6 +1414,10 @@ flow_verbs_validate(struct rte_eth_dev *dev, + } + item_flags |= last_item; + } ++ if (is_empty_vlan) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "VLAN matching without vid specification is not supported"); + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c +index da7a8b3cd7..1a5cf99d51 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxq.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c +@@ -346,7 +346,9 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) + (1 << rxq->elts_n) * (1 << rxq->strd_num_n) : + (1 << rxq->elts_n); + const uint16_t q_mask = q_n - 1; +- uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi); ++ uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? 
++ rxq->elts_ci : rxq->rq_ci; ++ uint16_t used = q_n - (elts_ci - rxq->rq_pi); + uint16_t i; + + DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs", +@@ -359,8 +361,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) + */ + if (mlx5_rxq_check_vec_support(rxq) > 0) { + for (i = 0; i < used; ++i) +- (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; +- rxq->rq_pi = rxq->rq_ci; ++ (*rxq->elts)[(elts_ci + i) & q_mask] = NULL; ++ rxq->rq_pi = elts_ci; + } + for (i = 0; i != q_n; ++i) { + if ((*rxq->elts)[i] != NULL) +@@ -402,14 +404,14 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | +- RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH); + ++ if (!config->mprq.enabled) ++ offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT; + if (config->hw_fcs_strip) + offloads |= DEV_RX_OFFLOAD_KEEP_CRC; +- + if (config->hw_csum) + offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | +@@ -1689,6 +1691,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); + return tmpl; + error: ++ mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh); + mlx5_free(tmpl); + return NULL; + } +@@ -2421,7 +2424,9 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, + return 0; + hrxq = container_of(entry, typeof(*hrxq), entry); + } +- return hrxq->idx; ++ if (hrxq) ++ return hrxq->idx; ++ return 0; + } + + /** +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/dpdk/drivers/net/mlx5/mlx5_rxtx.h +index 7989a50403..c57ccc32ed 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx.h +@@ -126,7 +126,7 @@ struct mlx5_rxq_data { + unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */ + unsigned int lro:1; /* Enable LRO. */ + unsigned int dynf_meta:1; /* Dynamic metadata is configured. */ +- unsigned int mcqe_format:3; /* Dynamic metadata is configured. */ ++ unsigned int mcqe_format:3; /* CQE compression format. */ + volatile uint32_t *rq_db; + volatile uint32_t *cq_db; + uint16_t port_id; +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +index d4df9816aa..0b3f240e10 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +@@ -197,8 +197,8 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + const __m128i flow_mark_adj = + _mm_set_epi32(-1, -1, -1, -1); + const __m128i flow_mark_shuf = +- _mm_set_epi8(-1, 1, 0, 4, +- -1, 9, 8, 12, ++ _mm_set_epi8(-1, 9, 8, 12, ++ -1, 1, 0, 4, + -1, -1, -1, -1, + -1, -1, -1, -1); + const __m128i ft_mask = +diff --git a/dpdk/drivers/net/mlx5/mlx5_txpp.c b/dpdk/drivers/net/mlx5/mlx5_txpp.c +index 2438bf1f1d..28afda28cb 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_txpp.c ++++ b/dpdk/drivers/net/mlx5/mlx5_txpp.c +@@ -57,11 +57,16 @@ mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh) + static void + mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh) + { ++#ifdef HAVE_MLX5DV_PP_ALLOC + if (sh->txpp.pp) { + mlx5_glue->dv_free_pp(sh->txpp.pp); + sh->txpp.pp = NULL; + sh->txpp.pp_id = 0; + } ++#else ++ RTE_SET_USED(sh); ++ DRV_LOG(ERR, "Freeing pacing index is not supported."); ++#endif + } + + /* Allocate Packet Pacing index from kernel via mlx5dv call. 
*/ +@@ -270,8 +275,6 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh) + goto error; + } + /* Create completion queue object for Rearm Queue. */ +- cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ? +- MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B; + cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar); + cq_attr.eqn = sh->eqn; + cq_attr.q_umem_valid = 1; +@@ -508,8 +511,6 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh) + goto error; + } + /* Create completion queue object for Clock Queue. */ +- cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ? +- MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B; + cq_attr.use_first_only = 1; + cq_attr.overrun_ignore = 1; + cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar); +diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c +index d96abef883..c53af10d58 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_txq.c ++++ b/dpdk/drivers/net/mlx5/mlx5_txq.c +@@ -634,18 +634,23 @@ txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl) + void + mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev) + { +- struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_txq_data *txq; +- struct mlx5_txq_ctrl *txq_ctrl; ++ struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *) ++ dev->process_private; ++ const size_t page_size = rte_mem_page_size(); ++ void *addr; + unsigned int i; + ++ if (page_size == (size_t)-1) { ++ DRV_LOG(ERR, "Failed to get mem page size"); ++ return; ++ } + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); +- for (i = 0; i != priv->txqs_n; ++i) { +- if (!(*priv->txqs)[i]) ++ for (i = 0; i != ppriv->uar_table_sz; ++i) { ++ if (!ppriv->uar_table[i]) + continue; +- txq = (*priv->txqs)[i]; +- txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); +- txq_uar_uninit_secondary(txq_ctrl); ++ addr = ppriv->uar_table[i]; ++ rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size); ++ + } + } + +@@ -1146,6 +1151,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); + return tmpl; + error: ++ mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh); + mlx5_free(tmpl); + return NULL; + } +diff --git a/dpdk/drivers/net/mvneta/mvneta_rxtx.c b/dpdk/drivers/net/mvneta/mvneta_rxtx.c +index 10b6f57584..dfa7ecc090 100644 +--- a/dpdk/drivers/net/mvneta/mvneta_rxtx.c ++++ b/dpdk/drivers/net/mvneta/mvneta_rxtx.c +@@ -872,7 +872,17 @@ mvneta_rx_queue_flush(struct mvneta_rxq *rxq) + int ret, i; + + descs = rte_malloc("rxdesc", MRVL_NETA_RXD_MAX * sizeof(*descs), 0); ++ if (descs == NULL) { ++ MVNETA_LOG(ERR, "Failed to allocate descs."); ++ return; ++ } ++ + bufs = rte_malloc("buffs", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0); ++ if (bufs == NULL) { ++ MVNETA_LOG(ERR, "Failed to allocate bufs."); ++ rte_free(descs); ++ return; ++ } + + do { + num = MRVL_NETA_RXD_MAX; +diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +index f25cf9e46d..6cd5acd337 100644 +--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c ++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +@@ -441,8 +441,8 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + * when this feature has not been enabled/supported so far + * (TODO check scattered_rx flag here once scattered RX is supported). 
+ */ +- if (mru + MRVL_PKT_OFFS > mbuf_data_size) { +- mru = mbuf_data_size - MRVL_PKT_OFFS; ++ if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) { ++ mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS; + mtu = MRVL_PP2_MRU_TO_MTU(mru); + MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted " + "by current mbuf size: %u. Set MTU to %u, MRU to %u", +@@ -671,18 +671,6 @@ mrvl_dev_start(struct rte_eth_dev *dev) + priv->uc_mc_flushed = 1; + } + +- if (!priv->vlan_flushed) { +- ret = pp2_ppio_flush_vlan(priv->ppio); +- if (ret) { +- MRVL_LOG(ERR, "Failed to flush vlan list"); +- /* +- * TODO +- * once pp2_ppio_flush_vlan() is supported jump to out +- * goto out; +- */ +- } +- priv->vlan_flushed = 1; +- } + ret = mrvl_mtu_set(dev, dev->data->mtu); + if (ret) + MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu); +@@ -1614,8 +1602,8 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + static int + mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) + { +- struct buff_release_entry entries[MRVL_PP2_RXD_MAX]; +- struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX]; ++ struct buff_release_entry entries[num]; ++ struct rte_mbuf *mbufs[num]; + int i, ret; + unsigned int core_id; + struct pp2_hif *hif; +@@ -1711,7 +1699,8 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + return -EFAULT; + } + +- frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS; ++ frame_size = buf_size - RTE_PKTMBUF_HEADROOM - ++ MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN; + if (frame_size < max_rx_pkt_len) { + MRVL_LOG(WARNING, + "Mbuf size must be increased to %u bytes to hold up " +@@ -2171,7 +2160,6 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, + *l4_offset = *l3_offset + MRVL_ARP_LENGTH; + break; + default: +- MRVL_LOG(DEBUG, "Failed to recognise l3 packet type"); + break; + } + +@@ -2183,7 +2171,6 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, + packet_type |= RTE_PTYPE_L4_UDP; + break; + default: +- MRVL_LOG(DEBUG, "Failed to recognise l4 packet type"); + break; + } + +@@ -2253,10 +2240,9 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, + q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); +- if (unlikely(ret < 0)) { +- MRVL_LOG(ERR, "Failed to receive packets"); ++ if (unlikely(ret < 0)) + return 0; +- } ++ + mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; + + for (i = 0; i < nb_pkts; i++) { +@@ -2319,21 +2305,13 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + if (unlikely(num <= q->priv->bpool_min_size || + (!rx_done && num < q->priv->bpool_init_size))) { +- ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); +- if (ret) +- MRVL_LOG(ERR, "Failed to fill bpool"); ++ mrvl_fill_bpool(q, MRVL_BURST_SIZE); + } else if (unlikely(num > q->priv->bpool_max_size)) { + int i; + int pkt_to_remove = num - q->priv->bpool_init_size; + struct rte_mbuf *mbuf; + struct pp2_buff_inf buff; + +- MRVL_LOG(DEBUG, +- "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)", +- bpool->pp2_id, q->priv->ppio->port_id, +- bpool->id, pkt_to_remove, num, +- q->priv->bpool_init_size); +- + for (i = 0; i < pkt_to_remove; i++) { + ret = pp2_bpool_get_buff(hif, bpool, &buff); + if (ret) +@@ -2526,12 +2504,8 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + sq, q->queue_id, 0); + + sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; +- if 
(unlikely(nb_pkts > sq_free_size)) { +- MRVL_LOG(DEBUG, +- "No room in shadow queue for %d packets! %d packets will be sent.", +- nb_pkts, sq_free_size); ++ if (unlikely(nb_pkts > sq_free_size)) + nb_pkts = sq_free_size; +- } + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf = tx_pkts[i]; +@@ -2648,10 +2622,6 @@ mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, + */ + if (unlikely(total_descs > sq_free_size)) { + total_descs -= nb_segs; +- RTE_LOG(DEBUG, PMD, +- "No room in shadow queue for %d packets! " +- "%d packets will be sent.\n", +- nb_pkts, i); + break; + } + +diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.h b/dpdk/drivers/net/mvpp2/mrvl_ethdev.h +index db6632f5b6..eee5182ce8 100644 +--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.h ++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.h +@@ -186,7 +186,6 @@ struct mrvl_priv { + uint8_t bpool_bit; + uint8_t rss_hf_tcp; + uint8_t uc_mc_flushed; +- uint8_t vlan_flushed; + uint8_t isolated; + uint8_t multiseg; + +diff --git a/dpdk/drivers/net/netvsc/hn_nvs.c b/dpdk/drivers/net/netvsc/hn_nvs.c +index eeb82ab9ee..03b6cc1551 100644 +--- a/dpdk/drivers/net/netvsc/hn_nvs.c ++++ b/dpdk/drivers/net/netvsc/hn_nvs.c +@@ -97,8 +97,13 @@ __hn_nvs_execute(struct hn_data *hv, + hdr = (struct hn_nvs_hdr *)buffer; + + /* Silently drop received packets while waiting for response */ +- if (hdr->type == NVS_TYPE_RNDIS) { ++ switch (hdr->type) { ++ case NVS_TYPE_RNDIS: + hn_nvs_ack_rxbuf(chan, xactid); ++ /* fallthrough */ ++ ++ case NVS_TYPE_TXTBL_NOTE: ++ PMD_DRV_LOG(DEBUG, "discard packet type 0x%x", hdr->type); + goto retry; + } + +diff --git a/dpdk/drivers/net/nfb/meson.build b/dpdk/drivers/net/nfb/meson.build +index d53e8eca7d..995c44c61c 100644 +--- a/dpdk/drivers/net/nfb/meson.build ++++ b/dpdk/drivers/net/nfb/meson.build +@@ -3,7 +3,7 @@ + # Copyright(c) 2019 Netcope Technologies, a.s. + # All rights reserved. + +-dep = dependency('netcope-common', required: false) ++dep = dependency('netcope-common', required: false, method: 'pkg-config') + reason = 'missing dependency, "libnfb"' + build = dep.found() + ext_deps += dep +diff --git a/dpdk/drivers/net/nfp/nfp_net.c b/dpdk/drivers/net/nfp/nfp_net.c +index 1608bf5ea1..9ea24e5bda 100644 +--- a/dpdk/drivers/net/nfp/nfp_net.c ++++ b/dpdk/drivers/net/nfp/nfp_net.c +@@ -1508,7 +1508,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + } + + /* switch to jumbo mode if needed */ +- if ((uint32_t)mtu > RTE_ETHER_MAX_LEN) ++ if ((uint32_t)mtu > RTE_ETHER_MTU) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +index 1427954c17..08d656da14 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +@@ -170,7 +170,7 @@ void *nfp_cpp_priv(struct nfp_cpp *cpp); + */ + void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); + +-uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp); ++uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model); + + /* + * NFP CPP core interface for CPP clients. 
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c +index dec4a8b6d1..6d629430d4 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c +@@ -22,8 +22,9 @@ + + #define NFP_PL_DEVICE_ID 0x00000004 + #define NFP_PL_DEVICE_ID_MASK 0xff +- +-#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 ++#define NFP_PL_DEVICE_PART_MASK 0xffff0000 ++#define NFP_PL_DEVICE_MODEL_MASK (NFP_PL_DEVICE_PART_MASK | \ ++ NFP_PL_DEVICE_ID_MASK) + + void + nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv) +@@ -46,13 +47,18 @@ nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model) + uint32_t + nfp_cpp_model(struct nfp_cpp *cpp) + { ++ int err; ++ uint32_t model; ++ + if (!cpp) + return NFP_CPP_MODEL_INVALID; + +- if (cpp->model == 0) +- cpp->model = __nfp_cpp_model_autodetect(cpp); ++ err = __nfp_cpp_model_autodetect(cpp, &model); + +- return cpp->model; ++ if (err < 0) ++ return err; ++ ++ return model; + } + + void +@@ -389,9 +395,6 @@ nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr) + uint32_t xpb; + int island; + +- if (!NFP_CPP_MODEL_IS_6000(cpp->model)) +- return 0; +- + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + + /* +@@ -796,29 +799,21 @@ nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + * as those are model-specific + */ + uint32_t +-__nfp_cpp_model_autodetect(struct nfp_cpp *cpp) ++__nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model) + { +- uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); +- uint32_t model = 0; +- +- if (nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model)) +- return 0; +- +- if (NFP_CPP_MODEL_IS_6000(model)) { +- uint32_t tmp; +- +- nfp_cpp_model_set(cpp, model); ++ uint32_t reg; ++ int err; + +- /* The PL's PluDeviceID revision code is authoratative */ +- model &= ~0xff; +- if (nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + +- NFP_PL_DEVICE_ID, &tmp)) +- return 0; ++ err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, ++ ®); ++ if (err < 0) ++ return err; + +- model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10; +- } ++ *model = reg & NFP_PL_DEVICE_MODEL_MASK; ++ if (*model & NFP_PL_DEVICE_ID_MASK) ++ *model -= 0x10; + +- return model; ++ return 0; + } + + /* +diff --git a/dpdk/drivers/net/octeontx/base/octeontx_io.h b/dpdk/drivers/net/octeontx/base/octeontx_io.h +index 04b9ce1910..d0b9cfbc67 100644 +--- a/dpdk/drivers/net/octeontx/base/octeontx_io.h ++++ b/dpdk/drivers/net/octeontx/base/octeontx_io.h +@@ -52,6 +52,11 @@ do { \ + #endif + + #if defined(RTE_ARCH_ARM64) ++#if defined(__ARM_FEATURE_SVE) ++#define __LSE_PREAMBLE " .cpu generic+lse+sve\n" ++#else ++#define __LSE_PREAMBLE " .cpu generic+lse\n" ++#endif + /** + * Perform an atomic fetch-and-add operation. 
+ */ +@@ -61,7 +66,7 @@ octeontx_reg_ldadd_u64(void *addr, int64_t off) + uint64_t old_val; + + __asm__ volatile( +- " .cpu generic+lse\n" ++ __LSE_PREAMBLE + " ldadd %1, %0, [%2]\n" + : "=r" (old_val) : "r" (off), "r" (addr) : "memory"); + +@@ -98,12 +103,13 @@ octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[], + + /* LDEOR initiates atomic transfer to I/O device */ + __asm__ volatile( +- " .cpu generic+lse\n" ++ __LSE_PREAMBLE + " ldeor xzr, %0, [%1]\n" + : "=r" (result) : "r" (ioreg_va) : "memory"); + } while (!result); + } + ++#undef __LSE_PREAMBLE + #else + + static inline uint64_t +diff --git a/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/dpdk/drivers/net/octeontx/octeontx_ethdev.c +index 3ee7b043fd..5836dbe09e 100644 +--- a/dpdk/drivers/net/octeontx/octeontx_ethdev.c ++++ b/dpdk/drivers/net/octeontx/octeontx_ethdev.c +@@ -552,7 +552,7 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + if (rc) + return rc; + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > OCCTX_L2_MAX_LEN) + nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +@@ -867,7 +867,6 @@ octeontx_dev_info(struct rte_eth_dev *dev, + + dev_info->max_mac_addrs = + octeontx_bgx_port_mac_entries_get(nic->port_id); +- dev_info->max_rx_pktlen = PKI_MAX_PKTLEN; + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = PKO_MAX_NUM_DQ; + dev_info->min_rx_bufsize = 0; +diff --git a/dpdk/drivers/net/octeontx/octeontx_ethdev.h b/dpdk/drivers/net/octeontx/octeontx_ethdev.h +index 7246fb6d1d..780a094ffa 100644 +--- a/dpdk/drivers/net/octeontx/octeontx_ethdev.h ++++ b/dpdk/drivers/net/octeontx/octeontx_ethdev.h +@@ -44,6 +44,7 @@ + /* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */ + #define OCCTX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ + OCCTX_MAX_VTAG_ACT_SIZE) ++#define OCCTX_L2_MAX_LEN (RTE_ETHER_MTU + OCCTX_L2_OVERHEAD) + + /* Since HW FRS includes NPC VTAG insertion space, user has reduced FRS */ + #define OCCTX_MAX_FRS \ +diff --git a/dpdk/drivers/net/octeontx2/otx2_ethdev.h b/dpdk/drivers/net/octeontx2/otx2_ethdev.h +index 3b9871f4dc..99f0469d89 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_ethdev.h ++++ b/dpdk/drivers/net/octeontx2/otx2_ethdev.h +@@ -51,6 +51,8 @@ + /* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */ + #define NIX_L2_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8) ++#define NIX_L2_MAX_LEN \ ++ (RTE_ETHER_MTU + NIX_L2_OVERHEAD) + + /* HW config of frame size doesn't include FCS */ + #define NIX_MAX_HW_FRS 9212 +diff --git a/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c b/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c +index b36d37b9f7..963cc285ed 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c ++++ b/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c +@@ -58,7 +58,7 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + if (rc) + return rc; + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > NIX_L2_MAX_LEN) + dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +diff --git a/dpdk/drivers/net/octeontx2/otx2_flow_parse.c b/dpdk/drivers/net/octeontx2/otx2_flow_parse.c +index 476195d634..e9b940f6c0 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_flow_parse.c ++++ b/dpdk/drivers/net/octeontx2/otx2_flow_parse.c +@@ -1090,7 +1090,10 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev, + + set_pf_func: + /* Ideally AF must ensure that correct pf_func is set */ +- flow->npc_action |= (uint64_t)pf_func << 4; ++ if (attr->egress) ++ flow->npc_action |= 
(uint64_t)pf_func << 48; ++ else ++ flow->npc_action |= (uint64_t)pf_func << 4; + + return 0; + +diff --git a/dpdk/drivers/net/octeontx2/otx2_flow_utils.c b/dpdk/drivers/net/octeontx2/otx2_flow_utils.c +index 9a0a5f9fb4..7ed86ba742 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_flow_utils.c ++++ b/dpdk/drivers/net/octeontx2/otx2_flow_utils.c +@@ -944,7 +944,7 @@ otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, struct otx2_mbox *mbox, + req->entry_data.kw[0] |= flow_info->channel; + req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1); + } else { +- uint16_t pf_func = (flow->npc_action >> 4) & 0xffff; ++ uint16_t pf_func = (flow->npc_action >> 48) & 0xffff; + + pf_func = htons(pf_func); + req->entry_data.kw[0] |= ((uint64_t)pf_func << 32); +diff --git a/dpdk/drivers/net/octeontx2/otx2_rx.c b/dpdk/drivers/net/octeontx2/otx2_rx.c +index 2da8efe77c..ffeade5952 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_rx.c ++++ b/dpdk/drivers/net/octeontx2/otx2_rx.c +@@ -279,6 +279,12 @@ nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts, + vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2); + vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3); + ++ /* Update that no more segments */ ++ mbuf0->next = NULL; ++ mbuf1->next = NULL; ++ mbuf2->next = NULL; ++ mbuf3->next = NULL; ++ + /* Store the mbufs to rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01); + vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23); +diff --git a/dpdk/drivers/net/octeontx2/otx2_rx.h b/dpdk/drivers/net/octeontx2/otx2_rx.h +index 926f614a4e..0ba3d3d96c 100644 +--- a/dpdk/drivers/net/octeontx2/otx2_rx.h ++++ b/dpdk/drivers/net/octeontx2/otx2_rx.h +@@ -215,6 +215,7 @@ nix_cqe_xtract_mseg(const struct nix_rx_parse_s *rx, + iova_list = (const rte_iova_t *)(iova_list + 1); + } + } ++ mbuf->next = NULL; + } + + static __rte_always_inline uint16_t +@@ -330,10 +331,12 @@ otx2_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, + *(uint64_t *)(&mbuf->rearm_data) = val; + mbuf->pkt_len = len; + +- if (flag & NIX_RX_MULTI_SEG_F) ++ if (flag & NIX_RX_MULTI_SEG_F) { + nix_cqe_xtract_mseg(rx, mbuf, val); +- else ++ } else { + mbuf->data_len = len; ++ mbuf->next = NULL; ++ } + } + + #define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F +diff --git a/dpdk/drivers/net/pcap/rte_eth_pcap.c b/dpdk/drivers/net/pcap/rte_eth_pcap.c +index 4930d7d382..40f4fa9021 100644 +--- a/dpdk/drivers/net/pcap/rte_eth_pcap.c ++++ b/dpdk/drivers/net/pcap/rte_eth_pcap.c +@@ -386,7 +386,7 @@ eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + return 0; + + for (i = 0; i < nb_pkts; i++) { +- tx_bytes += bufs[i]->data_len; ++ tx_bytes += bufs[i]->pkt_len; + rte_pktmbuf_free(bufs[i]); + } + +@@ -735,6 +735,17 @@ eth_stats_reset(struct rte_eth_dev *dev) + return 0; + } + ++static inline void ++infinite_rx_ring_free(struct rte_ring *pkts) ++{ ++ struct rte_mbuf *bufs; ++ ++ while (!rte_ring_dequeue(pkts, (void **)&bufs)) ++ rte_pktmbuf_free(bufs); ++ ++ rte_ring_free(pkts); ++} ++ + static int + eth_dev_close(struct rte_eth_dev *dev) + { +@@ -753,7 +764,6 @@ eth_dev_close(struct rte_eth_dev *dev) + if (internals->infinite_rx) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct pcap_rx_queue *pcap_q = &internals->rx_queue[i]; +- struct rte_mbuf *pcap_buf; + + /* + * 'pcap_q->pkts' can be NULL if 'eth_dev_close()' +@@ -762,11 +772,7 @@ eth_dev_close(struct rte_eth_dev *dev) + if (pcap_q->pkts == NULL) + continue; + +- while (!rte_ring_dequeue(pcap_q->pkts, +- (void **)&pcap_buf)) +- rte_pktmbuf_free(pcap_buf); +- +- 
rte_ring_free(pcap_q->pkts); ++ infinite_rx_ring_free(pcap_q->pkts); + } + } + +@@ -835,21 +841,25 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, + while (eth_pcap_rx(pcap_q, bufs, 1)) { + /* Check for multiseg mbufs. */ + if (bufs[0]->nb_segs != 1) { +- rte_pktmbuf_free(*bufs); +- +- while (!rte_ring_dequeue(pcap_q->pkts, +- (void **)bufs)) +- rte_pktmbuf_free(*bufs); +- +- rte_ring_free(pcap_q->pkts); +- PMD_LOG(ERR, "Multiseg mbufs are not supported in infinite_rx " +- "mode."); ++ infinite_rx_ring_free(pcap_q->pkts); ++ PMD_LOG(ERR, ++ "Multiseg mbufs are not supported in infinite_rx mode."); + return -EINVAL; + } + + rte_ring_enqueue_bulk(pcap_q->pkts, + (void * const *)bufs, 1, NULL); + } ++ ++ if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) { ++ infinite_rx_ring_free(pcap_q->pkts); ++ PMD_LOG(ERR, ++ "Not enough mbufs to accommodate packets in pcap file. " ++ "At least %" PRIu64 " mbufs per queue is required.", ++ pcap_pkt_count); ++ return -EINVAL; ++ } ++ + /* + * Reset the stats for this queue since eth_pcap_rx calls above + * didn't result in the application receiving packets. +@@ -1324,9 +1334,8 @@ eth_from_pcaps(struct rte_vdev_device *vdev, + + /* phy_mac arg is applied only only if "iface" devarg is provided */ + if (rx_queues->phy_mac) { +- int ret = eth_pcap_update_mac(rx_queues->queue[0].name, +- eth_dev, vdev->device.numa_node); +- if (ret == 0) ++ if (eth_pcap_update_mac(rx_queues->queue[0].name, ++ eth_dev, vdev->device.numa_node) == 0) + internals->phy_mac = 1; + } + } +diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c +index 549013557c..ab5f5b1065 100644 +--- a/dpdk/drivers/net/qede/qede_ethdev.c ++++ b/dpdk/drivers/net/qede/qede_ethdev.c +@@ -1885,6 +1885,8 @@ static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + enum _ecore_status_t ecore_status; + ++ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) ++ type = QED_FILTER_RX_MODE_TYPE_PROMISC; + ecore_status = qed_configure_filter_rx_mode(eth_dev, type); + + return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; +@@ -2367,7 +2369,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + fp->rxq->rx_buf_size = rc; + } + } +- if (max_rx_pkt_len > RTE_ETHER_MAX_LEN) ++ if (frame_size > QEDE_ETH_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +diff --git a/dpdk/drivers/net/qede/qede_rxtx.h b/dpdk/drivers/net/qede/qede_rxtx.h +index d7ff870b20..fcb564a1bb 100644 +--- a/dpdk/drivers/net/qede/qede_rxtx.h ++++ b/dpdk/drivers/net/qede/qede_rxtx.h +@@ -71,6 +71,7 @@ + + (QEDE_LLC_SNAP_HDR_LEN) + 2) + + #define QEDE_MAX_ETHER_HDR_LEN (RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD) ++#define QEDE_ETH_MAX_LEN (RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN) + + #define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\ + ETH_RSS_NONFRAG_IPV4_TCP |\ +diff --git a/dpdk/drivers/net/sfc/sfc_ef10_tx.c b/dpdk/drivers/net/sfc/sfc_ef10_tx.c +index 87fa40f3eb..33d2d637c2 100644 +--- a/dpdk/drivers/net/sfc/sfc_ef10_tx.c ++++ b/dpdk/drivers/net/sfc/sfc_ef10_tx.c +@@ -481,6 +481,25 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg, + needed_desc--; + } + ++ /* ++ * 8000-series EF10 hardware requires that innermost IP length ++ * be greater than or equal to the value which each segment is ++ * supposed to have; otherwise, TCP checksum will be incorrect. ++ * ++ * The same concern applies to outer UDP datagram length field. 
++ */ ++ switch (m_seg->ol_flags & PKT_TX_TUNNEL_MASK) { ++ case PKT_TX_TUNNEL_VXLAN: ++ /* FALLTHROUGH */ ++ case PKT_TX_TUNNEL_GENEVE: ++ sfc_tso_outer_udp_fix_len(first_m_seg, hdr_addr); ++ break; ++ default: ++ break; ++ } ++ ++ sfc_tso_innermost_ip_fix_len(first_m_seg, hdr_addr, iph_off); ++ + /* + * Tx prepare has debug-only checks that offload flags are correctly + * filled in in TSO mbuf. Use zero IPID if there is no IPv4 flag. +diff --git a/dpdk/drivers/net/sfc/sfc_ethdev.c b/dpdk/drivers/net/sfc/sfc_ethdev.c +index 93fc7baa0d..a002e2c037 100644 +--- a/dpdk/drivers/net/sfc/sfc_ethdev.c ++++ b/dpdk/drivers/net/sfc/sfc_ethdev.c +@@ -640,10 +640,19 @@ sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]; + stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS]; + stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS]; ++ ++ /* CRC is included in these stats, but shouldn't be */ ++ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN; ++ stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN; + } else { + stats->opackets = mac_stats[EFX_MAC_TX_PKTS]; + stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS]; + stats->obytes = mac_stats[EFX_MAC_TX_OCTETS]; ++ ++ /* CRC is included in these stats, but shouldn't be */ ++ stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN; ++ stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN; ++ + /* + * Take into account stats which are whenever supported + * on EF10. If some stat is not supported by current +@@ -1017,7 +1026,7 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + * The driver does not use it, but other PMDs update jumbo frame + * flag and max_rx_pkt_len when MTU is set. + */ +- if (mtu > RTE_ETHER_MAX_LEN) { ++ if (mtu > RTE_ETHER_MTU) { + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + } +diff --git a/dpdk/drivers/net/sfc/sfc_tso.c b/dpdk/drivers/net/sfc/sfc_tso.c +index d6f1119890..b090ef14db 100644 +--- a/dpdk/drivers/net/sfc/sfc_tso.c ++++ b/dpdk/drivers/net/sfc/sfc_tso.c +@@ -140,6 +140,13 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, + tsoh = rte_pktmbuf_mtod(m, uint8_t *); + } + ++ /* ++ * 8000-series EF10 hardware requires that innermost IP length ++ * be greater than or equal to the value which each segment is ++ * supposed to have; otherwise, TCP checksum will be incorrect. ++ */ ++ sfc_tso_innermost_ip_fix_len(m, tsoh, nh_off); ++ + /* + * Handle IP header. Tx prepare has debug-only checks that offload flags + * are correctly filled in in TSO mbuf. 
Use zero IPID if there is no +diff --git a/dpdk/drivers/net/sfc/sfc_tso.h b/dpdk/drivers/net/sfc/sfc_tso.h +index 8597c2868a..361aa22192 100644 +--- a/dpdk/drivers/net/sfc/sfc_tso.h ++++ b/dpdk/drivers/net/sfc/sfc_tso.h +@@ -38,6 +38,36 @@ sfc_tso_ip4_get_ipid(const uint8_t *pkt_hdrp, size_t ip_hdr_off) + return rte_be_to_cpu_16(ipid); + } + ++static inline void ++sfc_tso_outer_udp_fix_len(const struct rte_mbuf *m, uint8_t *tsoh) ++{ ++ rte_be16_t len = rte_cpu_to_be_16(m->l2_len + m->l3_len + m->l4_len + ++ m->tso_segsz); ++ ++ rte_memcpy(tsoh + m->outer_l2_len + m->outer_l3_len + ++ offsetof(struct rte_udp_hdr, dgram_len), ++ &len, sizeof(len)); ++} ++ ++static inline void ++sfc_tso_innermost_ip_fix_len(const struct rte_mbuf *m, uint8_t *tsoh, ++ size_t iph_ofst) ++{ ++ size_t ip_payload_len = m->l4_len + m->tso_segsz; ++ size_t field_ofst; ++ rte_be16_t len; ++ ++ if (m->ol_flags & PKT_TX_IPV4) { ++ field_ofst = offsetof(struct rte_ipv4_hdr, total_length); ++ len = rte_cpu_to_be_16(m->l3_len + ip_payload_len); ++ } else { ++ field_ofst = offsetof(struct rte_ipv6_hdr, payload_len); ++ len = rte_cpu_to_be_16(ip_payload_len); ++ } ++ ++ rte_memcpy(tsoh + iph_ofst + field_ofst, &len, sizeof(len)); ++} ++ + unsigned int sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len, + struct rte_mbuf **in_seg, size_t *in_off); + +diff --git a/dpdk/drivers/net/szedata2/meson.build b/dpdk/drivers/net/szedata2/meson.build +index b53fcbc591..77a5b0ed80 100644 +--- a/dpdk/drivers/net/szedata2/meson.build ++++ b/dpdk/drivers/net/szedata2/meson.build +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2018 Intel Corporation + +-dep = dependency('libsze2', required: false) ++dep = dependency('libsze2', required: false, method: 'pkg-config') + build = dep.found() + reason = 'missing dependency, "libsze2"' + ext_deps += dep +diff --git a/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h b/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h +index b12c8ec50a..adc8ec943d 100644 +--- a/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h ++++ b/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h +@@ -176,6 +176,7 @@ + #define NIC_HW_MAX_MTU (9190) + #define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD) + #define NIC_HW_MAX_SEGS (12) ++#define NIC_HW_L2_MAX_LEN (RTE_ETHER_MTU + NIC_HW_L2_OVERHEAD) + + /* Descriptor alignments */ + #define NICVF_RBDR_BASE_ALIGN_BYTES (128) /* 7 bits */ +diff --git a/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/dpdk/drivers/net/thunderx/nicvf_ethdev.c +index b6bb05e500..c2e7c334d4 100644 +--- a/dpdk/drivers/net/thunderx/nicvf_ethdev.c ++++ b/dpdk/drivers/net/thunderx/nicvf_ethdev.c +@@ -176,7 +176,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS)) + return -EINVAL; + +- if (frame_size > RTE_ETHER_MAX_LEN) ++ if (frame_size > NIC_HW_L2_MAX_LEN) + rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; +diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost.h b/dpdk/drivers/net/virtio/virtio_user/vhost.h +index 210a3704e7..be286173b0 100644 +--- a/dpdk/drivers/net/virtio/virtio_user/vhost.h ++++ b/dpdk/drivers/net/virtio/virtio_user/vhost.h +@@ -86,6 +86,14 @@ enum vhost_user_request { + VHOST_USER_MAX + }; + ++#ifndef VHOST_BACKEND_F_IOTLB_MSG_V2 ++#define VHOST_BACKEND_F_IOTLB_MSG_V2 1 ++#endif ++ ++#ifndef VHOST_BACKEND_F_IOTLB_BATCH ++#define VHOST_BACKEND_F_IOTLB_BATCH 2 ++#endif ++ + extern const char * const vhost_msg_strings[VHOST_USER_MAX]; + 
+ struct vhost_memory_region { +diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c +index b93e65c60b..350eed4182 100644 +--- a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c ++++ b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c +@@ -297,13 +297,18 @@ vhost_user_sock(struct virtio_user_dev *dev, + if (has_reply_ack) + msg.flags |= VHOST_USER_NEED_REPLY_MASK; + /* Fallthrough */ +- case VHOST_USER_SET_FEATURES: + case VHOST_USER_SET_PROTOCOL_FEATURES: + case VHOST_USER_SET_LOG_BASE: + msg.payload.u64 = *((__u64 *)arg); + msg.size = sizeof(m.payload.u64); + break; + ++ case VHOST_USER_SET_FEATURES: ++ msg.payload.u64 = *((__u64 *)arg) | (dev->device_features & ++ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)); ++ msg.size = sizeof(m.payload.u64); ++ break; ++ + case VHOST_USER_SET_OWNER: + case VHOST_USER_RESET_OWNER: + break; +diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost_vdpa.c b/dpdk/drivers/net/virtio/virtio_user/vhost_vdpa.c +index c7b9349fc8..269bab2f8e 100644 +--- a/dpdk/drivers/net/virtio/virtio_user/vhost_vdpa.c ++++ b/dpdk/drivers/net/virtio/virtio_user/vhost_vdpa.c +@@ -35,6 +35,8 @@ + #define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8) + #define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, \ + struct vhost_vring_state) ++#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) ++#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) + + static uint64_t vhost_req_user_to_vdpa[] = { + [VHOST_USER_SET_OWNER] = VHOST_SET_OWNER, +@@ -51,6 +53,8 @@ static uint64_t vhost_req_user_to_vdpa[] = { + [VHOST_USER_SET_STATUS] = VHOST_VDPA_SET_STATUS, + [VHOST_USER_GET_STATUS] = VHOST_VDPA_GET_STATUS, + [VHOST_USER_SET_VRING_ENABLE] = VHOST_VDPA_SET_VRING_ENABLE, ++ [VHOST_USER_GET_PROTOCOL_FEATURES] = VHOST_GET_BACKEND_FEATURES, ++ [VHOST_USER_SET_PROTOCOL_FEATURES] = VHOST_SET_BACKEND_FEATURES, + }; + + /* no alignment requirement */ +@@ -66,6 +70,8 @@ struct vhost_iotlb_msg { + #define VHOST_IOTLB_UPDATE 2 + #define VHOST_IOTLB_INVALIDATE 3 + #define VHOST_IOTLB_ACCESS_FAIL 4 ++#define VHOST_IOTLB_BATCH_BEGIN 5 ++#define VHOST_IOTLB_BATCH_END 6 + uint8_t type; + }; + +@@ -80,12 +86,67 @@ struct vhost_msg { + }; + }; + ++static int ++vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev) ++{ ++ struct vhost_msg msg = {}; ++ ++ if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) ++ return 0; ++ ++ if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) { ++ PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend."); ++ return -1; ++ } ++ ++ msg.type = VHOST_IOTLB_MSG_V2; ++ msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN; ++ ++ if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) { ++ PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)", ++ strerror(errno)); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static int ++vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev) ++{ ++ struct vhost_msg msg = {}; ++ ++ if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) ++ return 0; ++ ++ if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) { ++ PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend."); ++ return -1; ++ } ++ ++ msg.type = VHOST_IOTLB_MSG_V2; ++ msg.iotlb.type = VHOST_IOTLB_BATCH_END; ++ ++ if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) { ++ PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)", ++ strerror(errno)); ++ return -1; ++ } ++ ++ return 0; ++} ++ + static int + 
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr, + uint64_t iova, size_t len) + { + struct vhost_msg msg = {}; + ++ if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) { ++ PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend."); ++ return -1; ++ } ++ + msg.type = VHOST_IOTLB_MSG_V2; + msg.iotlb.type = VHOST_IOTLB_UPDATE; + msg.iotlb.iova = iova; +@@ -108,6 +169,11 @@ vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr, + { + struct vhost_msg msg = {}; + ++ if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) { ++ PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend."); ++ return -1; ++ } ++ + msg.type = VHOST_IOTLB_MSG_V2; + msg.iotlb.type = VHOST_IOTLB_INVALIDATE; + msg.iotlb.iova = iova; +@@ -122,6 +188,39 @@ vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr, + return 0; + } + ++static int ++vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr, ++ uint64_t iova, size_t len) ++{ ++ int ret; ++ ++ if (vhost_vdpa_iotlb_batch_begin(dev) < 0) ++ return -1; ++ ++ ret = vhost_vdpa_dma_map(dev, addr, iova, len); ++ ++ if (vhost_vdpa_iotlb_batch_end(dev) < 0) ++ return -1; ++ ++ return ret; ++} ++ ++static int ++vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr, ++ uint64_t iova, size_t len) ++{ ++ int ret; ++ ++ if (vhost_vdpa_iotlb_batch_begin(dev) < 0) ++ return -1; ++ ++ ret = vhost_vdpa_dma_unmap(dev, addr, iova, len); ++ ++ if (vhost_vdpa_iotlb_batch_end(dev) < 0) ++ return -1; ++ ++ return ret; ++} + + static int + vhost_vdpa_map_contig(const struct rte_memseg_list *msl, +@@ -159,21 +258,32 @@ vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms, + static int + vhost_vdpa_dma_map_all(struct virtio_user_dev *dev) + { ++ int ret; ++ ++ if (vhost_vdpa_iotlb_batch_begin(dev) < 0) ++ return -1; ++ + vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX); + + if (rte_eal_iova_mode() == RTE_IOVA_VA) { + /* with IOVA as VA mode, we can get away with mapping contiguous + * chunks rather than going page-by-page. + */ +- int ret = rte_memseg_contig_walk_thread_unsafe( ++ ret = rte_memseg_contig_walk_thread_unsafe( + vhost_vdpa_map_contig, dev); + if (ret) +- return ret; ++ goto batch_end; + /* we have to continue the walk because we've skipped the + * external segments during the config walk. 
+ */ + } +- return rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev); ++ ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev); ++ ++batch_end: ++ if (vhost_vdpa_iotlb_batch_end(dev) < 0) ++ return -1; ++ ++ return ret; + } + + /* with below features, vhost vdpa does not need to do the checksum and TSO, +@@ -293,6 +403,6 @@ struct virtio_user_backend_ops virtio_ops_vdpa = { + .setup = vhost_vdpa_setup, + .send_request = vhost_vdpa_ioctl, + .enable_qp = vhost_vdpa_enable_queue_pair, +- .dma_map = vhost_vdpa_dma_map, +- .dma_unmap = vhost_vdpa_dma_unmap, ++ .dma_map = vhost_vdpa_dma_map_batch, ++ .dma_unmap = vhost_vdpa_dma_unmap_batch, + }; +diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c +index 053f0267ca..202431ca22 100644 +--- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -276,6 +276,7 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev) + } + kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (kickfd < 0) { ++ close(callfd); + PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno)); + break; + } +@@ -284,7 +285,7 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev) + } + + if (i < VIRTIO_MAX_VIRTQUEUES) { +- for (j = 0; j <= i; ++j) { ++ for (j = 0; j < i; ++j) { + close(dev->callfds[j]); + close(dev->kickfds[j]); + } +@@ -439,11 +440,14 @@ virtio_user_dev_setup(struct virtio_user_dev *dev) + 1ULL << VIRTIO_F_RING_PACKED | \ + 1ULL << VHOST_USER_F_PROTOCOL_FEATURES) + +-#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES \ ++#define VHOST_USER_SUPPORTED_PROTOCOL_FEATURES \ + (1ULL << VHOST_USER_PROTOCOL_F_MQ | \ + 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \ + 1ULL << VHOST_USER_PROTOCOL_F_STATUS) + ++#define VHOST_VDPA_SUPPORTED_PROTOCOL_FEATURES \ ++ (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | \ ++ 1ULL << VHOST_BACKEND_F_IOTLB_BATCH) + int + virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + int cq, int queue_size, const char *mac, char **ifname, +@@ -462,9 +466,13 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + dev->mac_specified = 0; + dev->frontend_features = 0; + dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES; +- dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES; + dev->backend_type = backend_type; + ++ if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) ++ dev->protocol_features = VHOST_USER_SUPPORTED_PROTOCOL_FEATURES; ++ else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) ++ dev->protocol_features = VHOST_VDPA_SUPPORTED_PROTOCOL_FEATURES; ++ + parse_mac(dev, mac); + + if (*ifname) { +@@ -497,8 +505,8 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + } + + +- if (dev->device_features & +- (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) { ++ if ((dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) || ++ (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)) { + if (dev->ops->send_request(dev, + VHOST_USER_GET_PROTOCOL_FEATURES, + &protocol_features)) +diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h +index e053897d8f..3b5b6bc3ae 100644 +--- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h ++++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h +@@ -48,9 +48,7 @@ struct virtio_user_dev { + uint64_t device_features; /* supported features by device */ + uint64_t frontend_features; /* enabled frontend 
features */ + uint64_t unsupported_features; /* unsupported features mask */ +- uint64_t protocol_features; /* negotiated protocol features +- * (Vhost-user only) +- */ ++ uint64_t protocol_features; /* negotiated protocol features */ + uint8_t status; + uint16_t net_status; + uint16_t port_id; +diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c +index 40345193e6..78998427cc 100644 +--- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c ++++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c +@@ -77,7 +77,7 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev) + return -1; + + dev->vhostfd = connectfd; +- old_status = vtpci_get_status(hw); ++ old_status = dev->status; + + vtpci_reset(hw); + +diff --git a/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c b/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c +index 5857617282..8d134ac98e 100644 +--- a/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c ++++ b/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c +@@ -105,7 +105,21 @@ prep_one(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp, + { + size_t wqe_offset = (sq->pi & (sq_size_get(sq) - 1)) * MLX5_SEND_WQE_BB; + uint32_t lkey; +- ++ uint16_t group0 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID0_VALID_F ? ++ op->group_id0 : 0; ++ uint16_t group1 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F ? ++ op->group_id1 : 0; ++ uint16_t group2 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F ? ++ op->group_id2 : 0; ++ uint16_t group3 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F ? ++ op->group_id3 : 0; ++ ++ /* For backward compatibility. */ ++ if (!(op->req_flags & (RTE_REGEX_OPS_REQ_GROUP_ID0_VALID_F | ++ RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F | ++ RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F | ++ RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F))) ++ group0 = op->group_id0; + lkey = mlx5_mr_addr2mr_bh(priv->pd, 0, + &priv->mr_scache, &qp->mr_ctrl, + rte_pktmbuf_mtod(op->mbuf, uintptr_t), +@@ -116,9 +130,8 @@ prep_one(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp, + set_wqe_ctrl_seg((struct mlx5_wqe_ctrl_seg *)wqe, sq->pi, + MLX5_OPCODE_MMO, MLX5_OPC_MOD_MMO_REGEX, sq->obj->id, + 0, ds, 0, 0); +- set_regex_ctrl_seg(wqe + 12, 0, op->group_id0, op->group_id1, +- op->group_id2, +- op->group_id3, 0); ++ set_regex_ctrl_seg(wqe + 12, 0, group0, group1, group2, group3, ++ 0); + struct mlx5_wqe_data_seg *input_seg = + (struct mlx5_wqe_data_seg *)(wqe + + MLX5_REGEX_WQE_GATHER_OFFSET); +diff --git a/dpdk/drivers/regex/mlx5/mlx5_rxp.c b/dpdk/drivers/regex/mlx5/mlx5_rxp.c +index fcbc766441..0753ab3bdc 100644 +--- a/dpdk/drivers/regex/mlx5/mlx5_rxp.c ++++ b/dpdk/drivers/regex/mlx5/mlx5_rxp.c +@@ -115,11 +115,10 @@ mlx5_regex_info_get(struct rte_regexdev *dev __rte_unused, + info->max_payload_size = MLX5_REGEX_MAX_PAYLOAD_SIZE; + info->max_rules_per_group = MLX5_REGEX_MAX_RULES_PER_GROUP; + info->max_groups = MLX5_REGEX_MAX_GROUPS; +- info->max_queue_pairs = 1; + info->regexdev_capa = RTE_REGEXDEV_SUPP_PCRE_GREEDY_F | + RTE_REGEXDEV_CAPA_QUEUE_PAIR_OOS_F; + info->rule_flags = 0; +- info->max_queue_pairs = 10; ++ info->max_queue_pairs = UINT16_MAX; + return 0; + } + +@@ -892,7 +891,7 @@ rxp_db_setup(struct mlx5_regex_priv *priv) + + /* Setup database memories for both RXP engines + reprogram memory. 
*/ + for (i = 0; i < (priv->nb_engines + MLX5_RXP_EM_COUNT); i++) { +- priv->db[i].ptr = rte_malloc("", MLX5_MAX_DB_SIZE, 0); ++ priv->db[i].ptr = rte_malloc("", MLX5_MAX_DB_SIZE, 1 << 21); + if (!priv->db[i].ptr) { + DRV_LOG(ERR, "Failed to alloc db memory!"); + ret = ENODEV; +diff --git a/dpdk/drivers/regex/octeontx2/otx2_regexdev.c b/dpdk/drivers/regex/octeontx2/otx2_regexdev.c +index 39eed7a20d..b6e55853e9 100644 +--- a/dpdk/drivers/regex/octeontx2/otx2_regexdev.c ++++ b/dpdk/drivers/regex/octeontx2/otx2_regexdev.c +@@ -988,6 +988,9 @@ static struct rte_pci_id pci_id_ree_table[] = { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_REE_PF) + }, ++ { ++ .vendor_id = 0, ++ } + }; + + static struct rte_pci_driver otx2_regexdev_pmd = { +diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c +index b64f364eb7..0b2f1ab68e 100644 +--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c ++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c +@@ -295,6 +295,8 @@ mlx5_vdpa_dev_close(int vid) + } + priv->configured = 0; + priv->vid = 0; ++ /* The mutex may stay locked after event thread cancel - initiate it. */ ++ pthread_mutex_init(&priv->vq_config_lock, NULL); + DRV_LOG(INFO, "vDPA device %d was closed.", vid); + return ret; + } +diff --git a/dpdk/examples/eventdev_pipeline/main.c b/dpdk/examples/eventdev_pipeline/main.c +index 823f8b51c2..3dbef6ed45 100644 +--- a/dpdk/examples/eventdev_pipeline/main.c ++++ b/dpdk/examples/eventdev_pipeline/main.c +@@ -22,6 +22,32 @@ struct config_data cdata = { + .worker_cq_depth = 16 + }; + ++static void ++dump_core_info(unsigned int lcore_id, struct worker_data *data, ++ unsigned int worker_idx) ++{ ++ if (fdata->rx_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing NIC Rx\n", ++ __func__, lcore_id); ++ ++ if (fdata->tx_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing NIC Tx\n", ++ __func__, lcore_id); ++ ++ if (fdata->sched_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing scheduler\n", ++ __func__, lcore_id); ++ ++ if (fdata->worker_core[lcore_id]) ++ printf( ++ "[%s()] lcore %d executing worker, using eventdev port %u\n", ++ __func__, lcore_id, ++ data[worker_idx].port_id); ++} ++ + static bool + core_in_use(unsigned int lcore_id) { + return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] || +@@ -239,8 +265,15 @@ parse_app_args(int argc, char **argv) + + if (fdata->worker_core[i]) + cdata.num_workers++; +- if (core_in_use(i)) ++ if (core_in_use(i)) { ++ if (!rte_lcore_is_enabled(i)) { ++ printf("lcore %d is not enabled in lcore list\n", ++ i); ++ rte_exit(EXIT_FAILURE, ++ "check lcore params failed\n"); ++ } + cdata.active_cores++; ++ } + } + } + +@@ -280,7 +313,6 @@ static void + signal_handler(int signum) + { + static uint8_t once; +- uint16_t portid; + + if (fdata->done) + rte_exit(1, "Exiting on signal %d\n", signum); +@@ -291,17 +323,6 @@ signal_handler(int signum) + rte_event_dev_dump(0, stdout); + once = 1; + fdata->done = 1; +- rte_smp_wmb(); +- +- RTE_ETH_FOREACH_DEV(portid) { +- rte_event_eth_rx_adapter_stop(portid); +- rte_event_eth_tx_adapter_stop(portid); +- if (rte_eth_dev_stop(portid) < 0) +- printf("Failed to stop port %u", portid); +- } +- +- rte_eal_mp_wait_lcore(); +- + } + if (signum == SIGTSTP) + rte_event_dev_dump(0, stdout); +@@ -406,25 +427,7 @@ main(int argc, char **argv) + !fdata->sched_core[lcore_id]) + continue; + +- if (fdata->rx_core[lcore_id]) +- printf( +- "[%s()] lcore %d executing NIC Rx\n", +- __func__, lcore_id); +- +- if (fdata->tx_core[lcore_id]) +- printf( +- "[%s()] 
lcore %d executing NIC Tx\n", +- __func__, lcore_id); +- +- if (fdata->sched_core[lcore_id]) +- printf("[%s()] lcore %d executing scheduler\n", +- __func__, lcore_id); +- +- if (fdata->worker_core[lcore_id]) +- printf( +- "[%s()] lcore %d executing worker, using eventdev port %u\n", +- __func__, lcore_id, +- worker_data[worker_idx].port_id); ++ dump_core_info(lcore_id, worker_data, worker_idx); + + err = rte_eal_remote_launch(fdata->cap.worker, + &worker_data[worker_idx], lcore_id); +@@ -439,8 +442,13 @@ main(int argc, char **argv) + + lcore_id = rte_lcore_id(); + +- if (core_in_use(lcore_id)) +- fdata->cap.worker(&worker_data[worker_idx++]); ++ if (core_in_use(lcore_id)) { ++ dump_core_info(lcore_id, worker_data, worker_idx); ++ fdata->cap.worker(&worker_data[worker_idx]); ++ ++ if (fdata->worker_core[lcore_id]) ++ worker_idx++; ++ } + + rte_eal_mp_wait_lcore(); + +@@ -465,6 +473,10 @@ main(int argc, char **argv) + } + + RTE_ETH_FOREACH_DEV(portid) { ++ rte_event_eth_rx_adapter_stop(portid); ++ rte_event_eth_tx_adapter_stop(portid); ++ if (rte_eth_dev_stop(portid) < 0) ++ printf("Failed to stop port %u", portid); + rte_eth_dev_close(portid); + } + +diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c +index d62dec434c..bb49e5faff 100644 +--- a/dpdk/examples/l3fwd/main.c ++++ b/dpdk/examples/l3fwd/main.c +@@ -48,7 +48,7 @@ + #include "l3fwd.h" + #include "l3fwd_event.h" + +-#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS ++#define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE + #define MAX_RX_QUEUE_PER_PORT 128 + + #define MAX_LCORE_PARAMS 1024 +diff --git a/dpdk/examples/meson.build b/dpdk/examples/meson.build +index 46ec80919e..b9ab24223f 100644 +--- a/dpdk/examples/meson.build ++++ b/dpdk/examples/meson.build +@@ -63,6 +63,10 @@ default_cflags = machine_args + if cc.has_argument('-Wno-format-truncation') + default_cflags += '-Wno-format-truncation' + endif ++default_ldflags = dpdk_extra_ldflags ++if get_option('default_library') == 'static' and not is_windows ++ default_ldflags += ['-Wl,--export-dynamic'] ++endif + + foreach example: examples + name = example.split('/')[-1] +@@ -70,6 +74,7 @@ foreach example: examples + sources = [] + allow_experimental_apis = false + cflags = default_cflags ++ ldflags = default_ldflags + + ext_deps = [execinfo] + includes = [include_directories(example)] +@@ -91,7 +96,7 @@ foreach example: examples + executable('dpdk-' + name, sources, + include_directories: includes, + link_whole: link_whole_libs, +- link_args: dpdk_extra_ldflags, ++ link_args: ldflags, + c_args: cflags, + dependencies: dep_objs) + elif not allow_skips +diff --git a/dpdk/examples/pipeline/cli.c b/dpdk/examples/pipeline/cli.c +index d0150cfcf6..e97e120606 100644 +--- a/dpdk/examples/pipeline/cli.c ++++ b/dpdk/examples/pipeline/cli.c +@@ -1294,7 +1294,7 @@ cli_process(char *in, char *out, size_t out_size, void *obj) + } + + if (strcmp(tokens[0], "link") == 0) { +- if (strcmp(tokens[1], "show") == 0) { ++ if ((n_tokens >= 2) && (strcmp(tokens[1], "show") == 0)) { + cmd_link_show(tokens, n_tokens, out, out_size, obj); + return; + } +diff --git a/dpdk/examples/pipeline/examples/vxlan_table.py b/dpdk/examples/pipeline/examples/vxlan_table.py +old mode 100644 +new mode 100755 +diff --git a/dpdk/examples/vm_power_manager/channel_manager.c b/dpdk/examples/vm_power_manager/channel_manager.c +index a26315051b..0a28cb643b 100644 +--- a/dpdk/examples/vm_power_manager/channel_manager.c ++++ b/dpdk/examples/vm_power_manager/channel_manager.c +@@ -27,7 +27,6 @@ + #include + + #include 
"channel_manager.h" +-#include "channel_commands.h" + #include "channel_monitor.h" + #include "power_manager.h" + +diff --git a/dpdk/examples/vm_power_manager/channel_monitor.c b/dpdk/examples/vm_power_manager/channel_monitor.c +index 228f06803d..99f81544d7 100644 +--- a/dpdk/examples/vm_power_manager/channel_monitor.c ++++ b/dpdk/examples/vm_power_manager/channel_monitor.c +@@ -35,7 +35,6 @@ + + #include + #include "channel_monitor.h" +-#include "channel_commands.h" + #include "channel_manager.h" + #include "power_manager.h" + #include "oob_monitor.h" +@@ -108,7 +107,7 @@ str_to_ether_addr(const char *a, struct rte_ether_addr *ether_addr) + } + + static int +-set_policy_mac(struct channel_packet *pkt, int idx, char *mac) ++set_policy_mac(struct rte_power_channel_packet *pkt, int idx, char *mac) + { + union PFID pfid; + int ret; +@@ -165,7 +164,7 @@ get_resource_id_from_vmname(const char *vm_name) + } + + static int +-parse_json_to_pkt(json_t *element, struct channel_packet *pkt, ++parse_json_to_pkt(json_t *element, struct rte_power_channel_packet *pkt, + const char *vm_name) + { + const char *key; +@@ -173,14 +172,14 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + int ret; + int resource_id; + +- memset(pkt, 0, sizeof(struct channel_packet)); ++ memset(pkt, 0, sizeof(*pkt)); + + pkt->nb_mac_to_monitor = 0; + pkt->t_boost_status.tbEnabled = false; +- pkt->workload = LOW; +- pkt->policy_to_use = TIME; +- pkt->command = PKT_POLICY; +- pkt->core_type = CORE_TYPE_PHYSICAL; ++ pkt->workload = RTE_POWER_WL_LOW; ++ pkt->policy_to_use = RTE_POWER_POLICY_TIME; ++ pkt->command = RTE_POWER_PKT_POLICY; ++ pkt->core_type = RTE_POWER_CORE_TYPE_PHYSICAL; + + if (vm_name == NULL) { + RTE_LOG(ERR, CHANNEL_MONITOR, +@@ -203,11 +202,11 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + char command[32]; + strlcpy(command, json_string_value(value), 32); + if (!strcmp(command, "power")) { +- pkt->command = CPU_POWER; ++ pkt->command = RTE_POWER_CPU_POWER; + } else if (!strcmp(command, "create")) { +- pkt->command = PKT_POLICY; ++ pkt->command = RTE_POWER_PKT_POLICY; + } else if (!strcmp(command, "destroy")) { +- pkt->command = PKT_POLICY_REMOVE; ++ pkt->command = RTE_POWER_PKT_POLICY_REMOVE; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Invalid command received in JSON\n"); +@@ -217,13 +216,17 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + char command[32]; + strlcpy(command, json_string_value(value), 32); + if (!strcmp(command, "TIME")) { +- pkt->policy_to_use = TIME; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_TIME; + } else if (!strcmp(command, "TRAFFIC")) { +- pkt->policy_to_use = TRAFFIC; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_TRAFFIC; + } else if (!strcmp(command, "WORKLOAD")) { +- pkt->policy_to_use = WORKLOAD; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_WORKLOAD; + } else if (!strcmp(command, "BRANCH_RATIO")) { +- pkt->policy_to_use = BRANCH_RATIO; ++ pkt->policy_to_use = ++ RTE_POWER_POLICY_BRANCH_RATIO; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Wrong policy_type received in JSON\n"); +@@ -233,11 +236,11 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + char command[32]; + strlcpy(command, json_string_value(value), 32); + if (!strcmp(command, "HIGH")) { +- pkt->workload = HIGH; ++ pkt->workload = RTE_POWER_WL_HIGH; + } else if (!strcmp(command, "MEDIUM")) { +- pkt->workload = MEDIUM; ++ pkt->workload = RTE_POWER_WL_MEDIUM; + } else if (!strcmp(command, "LOW")) { +- pkt->workload = LOW; ++ pkt->workload = 
RTE_POWER_WL_LOW; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Wrong workload received in JSON\n"); +@@ -283,17 +286,17 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + char unit[32]; + strlcpy(unit, json_string_value(value), 32); + if (!strcmp(unit, "SCALE_UP")) { +- pkt->unit = CPU_POWER_SCALE_UP; ++ pkt->unit = RTE_POWER_SCALE_UP; + } else if (!strcmp(unit, "SCALE_DOWN")) { +- pkt->unit = CPU_POWER_SCALE_DOWN; ++ pkt->unit = RTE_POWER_SCALE_DOWN; + } else if (!strcmp(unit, "SCALE_MAX")) { +- pkt->unit = CPU_POWER_SCALE_MAX; ++ pkt->unit = RTE_POWER_SCALE_MAX; + } else if (!strcmp(unit, "SCALE_MIN")) { +- pkt->unit = CPU_POWER_SCALE_MIN; ++ pkt->unit = RTE_POWER_SCALE_MIN; + } else if (!strcmp(unit, "ENABLE_TURBO")) { +- pkt->unit = CPU_POWER_ENABLE_TURBO; ++ pkt->unit = RTE_POWER_ENABLE_TURBO; + } else if (!strcmp(unit, "DISABLE_TURBO")) { +- pkt->unit = CPU_POWER_DISABLE_TURBO; ++ pkt->unit = RTE_POWER_DISABLE_TURBO; + } else { + RTE_LOG(ERR, CHANNEL_MONITOR, + "Invalid command received in JSON\n"); +@@ -312,7 +315,7 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt, + vm_name); + return -1; + } +- strlcpy(pkt->vm_name, vm_name, VM_MAX_NAME_SZ); ++ strlcpy(pkt->vm_name, vm_name, RTE_POWER_VM_MAX_NAME_SZ); + pkt->resource_id = resource_id; + } + return 0; +@@ -367,7 +370,7 @@ pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count) + { + int ret = 0; + +- if (pol->pkt.policy_to_use == BRANCH_RATIO) { ++ if (pol->pkt.policy_to_use == RTE_POWER_POLICY_BRANCH_RATIO) { + ci->cd[pcpu].oob_enabled = 1; + ret = add_core_to_monitor(pcpu); + if (ret == 0) +@@ -407,7 +410,7 @@ get_pcpu_to_control(struct policy *pol) + * differenciate between them when adding them to the branch monitor. + * Virtual cores need to be converted to physical cores. + */ +- if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) { ++ if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) { + /* + * If the cores in the policy are virtual, we need to map them + * to physical core. We look up the vm info and use that for +@@ -463,7 +466,7 @@ get_pfid(struct policy *pol) + } + + static int +-update_policy(struct channel_packet *pkt) ++update_policy(struct rte_power_channel_packet *pkt) + { + + unsigned int updated = 0; +@@ -479,7 +482,8 @@ update_policy(struct channel_packet *pkt) + policies[i].pkt = *pkt; + get_pcpu_to_control(&policies[i]); + /* Check Eth dev only for Traffic policy */ +- if (policies[i].pkt.policy_to_use == TRAFFIC) { ++ if (policies[i].pkt.policy_to_use == ++ RTE_POWER_POLICY_TRAFFIC) { + if (get_pfid(&policies[i]) < 0) { + updated = 1; + break; +@@ -496,7 +500,8 @@ update_policy(struct channel_packet *pkt) + policies[i].pkt = *pkt; + get_pcpu_to_control(&policies[i]); + /* Check Eth dev only for Traffic policy */ +- if (policies[i].pkt.policy_to_use == TRAFFIC) { ++ if (policies[i].pkt.policy_to_use == ++ RTE_POWER_POLICY_TRAFFIC) { + if (get_pfid(&policies[i]) < 0) { + updated = 1; + break; +@@ -512,7 +517,7 @@ update_policy(struct channel_packet *pkt) + } + + static int +-remove_policy(struct channel_packet *pkt __rte_unused) ++remove_policy(struct rte_power_channel_packet *pkt __rte_unused) + { + unsigned int i; + +@@ -615,7 +620,7 @@ apply_time_profile(struct policy *pol) + /* Format the date and time, down to a single second. 
*/ + strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm); + +- for (x = 0; x < HOURS; x++) { ++ for (x = 0; x < RTE_POWER_HOURS_PER_DAY; x++) { + + if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { +@@ -648,19 +653,19 @@ apply_workload_profile(struct policy *pol) + + int count; + +- if (pol->pkt.workload == HIGH) { ++ if (pol->pkt.workload == RTE_POWER_WL_HIGH) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { + if (pol->core_share[count].status != 1) + power_manager_scale_core_max( + pol->core_share[count].pcpu); + } +- } else if (pol->pkt.workload == MEDIUM) { ++ } else if (pol->pkt.workload == RTE_POWER_WL_MEDIUM) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { + if (pol->core_share[count].status != 1) + power_manager_scale_core_med( + pol->core_share[count].pcpu); + } +- } else if (pol->pkt.workload == LOW) { ++ } else if (pol->pkt.workload == RTE_POWER_WL_LOW) { + for (count = 0; count < pol->pkt.num_vcpu; count++) { + if (pol->core_share[count].status != 1) + power_manager_scale_core_min( +@@ -673,14 +678,14 @@ static void + apply_policy(struct policy *pol) + { + +- struct channel_packet *pkt = &pol->pkt; ++ struct rte_power_channel_packet *pkt = &pol->pkt; + + /*Check policy to use*/ +- if (pkt->policy_to_use == TRAFFIC) ++ if (pkt->policy_to_use == RTE_POWER_POLICY_TRAFFIC) + apply_traffic_profile(pol); +- else if (pkt->policy_to_use == TIME) ++ else if (pkt->policy_to_use == RTE_POWER_POLICY_TIME) + apply_time_profile(pol); +- else if (pkt->policy_to_use == WORKLOAD) ++ else if (pkt->policy_to_use == RTE_POWER_POLICY_WORKLOAD) + apply_workload_profile(pol); + } + +@@ -715,24 +720,24 @@ write_binary_packet(void *buffer, + } + + static int +-send_freq(struct channel_packet *pkt, ++send_freq(struct rte_power_channel_packet *pkt, + struct channel_info *chan_info, + bool freq_list) + { + unsigned int vcore_id = pkt->resource_id; +- struct channel_packet_freq_list channel_pkt_freq_list; ++ struct rte_power_channel_packet_freq_list channel_pkt_freq_list; + struct vm_info info; + + if (get_info_vm(pkt->vm_name, &info) != 0) + return -1; + +- if (!freq_list && vcore_id >= MAX_VCPU_PER_VM) ++ if (!freq_list && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM) + return -1; + + if (!info.allow_query) + return -1; + +- channel_pkt_freq_list.command = CPU_POWER_FREQ_LIST; ++ channel_pkt_freq_list.command = RTE_POWER_FREQ_LIST; + channel_pkt_freq_list.num_vcpu = info.num_vcpus; + + if (freq_list) { +@@ -751,12 +756,12 @@ send_freq(struct channel_packet *pkt, + } + + static int +-send_capabilities(struct channel_packet *pkt, ++send_capabilities(struct rte_power_channel_packet *pkt, + struct channel_info *chan_info, + bool list_requested) + { + unsigned int vcore_id = pkt->resource_id; +- struct channel_packet_caps_list channel_pkt_caps_list; ++ struct rte_power_channel_packet_caps_list channel_pkt_caps_list; + struct vm_info info; + struct rte_power_core_capabilities caps; + int ret; +@@ -764,13 +769,13 @@ send_capabilities(struct channel_packet *pkt, + if (get_info_vm(pkt->vm_name, &info) != 0) + return -1; + +- if (!list_requested && vcore_id >= MAX_VCPU_PER_VM) ++ if (!list_requested && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM) + return -1; + + if (!info.allow_query) + return -1; + +- channel_pkt_caps_list.command = CPU_POWER_CAPS_LIST; ++ channel_pkt_caps_list.command = RTE_POWER_CAPS_LIST; + channel_pkt_caps_list.num_vcpu = info.num_vcpus; + + if (list_requested) { +@@ -805,18 +810,19 @@ 
send_capabilities(struct channel_packet *pkt, + } + + static int +-send_ack_for_received_cmd(struct channel_packet *pkt, ++send_ack_for_received_cmd(struct rte_power_channel_packet *pkt, + struct channel_info *chan_info, + uint32_t command) + { + pkt->command = command; + return write_binary_packet(pkt, +- sizeof(struct channel_packet), ++ sizeof(*pkt), + chan_info); + } + + static int +-process_request(struct channel_packet *pkt, struct channel_info *chan_info) ++process_request(struct rte_power_channel_packet *pkt, ++ struct channel_info *chan_info) + { + int ret; + +@@ -827,10 +833,10 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + CHANNEL_MGR_CHANNEL_PROCESSING) == 0) + return -1; + +- if (pkt->command == CPU_POWER) { ++ if (pkt->command == RTE_POWER_CPU_POWER) { + unsigned int core_num; + +- if (pkt->core_type == CORE_TYPE_VIRTUAL) ++ if (pkt->core_type == RTE_POWER_CORE_TYPE_VIRTUAL) + core_num = get_pcpu(chan_info, pkt->resource_id); + else + core_num = pkt->resource_id; +@@ -842,22 +848,22 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + bool valid_unit = true; + + switch (pkt->unit) { +- case(CPU_POWER_SCALE_MIN): ++ case(RTE_POWER_SCALE_MIN): + scale_res = power_manager_scale_core_min(core_num); + break; +- case(CPU_POWER_SCALE_MAX): ++ case(RTE_POWER_SCALE_MAX): + scale_res = power_manager_scale_core_max(core_num); + break; +- case(CPU_POWER_SCALE_DOWN): ++ case(RTE_POWER_SCALE_DOWN): + scale_res = power_manager_scale_core_down(core_num); + break; +- case(CPU_POWER_SCALE_UP): ++ case(RTE_POWER_SCALE_UP): + scale_res = power_manager_scale_core_up(core_num); + break; +- case(CPU_POWER_ENABLE_TURBO): ++ case(RTE_POWER_ENABLE_TURBO): + scale_res = power_manager_enable_turbo_core(core_num); + break; +- case(CPU_POWER_DISABLE_TURBO): ++ case(RTE_POWER_DISABLE_TURBO): + scale_res = power_manager_disable_turbo_core(core_num); + break; + default: +@@ -869,8 +875,8 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + ret = send_ack_for_received_cmd(pkt, + chan_info, + scale_res >= 0 ? 
+- CPU_POWER_CMD_ACK : +- CPU_POWER_CMD_NACK); ++ RTE_POWER_CMD_ACK : ++ RTE_POWER_CMD_NACK); + if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n"); + } else +@@ -878,19 +884,19 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + + } + +- if (pkt->command == PKT_POLICY) { ++ if (pkt->command == RTE_POWER_PKT_POLICY) { + RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n", + pkt->vm_name); + int ret = send_ack_for_received_cmd(pkt, + chan_info, +- CPU_POWER_CMD_ACK); ++ RTE_POWER_CMD_ACK); + if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n"); + update_policy(pkt); + policy_is_set = 1; + } + +- if (pkt->command == PKT_POLICY_REMOVE) { ++ if (pkt->command == RTE_POWER_PKT_POLICY_REMOVE) { + ret = remove_policy(pkt); + if (ret == 0) + RTE_LOG(INFO, CHANNEL_MONITOR, +@@ -900,26 +906,26 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info) + "Policy %s does not exist\n", pkt->vm_name); + } + +- if (pkt->command == CPU_POWER_QUERY_FREQ_LIST || +- pkt->command == CPU_POWER_QUERY_FREQ) { ++ if (pkt->command == RTE_POWER_QUERY_FREQ_LIST || ++ pkt->command == RTE_POWER_QUERY_FREQ) { + + RTE_LOG(INFO, CHANNEL_MONITOR, + "Frequency for %s requested.\n", pkt->vm_name); + int ret = send_freq(pkt, + chan_info, +- pkt->command == CPU_POWER_QUERY_FREQ_LIST); ++ pkt->command == RTE_POWER_QUERY_FREQ_LIST); + if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during frequency sending.\n"); + } + +- if (pkt->command == CPU_POWER_QUERY_CAPS_LIST || +- pkt->command == CPU_POWER_QUERY_CAPS) { ++ if (pkt->command == RTE_POWER_QUERY_CAPS_LIST || ++ pkt->command == RTE_POWER_QUERY_CAPS) { + + RTE_LOG(INFO, CHANNEL_MONITOR, + "Capabilities for %s requested.\n", pkt->vm_name); + int ret = send_capabilities(pkt, + chan_info, +- pkt->command == CPU_POWER_QUERY_CAPS_LIST); ++ pkt->command == RTE_POWER_QUERY_CAPS_LIST); + if (ret < 0) + RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending capabilities.\n"); + } +@@ -988,7 +994,7 @@ channel_monitor_init(void) + static void + read_binary_packet(struct channel_info *chan_info) + { +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + void *buffer = &pkt; + int buffer_len = sizeof(pkt); + int n_bytes, err = 0; +@@ -1019,7 +1025,7 @@ read_binary_packet(struct channel_info *chan_info) + static void + read_json_packet(struct channel_info *chan_info) + { +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + int n_bytes, ret; + json_t *root; + json_error_t error; +@@ -1063,7 +1069,7 @@ read_json_packet(struct channel_info *chan_info) + /* + * Because our data is now in the json + * object, we can overwrite the pkt +- * with a channel_packet struct, using ++ * with a rte_power_channel_packet struct, using + * parse_json_to_pkt() + */ + ret = parse_json_to_pkt(root, &pkt, resource_name); +diff --git a/dpdk/examples/vm_power_manager/channel_monitor.h b/dpdk/examples/vm_power_manager/channel_monitor.h +index 7362a80d26..2b38c554b5 100644 +--- a/dpdk/examples/vm_power_manager/channel_monitor.h ++++ b/dpdk/examples/vm_power_manager/channel_monitor.h +@@ -5,8 +5,9 @@ + #ifndef CHANNEL_MONITOR_H_ + #define CHANNEL_MONITOR_H_ + ++#include ++ + #include "channel_manager.h" +-#include "channel_commands.h" + + struct core_share { + unsigned int pcpu; +@@ -18,11 +19,11 @@ struct core_share { + }; + + struct policy { +- struct channel_packet pkt; +- uint32_t pfid[MAX_VFS]; +- uint32_t port[MAX_VFS]; ++ struct 
rte_power_channel_packet pkt; ++ uint32_t pfid[RTE_POWER_MAX_VFS]; ++ uint32_t port[RTE_POWER_MAX_VFS]; + unsigned int enabled; +- struct core_share core_share[MAX_VCPU_PER_VM]; ++ struct core_share core_share[RTE_POWER_MAX_VCPU_PER_VM]; + }; + + #ifdef __cplusplus +diff --git a/dpdk/examples/vm_power_manager/guest_cli/main.c b/dpdk/examples/vm_power_manager/guest_cli/main.c +index f63b3c988a..4e17f7fb90 100644 +--- a/dpdk/examples/vm_power_manager/guest_cli/main.c ++++ b/dpdk/examples/vm_power_manager/guest_cli/main.c +@@ -48,10 +48,10 @@ parse_args(int argc, char **argv) + { "policy", required_argument, 0, 'o'}, + {NULL, 0, 0, 0} + }; +- struct channel_packet *policy; ++ struct rte_power_channel_packet *policy; + unsigned short int hours[MAX_HOURS]; +- unsigned short int cores[MAX_VCPU_PER_VM]; +- unsigned short int ports[MAX_VCPU_PER_VM]; ++ unsigned short int cores[RTE_POWER_MAX_VCPU_PER_VM]; ++ unsigned short int ports[RTE_POWER_MAX_VCPU_PER_VM]; + int i, cnt, idx; + + policy = get_policy(); +@@ -69,7 +69,8 @@ parse_args(int argc, char **argv) + switch (opt) { + /* portmask */ + case 'n': +- strlcpy(policy->vm_name, optarg, VM_MAX_NAME_SZ); ++ strlcpy(policy->vm_name, optarg, ++ RTE_POWER_VM_MAX_NAME_SZ); + printf("Setting VM Name to [%s]\n", policy->vm_name); + break; + case 'b': +@@ -97,14 +98,15 @@ parse_args(int argc, char **argv) + } + break; + case 'l': +- cnt = parse_set(optarg, cores, MAX_VCPU_PER_VM); ++ cnt = parse_set(optarg, cores, ++ RTE_POWER_MAX_VCPU_PER_VM); + if (cnt < 0) { + printf("Invalid value passed to vcpu-list - [%s]\n", + optarg); + break; + } + idx = 0; +- for (i = 0; i < MAX_VCPU_PER_VM; i++) { ++ for (i = 0; i < RTE_POWER_MAX_VCPU_PER_VM; i++) { + if (cores[i]) { + printf("***Using core %d\n", i); + policy->vcpu_to_control[idx++] = i; +@@ -114,14 +116,15 @@ parse_args(int argc, char **argv) + printf("Total cores: %d\n", idx); + break; + case 'p': +- cnt = parse_set(optarg, ports, MAX_VCPU_PER_VM); ++ cnt = parse_set(optarg, ports, ++ RTE_POWER_MAX_VCPU_PER_VM); + if (cnt < 0) { + printf("Invalid value passed to port-list - [%s]\n", + optarg); + break; + } + idx = 0; +- for (i = 0; i < MAX_VCPU_PER_VM; i++) { ++ for (i = 0; i < RTE_POWER_MAX_VCPU_PER_VM; i++) { + if (ports[i]) { + printf("***Using port %d\n", i); + if (set_policy_mac(i, idx++) != 0) { +@@ -135,13 +138,17 @@ parse_args(int argc, char **argv) + break; + case 'o': + if (!strcmp(optarg, "TRAFFIC")) +- policy->policy_to_use = TRAFFIC; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_TRAFFIC; + else if (!strcmp(optarg, "TIME")) +- policy->policy_to_use = TIME; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_TIME; + else if (!strcmp(optarg, "WORKLOAD")) +- policy->policy_to_use = WORKLOAD; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_WORKLOAD; + else if (!strcmp(optarg, "BRANCH_RATIO")) +- policy->policy_to_use = BRANCH_RATIO; ++ policy->policy_to_use = ++ RTE_POWER_POLICY_BRANCH_RATIO; + else { + printf("Invalid policy specified: %s\n", + optarg); +diff --git a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c +index cf1636e784..0bf5774ffc 100644 +--- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c ++++ b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c +@@ -19,7 +19,6 @@ + #include + + #include +-#include + + #include "vm_power_cli_guest.h" + +@@ -38,9 +37,9 @@ union PFID { + uint64_t pfid; + }; + +-static struct channel_packet policy; ++static struct rte_power_channel_packet policy; + +-struct 
channel_packet * ++struct rte_power_channel_packet * + get_policy(void) + { + return &policy; +@@ -49,7 +48,7 @@ get_policy(void) + int + set_policy_mac(int port, int idx) + { +- struct channel_packet *policy; ++ struct rte_power_channel_packet *policy; + union PFID pfid; + int ret; + +@@ -73,7 +72,7 @@ set_policy_mac(int port, int idx) + } + + int +-set_policy_defaults(struct channel_packet *pkt) ++set_policy_defaults(struct rte_power_channel_packet *pkt) + { + int ret; + +@@ -103,10 +102,10 @@ set_policy_defaults(struct channel_packet *pkt) + pkt->timer_policy.hours_to_use_traffic_profile[0] = 8; + pkt->timer_policy.hours_to_use_traffic_profile[1] = 10; + +- pkt->core_type = CORE_TYPE_VIRTUAL; +- pkt->workload = LOW; +- pkt->policy_to_use = TIME; +- pkt->command = PKT_POLICY; ++ pkt->core_type = RTE_POWER_CORE_TYPE_VIRTUAL; ++ pkt->workload = RTE_POWER_WL_LOW; ++ pkt->policy_to_use = RTE_POWER_POLICY_TIME; ++ pkt->command = RTE_POWER_PKT_POLICY; + strlcpy(pkt->vm_name, "ubuntu2", sizeof(pkt->vm_name)); + + return 0; +@@ -145,7 +144,7 @@ struct cmd_freq_list_result { + }; + + static int +-query_data(struct channel_packet *pkt, unsigned int lcore_id) ++query_data(struct rte_power_channel_packet *pkt, unsigned int lcore_id) + { + int ret; + ret = rte_power_guest_channel_send_msg(pkt, lcore_id); +@@ -157,19 +156,19 @@ query_data(struct channel_packet *pkt, unsigned int lcore_id) + } + + static int +-receive_freq_list(struct channel_packet_freq_list *pkt_freq_list, ++receive_freq_list(struct rte_power_channel_packet_freq_list *pkt_freq_list, + unsigned int lcore_id) + { + int ret; + + ret = rte_power_guest_channel_receive_msg(pkt_freq_list, +- sizeof(struct channel_packet_freq_list), ++ sizeof(*pkt_freq_list), + lcore_id); + if (ret < 0) { + RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n"); + return -1; + } +- if (pkt_freq_list->command != CPU_POWER_FREQ_LIST) { ++ if (pkt_freq_list->command != RTE_POWER_FREQ_LIST) { + RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n"); + return -1; + } +@@ -183,14 +182,14 @@ cmd_query_freq_list_parsed(void *parsed_result, + { + struct cmd_freq_list_result *res = parsed_result; + unsigned int lcore_id; +- struct channel_packet_freq_list pkt_freq_list; +- struct channel_packet pkt; ++ struct rte_power_channel_packet_freq_list pkt_freq_list; ++ struct rte_power_channel_packet pkt; + bool query_list = false; + int ret; + char *ep; + +- memset(&pkt, 0, sizeof(struct channel_packet)); +- memset(&pkt_freq_list, 0, sizeof(struct channel_packet_freq_list)); ++ memset(&pkt, 0, sizeof(pkt)); ++ memset(&pkt_freq_list, 0, sizeof(pkt_freq_list)); + + if (!strcmp(res->cpu_num, "all")) { + +@@ -203,18 +202,18 @@ cmd_query_freq_list_parsed(void *parsed_result, + return; + } + +- pkt.command = CPU_POWER_QUERY_FREQ_LIST; ++ pkt.command = RTE_POWER_QUERY_FREQ_LIST; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + query_list = true; + } else { + errno = 0; + lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10); +- if (errno != 0 || lcore_id >= MAX_VCPU_PER_VM || ++ if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM || + ep == res->cpu_num) { + cmdline_printf(cl, "Invalid parameter provided.\n"); + return; + } +- pkt.command = CPU_POWER_QUERY_FREQ; ++ pkt.command = RTE_POWER_QUERY_FREQ; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + pkt.resource_id = lcore_id; + } +@@ -267,19 +266,19 @@ struct cmd_query_caps_result { + }; + + static int +-receive_capabilities(struct channel_packet_caps_list *pkt_caps_list, 
++receive_capabilities(struct rte_power_channel_packet_caps_list *pkt_caps_list, + unsigned int lcore_id) + { + int ret; + + ret = rte_power_guest_channel_receive_msg(pkt_caps_list, +- sizeof(struct channel_packet_caps_list), ++ sizeof(*pkt_caps_list), + lcore_id); + if (ret < 0) { + RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n"); + return -1; + } +- if (pkt_caps_list->command != CPU_POWER_CAPS_LIST) { ++ if (pkt_caps_list->command != RTE_POWER_CAPS_LIST) { + RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n"); + return -1; + } +@@ -293,14 +292,14 @@ cmd_query_caps_list_parsed(void *parsed_result, + { + struct cmd_query_caps_result *res = parsed_result; + unsigned int lcore_id; +- struct channel_packet_caps_list pkt_caps_list; +- struct channel_packet pkt; ++ struct rte_power_channel_packet_caps_list pkt_caps_list; ++ struct rte_power_channel_packet pkt; + bool query_list = false; + int ret; + char *ep; + +- memset(&pkt, 0, sizeof(struct channel_packet)); +- memset(&pkt_caps_list, 0, sizeof(struct channel_packet_caps_list)); ++ memset(&pkt, 0, sizeof(pkt)); ++ memset(&pkt_caps_list, 0, sizeof(pkt_caps_list)); + + if (!strcmp(res->cpu_num, "all")) { + +@@ -313,18 +312,18 @@ cmd_query_caps_list_parsed(void *parsed_result, + return; + } + +- pkt.command = CPU_POWER_QUERY_CAPS_LIST; ++ pkt.command = RTE_POWER_QUERY_CAPS_LIST; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + query_list = true; + } else { + errno = 0; + lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10); +- if (errno != 0 || lcore_id >= MAX_VCPU_PER_VM || ++ if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM || + ep == res->cpu_num) { + cmdline_printf(cl, "Invalid parameter provided.\n"); + return; + } +- pkt.command = CPU_POWER_QUERY_CAPS; ++ pkt.command = RTE_POWER_QUERY_CAPS; + strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); + pkt.resource_id = lcore_id; + } +@@ -380,7 +379,7 @@ cmdline_parse_inst_t cmd_query_caps_list = { + static int + check_response_cmd(unsigned int lcore_id, int *result) + { +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + int ret; + + ret = rte_power_guest_channel_receive_msg(&pkt, sizeof pkt, lcore_id); +@@ -388,10 +387,10 @@ check_response_cmd(unsigned int lcore_id, int *result) + return -1; + + switch (pkt.command) { +- case(CPU_POWER_CMD_ACK): ++ case(RTE_POWER_CMD_ACK): + *result = 1; + break; +- case(CPU_POWER_CMD_NACK): ++ case(RTE_POWER_CMD_NACK): + *result = 0; + break; + default: +@@ -473,7 +472,7 @@ struct cmd_send_policy_result { + }; + + static inline int +-send_policy(struct channel_packet *pkt, struct cmdline *cl) ++send_policy(struct rte_power_channel_packet *pkt, struct cmdline *cl) + { + int ret; + +diff --git a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h +index 6ad14a3dea..b578ec0723 100644 +--- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h ++++ b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h +@@ -9,13 +9,11 @@ + extern "C" { + #endif + +-#include "channel_commands.h" +- +-struct channel_packet *get_policy(void); ++struct rte_power_channel_packet *get_policy(void); + + int set_policy_mac(int port, int idx); + +-int set_policy_defaults(struct channel_packet *pkt); ++int set_policy_defaults(struct rte_power_channel_packet *pkt); + + void run_cli(__rte_unused void *arg); + +diff --git a/dpdk/examples/vm_power_manager/main.c b/dpdk/examples/vm_power_manager/main.c +index 75d5b5364f..799d7b9bc3 100644 +--- 
a/dpdk/examples/vm_power_manager/main.c ++++ b/dpdk/examples/vm_power_manager/main.c +@@ -394,7 +394,7 @@ main(int argc, char **argv) + "Cannot init port %"PRIu8 "\n", + portid); + +- for (w = 0; w < MAX_VFS; w++) { ++ for (w = 0; w < RTE_POWER_MAX_VFS; w++) { + eth.addr_bytes[5] = w + 0xf0; + + ret = -ENOTSUP; +diff --git a/dpdk/examples/vm_power_manager/meson.build b/dpdk/examples/vm_power_manager/meson.build +index 1f813fbe87..637bd23235 100644 +--- a/dpdk/examples/vm_power_manager/meson.build ++++ b/dpdk/examples/vm_power_manager/meson.build +@@ -41,7 +41,7 @@ opt_dep = cc.find_library('virt', required : false) + build = opt_dep.found() + ext_deps += opt_dep + +-opt_dep = dependency('jansson', required : false) ++opt_dep = dependency('jansson', required : false, method: 'pkg-config') + if opt_dep.found() + ext_deps += opt_dep + cflags += '-DUSE_JANSSON' +diff --git a/dpdk/examples/vm_power_manager/vm_power_cli.c b/dpdk/examples/vm_power_manager/vm_power_cli.c +index ed0623a41d..1a55e553b9 100644 +--- a/dpdk/examples/vm_power_manager/vm_power_cli.c ++++ b/dpdk/examples/vm_power_manager/vm_power_cli.c +@@ -21,7 +21,6 @@ + #include "channel_manager.h" + #include "channel_monitor.h" + #include "power_manager.h" +-#include "channel_commands.h" + + struct cmd_quit_result { + cmdline_fixed_string_t quit; +diff --git a/dpdk/lib/librte_bitratestats/rte_bitrate.h b/dpdk/lib/librte_bitratestats/rte_bitrate.h +index 4865929e8f..fcd1564ddc 100644 +--- a/dpdk/lib/librte_bitratestats/rte_bitrate.h ++++ b/dpdk/lib/librte_bitratestats/rte_bitrate.h +@@ -7,6 +7,8 @@ + + #include + ++#include ++ + #ifdef __cplusplus + extern "C" { + #endif +diff --git a/dpdk/lib/librte_bpf/meson.build b/dpdk/lib/librte_bpf/meson.build +index 48460e9505..614277effd 100644 +--- a/dpdk/lib/librte_bpf/meson.build ++++ b/dpdk/lib/librte_bpf/meson.build +@@ -19,7 +19,7 @@ headers = files('bpf_def.h', + + deps += ['mbuf', 'net', 'ethdev'] + +-dep = dependency('libelf', required: false) ++dep = dependency('libelf', required: false, method: 'pkg-config') + if dep.found() + dpdk_conf.set('RTE_LIBRTE_BPF_ELF', 1) + sources += files('bpf_load_elf.c') +diff --git a/dpdk/lib/librte_compressdev/rte_compressdev_pmd.h b/dpdk/lib/librte_compressdev/rte_compressdev_pmd.h +index d5898a5b71..16b6bc6b35 100644 +--- a/dpdk/lib/librte_compressdev/rte_compressdev_pmd.h ++++ b/dpdk/lib/librte_compressdev/rte_compressdev_pmd.h +@@ -138,6 +138,8 @@ typedef void (*compressdev_stats_reset_t)(struct rte_compressdev *dev); + * + * @param dev + * Compress device ++ * @param dev_info ++ * Compress device information to populate + */ + typedef void (*compressdev_info_get_t)(struct rte_compressdev *dev, + struct rte_compressdev_info *dev_info); +diff --git a/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h b/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h +index 9a8a7e632b..1274436870 100644 +--- a/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h ++++ b/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h +@@ -121,7 +121,7 @@ extern struct rte_cryptodev *rte_cryptodevs; + * Function used to configure device. + * + * @param dev Crypto device pointer +- * config Crypto device configurations ++ * @param config Crypto device configurations + * + * @return Returns 0 on success + */ +@@ -176,7 +176,8 @@ typedef void (*cryptodev_stats_reset_t)(struct rte_cryptodev *dev); + /** + * Function used to get specific information of a device. 
+ * +- * @param dev Crypto device pointer ++ * @param dev Crypto device pointer ++ * @param dev_info Pointer to infos structure to populate + */ + typedef void (*cryptodev_info_get_t)(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info); +@@ -213,7 +214,7 @@ typedef int (*cryptodev_queue_pair_release_t)(struct rte_cryptodev *dev, + * + * @param dev Crypto device pointer + * @param nb_objs number of sessions objects in mempool +- * @param obj_cache l-core object cache size, see *rte_ring_create* ++ * @param obj_cache_size l-core object cache size, see *rte_ring_create* + * @param socket_id Socket Id to allocate mempool on. + * + * @return +@@ -253,7 +254,7 @@ typedef unsigned int (*cryptodev_asym_get_session_private_size_t)( + * + * @param dev Crypto device pointer + * @param xform Single or chain of crypto xforms +- * @param priv_sess Pointer to cryptodev's private session structure ++ * @param session Pointer to cryptodev's private session structure + * @param mp Mempool where the private session is allocated + * + * @return +@@ -271,7 +272,7 @@ typedef int (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev, + * + * @param dev Crypto device pointer + * @param xform Single or chain of crypto xforms +- * @param priv_sess Pointer to cryptodev's private session structure ++ * @param session Pointer to cryptodev's private session structure + * @param mp Mempool where the private session is allocated + * + * @return +@@ -333,7 +334,6 @@ typedef int (*cryptodev_sym_get_raw_dp_ctx_size_t)(struct rte_cryptodev *dev); + * + * @param dev Crypto device pointer. + * @param qp_id Crypto device queue pair index. +- * @param service_type Type of the service requested. + * @param ctx The raw data-path context data. + * @param sess_type session type. + * @param session_ctx Session context data. If NULL the driver +diff --git a/dpdk/lib/librte_eal/arm/include/rte_atomic_64.h b/dpdk/lib/librte_eal/arm/include/rte_atomic_64.h +index 467d32a455..fa6f334c0d 100644 +--- a/dpdk/lib/librte_eal/arm/include/rte_atomic_64.h ++++ b/dpdk/lib/librte_eal/arm/include/rte_atomic_64.h +@@ -53,15 +53,15 @@ rte_atomic_thread_fence(int memorder) + #endif + + #define __ATOMIC128_CAS_OP(cas_op_name, op_string) \ +-static __rte_noinline rte_int128_t \ +-cas_op_name(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated) \ ++static __rte_noinline void \ ++cas_op_name(rte_int128_t *dst, rte_int128_t *old, rte_int128_t updated) \ + { \ + /* caspX instructions register pair must start from even-numbered + * register at operand 1. + * So, specify registers for local variables here. 
+ */ \ +- register uint64_t x0 __asm("x0") = (uint64_t)old.val[0]; \ +- register uint64_t x1 __asm("x1") = (uint64_t)old.val[1]; \ ++ register uint64_t x0 __asm("x0") = (uint64_t)old->val[0]; \ ++ register uint64_t x1 __asm("x1") = (uint64_t)old->val[1]; \ + register uint64_t x2 __asm("x2") = (uint64_t)updated.val[0]; \ + register uint64_t x3 __asm("x3") = (uint64_t)updated.val[1]; \ + asm volatile( \ +@@ -73,9 +73,8 @@ cas_op_name(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated) \ + [upd1] "r" (x3), \ + [dst] "r" (dst) \ + : "memory"); \ +- old.val[0] = x0; \ +- old.val[1] = x1; \ +- return old; \ ++ old->val[0] = x0; \ ++ old->val[1] = x1; \ + } + + __ATOMIC128_CAS_OP(__cas_128_relaxed, "casp") +@@ -113,13 +112,14 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, + + #if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS) + if (success == __ATOMIC_RELAXED) +- old = __cas_128_relaxed(dst, expected, desired); ++ __cas_128_relaxed(dst, exp, desired); + else if (success == __ATOMIC_ACQUIRE) +- old = __cas_128_acquire(dst, expected, desired); ++ __cas_128_acquire(dst, exp, desired); + else if (success == __ATOMIC_RELEASE) +- old = __cas_128_release(dst, expected, desired); ++ __cas_128_release(dst, exp, desired); + else +- old = __cas_128_acq_rel(dst, expected, desired); ++ __cas_128_acq_rel(dst, exp, desired); ++ old = *exp; + #else + #define __HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE) + #define __HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \ +@@ -183,12 +183,12 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, + #undef __STORE_128 + + } while (unlikely(ret)); +-#endif + +- /* Unconditionally updating expected removes an 'if' statement. +- * expected should already be in register if not in the cache. ++ /* Unconditionally updating the value of exp removes an 'if' statement. ++ * The value of exp should already be in register if not in the cache. + */ + *exp = old; ++#endif + + return (old.int128 == expected.int128); + } +diff --git a/dpdk/lib/librte_eal/common/eal_common_fbarray.c b/dpdk/lib/librte_eal/common/eal_common_fbarray.c +index 1220e2bae9..d974f3dab7 100644 +--- a/dpdk/lib/librte_eal/common/eal_common_fbarray.c ++++ b/dpdk/lib/librte_eal/common/eal_common_fbarray.c +@@ -110,7 +110,7 @@ overlap(const struct mem_area *ma, const void *start, size_t len) + if (start >= ma_start && start < ma_end) + return 1; + /* end overlap? 
*/ +- if (end >= ma_start && end < ma_end) ++ if (end > ma_start && end < ma_end) + return 1; + return 0; + } +diff --git a/dpdk/lib/librte_eal/common/eal_common_options.c b/dpdk/lib/librte_eal/common/eal_common_options.c +index 424e8bcf87..622c7bc429 100644 +--- a/dpdk/lib/librte_eal/common/eal_common_options.c ++++ b/dpdk/lib/librte_eal/common/eal_common_options.c +@@ -494,6 +494,39 @@ eal_dlopen(const char *pathname) + return retval; + } + ++static int ++is_shared_build(void) ++{ ++#define EAL_SO "librte_eal.so" ++ char soname[32]; ++ size_t len, minlen = strlen(EAL_SO); ++ ++ len = strlcpy(soname, EAL_SO"."ABI_VERSION, sizeof(soname)); ++ if (len > sizeof(soname)) { ++ RTE_LOG(ERR, EAL, "Shared lib name too long in shared build check\n"); ++ len = sizeof(soname) - 1; ++ } ++ ++ while (len >= minlen) { ++ /* check if we have this .so loaded, if so - shared build */ ++ RTE_LOG(DEBUG, EAL, "Checking presence of .so '%s'\n", soname); ++ if (dlopen(soname, RTLD_LAZY | RTLD_NOLOAD) != NULL) { ++ RTE_LOG(INFO, EAL, "Detected shared linkage of DPDK\n"); ++ return 1; ++ } ++ ++ /* remove any version numbers off the end to retry */ ++ while (len-- > 0) ++ if (soname[len] == '.') { ++ soname[len] = '\0'; ++ break; ++ } ++ } ++ ++ RTE_LOG(INFO, EAL, "Detected static linkage of DPDK\n"); ++ return 0; ++} ++ + int + eal_plugins_init(void) + { +@@ -505,7 +538,7 @@ eal_plugins_init(void) + * (Using dlopen with NOLOAD flag on EAL, will return NULL if the EAL + * shared library is not already loaded i.e. it's statically linked.) + */ +- if (dlopen("librte_eal.so."ABI_VERSION, RTLD_LAZY | RTLD_NOLOAD) != NULL && ++ if (is_shared_build() && + *default_solib_dir != '\0' && + stat(default_solib_dir, &sb) == 0 && + S_ISDIR(sb.st_mode)) +diff --git a/dpdk/lib/librte_eal/freebsd/eal.c b/dpdk/lib/librte_eal/freebsd/eal.c +index d6ea023750..51478358c7 100644 +--- a/dpdk/lib/librte_eal/freebsd/eal.c ++++ b/dpdk/lib/librte_eal/freebsd/eal.c +@@ -906,7 +906,7 @@ rte_eal_init(int argc, char **argv) + ret = rte_service_init(); + if (ret) { + rte_eal_init_alert("rte_service_init() failed"); +- rte_errno = ENOEXEC; ++ rte_errno = -ret; + return -1; + } + +@@ -922,7 +922,7 @@ rte_eal_init(int argc, char **argv) + */ + ret = rte_service_start_with_defaults(); + if (ret < 0 && ret != -ENOTSUP) { +- rte_errno = ENOEXEC; ++ rte_errno = -ret; + return -1; + } + +diff --git a/dpdk/lib/librte_eal/include/generic/rte_mcslock.h b/dpdk/lib/librte_eal/include/generic/rte_mcslock.h +index d370bef17a..9f323bd2a2 100644 +--- a/dpdk/lib/librte_eal/include/generic/rte_mcslock.h ++++ b/dpdk/lib/librte_eal/include/generic/rte_mcslock.h +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + /** + * The rte_mcslock_t type. 
+diff --git a/dpdk/lib/librte_eal/include/rte_compat.h b/dpdk/lib/librte_eal/include/rte_compat.h +index 4cd8f68d68..2718612cce 100644 +--- a/dpdk/lib/librte_eal/include/rte_compat.h ++++ b/dpdk/lib/librte_eal/include/rte_compat.h +@@ -19,12 +19,23 @@ __attribute__((section(".text.experimental"))) + + #endif + +-#ifndef ALLOW_INTERNAL_API ++#ifndef __has_attribute ++/* if no has_attribute assume no support for attribute too */ ++#define __has_attribute(x) 0 ++#endif ++ ++#if !defined ALLOW_INTERNAL_API && __has_attribute(error) /* For GCC */ + + #define __rte_internal \ + __attribute__((error("Symbol is not public ABI"), \ + section(".text.internal"))) + ++#elif !defined ALLOW_INTERNAL_API && __has_attribute(diagnose_if) /* For clang */ ++ ++#define __rte_internal \ ++__attribute__((diagnose_if(1, "Symbol is not public ABI", "error"), \ ++section(".text.internal"))) ++ + #else + + #define __rte_internal \ +diff --git a/dpdk/lib/librte_eal/include/rte_keepalive.h b/dpdk/lib/librte_eal/include/rte_keepalive.h +index 4bda7ca56f..bd25508da8 100644 +--- a/dpdk/lib/librte_eal/include/rte_keepalive.h ++++ b/dpdk/lib/librte_eal/include/rte_keepalive.h +@@ -52,7 +52,7 @@ typedef void (*rte_keepalive_failure_callback_t)( + * @param data Data pointer passed to rte_keepalive_register_relay_callback() + * @param id_core ID of the core for which state is being reported + * @param core_state The current state of the core +- * @param Timestamp of when core was last seen alive ++ * @param last_seen Timestamp of when core was last seen alive + */ + typedef void (*rte_keepalive_relay_callback_t)( + void *data, +diff --git a/dpdk/lib/librte_eal/include/rte_reciprocal.h b/dpdk/lib/librte_eal/include/rte_reciprocal.h +index 63e16fde0a..735adb029b 100644 +--- a/dpdk/lib/librte_eal/include/rte_reciprocal.h ++++ b/dpdk/lib/librte_eal/include/rte_reciprocal.h +@@ -27,6 +27,8 @@ + + #include + ++#include ++ + struct rte_reciprocal { + uint32_t m; + uint8_t sh1, sh2; +diff --git a/dpdk/lib/librte_eal/linux/eal.c b/dpdk/lib/librte_eal/linux/eal.c +index a4161be630..32b48c3de9 100644 +--- a/dpdk/lib/librte_eal/linux/eal.c ++++ b/dpdk/lib/librte_eal/linux/eal.c +@@ -1273,7 +1273,7 @@ rte_eal_init(int argc, char **argv) + ret = rte_service_init(); + if (ret) { + rte_eal_init_alert("rte_service_init() failed"); +- rte_errno = ENOEXEC; ++ rte_errno = -ret; + return -1; + } + +@@ -1295,7 +1295,7 @@ rte_eal_init(int argc, char **argv) + */ + ret = rte_service_start_with_defaults(); + if (ret < 0 && ret != -ENOTSUP) { +- rte_errno = ENOEXEC; ++ rte_errno = -ret; + return -1; + } + +diff --git a/dpdk/lib/librte_eal/linux/eal_interrupts.c b/dpdk/lib/librte_eal/linux/eal_interrupts.c +index 2f03a61254..1dd994bd1f 100644 +--- a/dpdk/lib/librte_eal/linux/eal_interrupts.c ++++ b/dpdk/lib/librte_eal/linux/eal_interrupts.c +@@ -1241,7 +1241,7 @@ eal_epoll_process_event(struct epoll_event *evs, unsigned int n, + events[count].status = RTE_EPOLL_VALID; + events[count].fd = rev->fd; + events[count].epfd = rev->epfd; +- events[count].epdata.event = rev->epdata.event; ++ events[count].epdata.event = evs[i].events; + events[count].epdata.data = rev->epdata.data; + if (rev->epdata.cb_fun) + rev->epdata.cb_fun(rev->fd, +diff --git a/dpdk/lib/librte_eal/windows/eal.c b/dpdk/lib/librte_eal/windows/eal.c +index 105549de1b..1e5f6576f0 100644 +--- a/dpdk/lib/librte_eal/windows/eal.c ++++ b/dpdk/lib/librte_eal/windows/eal.c +@@ -264,6 +264,7 @@ rte_eal_init(int argc, char **argv) + const struct rte_config *config = rte_eal_get_configuration(); + 
struct internal_config *internal_conf = + eal_get_internal_configuration(); ++ int ret; + + rte_eal_log_init(NULL, 0); + +@@ -387,9 +388,10 @@ rte_eal_init(int argc, char **argv) + } + + /* Initialize services so drivers can register services during probe. */ +- if (rte_service_init()) { ++ ret = rte_service_init(); ++ if (ret) { + rte_eal_init_alert("rte_service_init() failed"); +- rte_errno = ENOEXEC; ++ rte_errno = -ret; + return -1; + } + +diff --git a/dpdk/lib/librte_eal/windows/eal_lcore.c b/dpdk/lib/librte_eal/windows/eal_lcore.c +index d5ff721e03..a85149be95 100644 +--- a/dpdk/lib/librte_eal/windows/eal_lcore.c ++++ b/dpdk/lib/librte_eal/windows/eal_lcore.c +@@ -38,6 +38,7 @@ static struct cpu_map cpu_map = { 0 }; + + /* eal_create_cpu_map() is called before logging is initialized */ + static void ++__rte_format_printf(1, 2) + log_early(const char *format, ...) + { + va_list va; +diff --git a/dpdk/lib/librte_eal/windows/eal_memory.c b/dpdk/lib/librte_eal/windows/eal_memory.c +index 7f8d3c2fa4..2cf5a5e649 100644 +--- a/dpdk/lib/librte_eal/windows/eal_memory.c ++++ b/dpdk/lib/librte_eal/windows/eal_memory.c +@@ -18,13 +18,12 @@ + #include + + /* MinGW-w64 headers lack VirtualAlloc2() in some distributions. +- * Provide a copy of definitions and code to load it dynamically. + * Note: definitions are copied verbatim from Microsoft documentation + * and don't follow DPDK code style. +- * +- * MEM_RESERVE_PLACEHOLDER being defined means VirtualAlloc2() is present too. + */ +-#ifndef MEM_PRESERVE_PLACEHOLDER ++#ifndef MEM_EXTENDED_PARAMETER_TYPE_BITS ++ ++#define MEM_EXTENDED_PARAMETER_TYPE_BITS 4 + + /* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-mem_extended_parameter_type */ + typedef enum MEM_EXTENDED_PARAMETER_TYPE { +@@ -37,8 +36,6 @@ typedef enum MEM_EXTENDED_PARAMETER_TYPE { + MemExtendedParameterMax + } *PMEM_EXTENDED_PARAMETER_TYPE; + +-#define MEM_EXTENDED_PARAMETER_TYPE_BITS 4 +- + /* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-mem_extended_parameter */ + typedef struct MEM_EXTENDED_PARAMETER { + struct { +@@ -54,6 +51,8 @@ typedef struct MEM_EXTENDED_PARAMETER { + } DUMMYUNIONNAME; + } MEM_EXTENDED_PARAMETER, *PMEM_EXTENDED_PARAMETER; + ++#endif /* defined(MEM_EXTENDED_PARAMETER_TYPE_BITS) */ ++ + /* https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc2 */ + typedef PVOID (*VirtualAlloc2_type)( + HANDLE Process, +@@ -65,17 +64,19 @@ typedef PVOID (*VirtualAlloc2_type)( + ULONG ParameterCount + ); + +-/* VirtualAlloc2() flags. */ ++/* MinGW-w64 distributions, even those that declare VirtualAlloc2(), ++ * lack it in import libraries, which results in a failure at link time. ++ * Link it dynamically in such case. ++ */ ++static VirtualAlloc2_type VirtualAlloc2_ptr; ++ ++#ifdef RTE_TOOLCHAIN_GCC ++ + #define MEM_COALESCE_PLACEHOLDERS 0x00000001 + #define MEM_PRESERVE_PLACEHOLDER 0x00000002 + #define MEM_REPLACE_PLACEHOLDER 0x00004000 + #define MEM_RESERVE_PLACEHOLDER 0x00040000 + +-/* Named exactly as the function, so that user code does not depend +- * on it being found at compile time or dynamically. +- */ +-static VirtualAlloc2_type VirtualAlloc2; +- + int + eal_mem_win32api_init(void) + { +@@ -89,7 +90,7 @@ eal_mem_win32api_init(void) + int ret = 0; + + /* Already done. 
*/ +- if (VirtualAlloc2 != NULL) ++ if (VirtualAlloc2_ptr != NULL) + return 0; + + library = LoadLibraryA(library_name); +@@ -98,9 +99,9 @@ eal_mem_win32api_init(void) + return -1; + } + +- VirtualAlloc2 = (VirtualAlloc2_type)( ++ VirtualAlloc2_ptr = (VirtualAlloc2_type)( + (void *)GetProcAddress(library, function)); +- if (VirtualAlloc2 == NULL) { ++ if (VirtualAlloc2_ptr == NULL) { + RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")\n", + library_name, function); + +@@ -117,14 +118,15 @@ eal_mem_win32api_init(void) + + #else + +-/* Stub in case VirtualAlloc2() is provided by the compiler. */ ++/* Stub in case VirtualAlloc2() is provided by the toolchain. */ + int + eal_mem_win32api_init(void) + { ++ VirtualAlloc2_ptr = VirtualAlloc2; + return 0; + } + +-#endif /* defined(MEM_RESERVE_PLACEHOLDER) */ ++#endif /* defined(RTE_TOOLCHAIN_GCC) */ + + static HANDLE virt2phys_device = INVALID_HANDLE_VALUE; + +@@ -278,7 +280,7 @@ eal_mem_reserve(void *requested_addr, size_t size, int flags) + + process = GetCurrentProcess(); + +- virt = VirtualAlloc2(process, requested_addr, size, ++ virt = VirtualAlloc2_ptr(process, requested_addr, size, + MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, + NULL, 0); + if (virt == NULL) { +@@ -364,7 +366,7 @@ eal_mem_commit(void *requested_addr, size_t size, int socket_id) + } + + flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; +- addr = VirtualAlloc2(process, requested_addr, size, ++ addr = VirtualAlloc2_ptr(process, requested_addr, size, + flags, PAGE_READWRITE, ¶m, param_count); + if (addr == NULL) { + /* Logging may overwrite GetLastError() result. */ +@@ -406,7 +408,7 @@ eal_mem_decommit(void *addr, size_t size) + } + + flags = MEM_RESERVE | MEM_RESERVE_PLACEHOLDER; +- stub = VirtualAlloc2( ++ stub = VirtualAlloc2_ptr( + process, addr, size, flags, PAGE_NOACCESS, NULL, 0); + if (stub == NULL) { + /* We lost the race for the VA. */ +diff --git a/dpdk/lib/librte_eal/windows/include/rte_os.h b/dpdk/lib/librte_eal/windows/include/rte_os.h +index 569ed92d51..7ef38ff06c 100644 +--- a/dpdk/lib/librte_eal/windows/include/rte_os.h ++++ b/dpdk/lib/librte_eal/windows/include/rte_os.h +@@ -25,22 +25,42 @@ extern "C" { + #define PATH_MAX _MAX_PATH + #endif + ++#ifndef sleep + #define sleep(x) Sleep(1000 * (x)) ++#endif + ++#ifndef strerror_r + #define strerror_r(a, b, c) strerror_s(b, c, a) ++#endif + ++#ifndef strdup + /* strdup is deprecated in Microsoft libc and _strdup is preferred */ + #define strdup(str) _strdup(str) ++#endif + ++#ifndef strtok_r + #define strtok_r(str, delim, saveptr) strtok_s(str, delim, saveptr) ++#endif + ++#ifndef index + #define index(a, b) strchr(a, b) ++#endif ++ ++#ifndef rindex + #define rindex(a, b) strrchr(a, b) ++#endif + ++#ifndef strncasecmp + #define strncasecmp(s1, s2, count) _strnicmp(s1, s2, count) ++#endif + ++#ifndef close + #define close _close ++#endif ++ ++#ifndef unlink + #define unlink _unlink ++#endif + + /* cpu_set macros implementation */ + #define RTE_CPU_AND(dst, src1, src2) CPU_AND(dst, src1, src2) +@@ -66,7 +86,7 @@ asprintf(char **buffer, const char *format, ...) 
+ return -1; + size++; + +- *buffer = malloc(size); ++ *buffer = (char *)malloc(size); + if (*buffer == NULL) + return -1; + +@@ -89,7 +109,9 @@ eal_strerror(int code) + return buffer; + } + ++#ifndef strerror + #define strerror eal_strerror ++#endif + + #endif /* RTE_TOOLCHAIN_GCC */ + +diff --git a/dpdk/lib/librte_ethdev/rte_eth_ctrl.h b/dpdk/lib/librte_ethdev/rte_eth_ctrl.h +index 1cca522fa3..8a50dbfef9 100644 +--- a/dpdk/lib/librte_ethdev/rte_eth_ctrl.h ++++ b/dpdk/lib/librte_ethdev/rte_eth_ctrl.h +@@ -9,6 +9,7 @@ + #include + #include + #include "rte_flow.h" ++#include "rte_ethdev.h" + + /** + * @deprecated Please use rte_flow API instead of this legacy one. +diff --git a/dpdk/lib/librte_ethdev/rte_ethdev.c b/dpdk/lib/librte_ethdev/rte_ethdev.c +index 17ddacc78d..ecd46ac01f 100644 +--- a/dpdk/lib/librte_ethdev/rte_ethdev.c ++++ b/dpdk/lib/librte_ethdev/rte_ethdev.c +@@ -1292,8 +1292,10 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct rte_eth_conf orig_conf; ++ uint16_t overhead_len; + int diag; + int ret; ++ uint16_t old_mtu; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + +@@ -1319,10 +1321,20 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + memcpy(&dev->data->dev_conf, dev_conf, + sizeof(dev->data->dev_conf)); + ++ /* Backup mtu for rollback */ ++ old_mtu = dev->data->mtu; ++ + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + goto rollback; + ++ /* Get the real Ethernet overhead length */ ++ if (dev_info.max_mtu != UINT16_MAX && ++ dev_info.max_rx_pktlen > dev_info.max_mtu) ++ overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu; ++ else ++ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; ++ + /* If number of queues specified by application for both Rx and Tx is + * zero, use driver preferred values. This cannot be done individually + * as it is valid for either Tx or Rx (but not both) to be zero. 
+@@ -1409,12 +1421,17 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + ret = -EINVAL; + goto rollback; + } ++ ++ /* Scale the MTU size to adapt max_rx_pkt_len */ ++ dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - ++ overhead_len; + } else { +- if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN || +- dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN) ++ uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len; ++ if (pktlen < RTE_ETHER_MIN_MTU + overhead_len || ++ pktlen > RTE_ETHER_MTU + overhead_len) + /* Use default value */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = +- RTE_ETHER_MAX_LEN; ++ RTE_ETHER_MTU + overhead_len; + } + + /* +@@ -1549,6 +1566,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + eth_dev_tx_queue_config(dev, 0); + rollback: + memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); ++ if (old_mtu != dev->data->mtu) ++ dev->data->mtu = old_mtu; + + rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); + return ret; +@@ -1801,7 +1820,7 @@ rte_eth_dev_close(uint16_t port_id) + rte_ethdev_trace_close(port_id); + *lasterr = rte_eth_dev_release_port(dev); + +- return eth_err(port_id, firsterr); ++ return firsterr; + } + + int +@@ -5692,7 +5711,7 @@ eth_dev_handle_port_link_status(const char *cmd __rte_unused, + if (!rte_eth_dev_is_valid_port(port_id)) + return -1; + +- ret = rte_eth_link_get(port_id, &link); ++ ret = rte_eth_link_get_nowait(port_id, &link); + if (ret < 0) + return -1; + +diff --git a/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h b/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h +index 27be376ed1..7eb9a77393 100644 +--- a/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h ++++ b/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h +@@ -158,9 +158,6 @@ rte_event_pmd_is_valid_dev(uint8_t dev_id) + * Event device pointer + * @param dev_info + * Event device information structure +- * +- * @return +- * Returns 0 on success + */ + typedef void (*eventdev_info_get_t)(struct rte_eventdev *dev, + struct rte_event_dev_info *dev_info); +@@ -297,7 +294,7 @@ typedef void (*eventdev_port_release_t)(void *port); + * Event device pointer + * @param port + * Event port pointer +- * @param link ++ * @param queues + * Points to an array of *nb_links* event queues to be linked + * to the event port. 
+ * @param priorities +@@ -383,6 +380,10 @@ typedef void (*eventdev_dump_t)(struct rte_eventdev *dev, FILE *f); + * + * @param dev + * Event device pointer ++ * @param mode ++ * Level (device, port or queue) ++ * @param queue_port_id ++ * Queue or port number depending on mode + * @param ids + * The stat ids to retrieve + * @param values +@@ -410,8 +411,14 @@ typedef int (*eventdev_xstats_reset_t)(struct rte_eventdev *dev, + * + * @param dev + * Event device pointer ++ * @param mode ++ * Level (device, port or queue) ++ * @param queue_port_id ++ * Queue or port number depending on mode + * @param xstats_names + * Array of name values to be filled in ++ * @param ids ++ * The stat ids to retrieve + * @param size + * Number of values in the xstats_names array + * @return +diff --git a/dpdk/lib/librte_fib/rte_fib.h b/dpdk/lib/librte_fib/rte_fib.h +index fef0749525..acad20963c 100644 +--- a/dpdk/lib/librte_fib/rte_fib.h ++++ b/dpdk/lib/librte_fib/rte_fib.h +@@ -19,6 +19,8 @@ + * for IPv4 Longest Prefix Match + */ + ++#include ++ + #include + + #ifdef __cplusplus +diff --git a/dpdk/lib/librte_fib/rte_fib6.h b/dpdk/lib/librte_fib/rte_fib6.h +index 668bffb2ba..0e193b8e7b 100644 +--- a/dpdk/lib/librte_fib/rte_fib6.h ++++ b/dpdk/lib/librte_fib/rte_fib6.h +@@ -19,6 +19,8 @@ + * for IPv6 Longest Prefix Match + */ + ++#include ++ + #include + + #ifdef __cplusplus +diff --git a/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c b/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c +index 1dda8aca02..69666c8b82 100644 +--- a/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c ++++ b/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c +@@ -104,6 +104,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + const unaligned_uint64_t *psd; + uint16_t flag_offset, ip_ofs, ip_flag; + int32_t ip_len; ++ int32_t trim; + + flag_offset = rte_be_to_cpu_16(ip_hdr->fragment_offset); + ip_ofs = (uint16_t)(flag_offset & RTE_IPV4_HDR_OFFSET_MASK); +@@ -117,14 +118,15 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + + ip_ofs *= RTE_IPV4_HDR_OFFSET_UNITS; + ip_len = rte_be_to_cpu_16(ip_hdr->total_length) - mb->l3_len; ++ trim = mb->pkt_len - (ip_len + mb->l3_len + mb->l2_len); + + IP_FRAG_LOG(DEBUG, "%s:%d:\n" +- "mbuf: %p, tms: %" PRIu64 +- ", key: <%" PRIx64 ", %#x>, ofs: %u, len: %d, flags: %#x\n" ++ "mbuf: %p, tms: %" PRIu64 ", key: <%" PRIx64 ", %#x>" ++ "ofs: %u, len: %d, padding: %d, flags: %#x\n" + "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, " + "max_entries: %u, use_entries: %u\n\n", + __func__, __LINE__, +- mb, tms, key.src_dst[0], key.id, ip_ofs, ip_len, ip_flag, ++ mb, tms, key.src_dst[0], key.id, ip_ofs, ip_len, trim, ip_flag, + tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries, + tbl->use_entries); + +@@ -134,6 +136,9 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + return NULL; + } + ++ if (unlikely(trim > 0)) ++ rte_pktmbuf_trim(mb, trim); ++ + /* try to find/add entry into the fragment's table. 
*/ + if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) { + IP_FRAG_MBUF2DR(dr, mb); +diff --git a/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c b/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c +index ad01055184..6bc0bf792a 100644 +--- a/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c ++++ b/dpdk/lib/librte_ip_frag/rte_ipv6_reassembly.c +@@ -142,6 +142,7 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + struct ip_frag_key key; + uint16_t ip_ofs; + int32_t ip_len; ++ int32_t trim; + + rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16); + rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16); +@@ -158,16 +159,17 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + * this is what we remove from the payload len. + */ + ip_len = rte_be_to_cpu_16(ip_hdr->payload_len) - sizeof(*frag_hdr); ++ trim = mb->pkt_len - (ip_len + mb->l3_len + mb->l2_len); + + IP_FRAG_LOG(DEBUG, "%s:%d:\n" + "mbuf: %p, tms: %" PRIu64 + ", key: <" IPv6_KEY_BYTES_FMT ", %#x>, " +- "ofs: %u, len: %d, flags: %#x\n" ++ "ofs: %u, len: %d, padding: %d, flags: %#x\n" + "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, " + "max_entries: %u, use_entries: %u\n\n", + __func__, __LINE__, + mb, tms, IPv6_KEY_BYTES(key.src_dst), key.id, ip_ofs, ip_len, +- RTE_IPV6_GET_MF(frag_hdr->frag_data), ++ trim, RTE_IPV6_GET_MF(frag_hdr->frag_data), + tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries, + tbl->use_entries); + +@@ -177,6 +179,9 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + return NULL; + } + ++ if (unlikely(trim > 0)) ++ rte_pktmbuf_trim(mb, trim); ++ + /* try to find/add entry into the fragment's table. */ + fp = ip_frag_find(tbl, dr, &key, tms); + if (fp == NULL) { +diff --git a/dpdk/lib/librte_ipsec/rte_ipsec_sad.h b/dpdk/lib/librte_ipsec/rte_ipsec_sad.h +index 3e67ab1e4b..b65d295831 100644 +--- a/dpdk/lib/librte_ipsec/rte_ipsec_sad.h ++++ b/dpdk/lib/librte_ipsec/rte_ipsec_sad.h +@@ -6,6 +6,8 @@ + #ifndef _RTE_IPSEC_SAD_H_ + #define _RTE_IPSEC_SAD_H_ + ++#include ++ + #include + + /** +diff --git a/dpdk/lib/librte_lpm/rte_lpm_altivec.h b/dpdk/lib/librte_lpm/rte_lpm_altivec.h +index 228c41b38e..4fbc1b595d 100644 +--- a/dpdk/lib/librte_lpm/rte_lpm_altivec.h ++++ b/dpdk/lib/librte_lpm/rte_lpm_altivec.h +@@ -88,28 +88,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], + if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[0] = i8.u32[0] + +- (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]]; + tbl[0] = *ptbl; + } + if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[1] = i8.u32[1] + +- (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]]; + tbl[1] = *ptbl; + } + if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[2] = i8.u32[2] + +- (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]]; + tbl[2] = *ptbl; + } + if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[3] = i8.u32[3] + +- (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]]; + 
tbl[3] = *ptbl; + } +diff --git a/dpdk/lib/librte_lpm/rte_lpm_neon.h b/dpdk/lib/librte_lpm/rte_lpm_neon.h +index 6c131d3125..4642a866f1 100644 +--- a/dpdk/lib/librte_lpm/rte_lpm_neon.h ++++ b/dpdk/lib/librte_lpm/rte_lpm_neon.h +@@ -81,28 +81,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], + if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[0] = i8.u32[0] + +- (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]]; + tbl[0] = *ptbl; + } + if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[1] = i8.u32[1] + +- (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]]; + tbl[1] = *ptbl; + } + if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[2] = i8.u32[2] + +- (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]]; + tbl[2] = *ptbl; + } + if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[3] = i8.u32[3] + +- (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]]; + tbl[3] = *ptbl; + } +diff --git a/dpdk/lib/librte_lpm/rte_lpm_sse.h b/dpdk/lib/librte_lpm/rte_lpm_sse.h +index 44770b6ff8..eaa863c522 100644 +--- a/dpdk/lib/librte_lpm/rte_lpm_sse.h ++++ b/dpdk/lib/librte_lpm/rte_lpm_sse.h +@@ -82,28 +82,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], + if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[0] = i8.u32[0] + +- (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]]; + tbl[0] = *ptbl; + } + if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[1] = i8.u32[1] + +- (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]]; + tbl[1] = *ptbl; + } + if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[2] = i8.u32[2] + +- (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]]; + tbl[2] = *ptbl; + } + if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) == + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) { + i8.u32[3] = i8.u32[3] + +- (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; ++ (tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]]; + tbl[3] = *ptbl; + } +diff --git a/dpdk/lib/librte_mbuf/rte_mbuf_core.h b/dpdk/lib/librte_mbuf/rte_mbuf_core.h +index 567551deab..9d1609336a 100644 +--- a/dpdk/lib/librte_mbuf/rte_mbuf_core.h ++++ b/dpdk/lib/librte_mbuf/rte_mbuf_core.h +@@ -17,8 +17,9 @@ + */ + + #include ++ + #include +-#include ++#include + + #ifdef __cplusplus + extern "C" { +diff --git a/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h b/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h +index d88e7bacc5..13f06d8ed2 100644 +--- 
a/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h ++++ b/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h +@@ -66,7 +66,16 @@ + * - any name that does not start with "rte_" in an application + */ + ++#include ++#include + #include ++ ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ + /** + * Maximum length of the dynamic field or flag string. + */ +@@ -326,4 +335,8 @@ int rte_mbuf_dyn_rx_timestamp_register(int *field_offset, uint64_t *rx_flag); + __rte_experimental + int rte_mbuf_dyn_tx_timestamp_register(int *field_offset, uint64_t *tx_flag); + ++#ifdef __cplusplus ++} + #endif ++ ++#endif /* _RTE_MBUF_DYN_H_ */ +diff --git a/dpdk/lib/librte_mempool/rte_mempool.c b/dpdk/lib/librte_mempool/rte_mempool.c +index b9f3fbd614..afb1239c8d 100644 +--- a/dpdk/lib/librte_mempool/rte_mempool.c ++++ b/dpdk/lib/librte_mempool/rte_mempool.c +@@ -1167,7 +1167,7 @@ mempool_audit_cache(const struct rte_mempool *mp) + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + const struct rte_mempool_cache *cache; + cache = &mp->local_cache[lcore_id]; +- if (cache->len > cache->flushthresh) { ++ if (cache->len > RTE_DIM(cache->objs)) { + RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n", + lcore_id); + rte_panic("MEMPOOL: invalid cache len\n"); +diff --git a/dpdk/lib/librte_metrics/meson.build b/dpdk/lib/librte_metrics/meson.build +index eed27b880a..28a8cc1155 100644 +--- a/dpdk/lib/librte_metrics/meson.build ++++ b/dpdk/lib/librte_metrics/meson.build +@@ -4,7 +4,7 @@ + sources = files('rte_metrics.c') + headers = files('rte_metrics.h') + +-jansson = dependency('jansson', required: false) ++jansson = dependency('jansson', required: false, method: 'pkg-config') + if jansson.found() + ext_deps += jansson + sources += files('rte_metrics_telemetry.c') +diff --git a/dpdk/lib/librte_metrics/rte_metrics_telemetry.c b/dpdk/lib/librte_metrics/rte_metrics_telemetry.c +index 901cbeb0a3..b8ee56ef01 100644 +--- a/dpdk/lib/librte_metrics/rte_metrics_telemetry.c ++++ b/dpdk/lib/librte_metrics/rte_metrics_telemetry.c +@@ -13,6 +13,8 @@ + #include "rte_metrics.h" + #include "rte_metrics_telemetry.h" + ++struct telemetry_metrics_data tel_met_data; ++ + int metrics_log_level; + + /* Logging Macros */ +diff --git a/dpdk/lib/librte_metrics/rte_metrics_telemetry.h b/dpdk/lib/librte_metrics/rte_metrics_telemetry.h +index 3435a55425..5dbb32ca0c 100644 +--- a/dpdk/lib/librte_metrics/rte_metrics_telemetry.h ++++ b/dpdk/lib/librte_metrics/rte_metrics_telemetry.h +@@ -34,8 +34,6 @@ struct telemetry_metrics_data { + int metrics_register_done; + }; + +-struct telemetry_metrics_data tel_met_data; +- + __rte_experimental + int32_t rte_metrics_tel_reg_all_ethdev(int *metrics_register_done, + int *reg_index_list); +diff --git a/dpdk/lib/librte_net/rte_geneve.h b/dpdk/lib/librte_net/rte_geneve.h +index bb67724c31..3bbc561847 100644 +--- a/dpdk/lib/librte_net/rte_geneve.h ++++ b/dpdk/lib/librte_net/rte_geneve.h +@@ -12,6 +12,8 @@ + */ + #include + ++#include ++ + #ifdef __cplusplus + extern "C" { + #endif +diff --git a/dpdk/lib/librte_node/rte_node_ip4_api.h b/dpdk/lib/librte_node/rte_node_ip4_api.h +index eb9ebd5f89..46d0d8976b 100644 +--- a/dpdk/lib/librte_node/rte_node_ip4_api.h ++++ b/dpdk/lib/librte_node/rte_node_ip4_api.h +@@ -21,6 +21,7 @@ extern "C" { + #endif + + #include ++#include + + /** + * IP4 lookup next nodes. 
+diff --git a/dpdk/lib/librte_pipeline/rte_swx_ctl.h b/dpdk/lib/librte_pipeline/rte_swx_ctl.h +index bab1894944..32815b69e2 100644 +--- a/dpdk/lib/librte_pipeline/rte_swx_ctl.h ++++ b/dpdk/lib/librte_pipeline/rte_swx_ctl.h +@@ -15,6 +15,7 @@ extern "C" { + + #include + #include ++#include + + #include + +diff --git a/dpdk/lib/librte_pipeline/rte_swx_pipeline.h b/dpdk/lib/librte_pipeline/rte_swx_pipeline.h +index d0a3439edf..f0a2cef777 100644 +--- a/dpdk/lib/librte_pipeline/rte_swx_pipeline.h ++++ b/dpdk/lib/librte_pipeline/rte_swx_pipeline.h +@@ -15,6 +15,7 @@ extern "C" { + + #include + #include ++#include + + #include + +diff --git a/dpdk/lib/librte_port/rte_port.h b/dpdk/lib/librte_port/rte_port.h +index 7f156ef47d..6b6a2cdd17 100644 +--- a/dpdk/lib/librte_port/rte_port.h ++++ b/dpdk/lib/librte_port/rte_port.h +@@ -186,7 +186,7 @@ typedef int (*rte_port_out_op_tx)( + */ + typedef int (*rte_port_out_op_tx_bulk)( + void *port, +- struct rte_mbuf **pkt, ++ struct rte_mbuf **pkts, + uint64_t pkts_mask); + + /** +diff --git a/dpdk/lib/librte_port/rte_swx_port.h b/dpdk/lib/librte_port/rte_swx_port.h +index 4beb59991f..ecf109d2ca 100644 +--- a/dpdk/lib/librte_port/rte_swx_port.h ++++ b/dpdk/lib/librte_port/rte_swx_port.h +@@ -50,7 +50,7 @@ typedef void * + /** + * Input port free + * +- * @param[in] args ++ * @param[in] port + * Input port handle. + */ + typedef void +@@ -129,7 +129,7 @@ typedef void * + /** + * Output port free + * +- * @param[in] args ++ * @param[in] port + * Output port handle. + */ + typedef void +diff --git a/dpdk/lib/librte_power/channel_commands.h b/dpdk/lib/librte_power/channel_commands.h +deleted file mode 100644 +index adc8e5ca27..0000000000 +--- a/dpdk/lib/librte_power/channel_commands.h ++++ /dev/null +@@ -1,125 +0,0 @@ +-/* SPDX-License-Identifier: BSD-3-Clause +- * Copyright(c) 2010-2014 Intel Corporation +- */ +- +-#ifndef CHANNEL_COMMANDS_H_ +-#define CHANNEL_COMMANDS_H_ +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-#include +-#include +- +-/* --- Incoming messages --- */ +- +-/* Valid Commands */ +-#define CPU_POWER 1 +-#define CPU_POWER_CONNECT 2 +-#define PKT_POLICY 3 +-#define PKT_POLICY_REMOVE 4 +- +-/* CPU Power Command Scaling */ +-#define CPU_POWER_SCALE_UP 1 +-#define CPU_POWER_SCALE_DOWN 2 +-#define CPU_POWER_SCALE_MAX 3 +-#define CPU_POWER_SCALE_MIN 4 +-#define CPU_POWER_ENABLE_TURBO 5 +-#define CPU_POWER_DISABLE_TURBO 6 +- +-/* CPU Power Queries */ +-#define CPU_POWER_QUERY_FREQ_LIST 7 +-#define CPU_POWER_QUERY_FREQ 8 +-#define CPU_POWER_QUERY_CAPS_LIST 9 +-#define CPU_POWER_QUERY_CAPS 10 +- +-/* --- Outgoing messages --- */ +- +-/* Generic Power Command Response */ +-#define CPU_POWER_CMD_ACK 1 +-#define CPU_POWER_CMD_NACK 2 +- +-/* CPU Power Query Responses */ +-#define CPU_POWER_FREQ_LIST 3 +-#define CPU_POWER_CAPS_LIST 4 +- +-#define HOURS 24 +- +-#define MAX_VFS 10 +-#define VM_MAX_NAME_SZ 32 +- +-#define MAX_VCPU_PER_VM 8 +- +-struct t_boost_status { +- bool tbEnabled; +-}; +- +-struct timer_profile { +- int busy_hours[HOURS]; +- int quiet_hours[HOURS]; +- int hours_to_use_traffic_profile[HOURS]; +-}; +- +-enum workload {HIGH, MEDIUM, LOW}; +-enum policy_to_use { +- TRAFFIC, +- TIME, +- WORKLOAD, +- BRANCH_RATIO +-}; +- +-struct traffic { +- uint32_t min_packet_thresh; +- uint32_t avg_max_packet_thresh; +- uint32_t max_max_packet_thresh; +-}; +- +-#define CORE_TYPE_VIRTUAL 0 +-#define CORE_TYPE_PHYSICAL 1 +- +-struct channel_packet { +- uint64_t resource_id; /**< core_num, device */ +- uint32_t unit; /**< scale down/up/min/max */ +- 
uint32_t command; /**< Power, IO, etc */ +- char vm_name[VM_MAX_NAME_SZ]; +- +- uint64_t vfid[MAX_VFS]; +- int nb_mac_to_monitor; +- struct traffic traffic_policy; +- uint8_t vcpu_to_control[MAX_VCPU_PER_VM]; +- uint8_t num_vcpu; +- struct timer_profile timer_policy; +- bool core_type; +- enum workload workload; +- enum policy_to_use policy_to_use; +- struct t_boost_status t_boost_status; +-}; +- +-struct channel_packet_freq_list { +- uint64_t resource_id; /**< core_num, device */ +- uint32_t unit; /**< scale down/up/min/max */ +- uint32_t command; /**< Power, IO, etc */ +- char vm_name[VM_MAX_NAME_SZ]; +- +- uint32_t freq_list[MAX_VCPU_PER_VM]; +- uint8_t num_vcpu; +-}; +- +-struct channel_packet_caps_list { +- uint64_t resource_id; /**< core_num, device */ +- uint32_t unit; /**< scale down/up/min/max */ +- uint32_t command; /**< Power, IO, etc */ +- char vm_name[VM_MAX_NAME_SZ]; +- +- uint64_t turbo[MAX_VCPU_PER_VM]; +- uint64_t priority[MAX_VCPU_PER_VM]; +- uint8_t num_vcpu; +-}; +- +- +-#ifdef __cplusplus +-} +-#endif +- +-#endif /* CHANNEL_COMMANDS_H_ */ +diff --git a/dpdk/lib/librte_power/guest_channel.c b/dpdk/lib/librte_power/guest_channel.c +index 7b5926e5c4..2f7507a03c 100644 +--- a/dpdk/lib/librte_power/guest_channel.c ++++ b/dpdk/lib/librte_power/guest_channel.c +@@ -15,9 +15,9 @@ + + + #include ++#include + + #include "guest_channel.h" +-#include "channel_commands.h" + + #define RTE_LOGTYPE_GUEST_CHANNEL RTE_LOGTYPE_USER1 + +@@ -55,7 +55,7 @@ int + guest_channel_host_connect(const char *path, unsigned int lcore_id) + { + int flags, ret; +- struct channel_packet pkt; ++ struct rte_power_channel_packet pkt; + char fd_path[PATH_MAX]; + int fd = -1; + +@@ -100,7 +100,7 @@ guest_channel_host_connect(const char *path, unsigned int lcore_id) + /* Send a test packet, this command is ignored by the host, but a successful + * send indicates that the host endpoint is monitoring. + */ +- pkt.command = CPU_POWER_CONNECT; ++ pkt.command = RTE_POWER_CPU_POWER_CONNECT; + global_fds[lcore_id] = fd; + ret = guest_channel_send_msg(&pkt, lcore_id); + if (ret != 0) { +@@ -119,7 +119,8 @@ guest_channel_host_connect(const char *path, unsigned int lcore_id) + } + + int +-guest_channel_send_msg(struct channel_packet *pkt, unsigned int lcore_id) ++guest_channel_send_msg(struct rte_power_channel_packet *pkt, ++ unsigned int lcore_id) + { + int ret, buffer_len = sizeof(*pkt); + void *buffer = pkt; +@@ -149,7 +150,7 @@ guest_channel_send_msg(struct channel_packet *pkt, unsigned int lcore_id) + return 0; + } + +-int rte_power_guest_channel_send_msg(struct channel_packet *pkt, ++int rte_power_guest_channel_send_msg(struct rte_power_channel_packet *pkt, + unsigned int lcore_id) + { + return guest_channel_send_msg(pkt, lcore_id); +diff --git a/dpdk/lib/librte_power/guest_channel.h b/dpdk/lib/librte_power/guest_channel.h +index e15db46fc7..43d532a5aa 100644 +--- a/dpdk/lib/librte_power/guest_channel.h ++++ b/dpdk/lib/librte_power/guest_channel.h +@@ -8,8 +8,6 @@ + extern "C" { + #endif + +-#include +- + /** + * Check if any Virtio-Serial VM end-points exist in path. + * +@@ -63,31 +61,16 @@ void guest_channel_host_disconnect(unsigned int lcore_id); + * - Negative on channel not connected. + * - errno on write to channel error. + */ +-int guest_channel_send_msg(struct channel_packet *pkt, unsigned int lcore_id); +- +-/** +- * Send a message contained in pkt over the Virtio-Serial to the host endpoint. +- * +- * @param pkt +- * Pointer to a populated struct channel_packet +- * +- * @param lcore_id +- * lcore_id. 
+- * +- * @return +- * - 0 on success. +- * - Negative on error. +- */ +-int rte_power_guest_channel_send_msg(struct channel_packet *pkt, +- unsigned int lcore_id); ++int guest_channel_send_msg(struct rte_power_channel_packet *pkt, ++ unsigned int lcore_id); + + /** + * Read a message contained in pkt over the Virtio-Serial + * from the host endpoint. + * + * @param pkt +- * Pointer to channel_packet or +- * channel_packet_freq_list struct. ++ * Pointer to rte_power_channel_packet or ++ * rte_power_channel_packet_freq_list struct. + * + * @param pkt_len + * Size of expected data packet. +@@ -103,30 +86,6 @@ int power_guest_channel_read_msg(void *pkt, + size_t pkt_len, + unsigned int lcore_id); + +-/** +- * Receive a message contained in pkt over the Virtio-Serial +- * from the host endpoint. +- * +- * @param pkt +- * Pointer to channel_packet or +- * channel_packet_freq_list struct. +- * +- * @param pkt_len +- * Size of expected data packet. +- * +- * @param lcore_id +- * lcore_id. +- * +- * @return +- * - 0 on success. +- * - Negative on error. +- */ +-__rte_experimental +-int +-rte_power_guest_channel_receive_msg(void *pkt, +- size_t pkt_len, +- unsigned int lcore_id); +- + + #ifdef __cplusplus + } +diff --git a/dpdk/lib/librte_power/meson.build b/dpdk/lib/librte_power/meson.build +index 4b4cf1b90b..5415695281 100644 +--- a/dpdk/lib/librte_power/meson.build ++++ b/dpdk/lib/librte_power/meson.build +@@ -10,5 +10,6 @@ sources = files('rte_power.c', 'power_acpi_cpufreq.c', + 'rte_power_empty_poll.c', + 'power_pstate_cpufreq.c', + 'power_common.c') +-headers = files('rte_power.h','rte_power_empty_poll.h') ++headers = files('rte_power.h','rte_power_empty_poll.h', ++ 'rte_power_guest_channel.h') + deps += ['timer'] +diff --git a/dpdk/lib/librte_power/power_kvm_vm.c b/dpdk/lib/librte_power/power_kvm_vm.c +index 409c3e03ab..ab7d4b8cee 100644 +--- a/dpdk/lib/librte_power/power_kvm_vm.c ++++ b/dpdk/lib/librte_power/power_kvm_vm.c +@@ -6,14 +6,14 @@ + + #include + ++#include "rte_power_guest_channel.h" + #include "guest_channel.h" +-#include "channel_commands.h" + #include "power_kvm_vm.h" + #include "power_common.h" + + #define FD_PATH "/dev/virtio-ports/virtio.serial.port.poweragent" + +-static struct channel_packet pkt[RTE_MAX_LCORE]; ++static struct rte_power_channel_packet pkt[RTE_MAX_LCORE]; + + int + power_kvm_vm_check_supported(void) +@@ -29,7 +29,7 @@ power_kvm_vm_init(unsigned int lcore_id) + lcore_id, RTE_MAX_LCORE-1); + return -1; + } +- pkt[lcore_id].command = CPU_POWER; ++ pkt[lcore_id].command = RTE_POWER_CPU_POWER; + pkt[lcore_id].resource_id = lcore_id; + return guest_channel_host_connect(FD_PATH, lcore_id); + } +@@ -90,25 +90,25 @@ send_msg(unsigned int lcore_id, uint32_t scale_direction) + int + power_kvm_vm_freq_up(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_UP); ++ return send_msg(lcore_id, RTE_POWER_SCALE_UP); + } + + int + power_kvm_vm_freq_down(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_DOWN); ++ return send_msg(lcore_id, RTE_POWER_SCALE_DOWN); + } + + int + power_kvm_vm_freq_max(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_MAX); ++ return send_msg(lcore_id, RTE_POWER_SCALE_MAX); + } + + int + power_kvm_vm_freq_min(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_SCALE_MIN); ++ return send_msg(lcore_id, RTE_POWER_SCALE_MIN); + } + + int +@@ -121,13 +121,13 @@ power_kvm_vm_turbo_status(__rte_unused unsigned int lcore_id) + int + power_kvm_vm_enable_turbo(unsigned int lcore_id) 
+ { +- return send_msg(lcore_id, CPU_POWER_ENABLE_TURBO); ++ return send_msg(lcore_id, RTE_POWER_ENABLE_TURBO); + } + + int + power_kvm_vm_disable_turbo(unsigned int lcore_id) + { +- return send_msg(lcore_id, CPU_POWER_DISABLE_TURBO); ++ return send_msg(lcore_id, RTE_POWER_DISABLE_TURBO); + } + + struct rte_power_core_capabilities; +diff --git a/dpdk/lib/librte_power/rte_power.h b/dpdk/lib/librte_power/rte_power.h +index bbbde4dfb4..c8086bf6ba 100644 +--- a/dpdk/lib/librte_power/rte_power.h ++++ b/dpdk/lib/librte_power/rte_power.h +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + #ifdef __cplusplus + extern "C" { +diff --git a/dpdk/lib/librte_power/rte_power_guest_channel.h b/dpdk/lib/librte_power/rte_power_guest_channel.h +new file mode 100644 +index 0000000000..ed4fbfdcd3 +--- /dev/null ++++ b/dpdk/lib/librte_power/rte_power_guest_channel.h +@@ -0,0 +1,176 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2010-2021 Intel Corporation ++ */ ++#ifndef RTE_POWER_GUEST_CHANNEL_H ++#define RTE_POWER_GUEST_CHANNEL_H ++ ++#include ++#include ++#include ++ ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define RTE_POWER_MAX_VFS 10 ++#define RTE_POWER_VM_MAX_NAME_SZ 32 ++#define RTE_POWER_MAX_VCPU_PER_VM 8 ++#define RTE_POWER_HOURS_PER_DAY 24 ++ ++/* Valid Commands */ ++#define RTE_POWER_CPU_POWER 1 ++#define RTE_POWER_CPU_POWER_CONNECT 2 ++#define RTE_POWER_PKT_POLICY 3 ++#define RTE_POWER_PKT_POLICY_REMOVE 4 ++ ++#define RTE_POWER_CORE_TYPE_VIRTUAL 0 ++#define RTE_POWER_CORE_TYPE_PHYSICAL 1 ++ ++/* CPU Power Command Scaling */ ++#define RTE_POWER_SCALE_UP 1 ++#define RTE_POWER_SCALE_DOWN 2 ++#define RTE_POWER_SCALE_MAX 3 ++#define RTE_POWER_SCALE_MIN 4 ++#define RTE_POWER_ENABLE_TURBO 5 ++#define RTE_POWER_DISABLE_TURBO 6 ++ ++/* CPU Power Queries */ ++#define RTE_POWER_QUERY_FREQ_LIST 7 ++#define RTE_POWER_QUERY_FREQ 8 ++#define RTE_POWER_QUERY_CAPS_LIST 9 ++#define RTE_POWER_QUERY_CAPS 10 ++ ++/* Generic Power Command Response */ ++#define RTE_POWER_CMD_ACK 1 ++#define RTE_POWER_CMD_NACK 2 ++ ++/* CPU Power Query Responses */ ++#define RTE_POWER_FREQ_LIST 3 ++#define RTE_POWER_CAPS_LIST 4 ++ ++struct rte_power_traffic_policy { ++ uint32_t min_packet_thresh; ++ uint32_t avg_max_packet_thresh; ++ uint32_t max_max_packet_thresh; ++}; ++ ++struct rte_power_timer_profile { ++ int busy_hours[RTE_POWER_HOURS_PER_DAY]; ++ int quiet_hours[RTE_POWER_HOURS_PER_DAY]; ++ int hours_to_use_traffic_profile[RTE_POWER_HOURS_PER_DAY]; ++}; ++ ++enum rte_power_workload_level { ++ RTE_POWER_WL_HIGH, ++ RTE_POWER_WL_MEDIUM, ++ RTE_POWER_WL_LOW ++}; ++ ++enum rte_power_policy { ++ RTE_POWER_POLICY_TRAFFIC, ++ RTE_POWER_POLICY_TIME, ++ RTE_POWER_POLICY_WORKLOAD, ++ RTE_POWER_POLICY_BRANCH_RATIO ++}; ++ ++struct rte_power_turbo_status { ++ bool tbEnabled; ++}; ++ ++struct rte_power_channel_packet { ++ uint64_t resource_id; /**< core_num, device */ ++ uint32_t unit; /**< scale down/up/min/max */ ++ uint32_t command; /**< Power, IO, etc */ ++ char vm_name[RTE_POWER_VM_MAX_NAME_SZ]; ++ ++ uint64_t vfid[RTE_POWER_MAX_VFS]; ++ int nb_mac_to_monitor; ++ struct rte_power_traffic_policy traffic_policy; ++ uint8_t vcpu_to_control[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint8_t num_vcpu; ++ struct rte_power_timer_profile timer_policy; ++ bool core_type; ++ enum rte_power_workload_level workload; ++ enum rte_power_policy policy_to_use; ++ struct rte_power_turbo_status t_boost_status; ++}; ++ ++struct rte_power_channel_packet_freq_list { ++ uint64_t resource_id; /**< core_num, device */ ++ 
uint32_t unit; /**< scale down/up/min/max */ ++ uint32_t command; /**< Power, IO, etc */ ++ char vm_name[RTE_POWER_VM_MAX_NAME_SZ]; ++ ++ uint32_t freq_list[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint8_t num_vcpu; ++}; ++ ++struct rte_power_channel_packet_caps_list { ++ uint64_t resource_id; /**< core_num, device */ ++ uint32_t unit; /**< scale down/up/min/max */ ++ uint32_t command; /**< Power, IO, etc */ ++ char vm_name[RTE_POWER_VM_MAX_NAME_SZ]; ++ ++ uint64_t turbo[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint64_t priority[RTE_POWER_MAX_VCPU_PER_VM]; ++ uint8_t num_vcpu; ++}; ++ ++/** ++ * @internal ++ * ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Send a message contained in pkt over the Virtio-Serial to the host endpoint. ++ * ++ * @param pkt ++ * Pointer to a populated struct channel_packet. ++ * ++ * @param lcore_id ++ * Use channel specific to this lcore_id. ++ * ++ * @return ++ * - 0 on success. ++ * - Negative on error. ++ */ ++__rte_experimental ++int rte_power_guest_channel_send_msg(struct rte_power_channel_packet *pkt, ++ unsigned int lcore_id); ++ ++/** ++ * @internal ++ * ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Receive a message contained in pkt over the Virtio-Serial ++ * from the host endpoint. ++ * ++ * @param pkt ++ * Pointer to channel_packet or ++ * channel_packet_freq_list struct. ++ * ++ * @param pkt_len ++ * Size of expected data packet. ++ * ++ * @param lcore_id ++ * Use channel specific to this lcore_id. ++ * ++ * @return ++ * - 0 on success. ++ * - Negative on error. ++ */ ++__rte_experimental ++int rte_power_guest_channel_receive_msg(void *pkt, ++ size_t pkt_len, ++ unsigned int lcore_id); ++ ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* RTE_POWER_GUEST_CHANNEL_H_ */ +diff --git a/dpdk/lib/librte_power/version.map b/dpdk/lib/librte_power/version.map +index 69ca9af616..13f0af3b2d 100644 +--- a/dpdk/lib/librte_power/version.map ++++ b/dpdk/lib/librte_power/version.map +@@ -34,4 +34,8 @@ EXPERIMENTAL { + rte_power_guest_channel_receive_msg; + rte_power_poll_stat_fetch; + rte_power_poll_stat_update; ++ ++ # added in 21.02 ++ rte_power_guest_channel_receive_msg; ++ rte_power_guest_channel_send_msg; + }; +diff --git a/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h b/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h +index 34dd7181b4..b1bed13ee2 100644 +--- a/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h ++++ b/dpdk/lib/librte_rawdev/rte_rawdev_pmd.h +@@ -155,6 +155,8 @@ typedef int (*rawdev_info_get_t)(struct rte_rawdev *dev, + * Raw device pointer + * @param config + * Void object containing device specific configuration ++ * @param config_size ++ * Size of the memory allocated for the configuration + * + * @return + * Returns 0 on success +@@ -214,6 +216,8 @@ typedef int (*rawdev_reset_t)(struct rte_rawdev *dev); + * Raw device queue index + * @param[out] queue_conf + * Raw device queue configuration structure ++ * @param queue_conf_size ++ * Size of the memory allocated for the configuration + * + * @return + * Returns 0 on success, negative errno on failure +@@ -232,6 +236,8 @@ typedef int (*rawdev_queue_conf_get_t)(struct rte_rawdev *dev, + * Rawqueue index + * @param queue_conf + * Rawqueue configuration structure ++ * @param queue_conf_size ++ * Size of the memory allocated for the configuration + * + * @return + * Returns 0 on success. +@@ -263,7 +269,7 @@ typedef int (*rawdev_queue_release_t)(struct rte_rawdev *dev, + * This function helps in getting queue count supported, independently. 
It + * can help in cases where iterator needs to be implemented. + * +- * @param ++ * @param dev + * Raw device pointer + * @return + * Number of queues; 0 is assumed to be a valid response. +@@ -279,7 +285,7 @@ typedef uint16_t (*rawdev_queue_count_t)(struct rte_rawdev *dev); + * + * @param dev + * Raw device pointer +- * @param bufs ++ * @param buffers + * array of buffers + * @param count + * number of buffers passed +@@ -303,7 +309,7 @@ typedef int (*rawdev_enqueue_bufs_t)(struct rte_rawdev *dev, + * + * @param dev + * Raw device pointer +- * @param bufs ++ * @param buffers + * array of buffers + * @param count + * Max buffers expected to be dequeued +@@ -444,7 +450,7 @@ typedef uint64_t (*rawdev_xstats_get_by_name_t)(const struct rte_rawdev *dev, + * + * @param dev + * Raw device pointer +- * @param status ++ * @param status_info + * void block containing device specific status information + * @return + * 0 for success, +@@ -472,8 +478,8 @@ typedef int (*rawdev_firmware_version_get_t)(struct rte_rawdev *dev, + * + * @param dev + * Raw device pointer +- * @param firmware_file +- * file pointer to firmware area ++ * @param firmware_buf ++ * Pointer to firmware image + * @return + * >0, ~0: for successful load + * <0: for failure +diff --git a/dpdk/lib/librte_rib/rte_rib.c b/dpdk/lib/librte_rib/rte_rib.c +index 2a370d7f84..6c29e1c49a 100644 +--- a/dpdk/lib/librte_rib/rte_rib.c ++++ b/dpdk/lib/librte_rib/rte_rib.c +@@ -301,7 +301,7 @@ rte_rib_insert(struct rte_rib *rib, uint32_t ip, uint8_t depth) + /* closest node found, new_node should be inserted in the middle */ + common_depth = RTE_MIN(depth, (*tmp)->depth); + common_prefix = ip ^ (*tmp)->ip; +- d = __builtin_clz(common_prefix); ++ d = (common_prefix == 0) ? 32 : __builtin_clz(common_prefix); + + common_depth = RTE_MIN(d, common_depth); + common_prefix = ip & rte_rib_depth_to_mask(common_depth); +diff --git a/dpdk/lib/librte_rib/rte_rib.h b/dpdk/lib/librte_rib/rte_rib.h +index f80752e5bd..ec97079c35 100644 +--- a/dpdk/lib/librte_rib/rte_rib.h ++++ b/dpdk/lib/librte_rib/rte_rib.h +@@ -18,6 +18,9 @@ + * Level compressed tree implementation for IPv4 Longest Prefix Match + */ + ++#include ++#include ++ + #include + + #ifdef __cplusplus +diff --git a/dpdk/lib/librte_rib/rte_rib6.h b/dpdk/lib/librte_rib/rte_rib6.h +index b5e10569b9..dbd52928a2 100644 +--- a/dpdk/lib/librte_rib/rte_rib6.h ++++ b/dpdk/lib/librte_rib/rte_rib6.h +@@ -20,6 +20,7 @@ + + #include + #include ++#include + + #ifdef __cplusplus + extern "C" { +diff --git a/dpdk/lib/librte_security/rte_security_driver.h b/dpdk/lib/librte_security/rte_security_driver.h +index c5abb07990..938373205c 100644 +--- a/dpdk/lib/librte_security/rte_security_driver.h ++++ b/dpdk/lib/librte_security/rte_security_driver.h +@@ -41,7 +41,7 @@ typedef int (*security_session_create_t)(void *device, + /** + * Free driver private session data. + * +- * @param dev Crypto/eth device pointer ++ * @param device Crypto/eth device pointer + * @param sess Security session structure + */ + typedef int (*security_session_destroy_t)(void *device, +@@ -95,16 +95,17 @@ int rte_security_dynfield_register(void); + /** + * Update the mbuf with provided metadata. + * ++ * @param device Crypto/eth device pointer + * @param sess Security session structure + * @param mb Packet buffer +- * @param mt Metadata ++ * @param params Metadata + * + * @return + * - Returns 0 if metadata updated successfully. + * - Returns -ve value for errors. 
+ */ + typedef int (*security_set_pkt_metadata_t)(void *device, +- struct rte_security_session *sess, struct rte_mbuf *m, ++ struct rte_security_session *sess, struct rte_mbuf *mb, + void *params); + + /** +diff --git a/dpdk/lib/librte_table/rte_lru_x86.h b/dpdk/lib/librte_table/rte_lru_x86.h +index 0e24906c2c..38476d956e 100644 +--- a/dpdk/lib/librte_table/rte_lru_x86.h ++++ b/dpdk/lib/librte_table/rte_lru_x86.h +@@ -12,6 +12,7 @@ extern "C" { + #include + + #include ++#include + + #ifndef RTE_TABLE_HASH_LRU_STRATEGY + #define RTE_TABLE_HASH_LRU_STRATEGY 2 +diff --git a/dpdk/lib/librte_table/rte_swx_table.h b/dpdk/lib/librte_table/rte_swx_table.h +index dc434b72ef..5a3137ec53 100644 +--- a/dpdk/lib/librte_table/rte_swx_table.h ++++ b/dpdk/lib/librte_table/rte_swx_table.h +@@ -127,12 +127,6 @@ typedef uint64_t + * progress and it is passed as a parameter to the lookup operation. This allows + * for multiple concurrent lookup operations into the same table. + * +- * @param[in] params +- * Table creation parameters. +- * @param[in] entries +- * Entries to be added to the table at creation time. +- * @param[in] args +- * Any additional table create arguments. It may be NULL. + * @return + * Table memory footprint in bytes, on success, or zero, on error. + */ +diff --git a/dpdk/lib/librte_table/rte_table.h b/dpdk/lib/librte_table/rte_table.h +index cccded1a1c..096ab8a7c8 100644 +--- a/dpdk/lib/librte_table/rte_table.h ++++ b/dpdk/lib/librte_table/rte_table.h +@@ -129,7 +129,7 @@ typedef int (*rte_table_op_entry_delete)( + * + * @param table + * Handle to lookup table instance +- * @param key ++ * @param keys + * Array containing lookup keys + * @param entries + * Array containing data to be associated with each key. Every item in the +@@ -166,7 +166,7 @@ typedef int (*rte_table_op_entry_add_bulk)( + * + * @param table + * Handle to lookup table instance +- * @param key ++ * @param keys + * Array containing lookup keys + * @param n_keys + * Number of keys to delete +diff --git a/dpdk/lib/librte_telemetry/rte_telemetry.h b/dpdk/lib/librte_telemetry/rte_telemetry.h +index 4693275c24..76172222c9 100644 +--- a/dpdk/lib/librte_telemetry/rte_telemetry.h ++++ b/dpdk/lib/librte_telemetry/rte_telemetry.h +@@ -4,7 +4,9 @@ + + #include + #include ++ + #include ++#include + + #ifndef _RTE_TELEMETRY_H_ + #define _RTE_TELEMETRY_H_ +diff --git a/dpdk/lib/librte_vhost/rte_vdpa.h b/dpdk/lib/librte_vhost/rte_vdpa.h +index f074ec0c4a..1437f400bf 100644 +--- a/dpdk/lib/librte_vhost/rte_vdpa.h ++++ b/dpdk/lib/librte_vhost/rte_vdpa.h +@@ -11,6 +11,8 @@ + * Device specific vhost lib + */ + ++#include ++ + /** Maximum name length for statistics counters */ + #define RTE_VDPA_STATS_NAME_SIZE 64 + +diff --git a/dpdk/lib/librte_vhost/rte_vdpa_dev.h b/dpdk/lib/librte_vhost/rte_vdpa_dev.h +index a60183f780..bfada387b0 100644 +--- a/dpdk/lib/librte_vhost/rte_vdpa_dev.h ++++ b/dpdk/lib/librte_vhost/rte_vdpa_dev.h +@@ -8,6 +8,7 @@ + #include + + #include "rte_vhost.h" ++#include "rte_vdpa.h" + + #define RTE_VHOST_QUEUE_ALL UINT16_MAX + +diff --git a/dpdk/lib/librte_vhost/rte_vhost_crypto.h b/dpdk/lib/librte_vhost/rte_vhost_crypto.h +index c809c46a21..8531757285 100644 +--- a/dpdk/lib/librte_vhost/rte_vhost_crypto.h ++++ b/dpdk/lib/librte_vhost/rte_vhost_crypto.h +@@ -5,6 +5,14 @@ + #ifndef _VHOST_CRYPTO_H_ + #define _VHOST_CRYPTO_H_ + ++#include ++ ++#include ++ ++/* pre-declare structs to avoid including full headers */ ++struct rte_mempool; ++struct rte_crypto_op; ++ + #define VHOST_CRYPTO_MBUF_POOL_SIZE (8192) + #define 
VHOST_CRYPTO_MAX_BURST_SIZE (64) + #define VHOST_CRYPTO_MAX_DATA_SIZE (4096) +diff --git a/dpdk/lib/librte_vhost/vhost.c b/dpdk/lib/librte_vhost/vhost.c +index b83cf639eb..4de588d752 100644 +--- a/dpdk/lib/librte_vhost/vhost.c ++++ b/dpdk/lib/librte_vhost/vhost.c +@@ -26,6 +26,7 @@ + #include "vhost_user.h" + + struct virtio_net *vhost_devices[MAX_VHOST_DEVICE]; ++pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER; + + /* Called with iotlb_lock read-locked */ + uint64_t +@@ -645,6 +646,7 @@ vhost_new_device(void) + struct virtio_net *dev; + int i; + ++ pthread_mutex_lock(&vhost_dev_lock); + for (i = 0; i < MAX_VHOST_DEVICE; i++) { + if (vhost_devices[i] == NULL) + break; +@@ -653,6 +655,7 @@ vhost_new_device(void) + if (i == MAX_VHOST_DEVICE) { + VHOST_LOG_CONFIG(ERR, + "Failed to find a free slot for new device.\n"); ++ pthread_mutex_unlock(&vhost_dev_lock); + return -1; + } + +@@ -660,10 +663,13 @@ vhost_new_device(void) + if (dev == NULL) { + VHOST_LOG_CONFIG(ERR, + "Failed to allocate memory for new dev.\n"); ++ pthread_mutex_unlock(&vhost_dev_lock); + return -1; + } + + vhost_devices[i] = dev; ++ pthread_mutex_unlock(&vhost_dev_lock); ++ + dev->vid = i; + dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET; + dev->slave_req_fd = -1; +diff --git a/dpdk/lib/librte_vhost/virtio_net.c b/dpdk/lib/librte_vhost/virtio_net.c +index 6c5128665e..55bfc161b5 100644 +--- a/dpdk/lib/librte_vhost/virtio_net.c ++++ b/dpdk/lib/librte_vhost/virtio_net.c +@@ -2232,7 +2232,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev, + { + bool wrap = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; +- struct virtio_net_hdr *hdr; + uint64_t lens[PACKED_BATCH_SIZE]; + uint64_t buf_lens[PACKED_BATCH_SIZE]; + uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf); +@@ -2289,13 +2288,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev, + ids[i] = descs[avail_idx + i].id; + } + +- if (virtio_net_with_host_offload(dev)) { +- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { +- hdr = (struct virtio_net_hdr *)(desc_addrs[i]); +- vhost_dequeue_offload(hdr, pkts[i]); +- } +- } +- + return 0; + + free_buf: +@@ -2313,6 +2305,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, + { + uint16_t avail_idx = vq->last_avail_idx; + uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf); ++ struct virtio_net_hdr *hdr; + uintptr_t desc_addrs[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; + uint16_t i; +@@ -2329,6 +2322,13 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, + (void *)(uintptr_t)(desc_addrs[i] + buf_offset), + pkts[i]->pkt_len); + ++ if (virtio_net_with_host_offload(dev)) { ++ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { ++ hdr = (struct virtio_net_hdr *)(desc_addrs[i]); ++ vhost_dequeue_offload(hdr, pkts[i]); ++ } ++ } ++ + if (virtio_net_is_inorder(dev)) + vhost_shadow_dequeue_batch_packed_inorder(vq, + ids[PACKED_BATCH_SIZE - 1]); +diff --git a/dpdk/license/bsd-2-clause.txt b/dpdk/license/bsd-2-clause.txt +new file mode 100644 +index 0000000000..dfb3f1adea +--- /dev/null ++++ b/dpdk/license/bsd-2-clause.txt +@@ -0,0 +1,20 @@ ++Redistribution and use in source and binary forms, with or without ++modification, are permitted provided that the following conditions are met: ++ ++ 1. Redistributions of source code must retain the above copyright notice, ++ this list of conditions and the following disclaimer. ++ ++ 2. 
Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in the ++ documentation and/or other materials provided with the distribution. ++ ++THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE ++FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ++OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/dpdk/license/isc.txt b/dpdk/license/isc.txt +new file mode 100644 +index 0000000000..34a6a760d5 +--- /dev/null ++++ b/dpdk/license/isc.txt +@@ -0,0 +1,11 @@ ++Permission to use, copy, modify, and/or distribute this software for any ++purpose with or without fee is hereby granted, provided that the above ++copyright notice and this permission notice appear in all copies. ++ ++THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD ++TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND ++FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR ++CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, ++DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER ++TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE ++OF THIS SOFTWARE. +diff --git a/dpdk/license/mit.txt b/dpdk/license/mit.txt +new file mode 100644 +index 0000000000..c4037a4605 +--- /dev/null ++++ b/dpdk/license/mit.txt +@@ -0,0 +1,18 @@ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice (including the next ++paragraph) shall be included in all copies or substantial portions of the ++Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++SOFTWARE. 
+diff --git a/dpdk/usertools/dpdk-devbind.py b/dpdk/usertools/dpdk-devbind.py +index c2ede3d4df..98bd1b7e4d 100755 +--- a/dpdk/usertools/dpdk-devbind.py ++++ b/dpdk/usertools/dpdk-devbind.py +@@ -7,6 +7,7 @@ + import os + import subprocess + import argparse ++import platform + + from glob import glob + from os.path import exists, basename +@@ -107,7 +108,17 @@ def module_is_loaded(module): + + loaded_modules = sysfs_mods + +- return module in sysfs_mods ++ # add built-in modules as loaded ++ release = platform.uname().release ++ filename = os.path.join("/lib/modules/", release, "modules.builtin") ++ if os.path.exists(filename): ++ try: ++ with open(filename) as f: ++ loaded_modules += [os.path.splitext(os.path.basename(mod))[0] for mod in f] ++ except IOError: ++ print("Warning: cannot read list of built-in kernel modules") ++ ++ return module in loaded_modules + + + def check_modules(): diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index b0a5ce8bec..bc51a5767f 100644 --- a/include/linux/pkt_cls.h diff --git a/SPECS/openvswitch2.15.spec b/SPECS/openvswitch2.15.spec index 09bbe7b..dab60cd 100644 --- a/SPECS/openvswitch2.15.spec +++ b/SPECS/openvswitch2.15.spec @@ -57,7 +57,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 2.15.0 -Release: 17%{?dist} +Release: 18%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -697,6 +697,11 @@ exit 0 %endif %changelog +* Wed May 12 2021 Timothy Redaelli - 2.15.0-18 +- Merge tag 'b6167fabb202faa025946348f514e369dba5853b' into fast-datapath-rhel-8 [RH gerrit: 1cdc5555f9] + dpdk-20.11.1 + + * Tue May 11 2021 Open vSwitch CI - 2.15.0-17 - Merging upstream branch-2.15 [RH gerrit: 120e2a5d87] Commit list: