diff --git a/.openvswitch.metadata b/.openvswitch.metadata
index 8e5bc3c..aae65b4 100644
--- a/.openvswitch.metadata
+++ b/.openvswitch.metadata
@@ -1,6 +1,6 @@
-002450621b33c5690060345b0aac25bc2426d675 SOURCES/docutils-0.12.tar.gz
-17331a86759beba4b6635ed530ce23b0b73c0744 SOURCES/dpdk-21.11.tar.xz
-722b63cd114c21041abda7b38d7f14e46338e3e0 SOURCES/openvswitch-2.17.0.tar.gz
-8509a716f9f936526f64fb23f313c5a9baf2f123 SOURCES/pyelftools-0.27.tar.gz
-d34f96421a86004aa5d26ecf975edefd09f948b1 SOURCES/Pygments-1.4.tar.gz
-3a11f130c63b057532ca37fe49c8967d0cbae1d5 SOURCES/Sphinx-1.2.3.tar.gz
+002450621b33c5690060345b0aac25bc2426d675  SOURCES/docutils-0.12.tar.gz
+722b63cd114c21041abda7b38d7f14e46338e3e0  SOURCES/openvswitch-2.17.0.tar.gz
+8509a716f9f936526f64fb23f313c5a9baf2f123  SOURCES/pyelftools-0.27.tar.gz
+d34f96421a86004aa5d26ecf975edefd09f948b1  SOURCES/Pygments-1.4.tar.gz
+3a11f130c63b057532ca37fe49c8967d0cbae1d5  SOURCES/Sphinx-1.2.3.tar.gz
+17331a86759beba4b6635ed530ce23b0b73c0744  SOURCES/dpdk-21.11.tar.xz
diff --git a/SOURCES/openvswitch-2.17.0.patch b/SOURCES/openvswitch-2.17.0.patch
index 38d7300..bde878c 100644
--- a/SOURCES/openvswitch-2.17.0.patch
+++ b/SOURCES/openvswitch-2.17.0.patch
@@ -1,5 +1,5 @@
 diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh
-index 6cd38ff3ef..c4ec93a398 100755
+index 6cd38ff3ef..2dabd3d0a4 100755
 --- a/.ci/linux-build.sh
 +++ b/.ci/linux-build.sh
 @@ -220,7 +220,7 @@ fi
@@ -11,8 +11,19 @@ index 6cd38ff3ef..c4ec93a398 100755
      fi
      install_dpdk $DPDK_VER
  fi
+@@ -244,9 +244,7 @@ fi
+ if [ "$ASAN" ]; then
+     # This will override default option configured in tests/atlocal.in.
+     export ASAN_OPTIONS='detect_leaks=1'
+-    # -O2 generates few false-positive memory leak reports in test-ovsdb
+-    # application, so lowering optimizations to -O1 here.
+-    CFLAGS_ASAN="-O1 -fno-omit-frame-pointer -fno-common -fsanitize=address"
++    CFLAGS_ASAN="-fno-omit-frame-pointer -fno-common -fsanitize=address"
+     CFLAGS_FOR_OVS="${CFLAGS_FOR_OVS} ${CFLAGS_ASAN}"
+ fi
+ 
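Note: the hunk above drops the old -O1 workaround, so the ASan CI job now builds at the default optimization level and relies on ASAN_OPTIONS='detect_leaks=1' for leak reporting. A minimal standalone C sketch (not part of the patch) of what that configuration catches:

    /* leak-demo.c -- build: gcc -fsanitize=address -fno-omit-frame-pointer leak-demo.c
     * run:   ASAN_OPTIONS=detect_leaks=1 ./a.out
     * LeakSanitizer (enabled by default with ASan on Linux) reports the
     * 64-byte allocation below as a direct leak at exit. */
    #include <stdlib.h>

    int main(void)
    {
        void *buf = malloc(64);   /* never freed */
        (void)buf;
        return 0;
    }
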
 diff --git a/.cirrus.yml b/.cirrus.yml
-index a7ae793bc4..a4d2a5bbcd 100644
+index a7ae793bc4..29be50029b 100644
 --- a/.cirrus.yml
 +++ b/.cirrus.yml
 @@ -2,8 +2,8 @@ freebsd_build_task:
@@ -22,14 +33,45 @@ index a7ae793bc4..a4d2a5bbcd 100644
 -      image_family: freebsd-12-2-snap
 -      image_family: freebsd-11-4-snap
 +      image_family: freebsd-12-3-snap
-+      image_family: freebsd-13-0-snap
++      image_family: freebsd-13-1-snap
      cpu: 4
      memory: 4G
  
+diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
+index eac3504e48..6c9dddbb59 100644
+--- a/.github/workflows/build-and-test.yml
++++ b/.github/workflows/build-and-test.yml
+@@ -6,7 +6,7 @@ jobs:
+   build-linux:
+     env:
+       dependencies: |
+-        automake libtool gcc bc libjemalloc1 libjemalloc-dev    \
++        automake libtool gcc bc libjemalloc2 libjemalloc-dev    \
+         libssl-dev llvm-dev libelf-dev libnuma-dev libpcap-dev  \
+         ninja-build selinux-policy-dev
+       deb_dependencies: |
+@@ -25,7 +25,7 @@ jobs:
+       TESTSUITE:   ${{ matrix.testsuite }}
+ 
+     name: linux ${{ join(matrix.*, ' ') }}
+-    runs-on: ubuntu-18.04
++    runs-on: ubuntu-20.04
+     timeout-minutes: 30
+ 
+     strategy:
 diff --git a/Documentation/faq/releases.rst b/Documentation/faq/releases.rst
-index af524251ff..319ee38c7d 100644
+index af524251ff..33a0d5d2d4 100644
 --- a/Documentation/faq/releases.rst
 +++ b/Documentation/faq/releases.rst
+@@ -32,7 +32,7 @@ Q: What does it mean for an Open vSwitch release to be LTS (long-term support)?
+     If a significant bug is identified in an LTS release, we will provide an
+     updated release that includes the fix.  Releases that are not LTS may not
+     be fixed and may just be supplanted by the next major release.  The current
+-    LTS release is 2.13.x.
++    LTS release is 2.17.x.
+ 
+     For more information on the Open vSwitch release process, refer to
+     :doc:`/internals/release-process`.
 @@ -208,9 +208,9 @@ Q: What DPDK version does each Open vSwitch release work with?
      2.12.x       18.11.9
      2.13.x       19.11.10
@@ -121,9 +163,58 @@ index c10e9bfacc..7c71284f97 100644
  ---------------------
     - Userspace datapath:
 diff --git a/acinclude.m4 b/acinclude.m4
-index 0c360fd1ef..61e88105f5 100644
+index 0c360fd1ef..c981f90bc7 100644
 --- a/acinclude.m4
 +++ b/acinclude.m4
+@@ -19,7 +19,7 @@ dnl This enables automatically running all unit tests with all MFEX
+ dnl implementations.
+ AC_DEFUN([OVS_CHECK_MFEX_AUTOVALIDATOR], [
+   AC_ARG_ENABLE([mfex-default-autovalidator],
+-                [AC_HELP_STRING([--enable-mfex-default-autovalidator],
++                [AS_HELP_STRING([--enable-mfex-default-autovalidator],
+                                 [Enable MFEX autovalidator as default
+                                  miniflow_extract implementation.])],
+                 [autovalidator=yes],[autovalidator=no])
+@@ -38,7 +38,7 @@ dnl This enables automatically running all unit tests with all DPCLS
+ dnl implementations.
+ AC_DEFUN([OVS_CHECK_DPCLS_AUTOVALIDATOR], [
+   AC_ARG_ENABLE([autovalidator],
+-                [AC_HELP_STRING([--enable-autovalidator],
++                [AS_HELP_STRING([--enable-autovalidator],
+                                 [Enable DPCLS autovalidator as default subtable
+                                  search implementation.])],
+                 [autovalidator=yes],[autovalidator=no])
+@@ -57,7 +57,7 @@ dnl Set OVS DPIF default implementation at configure time for running the unit
+ dnl tests on the whole codebase without modifying tests per DPIF impl
+ AC_DEFUN([OVS_CHECK_DPIF_AVX512_DEFAULT], [
+   AC_ARG_ENABLE([dpif-default-avx512],
+-                [AC_HELP_STRING([--enable-dpif-default-avx512],
++                [AS_HELP_STRING([--enable-dpif-default-avx512],
+                                 [Enable DPIF AVX512 implementation as default.])],
+                 [dpifavx512=yes],[dpifavx512=no])
+   AC_MSG_CHECKING([whether DPIF AVX512 is default implementation])
+@@ -89,7 +89,7 @@ dnl OVS_ENABLE_WERROR
+ AC_DEFUN([OVS_ENABLE_WERROR],
+   [AC_ARG_ENABLE(
+      [Werror],
+-     [AC_HELP_STRING([--enable-Werror], [Add -Werror to CFLAGS])],
++     [AS_HELP_STRING([--enable-Werror], [Add -Werror to CFLAGS])],
+      [], [enable_Werror=no])
+    AC_CONFIG_COMMANDS_PRE(
+      [if test "X$enable_Werror" = Xyes; then
+@@ -118,10 +118,10 @@ dnl
+ dnl Configure linux kernel source tree
+ AC_DEFUN([OVS_CHECK_LINUX], [
+   AC_ARG_WITH([linux],
+-              [AC_HELP_STRING([--with-linux=/path/to/linux],
++              [AS_HELP_STRING([--with-linux=/path/to/linux],
+                               [Specify the Linux kernel build directory])])
+   AC_ARG_WITH([linux-source],
+-              [AC_HELP_STRING([--with-linux-source=/path/to/linux-source],
++              [AS_HELP_STRING([--with-linux-source=/path/to/linux-source],
+                               [Specify the Linux kernel source directory
+                                (usually figured out automatically from build
+                                directory)])])
 @@ -305,6 +305,13 @@ AC_DEFUN([OVS_CHECK_LINUX_TC], [
      ])],
      [AC_DEFINE([HAVE_TCA_SKBEDIT_FLAGS], [1],
@@ -138,7 +229,25 @@ index 0c360fd1ef..61e88105f5 100644
  ])
  
  dnl OVS_CHECK_LINUX_SCTP_CT
-@@ -1424,7 +1431,7 @@ AC_DEFUN([OVS_ENABLE_SPARSE],
+@@ -348,7 +355,7 @@ dnl
+ dnl Check both Linux kernel AF_XDP and libbpf support
+ AC_DEFUN([OVS_CHECK_LINUX_AF_XDP], [
+   AC_ARG_ENABLE([afxdp],
+-                [AC_HELP_STRING([--enable-afxdp], [Enable AF-XDP support])],
++                [AS_HELP_STRING([--enable-afxdp], [Enable AF-XDP support])],
+                 [], [enable_afxdp=no])
+   AC_MSG_CHECKING([whether AF_XDP is enabled])
+   if test "$enable_afxdp" != yes; then
+@@ -390,7 +397,7 @@ dnl
+ dnl Configure DPDK source tree
+ AC_DEFUN([OVS_CHECK_DPDK], [
+   AC_ARG_WITH([dpdk],
+-              [AC_HELP_STRING([--with-dpdk=static|shared|yes],
++              [AS_HELP_STRING([--with-dpdk=static|shared|yes],
+                               [Specify "static" or "shared" depending on the
+                               DPDK libraries to use])],
+               [have_dpdk=true])
+@@ -1424,11 +1431,11 @@ AC_DEFUN([OVS_ENABLE_SPARSE],
     : ${SPARSE=sparse}
     AC_SUBST([SPARSE])
     AC_CONFIG_COMMANDS_PRE(
@@ -147,6 +256,11 @@ index 0c360fd1ef..61e88105f5 100644
  
     AC_ARG_ENABLE(
       [sparse],
+-     [AC_HELP_STRING([--enable-sparse], [Run "sparse" by default])],
++     [AS_HELP_STRING([--enable-sparse], [Run "sparse" by default])],
+      [], [enable_sparse=no])
+    AM_CONDITIONAL([ENABLE_SPARSE_BY_DEFAULT], [test $enable_sparse = yes])])
+ 
 diff --git a/configure.ac b/configure.ac
 index 4e9bcce272..5cc3f4801e 100644
 --- a/configure.ac
@@ -363,6 +477,18 @@ index 3e0d3a66e3..5ddd655d6c 100644
  openvswitch (2.17.0-1) unstable; urgency=low
  
     * New upstream version
+diff --git a/debian/control b/debian/control
+index 6420b9d3e2..27359a297d 100644
+--- a/debian/control
++++ b/debian/control
+@@ -9,6 +9,7 @@ Build-Depends: graphviz,
+                bzip2,
+                debhelper (>= 8),
+                dh-autoreconf,
++               dh-python,
+                libssl-dev,
+                libtool,
+                openssl,
 diff --git a/debian/openvswitch-switch.install b/debian/openvswitch-switch.install
 index 6a6e9a5435..5ac3df77b1 100644
 --- a/debian/openvswitch-switch.install
@@ -384,693 +510,49957 @@ index 7fd7bc55da..088734b0dc 100644
  debian/tmp/usr/share/man/man8/ovs-ctl.8
  utilities/ovs-dpctl-top.8
  utilities/ovs-dpctl.8
-diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c
-index a781346c4d..550b0ee8b5 100644
---- a/dpdk/lib/vhost/vhost_user.c
-+++ b/dpdk/lib/vhost/vhost_user.c
-@@ -1603,6 +1603,9 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev,
- 	int numa_node = SOCKET_ID_ANY;
- 	void *addr;
+diff --git a/dpdk/MAINTAINERS b/dpdk/MAINTAINERS
+index 18d9edaf88..84d8e261d5 100644
+--- a/dpdk/MAINTAINERS
++++ b/dpdk/MAINTAINERS
+@@ -64,6 +64,8 @@ T: git://dpdk.org/next/dpdk-next-eventdev
+ Stable Branches
+ M: Luca Boccassi <bluca@debian.org>
+ M: Kevin Traynor <ktraynor@redhat.com>
++M: Christian Ehrhardt <christian.ehrhardt@canonical.com>
++M: Xueming Li <xuemingl@nvidia.com>
+ T: git://dpdk.org/dpdk-stable
+ 
+ Security Issues
+diff --git a/dpdk/VERSION b/dpdk/VERSION
+index b570734337..63f795c0f8 100644
+--- a/dpdk/VERSION
++++ b/dpdk/VERSION
+@@ -1 +1 @@
+-21.11.0
++21.11.2
+diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c
+index c5fe440302..536ec64711 100644
+--- a/dpdk/app/dumpcap/main.c
++++ b/dpdk/app/dumpcap/main.c
+@@ -679,8 +679,13 @@ static void enable_pdump(struct rte_ring *r, struct rte_mempool *mp)
+ 		flags |= RTE_PDUMP_FLAG_PCAPNG;
+ 
+ 	TAILQ_FOREACH(intf, &interfaces, next) {
+-		if (promiscuous_mode)
+-			rte_eth_promiscuous_enable(intf->port);
++		if (promiscuous_mode) {
++			ret = rte_eth_promiscuous_enable(intf->port);
++			if (ret != 0)
++				fprintf(stderr,
++					"port %u set promiscuous enable failed: %d\n",
++					intf->port, ret);
++		}
+ 
+ 		ret = rte_pdump_enable_bpf(intf->port, RTE_PDUMP_ALL_QUEUES,
+ 					   flags, snaplen,
+diff --git a/dpdk/app/pdump/main.c b/dpdk/app/pdump/main.c
+index 46f9d25db0..101ac7db9a 100644
+--- a/dpdk/app/pdump/main.c
++++ b/dpdk/app/pdump/main.c
+@@ -903,11 +903,21 @@ dump_packets_core(void *arg)
+ 	return 0;
+ }
  
-+	if (validate_msg_fds(msg, 0) != 0)
-+		return RTE_VHOST_MSG_RESULT_ERR;
++static unsigned int
++get_next_core(unsigned int lcore)
++{
++	lcore = rte_get_next_lcore(lcore, 1, 0);
++	if (lcore == RTE_MAX_LCORE)
++		rte_exit(EXIT_FAILURE,
++				"Max core limit %u reached for packet capture", lcore);
++	return lcore;
++}
 +
- 	if (msg->size != sizeof(msg->payload.inflight)) {
- 		VHOST_LOG_CONFIG(ERR,
- 			"invalid get_inflight_fd message size is %d\n",
-@@ -1704,6 +1707,9 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
- 	int fd, i;
- 	int numa_node = SOCKET_ID_ANY;
+ static inline void
+ dump_packets(void)
+ {
+ 	int i;
+-	uint32_t lcore_id = 0;
++	unsigned int lcore_id = 0;
+ 
+ 	if (!multiple_core_capture) {
+ 		printf(" core (%u), capture for (%d) tuples\n",
+@@ -933,12 +943,12 @@ dump_packets(void)
+ 		return;
+ 	}
  
-+	if (validate_msg_fds(msg, 1) != 0)
-+		return RTE_VHOST_MSG_RESULT_ERR;
-+
- 	fd = msg->fds[0];
- 	if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
- 		VHOST_LOG_CONFIG(ERR,
-@@ -2873,6 +2879,9 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
- 	case VHOST_USER_SET_VRING_ADDR:
- 		vring_idx = msg->payload.addr.index;
- 		break;
-+	case VHOST_USER_SET_INFLIGHT_FD:
-+		vring_idx = msg->payload.inflight.num_queues - 1;
-+		break;
- 	default:
- 		return 0;
+-	lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
++	lcore_id = get_next_core(lcore_id);
+ 
+ 	for (i = 0; i < num_tuples; i++) {
+ 		rte_eal_remote_launch(dump_packets_core,
+ 				&pdump_t[i], lcore_id);
+-		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
++		lcore_id = get_next_core(lcore_id);
+ 
+ 		if (rte_eal_wait_lcore(lcore_id) < 0)
+ 			rte_exit(EXIT_FAILURE, "failed to wait\n");
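Note: rte_get_next_lcore() returns RTE_MAX_LCORE once the enabled cores are exhausted, and the old loop handed that sentinel straight to rte_eal_remote_launch(). The get_next_core() helper added above fails loudly instead. The same guard pattern, sketched outside DPDK with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_WORKERS 4u            /* stand-in for RTE_MAX_LCORE */

    /* Hypothetical iterator: next enabled worker id, or MAX_WORKERS
     * as the end-of-list sentinel. */
    static unsigned int pick_next_worker(unsigned int w)
    {
        return w + 1 < MAX_WORKERS ? w + 1 : MAX_WORKERS;
    }

    /* Check the sentinel before the id ever reaches a launch call. */
    static unsigned int next_worker(unsigned int w)
    {
        w = pick_next_worker(w);
        if (w == MAX_WORKERS) {
            fprintf(stderr, "worker limit %u reached\n", w);
            exit(EXIT_FAILURE);
        }
        return w;
    }

    int main(void)
    {
        for (unsigned int w = 0;; w = next_worker(w))
            printf("launching on worker %u\n", w);
    }
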
+diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c
+index ce140aaf84..e1ccdbbaa5 100644
+--- a/dpdk/app/proc-info/main.c
++++ b/dpdk/app/proc-info/main.c
+@@ -630,7 +630,7 @@ metrics_display(int port_id)
+ 
+ 	names =  rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0);
+ 	if (names == NULL) {
+-		printf("Cannot allocate memory for metrcis names\n");
++		printf("Cannot allocate memory for metrics names\n");
+ 		rte_free(metrics);
+ 		return;
  	}
-diff --git a/include/linux/automake.mk b/include/linux/automake.mk
-index 8f063f482e..f857c7e088 100644
---- a/include/linux/automake.mk
-+++ b/include/linux/automake.mk
-@@ -2,6 +2,7 @@ noinst_HEADERS += \
- 	include/linux/netlink.h \
- 	include/linux/netfilter/nf_conntrack_sctp.h \
- 	include/linux/pkt_cls.h \
-+	include/linux/gen_stats.h \
- 	include/linux/tc_act/tc_mpls.h \
- 	include/linux/tc_act/tc_pedit.h \
- 	include/linux/tc_act/tc_skbedit.h \
-diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
-new file mode 100644
-index 0000000000..6fae6f727c
---- /dev/null
-+++ b/include/linux/gen_stats.h
-@@ -0,0 +1,81 @@
-+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-+#ifndef __LINUX_GEN_STATS_WRAPPER_H
-+#define __LINUX_GEN_STATS_WRAPPER_H 1
+@@ -1109,7 +1109,7 @@ show_tm(void)
+ 				caplevel.n_nodes_max,
+ 				caplevel.n_nodes_nonleaf_max,
+ 				caplevel.n_nodes_leaf_max);
+-			printf("\t  -- indetical: non leaf %u leaf %u\n",
++			printf("\t  -- identical: non leaf %u leaf %u\n",
+ 				caplevel.non_leaf_nodes_identical,
+ 				caplevel.leaf_nodes_identical);
+ 
+@@ -1263,7 +1263,7 @@ show_ring(char *name)
+ 			printf("  - Name (%s) on socket (%d)\n"
+ 				"  - flags:\n"
+ 				"\t  -- Single Producer Enqueue (%u)\n"
+-				"\t  -- Single Consmer Dequeue (%u)\n",
++				"\t  -- Single Consumer Dequeue (%u)\n",
+ 				ptr->name,
+ 				ptr->memzone->socket_id,
+ 				ptr->flags & RING_F_SP_ENQ,
+@@ -1504,10 +1504,10 @@ main(int argc, char **argv)
+ 	if (nb_ports == 0)
+ 		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+ 
+-	/* If no port mask was specified, then show non-owned ports */
++	/* If no port mask was specified, then show all non-owned ports */
+ 	if (enabled_port_mask == 0) {
+ 		RTE_ETH_FOREACH_DEV(i)
+-			enabled_port_mask = 1ul << i;
++			enabled_port_mask |= 1ul << i;
+ 	}
+ 
+ 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
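Note: the one-character fix above (= to |=) is the whole bug: plain assignment rebuilds the port mask from scratch on every iteration, so only the last port survives. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long wrong = 0, right = 0;
        int i;

        for (i = 0; i < 3; i++) {
            wrong = 1ul << i;    /* overwritten each pass: ends up 0x4 */
            right |= 1ul << i;   /* accumulated: ends up 0x7, all ports kept */
        }
        printf("wrong=%#lx right=%#lx\n", wrong, right);
        return 0;
    }
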
+diff --git a/dpdk/app/test-acl/main.c b/dpdk/app/test-acl/main.c
+index c2de18770d..06e3847ab9 100644
+--- a/dpdk/app/test-acl/main.c
++++ b/dpdk/app/test-acl/main.c
+@@ -386,8 +386,8 @@ parse_cb_ipv4_trace(char *str, struct ipv4_5tuple *v)
+ }
+ 
+ /*
+- * Parses IPV6 address, exepcts the following format:
+- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
++ * Parse IPv6 address, expects the following format:
++ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X is a hexadecimal digit).
+  */
+ static int
+ parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
+@@ -994,7 +994,7 @@ print_usage(const char *prgname)
+ 			"should be either 1 or multiple of %zu, "
+ 			"but not greater then %u]\n"
+ 		"[--" OPT_MAX_SIZE
+-			"=<size limit (in bytes) for runtime ACL strucutures> "
++			"=<size limit (in bytes) for runtime ACL structures> "
+ 			"leave 0 for default behaviour]\n"
+ 		"[--" OPT_ITER_NUM "=<number of iterations to perform>]\n"
+ 		"[--" OPT_VERBOSE "=<verbose level>]\n"
+diff --git a/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c b/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c
+index da55b02b74..a3f6404eb2 100644
+--- a/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c
++++ b/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c
+@@ -175,16 +175,17 @@ main_loop(struct cperf_cyclecount_ctx *ctx, enum rte_comp_xform_type type)
+ 
+ 	/* one array for both enqueue and dequeue */
+ 	ops = rte_zmalloc_socket(NULL,
+-		2 * mem->total_bufs * sizeof(struct rte_comp_op *),
++		(test_data->burst_sz + mem->total_bufs) *
++		sizeof(struct rte_comp_op *),
+ 		0, rte_socket_id());
+ 
+ 	if (ops == NULL) {
+ 		RTE_LOG(ERR, USER1,
+-			"Can't allocate memory for ops strucures\n");
++			"Can't allocate memory for ops structures\n");
+ 		return -1;
+ 	}
+ 
+-	deq_ops = &ops[mem->total_bufs];
++	deq_ops = &ops[test_data->burst_sz];
+ 
+ 	if (type == RTE_COMP_COMPRESS) {
+ 		xform = (struct rte_comp_xform) {
+@@ -273,7 +274,7 @@ main_loop(struct cperf_cyclecount_ctx *ctx, enum rte_comp_xform_type type)
+ 			/* Allocate compression operations */
+ 			if (ops_needed && rte_mempool_get_bulk(
+ 						mem->op_pool,
+-						(void **)ops,
++						(void **)&ops[ops_unused],
+ 						ops_needed) != 0) {
+ 				RTE_LOG(ERR, USER1,
+ 				      "Could not allocate enough operations\n");
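Note: the sizing change above follows from how this benchmark recycles operations: up to burst_sz enqueue slots may still hold unused ops from the previous round, so a refill of ops_needed entries has to start at &ops[ops_unused], and the dequeue scratch array begins after the first burst_sz slots. A rough self-contained model of that layout (hypothetical sizes and helper):

    #include <stdlib.h>

    struct op;                         /* stands in for struct rte_comp_op */

    /* Hypothetical stand-in for rte_mempool_get_bulk(). */
    static void get_bulk(struct op **slot, size_t n) { (void)slot; (void)n; }

    int main(void)
    {
        size_t burst_sz = 32, total_bufs = 16, ops_unused = 4;

        /* One allocation serves both sides, as in the hunk above. */
        struct op **ops = calloc(burst_sz + total_bufs, sizeof(*ops));
        struct op **deq_ops = &ops[burst_sz];  /* dequeue scratch area */

        /* Refill after the leftovers instead of clobbering them. */
        get_bulk(&ops[ops_unused], burst_sz - ops_unused);

        (void)deq_ops;
        free(ops);
        return 0;
    }

With the old 2 * total_bufs sizing and deq_ops = &ops[total_bufs], the enqueue side could spill into the dequeue area whenever burst_sz exceeded total_bufs.
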
+diff --git a/dpdk/app/test-compress-perf/comp_perf_test_throughput.c b/dpdk/app/test-compress-perf/comp_perf_test_throughput.c
+index d3dff070b0..4569599eb9 100644
+--- a/dpdk/app/test-compress-perf/comp_perf_test_throughput.c
++++ b/dpdk/app/test-compress-perf/comp_perf_test_throughput.c
+@@ -72,7 +72,7 @@ main_loop(struct cperf_benchmark_ctx *ctx, enum rte_comp_xform_type type)
+ 
+ 	if (ops == NULL) {
+ 		RTE_LOG(ERR, USER1,
+-			"Can't allocate memory for ops strucures\n");
++			"Can't allocate memory for ops structures\n");
+ 		return -1;
+ 	}
+ 
+diff --git a/dpdk/app/test-compress-perf/comp_perf_test_verify.c b/dpdk/app/test-compress-perf/comp_perf_test_verify.c
+index f6e21368e8..7d06029488 100644
+--- a/dpdk/app/test-compress-perf/comp_perf_test_verify.c
++++ b/dpdk/app/test-compress-perf/comp_perf_test_verify.c
+@@ -75,7 +75,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type)
+ 
+ 	if (ops == NULL) {
+ 		RTE_LOG(ERR, USER1,
+-			"Can't allocate memory for ops strucures\n");
++			"Can't allocate memory for ops structures\n");
+ 		return -1;
+ 	}
+ 
+diff --git a/dpdk/app/test-compress-perf/main.c b/dpdk/app/test-compress-perf/main.c
+index cc9951a9b1..ce9e80bedc 100644
+--- a/dpdk/app/test-compress-perf/main.c
++++ b/dpdk/app/test-compress-perf/main.c
+@@ -67,7 +67,7 @@ comp_perf_check_capabilities(struct comp_test_data *test_data, uint8_t cdev_id)
+ 
+ 	uint64_t comp_flags = cap->comp_feature_flags;
+ 
+-	/* Huffman enconding */
++	/* Huffman encoding */
+ 	if (test_data->huffman_enc == RTE_COMP_HUFFMAN_FIXED &&
+ 			(comp_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) {
+ 		RTE_LOG(ERR, USER1,
+@@ -168,7 +168,7 @@ comp_perf_initialize_compressdev(struct comp_test_data *test_data,
+ 		cdev_id = enabled_cdevs[i];
+ 
+ 		struct rte_compressdev_info cdev_info;
+-		uint8_t socket_id = rte_compressdev_socket_id(cdev_id);
++		int socket_id = rte_compressdev_socket_id(cdev_id);
+ 
+ 		rte_compressdev_info_get(cdev_id, &cdev_info);
+ 		if (cdev_info.max_nb_queue_pairs &&
+@@ -194,6 +194,7 @@ comp_perf_initialize_compressdev(struct comp_test_data *test_data,
+ 			.max_nb_priv_xforms = NUM_MAX_XFORMS,
+ 			.max_nb_streams = 0
+ 		};
++		test_data->nb_qps = config.nb_queue_pairs;
+ 
+ 		if (rte_compressdev_configure(cdev_id, &config) < 0) {
+ 			RTE_LOG(ERR, USER1, "Device configuration failed\n");
+diff --git a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+index ba1f104f72..5842f29d43 100644
+--- a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
++++ b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+@@ -334,7 +334,7 @@ pmd_cyclecount_bench_burst_sz(
+ 	 * queue, so we never get any failed enqs unless the driver won't accept
+ 	 * the exact number of descriptors we requested, or the driver won't
+ 	 * wrap around the end of the TX ring. However, since we're only
+-	 * dequeueing once we've filled up the queue, we have to benchmark it
++	 * dequeuing once we've filled up the queue, we have to benchmark it
+ 	 * piecemeal and then average out the results.
+ 	 */
+ 	cur_op = 0;
+diff --git a/dpdk/app/test-eventdev/evt_options.c b/dpdk/app/test-eventdev/evt_options.c
+index 753a7dbd7d..4ae44801da 100644
+--- a/dpdk/app/test-eventdev/evt_options.c
++++ b/dpdk/app/test-eventdev/evt_options.c
+@@ -336,7 +336,7 @@ usage(char *program)
+ 		"\t--deq_tmo_nsec     : global dequeue timeout\n"
+ 		"\t--prod_type_ethdev : use ethernet device as producer.\n"
+ 		"\t--prod_type_timerdev : use event timer device as producer.\n"
+-		"\t                     expity_nsec would be the timeout\n"
++		"\t                     expiry_nsec would be the timeout\n"
+ 		"\t                     in ns.\n"
+ 		"\t--prod_type_timerdev_burst : use timer device as producer\n"
+ 		"\t                             burst mode.\n"
+diff --git a/dpdk/app/test-eventdev/test_order_common.c b/dpdk/app/test-eventdev/test_order_common.c
+index ff7813f9c2..603e7c9178 100644
+--- a/dpdk/app/test-eventdev/test_order_common.c
++++ b/dpdk/app/test-eventdev/test_order_common.c
+@@ -253,7 +253,7 @@ void
+ order_opt_dump(struct evt_options *opt)
+ {
+ 	evt_dump_producer_lcores(opt);
+-	evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
++	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ 	evt_dump_worker_lcores(opt);
+ 	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
+ }
+diff --git a/dpdk/app/test-fib/main.c b/dpdk/app/test-fib/main.c
+index ecd420116a..830c32cc44 100644
+--- a/dpdk/app/test-fib/main.c
++++ b/dpdk/app/test-fib/main.c
+@@ -624,7 +624,7 @@ print_usage(void)
+ 		"(if -f is not specified)>]\n"
+ 		"[-r <percentage ratio of random ip's to lookup"
+ 		"(if -t is not specified)>]\n"
+-		"[-c <do comarison with LPM library>]\n"
++		"[-c <do comparison with LPM library>]\n"
+ 		"[-6 <do tests with ipv6 (default ipv4)>]\n"
+ 		"[-s <shuffle randomly generated routes>]\n"
+ 		"[-a <check nexthops for all ipv4 address space"
+@@ -641,7 +641,7 @@ print_usage(void)
+ 		"[-g <number of tbl8's for dir24_8 or trie FIBs>]\n"
+ 		"[-w <path to the file to dump routing table>]\n"
+ 		"[-u <path to the file to dump ip's for lookup>]\n"
+-		"[-v <type of loookup function:"
++		"[-v <type of lookup function:"
+ 		"\ts1, s2, s3 (3 types of scalar), v (vector) -"
+ 		" for DIR24_8 based FIB\n"
+ 		"\ts, v - for TRIE based ipv6 FIB>]\n",
+@@ -711,6 +711,10 @@ parse_opts(int argc, char **argv)
+ 				print_usage();
+ 				rte_exit(-EINVAL, "Invalid option -n\n");
+ 			}
++
++			if (config.nb_routes < config.print_fract)
++				config.print_fract = config.nb_routes;
++
+ 			break;
+ 		case 'd':
+ 			distrib_string = optarg;
+@@ -1242,6 +1246,10 @@ main(int argc, char **argv)
+ 		config.nb_routes = 0;
+ 		while (fgets(line, sizeof(line), fr) != NULL)
+ 			config.nb_routes++;
++
++		if (config.nb_routes < config.print_fract)
++			config.print_fract = config.nb_routes;
++
+ 		rewind(fr);
+ 	}
+ 
+diff --git a/dpdk/app/test-flow-perf/config.h b/dpdk/app/test-flow-perf/config.h
+index 0db2254bd1..29b63298e0 100644
+--- a/dpdk/app/test-flow-perf/config.h
++++ b/dpdk/app/test-flow-perf/config.h
+@@ -28,7 +28,7 @@
+ #define PORT_ID_DST 1
+ #define TEID_VALUE 1
+ 
+-/* Flow items/acctions max size */
++/* Flow items/actions max size */
+ #define MAX_ITEMS_NUM 32
+ #define MAX_ACTIONS_NUM 32
+ #define MAX_ATTRS_NUM 16
+diff --git a/dpdk/app/test-flow-perf/main.c b/dpdk/app/test-flow-perf/main.c
+index 11f1ee0e1e..f375097028 100644
+--- a/dpdk/app/test-flow-perf/main.c
++++ b/dpdk/app/test-flow-perf/main.c
+@@ -16,6 +16,7 @@
+  * gives packet per second measurement.
+  */
+ 
++#include <locale.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -1519,7 +1520,7 @@ dump_used_cpu_time(const char *item,
+ 	 * threads time.
+ 	 *
+ 	 * Throughput: total count of rte rules divided
+-	 * over the average of the time cosumed by all
++	 * over the average of the time consumed by all
+ 	 * threads time.
+ 	 */
+ 	double insertion_latency_time;
+@@ -1713,36 +1714,6 @@ do_tx(struct lcore_info *li, uint16_t cnt, uint16_t tx_port,
+ 		rte_pktmbuf_free(li->pkts[i]);
+ }
+ 
+-/*
+- * Method to convert numbers into pretty numbers that easy
+- * to read. The design here is to add comma after each three
+- * digits and set all of this inside buffer.
+- *
+- * For example if n = 1799321, the output will be
+- * 1,799,321 after this method which is easier to read.
+- */
+-static char *
+-pretty_number(uint64_t n, char *buf)
+-{
+-	char p[6][4];
+-	int i = 0;
+-	int off = 0;
+-
+-	while (n > 1000) {
+-		sprintf(p[i], "%03d", (int)(n % 1000));
+-		n /= 1000;
+-		i += 1;
+-	}
+-
+-	sprintf(p[i++], "%d", (int)n);
+-
+-	while (i--)
+-		off += sprintf(buf + off, "%s,", p[i]);
+-	buf[strlen(buf) - 1] = '\0';
+-
+-	return buf;
+-}
+-
+ static void
+ packet_per_second_stats(void)
+ {
+@@ -1764,7 +1735,6 @@ packet_per_second_stats(void)
+ 		uint64_t total_rx_pkts = 0;
+ 		uint64_t total_tx_drops = 0;
+ 		uint64_t tx_delta, rx_delta, drops_delta;
+-		char buf[3][32];
+ 		int nr_valid_core = 0;
+ 
+ 		sleep(1);
+@@ -1789,10 +1759,8 @@ packet_per_second_stats(void)
+ 			tx_delta    = li->tx_pkts  - oli->tx_pkts;
+ 			rx_delta    = li->rx_pkts  - oli->rx_pkts;
+ 			drops_delta = li->tx_drops - oli->tx_drops;
+-			printf("%6d %16s %16s %16s\n", i,
+-				pretty_number(tx_delta,    buf[0]),
+-				pretty_number(drops_delta, buf[1]),
+-				pretty_number(rx_delta,    buf[2]));
++			printf("%6d %'16"PRId64" %'16"PRId64" %'16"PRId64"\n",
++				i, tx_delta, drops_delta, rx_delta);
+ 
+ 			total_tx_pkts  += tx_delta;
+ 			total_rx_pkts  += rx_delta;
+@@ -1803,10 +1771,9 @@ packet_per_second_stats(void)
+ 		}
+ 
+ 		if (nr_valid_core > 1) {
+-			printf("%6s %16s %16s %16s\n", "total",
+-				pretty_number(total_tx_pkts,  buf[0]),
+-				pretty_number(total_tx_drops, buf[1]),
+-				pretty_number(total_rx_pkts,  buf[2]));
++			printf("%6s %'16"PRId64" %'16"PRId64" %'16"PRId64"\n",
++				"total", total_tx_pkts, total_tx_drops,
++				total_rx_pkts);
+ 			nr_lines += 1;
+ 		}
+ 
+@@ -2139,6 +2106,9 @@ main(int argc, char **argv)
+ 	if (argc > 1)
+ 		args_parse(argc, argv);
+ 
++	/* For more fancy, localised integer formatting. */
++	setlocale(LC_NUMERIC, "");
++
+ 	init_port();
+ 
+ 	nb_lcores = rte_lcore_count();
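Note: the replacement for pretty_number() above relies on the printf ' flag (a POSIX extension honored by glibc), which only inserts grouping separators once a locale that defines them is active -- hence the setlocale(LC_NUMERIC, "") call added to main(). Standalone form:

    #include <inttypes.h>
    #include <locale.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t n = 1799321;

        printf("%'" PRId64 "\n", n);  /* "C" locale: prints 1799321 */

        /* Adopt the environment's locale, e.g. en_US.UTF-8 ... */
        setlocale(LC_NUMERIC, "");
        printf("%'" PRId64 "\n", n);  /* ...now prints 1,799,321 */
        return 0;
    }
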
+diff --git a/dpdk/app/test-pmd/5tswap.c b/dpdk/app/test-pmd/5tswap.c
+index 629d3e0d31..f041a5e1d5 100644
+--- a/dpdk/app/test-pmd/5tswap.c
++++ b/dpdk/app/test-pmd/5tswap.c
+@@ -185,9 +185,22 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
+ }
+ 
++static void
++stream_init_5tuple_swap(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
 +
-+#if defined(__KERNEL__) || defined(HAVE_TCA_STATS_PKT64)
-+#include_next <linux/gen_stats.h>
-+#else
-+#include <linux/types.h>
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
 +
-+enum {
-+	TCA_STATS_UNSPEC,
-+	TCA_STATS_BASIC,
-+	TCA_STATS_RATE_EST,
-+	TCA_STATS_QUEUE,
-+	TCA_STATS_APP,
-+	TCA_STATS_RATE_EST64,
-+	TCA_STATS_PAD,
-+	TCA_STATS_BASIC_HW,
-+	TCA_STATS_PKT64,
-+	__TCA_STATS_MAX,
-+};
-+#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
+ struct fwd_engine five_tuple_swap_fwd_engine = {
+ 	.fwd_mode_name  = "5tswap",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = stream_init_5tuple_swap,
+ 	.packet_fwd     = pkt_burst_5tuple_swap,
+ };
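Note: the stream_init hook added above lets the 5tswap engine skip streams whose RX or TX queue has been stopped from the testpmd CLI; fs->disabled is recomputed from the queue states whenever forwarding starts. Reduced to its core, the check is just (hypothetical types):

    enum queue_state { QUEUE_STOPPED, QUEUE_STARTED };

    struct stream {
        enum queue_state rxq_state, txq_state;
        int disabled;
    };

    /* A stream is unusable if either of its queues is stopped. */
    static void stream_init(struct stream *fs)
    {
        fs->disabled = fs->rxq_state == QUEUE_STOPPED ||
                       fs->txq_state == QUEUE_STOPPED;
    }
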
+diff --git a/dpdk/app/test-pmd/cmd_flex_item.c b/dpdk/app/test-pmd/cmd_flex_item.c
+index 908bcb3f47..3e54724237 100644
+--- a/dpdk/app/test-pmd/cmd_flex_item.c
++++ b/dpdk/app/test-pmd/cmd_flex_item.c
+@@ -20,6 +20,8 @@
+ struct flex_item *flex_items[RTE_MAX_ETHPORTS][FLEX_MAX_PARSERS_NUM];
+ struct flex_pattern flex_patterns[FLEX_MAX_PATTERNS_NUM];
+ 
++#ifdef RTE_HAS_JANSSON
++
+ static struct flex_item *
+ flex_parser_fetch(uint16_t port_id, uint16_t flex_id)
+ {
+@@ -34,7 +36,6 @@ flex_parser_fetch(uint16_t port_id, uint16_t flex_id)
+ 	return flex_items[port_id][flex_id];
+ }
+ 
+-#ifdef RTE_HAS_JANSSON
+ static __rte_always_inline bool
+ match_strkey(const char *key, const char *pattern)
+ {
+@@ -133,7 +134,8 @@ flex_link_item_parse(const char *src, struct rte_flow_item *item)
+ 	struct rte_flow_item *pattern;
+ 	struct rte_flow_action *actions;
+ 
+-	sprintf(flow_rule, "flow create 0 pattern %s / end", src);
++	sprintf(flow_rule,
++		"flow create 0 pattern %s / end actions drop / end", src);
+ 	src = flow_rule;
+ 	ret = flow_parse(src, (void *)data, sizeof(data),
+ 			 &attr, &pattern, &actions);
+@@ -368,23 +370,12 @@ flex_item_create(portid_t port_id, uint16_t flex_id, const char *filename)
+ 		free(fp);
+ }
+ 
+-#else /* RTE_HAS_JANSSON */
+-void flex_item_create(__rte_unused portid_t port_id,
+-		      __rte_unused uint16_t flex_id,
+-		      __rte_unused const char *filename)
+-{
+-	printf("cannot create flex item - no JSON library configured\n");
+-}
+-#endif /* RTE_HAS_JANSSON */
+-
+ void
+ flex_item_destroy(portid_t port_id, uint16_t flex_id)
+ {
+ 	int ret;
+ 	struct rte_flow_error error;
+ 	struct flex_item *fp = flex_parser_fetch(port_id, flex_id);
+-	if (!flex_id)
+-		return;
+ 	if (fp == FLEX_PARSER_ERR) {
+ 		printf("Bad parameters: port_id=%u flex_id=%u\n",
+ 		       port_id, flex_id);
+@@ -405,6 +396,22 @@ flex_item_destroy(portid_t port_id, uint16_t flex_id)
+ 	}
+ }
+ 
++#else /* RTE_HAS_JANSSON */
++void flex_item_create(__rte_unused portid_t port_id,
++		      __rte_unused uint16_t flex_id,
++		      __rte_unused const char *filename)
++{
++	printf("cannot create flex item - no JSON library configured\n");
++}
 +
-+/**
-+ * struct gnet_stats_basic - byte/packet throughput statistics
-+ * @bytes: number of seen bytes
-+ * @packets: number of seen packets
-+ */
-+struct gnet_stats_basic {
-+	__u64	bytes;
-+	__u32	packets;
-+};
++void
++flex_item_destroy(__rte_unused portid_t port_id, __rte_unused uint16_t flex_id)
++{
 +
-+/**
-+ * struct gnet_stats_rate_est - rate estimator
-+ * @bps: current byte rate
-+ * @pps: current packet rate
-+ */
-+struct gnet_stats_rate_est {
-+	__u32	bps;
-+	__u32	pps;
-+};
++}
 +
-+/**
-+ * struct gnet_stats_rate_est64 - rate estimator
-+ * @bps: current byte rate
-+ * @pps: current packet rate
-+ */
-+struct gnet_stats_rate_est64 {
-+	__u64	bps;
-+	__u64	pps;
-+};
++#endif /* RTE_HAS_JANSSON */
 +
-+/**
-+ * struct gnet_stats_queue - queuing statistics
-+ * @qlen: queue length
-+ * @backlog: backlog size of queue
-+ * @drops: number of dropped packets
-+ * @requeues: number of requeues
-+ * @overlimits: number of enqueues over the limit
-+ */
-+struct gnet_stats_queue {
-+	__u32	qlen;
-+	__u32	backlog;
-+	__u32	drops;
-+	__u32	requeues;
-+	__u32	overlimits;
-+};
+ void
+ port_flex_item_flush(portid_t port_id)
+ {
+diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c
+index 6e10afeedd..d9bf0eb3b3 100644
+--- a/dpdk/app/test-pmd/cmdline.c
++++ b/dpdk/app/test-pmd/cmdline.c
+@@ -561,7 +561,7 @@ static void cmd_help_long_parsed(void *parsed_result,
+ 			"    Set the option to enable display of RX and TX bursts.\n"
+ 
+ 			"set port (port_id) vf (vf_id) rx|tx on|off\n"
+-			"    Enable/Disable a VF receive/tranmit from a port\n\n"
++			"    Enable/Disable a VF receive/transmit from a port\n\n"
+ 
+ 			"set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM"
+ 			"|MPE) (on|off)\n"
+@@ -2045,10 +2045,6 @@ cmd_config_mtu_parsed(void *parsed_result,
+ {
+ 	struct cmd_config_mtu_result *res = parsed_result;
+ 
+-	if (res->value < RTE_ETHER_MIN_LEN) {
+-		fprintf(stderr, "mtu cannot be less than %d\n", RTE_ETHER_MIN_LEN);
+-		return;
+-	}
+ 	port_mtu_set(res->port_id, res->value);
+ }
+ 
+@@ -2651,8 +2647,10 @@ cmd_config_rxtx_queue_parsed(void *parsed_result,
+ 			__rte_unused void *data)
+ {
+ 	struct cmd_config_rxtx_queue *res = parsed_result;
++	struct rte_port *port;
+ 	uint8_t isrx;
+ 	uint8_t isstart;
++	uint8_t *state;
+ 	int ret = 0;
+ 
+ 	if (test_done == 0) {
+@@ -2700,8 +2698,15 @@ cmd_config_rxtx_queue_parsed(void *parsed_result,
+ 	else
+ 		ret = rte_eth_dev_tx_queue_stop(res->portid, res->qid);
+ 
+-	if (ret == -ENOTSUP)
++	if (ret == -ENOTSUP) {
+ 		fprintf(stderr, "Function not supported in PMD\n");
++		return;
++	}
++
++	port = &ports[res->portid];
++	state = isrx ? &port->rxq[res->qid].state : &port->txq[res->qid].state;
++	*state = isstart ? RTE_ETH_QUEUE_STATE_STARTED :
++			   RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ 
+ cmdline_parse_token_string_t cmd_config_rxtx_queue_port =
+@@ -2770,11 +2775,11 @@ cmd_config_deferred_start_rxtx_queue_parsed(void *parsed_result,
+ 
+ 	ison = !strcmp(res->state, "on");
+ 
+-	if (isrx && port->rx_conf[res->qid].rx_deferred_start != ison) {
+-		port->rx_conf[res->qid].rx_deferred_start = ison;
++	if (isrx && port->rxq[res->qid].conf.rx_deferred_start != ison) {
++		port->rxq[res->qid].conf.rx_deferred_start = ison;
+ 		needreconfig = 1;
+-	} else if (!isrx && port->tx_conf[res->qid].tx_deferred_start != ison) {
+-		port->tx_conf[res->qid].tx_deferred_start = ison;
++	} else if (!isrx && port->txq[res->qid].conf.tx_deferred_start != ison) {
++		port->txq[res->qid].conf.tx_deferred_start = ison;
+ 		needreconfig = 1;
+ 	}
+ 
+@@ -2892,7 +2897,7 @@ cmd_setup_rxtx_queue_parsed(
+ 				     res->qid,
+ 				     port->nb_rx_desc[res->qid],
+ 				     socket_id,
+-				     &port->rx_conf[res->qid],
++				     &port->rxq[res->qid].conf,
+ 				     mp);
+ 		if (ret)
+ 			fprintf(stderr, "Failed to setup RX queue\n");
+@@ -2910,7 +2915,7 @@ cmd_setup_rxtx_queue_parsed(
+ 					     res->qid,
+ 					     port->nb_tx_desc[res->qid],
+ 					     socket_id,
+-					     &port->tx_conf[res->qid]);
++					     &port->txq[res->qid].conf);
+ 		if (ret)
+ 			fprintf(stderr, "Failed to setup TX queue\n");
+ 	}
+@@ -3120,7 +3125,7 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
+ 		return -1;
+ 	}
+ 	for (i = 0; i < ret; i++)
+-		conf[i].mask = (uint64_t)strtoul(str_fld[i], &end, 0);
++		conf[i].mask = (uint64_t)strtoull(str_fld[i], &end, 0);
+ 
+ 	return 0;
+ }
+@@ -4686,7 +4691,7 @@ cmd_config_queue_tx_offloads(struct rte_port *port)
+ 
+ 	/* Apply queue tx offloads configuration */
+ 	for (k = 0; k < port->dev_info.max_tx_queues; k++)
+-		port->tx_conf[k].offloads =
++		port->txq[k].conf.offloads =
+ 			port->dev_conf.txmode.offloads;
+ }
+ 
+@@ -5915,6 +5920,19 @@ static void cmd_set_bonding_mode_parsed(void *parsed_result,
+ {
+ 	struct cmd_set_bonding_mode_result *res = parsed_result;
+ 	portid_t port_id = res->port_id;
++	struct rte_port *port = &ports[port_id];
++
++	/*
++	 * Bonding mode changed means resources of device changed, like whether
++	 * started rte timer or not. Device should be restarted when resources
++	 * of device changed.
++	 */
++	if (port->port_status != RTE_PORT_STOPPED) {
++		fprintf(stderr,
++			"\t Error: Can't set bonding mode when port %d is not stopped\n",
++			port_id);
++		return;
++	}
+ 
+ 	/* Set the bonding mode for the relevant port. */
+ 	if (0 != rte_eth_bond_mode_set(port_id, res->value))
+@@ -6651,6 +6669,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result,
+ 				"Failed to enable promiscuous mode for port %u: %s - ignore\n",
+ 				port_id, rte_strerror(-ret));
+ 
++		ports[port_id].bond_flag = 1;
+ 		ports[port_id].need_setup = 0;
+ 		ports[port_id].port_status = RTE_PORT_STOPPED;
+ 	}
+@@ -8754,6 +8773,7 @@ static void cmd_quit_parsed(__rte_unused void *parsed_result,
+ 			    __rte_unused void *data)
+ {
+ 	cmdline_quit(cl);
++	cl_quit = 1;
+ }
+ 
+ cmdline_parse_token_string_t cmd_quit_quit =
+@@ -9273,6 +9293,7 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
+ 	}
+ 
+ 	RTE_SET_USED(is_on);
++	RTE_SET_USED(vf_rxmode);
+ 
+ #ifdef RTE_NET_IXGBE
+ 	if (ret == -ENOTSUP)
+@@ -16068,7 +16089,7 @@ cmd_rx_offload_get_configuration_parsed(
+ 
+ 	nb_rx_queues = dev_info.nb_rx_queues;
+ 	for (q = 0; q < nb_rx_queues; q++) {
+-		queue_offloads = port->rx_conf[q].offloads;
++		queue_offloads = port->rxq[q].conf.offloads;
+ 		printf("  Queue[%2d] :", q);
+ 		print_rx_offloads(queue_offloads);
+ 		printf("\n");
+@@ -16188,11 +16209,11 @@ cmd_config_per_port_rx_offload_parsed(void *parsed_result,
+ 	if (!strcmp(res->on_off, "on")) {
+ 		port->dev_conf.rxmode.offloads |= single_offload;
+ 		for (q = 0; q < nb_rx_queues; q++)
+-			port->rx_conf[q].offloads |= single_offload;
++			port->rxq[q].conf.offloads |= single_offload;
+ 	} else {
+ 		port->dev_conf.rxmode.offloads &= ~single_offload;
+ 		for (q = 0; q < nb_rx_queues; q++)
+-			port->rx_conf[q].offloads &= ~single_offload;
++			port->rxq[q].conf.offloads &= ~single_offload;
+ 	}
+ 
+ 	cmd_reconfig_device_queue(port_id, 1, 1);
+@@ -16298,9 +16319,9 @@ cmd_config_per_queue_rx_offload_parsed(void *parsed_result,
+ 	}
+ 
+ 	if (!strcmp(res->on_off, "on"))
+-		port->rx_conf[queue_id].offloads |= single_offload;
++		port->rxq[queue_id].conf.offloads |= single_offload;
+ 	else
+-		port->rx_conf[queue_id].offloads &= ~single_offload;
++		port->rxq[queue_id].conf.offloads &= ~single_offload;
+ 
+ 	cmd_reconfig_device_queue(port_id, 1, 1);
+ }
+@@ -16487,7 +16508,7 @@ cmd_tx_offload_get_configuration_parsed(
+ 
+ 	nb_tx_queues = dev_info.nb_tx_queues;
+ 	for (q = 0; q < nb_tx_queues; q++) {
+-		queue_offloads = port->tx_conf[q].offloads;
++		queue_offloads = port->txq[q].conf.offloads;
+ 		printf("  Queue[%2d] :", q);
+ 		print_tx_offloads(queue_offloads);
+ 		printf("\n");
+@@ -16611,11 +16632,11 @@ cmd_config_per_port_tx_offload_parsed(void *parsed_result,
+ 	if (!strcmp(res->on_off, "on")) {
+ 		port->dev_conf.txmode.offloads |= single_offload;
+ 		for (q = 0; q < nb_tx_queues; q++)
+-			port->tx_conf[q].offloads |= single_offload;
++			port->txq[q].conf.offloads |= single_offload;
+ 	} else {
+ 		port->dev_conf.txmode.offloads &= ~single_offload;
+ 		for (q = 0; q < nb_tx_queues; q++)
+-			port->tx_conf[q].offloads &= ~single_offload;
++			port->txq[q].conf.offloads &= ~single_offload;
+ 	}
+ 
+ 	cmd_reconfig_device_queue(port_id, 1, 1);
+@@ -16724,9 +16745,9 @@ cmd_config_per_queue_tx_offload_parsed(void *parsed_result,
+ 	}
+ 
+ 	if (!strcmp(res->on_off, "on"))
+-		port->tx_conf[queue_id].offloads |= single_offload;
++		port->txq[queue_id].conf.offloads |= single_offload;
+ 	else
+-		port->tx_conf[queue_id].offloads &= ~single_offload;
++		port->txq[queue_id].conf.offloads &= ~single_offload;
+ 
+ 	cmd_reconfig_device_queue(port_id, 1, 1);
+ }
+@@ -17829,6 +17850,7 @@ cmdline_parse_ctx_t main_ctx[] = {
+ 	(cmdline_parse_inst_t *)&cmd_show_port_meter_cap,
+ 	(cmdline_parse_inst_t *)&cmd_add_port_meter_profile_srtcm,
+ 	(cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm,
++	(cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm_rfc4115,
+ 	(cmdline_parse_inst_t *)&cmd_del_port_meter_profile,
+ 	(cmdline_parse_inst_t *)&cmd_create_port_meter,
+ 	(cmdline_parse_inst_t *)&cmd_enable_port_meter,
+diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c
+index bbe3dc0115..5c4544a753 100644
+--- a/dpdk/app/test-pmd/cmdline_flow.c
++++ b/dpdk/app/test-pmd/cmdline_flow.c
+@@ -2162,7 +2162,7 @@ static const struct token token_list[] = {
+ 	},
+ 	[COMMON_POLICY_ID] = {
+ 		.name = "{policy_id}",
+-		.type = "POLCIY_ID",
++		.type = "POLICY_ID",
+ 		.help = "policy id",
+ 		.call = parse_int,
+ 		.comp = comp_none,
+@@ -2370,7 +2370,7 @@ static const struct token token_list[] = {
+ 	},
+ 	[TUNNEL_DESTROY] = {
+ 		.name = "destroy",
+-		.help = "destroy tunel",
++		.help = "destroy tunnel",
+ 		.next = NEXT(NEXT_ENTRY(TUNNEL_DESTROY_ID),
+ 			     NEXT_ENTRY(COMMON_PORT_ID)),
+ 		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
+@@ -2378,7 +2378,7 @@ static const struct token token_list[] = {
+ 	},
+ 	[TUNNEL_DESTROY_ID] = {
+ 		.name = "id",
+-		.help = "tunnel identifier to testroy",
++		.help = "tunnel identifier to destroy",
+ 		.next = NEXT(NEXT_ENTRY(COMMON_UNSIGNED)),
+ 		.args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
+ 		.call = parse_tunnel,
+@@ -7702,16 +7702,14 @@ parse_string(struct context *ctx, const struct token *token,
+ static int
+ parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
+ {
+-	uint32_t left = *size;
+ 	const uint8_t *head = dst;
++	uint32_t left;
+ 
+-	/* Check input parameters */
+-	if ((src == NULL) ||
+-		(dst == NULL) ||
+-		(size == NULL) ||
+-		(*size == 0))
++	if (*size == 0)
+ 		return -1;
+ 
++	left = *size;
++
+ 	/* Convert chars to bytes */
+ 	while (left) {
+ 		char tmp[3], *end = tmp;
+@@ -9153,7 +9151,8 @@ cmd_set_raw_parsed(const struct buffer *in)
+ 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
+ 			opt = (const struct rte_flow_item_geneve_opt *)
+ 								item->spec;
+-			size = offsetof(struct rte_flow_item_geneve_opt, data);
++			size = offsetof(struct rte_flow_item_geneve_opt,
++					option_len) + sizeof(uint8_t);
+ 			if (opt->option_len && opt->data) {
+ 				*total_size += opt->option_len *
+ 					       sizeof(uint32_t);
+@@ -9210,19 +9209,15 @@ cmd_set_raw_parsed(const struct buffer *in)
+ 			} else {
+ 				const struct rte_flow_item_gtp_psc
+ 					*opt = item->spec;
+-				struct {
+-					uint8_t len;
+-					uint8_t pdu_type:4;
+-					uint8_t qfi:6;
+-					uint8_t next;
+-				} psc;
+-				psc.len = sizeof(psc) / 4;
+-				psc.pdu_type = opt->hdr.type;
+-				psc.qfi = opt->hdr.qfi;
+-				psc.next = 0;
+-				*total_size += sizeof(psc);
+-				rte_memcpy(data_tail - (*total_size),
+-					   &psc, sizeof(psc));
++				struct rte_gtp_psc_generic_hdr *hdr;
++				size_t hdr_size = RTE_ALIGN(sizeof(*hdr),
++							 sizeof(int32_t));
++
++				*total_size += hdr_size;
++				hdr = (typeof(hdr))(data_tail - (*total_size));
++				memset(hdr, 0, hdr_size);
++				*hdr = opt->hdr;
++				hdr->ext_hdr_len = 1;
+ 				gtp_psc = i;
+ 				size = 0;
+ 			}
+diff --git a/dpdk/app/test-pmd/cmdline_mtr.c b/dpdk/app/test-pmd/cmdline_mtr.c
+index ad7ef6ad98..a0f885f190 100644
+--- a/dpdk/app/test-pmd/cmdline_mtr.c
++++ b/dpdk/app/test-pmd/cmdline_mtr.c
+@@ -817,8 +817,8 @@ static void cmd_create_port_meter_parsed(void *parsed_result,
+ cmdline_parse_inst_t cmd_create_port_meter = {
+ 	.f = cmd_create_port_meter_parsed,
+ 	.data = NULL,
+-	.help_str = "create port meter <port_id> <mtr_id> <profile_id> <meter_enable>(yes|no) "
+-		"<stats_mask> <shared> <use_pre_meter_color> "
++	.help_str = "create port meter <port_id> <mtr_id> <profile_id> <policy_id> "
++		"<meter_enable>(yes|no) <stats_mask> <shared> <use_pre_meter_color> "
+ 		"[<dscp_tbl_entry0> <dscp_tbl_entry1> ...<dscp_tbl_entry63>]",
+ 	.tokens = {
+ 		(void *)&cmd_create_port_meter_create,
+diff --git a/dpdk/app/test-pmd/cmdline_tm.c b/dpdk/app/test-pmd/cmdline_tm.c
+index bfbd43ca9b..c058b8946e 100644
+--- a/dpdk/app/test-pmd/cmdline_tm.c
++++ b/dpdk/app/test-pmd/cmdline_tm.c
+@@ -69,7 +69,7 @@ print_err_msg(struct rte_tm_error *error)
+ 		[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS]
+ 			= "num shared shapers field (node params)",
+ 		[RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE]
+-			= "wfq weght mode field (node params)",
++			= "wfq weight mode field (node params)",
+ 		[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES]
+ 			= "num strict priorities field (node params)",
+ 		[RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN]
+@@ -479,7 +479,7 @@ static void cmd_show_port_tm_level_cap_parsed(void *parsed_result,
+ cmdline_parse_inst_t cmd_show_port_tm_level_cap = {
+ 	.f = cmd_show_port_tm_level_cap_parsed,
+ 	.data = NULL,
+-	.help_str = "Show Port TM Hierarhical level Capabilities",
++	.help_str = "Show port TM hierarchical level capabilities",
+ 	.tokens = {
+ 		(void *)&cmd_show_port_tm_level_cap_show,
+ 		(void *)&cmd_show_port_tm_level_cap_port,
+diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c
+index 1722d6c8f8..ad1b5f51d5 100644
+--- a/dpdk/app/test-pmd/config.c
++++ b/dpdk/app/test-pmd/config.c
+@@ -66,8 +66,6 @@
+ 
+ #define NS_PER_SEC 1E9
+ 
+-static char *flowtype_to_str(uint16_t flow_type);
+-
+ static const struct {
+ 	enum tx_pkt_split split;
+ 	const char *name;
+@@ -248,14 +246,20 @@ nic_stats_display(portid_t port_id)
+ 								diff_ns;
+ 	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
+ 	struct rte_eth_stats stats;
+-
+ 	static const char *nic_stats_border = "########################";
++	int ret;
+ 
+ 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ 		print_valid_ports();
+ 		return;
+ 	}
+-	rte_eth_stats_get(port_id, &stats);
++	ret = rte_eth_stats_get(port_id, &stats);
++	if (ret != 0) {
++		fprintf(stderr,
++			"%s: Error: failed to get stats (port %u): %d",
++			__func__, port_id, ret);
++		return;
++	}
+ 	printf("\n  %s NIC statistics for port %-2d %s\n",
+ 	       nic_stats_border, port_id, nic_stats_border);
+ 
+@@ -668,6 +672,19 @@ print_dev_capabilities(uint64_t capabilities)
+ 	}
+ }
+ 
++const char *
++rsstypes_to_str(uint64_t rss_type)
++{
++	uint16_t i;
 +
-+/**
-+ * struct gnet_estimator - rate estimator configuration
-+ * @interval: sampling period
-+ * @ewma_log: the log of measurement window weight
-+ */
-+struct gnet_estimator {
-+	signed char	interval;
-+	unsigned char	ewma_log;
-+};
++	for (i = 0; rss_type_table[i].str != NULL; i++) {
++		if (rss_type_table[i].rss_type == rss_type)
++			return rss_type_table[i].str;
++	}
 +
-+#endif /* __KERNEL__ || !HAVE_TCA_STATS_PKT64 */
-+#endif /* __LINUX_GEN_STATS_WRAPPER_H */
-diff --git a/include/openvswitch/flow.h b/include/openvswitch/flow.h
-index 3054015d93..df10cf579e 100644
---- a/include/openvswitch/flow.h
-+++ b/include/openvswitch/flow.h
-@@ -141,15 +141,14 @@ struct flow {
-     uint8_t nw_tos;             /* IP ToS (including DSCP and ECN). */
-     uint8_t nw_ttl;             /* IP TTL/Hop Limit. */
-     uint8_t nw_proto;           /* IP protocol or low 8 bits of ARP opcode. */
-+    /* L4 (64-bit aligned) */
-     struct in6_addr nd_target;  /* IPv6 neighbor discovery (ND) target. */
-     struct eth_addr arp_sha;    /* ARP/ND source hardware address. */
-     struct eth_addr arp_tha;    /* ARP/ND target hardware address. */
--    ovs_be16 tcp_flags;         /* TCP flags/ICMPv6 ND options type.
--                                 * With L3 to avoid matching L4. */
-+    ovs_be16 tcp_flags;         /* TCP flags/ICMPv6 ND options type. */
-     ovs_be16 pad2;              /* Pad to 64 bits. */
-     struct ovs_key_nsh nsh;     /* Network Service Header keys */
++	return NULL;
++}
++
+ void
+ port_infos_display(portid_t port_id)
+ {
+@@ -772,19 +789,20 @@ port_infos_display(portid_t port_id)
+ 	if (!dev_info.flow_type_rss_offloads)
+ 		printf("No RSS offload flow type is supported.\n");
+ 	else {
++		uint64_t rss_offload_types = dev_info.flow_type_rss_offloads;
+ 		uint16_t i;
+-		char *p;
+ 
+ 		printf("Supported RSS offload flow types:\n");
+-		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
+-		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
+-			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
+-				continue;
+-			p = flowtype_to_str(i);
+-			if (p)
+-				printf("  %s\n", p);
+-			else
+-				printf("  user defined %d\n", i);
++		for (i = 0; i < sizeof(rss_offload_types) * CHAR_BIT; i++) {
++			uint64_t rss_offload = RTE_BIT64(i);
++			if ((rss_offload_types & rss_offload) != 0) {
++				const char *p = rsstypes_to_str(rss_offload);
++				if (p)
++					printf("  %s\n", p);
++				else
++					printf("  user defined %u\n",
++					       i);
++			}
+ 		}
+ 	}
  
--    /* L4 (64-bit aligned) */
-     ovs_be16 tp_src;            /* TCP/UDP/SCTP source port/ICMP type. */
-     ovs_be16 tp_dst;            /* TCP/UDP/SCTP destination port/ICMP code. */
-     ovs_be16 ct_tp_src;         /* CT original tuple source port/ICMP type. */
-@@ -179,7 +178,7 @@ BUILD_ASSERT_DECL(offsetof(struct flow, igmp_group_ip4) + sizeof(uint32_t)
- enum {
-     FLOW_SEGMENT_1_ENDS_AT = offsetof(struct flow, dl_dst),
-     FLOW_SEGMENT_2_ENDS_AT = offsetof(struct flow, nw_src),
--    FLOW_SEGMENT_3_ENDS_AT = offsetof(struct flow, tp_src),
-+    FLOW_SEGMENT_3_ENDS_AT = offsetof(struct flow, nd_target),
- };
- BUILD_ASSERT_DECL(FLOW_SEGMENT_1_ENDS_AT % sizeof(uint64_t) == 0);
- BUILD_ASSERT_DECL(FLOW_SEGMENT_2_ENDS_AT % sizeof(uint64_t) == 0);
-diff --git a/include/openvswitch/hmap.h b/include/openvswitch/hmap.h
-index 4e001cc692..beb48295b9 100644
---- a/include/openvswitch/hmap.h
-+++ b/include/openvswitch/hmap.h
-@@ -134,17 +134,17 @@ struct hmap_node *hmap_random_node(const struct hmap *);
-  * without using 'break', NODE will be NULL.  This is true for all of the
-  * HMAP_FOR_EACH_*() macros.
-  */
--#define HMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, HMAP)               \
--    for (INIT_CONTAINER(NODE, hmap_first_with_hash(HMAP, HASH), MEMBER); \
--         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
--         || ((NODE = NULL), false);                                     \
--         ASSIGN_CONTAINER(NODE, hmap_next_with_hash(&(NODE)->MEMBER),   \
--                          MEMBER))
--#define HMAP_FOR_EACH_IN_BUCKET(NODE, MEMBER, HASH, HMAP)               \
--    for (INIT_CONTAINER(NODE, hmap_first_in_bucket(HMAP, HASH), MEMBER); \
--         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
--         || ((NODE = NULL), false);                                     \
--         ASSIGN_CONTAINER(NODE, hmap_next_in_bucket(&(NODE)->MEMBER), MEMBER))
-+#define HMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, HMAP)                     \
-+    for (INIT_MULTIVAR(NODE, MEMBER, hmap_first_with_hash(HMAP, HASH),        \
-+                       struct hmap_node);                                     \
-+         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
-+         UPDATE_MULTIVAR(NODE, hmap_next_with_hash(ITER_VAR(NODE))))
-+
-+#define HMAP_FOR_EACH_IN_BUCKET(NODE, MEMBER, HASH, HMAP)                     \
-+    for (INIT_MULTIVAR(NODE, MEMBER, hmap_first_in_bucket(HMAP, HASH),        \
-+                       struct hmap_node);                                     \
-+         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
-+         UPDATE_MULTIVAR(NODE, hmap_next_in_bucket(ITER_VAR(NODE))))
+@@ -912,10 +930,15 @@ port_eeprom_display(portid_t port_id)
+ 		return;
+ 	}
  
- static inline struct hmap_node *hmap_first_with_hash(const struct hmap *,
-                                                      size_t hash);
-@@ -170,54 +170,80 @@ bool hmap_contains(const struct hmap *, const struct hmap_node *);
- /* Iterates through every node in HMAP. */
- #define HMAP_FOR_EACH(NODE, MEMBER, HMAP) \
-     HMAP_FOR_EACH_INIT(NODE, MEMBER, HMAP, (void) 0)
--#define HMAP_FOR_EACH_INIT(NODE, MEMBER, HMAP, ...)                     \
--    for (INIT_CONTAINER(NODE, hmap_first(HMAP), MEMBER), __VA_ARGS__;   \
--         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
--         || ((NODE = NULL), false);                                     \
--         ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER))
-+#define HMAP_FOR_EACH_INIT(NODE, MEMBER, HMAP, ...)                           \
-+    for (INIT_MULTIVAR_EXP(NODE, MEMBER, hmap_first(HMAP), struct hmap_node,  \
-+                           __VA_ARGS__);                                      \
-+         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
-+         UPDATE_MULTIVAR(NODE, hmap_next(HMAP, ITER_VAR(NODE))))
+-	char buf[len_eeprom];
+ 	einfo.offset = 0;
+ 	einfo.length = len_eeprom;
+-	einfo.data = buf;
++	einfo.data = calloc(1, len_eeprom);
++	if (!einfo.data) {
++		fprintf(stderr,
++			"Allocation of port %u eeprom data failed\n",
++			port_id);
++		return;
++	}
+ 
+ 	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
+ 	if (ret != 0) {
+@@ -933,10 +956,12 @@ port_eeprom_display(portid_t port_id)
+ 			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
+ 			break;
+ 		}
++		free(einfo.data);
+ 		return;
+ 	}
+ 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
+ 	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
++	free(einfo.data);
+ }
  
- /* Safe when NODE may be freed (not needed when NODE may be removed from the
-  * hash map but its members remain accessible and intact). */
--#define HMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, HMAP) \
--    HMAP_FOR_EACH_SAFE_INIT(NODE, NEXT, MEMBER, HMAP, (void) 0)
--#define HMAP_FOR_EACH_SAFE_INIT(NODE, NEXT, MEMBER, HMAP, ...)          \
--    for (INIT_CONTAINER(NODE, hmap_first(HMAP), MEMBER), __VA_ARGS__;   \
--         ((NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))               \
--          || ((NODE = NULL), false)                                     \
--          ? INIT_CONTAINER(NEXT, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER), 1 \
--          : 0);                                                         \
--         (NODE) = (NEXT))
-+#define HMAP_FOR_EACH_SAFE_LONG(NODE, NEXT, MEMBER, HMAP) \
-+    HMAP_FOR_EACH_SAFE_LONG_INIT (NODE, NEXT, MEMBER, HMAP, (void) NEXT)
-+
-+#define HMAP_FOR_EACH_SAFE_LONG_INIT(NODE, NEXT, MEMBER, HMAP, ...)           \
-+    for (INIT_MULTIVAR_SAFE_LONG_EXP(NODE, NEXT, MEMBER, hmap_first(HMAP),    \
-+                                     struct hmap_node, __VA_ARGS__);          \
-+         CONDITION_MULTIVAR_SAFE_LONG(NODE, NEXT, MEMBER,                     \
-+                                      ITER_VAR(NODE) != NULL,                 \
-+                            ITER_VAR(NEXT) = hmap_next(HMAP, ITER_VAR(NODE)), \
-+                                      ITER_VAR(NEXT) != NULL);                \
-+         UPDATE_MULTIVAR_SAFE_LONG(NODE, NEXT))
+ void
+@@ -972,10 +997,15 @@ port_module_eeprom_display(portid_t port_id)
+ 		return;
+ 	}
+ 
+-	char buf[minfo.eeprom_len];
+ 	einfo.offset = 0;
+ 	einfo.length = minfo.eeprom_len;
+-	einfo.data = buf;
++	einfo.data = calloc(1, minfo.eeprom_len);
++	if (!einfo.data) {
++		fprintf(stderr,
++			"Allocation of port %u eeprom data failed\n",
++			port_id);
++		return;
++	}
+ 
+ 	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
+ 	if (ret != 0) {
+@@ -994,11 +1024,13 @@ port_module_eeprom_display(portid_t port_id)
+ 				ret);
+ 			break;
+ 		}
++		free(einfo.data);
+ 		return;
+ 	}
+ 
+ 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
+ 	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
++	free(einfo.data);
+ }
+ 
+ int
+@@ -1233,6 +1265,57 @@ port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
+ 	display_port_reg_value(port_id, reg_off, reg_v);
+ }
+ 
++static uint32_t
++eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
++{
++	uint32_t overhead_len;
 +
-+/* Short versions of HMAP_FOR_EACH_SAFE. */
-+#define HMAP_FOR_EACH_SAFE_SHORT(NODE, MEMBER, HMAP)                          \
-+    HMAP_FOR_EACH_SAFE_SHORT_INIT (NODE, MEMBER, HMAP, (void) 0)
++	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
++		overhead_len = max_rx_pktlen - max_mtu;
++	else
++		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 +
-+#define HMAP_FOR_EACH_SAFE_SHORT_INIT(NODE, MEMBER, HMAP, ...)                \
-+    for (INIT_MULTIVAR_SAFE_SHORT_EXP(NODE, MEMBER, hmap_first(HMAP),         \
-+                                      struct hmap_node, __VA_ARGS__);         \
-+         CONDITION_MULTIVAR_SAFE_SHORT(NODE, MEMBER,                          \
-+                                       ITER_VAR(NODE) != NULL,                \
-+                      ITER_NEXT_VAR(NODE) = hmap_next(HMAP, ITER_VAR(NODE))); \
-+         UPDATE_MULTIVAR_SAFE_SHORT(NODE))
++	return overhead_len;
++}
 +
-+#define HMAP_FOR_EACH_SAFE(...)                                               \
-+    OVERLOAD_SAFE_MACRO(HMAP_FOR_EACH_SAFE_LONG,                              \
-+                        HMAP_FOR_EACH_SAFE_SHORT,                             \
-+                        4, __VA_ARGS__)
++static int
++eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu)
++{
++	struct rte_eth_dev_info dev_info;
++	uint32_t overhead_len;
++	uint32_t frame_size;
++	int ret;
++
++	ret = rte_eth_dev_info_get(port_id, &dev_info);
++	if (ret != 0)
++		return ret;
++
++	if (mtu < dev_info.min_mtu) {
++		fprintf(stderr,
++			"MTU (%u) < device min MTU (%u) for port_id %u\n",
++			mtu, dev_info.min_mtu, port_id);
++		return -EINVAL;
++	}
++	if (mtu > dev_info.max_mtu) {
++		fprintf(stderr,
++			"MTU (%u) > device max MTU (%u) for port_id %u\n",
++			mtu, dev_info.max_mtu, port_id);
++		return -EINVAL;
++	}
++
++	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
++			dev_info.max_mtu);
++	frame_size = mtu + overhead_len;
++	if (frame_size > dev_info.max_rx_pktlen) {
++		fprintf(stderr,
++			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
++			frame_size, dev_info.max_rx_pktlen, port_id);
++		return -EINVAL;
++	}
++
++	return 0;
++}
 +
+ void
+ port_mtu_set(portid_t port_id, uint16_t mtu)
+ {
+@@ -1242,6 +1325,10 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
+ 	if (port_id_is_invalid(port_id, ENABLED_WARN))
+ 		return;
+ 
++	diag = eth_dev_validate_mtu(port_id, mtu);
++	if (diag != 0)
++		return;
++
+ 	if (port->need_reconfig == 0) {
+ 		diag = rte_eth_dev_set_mtu(port_id, mtu);
+ 		if (diag != 0) {
+@@ -1682,6 +1769,37 @@ port_action_handle_destroy(portid_t port_id,
+ 	return ret;
+ }
  
- /* Continues an iteration from just after NODE. */
- #define HMAP_FOR_EACH_CONTINUE(NODE, MEMBER, HMAP) \
-     HMAP_FOR_EACH_CONTINUE_INIT(NODE, MEMBER, HMAP, (void) 0)
--#define HMAP_FOR_EACH_CONTINUE_INIT(NODE, MEMBER, HMAP, ...)            \
--    for (ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER), \
--         __VA_ARGS__;                                                   \
--         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
--         || ((NODE = NULL), false);                                     \
--         ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER))
-+#define HMAP_FOR_EACH_CONTINUE_INIT(NODE, MEMBER, HMAP, ...)                  \
-+    for (INIT_MULTIVAR_EXP(NODE, MEMBER, hmap_next(HMAP, &(NODE)->MEMBER),    \
-+                           struct hmap_node, __VA_ARGS__);                    \
-+         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
-+         UPDATE_MULTIVAR(NODE, hmap_next(HMAP, ITER_VAR(NODE))))
-+
-+struct hmap_pop_helper_iter__ {
-+    size_t bucket;
-+    struct hmap_node *node;
-+};
++int
++port_action_handle_flush(portid_t port_id)
++{
++	struct rte_port *port;
++	struct port_indirect_action **tmp;
++	int ret = 0;
++
++	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
++	    port_id == (portid_t)RTE_PORT_ALL)
++		return -EINVAL;
++	port = &ports[port_id];
++	tmp = &port->actions_list;
++	while (*tmp != NULL) {
++		struct rte_flow_error error;
++		struct port_indirect_action *pia = *tmp;
++
++		/* Poisoning to make sure PMDs update it in case of error. */
++		memset(&error, 0x44, sizeof(error));
++		if (pia->handle != NULL &&
++		    rte_flow_action_handle_destroy
++					(port_id, pia->handle, &error) != 0) {
++			printf("Indirect action #%u not destroyed\n", pia->id);
++			ret = port_flow_complain(&error);
++			tmp = &pia->next;
++		} else {
++			*tmp = pia->next;
++			free(pia);
++		}
++	}
++	return ret;
++}
  
--static inline struct hmap_node *
--hmap_pop_helper__(struct hmap *hmap, size_t *bucket) {
-+static inline void
-+hmap_pop_helper__(struct hmap *hmap, struct hmap_pop_helper_iter__ *iter) {
+ /** Get indirect action by port + id */
+ struct rte_flow_action_handle *
+@@ -2758,8 +2876,8 @@ rxtx_config_display(void)
+ 	       nb_fwd_lcores, nb_fwd_ports);
+ 
+ 	RTE_ETH_FOREACH_DEV(pid) {
+-		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
+-		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
++		struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf;
++		struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf;
+ 		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
+ 		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
+ 		struct rte_eth_rxq_info rx_qinfo;
+@@ -3017,7 +3135,7 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
+ 			fs = fwd_streams[sm_id];
+ 			port = &ports[fs->rx_port];
+ 			dev_info = &port->dev_info;
+-			rxq_conf = &port->rx_conf[fs->rx_queue];
++			rxq_conf = &port->rxq[fs->rx_queue].conf;
+ 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+ 			    == 0 || rxq_conf->share_group == 0)
+ 				/* Not shared rxq. */
+@@ -3077,7 +3195,7 @@ pkt_fwd_shared_rxq_check(void)
+ 			fs->lcore = fwd_lcores[lc_id];
+ 			port = &ports[fs->rx_port];
+ 			dev_info = &port->dev_info;
+-			rxq_conf = &port->rx_conf[fs->rx_queue];
++			rxq_conf = &port->rxq[fs->rx_queue].conf;
+ 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+ 			    == 0 || rxq_conf->share_group == 0)
+ 				/* Not shared rxq. */
+@@ -4719,6 +4837,8 @@ set_record_burst_stats(uint8_t on_off)
+ 	record_burst_stats = on_off;
+ }
  
--    for (; *bucket <= hmap->mask; (*bucket)++) {
--        struct hmap_node *node = hmap->buckets[*bucket];
-+    for (; iter->bucket <= hmap->mask; (iter->bucket)++) {
-+        struct hmap_node *node = hmap->buckets[iter->bucket];
++#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
++
+ static char*
+ flowtype_to_str(uint16_t flow_type)
+ {
+@@ -4762,8 +4882,6 @@ flowtype_to_str(uint16_t flow_type)
+ 	return NULL;
+ }
  
-         if (node) {
-             hmap_remove(hmap, node);
--            return node;
-+            iter->node = node;
-+            return;
-         }
-     }
+-#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
 -
--    return NULL;
-+    iter->node = NULL;
+ static inline void
+ print_fdir_mask(struct rte_eth_fdir_masks *mask)
+ {
+@@ -5185,6 +5303,25 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
+ 		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
  }
  
--#define HMAP_FOR_EACH_POP(NODE, MEMBER, HMAP)                               \
--    for (size_t bucket__ = 0;                                               \
--         INIT_CONTAINER(NODE, hmap_pop_helper__(HMAP, &bucket__), MEMBER),  \
--         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                    \
--         || ((NODE = NULL), false);)
-+#define HMAP_FOR_EACH_POP(NODE, MEMBER, HMAP)                                 \
-+    for (struct hmap_pop_helper_iter__ ITER_VAR(NODE) = { 0, NULL };          \
-+         hmap_pop_helper__(HMAP, &ITER_VAR(NODE)),                            \
-+         (ITER_VAR(NODE).node != NULL) ?                                      \
-+            (((NODE) = OBJECT_CONTAINING(ITER_VAR(NODE).node,                 \
-+                                         NODE, MEMBER)),1):                   \
-+            (((NODE) = NULL), 0);)
- 
- static inline struct hmap_node *hmap_first(const struct hmap *);
- static inline struct hmap_node *hmap_next(const struct hmap *,
-diff --git a/include/openvswitch/list.h b/include/openvswitch/list.h
-index 8ad5eeb327..6272d340cf 100644
---- a/include/openvswitch/list.h
-+++ b/include/openvswitch/list.h
-@@ -72,37 +72,74 @@ static inline bool ovs_list_is_empty(const struct ovs_list *);
- static inline bool ovs_list_is_singleton(const struct ovs_list *);
- static inline bool ovs_list_is_short(const struct ovs_list *);
++int
++mcast_addr_pool_destroy(portid_t port_id)
++{
++	struct rte_port *port;
++
++	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
++	    port_id == (portid_t)RTE_PORT_ALL)
++		return -EINVAL;
++	port = &ports[port_id];
++
++	if (port->mc_addr_nb != 0) {
++		/* free the pool of multicast addresses. */
++		free(port->mc_addr_pool);
++		port->mc_addr_pool = NULL;
++		port->mc_addr_nb = 0;
++	}
++	return 0;
++}
++
+ static int
+ eth_port_multicast_addr_list_set(portid_t port_id)
+ {
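
The eth_dev_validate_mtu() addition above layers three driver-reported limits: the requested MTU must sit within [min_mtu, max_mtu], and the resulting frame (MTU plus L2 overhead) must not exceed max_rx_pktlen. A minimal standalone sketch of the same overhead math; the device limits here are made up for illustration, and the constants stand in for RTE_ETHER_HDR_LEN (14) and RTE_ETHER_CRC_LEN (4):

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN 14u
#define ETHER_CRC_LEN 4u

/* Mirrors eth_dev_get_overhead_len() above: trust the driver when it
 * reports a usable max_mtu, otherwise fall back to header + CRC. */
static uint32_t
overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
    if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
        return max_rx_pktlen - max_mtu;
    return ETHER_HDR_LEN + ETHER_CRC_LEN;
}

int
main(void)
{
    uint32_t max_rx_pktlen = 9618; /* hypothetical device limit */
    uint16_t max_mtu = 9600;       /* hypothetical device limit */
    uint16_t mtu = 9000;           /* requested MTU */

    uint32_t oh = overhead_len(max_rx_pktlen, max_mtu); /* 18 */
    uint32_t frame = mtu + oh;                          /* 9018 */

    printf("overhead=%u frame=%u fits=%s\n", oh, frame,
           frame <= max_rx_pktlen ? "yes" : "no");
    return 0;
}
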
+diff --git a/dpdk/app/test-pmd/csumonly.c b/dpdk/app/test-pmd/csumonly.c
+index 2aeea243b6..d661e21e02 100644
+--- a/dpdk/app/test-pmd/csumonly.c
++++ b/dpdk/app/test-pmd/csumonly.c
+@@ -222,15 +222,14 @@ parse_gtp(struct rte_udp_hdr *udp_hdr,
+ 
+ 	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
+ 		  sizeof(struct rte_udp_hdr));
+-
++	if (gtp_hdr->e || gtp_hdr->s || gtp_hdr->pn)
++		gtp_len += sizeof(struct rte_gtp_hdr_ext_word);
+ 	/*
+ 	 * Check message type. If message type is 0xff, it is
+ 	 * a GTP data packet. If not, it is a GTP control packet
+ 	 */
+ 	if (gtp_hdr->msg_type == 0xff) {
+-		ip_ver = *(uint8_t *)((char *)udp_hdr +
+-			 sizeof(struct rte_udp_hdr) +
+-			 sizeof(struct rte_gtp_hdr));
++		ip_ver = *(uint8_t *)((char *)gtp_hdr + gtp_len);
+ 		ip_ver = (ip_ver) & 0xf0;
+ 
+ 		if (ip_ver == RTE_GTP_TYPE_IPV4) {
+@@ -257,8 +256,7 @@ parse_gtp(struct rte_udp_hdr *udp_hdr,
+ /* Parse a vxlan header */
+ static void
+ parse_vxlan(struct rte_udp_hdr *udp_hdr,
+-	    struct testpmd_offload_info *info,
+-	    uint32_t pkt_type)
++	    struct testpmd_offload_info *info)
+ {
+ 	struct rte_ether_hdr *eth_hdr;
+ 
+@@ -266,8 +264,7 @@ parse_vxlan(struct rte_udp_hdr *udp_hdr,
+ 	 * default vxlan port (rfc7348) or that the rx offload flag is set
+ 	 * (i40e only currently)
+ 	 */
+-	if (udp_hdr->dst_port != _htons(RTE_VXLAN_DEFAULT_PORT) &&
+-		RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
++	if (udp_hdr->dst_port != _htons(RTE_VXLAN_DEFAULT_PORT))
+ 		return;
+ 
+ 	update_tunnel_outer(info);
+@@ -771,6 +768,28 @@ pkt_copy_split(const struct rte_mbuf *pkt)
+ 	return md[0];
+ }
  
--#define LIST_FOR_EACH(ITER, MEMBER, LIST)                               \
--    for (INIT_CONTAINER(ITER, (LIST)->next, MEMBER);                    \
--         &(ITER)->MEMBER != (LIST);                                     \
--         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.next, MEMBER))
--#define LIST_FOR_EACH_CONTINUE(ITER, MEMBER, LIST)                      \
--    for (ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.next, MEMBER);             \
--         &(ITER)->MEMBER != (LIST);                                     \
--         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.next, MEMBER))
--#define LIST_FOR_EACH_REVERSE(ITER, MEMBER, LIST)                       \
--    for (INIT_CONTAINER(ITER, (LIST)->prev, MEMBER);                    \
--         &(ITER)->MEMBER != (LIST);                                     \
--         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER))
--#define LIST_FOR_EACH_REVERSE_SAFE(ITER, PREV, MEMBER, LIST)        \
--    for (INIT_CONTAINER(ITER, (LIST)->prev, MEMBER);                \
--         (&(ITER)->MEMBER != (LIST)                                 \
--          ? INIT_CONTAINER(PREV, (ITER)->MEMBER.prev, MEMBER), 1    \
--          : 0);                                                     \
--         (ITER) = (PREV))
--#define LIST_FOR_EACH_REVERSE_CONTINUE(ITER, MEMBER, LIST)              \
--    for (ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER);           \
--         &(ITER)->MEMBER != (LIST);                                     \
--         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER))
--#define LIST_FOR_EACH_SAFE(ITER, NEXT, MEMBER, LIST)               \
--    for (INIT_CONTAINER(ITER, (LIST)->next, MEMBER);               \
--         (&(ITER)->MEMBER != (LIST)                                \
--          ? INIT_CONTAINER(NEXT, (ITER)->MEMBER.next, MEMBER), 1   \
--          : 0);                                                    \
--         (ITER) = (NEXT))
--#define LIST_FOR_EACH_POP(ITER, MEMBER, LIST)                      \
--    while (!ovs_list_is_empty(LIST)                                    \
--           && (INIT_CONTAINER(ITER, ovs_list_pop_front(LIST), MEMBER), 1))
-+#define LIST_FOR_EACH(VAR, MEMBER, LIST)                                      \
-+    for (INIT_MULTIVAR(VAR, MEMBER, (LIST)->next, struct ovs_list);           \
-+         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
-+         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->next))
++#if defined(RTE_LIB_GRO) || defined(RTE_LIB_GSO)
++/*
++ * Re-calculate IP checksum for merged/fragmented packets.
++ */
++static void
++pkts_ip_csum_recalc(struct rte_mbuf **pkts_burst, const uint16_t nb_pkts, uint64_t tx_offloads)
++{
++	int i;
++	struct rte_ipv4_hdr *ipv4_hdr;
++	for (i = 0; i < nb_pkts; i++) {
++		if ((pkts_burst[i]->ol_flags & RTE_MBUF_F_TX_IPV4) &&
++			(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
++			ipv4_hdr = rte_pktmbuf_mtod_offset(pkts_burst[i],
++						struct rte_ipv4_hdr *,
++						pkts_burst[i]->l2_len);
++			ipv4_hdr->hdr_checksum = 0;
++			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
++		}
++	}
++}
++#endif
 +
-+#define LIST_FOR_EACH_CONTINUE(VAR, MEMBER, LIST)                             \
-+    for (INIT_MULTIVAR(VAR, MEMBER, VAR->MEMBER.next, struct ovs_list);       \
-+         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
-+         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->next))
+ /*
+  * Receive a burst of packets, and for each packet:
+  *  - parse packet, and try to recognize a supported packet type (1)
+@@ -796,7 +815,7 @@ pkt_copy_split(const struct rte_mbuf *pkt)
+  *
+  * The testpmd command line for this forward engine sets the flags
+  * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
+- * wether a checksum must be calculated in software or in hardware. The
++ * whether a checksum must be calculated in software or in hardware. The
+  * IP, UDP, TCP and SCTP flags always concern the inner layer. The
+  * OUTER_IP is only useful for tunnel packets.
+  */
+@@ -887,10 +906,6 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
+ 		 * and inner headers */
+ 
+ 		eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+-		rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+-				&eth_hdr->dst_addr);
+-		rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
+-				&eth_hdr->src_addr);
+ 		parse_ethernet(eth_hdr, &info);
+ 		l3_hdr = (char *)eth_hdr + info.l2_len;
+ 
+@@ -912,8 +927,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
+ 						RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE;
+ 					goto tunnel_update;
+ 				}
+-				parse_vxlan(udp_hdr, &info,
+-					    m->packet_type);
++				parse_vxlan(udp_hdr, &info);
+ 				if (info.is_tunnel) {
+ 					tx_ol_flags |=
+ 						RTE_MBUF_F_TX_TUNNEL_VXLAN;
+@@ -925,6 +939,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
+ 						RTE_MBUF_F_TX_TUNNEL_GENEVE;
+ 					goto tunnel_update;
+ 				}
++				/* Always keep last. */
++				if (unlikely(RTE_ETH_IS_TUNNEL_PKT(
++							m->packet_type) != 0)) {
++					TESTPMD_LOG(DEBUG, "Unknown tunnel packet. UDP dst port: %hu",
++						udp_hdr->dst_port);
++				}
+ 			} else if (info.l4_proto == IPPROTO_GRE) {
+ 				struct simple_gre_hdr *gre_hdr;
+ 
+@@ -1089,6 +1109,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
+ 				fs->gro_times = 0;
+ 			}
+ 		}
++
++		pkts_ip_csum_recalc(pkts_burst, nb_rx, tx_offloads);
+ 	}
+ #endif
+ 
+@@ -1122,6 +1144,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
+ 
+ 		tx_pkts_burst = gso_segments;
+ 		nb_rx = nb_segments;
 +
-+#define LIST_FOR_EACH_REVERSE(VAR, MEMBER, LIST)                              \
-+    for (INIT_MULTIVAR(VAR, MEMBER, (LIST)->prev, struct ovs_list);           \
-+         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
-+         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->prev))
++		pkts_ip_csum_recalc(tx_pkts_burst, nb_rx, tx_offloads);
+ 	} else
+ #endif
+ 		tx_pkts_burst = pkts_burst;
+@@ -1164,9 +1188,22 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
+ }
+ 
++static void
++stream_init_checksum_forward(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
 +
-+#define LIST_FOR_EACH_REVERSE_CONTINUE(VAR, MEMBER, LIST)                     \
-+    for (INIT_MULTIVAR(VAR, MEMBER, VAR->MEMBER.prev, struct ovs_list);       \
-+         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
-+         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->prev))
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
 +
-+/* LONG version of SAFE iterators. */
-+#define LIST_FOR_EACH_REVERSE_SAFE_LONG(VAR, PREV, MEMBER, LIST)              \
-+    for (INIT_MULTIVAR_SAFE_LONG(VAR, PREV, MEMBER, (LIST)->prev,             \
-+                                 struct ovs_list);                            \
-+         CONDITION_MULTIVAR_SAFE_LONG(VAR, PREV, MEMBER,                      \
-+                                      ITER_VAR(VAR) != (LIST),                \
-+                                      ITER_VAR(PREV) = ITER_VAR(VAR)->prev,   \
-+                                      ITER_VAR(PREV) != (LIST));              \
-+         UPDATE_MULTIVAR_SAFE_LONG(VAR, PREV))
+ struct fwd_engine csum_fwd_engine = {
+ 	.fwd_mode_name  = "csum",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = stream_init_checksum_forward,
+ 	.packet_fwd     = pkt_burst_checksum_forward,
+ };
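
pkts_ip_csum_recalc() above zeroes hdr_checksum and lets rte_ipv4_cksum() recompute it, because GRO merges and GSO splits leave the resulting headers with stale lengths and checksums. A dependency-free sketch of the same zero-then-recompute idea using the RFC 1071 one's-complement sum; the sample header is a textbook example with the checksum bytes already zeroed, and the expected output is 0xb861:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One's-complement sum over a header, as rte_ipv4_cksum() computes. */
static uint16_t
ipv4_header_cksum(const void *hdr, size_t len)
{
    const uint8_t *p = hdr;
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)                 /* odd trailing byte (never for IPv4) */
        sum += (uint32_t)p[len - 1] << 8;
    while (sum >> 16)            /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int
main(void)
{
    /* 20-byte IPv4 header, checksum field (bytes 10..11) zeroed. */
    uint8_t hdr[20] = {
        0x45, 0x00, 0x00, 0x73, 0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
        0xc0, 0xa8, 0x00, 0xc7,
    };
    printf("checksum=0x%04x\n", ipv4_header_cksum(hdr, sizeof hdr));
    return 0;
}
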
+diff --git a/dpdk/app/test-pmd/flowgen.c b/dpdk/app/test-pmd/flowgen.c
+index 9ceef3b54a..1e01120ae9 100644
+--- a/dpdk/app/test-pmd/flowgen.c
++++ b/dpdk/app/test-pmd/flowgen.c
+@@ -207,9 +207,22 @@ flowgen_begin(portid_t pi)
+ 	return 0;
+ }
+ 
++static void
++flowgen_stream_init(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
 +
-+#define LIST_FOR_EACH_SAFE_LONG(VAR, NEXT, MEMBER, LIST)                      \
-+    for (INIT_MULTIVAR_SAFE_LONG(VAR, NEXT, MEMBER, (LIST)->next,             \
-+                                 struct ovs_list);                            \
-+         CONDITION_MULTIVAR_SAFE_LONG(VAR, NEXT, MEMBER,                      \
-+                                      ITER_VAR(VAR) != (LIST),                \
-+                                      ITER_VAR(NEXT) = ITER_VAR(VAR)->next,   \
-+                                      ITER_VAR(NEXT) != (LIST));              \
-+         UPDATE_MULTIVAR_SAFE_LONG(VAR, NEXT))
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
 +
-+/* SHORT version of SAFE iterators. */
-+#define LIST_FOR_EACH_REVERSE_SAFE_SHORT(VAR, MEMBER, LIST)                   \
-+    for (INIT_MULTIVAR_SAFE_SHORT(VAR, MEMBER, (LIST)->prev, struct ovs_list);\
-+         CONDITION_MULTIVAR_SAFE_SHORT(VAR, MEMBER,                           \
-+                                       ITER_VAR(VAR) != (LIST),               \
-+                                 ITER_NEXT_VAR(VAR) = ITER_VAR(VAR)->prev);   \
-+         UPDATE_MULTIVAR_SAFE_SHORT(VAR))
+ struct fwd_engine flow_gen_engine = {
+ 	.fwd_mode_name  = "flowgen",
+ 	.port_fwd_begin = flowgen_begin,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = flowgen_stream_init,
+ 	.packet_fwd     = pkt_burst_flow_gen,
+ };
+diff --git a/dpdk/app/test-pmd/icmpecho.c b/dpdk/app/test-pmd/icmpecho.c
+index 99c94cb282..066f2a3ab7 100644
+--- a/dpdk/app/test-pmd/icmpecho.c
++++ b/dpdk/app/test-pmd/icmpecho.c
+@@ -512,9 +512,22 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
+ }
+ 
++static void
++icmpecho_stream_init(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
 +
-+#define LIST_FOR_EACH_SAFE_SHORT(VAR, MEMBER, LIST)                           \
-+    for (INIT_MULTIVAR_SAFE_SHORT(VAR, MEMBER, (LIST)->next, struct ovs_list);\
-+         CONDITION_MULTIVAR_SAFE_SHORT(VAR, MEMBER,                           \
-+                                       ITER_VAR(VAR) != (LIST),               \
-+                                 ITER_NEXT_VAR(VAR) = ITER_VAR(VAR)->next);   \
-+         UPDATE_MULTIVAR_SAFE_SHORT(VAR))
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
 +
-+#define LIST_FOR_EACH_SAFE(...)                      \
-+    OVERLOAD_SAFE_MACRO(LIST_FOR_EACH_SAFE_LONG,     \
-+                        LIST_FOR_EACH_SAFE_SHORT,    \
-+                        4, __VA_ARGS__)
+ struct fwd_engine icmp_echo_engine = {
+ 	.fwd_mode_name  = "icmpecho",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = icmpecho_stream_init,
+ 	.packet_fwd     = reply_to_icmp_echo_rqsts,
+ };
+diff --git a/dpdk/app/test-pmd/ieee1588fwd.c b/dpdk/app/test-pmd/ieee1588fwd.c
+index 9ff817aa68..fc4e2d014c 100644
+--- a/dpdk/app/test-pmd/ieee1588fwd.c
++++ b/dpdk/app/test-pmd/ieee1588fwd.c
+@@ -211,9 +211,22 @@ port_ieee1588_fwd_end(portid_t pi)
+ 	rte_eth_timesync_disable(pi);
+ }
+ 
++static void
++port_ieee1588_stream_init(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
 +
-+#define LIST_FOR_EACH_REVERSE_SAFE(...)                        \
-+    OVERLOAD_SAFE_MACRO(LIST_FOR_EACH_REVERSE_SAFE_LONG,       \
-+                        LIST_FOR_EACH_REVERSE_SAFE_SHORT,      \
-+                        4, __VA_ARGS__)
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
 +
-+#define LIST_FOR_EACH_POP(ITER, MEMBER, LIST)                                 \
-+    while (!ovs_list_is_empty(LIST) ?                                         \
-+           (INIT_CONTAINER(ITER, ovs_list_pop_front(LIST), MEMBER), 1) :      \
-+           (ITER = NULL, 0))
- 
- /* Inline implementations. */
+ struct fwd_engine ieee1588_fwd_engine = {
+ 	.fwd_mode_name  = "ieee1588",
+ 	.port_fwd_begin = port_ieee1588_fwd_begin,
+ 	.port_fwd_end   = port_ieee1588_fwd_end,
++	.stream_init    = port_ieee1588_stream_init,
+ 	.packet_fwd     = ieee1588_packet_fwd,
+ };
+diff --git a/dpdk/app/test-pmd/iofwd.c b/dpdk/app/test-pmd/iofwd.c
+index 19cd920f70..71849aaf96 100644
+--- a/dpdk/app/test-pmd/iofwd.c
++++ b/dpdk/app/test-pmd/iofwd.c
+@@ -88,9 +88,22 @@ pkt_burst_io_forward(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
+ }
  
-diff --git a/include/openvswitch/ofp-actions.h b/include/openvswitch/ofp-actions.h
-index 41bcb55d20..b7231c7bb3 100644
---- a/include/openvswitch/ofp-actions.h
-+++ b/include/openvswitch/ofp-actions.h
-@@ -218,7 +218,9 @@ struct ofpact *ofpact_next_flattened(const struct ofpact *);
- static inline struct ofpact *
- ofpact_end(const struct ofpact *ofpacts, size_t ofpacts_len)
- {
--    return ALIGNED_CAST(struct ofpact *, (uint8_t *) ofpacts + ofpacts_len);
-+    return ofpacts
-+           ? ALIGNED_CAST(struct ofpact *, (uint8_t *) ofpacts + ofpacts_len)
-+           : NULL;
++static void
++stream_init_forward(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
++
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
++
+ struct fwd_engine io_fwd_engine = {
+ 	.fwd_mode_name  = "io",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = stream_init_forward,
+ 	.packet_fwd     = pkt_burst_io_forward,
+ };
+diff --git a/dpdk/app/test-pmd/macfwd.c b/dpdk/app/test-pmd/macfwd.c
+index 812a0c721f..79c9241d00 100644
+--- a/dpdk/app/test-pmd/macfwd.c
++++ b/dpdk/app/test-pmd/macfwd.c
+@@ -119,9 +119,22 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
  }
  
- static inline bool
-diff --git a/include/openvswitch/ofpbuf.h b/include/openvswitch/ofpbuf.h
-index 1136ba04c8..32f03ea837 100644
---- a/include/openvswitch/ofpbuf.h
-+++ b/include/openvswitch/ofpbuf.h
-@@ -179,7 +179,11 @@ static inline void ofpbuf_delete(struct ofpbuf *b)
- static inline void *ofpbuf_at(const struct ofpbuf *b, size_t offset,
-                               size_t size)
- {
--    return offset + size <= b->size ? (char *) b->data + offset : NULL;
-+    if (offset + size <= b->size) {
-+        ovs_assert(b->data);
-+        return (char *) b->data + offset;
-+    }
-+    return NULL;
++static void
++stream_init_mac_forward(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
++
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
++
+ struct fwd_engine mac_fwd_engine = {
+ 	.fwd_mode_name  = "mac",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = stream_init_mac_forward,
+ 	.packet_fwd     = pkt_burst_mac_forward,
+ };
+diff --git a/dpdk/app/test-pmd/macswap.c b/dpdk/app/test-pmd/macswap.c
+index 4627ff83e9..acb0fd7fb4 100644
+--- a/dpdk/app/test-pmd/macswap.c
++++ b/dpdk/app/test-pmd/macswap.c
+@@ -97,9 +97,22 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
  }
  
- /* Returns a pointer to byte 'offset' in 'b', which must contain at least
-@@ -188,20 +192,23 @@ static inline void *ofpbuf_at_assert(const struct ofpbuf *b, size_t offset,
-                                      size_t size)
- {
-     ovs_assert(offset + size <= b->size);
--    return ((char *) b->data) + offset;
-+    ovs_assert(b->data);
-+    return (char *) b->data + offset;
++static void
++stream_init_mac_swap(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
++
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
++
+ struct fwd_engine mac_swap_engine = {
+ 	.fwd_mode_name  = "macswap",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = stream_init_mac_swap,
+ 	.packet_fwd     = pkt_burst_mac_swap,
+ };
+diff --git a/dpdk/app/test-pmd/noisy_vnf.c b/dpdk/app/test-pmd/noisy_vnf.c
+index e4434bea95..a92e810190 100644
+--- a/dpdk/app/test-pmd/noisy_vnf.c
++++ b/dpdk/app/test-pmd/noisy_vnf.c
+@@ -277,9 +277,22 @@ noisy_fwd_begin(portid_t pi)
+ 	return 0;
  }
  
- /* Returns a pointer to byte following the last byte of data in use in 'b'. */
- static inline void *ofpbuf_tail(const struct ofpbuf *b)
++static void
++stream_init_noisy_vnf(struct fwd_stream *fs)
++{
++	bool rx_stopped, tx_stopped;
++
++	rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++	fs->disabled = rx_stopped || tx_stopped;
++}
++
+ struct fwd_engine noisy_vnf_engine = {
+ 	.fwd_mode_name  = "noisy",
+ 	.port_fwd_begin = noisy_fwd_begin,
+ 	.port_fwd_end   = noisy_fwd_end,
++	.stream_init    = stream_init_noisy_vnf,
+ 	.packet_fwd     = pkt_burst_noisy_vnf,
+ };
+diff --git a/dpdk/app/test-pmd/parameters.c b/dpdk/app/test-pmd/parameters.c
+index f9185065af..e3c9757f3f 100644
+--- a/dpdk/app/test-pmd/parameters.c
++++ b/dpdk/app/test-pmd/parameters.c
+@@ -61,6 +61,9 @@ usage(char* progname)
+ 	       "extended statistics to show. Used with --stats-period "
+ 	       "specified or interactive commands that show Rx/Tx statistics "
+ 	       "(i.e. 'show port stats').\n");
++	printf("  --num-procs=N: set the total number of multi-process instances.\n");
++	printf("  --proc-id=id: set the id of the current process from "
++	       "multi-process instances (0 <= id < num-procs).\n");
+ 	printf("  --nb-cores=N: set the number of forwarding cores "
+ 	       "(1 <= N <= %d).\n", nb_lcores);
+ 	printf("  --nb-ports=N: set the number of forwarding ports "
+@@ -110,7 +113,7 @@ usage(char* progname)
+ 	       "If the drop-queue doesn't exist, the packet is dropped. "
+ 	       "By default drop-queue=127.\n");
+ #ifdef RTE_LIB_LATENCYSTATS
+-	printf("  --latencystats=N: enable latency and jitter statistcs "
++	printf("  --latencystats=N: enable latency and jitter statistics "
+ 	       "monitoring on forwarding lcore id N.\n");
+ #endif
+ 	printf("  --disable-crc-strip: disable CRC stripping by hardware.\n");
+@@ -940,11 +943,12 @@ launch_args_parse(int argc, char** argv)
+ 			}
+ 			if (!strcmp(lgopts[opt_idx].name, "total-num-mbufs")) {
+ 				n = atoi(optarg);
+-				if (n > 1024)
++				if (n > MIN_TOTAL_NUM_MBUFS)
+ 					param_total_num_mbufs = (unsigned)n;
+ 				else
+ 					rte_exit(EXIT_FAILURE,
+-						 "total-num-mbufs should be > 1024\n");
++						 "total-num-mbufs should be > %d\n",
++						 MIN_TOTAL_NUM_MBUFS);
+ 			}
+ 			if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
+ 				n = atoi(optarg);
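
The parameters.c change above replaces the magic 1024 with MIN_TOTAL_NUM_MBUFS so the comparison and the error message cannot drift apart. A compilable sketch of the pattern; the helper name is made up:

#include <stdio.h>
#include <stdlib.h>

#define MIN_TOTAL_NUM_MBUFS 1024  /* mirrors the constant added to testpmd.h */

static unsigned
parse_total_num_mbufs(const char *arg)
{
    int n = atoi(arg);

    /* Bound and message reference the same named constant. */
    if (n <= MIN_TOTAL_NUM_MBUFS) {
        fprintf(stderr, "total-num-mbufs should be > %d\n",
                MIN_TOTAL_NUM_MBUFS);
        exit(EXIT_FAILURE);
    }
    return (unsigned)n;
}

int
main(void)
{
    printf("parsed: %u\n", parse_total_num_mbufs("2048"));
    return 0;
}
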
+diff --git a/dpdk/app/test-pmd/rxonly.c b/dpdk/app/test-pmd/rxonly.c
+index d1a579d8d8..04457010f4 100644
+--- a/dpdk/app/test-pmd/rxonly.c
++++ b/dpdk/app/test-pmd/rxonly.c
+@@ -68,9 +68,17 @@ pkt_burst_receive(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
+ }
+ 
++static void
++stream_init_receive(struct fwd_stream *fs)
++{
++	fs->disabled = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++}
++
+ struct fwd_engine rx_only_engine = {
+ 	.fwd_mode_name  = "rxonly",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = stream_init_receive,
+ 	.packet_fwd     = pkt_burst_receive,
+ };
+diff --git a/dpdk/app/test-pmd/shared_rxq_fwd.c b/dpdk/app/test-pmd/shared_rxq_fwd.c
+index da54a383fd..2e9047804b 100644
+--- a/dpdk/app/test-pmd/shared_rxq_fwd.c
++++ b/dpdk/app/test-pmd/shared_rxq_fwd.c
+@@ -107,9 +107,17 @@ shared_rxq_fwd(struct fwd_stream *fs)
+ 	get_end_cycles(fs, start_tsc);
+ }
+ 
++static void
++shared_rxq_stream_init(struct fwd_stream *fs)
++{
++	fs->disabled = ports[fs->rx_port].rxq[fs->rx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++}
++
+ struct fwd_engine shared_rxq_engine = {
+ 	.fwd_mode_name  = "shared_rxq",
+ 	.port_fwd_begin = NULL,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = shared_rxq_stream_init,
+ 	.packet_fwd     = shared_rxq_fwd,
+ };
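
Each engine above gains an identical stream_init body that derives fs->disabled from the Rx and Tx queue states. A hypothetical consolidation of that duplicated body, with cut-down stand-ins for testpmd's struct fwd_stream and port table so the sketch builds on its own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { QUEUE_STATE_STOPPED = 0, QUEUE_STATE_STARTED = 1 };

struct queue  { uint8_t state; };
struct port   { struct queue rxq[4]; struct queue txq[4]; };
struct stream { uint16_t rx_port, tx_port, rx_queue, tx_queue; bool disabled; };

static struct port ports[2];

/* Hypothetical shared helper covering every per-engine body above. */
static void
common_stream_init(struct stream *fs)
{
    bool rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state
                      == QUEUE_STATE_STOPPED;
    bool tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state
                      == QUEUE_STATE_STOPPED;
    /* A stream only runs when both of its queues are started. */
    fs->disabled = rx_stopped || tx_stopped;
}

int
main(void)
{
    struct stream fs = { .rx_port = 0, .tx_port = 1 };

    ports[0].rxq[0].state = QUEUE_STATE_STARTED;
    ports[1].txq[0].state = QUEUE_STATE_STOPPED; /* deferred-start Tx queue */
    common_stream_init(&fs);
    printf("disabled=%d\n", fs.disabled); /* 1 */
    return 0;
}
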
+diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c
+index 55eb293cc0..3699c5fd64 100644
+--- a/dpdk/app/test-pmd/testpmd.c
++++ b/dpdk/app/test-pmd/testpmd.c
+@@ -66,6 +66,9 @@
+ #ifdef RTE_EXEC_ENV_WINDOWS
+ #include <process.h>
+ #endif
++#ifdef RTE_NET_BOND
++#include <rte_eth_bond.h>
++#endif
+ 
+ #include "testpmd.h"
+ 
+@@ -84,7 +87,13 @@
+ #endif
+ 
+ #define EXTMEM_HEAP_NAME "extmem"
+-#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
++/*
++ * Zone size with the malloc overhead (max of debug and release variants)
++ * must fit into the smallest supported hugepage size (2M),
++ * so that an IOVA-contiguous zone of this size can always be allocated
++ * if there are free 2M hugepages.
++ */
++#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
+ 
+ uint16_t verbose_level = 0; /**< Silent by default. */
+ int testpmd_logtype; /**< Log type for testpmd logs */
+@@ -220,6 +229,7 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */
+  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
+  */
+ uint8_t f_quit;
++uint8_t cl_quit; /* Quit testpmd from cmdline. */
+ 
+ /*
+  * Max Rx frame size, set by '--max-pkt-len' parameter.
+@@ -449,7 +459,7 @@ uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
+ uint8_t latencystats_enabled;
+ 
+ /*
+- * Lcore ID to serive latency statistics.
++ * Lcore ID to service latency statistics.
+  */
+ lcoreid_t latencystats_lcore_id = -1;
+ 
+@@ -591,11 +601,58 @@ eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+ 	return 0;
+ }
+ 
++static int
++change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
++{
++#ifdef RTE_NET_BOND
++
++	portid_t slave_pids[RTE_MAX_ETHPORTS];
++	struct rte_port *port;
++	int num_slaves;
++	portid_t slave_pid;
++	int i;
++
++	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
++						RTE_MAX_ETHPORTS);
++	if (num_slaves < 0) {
++		fprintf(stderr, "Failed to get slave list for port = %u\n",
++			bond_pid);
++		return num_slaves;
++	}
++
++	for (i = 0; i < num_slaves; i++) {
++		slave_pid = slave_pids[i];
++		port = &ports[slave_pid];
++		port->port_status =
++			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
++	}
++#else
++	RTE_SET_USED(bond_pid);
++	RTE_SET_USED(is_stop);
++#endif
++	return 0;
++}
++
+ static int
+ eth_dev_start_mp(uint16_t port_id)
  {
--    return (char *) b->data + b->size;
-+    ovs_assert(b->data || !b->size);
-+    return b->data ? (char *) b->data + b->size : NULL;
+-	if (is_proc_primary())
+-		return rte_eth_dev_start(port_id);
++	int ret;
++
++	if (is_proc_primary()) {
++		ret = rte_eth_dev_start(port_id);
++		if (ret != 0)
++			return ret;
++
++		struct rte_port *port = &ports[port_id];
++
++		/*
++		 * Starting a bonded port also starts all slaves under the bonded
++		 * device. So if this port is bond device, we need to modify the
++		 * port status of these slaves.
++		 */
++		if (port->bond_flag == 1)
++			return change_bonding_slave_port_status(port_id, false);
++	}
+ 
+ 	return 0;
+ }
+@@ -603,8 +660,23 @@ eth_dev_start_mp(uint16_t port_id)
+ static int
+ eth_dev_stop_mp(uint16_t port_id)
+ {
+-	if (is_proc_primary())
+-		return rte_eth_dev_stop(port_id);
++	int ret;
++
++	if (is_proc_primary()) {
++		ret = rte_eth_dev_stop(port_id);
++		if (ret != 0)
++			return ret;
++
++		struct rte_port *port = &ports[port_id];
++
++		/*
++		 * Stopping a bonded port also stops all slaves under the bonded
++		 * device. So if this port is bond device, we need to modify the
++		 * port status of these slaves.
++		 */
++		if (port->bond_flag == 1)
++			return change_bonding_slave_port_status(port_id, true);
++	}
+ 
+ 	return 0;
+ }
+@@ -1061,12 +1133,11 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
+ 			ext_num = 0;
+ 			break;
+ 		}
+-		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
+-						 socket_id,
+-						 RTE_MEMZONE_IOVA_CONTIG |
+-						 RTE_MEMZONE_1GB |
+-						 RTE_MEMZONE_SIZE_HINT_ONLY,
+-						 EXTBUF_ZONE_SIZE);
++		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
++					 socket_id,
++					 RTE_MEMZONE_IOVA_CONTIG |
++					 RTE_MEMZONE_1GB |
++					 RTE_MEMZONE_SIZE_HINT_ONLY);
+ 		if (mz == NULL) {
+ 			/*
+ 			 * The caller exits on external buffer creation
+@@ -1569,10 +1640,10 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
+ 
+ 	/* Apply Rx offloads configuration */
+ 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
+-		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
++		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
+ 	/* Apply Tx offloads configuration */
+ 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
+-		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
++		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
+ 
+ 	if (eth_link_speed)
+ 		port->dev_conf.link_speeds = eth_link_speed;
+@@ -1759,7 +1830,6 @@ reconfig(portid_t new_port_id, unsigned socket_id)
+ 	init_port_config();
  }
  
- /* Returns a pointer to byte following the last byte allocated for use (but
-  * not necessarily in use) in 'b'. */
- static inline void *ofpbuf_end(const struct ofpbuf *b)
+-
+ int
+ init_fwd_streams(void)
  {
--    return (char *) b->base + b->allocated;
-+    ovs_assert(b->base || !b->allocated);
-+    return b->base ? (char *) b->base + b->allocated : NULL;
+@@ -1978,6 +2048,7 @@ fwd_stats_display(void)
+ 	struct rte_port *port;
+ 	streamid_t sm_id;
+ 	portid_t pt_id;
++	int ret;
+ 	int i;
+ 
+ 	memset(ports_stats, 0, sizeof(ports_stats));
+@@ -2009,7 +2080,13 @@ fwd_stats_display(void)
+ 		pt_id = fwd_ports_ids[i];
+ 		port = &ports[pt_id];
+ 
+-		rte_eth_stats_get(pt_id, &stats);
++		ret = rte_eth_stats_get(pt_id, &stats);
++		if (ret != 0) {
++			fprintf(stderr,
++				"%s: Error: failed to get stats (port %u): %d",
++				__func__, pt_id, ret);
++			continue;
++		}
+ 		stats.ipackets -= port->stats.ipackets;
+ 		stats.opackets -= port->stats.opackets;
+ 		stats.ibytes -= port->stats.ibytes;
+@@ -2104,11 +2181,16 @@ fwd_stats_reset(void)
+ {
+ 	streamid_t sm_id;
+ 	portid_t pt_id;
++	int ret;
+ 	int i;
+ 
+ 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+ 		pt_id = fwd_ports_ids[i];
+-		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
++		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
++		if (ret != 0)
++			fprintf(stderr,
++				"%s: Error: failed to clear stats (port %u):%d",
++				__func__, pt_id, ret);
+ 	}
+ 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
+ 		struct fwd_stream *fs = fwd_streams[sm_id];
+@@ -2152,6 +2234,12 @@ flush_fwd_rx_queues(void)
+ 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
+ 			for (rxq = 0; rxq < nb_rxq; rxq++) {
+ 				port_id = fwd_ports_ids[rxp];
++
++				/* Polling stopped queues is prohibited. */
++				if (ports[port_id].rxq[rxq].state ==
++				    RTE_ETH_QUEUE_STATE_STOPPED)
++					continue;
++
+ 				/**
+ 				* testpmd can stuck in the below do while loop
+ 				* if rte_eth_rx_burst() always returns nonzero
+@@ -2197,7 +2285,8 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
+ 	nb_fs = fc->stream_nb;
+ 	do {
+ 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
+-			(*pkt_fwd)(fsm[sm_id]);
++			if (!fsm[sm_id]->disabled)
++				(*pkt_fwd)(fsm[sm_id]);
+ #ifdef RTE_LIB_BITRATESTATS
+ 		if (bitrate_enabled != 0 &&
+ 				bitrate_lcore_id == rte_lcore_id()) {
+@@ -2279,6 +2368,7 @@ start_packet_forwarding(int with_tx_first)
+ {
+ 	port_fwd_begin_t port_fwd_begin;
+ 	port_fwd_end_t  port_fwd_end;
++	stream_init_t stream_init = cur_fwd_eng->stream_init;
+ 	unsigned int i;
+ 
+ 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
+@@ -2309,6 +2399,10 @@ start_packet_forwarding(int with_tx_first)
+ 	if (!pkt_fwd_shared_rxq_check())
+ 		return;
+ 
++	if (stream_init != NULL)
++		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
++			stream_init(fwd_streams[i]);
++
+ 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
+ 	if (port_fwd_begin != NULL) {
+ 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+@@ -2570,7 +2664,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+ 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
+ 					     nb_rx_desc, socket_id,
+ 					     rx_conf, mp);
+-		return ret;
++		goto exit;
+ 	}
+ 	for (i = 0; i < rx_pkt_nb_segs; i++) {
+ 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+@@ -2579,7 +2673,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+ 		 * Use last valid pool for the segments with number
+ 		 * exceeding the pool index.
+ 		 */
+-		mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
++		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
+ 		mpx = mbuf_pool_find(socket_id, mp_n);
+ 		/* Handle zero as mbuf data buffer size. */
+ 		rx_seg->length = rx_pkt_seg_lengths[i] ?
+@@ -2595,6 +2689,10 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+ 				    socket_id, rx_conf, NULL);
+ 	rx_conf->rx_seg = NULL;
+ 	rx_conf->rx_nseg = 0;
++exit:
++	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
++						RTE_ETH_QUEUE_STATE_STOPPED :
++						RTE_ETH_QUEUE_STATE_STARTED;
+ 	return ret;
  }
  
- /* Returns the number of bytes of headroom in 'b', that is, the number of bytes
-@@ -249,6 +256,11 @@ static inline void *ofpbuf_pull(struct ofpbuf *b, size_t size)
+@@ -2722,6 +2820,13 @@ start_port(portid_t pid)
+ 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
+ 			continue;
+ 
++		if (port_is_bonding_slave(pi)) {
++			fprintf(stderr,
++				"Please remove port %d from bonded device.\n",
++				pi);
++			continue;
++		}
++
+ 		need_check_link_status = 0;
+ 		port = &ports[pi];
+ 		if (port->port_status == RTE_PORT_STOPPED)
+@@ -2790,7 +2895,7 @@ start_port(portid_t pid)
+ 				for (k = 0;
+ 				     k < port->dev_info.max_rx_queues;
+ 				     k++)
+-					port->rx_conf[k].offloads |=
++					port->rxq[k].conf.offloads |=
+ 						dev_conf.rxmode.offloads;
+ 			}
+ 			/* Apply Tx offloads configuration */
+@@ -2801,7 +2906,7 @@ start_port(portid_t pid)
+ 				for (k = 0;
+ 				     k < port->dev_info.max_tx_queues;
+ 				     k++)
+-					port->tx_conf[k].offloads |=
++					port->txq[k].conf.offloads |=
+ 						dev_conf.txmode.offloads;
+ 			}
+ 		}
+@@ -2809,20 +2914,28 @@ start_port(portid_t pid)
+ 			port->need_reconfig_queues = 0;
+ 			/* setup tx queues */
+ 			for (qi = 0; qi < nb_txq; qi++) {
++				struct rte_eth_txconf *conf =
++							&port->txq[qi].conf;
++
+ 				if ((numa_support) &&
+ 					(txring_numa[pi] != NUMA_NO_CONFIG))
+ 					diag = rte_eth_tx_queue_setup(pi, qi,
+ 						port->nb_tx_desc[qi],
+ 						txring_numa[pi],
+-						&(port->tx_conf[qi]));
++						&(port->txq[qi].conf));
+ 				else
+ 					diag = rte_eth_tx_queue_setup(pi, qi,
+ 						port->nb_tx_desc[qi],
+ 						port->socket_id,
+-						&(port->tx_conf[qi]));
++						&(port->txq[qi].conf));
+ 
+-				if (diag == 0)
++				if (diag == 0) {
++					port->txq[qi].state =
++						conf->tx_deferred_start ?
++						RTE_ETH_QUEUE_STATE_STOPPED :
++						RTE_ETH_QUEUE_STATE_STARTED;
+ 					continue;
++				}
+ 
+ 				/* Fail to setup tx queue, return */
+ 				if (port->port_status == RTE_PORT_HANDLING)
+@@ -2855,7 +2968,7 @@ start_port(portid_t pid)
+ 					diag = rx_queue_setup(pi, qi,
+ 					     port->nb_rx_desc[qi],
+ 					     rxring_numa[pi],
+-					     &(port->rx_conf[qi]),
++					     &(port->rxq[qi].conf),
+ 					     mp);
+ 				} else {
+ 					struct rte_mempool *mp =
+@@ -2870,7 +2983,7 @@ start_port(portid_t pid)
+ 					diag = rx_queue_setup(pi, qi,
+ 					     port->nb_rx_desc[qi],
+ 					     port->socket_id,
+-					     &(port->rx_conf[qi]),
++					     &(port->rxq[qi].conf),
+ 					     mp);
+ 				}
+ 				if (diag == 0)
+@@ -3090,11 +3203,48 @@ remove_invalid_ports(void)
+ 	nb_cfg_ports = nb_fwd_ports;
+ }
+ 
++static void
++flush_port_owned_resources(portid_t pi)
++{
++	mcast_addr_pool_destroy(pi);
++	port_flow_flush(pi);
++	port_flex_item_flush(pi);
++	port_action_handle_flush(pi);
++}
++
++static void
++clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves)
++{
++	struct rte_port *port;
++	portid_t slave_pid;
++	uint16_t i;
++
++	for (i = 0; i < num_slaves; i++) {
++		slave_pid = slave_pids[i];
++		if (port_is_started(slave_pid) == 1) {
++			if (rte_eth_dev_stop(slave_pid) != 0)
++				fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
++					slave_pid);
++
++			port = &ports[slave_pid];
++			port->port_status = RTE_PORT_STOPPED;
++		}
++
++		clear_port_slave_flag(slave_pid);
++
++		/* Close slave device when testpmd quit or is killed. */
++		if (cl_quit == 1 || f_quit == 1)
++			rte_eth_dev_close(slave_pid);
++	}
++}
++
+ void
+ close_port(portid_t pid)
  {
-     ovs_assert(b->size >= size);
-     void *data = b->data;
+ 	portid_t pi;
+ 	struct rte_port *port;
++	portid_t slave_pids[RTE_MAX_ETHPORTS];
++	int num_slaves = 0;
+ 
+ 	if (port_id_is_invalid(pid, ENABLED_WARN))
+ 		return;
+@@ -3126,9 +3276,20 @@ close_port(portid_t pid)
+ 		}
+ 
+ 		if (is_proc_primary()) {
+-			port_flow_flush(pi);
+-			port_flex_item_flush(pi);
++			flush_port_owned_resources(pi);
++#ifdef RTE_NET_BOND
++			if (port->bond_flag == 1)
++				num_slaves = rte_eth_bond_slaves_get(pi,
++						slave_pids, RTE_MAX_ETHPORTS);
++#endif
+ 			rte_eth_dev_close(pi);
++			/*
++			 * If this port is bonded device, all slaves under the
++			 * device need to be removed or closed.
++			 */
++			if (port->bond_flag == 1 && num_slaves > 0)
++				clear_bonding_slave_device(slave_pids,
++							num_slaves);
+ 		}
+ 
+ 		free_xstats_display_info(pi);
+@@ -3272,7 +3433,7 @@ detach_device(struct rte_device *dev)
+ 					sibling);
+ 				return;
+ 			}
+-			port_flow_flush(sibling);
++			flush_port_owned_resources(sibling);
+ 		}
+ 	}
+ 
+@@ -3339,7 +3500,7 @@ detach_devargs(char *identifier)
+ 				rte_devargs_reset(&da);
+ 				return;
+ 			}
+-			port_flow_flush(port_id);
++			flush_port_owned_resources(port_id);
+ 		}
+ 	}
+ 
+@@ -3645,59 +3806,59 @@ rxtx_port_config(portid_t pid)
+ 	struct rte_port *port = &ports[pid];
+ 
+ 	for (qid = 0; qid < nb_rxq; qid++) {
+-		offloads = port->rx_conf[qid].offloads;
+-		port->rx_conf[qid] = port->dev_info.default_rxconf;
++		offloads = port->rxq[qid].conf.offloads;
++		port->rxq[qid].conf = port->dev_info.default_rxconf;
+ 
+ 		if (rxq_share > 0 &&
+ 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
+ 			/* Non-zero share group to enable RxQ share. */
+-			port->rx_conf[qid].share_group = pid / rxq_share + 1;
+-			port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
++			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
++			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
+ 		}
+ 
+ 		if (offloads != 0)
+-			port->rx_conf[qid].offloads = offloads;
++			port->rxq[qid].conf.offloads = offloads;
+ 
+ 		/* Check if any Rx parameters have been passed */
+ 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
+-			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
++			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
+ 
+ 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
+-			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
++			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
+ 
+ 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
+-			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
++			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
+ 
+ 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
+-			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
++			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
+ 
+ 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
+-			port->rx_conf[qid].rx_drop_en = rx_drop_en;
++			port->rxq[qid].conf.rx_drop_en = rx_drop_en;
+ 
+ 		port->nb_rx_desc[qid] = nb_rxd;
+ 	}
+ 
+ 	for (qid = 0; qid < nb_txq; qid++) {
+-		offloads = port->tx_conf[qid].offloads;
+-		port->tx_conf[qid] = port->dev_info.default_txconf;
++		offloads = port->txq[qid].conf.offloads;
++		port->txq[qid].conf = port->dev_info.default_txconf;
+ 		if (offloads != 0)
+-			port->tx_conf[qid].offloads = offloads;
++			port->txq[qid].conf.offloads = offloads;
+ 
+ 		/* Check if any Tx parameters have been passed */
+ 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
+-			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
++			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
+ 
+ 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
+-			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
++			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
+ 
+ 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
+-			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
++			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
+ 
+ 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
+-			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
++			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
+ 
+ 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
+-			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
++			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
+ 
+ 		port->nb_tx_desc[qid] = nb_txd;
+ 	}
+@@ -3778,7 +3939,7 @@ init_port_config(void)
+ 				for (i = 0;
+ 				     i < port->dev_info.nb_rx_queues;
+ 				     i++)
+-					port->rx_conf[i].offloads &=
++					port->rxq[i].conf.offloads &=
+ 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 			}
+ 		}
+@@ -3952,7 +4113,7 @@ init_port_dcb_config(portid_t pid,
+ 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
+ 		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 		for (i = 0; i < nb_rxq; i++)
+-			rte_port->rx_conf[i].offloads &=
++			rte_port->rxq[i].conf.offloads &=
+ 				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 	}
+ 
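
The EXTBUF_ZONE_SIZE change above shrinks the zone by four cache lines so the reservation plus the allocator's per-zone bookkeeping still fits a single 2M hugepage. A worked check of the arithmetic, assuming the common 64-byte RTE_CACHE_LINE_SIZE (128-byte platforms simply double the margin):

#include <assert.h>
#include <stdio.h>

#define PGSIZE_2M        (2u * 1024 * 1024)
#define CACHE_LINE_SIZE  64u
#define EXTBUF_ZONE_SIZE (PGSIZE_2M - 4 * CACHE_LINE_SIZE)

int
main(void)
{
    /* Zone plus allocator overhead must still fit one 2M hugepage. */
    assert(EXTBUF_ZONE_SIZE + 4 * CACHE_LINE_SIZE == PGSIZE_2M);
    printf("zone=%u bytes (margin=%u)\n",
           EXTBUF_ZONE_SIZE, 4 * CACHE_LINE_SIZE); /* 2096896, 256 */
    return 0;
}
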
+diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h
+index 2149ecd93a..18abee907c 100644
+--- a/dpdk/app/test-pmd/testpmd.h
++++ b/dpdk/app/test-pmd/testpmd.h
+@@ -32,6 +32,8 @@
+ #define RTE_PORT_CLOSED         (uint16_t)2
+ #define RTE_PORT_HANDLING       (uint16_t)3
+ 
++extern uint8_t cl_quit;
++
+ /*
+  * It is used to allocate the memory for hash key.
+  * The hash key size is NIC dependent.
+@@ -72,6 +74,8 @@
+ #define NUMA_NO_CONFIG 0xFF
+ #define UMA_NO_CONFIG  0xFF
+ 
++#define MIN_TOTAL_NUM_MBUFS 1024
++
+ typedef uint8_t  lcoreid_t;
+ typedef uint16_t portid_t;
+ typedef uint16_t queueid_t;
+@@ -134,6 +138,7 @@ struct fwd_stream {
+ 	portid_t   tx_port;   /**< forwarding port of received packets */
+ 	queueid_t  tx_queue;  /**< TX queue to send forwarded packets */
+ 	streamid_t peer_addr; /**< index of peer ethernet address of packets */
++	bool       disabled;  /**< the stream is disabled and should not run */
+ 
+ 	unsigned int retry_enabled;
+ 
+@@ -147,6 +152,7 @@ struct fwd_stream {
+ 	/**< received packets has bad outer l4 checksum */
+ 	uint64_t rx_bad_outer_ip_csum;
+ 	/**< received packets having bad outer ip checksum */
++	uint64_t ts_skew; /**< TX scheduling timestamp */
+ #ifdef RTE_LIB_GRO
+ 	unsigned int gro_times;	/**< GRO operation times */
+ #endif
+@@ -216,6 +222,18 @@ struct xstat_display_info {
+ 	bool	 allocated;
+ };
+ 
++/** RX queue configuration and state. */
++struct port_rxqueue {
++	struct rte_eth_rxconf conf;
++	uint8_t state; /**< RTE_ETH_QUEUE_STATE_* value. */
++};
 +
-+    if (!size) {
-+        return data;
-+    }
++/** TX queue configuration and state. */
++struct port_txqueue {
++	struct rte_eth_txconf conf;
++	uint8_t state; /**< RTE_ETH_QUEUE_STATE_* value. */
++};
 +
-     b->data = (char*)b->data + size;
-     b->size = b->size - size;
-     return data;
-@@ -270,7 +282,7 @@ static inline struct ofpbuf *ofpbuf_from_list(const struct ovs_list *list)
- static inline bool ofpbuf_equal(const struct ofpbuf *a, const struct ofpbuf *b)
+ /**
+  * The data structure associated with each port.
+  */
+@@ -238,11 +256,12 @@ struct rte_port {
+ 	uint8_t                 dcb_flag;   /**< enable dcb */
+ 	uint16_t                nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */
+ 	uint16_t                nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */
+-	struct rte_eth_rxconf   rx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx configuration */
+-	struct rte_eth_txconf   tx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx configuration */
++	struct port_rxqueue     rxq[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue Rx config and state */
++	struct port_txqueue     txq[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue Tx config and state */
+ 	struct rte_ether_addr   *mc_addr_pool; /**< pool of multicast addrs */
+ 	uint32_t                mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
+-	uint8_t                 slave_flag; /**< bonding slave port */
++	uint8_t                 slave_flag : 1, /**< bonding slave port */
++				bond_flag : 1; /**< port is bond device */
+ 	struct port_flow        *flow_list; /**< Associated flows. */
+ 	struct port_indirect_action *actions_list;
+ 	/**< Associated indirect actions. */
+@@ -296,12 +315,14 @@ struct fwd_lcore {
+  */
+ typedef int (*port_fwd_begin_t)(portid_t pi);
+ typedef void (*port_fwd_end_t)(portid_t pi);
++typedef void (*stream_init_t)(struct fwd_stream *fs);
+ typedef void (*packet_fwd_t)(struct fwd_stream *fs);
+ 
+ struct fwd_engine {
+ 	const char       *fwd_mode_name; /**< Forwarding mode name. */
+ 	port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */
+ 	port_fwd_end_t   port_fwd_end;   /**< NULL if nothing special to do. */
++	stream_init_t    stream_init;    /**< NULL if nothing special to do. */
+ 	packet_fwd_t     packet_fwd;     /**< Mandatory. */
+ };
+ 
+@@ -880,6 +901,7 @@ int port_action_handle_create(portid_t port_id, uint32_t id,
+ 			      const struct rte_flow_action *action);
+ int port_action_handle_destroy(portid_t port_id,
+ 			       uint32_t n, const uint32_t *action);
++int port_action_handle_flush(portid_t port_id);
+ struct rte_flow_action_handle *port_action_handle_get_by_id(portid_t port_id,
+ 							    uint32_t id);
+ int port_action_handle_update(portid_t port_id, uint32_t id,
+@@ -897,6 +919,7 @@ int port_flow_create(portid_t port_id,
+ int port_action_handle_query(portid_t port_id, uint32_t id);
+ void update_age_action_context(const struct rte_flow_action *actions,
+ 		     struct port_flow *pf);
++int mcast_addr_pool_destroy(portid_t port_id);
+ int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule);
+ int port_flow_flush(portid_t port_id);
+ int port_flow_dump(portid_t port_id, bool dump_all,
+@@ -1101,6 +1124,8 @@ extern int flow_parse(const char *src, void *result, unsigned int size,
+ 		      struct rte_flow_item **pattern,
+ 		      struct rte_flow_action **actions);
+ 
++const char *rsstypes_to_str(uint64_t rss_type);
++
+ /*
+  * Work-around of a compilation error with ICC on invocations of the
+  * rte_be_to_cpu_16() function.
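
With struct port_rxqueue/port_txqueue above pairing each queue's configuration with an RTE_ETH_QUEUE_STATE_* value, the setup paths in testpmd.c can record a deferred-start queue as stopped and the forwarding loop can skip it without touching the driver. A cut-down sketch of that mapping; the types here are stand-ins for the real testpmd ones:

#include <stdbool.h>
#include <stdio.h>

enum queue_state { STATE_STOPPED, STATE_STARTED };

struct rxqueue {
    bool deferred_start;        /* from rte_eth_rxconf in testpmd */
    enum queue_state state;
};

/* Mirrors the 'exit:' label added to rx_queue_setup() above. */
static void
record_setup_state(struct rxqueue *q)
{
    q->state = q->deferred_start ? STATE_STOPPED : STATE_STARTED;
}

int
main(void)
{
    struct rxqueue q0 = { .deferred_start = false };
    struct rxqueue q1 = { .deferred_start = true };

    record_setup_state(&q0);
    record_setup_state(&q1);
    printf("q0=%s q1=%s\n",
           q0.state == STATE_STARTED ? "started" : "stopped",
           q1.state == STATE_STARTED ? "started" : "stopped");
    return 0;
}
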
+diff --git a/dpdk/app/test-pmd/txonly.c b/dpdk/app/test-pmd/txonly.c
+index b8497e733d..e1bc78b73d 100644
+--- a/dpdk/app/test-pmd/txonly.c
++++ b/dpdk/app/test-pmd/txonly.c
+@@ -59,14 +59,10 @@ uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
+ static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+ RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
+ static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
+-RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew);
+-					/**< Timestamp offset per queue */
+-RTE_DEFINE_PER_LCORE(uint32_t, timestamp_idone); /**< Timestamp init done. */
+ 
+ static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
+ static int32_t timestamp_off; /**< Timestamp dynamic field offset */
+ static bool timestamp_enable; /**< Timestamp enable */
+-static uint32_t timestamp_init_req; /**< Timestamp initialization request. */
+ static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];
+ 
+ static void
+@@ -174,14 +170,14 @@ update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
+ 					sizeof(struct rte_ether_hdr) +
+ 					sizeof(struct rte_ipv4_hdr) +
+ 					sizeof(struct rte_udp_hdr)));
+-	/* updata udp pkt length */
++	/* update UDP packet length */
+ 	udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
+ 				sizeof(struct rte_ether_hdr) +
+ 				sizeof(struct rte_ipv4_hdr));
+ 	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
+ 	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
+ 
+-	/* updata ip pkt length and csum */
++	/* update IP packet length and checksum */
+ 	ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
+ 				sizeof(struct rte_ether_hdr));
+ 	ip_hdr->hdr_checksum = 0;
+@@ -194,7 +190,7 @@ static inline bool
+ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+ 		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
+ 		const uint16_t vlan_tci_outer, const uint64_t ol_flags,
+-		const uint16_t idx, const struct fwd_stream *fs)
++		const uint16_t idx, struct fwd_stream *fs)
  {
-     return a->size == b->size &&
--           memcmp(a->data, b->data, a->size) == 0;
-+           (a->size == 0 || memcmp(a->data, b->data, a->size) == 0);
- }
+ 	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
+ 	struct rte_mbuf *pkt_seg;
+@@ -262,11 +258,10 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+ 		update_pkt_header(pkt, pkt_len);
+ 
+ 	if (unlikely(timestamp_enable)) {
+-		uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
++		uint64_t skew = fs->ts_skew;
+ 		struct tx_timestamp timestamp_mark;
+ 
+-		if (unlikely(timestamp_init_req !=
+-				RTE_PER_LCORE(timestamp_idone))) {
++		if (unlikely(!skew)) {
+ 			struct rte_eth_dev_info dev_info;
+ 			unsigned int txqs_n;
+ 			uint64_t phase;
+@@ -289,8 +284,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+ 			 */
+ 			skew = timestamp_initial[fs->tx_port] +
+ 			       tx_pkt_times_inter + phase;
+-			RTE_PER_LCORE(timestamp_qskew) = skew;
+-			RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
++			fs->ts_skew = skew;
+ 		}
+ 		timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+ 		timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+@@ -300,14 +294,14 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+ 			pkt->ol_flags |= timestamp_mask;
+ 			*RTE_MBUF_DYNFIELD
+ 				(pkt, timestamp_off, uint64_t *) = skew;
+-			RTE_PER_LCORE(timestamp_qskew) = skew;
++			fs->ts_skew = skew;
+ 			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ 		} else if (tx_pkt_times_intra) {
+ 			skew +=	tx_pkt_times_intra;
+ 			pkt->ol_flags |= timestamp_mask;
+ 			*RTE_MBUF_DYNFIELD
+ 				(pkt, timestamp_off, uint64_t *) = skew;
+-			RTE_PER_LCORE(timestamp_qskew) = skew;
++			fs->ts_skew = skew;
+ 			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ 		} else {
+ 			timestamp_mark.ts = RTE_BE64(0);
+@@ -461,7 +455,6 @@ tx_only_begin(portid_t pi)
+ 	timestamp_enable = false;
+ 	timestamp_mask = 0;
+ 	timestamp_off = -1;
+-	RTE_PER_LCORE(timestamp_qskew) = 0;
+ 	dynf = rte_mbuf_dynflag_lookup
+ 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+ 	if (dynf >= 0)
+@@ -504,7 +497,6 @@ tx_only_begin(portid_t pi)
+ 				return -EINVAL;
+ 			}
+ 		}
+-		timestamp_init_req++;
+ 	}
  
- static inline bool ofpbuf_oversized(const struct ofpbuf *ofpacts)
-diff --git a/include/openvswitch/shash.h b/include/openvswitch/shash.h
-index c249e13e1f..4e7badd4dc 100644
---- a/include/openvswitch/shash.h
-+++ b/include/openvswitch/shash.h
-@@ -41,13 +41,24 @@ struct shash {
-                         BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
-                         BUILD_ASSERT_TYPE(SHASH, struct shash *))
+ 	/* Make sure all settings are visible on forwarding cores.*/
+@@ -512,9 +504,17 @@ tx_only_begin(portid_t pi)
+ 	return 0;
+ }
  
--#define SHASH_FOR_EACH_SAFE(SHASH_NODE, NEXT, SHASH)        \
--    HMAP_FOR_EACH_SAFE_INIT (                               \
-+#define SHASH_FOR_EACH_SAFE_SHORT(SHASH_NODE, SHASH)        \
-+    HMAP_FOR_EACH_SAFE_SHORT_INIT (                         \
-+        SHASH_NODE, node, &(SHASH)->map,                    \
-+        BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
-+        BUILD_ASSERT_TYPE(SHASH, struct shash *))
++static void
++tx_only_stream_init(struct fwd_stream *fs)
++{
++	fs->disabled = ports[fs->tx_port].txq[fs->tx_queue].state ==
++						RTE_ETH_QUEUE_STATE_STOPPED;
++}
 +
-+#define SHASH_FOR_EACH_SAFE_LONG(SHASH_NODE, NEXT, SHASH)   \
-+    HMAP_FOR_EACH_SAFE_LONG_INIT (                          \
-         SHASH_NODE, NEXT, node, &(SHASH)->map,              \
-         BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
-         BUILD_ASSERT_TYPE(NEXT, struct shash_node *),       \
-         BUILD_ASSERT_TYPE(SHASH, struct shash *))
+ struct fwd_engine tx_only_engine = {
+ 	.fwd_mode_name  = "txonly",
+ 	.port_fwd_begin = tx_only_begin,
+ 	.port_fwd_end   = NULL,
++	.stream_init    = tx_only_stream_init,
+ 	.packet_fwd     = pkt_burst_transmit,
+ };
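
The txonly.c hunks above replace testpmd's per-lcore timestamp bookkeeping (the RTE_DEFINE_PER_LCORE variables plus an init-request counter) with a single ts_skew field on the forwarding stream, so two streams polled by the same lcore no longer share one skew value. A standalone sketch of the idea, not part of the patch; struct stream and initial_skew are invented stand-ins, and only the ts_skew lazy-init pattern mirrors the patched code:

    #include <stdint.h>
    #include <stdio.h>

    struct stream {                 /* stands in for testpmd's fwd_stream */
        uint16_t tx_queue;
        uint64_t ts_skew;           /* per-stream skew; 0 means "not set yet" */
    };

    static uint64_t initial_skew = 1000;   /* stands in for timestamp_initial[] */

    static uint64_t next_timestamp(struct stream *s, uint64_t inter)
    {
        if (s->ts_skew == 0)        /* lazy init, as in the patched code */
            s->ts_skew = initial_skew + inter;
        else
            s->ts_skew += inter;
        return s->ts_skew;
    }

    int main(void)
    {
        struct stream a = { .tx_queue = 0 }, b = { .tx_queue = 1 };
        /* Both streams may be polled by one lcore; skews stay independent. */
        printf("%llu %llu\n",
               (unsigned long long)next_timestamp(&a, 10),
               (unsigned long long)next_timestamp(&b, 20));
        return 0;
    }
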
+diff --git a/dpdk/app/test-regex/main.c b/dpdk/app/test-regex/main.c
+index 8e665df73c..ca0b0a5d6a 100644
+--- a/dpdk/app/test-regex/main.c
++++ b/dpdk/app/test-regex/main.c
+@@ -385,10 +385,13 @@ run_regex(void *args)
+ 	char *data_buf = rgxc->data_buf;
+ 	long data_len = rgxc->data_len;
+ 	long job_len = rgxc->job_len;
+-
++	long remainder;
++	long act_job_len = 0;
++	bool last_job = false;
+ 	char *buf = NULL;
+ 	uint32_t actual_jobs = 0;
+ 	uint32_t i;
++	uint32_t job_id;
+ 	uint16_t qp_id;
+ 	uint16_t dev_id = 0;
+ 	uint8_t nb_matches;
+@@ -412,8 +415,8 @@ run_regex(void *args)
+ 	mbuf_mp = rte_pktmbuf_pool_create(mbuf_pool,
+ 			rte_align32pow2(nb_jobs * nb_qps * nb_segs),
+ 			0, 0, (nb_segs == 1) ? MBUF_SIZE :
+-			(rte_align32pow2(job_len) / nb_segs +
+-			RTE_PKTMBUF_HEADROOM),
++			(rte_align32pow2(job_len + (data_len % nb_jobs)) /
++			 nb_segs + RTE_PKTMBUF_HEADROOM),
+ 			rte_socket_id());
+ 	if (mbuf_mp == NULL) {
+ 		printf("Error, can't create memory pool\n");
+@@ -459,9 +462,16 @@ run_regex(void *args)
+ 		/* Assign each mbuf with the data to handle. */
+ 		actual_jobs = 0;
+ 		pos = 0;
++		remainder = data_len % nb_jobs;
++
+ 		/* Allocate the jobs and assign each job with an mbuf. */
+ 		for (i = 0; (pos < data_len) && (i < nb_jobs) ; i++) {
+-			long act_job_len = RTE_MIN(job_len, data_len - pos);
++			act_job_len = RTE_MIN(job_len, data_len - pos);
++
++			if (i == (nb_jobs - 1)) {
++				last_job = true;
++				act_job_len += remainder;
++			}
+ 
+ 			ops[i] = rte_malloc(NULL, sizeof(*ops[0]) +
+ 					nb_max_matches *
+@@ -481,7 +491,12 @@ run_regex(void *args)
+ 				if (ops[i]->mbuf) {
+ 					rte_pktmbuf_attach_extbuf(ops[i]->mbuf,
+ 					&buf[pos], 0, act_job_len, &shinfo);
+-					ops[i]->mbuf->data_len = job_len;
++
++					if (!last_job)
++						ops[i]->mbuf->data_len = job_len;
++					else
++						ops[i]->mbuf->data_len = act_job_len;
++
+ 					ops[i]->mbuf->pkt_len = act_job_len;
+ 				}
+ 			}
+@@ -509,6 +524,9 @@ run_regex(void *args)
+ 			qp = &qps[qp_id];
+ 			qp->total_enqueue = 0;
+ 			qp->total_dequeue = 0;
++			/* Re-set user id after dequeue to match data in mbuf. */
++			for (job_id = 0 ; job_id < nb_jobs; job_id++)
++				qp->ops[job_id]->user_id = job_id;
+ 		}
+ 		do {
+ 			update = false;
+@@ -554,10 +572,10 @@ run_regex(void *args)
+ 	for (qp_id = 0; qp_id < nb_qps; qp_id++) {
+ 		qp = &qps[qp_id];
+ 		time = (long double)qp->cycles / rte_get_timer_hz();
+-		printf("Core=%u QP=%u Job=%ld Bytes Time=%Lf sec Perf=%Lf "
++		printf("Core=%u QP=%u Job=%ld Bytes Last Job=%ld Bytes Time=%Lf sec Perf=%Lf "
+ 		       "Gbps\n", rte_lcore_id(), qp_id + qp_id_base,
+-		       job_len, time,
+-		       (((double)actual_jobs * job_len * nb_iterations * 8)
++		       job_len, act_job_len, time,
++		       (((double)data_len * nb_iterations * 8)
+ 		       / time) / 1000000000.0);
+ 	}
  
-+#define SHASH_FOR_EACH_SAFE(...)                                              \
-+    OVERLOAD_SAFE_MACRO(SHASH_FOR_EACH_SAFE_LONG,                             \
-+                        SHASH_FOR_EACH_SAFE_SHORT,                            \
-+                        3, __VA_ARGS__)
+@@ -590,10 +608,10 @@ run_regex(void *args)
+ 			qp->total_matches += nb_matches;
+ 			match = qp->ops[d_ind % actual_jobs]->matches;
+ 			for (i = 0; i < nb_matches; i++) {
+-				printf("start = %ld, len = %d, rule = %d\n",
+-						match->start_offset +
+-						d_ind * job_len,
+-						match->len, match->rule_id);
++				printf("start = %d, len = %d, rule = %d\n",
++					match->start_offset +
++					(int)(qp->ops[d_ind % actual_jobs]->user_id * job_len),
++					match->len, match->rule_id);
+ 				match++;
+ 			}
+ 		}
+@@ -714,6 +732,8 @@ main(int argc, char **argv)
+ 		rte_exit(EXIT_FAILURE, "Number of QPs must be greater than 0\n");
+ 	if (nb_lcores == 0)
+ 		rte_exit(EXIT_FAILURE, "Number of lcores must be greater than 0\n");
++	if (nb_jobs == 0)
++		rte_exit(EXIT_FAILURE, "Number of jobs must be greater than 0\n");
+ 	if (distribute_qps_to_lcores(nb_lcores, nb_qps, &qps_per_lcore) < 0)
+ 		rte_exit(EXIT_FAILURE, "Failed to distribute queues to lcores!\n");
+ 	ret = init_port(&nb_max_payload, rules_file,
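
The test-regex change folds the division remainder (data_len % nb_jobs) into the final job and sizes the mbuf pool for that larger last segment, so the tail of the input file is no longer silently dropped. A minimal sketch of the length arithmetic with invented sizes; in the real tool job_len comes from the command-line options rather than a division:

    #include <stdio.h>

    int main(void)
    {
        long data_len = 103, nb_jobs = 4;        /* invented sizes */
        long job_len = data_len / nb_jobs;       /* 25 */
        long remainder = data_len % nb_jobs;     /* 3; previously left unscanned */
        long covered = 0;

        for (long i = 0; i < nb_jobs; i++) {
            long act_job_len = job_len + (i == nb_jobs - 1 ? remainder : 0);
            covered += act_job_len;
            printf("job %ld: %ld bytes\n", i, act_job_len);
        }
        printf("covered %ld of %ld bytes\n", covered, data_len);
        return 0;
    }
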
+diff --git a/dpdk/app/test/meson.build b/dpdk/app/test/meson.build
+index 2b480adfba..c13776c0b9 100644
+--- a/dpdk/app/test/meson.build
++++ b/dpdk/app/test/meson.build
+@@ -288,8 +288,6 @@ fast_tests = [
+ # Tests known to have issues or which don't belong in other tests lists.
+ extra_test_names = [
+         'alarm_autotest', # ee00af60170b ("test: remove strict timing requirements some tests")
+-        'cycles_autotest', # ee00af60170b ("test: remove strict timing requirements some tests")
+-        'delay_us_sleep_autotest', # ee00af60170b ("test: remove strict timing requirements some tests")
+         'red_autotest', # https://bugs.dpdk.org/show_bug.cgi?id=826
+ ]
+ 
+@@ -492,7 +490,7 @@ dpdk_test = executable('dpdk-test',
+              driver_install_path),
+         install: true)
+ 
+-has_hugepage = run_command('has-hugepage.sh').stdout().strip() != '0'
++has_hugepage = run_command('has-hugepage.sh', check: true).stdout().strip() != '0'
+ message('hugepage availability: @0@'.format(has_hugepage))
+ 
+ # some perf tests (eg: memcpy perf autotest)take very long
+diff --git a/dpdk/app/test/test_barrier.c b/dpdk/app/test/test_barrier.c
+index 6d6d48749c..ec69af25bf 100644
+--- a/dpdk/app/test/test_barrier.c
++++ b/dpdk/app/test/test_barrier.c
+@@ -11,7 +11,7 @@
+   * (https://en.wikipedia.org/wiki/Peterson%27s_algorithm)
+   * for two execution units to make sure that rte_smp_mb() prevents
+   * store-load reordering to happen.
+-  * Also when executed on a single lcore could be used as a approxiamate
++  * Also when executed on a single lcore could be used as an approximate
+   * estimation of number of cycles particular implementation of rte_smp_mb()
+   * will take.
+   */
+diff --git a/dpdk/app/test/test_bpf.c b/dpdk/app/test/test_bpf.c
+index 46bcb51f86..d70bb0fe85 100644
+--- a/dpdk/app/test/test_bpf.c
++++ b/dpdk/app/test/test_bpf.c
+@@ -23,7 +23,7 @@
+ /*
+  * Basic functional tests for librte_bpf.
+  * The main procedure - load eBPF program, execute it and
+- * compare restuls with expected values.
++ * compare results with expected values.
+  */
+ 
+ struct dummy_offset {
+@@ -2707,7 +2707,7 @@ test_ld_mbuf1_check(uint64_t rc, const void *arg)
+ }
+ 
+ /*
+- * same as ld_mbuf1, but then trancate the mbuf by 1B,
++ * same as ld_mbuf1, but then truncate the mbuf by 1B,
+  * so load of last 4B fail.
+  */
+ static void
+@@ -3250,7 +3250,16 @@ test_bpf(void)
+ 
+ REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
+ 
+-#ifdef RTE_HAS_LIBPCAP
++#ifndef RTE_HAS_LIBPCAP
 +
- void shash_init(struct shash *);
- void shash_destroy(struct shash *);
- void shash_destroy_free_data(struct shash *);
-diff --git a/include/openvswitch/util.h b/include/openvswitch/util.h
-index 228b185c3a..8e6c46a85f 100644
---- a/include/openvswitch/util.h
-+++ b/include/openvswitch/util.h
-@@ -145,6 +145,150 @@ OVS_NO_RETURN void ovs_assert_failure(const char *, const char *, const char *);
- #define INIT_CONTAINER(OBJECT, POINTER, MEMBER) \
-     ((OBJECT) = NULL, ASSIGN_CONTAINER(OBJECT, POINTER, MEMBER))
++static int
++test_bpf_convert(void)
++{
++	printf("BPF convert RTE_HAS_LIBPCAP is undefined, skipping test\n");
++	return TEST_SKIPPED;
++}
++
++#else
+ #include <pcap/pcap.h>
+ 
+ static void
+@@ -3259,8 +3268,10 @@ test_bpf_dump(struct bpf_program *cbf, const struct rte_bpf_prm *prm)
+ 	printf("cBPF program (%u insns)\n", cbf->bf_len);
+ 	bpf_dump(cbf, 1);
+ 
+-	printf("\neBPF program (%u insns)\n", prm->nb_ins);
+-	rte_bpf_dump(stdout, prm->ins, prm->nb_ins);
++	if (prm != NULL) {
++		printf("\neBPF program (%u insns)\n", prm->nb_ins);
++		rte_bpf_dump(stdout, prm->ins, prm->nb_ins);
++	}
+ }
+ 
+ static int
+@@ -3446,5 +3457,6 @@ test_bpf_convert(void)
+ 	return rc;
+ }
+ 
+-REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert);
+ #endif /* RTE_HAS_LIBPCAP */
++
++REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert);
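
The test_bpf.c restructuring flips the guard to #ifndef RTE_HAS_LIBPCAP, supplies a stub that returns TEST_SKIPPED, and moves REGISTER_TEST_COMMAND() outside the conditional, so the test name always exists and reports a skip instead of vanishing from the build. A hedged sketch of the pattern; HAVE_FEATURE and the 77 return code are invented stand-ins:

    #include <stdio.h>

    /* #define HAVE_FEATURE 1 */          /* flip on to build the real test */

    #ifndef HAVE_FEATURE
    static int test_feature(void)
    {
        printf("feature not compiled in, skipping test\n");
        return 77;                         /* stand-in for TEST_SKIPPED */
    }
    #else
    static int test_feature(void)
    {
        return 0;                          /* the real test body would live here */
    }
    #endif

    /* Registration (here just a call) happens unconditionally. */
    int main(void)
    {
        printf("rc=%d\n", test_feature());
        return 0;
    }
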
+diff --git a/dpdk/app/test/test_compressdev.c b/dpdk/app/test/test_compressdev.c
+index c63b5b6737..57c566aa92 100644
+--- a/dpdk/app/test/test_compressdev.c
++++ b/dpdk/app/test/test_compressdev.c
+@@ -1256,7 +1256,7 @@ test_deflate_comp_run(const struct interim_data_params *int_data,
+ 		/*
+ 		 * Store original operation index in private data,
+ 		 * since ordering does not have to be maintained,
+-		 * when dequeueing from compressdev, so a comparison
++		 * when dequeuing from compressdev, so a comparison
+ 		 * at the end of the test can be done.
+ 		 */
+ 		priv_data = (struct priv_op_data *) (ops[i] + 1);
+diff --git a/dpdk/app/test/test_crc.c b/dpdk/app/test/test_crc.c
+index bf1d344359..8231f81e4a 100644
+--- a/dpdk/app/test/test_crc.c
++++ b/dpdk/app/test/test_crc.c
+@@ -80,6 +80,8 @@ test_crc_calc(void)
+ 
+ 	/* 32-bit ethernet CRC: Test 2 */
+ 	test_data = rte_zmalloc(NULL, CRC32_VEC_LEN1, 0);
++	if (test_data == NULL)
++		return -7;
+ 
+ 	for (i = 0; i < CRC32_VEC_LEN1; i += 12)
+ 		rte_memcpy(&test_data[i], crc32_vec1, 12);
+diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c
+index 10b48cdadb..b11be735d0 100644
+--- a/dpdk/app/test/test_cryptodev.c
++++ b/dpdk/app/test/test_cryptodev.c
+@@ -209,6 +209,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
+ 	int enqueue_status, dequeue_status;
+ 	struct crypto_unittest_params *ut_params = &unittest_params;
+ 	int is_sgl = sop->m_src->nb_segs > 1;
++	int is_oop = 0;
+ 
+ 	ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
+ 	if (ctx_service_size < 0) {
+@@ -247,6 +248,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
+ 
+ 	ofs.raw = 0;
+ 
++	if ((sop->m_dst != NULL) && (sop->m_dst != sop->m_src))
++		is_oop = 1;
++
+ 	if (is_cipher && is_auth) {
+ 		cipher_offset = sop->cipher.data.offset;
+ 		cipher_len = sop->cipher.data.length;
+@@ -277,6 +281,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
+ 		if (is_sgl) {
+ 			uint32_t remaining_off = auth_offset + auth_len;
+ 			struct rte_mbuf *sgl_buf = sop->m_src;
++			if (is_oop)
++				sgl_buf = sop->m_dst;
+ 
+ 			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+ 					&& sgl_buf->next != NULL) {
+@@ -293,7 +299,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
+ 		/* Then check if digest-encrypted conditions are met */
+ 		if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
+ 				(digest.iova == auth_end_iova) && is_sgl)
+-			max_len = RTE_MAX(max_len, auth_offset + auth_len +
++			max_len = RTE_MAX(max_len,
++				auth_offset + auth_len +
+ 				ut_params->auth_xform.auth.digest_length);
+ 
+ 	} else if (is_cipher) {
+@@ -356,7 +363,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
+ 
+ 	sgl.num = n;
+ 	/* Out of place */
+-	if (sop->m_dst != NULL) {
++	if (is_oop) {
+ 		dest_sgl.vec = dest_data_vec;
+ 		vec.dest_sgl = &dest_sgl;
+ 		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
+@@ -6023,7 +6030,7 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
+ 	retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ 					tdata->cipher_iv.len,
+ 					tdata->plaintext.len,
+-					0);
++					tdata->validCipherOffsetInBits.len);
+ 	if (retval < 0)
+ 		return retval;
+ 
+@@ -6118,7 +6125,7 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
+ 	/* Create ZUC operation */
+ 	retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ 			tdata->cipher_iv.len, tdata->plaintext.len,
+-			0);
++			tdata->validCipherOffsetInBits.len);
+ 	if (retval < 0)
+ 		return retval;
+ 
+@@ -6226,8 +6233,8 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
+ 	else
+ 		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ 				ut_params->op);
+-	ut_params->obuf = ut_params->op->sym->m_src;
+ 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
++	ut_params->obuf = ut_params->op->sym->m_src;
+ 	ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ 			+ plaintext_pad_len;
+ 
+@@ -6553,7 +6560,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
+ 	retval = create_wireless_algo_auth_cipher_operation(
+ 		tdata->digest.data, tdata->digest.len,
+ 		tdata->cipher_iv.data, tdata->cipher_iv.len,
+-		NULL, 0,
++		tdata->auth_iv.data, tdata->auth_iv.len,
+ 		(tdata->digest.offset_bytes == 0 ?
+ 		(verify ? ciphertext_pad_len : plaintext_pad_len)
+ 			: tdata->digest.offset_bytes),
+@@ -6870,7 +6877,7 @@ test_snow3g_decryption_with_digest_test_case_1(void)
+ 	}
+ 
+ 	/*
+-	 * Function prepare data for hash veryfication test case.
++	 * Function prepare data for hash verification test case.
+ 	 * Digest is allocated in 4 last bytes in plaintext, pattern.
+ 	 */
+ 	snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data);
+@@ -10540,9 +10547,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
+ 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+ 	uint64_t feat_flags = dev_info.feature_flags;
+ 
+-	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+-			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)))
++	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) ||
++			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
++		printf("Device does not support RAW data-path APIs.\n");
+ 		return TEST_SKIPPED;
++	}
+ 
+ 	/* not supported with CPU crypto */
+ 	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+@@ -15690,7 +15699,7 @@ test_cryptodev_dpaa2_sec_raw_api(void)
+ static int
+ test_cryptodev_dpaa_sec_raw_api(void)
+ {
+-	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
++	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD);
+ 	int ret;
+ 
+ 	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
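
Among the test_cryptodev.c fixes above, a raw data-path operation is now treated as out-of-place only when m_dst is set and differs from m_src, and that flag drives both the SGL walk and the destination vector setup. A minimal standalone sketch of the predicate; struct op is an invented stand-in for rte_crypto_sym_op:

    #include <stdbool.h>
    #include <stdio.h>

    struct op { void *m_src; void *m_dst; };   /* invented mini sym-op */

    /* Out-of-place only when a destination exists and differs from the
     * source; m_dst == m_src (or NULL) is still in-place. */
    static bool is_oop(const struct op *o)
    {
        return o->m_dst != NULL && o->m_dst != o->m_src;
    }

    int main(void)
    {
        int src, dst;
        struct op in_place  = { &src, &src };
        struct op out_place = { &src, &dst };
        struct op no_dst    = { &src, NULL };
        printf("%d %d %d\n",
               is_oop(&in_place), is_oop(&out_place), is_oop(&no_dst));
        return 0;   /* prints: 0 1 0 */
    }
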
+diff --git a/dpdk/app/test/test_cryptodev_asym.c b/dpdk/app/test/test_cryptodev_asym.c
+index 9d19a6d6d9..1131290e88 100644
+--- a/dpdk/app/test/test_cryptodev_asym.c
++++ b/dpdk/app/test/test_cryptodev_asym.c
+@@ -558,7 +558,7 @@ test_one_case(const void *test_case, int sessionless)
+ 						status = test_cryptodev_asym_op(
+ 							&testsuite_params,
+ 							&tc, test_msg, sessionless, i,
+-							RTE_RSA_KET_TYPE_QT);
++							RTE_RSA_KEY_TYPE_QT);
+ 					}
+ 					if (status)
+ 						break;
+diff --git a/dpdk/app/test/test_cryptodev_rsa_test_vectors.h b/dpdk/app/test/test_cryptodev_rsa_test_vectors.h
+index 48a72e1492..04539a1ecf 100644
+--- a/dpdk/app/test/test_cryptodev_rsa_test_vectors.h
++++ b/dpdk/app/test/test_cryptodev_rsa_test_vectors.h
+@@ -378,7 +378,7 @@ struct rte_crypto_asym_xform rsa_xform_crt = {
+ 			.data = rsa_e,
+ 			.length = sizeof(rsa_e)
+ 		},
+-		.key_type = RTE_RSA_KET_TYPE_QT,
++		.key_type = RTE_RSA_KEY_TYPE_QT,
+ 		.qt = {
+ 			.p = {
+ 				.data = rsa_p,
+diff --git a/dpdk/app/test/test_cryptodev_snow3g_test_vectors.h b/dpdk/app/test/test_cryptodev_snow3g_test_vectors.h
+index bbe05662be..b49a07bcf2 100644
+--- a/dpdk/app/test/test_cryptodev_snow3g_test_vectors.h
++++ b/dpdk/app/test/test_cryptodev_snow3g_test_vectors.h
+@@ -138,11 +138,11 @@ struct snow3g_test_data snow3g_test_case_2 = {
+ 		.len = 16
+ 	},
+ 	.cipher_iv = {
+-	       .data = {
++		.data = {
+ 			0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00,
+ 			0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00
+ 		},
+-	       .len = 16
++		.len = 16
+ 	},
+ 	.plaintext = {
+ 		.data = {
+@@ -359,8 +359,8 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_1 = {
+ 	},
+ 	.cipher_iv = {
+ 		.data = {
+-			0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD,
+-			0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD
++			0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00,
++			0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+@@ -383,13 +383,13 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_1 = {
+ 		.len = 384
+ 	},
+ 	.ciphertext = {
+-	   .data = {
+-			0x95, 0x2E, 0x5A, 0xE1, 0x50, 0xB8, 0x59, 0x2A,
+-			0x9B, 0xA0, 0x38, 0xA9, 0x8E, 0x2F, 0xED, 0xAB,
+-			0xFD, 0xC8, 0x3B, 0x47, 0x46, 0x0B, 0x50, 0x16,
+-			0xEC, 0x88, 0x45, 0xB6, 0x05, 0xC7, 0x54, 0xF8,
+-			0xBD, 0x91, 0xAA, 0xB6, 0xA4, 0xDC, 0x64, 0xB4,
+-			0xCB, 0xEB, 0x97, 0x06, 0x4C, 0xF7, 0x02, 0x3D
++	  .data = {
++			0x86, 0x4F, 0x4D, 0xE8, 0x86, 0xE6, 0x3E, 0x66,
++			0x52, 0x97, 0xC7, 0x62, 0xAE, 0x8E, 0xA2, 0xDB,
++			0x01, 0xD6, 0x33, 0xA9, 0xA4, 0xCE, 0x02, 0xD5,
++			0xC2, 0xC5, 0x5F, 0x90, 0xE0, 0x89, 0x48, 0xD4,
++			0x92, 0xF4, 0xE5, 0x9A, 0xDA, 0x13, 0x76, 0xFF,
++			0x6E, 0x76, 0x6B, 0x71, 0x62, 0x28, 0xB2, 0xEC
+ 		},
+ 		.len = 384
+ 	},
+@@ -428,15 +428,15 @@ struct snow3g_test_data snow3g_test_case_7 = {
+ 	},
+ 	.cipher_iv = {
+ 		.data = {
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
++			0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00,
++			0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+ 	.auth_iv = {
+ 		.data = {
+-			 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A,
++			0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A
+ 		},
+ 		.len = 16
+ 	},
+@@ -457,28 +457,28 @@ struct snow3g_test_data snow3g_test_case_7 = {
+ 			0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,
+ 			0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,
+ 			0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,  0x5A,
+-			0x5A,  0x5A,  0x5A,  0x5A,  0xF1,  0x9E,  0x2B,  0x6F,
++			0x5A,  0x5A,  0x5A,  0x5A,  0xBB,  0x2B,  0x8B,  0x15,
+ 		},
+ 		.len = 128 << 3
+ 	},
+ 	.ciphertext = {
+ 		.data = {
+-			0x5A,  0x5A,  0xE4,  0xAD,  0x29,  0xA2,  0x6A,  0xA6,
+-			0x20,  0x1D,  0xCD,  0x08,  0x50,  0xD6,  0xE6,  0x47,
+-			0xBC,  0x88,  0x08,  0x01,  0x17,  0xFA,  0x47,  0x5B,
+-			0x90,  0x40,  0xBA,  0x0C,  0xB5,  0x58,  0xF3,  0x0C,
+-			0xA0,  0xD4,  0x98,  0x83,  0x1B,  0xCE,  0x54,  0xE3,
+-			0x29,  0x00,  0x3C,  0xA4,  0xAD,  0x74,  0xEE,  0x05,
+-			0xA3,  0x6C,  0xD4,  0xAC,  0xC6,  0x30,  0x33,  0xC9,
+-			0x37,  0x57,  0x41,  0x9B,  0xD4,  0x73,  0xB9,  0x77,
+-			0x70,  0x8B,  0x63,  0xDD,  0x22,  0xB8,  0xE1,  0x85,
+-			0xB2,  0x92,  0x7C,  0x37,  0xD3,  0x2E,  0xD9,  0xF4,
+-			0x4A,  0x69,  0x25,  0x30,  0xE3,  0x5B,  0x8B,  0xF6,
+-			0x0F,  0xDE,  0x0B,  0x92,  0xD5,  0x25,  0x52,  0x6D,
+-			0x26,  0xEB,  0x2F,  0x8A,  0x3B,  0x8B,  0x38,  0xE2,
+-			0x48,  0xD3,  0x4A,  0x98,  0xF7,  0x3A,  0xC2,  0x46,
+-			0x69,  0x8D,  0x73,  0x3E,  0x57,  0x88,  0x2C,  0x80,
+-			0xF0,  0xF2,  0x75,  0xB8,  0x7D,  0x27,  0xC6,  0xDA,
++			0x5A,  0x5A,  0x8A,  0x35,  0xF7,  0x36,  0xDA,  0xD7,
++			0xC4,  0x2C,  0x10,  0xEA,  0x92,  0x9C,  0x00,  0xF0,
++			0xAE,  0x35,  0x5E,  0x8D,  0xB6,  0x88,  0x30,  0x66,
++			0x74,  0x8B,  0xA2,  0x82,  0x5C,  0xA7,  0xF3,  0x54,
++			0x75,  0x02,  0xA9,  0x90,  0x6B,  0x4B,  0x6A,  0x63,
++			0xFF,  0x4B,  0x08,  0xFE,  0x11,  0x3C,  0x5A,  0x53,
++			0xEE,  0x68,  0x14,  0x41,  0x17,  0xCD,  0x7B,  0x27,
++			0x88,  0xAF,  0x99,  0xE2,  0x9C,  0x86,  0x42,  0x12,
++			0x97,  0x93,  0xF0,  0xE6,  0xE2,  0xB2,  0x2D,  0xDA,
++			0x2C,  0x59,  0xB0,  0xA7,  0x09,  0xF6,  0x32,  0xC0,
++			0x35,  0x9A,  0xD3,  0xBA,  0xDC,  0x8F,  0x2E,  0x18,
++			0x97,  0x87,  0x44,  0xD6,  0x43,  0xFA,  0x86,  0x5A,
++			0xB0,  0xA2,  0x5A,  0xB8,  0x5F,  0x57,  0xE3,  0x2F,
++			0x73,  0x9C,  0x01,  0x3A,  0x02,  0x08,  0x8C,  0xEB,
++			0xA0,  0x5D,  0x74,  0x58,  0x5A,  0xA1,  0x58,  0x17,
++			0x5E,  0x86,  0x96,  0xE6,  0x9C,  0xEE,  0x8C,  0xA8
+ 
+ 		},
+ 		.len = 128 << 3
+@@ -493,7 +493,7 @@ struct snow3g_test_data snow3g_test_case_7 = {
+ 	},
+ 	.digest = {
+ 		.data = {
+-			0x7D, 0x27, 0xC6, 0xDA
++			0x9C, 0xEE, 0x8C, 0xA8
+ 		},
+ 		.len = 4,
+ 		.offset_bytes = 124
+@@ -520,15 +520,15 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_2 = {
+ 	},
+ 	.cipher_iv = {
+ 		.data = {
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
++			0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
++			0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+ 	.auth_iv = {
+ 		.data = {
+-			 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2,
++			0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2
+ 		},
+ 		.len = 16
+ 	},
+@@ -556,22 +556,22 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_2 = {
+ 	},
+ 	.ciphertext = {
+ 		.data = {
+-			0x5A,  0x5A,  0xE4,  0xAD,  0x29,  0xA2,  0x6A,  0xA6,
+-			0x20,  0x1D,  0xCD,  0x08,  0x50,  0xD6,  0xE6,  0x47,
+-			0xBC,  0x88,  0x08,  0x01,  0x17,  0xFA,  0x47,  0x5B,
+-			0x90,  0x40,  0xBA,  0x0C,  0xB5,  0x58,  0xF3,  0x0C,
+-			0xA0,  0xD4,  0x98,  0x83,  0x1B,  0xCE,  0x54,  0xE3,
+-			0x29,  0x00,  0x3C,  0xA4,  0xAD,  0x74,  0xEE,  0x05,
+-			0xA3,  0x6C,  0xD4,  0xAC,  0xC6,  0x30,  0x33,  0xC9,
+-			0x37,  0x57,  0x41,  0x9B,  0xD4,  0x73,  0xB9,  0x77,
+-			0x70,  0x8B,  0x63,  0xDD,  0x22,  0xB8,  0xE1,  0x85,
+-			0xB2,  0x92,  0x7C,  0x37,  0xD3,  0x2E,  0xD9,  0xF4,
+-			0x4A,  0x69,  0x25,  0x30,  0xE3,  0x5B,  0x8B,  0xF6,
+-			0x0F,  0xDE,  0x0B,  0x92,  0xD5,  0x25,  0x52,  0x6D,
+-			0x26,  0xEB,  0x2F,  0x8A,  0x3B,  0x8B,  0x38,  0xE2,
+-			0x48,  0xD3,  0x4A,  0x98,  0xF7,  0x3A,  0xC2,  0x46,
+-			0x69,  0x8D,  0x73,  0x3E,  0x57,  0x88,  0x2C,  0x80,
+-			0xF0,  0xF2,  0x75,  0xB8,  0x7D,  0x27,  0xC6,  0xDA,
++			0x5A,  0x5A,  0xCF,  0xCF,  0x3D,  0x11,  0xBF,  0xD9,
++			0xC3,  0x7F,  0x7C,  0xA8,  0x1A,  0x9F,  0x9F,  0x34,
++			0xC5,  0x6E,  0x1B,  0x2C,  0xE0,  0x81,  0x4B,  0x66,
++			0x87,  0xCB,  0xD5,  0x61,  0x04,  0xED,  0xBC,  0x69,
++			0x79,  0x86,  0x73,  0x48,  0x69,  0x4A,  0xBA,  0x55,
++			0x44,  0x6C,  0xEF,  0xD9,  0x34,  0x61,  0x59,  0x67,
++			0x80,  0x4E,  0x03,  0x95,  0x0A,  0xA1,  0x6C,  0xBA,
++			0x74,  0xBD,  0xAF,  0x11,  0x4B,  0xE6,  0x98,  0x61,
++			0x4E,  0xD4,  0x3E,  0xE4,  0x99,  0x55,  0x5C,  0x3A,
++			0x8C,  0x3E,  0xC0,  0x01,  0x6E,  0x15,  0xE1,  0x0E,
++			0x71,  0x4C,  0x89,  0x43,  0x8A,  0x48,  0x69,  0x6D,
++			0x02,  0x10,  0xC6,  0x54,  0x37,  0x18,  0xAA,  0x10,
++			0x90,  0x80,  0x0B,  0x69,  0x08,  0xB4,  0xF9,  0x4D,
++			0xD1,  0x2E,  0x43,  0xD9,  0x92,  0xAF,  0x06,  0x4A,
++			0xAF,  0x26,  0x25,  0x77,  0x37,  0xD0,  0xFC,  0x3C,
++			0xA0,  0xCB,  0xAF,  0x06,  0x95,  0x26,  0x30,  0x38,
+ 
+ 		},
+ 		.len = 128 << 3
+@@ -586,7 +586,7 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_2 = {
+ 	},
+ 	.digest = {
+ 		.data = {
+-			0x7D, 0x27, 0xC6, 0xDA
++			0x95, 0x26, 0x30, 0x38
+ 		},
+ 		.len = 4,
+ 		.offset_bytes = 124
+@@ -613,15 +613,15 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_3 = {
+ 	},
+ 	.cipher_iv = {
+ 		.data = {
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
++			0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00,
++			0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+ 	.auth_iv = {
+ 		.data = {
+-			 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37,
++			0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37
+ 		},
+ 		.len = 16
+ 	},
+@@ -636,10 +636,10 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_3 = {
+ 	},
+ 	.ciphertext = {
+ 		.data = {
+-			0x5A, 0x5A, 0xE4, 0xAD, 0x29, 0xA2, 0x6A, 0xA6,
+-			0x20, 0x1D, 0xCD, 0x08, 0x50, 0xD6, 0xE6, 0x47,
+-			0xBC, 0x88, 0x08, 0x01, 0x17, 0xFA, 0x47, 0x5B,
+-			0x90, 0x40, 0xBA, 0x0C, 0xBA, 0x6D, 0x6A, 0x5E,
++			0x5A, 0x5A, 0x93, 0xB0, 0x3F, 0xA4, 0xEB, 0xD4,
++			0x51, 0x12, 0x3B, 0x95, 0x93, 0x12, 0xBF, 0xBE,
++			0xF2, 0xFE, 0xA5, 0xAE, 0xE7, 0xF4, 0x80, 0x3E,
++			0xB2, 0xD1, 0xFF, 0x5F, 0xD9, 0x32, 0x72, 0xFE,
+ 		},
+ 		.len = 32 << 3
+ 	},
+@@ -653,7 +653,7 @@ struct snow3g_test_data snow3g_auth_cipher_test_case_3 = {
+ 	},
+ 	.digest = {
+ 		.data = {
+-			0xBA, 0x6D, 0x6A, 0x5E
++			0xD9, 0x32, 0x72, 0xFE
+ 		},
+ 		.len = 4,
+ 		.offset_bytes = 28
+@@ -680,15 +680,15 @@ struct snow3g_test_data snow3g_auth_cipher_partial_digest_encryption = {
+ 	},
+ 	.cipher_iv = {
+ 		.data = {
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
++			0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00,
++			0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+ 	.auth_iv = {
+ 		.data = {
+-			 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD,
++			0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD
+ 		},
+ 		.len = 16
+ 	},
+@@ -704,9 +704,9 @@ struct snow3g_test_data snow3g_auth_cipher_partial_digest_encryption = {
+ 	.ciphertext = {
+ 		.data = {
+ 			0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A,
+-			0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0xE4, 0xAD,
+-			0x29, 0xA2, 0x6A, 0xA6, 0x20, 0x1D, 0xCD, 0x08,
+-			0x50, 0xD6, 0xE6, 0x47, 0xB3, 0xBD, 0xC3, 0x08
++			0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0xA2, 0xB7,
++			0xDF, 0xA7, 0x98, 0xA1, 0xD8, 0xD4, 0x9B, 0x6E,
++			0x2C, 0x7A, 0x66, 0x15, 0xCC, 0x4C, 0xE5, 0xE0
+ 		},
+ 		.len = 32 << 3
+ 	},
+@@ -720,7 +720,7 @@ struct snow3g_test_data snow3g_auth_cipher_partial_digest_encryption = {
+ 	},
+ 	.digest = {
+ 		.data = {
+-			0xB3, 0xBD, 0xC3, 0x08
++			0xCC, 0x4C, 0xE5, 0xE0
+ 		},
+ 		.len = 4,
+ 		.offset_bytes = 28
+diff --git a/dpdk/app/test/test_cryptodev_zuc_test_vectors.h b/dpdk/app/test/test_cryptodev_zuc_test_vectors.h
+index 5d1d264579..299d7649fe 100644
+--- a/dpdk/app/test/test_cryptodev_zuc_test_vectors.h
++++ b/dpdk/app/test/test_cryptodev_zuc_test_vectors.h
+@@ -558,13 +558,13 @@ static struct wireless_test_data zuc_test_case_cipher_200b_auth_200b = {
+ 	},
+ 	.auth_iv = {
+ 		.data = {
+-			0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
+-			0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
++			0xFA, 0x55, 0x6B, 0x26, 0x18, 0x00, 0x00, 0x00,
++			0xFA, 0x55, 0x6B, 0x26, 0x18, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+ 	.digest = {
+-		.data = {0x01, 0xFE, 0x5E, 0x38},
++		.data = {0x2F, 0x45, 0x7D, 0x7B},
+ 		.len  = 4
+ 	},
+ 	.validAuthLenInBits = {
+@@ -631,13 +631,13 @@ static struct wireless_test_data zuc_test_case_cipher_800b_auth_120b = {
+ 	},
+ 	.auth_iv = {
+ 		.data = {
+-			0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
+-			0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
++			0xFA, 0x55, 0x6B, 0x26, 0x18, 0x00, 0x00, 0x00,
++			0xFA, 0x55, 0x6B, 0x26, 0x18, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+ 	.digest = {
+-		.data = {0x9D, 0x42, 0x1C, 0xEA},
++		.data = {0xCA, 0xBB, 0x8D, 0x94},
+ 		.len  = 4
+ 	},
+ 	.validAuthLenInBits = {
+@@ -1166,15 +1166,15 @@ struct wireless_test_data zuc_auth_cipher_test_case_1 = {
+ 	},
+ 	.cipher_iv = {
+ 		.data = {
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00,
++			0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+ 	.auth_iv = {
+ 		.data = {
+-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+-			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			0xFA, 0x55, 0x6B, 0x26, 0x18, 0x00, 0x00, 0x00,
++			0xFA, 0x55, 0x6B, 0x26, 0x18, 0x00, 0x00, 0x00
+ 		},
+ 		.len = 16
+ 	},
+@@ -1201,22 +1201,22 @@ struct wireless_test_data zuc_auth_cipher_test_case_1 = {
+ 	},
+ 	.ciphertext = {
+ 		.data = {
+-			0x5A, 0x5A, 0xDB, 0x3D, 0xD5, 0xB7, 0xB9, 0x58,
+-			0xA5, 0xD3, 0xE3, 0xF9, 0x18, 0x73, 0xB4, 0x74,
+-			0x05, 0xF0, 0xE9, 0xB6, 0x5D, 0x9A, 0xE3, 0xFA,
+-			0x5D, 0xFD, 0x24, 0x51, 0xAD, 0x73, 0xCA, 0x64,
+-			0x91, 0xD5, 0xB3, 0x94, 0x10, 0x91, 0x89, 0xEA,
+-			0x73, 0x6F, 0xB0, 0x2A, 0x0A, 0x63, 0x0F, 0x8D,
+-			0x64, 0x87, 0xA3, 0x14, 0x6B, 0x93, 0x31, 0x0F,
+-			0x14, 0xAD, 0xEA, 0x62, 0x80, 0x3F, 0x44, 0xDD,
+-			0x4E, 0x30, 0xFA, 0xC8, 0x0E, 0x5F, 0x46, 0xE7,
+-			0x60, 0xEC, 0xDF, 0x8B, 0x94, 0x7D, 0x2E, 0x63,
+-			0x48, 0xD9, 0x69, 0x06, 0x13, 0xF2, 0x20, 0x49,
+-			0x54, 0xA6, 0xD4, 0x98, 0xF4, 0xF6, 0x1D, 0x4A,
+-			0xC9, 0xA5, 0xDA, 0x46, 0x3D, 0xD9, 0x02, 0x47,
+-			0x1C, 0x20, 0x73, 0x35, 0x17, 0x1D, 0x81, 0x8D,
+-			0x2E, 0xCD, 0x70, 0x37, 0x22, 0x55, 0x3C, 0xF3,
+-			0xDA, 0x70, 0x42, 0x12, 0x0E, 0xAA, 0xC4, 0xAB
++			0x5A, 0x5A, 0x94, 0xE7, 0xB8, 0xD7, 0x4E, 0xBB,
++			0x4C, 0xC3, 0xD1, 0x16, 0xFC, 0x8C, 0xE4, 0x27,
++			0x44, 0xEC, 0x04, 0x26, 0x60, 0x9C, 0xFF, 0x81,
++			0xB6, 0x2B, 0x48, 0x1D, 0xEE, 0x26, 0xF7, 0x58,
++			0x40, 0x38, 0x58, 0xEA, 0x22, 0x23, 0xE6, 0x34,
++			0x9A, 0x69, 0x32, 0x68, 0xBD, 0xDD, 0x7D, 0xA3,
++			0xC0, 0x04, 0x79, 0xF0, 0xF1, 0x58, 0x78, 0x5E,
++			0xD0, 0xDF, 0x27, 0x9A, 0x53, 0x70, 0x5D, 0xFB,
++			0x1B, 0xCA, 0xBA, 0x97, 0x12, 0x1F, 0x59, 0x6B,
++			0x75, 0x7B, 0x94, 0xF6, 0xE7, 0xFA, 0x49, 0x6B,
++			0x7D, 0x7F, 0x8F, 0x0F, 0x78, 0x56, 0x40, 0x52,
++			0x84, 0x3E, 0xA9, 0xE8, 0x84, 0x6F, 0xEF, 0xFB,
++			0x4A, 0x48, 0x3A, 0x4C, 0x81, 0x98, 0xDD, 0x17,
++			0x89, 0x66, 0x3B, 0xC0, 0xEC, 0x71, 0xDB, 0xF6,
++			0x44, 0xDF, 0xA7, 0x97, 0xB2, 0x9B, 0x84, 0xA7,
++			0x2D, 0x2D, 0xC1, 0x93, 0x12, 0x37, 0xEA, 0xD2
+ 		},
+ 		.len = 128 << 3
+ 	},
+@@ -1233,7 +1233,7 @@ struct wireless_test_data zuc_auth_cipher_test_case_1 = {
+ 		.len = 2 << 3
+ 	},
+ 	.digest = {
+-		.data = {0x0E, 0xAA, 0xC4, 0xAB},
++		.data = {0x12, 0x37, 0xEA, 0xD2},
+ 		.len  = 4,
+ 		.offset_bytes = 124
+ 	}
+diff --git a/dpdk/app/test/test_dmadev.c b/dpdk/app/test/test_dmadev.c
+index b206db27ae..e40c29c23b 100644
+--- a/dpdk/app/test/test_dmadev.c
++++ b/dpdk/app/test/test_dmadev.c
+@@ -774,6 +774,9 @@ test_dmadev_instance(int16_t dev_id)
+ 	if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
+ 		ERR_RETURN("Error with rte_dma_stats_get()\n");
+ 
++	if (rte_dma_burst_capacity(dev_id, vchan) < 32)
++		ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests");
++
+ 	if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
+ 		ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
+ 				"submitted = %"PRIu64", errors = %"PRIu64"\n",
+@@ -795,7 +798,10 @@ test_dmadev_instance(int16_t dev_id)
+ 		goto err;
+ 
+ 	/* run some burst capacity tests */
+-	if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0)
++	if (rte_dma_burst_capacity(dev_id, vchan) < 64)
++		printf("DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
++				dev_id);
++	else if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0)
+ 		goto err;
+ 
+ 	/* to test error handling we can provide null pointers for source or dest in copies. This
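
The test_dmadev.c changes gate the suite on rte_dma_burst_capacity(): below 32 the instance test errors out, and below 64 the burst-capacity tests are skipped with a message rather than being allowed to fail. A standalone sketch of that gating, with the device query faked; the 32/64 thresholds are the patch's own:

    #include <stdio.h>

    static int burst_capacity(void) { return 48; }   /* fake device query */

    int main(void)
    {
        int cap = burst_capacity();
        if (cap < 32) {
            printf("error: insufficient burst capacity to run tests\n");
            return 1;
        }
        if (cap < 64)
            printf("insufficient burst capacity (64 required), skipping tests\n");
        else
            printf("running burst capacity tests\n");
        return 0;
    }
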
+diff --git a/dpdk/app/test/test_efd.c b/dpdk/app/test/test_efd.c
+index 1b249e0447..c10c48cf37 100644
+--- a/dpdk/app/test/test_efd.c
++++ b/dpdk/app/test/test_efd.c
+@@ -98,7 +98,7 @@ static inline uint64_t efd_get_all_sockets_bitmask(void)
+ 	unsigned int next_lcore = rte_get_main_lcore();
+ 	const int val_true = 1, val_false = 0;
+ 	for (i = 0; i < rte_lcore_count(); i++) {
+-		all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
++		all_cpu_sockets_bitmask |= 1ULL << rte_lcore_to_socket_id(next_lcore);
+ 		next_lcore = rte_get_next_lcore(next_lcore, val_false, val_true);
+ 	}
+ 
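
The one-line EFD fix widens a shift: with socket ids of 32 or more, `1 << rte_lcore_to_socket_id(...)` shifts a plain int, which is undefined before the result is ever widened into the uint64_t bitmask. Sketch of the pitfall; the undefined variant is left commented out so the example stays well-defined:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int socket_id = 33;              /* plausible on large systems */
        /* uint64_t bad = 1 << socket_id; */      /* int-width shift: undefined */
        uint64_t good = 1ULL << socket_id;        /* 64-bit shift, as patched */
        printf("bit %u -> 0x%llx\n", socket_id, (unsigned long long)good);
        return 0;
    }
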
+diff --git a/dpdk/app/test/test_fib_perf.c b/dpdk/app/test/test_fib_perf.c
+index 86b2f832b8..7a25fe8df7 100644
+--- a/dpdk/app/test/test_fib_perf.c
++++ b/dpdk/app/test/test_fib_perf.c
+@@ -346,7 +346,7 @@ test_fib_perf(void)
+ 	fib = rte_fib_create(__func__, SOCKET_ID_ANY, &config);
+ 	TEST_FIB_ASSERT(fib != NULL);
+ 
+-	/* Measue add. */
++	/* Measure add. */
+ 	begin = rte_rdtsc();
+ 
+ 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+diff --git a/dpdk/app/test/test_hash_readwrite_lf_perf.c b/dpdk/app/test/test_hash_readwrite_lf_perf.c
+index 8120cf43be..32f9ec9250 100644
+--- a/dpdk/app/test/test_hash_readwrite_lf_perf.c
++++ b/dpdk/app/test/test_hash_readwrite_lf_perf.c
+@@ -59,7 +59,7 @@ struct rwc_perf {
+ 	uint32_t w_ks_r_hit_nsp[2][NUM_TEST];
+ 	uint32_t w_ks_r_hit_sp[2][NUM_TEST];
+ 	uint32_t w_ks_r_miss[2][NUM_TEST];
+-	uint32_t multi_rw[NUM_TEST - 1][2][NUM_TEST];
++	uint32_t multi_rw[NUM_TEST][2][NUM_TEST];
+ 	uint32_t w_ks_r_hit_extbkt[2][NUM_TEST];
+ 	uint32_t writer_add_del[NUM_TEST];
+ };
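
The struct change above fixes an off-by-one in array sizing: multi_rw is indexed with values up to NUM_TEST - 1, so a first dimension of [NUM_TEST - 1] overruns on the last test. A minimal illustration of the rule that a dimension of N gives valid indices 0..N-1:

    #include <stdio.h>

    #define NUM_TEST 4

    int main(void)
    {
        /* int bad[NUM_TEST - 1]; */   /* valid indices 0..2, but index 3 is used */
        int good[NUM_TEST];            /* valid indices 0..NUM_TEST-1 */
        for (int i = 0; i < NUM_TEST; i++)
            good[i] = i;
        printf("%d\n", good[NUM_TEST - 1]);
        return 0;
    }
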
+diff --git a/dpdk/app/test/test_ipsec.c b/dpdk/app/test/test_ipsec.c
+index bc2a3dbc2e..3c6dcdc604 100644
+--- a/dpdk/app/test/test_ipsec.c
++++ b/dpdk/app/test/test_ipsec.c
+@@ -543,12 +543,14 @@ struct rte_ipv4_hdr ipv4_outer  = {
+ };
+ 
+ static struct rte_mbuf *
+-setup_test_string(struct rte_mempool *mpool,
+-		const char *string, size_t len, uint8_t blocksize)
++setup_test_string(struct rte_mempool *mpool, const char *string,
++	size_t string_len, size_t len, uint8_t blocksize)
+ {
+ 	struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
+ 	size_t t_len = len - (blocksize ? (len % blocksize) : 0);
+ 
++	RTE_VERIFY(len <= string_len);
++
+ 	if (m) {
+ 		memset(m->buf_addr, 0, m->buf_len);
+ 		char *dst = rte_pktmbuf_append(m, t_len);
+@@ -1354,7 +1356,8 @@ test_ipsec_crypto_outb_burst_null_null(int i)
+ 	/* Generate input mbuf data */
+ 	for (j = 0; j < num_pkts && rc == 0; j++) {
+ 		ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool,
+-			null_plain_data, test_cfg[i].pkt_sz, 0);
++			null_plain_data, sizeof(null_plain_data),
++			test_cfg[i].pkt_sz, 0);
+ 		if (ut_params->ibuf[j] == NULL)
+ 			rc = TEST_FAILED;
+ 		else {
+@@ -1472,7 +1475,8 @@ test_ipsec_inline_crypto_inb_burst_null_null(int i)
+ 			/* Generate test mbuf data */
+ 			ut_params->obuf[j] = setup_test_string(
+ 				ts_params->mbuf_pool,
+-				null_plain_data, test_cfg[i].pkt_sz, 0);
++				null_plain_data, sizeof(null_plain_data),
++				test_cfg[i].pkt_sz, 0);
+ 			if (ut_params->obuf[j] == NULL)
+ 				rc = TEST_FAILED;
+ 		}
+@@ -1540,16 +1544,17 @@ test_ipsec_inline_proto_inb_burst_null_null(int i)
+ 
+ 	/* Generate inbound mbuf data */
+ 	for (j = 0; j < num_pkts && rc == 0; j++) {
+-		ut_params->ibuf[j] = setup_test_string(
+-			ts_params->mbuf_pool,
+-			null_plain_data, test_cfg[i].pkt_sz, 0);
++		ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool,
++			null_plain_data, sizeof(null_plain_data),
++			test_cfg[i].pkt_sz, 0);
+ 		if (ut_params->ibuf[j] == NULL)
+ 			rc = TEST_FAILED;
+ 		else {
+ 			/* Generate test mbuf data */
+ 			ut_params->obuf[j] = setup_test_string(
+ 				ts_params->mbuf_pool,
+-				null_plain_data, test_cfg[i].pkt_sz, 0);
++				null_plain_data, sizeof(null_plain_data),
++				test_cfg[i].pkt_sz, 0);
+ 			if (ut_params->obuf[j] == NULL)
+ 				rc = TEST_FAILED;
+ 		}
+@@ -1649,7 +1654,8 @@ test_ipsec_inline_crypto_outb_burst_null_null(int i)
+ 	/* Generate test mbuf data */
+ 	for (j = 0; j < num_pkts && rc == 0; j++) {
+ 		ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool,
+-			null_plain_data, test_cfg[i].pkt_sz, 0);
++			null_plain_data, sizeof(null_plain_data),
++			test_cfg[i].pkt_sz, 0);
+ 		if (ut_params->ibuf[0] == NULL)
+ 			rc = TEST_FAILED;
+ 
+@@ -1727,15 +1733,17 @@ test_ipsec_inline_proto_outb_burst_null_null(int i)
+ 	/* Generate test mbuf data */
+ 	for (j = 0; j < num_pkts && rc == 0; j++) {
+ 		ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool,
+-			null_plain_data, test_cfg[i].pkt_sz, 0);
++			null_plain_data, sizeof(null_plain_data),
++			test_cfg[i].pkt_sz, 0);
+ 		if (ut_params->ibuf[0] == NULL)
+ 			rc = TEST_FAILED;
+ 
+ 		if (rc == 0) {
+ 			/* Generate test tunneled mbuf data for comparison */
+ 			ut_params->obuf[j] = setup_test_string(
+-					ts_params->mbuf_pool,
+-					null_plain_data, test_cfg[i].pkt_sz, 0);
++				ts_params->mbuf_pool, null_plain_data,
++				sizeof(null_plain_data), test_cfg[i].pkt_sz,
++				0);
+ 			if (ut_params->obuf[j] == NULL)
+ 				rc = TEST_FAILED;
+ 		}
+@@ -1804,7 +1812,8 @@ test_ipsec_lksd_proto_inb_burst_null_null(int i)
+ 	for (j = 0; j < num_pkts && rc == 0; j++) {
+ 		/* packet with sequence number 0 is invalid */
+ 		ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool,
+-			null_encrypted_data, test_cfg[i].pkt_sz, 0);
++			null_encrypted_data, sizeof(null_encrypted_data),
++			test_cfg[i].pkt_sz, 0);
+ 		if (ut_params->ibuf[j] == NULL)
+ 			rc = TEST_FAILED;
+ 	}
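
setup_test_string() now receives the true size of its source buffer and RTE_VERIFY()s that the requested length fits, so every caller passing sizeof(null_plain_data) turns a silent over-read into a loud failure. A hedged standalone analogue; fill_from() is invented, and assert() stands in for RTE_VERIFY():

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented analogue of the patched setup_test_string(): callers pass the
     * real size of `string` so the helper can refuse to read past its end. */
    static void fill_from(const char *string, size_t string_len,
                          char *dst, size_t len)
    {
        assert(len <= string_len);    /* RTE_VERIFY() in the patch */
        memcpy(dst, string, len);
    }

    int main(void)
    {
        static const char payload[64] = "abc";
        char buf[32];
        fill_from(payload, sizeof(payload), buf, sizeof(buf));
        printf("%.3s\n", buf);
        return 0;
    }
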
+diff --git a/dpdk/app/test/test_ipsec_perf.c b/dpdk/app/test/test_ipsec_perf.c
+index 92106bf374..7e07805ea3 100644
+--- a/dpdk/app/test/test_ipsec_perf.c
++++ b/dpdk/app/test/test_ipsec_perf.c
+@@ -580,8 +580,8 @@ testsuite_teardown(void)
+ static int
+ test_libipsec_perf(void)
+ {
+-	struct ipsec_sa sa_out;
+-	struct ipsec_sa sa_in;
++	struct ipsec_sa sa_out = { .sa_prm = { 0 } };
++	struct ipsec_sa sa_in = { .sa_prm = { 0 } };
+ 	uint32_t i;
+ 	int ret;
+ 
+diff --git a/dpdk/app/test/test_kni.c b/dpdk/app/test/test_kni.c
+index 40ab0d5c4c..2761de9b07 100644
+--- a/dpdk/app/test/test_kni.c
++++ b/dpdk/app/test/test_kni.c
+@@ -326,7 +326,7 @@ test_kni_register_handler_mp(void)
+ 
+ 		/* Check with the invalid parameters */
+ 		if (rte_kni_register_handlers(kni, NULL) == 0) {
+-			printf("Unexpectedly register successuflly "
++			printf("Unexpectedly register successfully "
+ 					"with NULL ops pointer\n");
+ 			exit(-1);
+ 		}
+@@ -475,7 +475,7 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
+ 
+ 	/**
+ 	 * Check multiple processes support on
+-	 * registerring/unregisterring handlers.
++	 * registering/unregistering handlers.
+ 	 */
+ 	if (test_kni_register_handler_mp() < 0) {
+ 		printf("fail to check multiple process support\n");
+diff --git a/dpdk/app/test/test_kvargs.c b/dpdk/app/test/test_kvargs.c
+index a91ea8dc47..b7b97a0dd9 100644
+--- a/dpdk/app/test/test_kvargs.c
++++ b/dpdk/app/test/test_kvargs.c
+@@ -11,7 +11,7 @@
+ 
+ #include "test.h"
+ 
+-/* incrementd in handler, to check it is properly called once per
++/* incremented in handler, to check it is properly called once per
+  * key/value association */
+ static unsigned count;
+ 
+@@ -107,14 +107,14 @@ static int test_valid_kvargs(void)
+ 		goto fail;
+ 	}
+ 	count = 0;
+-	/* call check_handler() for all entries with key="unexistant_key" */
+-	if (rte_kvargs_process(kvlist, "unexistant_key", check_handler, NULL) < 0) {
++	/* call check_handler() for all entries with key="nonexistent_key" */
++	if (rte_kvargs_process(kvlist, "nonexistent_key", check_handler, NULL) < 0) {
+ 		printf("rte_kvargs_process() error\n");
+ 		rte_kvargs_free(kvlist);
+ 		goto fail;
+ 	}
+ 	if (count != 0) {
+-		printf("invalid count value %d after rte_kvargs_process(unexistant_key)\n",
++		printf("invalid count value %d after rte_kvargs_process(nonexistent_key)\n",
+ 			count);
+ 		rte_kvargs_free(kvlist);
+ 		goto fail;
+@@ -135,10 +135,10 @@ static int test_valid_kvargs(void)
+ 		rte_kvargs_free(kvlist);
+ 		goto fail;
+ 	}
+-	/* count all entries with key="unexistant_key" */
+-	count = rte_kvargs_count(kvlist, "unexistant_key");
++	/* count all entries with key="nonexistent_key" */
++	count = rte_kvargs_count(kvlist, "nonexistent_key");
+ 	if (count != 0) {
+-		printf("invalid count value %d after rte_kvargs_count(unexistant_key)\n",
++		printf("invalid count value %d after rte_kvargs_count(nonexistent_key)\n",
+ 			count);
+ 		rte_kvargs_free(kvlist);
+ 		goto fail;
+@@ -156,7 +156,7 @@ static int test_valid_kvargs(void)
+ 	/* call check_handler() on all entries with key="check", it
+ 	 * should fail as the value is not recognized by the handler */
+ 	if (rte_kvargs_process(kvlist, "check", check_handler, NULL) == 0) {
+-		printf("rte_kvargs_process() is success bu should not\n");
++		printf("rte_kvargs_process() is success but should not\n");
+ 		rte_kvargs_free(kvlist);
+ 		goto fail;
+ 	}
+diff --git a/dpdk/app/test/test_link_bonding.c b/dpdk/app/test/test_link_bonding.c
+index dc6fc46b9c..12c50ef393 100644
+--- a/dpdk/app/test/test_link_bonding.c
++++ b/dpdk/app/test/test_link_bonding.c
+@@ -181,6 +181,10 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
+ 			test_params->nb_tx_q, &default_pmd_conf),
+ 			"rte_eth_dev_configure for port %d failed", port_id);
+ 
++	int ret = rte_eth_dev_set_mtu(port_id, 1550);
++	RTE_TEST_ASSERT(ret == 0 || ret == -ENOTSUP,
++			"rte_eth_dev_set_mtu for port %d failed", port_id);
++
+ 	for (q_id = 0; q_id < test_params->nb_rx_q; q_id++)
+ 		TEST_ASSERT_SUCCESS(rte_eth_rx_queue_setup(port_id, q_id, RX_RING_SIZE,
+ 				rte_eth_dev_socket_id(port_id), &rx_conf_default,
+diff --git a/dpdk/app/test/test_link_bonding_rssconf.c b/dpdk/app/test/test_link_bonding_rssconf.c
+index f9eae93973..b3d71c6f3a 100644
+--- a/dpdk/app/test/test_link_bonding_rssconf.c
++++ b/dpdk/app/test/test_link_bonding_rssconf.c
+@@ -128,6 +128,10 @@ configure_ethdev(uint16_t port_id, struct rte_eth_conf *eth_conf,
+ 			RXTX_QUEUE_COUNT, eth_conf) == 0, "Failed to configure device %u",
+ 			port_id);
+ 
++	int ret = rte_eth_dev_set_mtu(port_id, 1550);
++	RTE_TEST_ASSERT(ret == 0 || ret == -ENOTSUP,
++			"rte_eth_dev_set_mtu for port %d failed", port_id);
++
+ 	for (rxq = 0; rxq < RXTX_QUEUE_COUNT; rxq++) {
+ 		TEST_ASSERT(rte_eth_rx_queue_setup(port_id, rxq, RXTX_RING_SIZE,
+ 				rte_eth_dev_socket_id(port_id), NULL,
+@@ -464,15 +468,85 @@ test_rss(void)
+ 
+ 	TEST_ASSERT_SUCCESS(test_propagate(), "Propagation test failed");
+ 
+-	TEST_ASSERT(slave_remove_and_add() == 1, "New slave should be synced");
++	TEST_ASSERT(slave_remove_and_add() == 1, "remove and add slaves failed.");
+ 
+ 	remove_slaves_and_stop_bonded_device();
+ 
+ 	return TEST_SUCCESS;
+ }
  
-+/* Multi-variable container iterators.
-+ *
-+ * The following macros facilitate safe iteration over data structures
-+ * contained in objects. It does so by using an internal iterator variable of
-+ * the type of the member object pointer (i.e: pointer to the data structure).
-+ */
 +
-+/* Multi-variable iterator variable name.
-+ * Returns the name of the internal iterator variable.
+ /**
+- * Test propagation logic, when RX_RSS mq_mode is turned off for bonding port
++ * Test RSS configuration over bonded and slaves.
 + */
-+#define ITER_VAR(NAME) NAME ## __iterator__
++static int
++test_rss_config_lazy(void)
++{
++	struct rte_eth_rss_conf bond_rss_conf = {0};
++	struct slave_conf *port;
++	uint8_t rss_key[40];
++	uint64_t rss_hf;
++	int retval;
++	uint16_t i;
++	uint8_t n;
++
++	retval = rte_eth_dev_info_get(test_params.bond_port_id,
++				      &test_params.bond_dev_info);
++	TEST_ASSERT((retval == 0), "Error during getting device (port %u) info: %s\n",
++		    test_params.bond_port_id, strerror(-retval));
++
++	rss_hf = test_params.bond_dev_info.flow_type_rss_offloads;
++	if (rss_hf != 0) {
++		bond_rss_conf.rss_key = NULL;
++		bond_rss_conf.rss_hf = rss_hf;
++		retval = rte_eth_dev_rss_hash_update(test_params.bond_port_id,
++						     &bond_rss_conf);
++		TEST_ASSERT(retval != 0, "Succeeded in setting bonded port hash function");
++	}
++
++	/* Set all keys to zero for all slaves */
++	FOR_EACH_PORT(n, port) {
++		port = &test_params.slave_ports[n];
++		retval = rte_eth_dev_rss_hash_conf_get(port->port_id,
++						       &port->rss_conf);
++		TEST_ASSERT_SUCCESS(retval, "Cannot get slaves RSS configuration");
++		memset(port->rss_key, 0, sizeof(port->rss_key));
++		port->rss_conf.rss_key = port->rss_key;
++		port->rss_conf.rss_key_len = sizeof(port->rss_key);
++		retval = rte_eth_dev_rss_hash_update(port->port_id,
++						     &port->rss_conf);
++		TEST_ASSERT(retval != 0, "Succeeded in setting slaves RSS keys");
++	}
++
++	/* Set RSS keys for bonded port */
++	memset(rss_key, 1, sizeof(rss_key));
++	bond_rss_conf.rss_hf = rss_hf;
++	bond_rss_conf.rss_key = rss_key;
++	bond_rss_conf.rss_key_len = sizeof(rss_key);
++
++	retval = rte_eth_dev_rss_hash_update(test_params.bond_port_id,
++					     &bond_rss_conf);
++	TEST_ASSERT(retval != 0, "Succeeded in setting bonded port RSS keys");
++
++	/*  Test RETA propagation */
++	for (i = 0; i < RXTX_QUEUE_COUNT; i++) {
++		FOR_EACH_PORT(n, port) {
++			port = &test_params.slave_ports[n];
++			retval = reta_set(port->port_id, (i + 1) % RXTX_QUEUE_COUNT,
++					  port->dev_info.reta_size);
++			TEST_ASSERT(retval != 0, "Succeeded in setting slaves RETA");
++		}
++
++		retval = reta_set(test_params.bond_port_id, i % RXTX_QUEUE_COUNT,
++				  test_params.bond_dev_info.reta_size);
++		TEST_ASSERT(retval != 0, "Succeeded in setting bonded port RETA");
++	}
++
++	return TEST_SUCCESS;
++}
 +
-+/* Multi-variable initialization. Creates an internal iterator variable that
-+ * points to the provided pointer. The type of the iterator variable is
-+ * ITER_TYPE*. It must be the same type as &VAR->MEMBER.
-+ *
-+ * The _EXP version evaluates the extra expressions once.
++/**
++ * Test RSS function logic, when RX_RSS mq_mode is turned off for bonding port
+  */
+ static int
+ test_rss_lazy(void)
+@@ -493,9 +567,7 @@ test_rss_lazy(void)
+ 	TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params.bond_port_id),
+ 			"Failed to start bonding port (%d).", test_params.bond_port_id);
+ 
+-	TEST_ASSERT_SUCCESS(test_propagate(), "Propagation test failed");
+-
+-	TEST_ASSERT(slave_remove_and_add() == 0, "New slave shouldn't be synced");
++	TEST_ASSERT_SUCCESS(test_rss_config_lazy(), "Succeeded in setting RSS hash when RX_RSS mq_mode is turned off");
+ 
+ 	remove_slaves_and_stop_bonded_device();
+ 
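
Both bonding test setups above now raise the MTU to 1550 and accept -ENOTSUP alongside success, since the virtual devices used in CI may not implement MTU changes; any other error still fails the test. A standalone sketch of that tolerance, with set_mtu() faking rte_eth_dev_set_mtu():

    #include <errno.h>
    #include <stdio.h>

    /* Fake rte_eth_dev_set_mtu(): virtual test devices may not support it. */
    static int set_mtu(int port_id, int mtu)
    {
        (void)port_id; (void)mtu;
        return -ENOTSUP;
    }

    int main(void)
    {
        int ret = set_mtu(0, 1550);
        if (ret == 0 || ret == -ENOTSUP)
            printf("ok (ret=%d)\n", ret);     /* tolerated, as in the tests */
        else
            printf("failed (ret=%d)\n", ret);
        return 0;
    }
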
+diff --git a/dpdk/app/test/test_lpm6_data.h b/dpdk/app/test/test_lpm6_data.h
+index c3894f730e..da9b161f20 100644
+--- a/dpdk/app/test/test_lpm6_data.h
++++ b/dpdk/app/test/test_lpm6_data.h
+@@ -22,7 +22,7 @@ struct ips_tbl_entry {
+  * in previous test_lpm6_routes.h . Because this table has only 1000
+  * lines, keeping it doesn't make LPM6 test case so large and also
+  * make the algorithm to generate rule table unnecessary and the
+- * algorithm to genertate test input IPv6 and associated expected
++ * algorithm to generate test input IPv6 and associated expected
+  * next_hop much simple.
+  */
+ 
+diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c
+index f54d1d7c00..2a037a12da 100644
+--- a/dpdk/app/test/test_mbuf.c
++++ b/dpdk/app/test/test_mbuf.c
+@@ -2031,8 +2031,6 @@ test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
+ 			NULL);
+ 	if (data_copy == NULL)
+ 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
+-	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
+-		GOTO_FAIL("%s: Incorrect data length!\n", __func__);
+ 	for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
+ 		if (data_copy[off] != (char)0xcc)
+ 			GOTO_FAIL("Data corrupted at offset %u", off);
+@@ -2054,8 +2052,6 @@ test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
+ 	data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
+ 	if (data_copy == NULL)
+ 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
+-	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
+-		GOTO_FAIL("%s: Corrupted data content!\n", __func__);
+ 	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
+ 		if (data_copy[off] != (char)0xcc)
+ 			GOTO_FAIL("Data corrupted at offset %u", off);
+diff --git a/dpdk/app/test/test_member.c b/dpdk/app/test/test_member.c
+index 40aa4c8627..af9d50915c 100644
+--- a/dpdk/app/test/test_member.c
++++ b/dpdk/app/test/test_member.c
+@@ -459,7 +459,7 @@ static int test_member_multimatch(void)
+ 						MAX_MATCH, set_ids_cache);
+ 		/*
+ 		 * For cache mode, keys overwrite when signature same.
+-		 * the mutimatch should work like single match.
++		 * the multimatch should work like single match.
+ 		 */
+ 		TEST_ASSERT(ret_ht == M_MATCH_CNT && ret_vbf == M_MATCH_CNT &&
+ 				ret_cache == 1,
+diff --git a/dpdk/app/test/test_memory.c b/dpdk/app/test/test_memory.c
+index dbf6871e71..440e5ef838 100644
+--- a/dpdk/app/test/test_memory.c
++++ b/dpdk/app/test/test_memory.c
+@@ -25,6 +25,11 @@
+  * - Try to read all memory; it should not segfault.
+  */
+ 
++/*
++ * ASan complains about accessing unallocated memory.
++ * See: https://bugs.dpdk.org/show_bug.cgi?id=880
 + */
-+#define INIT_MULTIVAR(VAR, MEMBER, POINTER, ITER_TYPE)                  \
-+    INIT_MULTIVAR_EXP(VAR, MEMBER, POINTER, ITER_TYPE, (void) 0)
++__rte_no_asan
+ static int
+ check_mem(const struct rte_memseg_list *msl __rte_unused,
+ 		const struct rte_memseg *ms, void *arg __rte_unused)
+@@ -63,7 +68,7 @@ check_seg_fds(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
+ 	/* we're able to get memseg fd - try getting its offset */
+ 	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, &offset);
+ 	if (ret < 0) {
+-		if (errno == ENOTSUP)
++		if (rte_errno == ENOTSUP)
+ 			return 1;
+ 		return -1;
+ 	}
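
Two small test_memory.c fixes sit above: the whole-memory scan is annotated __rte_no_asan because ASan objects to touching unallocated address space (see the linked bug), and the fd-offset probe must consult rte_errno, DPDK's own error variable, rather than libc errno. A sketch of the second point; lib_errno and MY_ENOTSUP are invented stand-ins:

    #include <stdio.h>

    #define MY_ENOTSUP 95            /* invented; mirrors ENOTSUP */

    static int lib_errno;            /* stands in for rte_errno */

    static int lib_get_fd_offset(void)
    {
        lib_errno = MY_ENOTSUP;      /* library reports via its own variable */
        return -1;
    }

    int main(void)
    {
        if (lib_get_fd_offset() < 0) {
            if (lib_errno == MY_ENOTSUP)    /* not libc errno */
                printf("not supported, skipping\n");
            else
                printf("hard failure\n");
        }
        return 0;
    }
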
+diff --git a/dpdk/app/test/test_mempool.c b/dpdk/app/test/test_mempool.c
+index f6c650d11f..8e493eda47 100644
+--- a/dpdk/app/test/test_mempool.c
++++ b/dpdk/app/test/test_mempool.c
+@@ -304,7 +304,7 @@ static int test_mempool_single_consumer(void)
+ }
+ 
+ /*
+- * test function for mempool test based on singple consumer and single producer,
++ * test function for mempool test based on single consumer and single producer,
+  * can run on one lcore only
+  */
+ static int
+@@ -322,7 +322,7 @@ my_mp_init(struct rte_mempool *mp, __rte_unused void *arg)
+ }
+ 
+ /*
+- * it tests the mempool operations based on singple producer and single consumer
++ * it tests the mempool operations based on single producer and single consumer
+  */
+ static int
+ test_mempool_sp_sc(void)
+diff --git a/dpdk/app/test/test_memzone.c b/dpdk/app/test/test_memzone.c
+index 6ddd0fbab5..c9255e5763 100644
+--- a/dpdk/app/test/test_memzone.c
++++ b/dpdk/app/test/test_memzone.c
+@@ -543,7 +543,7 @@ test_memzone_reserve_max(void)
+ 		}
+ 
+ 		if (mz->len != maxlen) {
+-			printf("Memzone reserve with 0 size did not return bigest block\n");
++			printf("Memzone reserve with 0 size did not return biggest block\n");
+ 			printf("Expected size = %zu, actual size = %zu\n",
+ 					maxlen, mz->len);
+ 			rte_dump_physmem_layout(stdout);
+@@ -606,7 +606,7 @@ test_memzone_reserve_max_aligned(void)
+ 
+ 		if (mz->len < minlen || mz->len > maxlen) {
+ 			printf("Memzone reserve with 0 size and alignment %u did not return"
+-					" bigest block\n", align);
++					" biggest block\n", align);
+ 			printf("Expected size = %zu-%zu, actual size = %zu\n",
+ 					minlen, maxlen, mz->len);
+ 			rte_dump_physmem_layout(stdout);
+@@ -1054,7 +1054,7 @@ test_memzone_basic(void)
+ 	if (mz != memzone1)
+ 		return -1;
+ 
+-	printf("test duplcate zone name\n");
++	printf("test duplicate zone name\n");
+ 	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
+ 			SOCKET_ID_ANY, 0);
+ 	if (mz != NULL)
+diff --git a/dpdk/app/test/test_metrics.c b/dpdk/app/test/test_metrics.c
+index e736019ae4..11222133d0 100644
+--- a/dpdk/app/test/test_metrics.c
++++ b/dpdk/app/test/test_metrics.c
+@@ -121,7 +121,7 @@ test_metrics_update_value(void)
+ 	err = rte_metrics_update_value(RTE_METRICS_GLOBAL, KEY, VALUE);
+ 	TEST_ASSERT(err >= 0, "%s, %d", __func__, __LINE__);
+ 
+-	/* Successful Test: Valid port_id otherthan RTE_METRICS_GLOBAL, key
++	/* Successful Test: Valid port_id other than RTE_METRICS_GLOBAL, key
+ 	 * and value
+ 	 */
+ 	err = rte_metrics_update_value(9, KEY, VALUE);
+diff --git a/dpdk/app/test/test_pcapng.c b/dpdk/app/test/test_pcapng.c
+index c2dbeaf603..34c5e12346 100644
+--- a/dpdk/app/test/test_pcapng.c
++++ b/dpdk/app/test/test_pcapng.c
+@@ -109,7 +109,7 @@ test_setup(void)
+ 		return -1;
+ 	}
+ 
+-	/* Make a pool for cloned packeets */
++	/* Make a pool for cloned packets */
+ 	mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", NUM_PACKETS,
+ 					    0, 0,
+ 					    rte_pcapng_mbuf_size(pkt_len),
+diff --git a/dpdk/app/test/test_pmd_perf.c b/dpdk/app/test/test_pmd_perf.c
+index 0aa9dc1b1c..4094057b27 100644
+--- a/dpdk/app/test/test_pmd_perf.c
++++ b/dpdk/app/test/test_pmd_perf.c
+@@ -454,6 +454,7 @@ main_loop(__rte_unused void *args)
+ #define PACKET_SIZE 64
+ #define FRAME_GAP 12
+ #define MAC_PREAMBLE 8
++#define MAX_RETRY_COUNT 5
+ 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ 	unsigned lcore_id;
+ 	unsigned i, portid, nb_rx = 0, nb_tx = 0;
+@@ -461,6 +462,8 @@ main_loop(__rte_unused void *args)
+ 	int pkt_per_port;
+ 	uint64_t diff_tsc;
+ 	uint64_t packets_per_second, total_packets;
++	int retry_cnt = 0;
++	int free_pkt = 0;
+ 
+ 	lcore_id = rte_lcore_id();
+ 	conf = &lcore_conf[lcore_id];
+@@ -478,10 +481,19 @@ main_loop(__rte_unused void *args)
+ 			nb_tx = RTE_MIN(MAX_PKT_BURST, num);
+ 			nb_tx = rte_eth_tx_burst(portid, 0,
+ 						&tx_burst[idx], nb_tx);
++			if (nb_tx == 0)
++				retry_cnt++;
+ 			num -= nb_tx;
+ 			idx += nb_tx;
++			if (retry_cnt == MAX_RETRY_COUNT) {
++				retry_cnt = 0;
++				break;
++			}
+ 		}
+ 	}
++	for (free_pkt = idx; free_pkt < (MAX_TRAFFIC_BURST * conf->nb_ports);
++			free_pkt++)
++		rte_pktmbuf_free(tx_burst[free_pkt]);
+ 	printf("Total packets inject to prime ports = %u\n", idx);
+ 
+ 	packets_per_second = (link_mbps * 1000 * 1000) /
+diff --git a/dpdk/app/test/test_power_cpufreq.c b/dpdk/app/test/test_power_cpufreq.c
+index 1a9549527e..4d013cd7bb 100644
+--- a/dpdk/app/test/test_power_cpufreq.c
++++ b/dpdk/app/test/test_power_cpufreq.c
+@@ -659,7 +659,7 @@ test_power_cpufreq(void)
+ 	/* test of exit power management for an invalid lcore */
+ 	ret = rte_power_exit(TEST_POWER_LCORE_INVALID);
+ 	if (ret == 0) {
+-		printf("Unpectedly exit power management successfully for "
++		printf("Unexpectedly exit power management successfully for "
+ 				"lcore %u\n", TEST_POWER_LCORE_INVALID);
+ 		rte_power_unset_env();
+ 		return -1;
+diff --git a/dpdk/app/test/test_rcu_qsbr.c b/dpdk/app/test/test_rcu_qsbr.c
+index ab37a068cd..70404e89e6 100644
+--- a/dpdk/app/test/test_rcu_qsbr.c
++++ b/dpdk/app/test/test_rcu_qsbr.c
+@@ -408,7 +408,7 @@ test_rcu_qsbr_synchronize_reader(void *arg)
+ 
+ /*
+  * rte_rcu_qsbr_synchronize: Wait till all the reader threads have entered
+- * the queiscent state.
++ * the quiescent state.
+  */
+ static int
+ test_rcu_qsbr_synchronize(void)
+@@ -443,7 +443,7 @@ test_rcu_qsbr_synchronize(void)
+ 	rte_rcu_qsbr_synchronize(t[0], RTE_MAX_LCORE - 1);
+ 	rte_rcu_qsbr_thread_offline(t[0], RTE_MAX_LCORE - 1);
+ 
+-	/* Test if the API returns after unregisterng all the threads */
++	/* Test if the API returns after unregistering all the threads */
+ 	for (i = 0; i < RTE_MAX_LCORE; i++)
+ 		rte_rcu_qsbr_thread_unregister(t[0], i);
+ 	rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
+diff --git a/dpdk/app/test/test_red.c b/dpdk/app/test/test_red.c
+index 05936cfee8..33a9f4ebb7 100644
+--- a/dpdk/app/test/test_red.c
++++ b/dpdk/app/test/test_red.c
+@@ -1566,10 +1566,10 @@ static void ovfl_check_avg(uint32_t avg)
+ }
+ 
+ static struct test_config ovfl_test1_config = {
+-	.ifname = "queue avergage overflow test interface",
++	.ifname = "queue average overflow test interface",
+ 	.msg = "overflow test 1 : use one RED configuration,\n"
+ 	"		  increase average queue size to target level,\n"
+-	"		  check maximum number of bits requirte_red to represent avg_s\n\n",
++	"		  check maximum number of bits required to represent avg_s\n\n",
+ 	.htxt = "avg queue size  "
+ 	"wq_log2  "
+ 	"fraction bits  "
+@@ -1757,12 +1757,12 @@ test_invalid_parameters(void)
+ 		printf("%i: rte_red_config_init should have failed!\n", __LINE__);
+ 		return -1;
+ 	}
+-	/* min_treshold == max_treshold */
++	/* min_threshold == max_threshold */
+ 	if (rte_red_config_init(&config, 0, 1, 1, 0) == 0) {
+ 		printf("%i: rte_red_config_init should have failed!\n", __LINE__);
+ 		return -1;
+ 	}
+-	/* min_treshold > max_treshold */
++	/* min_threshold > max_threshold */
+ 	if (rte_red_config_init(&config, 0, 2, 1, 0) == 0) {
+ 		printf("%i: rte_red_config_init should have failed!\n", __LINE__);
+ 		return -1;
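
The invalid-parameter tests above encode the constraint that
``min_threshold`` must be strictly less than ``max_threshold``. For
contrast, a call that should succeed; the values are illustrative and
``handle_error()`` is a placeholder:

.. code-block:: c

   #include <rte_red.h>

   struct rte_red_config config;
   int ret = rte_red_config_init(&config,
                                 9,    /* wq_log2: EWMA filter weight */
                                 32,   /* min_th */
                                 128,  /* max_th: strictly above min_th */
                                 10);  /* maxp_inv: inverse mark probability */
   if (ret != 0)
       handle_error(ret);              /* non-zero means invalid parameters */
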
+diff --git a/dpdk/app/test/test_security.c b/dpdk/app/test/test_security.c
+index 060cf1ffa8..059731b65d 100644
+--- a/dpdk/app/test/test_security.c
++++ b/dpdk/app/test/test_security.c
+@@ -237,7 +237,7 @@
+  * increases .called counter. Function returns value stored in .ret field
+  * of the structure.
+  * In case of some parameters in some functions the expected value is unknown
+- * and cannot be detrmined prior to call. Such parameters are stored
++ * and cannot be determined prior to call. Such parameters are stored
+  * in structure and can be compared or analyzed later in test case code.
+  *
+  * Below structures and functions follow the rules just described.
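
The corrected comment describes test_security.c's mocking scheme: each mock
records its arguments and a call counter in a companion structure and
returns a preloaded value, so parameters that cannot be predicted before
the call can still be checked afterwards. A hypothetical mock in that style
(names and signature invented for illustration):

.. code-block:: c

   static struct mock_session_create_data {
       void *device;   /* captured argument, compared later by the test */
       int called;     /* how many times the mock ran */
       int ret;        /* value the test preloads for the mock to return */
   } mock_session_create_exp;

   static int
   mock_session_create(void *device)
   {
       mock_session_create_exp.device = device;
       mock_session_create_exp.called++;
       return mock_session_create_exp.ret;
   }
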
+diff --git a/dpdk/app/test/test_table_pipeline.c b/dpdk/app/test/test_table_pipeline.c
+index aabf4375db..915c451fed 100644
+--- a/dpdk/app/test/test_table_pipeline.c
++++ b/dpdk/app/test/test_table_pipeline.c
+@@ -364,7 +364,7 @@ setup_pipeline(int test_type)
+ 				.action = RTE_PIPELINE_ACTION_PORT,
+ 				{.port_id = port_out_id[i^1]},
+ 			};
+-			printf("Setting secont table to output to port\n");
++			printf("Setting second table to output to port\n");
+ 
+ 			/* Add the default action for the table. */
+ 			ret = rte_pipeline_table_default_entry_add(p,
+diff --git a/dpdk/app/test/test_table_tables.c b/dpdk/app/test/test_table_tables.c
+index 4ff6ab16aa..494fb6ffaa 100644
+--- a/dpdk/app/test/test_table_tables.c
++++ b/dpdk/app/test/test_table_tables.c
+@@ -290,10 +290,10 @@ test_table_lpm(void)
+ 	struct rte_mbuf *mbufs[RTE_PORT_IN_BURST_SIZE_MAX];
+ 	void *table;
+ 	char *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+-	char entry;
++	uint64_t entry;
+ 	void *entry_ptr;
+ 	int key_found;
+-	uint32_t entry_size = 1;
++	uint32_t entry_size = sizeof(entry);
+ 
+ 	/* Initialize params and create tables */
+ 	struct rte_table_lpm_params lpm_params = {
+@@ -355,7 +355,7 @@ test_table_lpm(void)
+ 	struct rte_table_lpm_key lpm_key;
+ 	lpm_key.ip = 0xadadadad;
+ 
+-	table = rte_table_lpm_ops.f_create(&lpm_params, 0, 1);
++	table = rte_table_lpm_ops.f_create(&lpm_params, 0, entry_size);
+ 	if (table == NULL)
+ 		return -9;
+ 
+@@ -456,10 +456,10 @@ test_table_lpm_ipv6(void)
+ 	struct rte_mbuf *mbufs[RTE_PORT_IN_BURST_SIZE_MAX];
+ 	void *table;
+ 	char *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+-	char entry;
++	uint64_t entry;
+ 	void *entry_ptr;
+ 	int key_found;
+-	uint32_t entry_size = 1;
++	uint32_t entry_size = sizeof(entry);
+ 
+ 	/* Initialize params and create tables */
+ 	struct rte_table_lpm_ipv6_params lpm_params = {
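
Both test_table_tables.c hunks restore the same invariant: the
``entry_size`` handed to a table's ``f_create`` must match the object the
test actually stores, otherwise writing an entry overruns the table's
per-entry slots. A sketch, reusing ``lpm_params`` from the surrounding
test:

.. code-block:: c

   uint64_t entry = 0xdeadbeef;             /* what the test stores */
   uint32_t entry_size = sizeof(entry);     /* 8 bytes, not 1 */

   void *table = rte_table_lpm_ops.f_create(&lpm_params,
                                            0 /* socket id */, entry_size);
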
+diff --git a/dpdk/app/test/test_thash.c b/dpdk/app/test/test_thash.c
+index a62530673f..62ba4a9528 100644
+--- a/dpdk/app/test/test_thash.c
++++ b/dpdk/app/test/test_thash.c
+@@ -684,7 +684,7 @@ test_predictable_rss_multirange(void)
+ 
+ 	/*
+ 	 * calculate hashes, complements, then adjust keys with
+-	 * complements and recalsulate hashes
++	 * complements and recalculate hashes
+ 	 */
+ 	for (i = 0; i < RTE_DIM(rng_arr); i++) {
+ 		for (k = 0; k < 100; k++) {
+diff --git a/dpdk/buildtools/binutils-avx512-check.py b/dpdk/buildtools/binutils-avx512-check.py
+index a4e14f3593..a0847a23d6 100644
+--- a/dpdk/buildtools/binutils-avx512-check.py
++++ b/dpdk/buildtools/binutils-avx512-check.py
+@@ -1,5 +1,5 @@
+ #! /usr/bin/env python3
+-# SPDX-License-Identitifer: BSD-3-Clause
++# SPDX-License-Identifier: BSD-3-Clause
+ # Copyright(c) 2020 Intel Corporation
+ 
+ import subprocess
+@@ -15,7 +15,7 @@
+     src = '__asm__("vpgatherqq {}");'.format(gather_params).encode('utf-8')
+     subprocess.run(cc + ['-c', '-xc', '-o', obj.name, '-'], input=src, check=True)
+     asm = subprocess.run([objdump, '-d', '--no-show-raw-insn', obj.name],
+-                         capture_output=True, check=True).stdout.decode('utf-8')
++                         stdout=subprocess.PIPE, check=True).stdout.decode('utf-8')
+     if gather_params not in asm:
+ 	    print('vpgatherqq displacement error with as')
+ 	    sys.exit(1)
+diff --git a/dpdk/buildtools/call-sphinx-build.py b/dpdk/buildtools/call-sphinx-build.py
+index 26b199220a..39a60d09fa 100755
+--- a/dpdk/buildtools/call-sphinx-build.py
++++ b/dpdk/buildtools/call-sphinx-build.py
+@@ -7,7 +7,7 @@
+ import os
+ from os.path import join
+ from subprocess import run, PIPE, STDOUT
+-from distutils.version import StrictVersion
++from packaging.version import Version
+ 
+ # assign parameters to variables
+ (sphinx, version, src, dst, *extra_args) = sys.argv[1:]
+@@ -19,7 +19,7 @@
+ ver = run([sphinx, '--version'], stdout=PIPE,
+           stderr=STDOUT).stdout.decode().split()[-1]
+ sphinx_cmd = [sphinx] + extra_args
+-if StrictVersion(ver) >= StrictVersion('1.7'):
++if Version(ver) >= Version('1.7'):
+     sphinx_cmd += ['-j', 'auto']
+ 
+ # find all the files sphinx will process so we can write them as dependencies
+diff --git a/dpdk/buildtools/meson.build b/dpdk/buildtools/meson.build
+index 22ea0ba375..e1c600e40f 100644
+--- a/dpdk/buildtools/meson.build
++++ b/dpdk/buildtools/meson.build
+@@ -31,6 +31,9 @@ if host_machine.system() == 'windows'
+         pmdinfo += 'llvm-ar'
+     endif
+     pmdinfogen += 'coff'
++elif host_machine.system() == 'freebsd'
++    pmdinfo += 'llvm-ar'
++    pmdinfogen += 'elf'
+ else
+     pmdinfo += 'ar'
+     pmdinfogen += 'elf'
+@@ -45,7 +48,7 @@ if host_machine.system() != 'windows'
+ endif
+ foreach module : python3_required_modules
+     script = 'import importlib.util; import sys; exit(importlib.util.find_spec("@0@") is None)'
+-    if run_command(py3, '-c', script.format(module)).returncode() != 0
++    if run_command(py3, '-c', script.format(module), check: false).returncode() != 0
+         error('missing python module: @0@'.format(module))
+     endif
+ endforeach
+diff --git a/dpdk/config/arm/arm32_armv8_linux_gcc b/dpdk/config/arm/arm32_armv8_linux_gcc
+index 89f8a12881..0d4618ea4a 100644
+--- a/dpdk/config/arm/arm32_armv8_linux_gcc
++++ b/dpdk/config/arm/arm32_armv8_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'arm-linux-gnueabihf-gcc'
+-cpp = 'arm-linux-gnueabihf-cpp'
++cpp = 'arm-linux-gnueabihf-g++'
+ ar = 'arm-linux-gnueabihf-gcc-ar'
+ strip = 'arm-linux-gnueabihf-strip'
+ pkgconfig = 'arm-linux-gnueabihf-pkg-config'
+diff --git a/dpdk/config/arm/arm64_armada_linux_gcc b/dpdk/config/arm/arm64_armada_linux_gcc
+index 301418949b..5043b82651 100644
+--- a/dpdk/config/arm/arm64_armada_linux_gcc
++++ b/dpdk/config/arm/arm64_armada_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-ar'
+ as = 'aarch64-linux-gnu-as'
+ strip = 'aarch64-linux-gnu-strip'
+diff --git a/dpdk/config/arm/arm64_armv8_linux_gcc b/dpdk/config/arm/arm64_armv8_linux_gcc
+index 5391d35389..5c32f6b9ca 100644
+--- a/dpdk/config/arm/arm64_armv8_linux_gcc
++++ b/dpdk/config/arm/arm64_armv8_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_bluefield_linux_gcc b/dpdk/config/arm/arm64_bluefield_linux_gcc
+index 248a9f031a..df6eccc046 100644
+--- a/dpdk/config/arm/arm64_bluefield_linux_gcc
++++ b/dpdk/config/arm/arm64_bluefield_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_centriq2400_linux_gcc b/dpdk/config/arm/arm64_centriq2400_linux_gcc
+index dfe9110331..ddffc0503a 100644
+--- a/dpdk/config/arm/arm64_centriq2400_linux_gcc
++++ b/dpdk/config/arm/arm64_centriq2400_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_cn10k_linux_gcc b/dpdk/config/arm/arm64_cn10k_linux_gcc
+index 88e5f10945..19068f0ec9 100644
+--- a/dpdk/config/arm/arm64_cn10k_linux_gcc
++++ b/dpdk/config/arm/arm64_cn10k_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_dpaa_linux_gcc b/dpdk/config/arm/arm64_dpaa_linux_gcc
+index e9d5fd31fc..70df99fb02 100644
+--- a/dpdk/config/arm/arm64_dpaa_linux_gcc
++++ b/dpdk/config/arm/arm64_dpaa_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-ar'
+ as = 'aarch64-linux-gnu-as'
+ strip = 'aarch64-linux-gnu-strip'
+diff --git a/dpdk/config/arm/arm64_emag_linux_gcc b/dpdk/config/arm/arm64_emag_linux_gcc
+index 9cdd931180..06f5eaecd0 100644
+--- a/dpdk/config/arm/arm64_emag_linux_gcc
++++ b/dpdk/config/arm/arm64_emag_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_graviton2_linux_gcc b/dpdk/config/arm/arm64_graviton2_linux_gcc
+index 8016fd236c..24b2dbcca8 100644
+--- a/dpdk/config/arm/arm64_graviton2_linux_gcc
++++ b/dpdk/config/arm/arm64_graviton2_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_kunpeng920_linux_gcc b/dpdk/config/arm/arm64_kunpeng920_linux_gcc
+index c4685b2458..4a71531e3e 100644
+--- a/dpdk/config/arm/arm64_kunpeng920_linux_gcc
++++ b/dpdk/config/arm/arm64_kunpeng920_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_kunpeng930_linux_gcc b/dpdk/config/arm/arm64_kunpeng930_linux_gcc
+index fb85d2d710..383f0b0313 100644
+--- a/dpdk/config/arm/arm64_kunpeng930_linux_gcc
++++ b/dpdk/config/arm/arm64_kunpeng930_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_n1sdp_linux_gcc b/dpdk/config/arm/arm64_n1sdp_linux_gcc
+index 0df283e2f4..5f6356caa2 100644
+--- a/dpdk/config/arm/arm64_n1sdp_linux_gcc
++++ b/dpdk/config/arm/arm64_n1sdp_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_n2_linux_gcc b/dpdk/config/arm/arm64_n2_linux_gcc
+index 036aee2b0a..82806ba780 100644
+--- a/dpdk/config/arm/arm64_n2_linux_gcc
++++ b/dpdk/config/arm/arm64_n2_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_octeontx2_linux_gcc b/dpdk/config/arm/arm64_octeontx2_linux_gcc
+index 8fbdd3868d..d23b6527ef 100644
+--- a/dpdk/config/arm/arm64_octeontx2_linux_gcc
++++ b/dpdk/config/arm/arm64_octeontx2_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_stingray_linux_gcc b/dpdk/config/arm/arm64_stingray_linux_gcc
+index 319a4a151d..cf98337f0f 100644
+--- a/dpdk/config/arm/arm64_stingray_linux_gcc
++++ b/dpdk/config/arm/arm64_stingray_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_thunderx2_linux_gcc b/dpdk/config/arm/arm64_thunderx2_linux_gcc
+index 69c71cbc82..616f6c263a 100644
+--- a/dpdk/config/arm/arm64_thunderx2_linux_gcc
++++ b/dpdk/config/arm/arm64_thunderx2_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/arm64_thunderxt88_linux_gcc b/dpdk/config/arm/arm64_thunderxt88_linux_gcc
+index 372097ba01..131f56465a 100644
+--- a/dpdk/config/arm/arm64_thunderxt88_linux_gcc
++++ b/dpdk/config/arm/arm64_thunderxt88_linux_gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'aarch64-linux-gnu-gcc'
+-cpp = 'aarch64-linux-gnu-cpp'
++cpp = 'aarch64-linux-gnu-g++'
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
+diff --git a/dpdk/config/arm/meson.build b/dpdk/config/arm/meson.build
+index 213324d262..89a3bf4213 100644
+--- a/dpdk/config/arm/meson.build
++++ b/dpdk/config/arm/meson.build
+@@ -49,6 +49,7 @@ implementer_generic = {
+                 ['RTE_ARCH_ARM_NEON_MEMCPY', false],
+                 ['RTE_ARCH_STRICT_ALIGN', true],
+                 ['RTE_ARCH_ARMv8_AARCH32', true],
++                ['RTE_ARCH', 'armv8_aarch32'],
+                 ['RTE_CACHE_LINE_SIZE', 64]
+             ]
+         }
+@@ -276,7 +277,8 @@ soc_cn10k = {
+     'implementer' : '0x41',
+     'flags': [
+         ['RTE_MAX_LCORE', 24],
+-        ['RTE_MAX_NUMA_NODES', 1]
++        ['RTE_MAX_NUMA_NODES', 1],
++        ['RTE_MEMPOOL_ALIGN', 128]
+     ],
+     'part_number': '0xd49',
+     'extra_march_features': ['crypto'],
+@@ -432,11 +434,15 @@ if dpdk_conf.get('RTE_ARCH_32')
+     else
+         # armv7 build
+         dpdk_conf.set('RTE_ARCH_ARMv7', true)
++        dpdk_conf.set('RTE_ARCH', 'armv7')
++        dpdk_conf.set('RTE_MAX_LCORE', 128)
++        dpdk_conf.set('RTE_MAX_NUMA_NODES', 1)
+         # the minimum architecture supported, armv7-a, needs the following,
+         machine_args += '-mfpu=neon'
+     endif
+ else
+     # armv8 build
++    dpdk_conf.set('RTE_ARCH', 'armv8')
+     update_flags = true
+     soc_config = {}
+     if not meson.is_cross_build()
+@@ -460,7 +466,7 @@ else
+             # 'Primary Part number', 'Revision']
+             detect_vendor = find_program(join_paths(meson.current_source_dir(),
+                                                     'armv8_machine.py'))
+-            cmd = run_command(detect_vendor.path())
++            cmd = run_command(detect_vendor.path(), check: false)
+             if cmd.returncode() == 0
+                 cmd_output = cmd.stdout().to_lower().strip().split(' ')
+                 implementer_id = cmd_output[0]
+diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build
+index 805d5d51d0..ee12318d4f 100644
+--- a/dpdk/config/meson.build
++++ b/dpdk/config/meson.build
+@@ -22,7 +22,8 @@ is_ms_linker = is_windows and (cc.get_id() == 'clang')
+ # depending on the configuration options
+ pver = meson.project_version().split('.')
+ major_version = '@0@.@1@'.format(pver.get(0), pver.get(1))
+-abi_version = run_command(find_program('cat', 'more'), abi_version_file).stdout().strip()
++abi_version = run_command(find_program('cat', 'more'), abi_version_file,
++        check: true).stdout().strip()
+ 
+ # Libraries have the abi_version as the filename extension
+ # and have the soname be all but the final part of the abi_version.
+@@ -334,7 +335,7 @@ if max_lcores == 'detect'
+         error('Discovery of max_lcores is not supported for cross-compilation.')
+     endif
+     # overwrite the default value with discovered values
+-    max_lcores = run_command(get_cpu_count_cmd).stdout().to_int()
++    max_lcores = run_command(get_cpu_count_cmd, check: true).stdout().to_int()
+     min_lcores = 2
+     # DPDK must be built for at least 2 cores
+     if max_lcores < min_lcores
+diff --git a/dpdk/config/ppc/ppc64le-power8-linux-gcc b/dpdk/config/ppc/ppc64le-power8-linux-gcc
+index 51f7ceebf3..784c33df9e 100644
+--- a/dpdk/config/ppc/ppc64le-power8-linux-gcc
++++ b/dpdk/config/ppc/ppc64le-power8-linux-gcc
+@@ -1,6 +1,6 @@
+ [binaries]
+ c = 'powerpc64le-linux-gcc'
+-cpp = 'powerpc64le-linux-cpp'
++cpp = 'powerpc64le-linux-g++'
+ ar = 'powerpc64le-linux-gcc-ar'
+ strip = 'powerpc64le-linux-strip'
+ 
+diff --git a/dpdk/config/x86/meson.build b/dpdk/config/x86/meson.build
+index e25ed316f4..54345c4da3 100644
+--- a/dpdk/config/x86/meson.build
++++ b/dpdk/config/x86/meson.build
+@@ -4,7 +4,7 @@
+ # get binutils version for the workaround of Bug 97
+ binutils_ok = true
+ if is_linux or cc.get_id() == 'gcc'
+-    binutils_ok = run_command(binutils_avx512_check).returncode() == 0
++    binutils_ok = run_command(binutils_avx512_check, check: false).returncode() == 0
+     if not binutils_ok and cc.has_argument('-mno-avx512f')
+         machine_args += '-mno-avx512f'
+         warning('Binutils error with AVX512 assembly, disabling AVX512 support')
+diff --git a/dpdk/devtools/check-abi.sh b/dpdk/devtools/check-abi.sh
+index ca523eb94c..9835e346da 100755
+--- a/dpdk/devtools/check-abi.sh
++++ b/dpdk/devtools/check-abi.sh
+@@ -44,10 +44,6 @@ for dump in $(find $refdir -name "*.dump"); do
+ 		echo "Skipped glue library $name."
+ 		continue
+ 	fi
+-	if grep -qE "\<soname='librte_event_dlb\.so" $dump; then
+-		echo "Skipped removed driver $name."
+-		continue
+-	fi
+ 	dump2=$(find $newdir -name $name)
+ 	if [ -z "$dump2" ] || [ ! -e "$dump2" ]; then
+ 		echo "Error: cannot find $name in $newdir" >&2
+diff --git a/dpdk/devtools/check-forbidden-tokens.awk b/dpdk/devtools/check-forbidden-tokens.awk
+index 61ba707c9b..026844141c 100755
+--- a/dpdk/devtools/check-forbidden-tokens.awk
++++ b/dpdk/devtools/check-forbidden-tokens.awk
+@@ -20,6 +20,9 @@ BEGIN {
+ # state machine assumes the comments structure is enforced by
+ # checkpatches.pl
+ (in_file) {
++	if ($0 ~ "^@@") {
++		in_comment = 0
++	}
+ 	# comment start
+ 	if (index($0,comment_start) > 0) {
+ 		in_comment = 1
+diff --git a/dpdk/devtools/check-symbol-change.sh b/dpdk/devtools/check-symbol-change.sh
+index 8fcd0ce1a1..8992214ac8 100755
+--- a/dpdk/devtools/check-symbol-change.sh
++++ b/dpdk/devtools/check-symbol-change.sh
+@@ -25,7 +25,7 @@ build_map_changes()
+ 
+ 		# Triggering this rule, which starts a line and ends it
+ 		# with a { identifies a versioned section.  The section name is
+-		# the rest of the line with the + and { symbols remvoed.
++		# the rest of the line with the + and { symbols removed.
+ 		# Triggering this rule sets in_sec to 1, which actives the
+ 		# symbol rule below
+ 		/^.*{/ {
+@@ -35,7 +35,7 @@ build_map_changes()
+ 			}
+ 		}
+ 
+-		# This rule idenfies the end of a section, and disables the
++		# This rule identifies the end of a section, and disables the
+ 		# symbol rule
+ 		/.*}/ {in_sec=0}
+ 
+@@ -100,7 +100,7 @@ check_for_rule_violations()
+ 				# Just inform the user of this occurrence, but
+ 				# don't flag it as an error
+ 				echo -n "INFO: symbol $symname is added but "
+-				echo -n "patch has insuficient context "
++				echo -n "patch has insufficient context "
+ 				echo -n "to determine the section name "
+ 				echo -n "please ensure the version is "
+ 				echo "EXPERIMENTAL"
+diff --git a/dpdk/devtools/check-symbol-maps.sh b/dpdk/devtools/check-symbol-maps.sh
+index 5bd290ac97..32e1fa5c8f 100755
+--- a/dpdk/devtools/check-symbol-maps.sh
++++ b/dpdk/devtools/check-symbol-maps.sh
+@@ -53,4 +53,11 @@ if [ -n "$duplicate_symbols" ] ; then
+     ret=1
+ fi
+ 
++local_miss_maps=$(grep -L 'local: \*;' $@ || true)
++if [ -n "$local_miss_maps" ] ; then
++    echo "Found maps without local catch-all:"
++    echo "$local_miss_maps"
++    ret=1
++fi
++
+ exit $ret
+diff --git a/dpdk/devtools/libabigail.abignore b/dpdk/devtools/libabigail.abignore
+index 4b676f317d..146a601ed3 100644
+--- a/dpdk/devtools/libabigail.abignore
++++ b/dpdk/devtools/libabigail.abignore
+@@ -11,3 +11,23 @@
+ ; Ignore generated PMD information strings
+ [suppress_variable]
+         name_regexp = _pmd_info$
++
++; Ignore changes to rte_crypto_asym_op, asymmetric crypto API is experimental
++[suppress_type]
++        name = rte_crypto_asym_op
++
++; Ignore section attribute fixes in experimental regexdev library
++[suppress_file]
++        soname_regexp = ^librte_regexdev\.
++
++; Ignore changes in common mlx5 driver, should be all internal
++[suppress_file]
++        soname_regexp = ^librte_common_mlx5\.
++
++; Ignore visibility fix of local functions in experimental auxiliary driver
++[suppress_file]
++        soname_regexp = ^librte_bus_auxiliary\.
++
++; Ignore visibility fix of local functions in experimental gpudev library
++[suppress_file]
++        soname_regexp = ^librte_gpudev\.
+diff --git a/dpdk/devtools/test-null.sh b/dpdk/devtools/test-null.sh
+index 4ba57a6829..6cd34f64f1 100755
+--- a/dpdk/devtools/test-null.sh
++++ b/dpdk/devtools/test-null.sh
+@@ -27,6 +27,7 @@ else
+ fi
+ 
+ (sleep 1 && echo stop) |
+-$testpmd -c $coremask --no-huge -m 20 \
++# testpmd only needs 20M, make it x2 (default number of cores) for NUMA systems
++$testpmd -c $coremask --no-huge -m 40 \
+ 	$libs -a 0:0.0 --vdev net_null1 --vdev net_null2 $eal_options -- \
+ 	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
+diff --git a/dpdk/doc/api/doxy-api-index.md b/dpdk/doc/api/doxy-api-index.md
+index 4245b9635c..baecb2e52e 100644
+--- a/dpdk/doc/api/doxy-api-index.md
++++ b/dpdk/doc/api/doxy-api-index.md
+@@ -9,222 +9,222 @@ API
+ The public API headers are grouped by topics:
+ 
+ - **device**:
+-  [dev]                (@ref rte_dev.h),
+-  [ethdev]             (@ref rte_ethdev.h),
+-  [ethctrl]            (@ref rte_eth_ctrl.h),
+-  [rte_flow]           (@ref rte_flow.h),
+-  [rte_tm]             (@ref rte_tm.h),
+-  [rte_mtr]            (@ref rte_mtr.h),
+-  [bbdev]              (@ref rte_bbdev.h),
+-  [cryptodev]          (@ref rte_cryptodev.h),
+-  [security]           (@ref rte_security.h),
+-  [compressdev]        (@ref rte_compressdev.h),
+-  [compress]           (@ref rte_comp.h),
+-  [regexdev]           (@ref rte_regexdev.h),
+-  [dmadev]             (@ref rte_dmadev.h),
+-  [eventdev]           (@ref rte_eventdev.h),
+-  [event_eth_rx_adapter]   (@ref rte_event_eth_rx_adapter.h),
+-  [event_eth_tx_adapter]   (@ref rte_event_eth_tx_adapter.h),
+-  [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
+-  [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
+-  [rawdev]             (@ref rte_rawdev.h),
+-  [metrics]            (@ref rte_metrics.h),
+-  [bitrate]            (@ref rte_bitrate.h),
+-  [latency]            (@ref rte_latencystats.h),
+-  [devargs]            (@ref rte_devargs.h),
+-  [PCI]                (@ref rte_pci.h),
+-  [vdev]               (@ref rte_bus_vdev.h),
+-  [vfio]               (@ref rte_vfio.h)
++  [dev](@ref rte_dev.h),
++  [ethdev](@ref rte_ethdev.h),
++  [ethctrl](@ref rte_eth_ctrl.h),
++  [rte_flow](@ref rte_flow.h),
++  [rte_tm](@ref rte_tm.h),
++  [rte_mtr](@ref rte_mtr.h),
++  [bbdev](@ref rte_bbdev.h),
++  [cryptodev](@ref rte_cryptodev.h),
++  [security](@ref rte_security.h),
++  [compressdev](@ref rte_compressdev.h),
++  [compress](@ref rte_comp.h),
++  [regexdev](@ref rte_regexdev.h),
++  [dmadev](@ref rte_dmadev.h),
++  [eventdev](@ref rte_eventdev.h),
++  [event_eth_rx_adapter](@ref rte_event_eth_rx_adapter.h),
++  [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h),
++  [event_timer_adapter](@ref rte_event_timer_adapter.h),
++  [event_crypto_adapter](@ref rte_event_crypto_adapter.h),
++  [rawdev](@ref rte_rawdev.h),
++  [metrics](@ref rte_metrics.h),
++  [bitrate](@ref rte_bitrate.h),
++  [latency](@ref rte_latencystats.h),
++  [devargs](@ref rte_devargs.h),
++  [PCI](@ref rte_pci.h),
++  [vdev](@ref rte_bus_vdev.h),
++  [vfio](@ref rte_vfio.h)
+ 
+ - **device specific**:
+-  [softnic]            (@ref rte_eth_softnic.h),
+-  [bond]               (@ref rte_eth_bond.h),
+-  [vhost]              (@ref rte_vhost.h),
+-  [vdpa]               (@ref rte_vdpa.h),
+-  [KNI]                (@ref rte_kni.h),
+-  [ixgbe]              (@ref rte_pmd_ixgbe.h),
+-  [i40e]               (@ref rte_pmd_i40e.h),
+-  [ice]                (@ref rte_pmd_ice.h),
+-  [iavf]               (@ref rte_pmd_iavf.h),
+-  [ioat]               (@ref rte_ioat_rawdev.h),
+-  [bnxt]               (@ref rte_pmd_bnxt.h),
+-  [dpaa]               (@ref rte_pmd_dpaa.h),
+-  [dpaa2]              (@ref rte_pmd_dpaa2.h),
+-  [mlx5]               (@ref rte_pmd_mlx5.h),
+-  [dpaa2_mempool]      (@ref rte_dpaa2_mempool.h),
+-  [dpaa2_cmdif]        (@ref rte_pmd_dpaa2_cmdif.h),
+-  [dpaa2_qdma]         (@ref rte_pmd_dpaa2_qdma.h),
+-  [crypto_scheduler]   (@ref rte_cryptodev_scheduler.h),
+-  [dlb2]               (@ref rte_pmd_dlb2.h),
+-  [ifpga]              (@ref rte_pmd_ifpga.h)
++  [softnic](@ref rte_eth_softnic.h),
++  [bond](@ref rte_eth_bond.h),
++  [vhost](@ref rte_vhost.h),
++  [vdpa](@ref rte_vdpa.h),
++  [KNI](@ref rte_kni.h),
++  [ixgbe](@ref rte_pmd_ixgbe.h),
++  [i40e](@ref rte_pmd_i40e.h),
++  [ice](@ref rte_pmd_ice.h),
++  [iavf](@ref rte_pmd_iavf.h),
++  [ioat](@ref rte_ioat_rawdev.h),
++  [bnxt](@ref rte_pmd_bnxt.h),
++  [dpaa](@ref rte_pmd_dpaa.h),
++  [dpaa2](@ref rte_pmd_dpaa2.h),
++  [mlx5](@ref rte_pmd_mlx5.h),
++  [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
++  [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
++  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
++  [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
++  [dlb2](@ref rte_pmd_dlb2.h),
++  [ifpga](@ref rte_pmd_ifpga.h)
+ 
+ - **memory**:
+-  [memseg]             (@ref rte_memory.h),
+-  [memzone]            (@ref rte_memzone.h),
+-  [mempool]            (@ref rte_mempool.h),
+-  [malloc]             (@ref rte_malloc.h),
+-  [memcpy]             (@ref rte_memcpy.h)
++  [memseg](@ref rte_memory.h),
++  [memzone](@ref rte_memzone.h),
++  [mempool](@ref rte_mempool.h),
++  [malloc](@ref rte_malloc.h),
++  [memcpy](@ref rte_memcpy.h)
+ 
+ - **timers**:
+-  [cycles]             (@ref rte_cycles.h),
+-  [timer]              (@ref rte_timer.h),
+-  [alarm]              (@ref rte_alarm.h)
++  [cycles](@ref rte_cycles.h),
++  [timer](@ref rte_timer.h),
++  [alarm](@ref rte_alarm.h)
+ 
+ - **locks**:
+-  [atomic]             (@ref rte_atomic.h),
+-  [mcslock]            (@ref rte_mcslock.h),
+-  [pflock]             (@ref rte_pflock.h),
+-  [rwlock]             (@ref rte_rwlock.h),
+-  [spinlock]           (@ref rte_spinlock.h),
+-  [ticketlock]         (@ref rte_ticketlock.h),
+-  [RCU]                (@ref rte_rcu_qsbr.h)
++  [atomic](@ref rte_atomic.h),
++  [mcslock](@ref rte_mcslock.h),
++  [pflock](@ref rte_pflock.h),
++  [rwlock](@ref rte_rwlock.h),
++  [spinlock](@ref rte_spinlock.h),
++  [ticketlock](@ref rte_ticketlock.h),
++  [RCU](@ref rte_rcu_qsbr.h)
+ 
+ - **CPU arch**:
+-  [branch prediction]  (@ref rte_branch_prediction.h),
+-  [cache prefetch]     (@ref rte_prefetch.h),
+-  [SIMD]               (@ref rte_vect.h),
+-  [byte order]         (@ref rte_byteorder.h),
+-  [CPU flags]          (@ref rte_cpuflags.h),
+-  [CPU pause]          (@ref rte_pause.h),
+-  [I/O access]         (@ref rte_io.h),
+-  [power management]   (@ref rte_power_intrinsics.h)
++  [branch prediction](@ref rte_branch_prediction.h),
++  [cache prefetch](@ref rte_prefetch.h),
++  [SIMD](@ref rte_vect.h),
++  [byte order](@ref rte_byteorder.h),
++  [CPU flags](@ref rte_cpuflags.h),
++  [CPU pause](@ref rte_pause.h),
++  [I/O access](@ref rte_io.h),
++  [power management](@ref rte_power_intrinsics.h)
+ 
+ - **CPU multicore**:
+-  [interrupts]         (@ref rte_interrupts.h),
+-  [launch]             (@ref rte_launch.h),
+-  [lcore]              (@ref rte_lcore.h),
+-  [per-lcore]          (@ref rte_per_lcore.h),
+-  [service cores]      (@ref rte_service.h),
+-  [keepalive]          (@ref rte_keepalive.h),
+-  [power/freq]         (@ref rte_power.h),
+-  [PMD power]          (@ref rte_power_pmd_mgmt.h)
++  [interrupts](@ref rte_interrupts.h),
++  [launch](@ref rte_launch.h),
++  [lcore](@ref rte_lcore.h),
++  [per-lcore](@ref rte_per_lcore.h),
++  [service cores](@ref rte_service.h),
++  [keepalive](@ref rte_keepalive.h),
++  [power/freq](@ref rte_power.h),
++  [PMD power](@ref rte_power_pmd_mgmt.h)
+ 
+ - **layers**:
+-  [ethernet]           (@ref rte_ether.h),
+-  [ARP]                (@ref rte_arp.h),
+-  [HIGIG]              (@ref rte_higig.h),
+-  [ICMP]               (@ref rte_icmp.h),
+-  [ESP]                (@ref rte_esp.h),
+-  [IPsec]              (@ref rte_ipsec.h),
+-  [IPsec group]        (@ref rte_ipsec_group.h),
+-  [IPsec SA]           (@ref rte_ipsec_sa.h),
+-  [IPsec SAD]          (@ref rte_ipsec_sad.h),
+-  [IP]                 (@ref rte_ip.h),
+-  [frag/reass]         (@ref rte_ip_frag.h),
+-  [SCTP]               (@ref rte_sctp.h),
+-  [TCP]                (@ref rte_tcp.h),
+-  [UDP]                (@ref rte_udp.h),
+-  [GTP]                (@ref rte_gtp.h),
+-  [GRO]                (@ref rte_gro.h),
+-  [GSO]                (@ref rte_gso.h),
+-  [GRE]                (@ref rte_gre.h),
+-  [MPLS]               (@ref rte_mpls.h),
+-  [VXLAN]              (@ref rte_vxlan.h),
+-  [Geneve]             (@ref rte_geneve.h),
+-  [eCPRI]              (@ref rte_ecpri.h),
+-  [L2TPv2]             (@ref rte_l2tpv2.h),
+-  [PPP]                (@ref rte_ppp.h)
++  [ethernet](@ref rte_ether.h),
++  [ARP](@ref rte_arp.h),
++  [HIGIG](@ref rte_higig.h),
++  [ICMP](@ref rte_icmp.h),
++  [ESP](@ref rte_esp.h),
++  [IPsec](@ref rte_ipsec.h),
++  [IPsec group](@ref rte_ipsec_group.h),
++  [IPsec SA](@ref rte_ipsec_sa.h),
++  [IPsec SAD](@ref rte_ipsec_sad.h),
++  [IP](@ref rte_ip.h),
++  [frag/reass](@ref rte_ip_frag.h),
++  [SCTP](@ref rte_sctp.h),
++  [TCP](@ref rte_tcp.h),
++  [UDP](@ref rte_udp.h),
++  [GTP](@ref rte_gtp.h),
++  [GRO](@ref rte_gro.h),
++  [GSO](@ref rte_gso.h),
++  [GRE](@ref rte_gre.h),
++  [MPLS](@ref rte_mpls.h),
++  [VXLAN](@ref rte_vxlan.h),
++  [Geneve](@ref rte_geneve.h),
++  [eCPRI](@ref rte_ecpri.h),
++  [L2TPv2](@ref rte_l2tpv2.h),
++  [PPP](@ref rte_ppp.h)
+ 
+ - **QoS**:
+-  [metering]           (@ref rte_meter.h),
+-  [scheduler]          (@ref rte_sched.h),
+-  [RED congestion]     (@ref rte_red.h)
++  [metering](@ref rte_meter.h),
++  [scheduler](@ref rte_sched.h),
++  [RED congestion](@ref rte_red.h)
+ 
+ - **routing**:
+-  [LPM IPv4 route]     (@ref rte_lpm.h),
+-  [LPM IPv6 route]     (@ref rte_lpm6.h),
+-  [RIB IPv4]           (@ref rte_rib.h),
+-  [RIB IPv6]           (@ref rte_rib6.h),
+-  [FIB IPv4]           (@ref rte_fib.h),
+-  [FIB IPv6]           (@ref rte_fib6.h)
++  [LPM IPv4 route](@ref rte_lpm.h),
++  [LPM IPv6 route](@ref rte_lpm6.h),
++  [RIB IPv4](@ref rte_rib.h),
++  [RIB IPv6](@ref rte_rib6.h),
++  [FIB IPv4](@ref rte_fib.h),
++  [FIB IPv6](@ref rte_fib6.h)
+ 
+ - **hashes**:
+-  [hash]               (@ref rte_hash.h),
+-  [jhash]              (@ref rte_jhash.h),
+-  [thash]              (@ref rte_thash.h),
+-  [thash_gfni]         (@ref rte_thash_gfni.h),
+-  [FBK hash]           (@ref rte_fbk_hash.h),
+-  [CRC hash]           (@ref rte_hash_crc.h)
++  [hash](@ref rte_hash.h),
++  [jhash](@ref rte_jhash.h),
++  [thash](@ref rte_thash.h),
++  [thash_gfni](@ref rte_thash_gfni.h),
++  [FBK hash](@ref rte_fbk_hash.h),
++  [CRC hash](@ref rte_hash_crc.h)
+ 
+ - **classification**
+-  [reorder]            (@ref rte_reorder.h),
+-  [distributor]        (@ref rte_distributor.h),
+-  [EFD]                (@ref rte_efd.h),
+-  [ACL]                (@ref rte_acl.h),
+-  [member]             (@ref rte_member.h),
+-  [flow classify]      (@ref rte_flow_classify.h),
+-  [BPF]                (@ref rte_bpf.h)
++  [reorder](@ref rte_reorder.h),
++  [distributor](@ref rte_distributor.h),
++  [EFD](@ref rte_efd.h),
++  [ACL](@ref rte_acl.h),
++  [member](@ref rte_member.h),
++  [flow classify](@ref rte_flow_classify.h),
++  [BPF](@ref rte_bpf.h)
+ 
+ - **containers**:
+-  [mbuf]               (@ref rte_mbuf.h),
+-  [mbuf pool ops]      (@ref rte_mbuf_pool_ops.h),
+-  [ring]               (@ref rte_ring.h),
+-  [stack]              (@ref rte_stack.h),
+-  [tailq]              (@ref rte_tailq.h),
+-  [bitmap]             (@ref rte_bitmap.h)
++  [mbuf](@ref rte_mbuf.h),
++  [mbuf pool ops](@ref rte_mbuf_pool_ops.h),
++  [ring](@ref rte_ring.h),
++  [stack](@ref rte_stack.h),
++  [tailq](@ref rte_tailq.h),
++  [bitmap](@ref rte_bitmap.h)
+ 
+ - **packet framework**:
+-  * [port]             (@ref rte_port.h):
+-    [ethdev]           (@ref rte_port_ethdev.h),
+-    [ring]             (@ref rte_port_ring.h),
+-    [frag]             (@ref rte_port_frag.h),
+-    [reass]            (@ref rte_port_ras.h),
+-    [sched]            (@ref rte_port_sched.h),
+-    [kni]              (@ref rte_port_kni.h),
+-    [src/sink]         (@ref rte_port_source_sink.h)
+-  * [table]            (@ref rte_table.h):
+-    [lpm IPv4]         (@ref rte_table_lpm.h),
+-    [lpm IPv6]         (@ref rte_table_lpm_ipv6.h),
+-    [ACL]              (@ref rte_table_acl.h),
+-    [hash]             (@ref rte_table_hash.h),
+-    [array]            (@ref rte_table_array.h),
+-    [stub]             (@ref rte_table_stub.h)
+-  * [pipeline]         (@ref rte_pipeline.h)
+-    [port_in_action]   (@ref rte_port_in_action.h)
+-    [table_action]     (@ref rte_table_action.h)
++  * [port](@ref rte_port.h):
++    [ethdev](@ref rte_port_ethdev.h),
++    [ring](@ref rte_port_ring.h),
++    [frag](@ref rte_port_frag.h),
++    [reass](@ref rte_port_ras.h),
++    [sched](@ref rte_port_sched.h),
++    [kni](@ref rte_port_kni.h),
++    [src/sink](@ref rte_port_source_sink.h)
++  * [table](@ref rte_table.h):
++    [lpm IPv4](@ref rte_table_lpm.h),
++    [lpm IPv6](@ref rte_table_lpm_ipv6.h),
++    [ACL](@ref rte_table_acl.h),
++    [hash](@ref rte_table_hash.h),
++    [array](@ref rte_table_array.h),
++    [stub](@ref rte_table_stub.h)
++  * [pipeline](@ref rte_pipeline.h)
++    [port_in_action](@ref rte_port_in_action.h)
++    [table_action](@ref rte_table_action.h)
+   * SWX pipeline:
+-    [control]          (@ref rte_swx_ctl.h),
+-    [extern]           (@ref rte_swx_extern.h),
+-    [pipeline]         (@ref rte_swx_pipeline.h)
++    [control](@ref rte_swx_ctl.h),
++    [extern](@ref rte_swx_extern.h),
++    [pipeline](@ref rte_swx_pipeline.h)
+   * SWX port:
+-    [port]             (@ref rte_swx_port.h),
+-    [ethdev]           (@ref rte_swx_port_ethdev.h),
+-    [fd]               (@ref rte_swx_port_fd.h),
+-    [ring]             (@ref rte_swx_port_ring.h),
+-    [src/sink]         (@ref rte_swx_port_source_sink.h)
++    [port](@ref rte_swx_port.h),
++    [ethdev](@ref rte_swx_port_ethdev.h),
++    [fd](@ref rte_swx_port_fd.h),
++    [ring](@ref rte_swx_port_ring.h),
++    [src/sink](@ref rte_swx_port_source_sink.h)
+   * SWX table:
+-    [table]            (@ref rte_swx_table.h),
+-    [table_em]         (@ref rte_swx_table_em.h)
+-    [table_wm]         (@ref rte_swx_table_wm.h)
+-  * [graph]            (@ref rte_graph.h):
+-    [graph_worker]     (@ref rte_graph_worker.h)
++    [table](@ref rte_swx_table.h),
++    [table_em](@ref rte_swx_table_em.h)
++    [table_wm](@ref rte_swx_table_wm.h)
++  * [graph](@ref rte_graph.h):
++    [graph_worker](@ref rte_graph_worker.h)
+   * graph_nodes:
+-    [eth_node]         (@ref rte_node_eth_api.h),
+-    [ip4_node]         (@ref rte_node_ip4_api.h)
++    [eth_node](@ref rte_node_eth_api.h),
++    [ip4_node](@ref rte_node_ip4_api.h)
+ 
+ - **basic**:
+-  [bitops]             (@ref rte_bitops.h),
+-  [approx fraction]    (@ref rte_approx.h),
+-  [random]             (@ref rte_random.h),
+-  [config file]        (@ref rte_cfgfile.h),
+-  [key/value args]     (@ref rte_kvargs.h),
+-  [string]             (@ref rte_string_fns.h)
++  [bitops](@ref rte_bitops.h),
++  [approx fraction](@ref rte_approx.h),
++  [random](@ref rte_random.h),
++  [config file](@ref rte_cfgfile.h),
++  [key/value args](@ref rte_kvargs.h),
++  [string](@ref rte_string_fns.h)
+ 
+ - **debug**:
+-  [jobstats]           (@ref rte_jobstats.h),
+-  [telemetry]          (@ref rte_telemetry.h),
+-  [pcapng]             (@ref rte_pcapng.h),
+-  [pdump]              (@ref rte_pdump.h),
+-  [hexdump]            (@ref rte_hexdump.h),
+-  [debug]              (@ref rte_debug.h),
+-  [log]                (@ref rte_log.h),
+-  [errno]              (@ref rte_errno.h),
+-  [trace]              (@ref rte_trace.h),
+-  [trace_point]        (@ref rte_trace_point.h)
++  [jobstats](@ref rte_jobstats.h),
++  [telemetry](@ref rte_telemetry.h),
++  [pcapng](@ref rte_pcapng.h),
++  [pdump](@ref rte_pdump.h),
++  [hexdump](@ref rte_hexdump.h),
++  [debug](@ref rte_debug.h),
++  [log](@ref rte_log.h),
++  [errno](@ref rte_errno.h),
++  [trace](@ref rte_trace.h),
++  [trace_point](@ref rte_trace_point.h)
+ 
+ - **misc**:
+-  [EAL config]         (@ref rte_eal.h),
+-  [common]             (@ref rte_common.h),
+-  [experimental APIs]  (@ref rte_compat.h),
+-  [ABI versioning]     (@ref rte_function_versioning.h),
+-  [version]            (@ref rte_version.h)
++  [EAL config](@ref rte_eal.h),
++  [common](@ref rte_common.h),
++  [experimental APIs](@ref rte_compat.h),
++  [ABI versioning](@ref rte_function_versioning.h),
++  [version](@ref rte_version.h)
+diff --git a/dpdk/doc/api/generate_examples.sh b/dpdk/doc/api/generate_examples.sh
+index 3e08236596..48574563ca 100755
+--- a/dpdk/doc/api/generate_examples.sh
++++ b/dpdk/doc/api/generate_examples.sh
+@@ -6,21 +6,15 @@ EXAMPLES_DIR=$1
+ API_EXAMPLES=$2
+ 
+ FIND=find
+-if [ "$(uname)" = "FreeBSD" ] ; then
+-# on FreeBSD, we need GNU find for "-printf" flag
+-	FIND=gfind
+-	if ! which -s $FIND ; then
+-		echo "Error: need '$FIND' on FreeBSD. Install 'findutils' pkg"
+-		exit 1
+-	fi
+-fi
+ 
+ # generate a .d file including both C files and also build files, so we can
+ # detect both file changes and file additions/deletions
+-echo "$API_EXAMPLES: $($FIND ${EXAMPLES_DIR} -type f \( -name '*.c' -o -name 'meson.build' \) -printf '%p ' )" > ${API_EXAMPLES}.d
++echo "$API_EXAMPLES: $($FIND ${EXAMPLES_DIR} -type f \( -name '*.c' -o -name 'meson.build' \) | tr '\n' ' ' )" > ${API_EXAMPLES}.d
+ 
+ exec > "${API_EXAMPLES}"
+ printf '/**\n'
+ printf '@page examples DPDK Example Programs\n\n'
+-$FIND "${EXAMPLES_DIR}" -type f -name '*.c' -printf '@example examples/%P\n' | LC_ALL=C sort
++$FIND "${EXAMPLES_DIR}" -type f -name '*.c' |
++	sed "s|${EXAMPLES_DIR}|@example examples|" |
++	LC_ALL=C sort
+ printf '*/\n'
+diff --git a/dpdk/doc/api/meson.build b/dpdk/doc/api/meson.build
+index 7e2b429ac8..5c25b92092 100644
+--- a/dpdk/doc/api/meson.build
++++ b/dpdk/doc/api/meson.build
+@@ -24,7 +24,7 @@ htmldir = join_paths(get_option('datadir'), 'doc', 'dpdk')
+ # So use a configure option for now.
+ example = custom_target('examples.dox',
+         output: 'examples.dox',
+-        command: [generate_examples, join_paths(meson.source_root(), 'examples'), '@OUTPUT@'],
++        command: [generate_examples, join_paths(dpdk_source_root, 'examples'), '@OUTPUT@'],
+         depfile: 'examples.dox.d',
+         install: get_option('enable_docs'),
+         install_dir: htmldir,
+@@ -32,11 +32,11 @@ example = custom_target('examples.dox',
+ 
+ cdata = configuration_data()
+ cdata.set('VERSION', meson.project_version())
+-cdata.set('API_EXAMPLES', join_paths(meson.build_root(), 'doc', 'api', 'examples.dox'))
+-cdata.set('OUTPUT', join_paths(meson.build_root(), 'doc', 'api'))
++cdata.set('API_EXAMPLES', join_paths(dpdk_build_root, 'doc', 'api', 'examples.dox'))
++cdata.set('OUTPUT', join_paths(dpdk_build_root, 'doc', 'api'))
+ cdata.set('HTML_OUTPUT', 'html')
+-cdata.set('TOPDIR', meson.source_root())
+-cdata.set('STRIP_FROM_PATH', ' '.join([meson.source_root(), join_paths(meson.build_root(), 'doc', 'api')]))
++cdata.set('TOPDIR', dpdk_source_root)
++cdata.set('STRIP_FROM_PATH', ' '.join([dpdk_source_root, join_paths(dpdk_build_root, 'doc', 'api')]))
+ cdata.set('WARN_AS_ERROR', 'NO')
+ if get_option('werror')
+     cdata.set('WARN_AS_ERROR', 'YES')
+diff --git a/dpdk/doc/guides/compressdevs/mlx5.rst b/dpdk/doc/guides/compressdevs/mlx5.rst
+index a4e17f65b3..7f2d6bdfff 100644
+--- a/dpdk/doc/guides/compressdevs/mlx5.rst
++++ b/dpdk/doc/guides/compressdevs/mlx5.rst
+@@ -7,7 +7,7 @@ MLX5 compress driver
+ ====================
+ 
+ The MLX5 compress driver library
+-(**librte_compress_mlx5**) provides support for **Mellanox BlueField 2**
++(**librte_compress_mlx5**) provides support for **Mellanox BlueField-2**
+ families of 25/50/100/200 Gb/s adapters.
+ 
+ Design
+@@ -21,7 +21,7 @@ These engines are part of the ARM complex of the BlueField chip, and as
+ such they do not use NIC related resources (e.g. RX/TX bandwidth).
+ They do share the same PCI and memory bandwidth.
+ 
+-So, using the BlueField device (starting from BlueField 2), the compress
++So, using the BlueField device (starting from BlueField-2), the compress
+ class operations can be supported in parallel to the net, vDPA and
+ RegEx class operations.
+ 
+@@ -95,7 +95,7 @@ Driver options
+ Supported NICs
+ --------------
+ 
+-* Mellanox\ |reg| BlueField 2 SmartNIC
++* Mellanox\ |reg| BlueField-2 SmartNIC
+ 
+ Prerequisites
+ -------------
+diff --git a/dpdk/doc/guides/conf.py b/dpdk/doc/guides/conf.py
+index 1743ce301f..a55ce38800 100644
+--- a/dpdk/doc/guides/conf.py
++++ b/dpdk/doc/guides/conf.py
+@@ -3,7 +3,7 @@
+ # Copyright(c) 2010-2015 Intel Corporation
+ 
+ from docutils import nodes
+-from distutils.version import LooseVersion
++from packaging.version import Version
+ from sphinx import __version__ as sphinx_version
+ from os import listdir
+ from os import environ
+@@ -28,7 +28,7 @@
+ 
+ project = 'Data Plane Development Kit'
+ html_logo = '../logo/DPDK_logo_vertical_rev_small.png'
+-if LooseVersion(sphinx_version) >= LooseVersion('3.5'):
++if Version(sphinx_version) >= Version('3.5'):
+     html_permalinks = False
+ else:
+     html_add_permalinks = ""
+@@ -427,7 +427,7 @@ def setup(app):
+                             'Features availability for Timer adapters',
+                             'Feature')
+ 
+-    if LooseVersion(sphinx_version) < LooseVersion('1.3.1'):
++    if Version(sphinx_version) < Version('1.3.1'):
+         print('Upgrade sphinx to version >= 1.3.1 for '
+               'improved Figure/Table number handling.',
+               file=stderr)
+diff --git a/dpdk/doc/guides/cryptodevs/mlx5.rst b/dpdk/doc/guides/cryptodevs/mlx5.rst
+index e86a6205e8..9936556cc9 100644
+--- a/dpdk/doc/guides/cryptodevs/mlx5.rst
++++ b/dpdk/doc/guides/cryptodevs/mlx5.rst
+@@ -88,7 +88,7 @@ The mlxreg dedicated tool should be used as follows:
+   should not be specified.
+ 
+   All the device ports should set it in order to move to operational mode.
+-  For BlueField 2, the internal ports in the ARM system should also be set.
++  For BlueField-2, the internal ports in the ARM system should also be set.
+ 
+ - Query CRYPTO_OPERATIONAL register to make sure the device is in Operational
+   mode.
+@@ -142,7 +142,7 @@ Supported NICs
+ --------------
+ 
+ * Mellanox\ |reg| ConnectX\ |reg|-6 200G MCX654106A-HCAT (2x200G)
+-* Mellanox\ |reg| BlueField 2 SmartNIC
++* Mellanox\ |reg| BlueField-2 SmartNIC
+ * Mellanox\ |reg| ConnectX\ |reg|-6 Dx
+ 
+ 
+@@ -162,7 +162,7 @@ FW Prerequisites
+ ~~~~~~~~~~~~~~~~
+ 
+ - xx.31.0328 for ConnectX-6.
+-- xx.32.0108 for ConnectX-6 Dx and BlueField 2.
++- xx.32.0108 for ConnectX-6 Dx and BlueField-2.
+ 
+ Linux Prerequisites
+ ~~~~~~~~~~~~~~~~~~~
+diff --git a/dpdk/doc/guides/dmadevs/hisilicon.rst b/dpdk/doc/guides/dmadevs/hisilicon.rst
+index 191e56f2f7..974bc49376 100644
+--- a/dpdk/doc/guides/dmadevs/hisilicon.rst
++++ b/dpdk/doc/guides/dmadevs/hisilicon.rst
+@@ -29,8 +29,8 @@ which can be accessed using API from the ``rte_dmadev`` library.
+ 
+ The name of the ``dmadev`` created is like "B:D.F-chX", e.g. DMA 0000:7b:00.0
+ will create four ``dmadev``,
+-the 1st ``dmadev`` name is "7b:00.0-ch0",
+-and the 2nd ``dmadev`` name is "7b:00.0-ch1".
++the 1st ``dmadev`` name is "0000:7b:00.0-ch0",
++and the 2nd ``dmadev`` name is "0000:7b:00.0-ch1".
+ 
+ Device Configuration
+ ~~~~~~~~~~~~~~~~~~~~~
+diff --git a/dpdk/doc/guides/dmadevs/idxd.rst b/dpdk/doc/guides/dmadevs/idxd.rst
+index d4a210b854..81451afd4d 100644
+--- a/dpdk/doc/guides/dmadevs/idxd.rst
++++ b/dpdk/doc/guides/dmadevs/idxd.rst
+@@ -55,7 +55,6 @@ such as priority or queue depth, need to be set for each queue.
+ To assign an engine to a group::
+ 
+         $ accel-config config-engine dsa0/engine0.0 --group-id=0
+-        $ accel-config config-engine dsa0/engine0.1 --group-id=1
+ 
+ To assign work queues to groups for passing descriptors to the engines a similar accel-config command can be used.
+ However, the work queues also need to be configured depending on the use case.
+@@ -71,7 +70,7 @@ Example configuration for a work queue::
+ 
+         $ accel-config config-wq dsa0/wq0.0 --group-id=0 \
+            --mode=dedicated --priority=10 --wq-size=8 \
+-           --type=user --name=dpdk_app1
++           --max-batch-size=512 --type=user --name=dpdk_app1
+ 
+ Once the devices have been configured, they need to be enabled::
+ 
+@@ -82,6 +81,32 @@ Check the device configuration::
+ 
+         $ accel-config list
+ 
++Every Intel\ |reg| DSA instance supports multiple queues and each should be similarly configured.
++As a further example, the following set of commands will configure and enable 4 queues on instance 0,
++giving each an equal share of resources::
++
++        # configure 4 groups, each with one engine
++        accel-config config-engine dsa0/engine0.0 --group-id=0
++        accel-config config-engine dsa0/engine0.1 --group-id=1
++        accel-config config-engine dsa0/engine0.2 --group-id=2
++        accel-config config-engine dsa0/engine0.3 --group-id=3
++
++        # configure 4 queues, putting each in a different group, so each
++        # is backed by a single engine
++        accel-config config-wq dsa0/wq0.0 --group-id=0 --type=user --wq-size=32 \
++            --priority=10 --max-batch-size=1024 --mode=dedicated --name=dpdk_app1
++        accel-config config-wq dsa0/wq0.1 --group-id=1 --type=user --wq-size=32 \
++            --priority=10 --max-batch-size=1024 --mode=dedicated --name=dpdk_app1
++        accel-config config-wq dsa0/wq0.2 --group-id=2 --type=user --wq-size=32 \
++            --priority=10 --max-batch-size=1024 --mode=dedicated --name=dpdk_app1
++        accel-config config-wq dsa0/wq0.3 --group-id=3 --type=user --wq-size=32 \
++            --priority=10 --max-batch-size=1024 --mode=dedicated --name=dpdk_app1
++
++        # enable device and queues
++        accel-config enable-device dsa0
++        accel-config enable-wq dsa0/wq0.0 dsa0/wq0.1 dsa0/wq0.2 dsa0/wq0.3
++
++
+ Devices using VFIO/UIO drivers
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+diff --git a/dpdk/doc/guides/eventdevs/dlb2.rst b/dpdk/doc/guides/eventdevs/dlb2.rst
+index bce984ca08..bc53618b53 100644
+--- a/dpdk/doc/guides/eventdevs/dlb2.rst
++++ b/dpdk/doc/guides/eventdevs/dlb2.rst
+@@ -151,7 +151,7 @@ load-balanced queues, and directed credits are used for directed queues.
+ These pools' sizes are controlled by the nb_events_limit field in struct
+ rte_event_dev_config. The load-balanced pool is sized to contain
+ nb_events_limit credits, and the directed pool is sized to contain
+-nb_events_limit/4 credits. The directed pool size can be overridden with the
++nb_events_limit/2 credits. The directed pool size can be overridden with the
+ num_dir_credits devargs argument, like so:
+ 
+     .. code-block:: console
+@@ -239,8 +239,8 @@ queue A.
+ Due to this, workers should stop retrying after a time, release the events it
+ is attempting to enqueue, and dequeue more events. It is important that the
+ worker release the events and don't simply set them aside to retry the enqueue
+-again later, because the port has limited history list size (by default, twice
+-the port's dequeue_depth).
++again later, because the port has limited history list size (by default, same
++as port's dequeue_depth).
+ 
+ Priority
+ ~~~~~~~~
+@@ -309,18 +309,11 @@ scheduled. The likelihood of this case depends on the eventdev configuration,
+ traffic behavior, event processing latency, potential for a worker to be
+ interrupted or otherwise delayed, etc.
+ 
+-By default, the PMD allocates 16 buffer entries for each load-balanced queue,
+-which provides an even division across all 128 queues but potentially wastes
++By default, the PMD allocates 64 buffer entries for each load-balanced queue,
++which provides an even division across all 32 queues but potentially wastes
+ buffer space (e.g. if not all queues are used, or aren't used for atomic
+ scheduling).
+ 
+-The PMD provides a dev arg to override the default per-queue allocation. To
+-increase per-queue atomic-inflight allocation to (for example) 64:
+-
+-    .. code-block:: console
+-
+-       --allow ea:00.0,atm_inflights=64
+-
+ QID Depth Threshold
+ ~~~~~~~~~~~~~~~~~~~
+ 
+@@ -337,7 +330,7 @@ Per queue threshold metrics are tracked in the DLB xstats, and are also
+ returned in the impl_opaque field of each received event.
+ 
+ The per qid threshold can be specified as part of the device args, and
+-can be applied to all queue, a range of queues, or a single queue, as
++can be applied to all queues, a range of queues, or a single queue, as
+ shown below.
+ 
+     .. code-block:: console
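
With the history list now defaulting to only ``dequeue_depth`` entries, the
advice above matters more: a worker that cannot enqueue should release its
events rather than hold them. A minimal sketch, assuming ``dev_id``,
``port_id`` and ``ev`` from the surrounding application and a placeholder
``MAX_ENQ_RETRIES``:

.. code-block:: c

   int retries = 0;

   while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) == 0) {
       if (++retries == MAX_ENQ_RETRIES) {
           /* Stop holding the event: hand its credit and history-list
            * slot back to the device, then go dequeue more work. */
           ev.op = RTE_EVENT_OP_RELEASE;
           rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
           break;
       }
   }
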
+diff --git a/dpdk/doc/guides/eventdevs/features/dlb2.ini b/dpdk/doc/guides/eventdevs/features/dlb2.ini
+index 29747b1c26..48a2a18aff 100644
+--- a/dpdk/doc/guides/eventdevs/features/dlb2.ini
++++ b/dpdk/doc/guides/eventdevs/features/dlb2.ini
+@@ -4,12 +4,13 @@
+ ; Refer to default.ini for the full list of available PMD features.
+ ;
+ [Scheduling Features]
+-queue_qos                  = Y
+ event_qos                  = Y
+ distributed_sched          = Y
+ queue_all_types            = Y
+ burst_mode                 = Y
+ implicit_release_disable   = Y
++runtime_port_link          = Y
++multiple_queue_port        = Y
+ maintenance_free           = Y
+ 
+ [Eth Rx adapter Features]
+diff --git a/dpdk/doc/guides/gpus/features/cuda.ini b/dpdk/doc/guides/gpus/features/cuda.ini
+new file mode 100644
+index 0000000000..9d587eed6e
+--- /dev/null
++++ b/dpdk/doc/guides/gpus/features/cuda.ini
+@@ -0,0 +1,10 @@
++;
++; Supported features of the 'cuda' gpu driver.
++;
++; Refer to default.ini for the full list of available PMD features.
++;
++[Features]
++Get device info                = Y
++Share CPU memory with device   = Y
++Allocate device memory         = Y
++Free memory                    = Y
+diff --git a/dpdk/doc/guides/howto/img/virtio_user_for_container_networking.svg b/dpdk/doc/guides/howto/img/virtio_user_for_container_networking.svg
+index de80806649..dc9b318e7e 100644
+--- a/dpdk/doc/guides/howto/img/virtio_user_for_container_networking.svg
++++ b/dpdk/doc/guides/howto/img/virtio_user_for_container_networking.svg
+@@ -465,7 +465,7 @@
+        v:mID="63"
+        id="shape63-63"><title
+          id="title149">Sheet.63</title><desc
+-         id="desc151">Contanier/App</desc><v:textBlock
++         id="desc151">Container/App</desc><v:textBlock
+          v:margins="rect(4,4,4,4)" /><v:textRect
+          height="22.5"
+          width="90"
+diff --git a/dpdk/doc/guides/linux_gsg/enable_func.rst b/dpdk/doc/guides/linux_gsg/enable_func.rst
+index 7bd6b03f10..7538d04d97 100644
+--- a/dpdk/doc/guides/linux_gsg/enable_func.rst
++++ b/dpdk/doc/guides/linux_gsg/enable_func.rst
+@@ -1,6 +1,8 @@
+ ..  SPDX-License-Identifier: BSD-3-Clause
+     Copyright(c) 2010-2014 Intel Corporation.
+ 
++.. include:: <isonum.txt>
++
+ .. _Enabling_Additional_Functionality:
+ 
+ Enabling Additional Functionality
+@@ -64,13 +66,62 @@ The application can then determine what action to take, if any, if the HPET is n
+ Running DPDK Applications Without Root Privileges
+ -------------------------------------------------
+ 
+-In order to run DPDK as non-root, the following Linux filesystem objects'
+-permissions should be adjusted to ensure that the Linux account being used to
+-run the DPDK application has access to them:
++The following sections describe generic requirements and configuration
++for running DPDK applications as non-root.
++There may be additional requirements documented for some drivers.
++
++Hugepages
++~~~~~~~~~
++
++Hugepages must be reserved as root before running the application as non-root,
++for example::
++
++  sudo dpdk-hugepages.py --reserve 1G
++
++If multi-process is not required, running with ``--in-memory``
++bypasses the need to access the hugepage mount point and files within it.
++Otherwise, the hugepage directory must be made writable
++by the unprivileged user.
++A good way to manage multiple applications using hugepages
++is to mount the filesystem with group permissions
++and add a supplementary group to each application or container.
++
++One option is to mount manually::
++
++  mount -t hugetlbfs -o pagesize=1G,uid=`id -u`,gid=`id -g` nodev $HOME/huge-1G
++
++In a production environment, the OS can manage mount points
++(`systemd example <https://github.com/systemd/systemd/blob/main/units/dev-hugepages.mount>`_).
++
++The ``hugetlb`` filesystem has additional options to guarantee or limit
++the amount of memory that can be allocated through the mount point.
++Refer to the `documentation <https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt>`_.
++
++.. note::
++
++   Using ``vfio-pci`` kernel driver, if applicable, can eliminate the need
++   for physical addresses and therefore eliminate the permission requirements
++   described below.
++
++If the driver requires using physical addresses (PA),
++the executable file must be granted additional capabilities:
++
++* ``SYS_ADMIN`` to read ``/proc/self/pagemaps``
++* ``IPC_LOCK`` to lock hugepages in memory
+ 
+-*   All directories which serve as hugepage mount points, for example, ``/dev/hugepages``
++.. code-block:: console
++
++   setcap cap_ipc_lock,cap_sys_admin+ep <executable>
++
++If physical addresses are not accessible,
++the following message will appear during EAL initialization::
++
++  EAL: rte_mem_virt2phy(): cannot open /proc/self/pagemap: Permission denied
+ 
+-*   If the HPET is to be used,  ``/dev/hpet``
++The message is harmless if physical addresses are not actually needed.
++
++Resource Limits
++~~~~~~~~~~~~~~~
+ 
+ When running as non-root user, there may be some additional resource limits
+ that are imposed by the system. Specifically, the following resource limits may
+@@ -85,8 +136,10 @@ need to be adjusted in order to ensure normal DPDK operation:
+ The above limits can usually be adjusted by editing
+ ``/etc/security/limits.conf`` file, and rebooting.
+ 
+-Additionally, depending on which kernel driver is in use, the relevant
+-resources also should be accessible by the user running the DPDK application.
++Device Control
++~~~~~~~~~~~~~~
++
++If the HPET is to be used, ``/dev/hpet`` permissions must be adjusted.
+ 
+ For ``vfio-pci`` kernel driver, the following Linux file system objects'
+ permissions should be adjusted:
+@@ -96,38 +149,18 @@ permissions should be adjusted:
+ * The directories under ``/dev/vfio`` that correspond to IOMMU group numbers of
+   devices intended to be used by DPDK, for example, ``/dev/vfio/50``
+ 
+-.. note::
+-
+-    The instructions below will allow running DPDK with ``igb_uio`` or
+-    ``uio_pci_generic`` drivers as non-root with older Linux kernel versions.
+-    However, since version 4.0, the kernel does not allow unprivileged processes
+-    to read the physical address information from the pagemaps file, making it
+-    impossible for those processes to be used by non-privileged users. In such
+-    cases, using the VFIO driver is recommended.
+-
+-For ``igb_uio`` or ``uio_pci_generic`` kernel drivers, the following Linux file
+-system objects' permissions should be adjusted:
+-
+-*   The userspace-io device files in  ``/dev``, for example,  ``/dev/uio0``, ``/dev/uio1``, and so on
+-
+-*   The userspace-io sysfs config and resource files, for example for ``uio0``::
+-
+-       /sys/class/uio/uio0/device/config
+-       /sys/class/uio/uio0/device/resource*
+-
+-
+ Power Management and Power Saving Functionality
+ -----------------------------------------------
+ 
+-Enhanced Intel SpeedStep® Technology must be enabled in the platform BIOS if the power management feature of DPDK is to be used.
++Enhanced Intel SpeedStep\ |reg| Technology must be enabled in the platform BIOS if the power management feature of DPDK is to be used.
+ Otherwise, the sys file folder ``/sys/devices/system/cpu/cpu0/cpufreq`` will not exist, and the CPU frequency- based power management cannot be used.
+ Consult the relevant BIOS documentation to determine how these settings can be accessed.
+ 
+-For example, on some Intel reference platform BIOS variants, the path to Enhanced Intel SpeedStep® Technology is::
++For example, on some Intel reference platform BIOS variants, the path to Enhanced Intel SpeedStep\ |reg| Technology is::
+ 
+    Advanced
+      -> Processor Configuration
+-     -> Enhanced Intel SpeedStep® Tech
++     -> Enhanced Intel SpeedStep\ |reg| Tech
+ 
+ In addition, C3 and C6 should be enabled as well for power management. The path of C3 and C6 on the same platform BIOS is::
+ 
+diff --git a/dpdk/doc/guides/linux_gsg/linux_drivers.rst b/dpdk/doc/guides/linux_gsg/linux_drivers.rst
+index 2dd711bb37..75af2f01e1 100644
+--- a/dpdk/doc/guides/linux_gsg/linux_drivers.rst
++++ b/dpdk/doc/guides/linux_gsg/linux_drivers.rst
+@@ -3,6 +3,8 @@
+     Copyright 2017 Mellanox Technologies, Ltd
+     All rights reserved.
+ 
++.. include:: <isonum.txt>
++
+ .. _linux_gsg_linux_drivers:
+ 
+ Linux Drivers
+@@ -99,7 +101,7 @@ The token will be used for all PF and VF ports within the application.
+ 
+ To make use of full VFIO functionality,
+ both kernel and BIOS must support and be configured
+-to use IO virtualization (such as Intel® VT-d).
++to use IO virtualization (such as Intel\ |reg| VT-d).
+ 
+ .. note::
+ 
+@@ -172,6 +174,11 @@ It can be loaded as shown below:
+    sudo modprobe uio
+    sudo insmod igb_uio.ko
+ 
++.. note::
++
++    For some devices which lack support for legacy interrupts, e.g. virtual function
++    (VF) devices, the ``igb_uio`` module may be needed in place of ``uio_pci_generic``.
++
+ .. note::
+ 
+    If UEFI secure boot is enabled,
+@@ -335,7 +342,7 @@ Please refer to earlier sections on how to configure kernel parameters
+ correctly for your system.
+ 
+ If the kernel is configured correctly, one also has to make sure that
+-the BIOS configuration has virtualization features (such as Intel® VT-d).
++the BIOS configuration has virtualization features (such as Intel\ |reg| VT-d).
+ There is no standard way to check if the platform is configured correctly,
+ so please check with your platform documentation to see if it has such features,
+ and how to enable them.
+diff --git a/dpdk/doc/guides/linux_gsg/sys_reqs.rst b/dpdk/doc/guides/linux_gsg/sys_reqs.rst
+index d95a78d156..cfaa2db301 100644
+--- a/dpdk/doc/guides/linux_gsg/sys_reqs.rst
++++ b/dpdk/doc/guides/linux_gsg/sys_reqs.rst
+@@ -1,6 +1,8 @@
+ ..  SPDX-License-Identifier: BSD-3-Clause
+     Copyright(c) 2010-2014 Intel Corporation.
+ 
++.. include:: <isonum.txt>
++
+ System Requirements
+ ===================
+ 
+@@ -72,10 +74,10 @@ Compilation of the DPDK
+ 
+ **Optional Tools:**
+ 
+-*   Intel® C++ Compiler (icc). For installation, additional libraries may be required.
++*   Intel\ |reg| C++ Compiler (icc). For installation, additional libraries may be required.
+     See the icc Installation Guide found in the Documentation directory under the compiler installation.
+ 
+-*   IBM® Advance ToolChain for Powerlinux. This is a set of open source development tools and runtime libraries
++*   IBM\ |reg| Advance ToolChain for Powerlinux. This is a set of open source development tools and runtime libraries
+     which allows users to take leading edge advantage of IBM's latest POWER hardware features on Linux. To install
+     it, see the IBM official installation document.
+ 
+diff --git a/dpdk/doc/guides/nics/af_packet.rst b/dpdk/doc/guides/nics/af_packet.rst
+index 8292369141..66b977e1a2 100644
+--- a/dpdk/doc/guides/nics/af_packet.rst
++++ b/dpdk/doc/guides/nics/af_packet.rst
+@@ -9,7 +9,7 @@ packets. This Linux-specific PMD binds to an AF_PACKET socket and allows
+ a DPDK application to send and receive raw packets through the Kernel.
+ 
+ In order to improve Rx and Tx performance this implementation makes use of
+-PACKET_MMAP, which provides a mmap'ed ring buffer, shared between user space
++PACKET_MMAP, which provides a mmapped ring buffer, shared between user space
+ and kernel, that's used to send and receive packets. This helps reducing system
+ calls and the copies needed between user space and Kernel.
+ 
+diff --git a/dpdk/doc/guides/nics/af_xdp.rst b/dpdk/doc/guides/nics/af_xdp.rst
+index c9d0e1ad6c..db02ea1984 100644
+--- a/dpdk/doc/guides/nics/af_xdp.rst
++++ b/dpdk/doc/guides/nics/af_xdp.rst
+@@ -43,9 +43,7 @@ Prerequisites
+ This is a Linux-specific PMD, thus the following prerequisites apply:
+ 
+ *  A Linux Kernel (version > v4.18) with XDP sockets configuration enabled;
+-*  libbpf (within kernel version > v5.1-rc4) with latest af_xdp support installed,
+-   User can install libbpf via `make install_lib` && `make install_headers` in
+-   <kernel src tree>/tools/lib/bpf;
++*  Both libxdp >=v1.2.2 and libbpf libraries installed, or libbpf <=v0.6.0;
+ *  A Kernel bound interface to attach to;
+ *  For need_wakeup feature, it requires kernel version later than v5.3-rc1;
+ *  For PMD zero copy, it requires kernel version later than v5.4-rc1;
+@@ -143,4 +141,4 @@ Limitations
+   NAPI context from a watchdog timer instead of from softirqs. More information
+   on this feature can be found at [1].
+ 
+-  [1] https://lwn.net/Articles/837010/
+\ No newline at end of file
++  [1] https://lwn.net/Articles/837010/
+diff --git a/dpdk/doc/guides/nics/features/ice_dcf.ini b/dpdk/doc/guides/nics/features/ice_dcf.ini
+index 4d6fb6d849..54073f0b88 100644
+--- a/dpdk/doc/guides/nics/features/ice_dcf.ini
++++ b/dpdk/doc/guides/nics/features/ice_dcf.ini
+@@ -3,6 +3,9 @@
+ ;
+ ; Refer to default.ini for the full list of available PMD features.
+ ;
++; A feature marked "P" is only supported when the non-vector path
++; is selected.
++;
+ [Features]
+ Queue start/stop     = Y
+ Scattered Rx         = Y
+@@ -10,6 +13,8 @@ RSS hash             = P
+ CRC offload          = Y
+ L3 checksum offload  = P
+ L4 checksum offload  = P
++Inner L3 checksum    = P
++Inner L4 checksum    = P
+ Basic stats          = Y
+ Linux                = Y
+ x86-32               = Y
+diff --git a/dpdk/doc/guides/nics/hns3.rst b/dpdk/doc/guides/nics/hns3.rst
+index 5f68a10ecf..791c9cc2ed 100644
+--- a/dpdk/doc/guides/nics/hns3.rst
++++ b/dpdk/doc/guides/nics/hns3.rst
+@@ -290,5 +290,10 @@ Currently, we only support VF device driven by DPDK driver when PF is driven
+ by kernel mode hns3 ethdev driver. VF is not supported when PF is driven by
+ DPDK driver.
+ 
++For the sake of Rx/Tx performance, IEEE 1588 is not supported when using the
++vec or sve burst functions. When IEEE 1588 is enabled, the Rx/Tx burst mode
++should be simple or common. It is recommended to enable IEEE 1588 before
++ethdev start so that the correct Rx/Tx burst function is selected.
++
+ Build with ICC is not supported yet.
+ X86-32, Power8, ARMv7 and BSD are not supported yet.
+diff --git a/dpdk/doc/guides/nics/i40e.rst b/dpdk/doc/guides/nics/i40e.rst
+index ef91b3a1ac..aedb1afc4b 100644
+--- a/dpdk/doc/guides/nics/i40e.rst
++++ b/dpdk/doc/guides/nics/i40e.rst
+@@ -101,6 +101,14 @@ For X710/XL710/XXV710,
+    +--------------+-----------------------+------------------+
+    | DPDK version | Kernel driver version | Firmware version |
+    +==============+=======================+==================+
++   |    22.03     |         2.17.15       |       8.30       |
++   +--------------+-----------------------+------------------+
++   |    21.11     |         2.17.4        |       8.30       |
++   +--------------+-----------------------+------------------+
++   |    21.08     |         2.15.9        |       8.30       |
++   +--------------+-----------------------+------------------+
++   |    21.05     |         2.15.9        |       8.30       |
++   +--------------+-----------------------+------------------+
+    |    21.02     |         2.14.13       |       8.00       |
+    +--------------+-----------------------+------------------+
+    |    20.11     |         2.14.13       |       8.00       |
+@@ -148,6 +156,14 @@ For X722,
+    +--------------+-----------------------+------------------+
+    | DPDK version | Kernel driver version | Firmware version |
+    +==============+=======================+==================+
++   |    22.03     |         2.17.15       |       5.50       |
++   +--------------+-----------------------+------------------+
++   |    21.11     |         2.17.4        |       5.30       |
++   +--------------+-----------------------+------------------+
++   |    21.08     |         2.15.9        |       5.30       |
++   +--------------+-----------------------+------------------+
++   |    21.05     |         2.15.9        |       5.30       |
++   +--------------+-----------------------+------------------+
+    |    21.02     |         2.14.13       |       5.00       |
+    +--------------+-----------------------+------------------+
+    |    20.11     |         2.13.10       |       5.00       |
+@@ -771,6 +787,13 @@ it will fail and return the info "Conflict with the first rule's input set",
+ which means the current rule's input set conflicts with the first rule's.
+ Remove the first rule if want to change the input set of the PCTYPE.
+ 
++PF reset fails after QinQ is set with FW >= 8.4
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++After upgrading FW to version 8.4 or higher, if a MAC VLAN filter is set and an outer
++VLAN is configured on the PF, killing the DPDK process will cause the card to crash.
++
++
+ Example of getting best performance with l3fwd example
+ ------------------------------------------------------
+ 
+diff --git a/dpdk/doc/guides/nics/ice.rst b/dpdk/doc/guides/nics/ice.rst
+index f95fef8cf0..6b903b9bbc 100644
+--- a/dpdk/doc/guides/nics/ice.rst
++++ b/dpdk/doc/guides/nics/ice.rst
+@@ -58,6 +58,12 @@ The detailed information can refer to chapter Tested Platforms/Tested NICs in re
+    +-----------+---------------+-----------------+-----------+--------------+-----------+
+    |    21.05  |     1.6.5     |      1.3.26     |  1.3.30   |    1.3.6     |    3.0    |
+    +-----------+---------------+-----------------+-----------+--------------+-----------+
++   |    21.08  |     1.7.16    |      1.3.27     |  1.3.31   |    1.3.7     |    3.1    |
++   +-----------+---------------+-----------------+-----------+--------------+-----------+
++   |    21.11  |     1.7.16    |      1.3.27     |  1.3.31   |    1.3.7     |    3.1    |
++   +-----------+---------------+-----------------+-----------+--------------+-----------+
++   |    22.03  |     1.8.3     |      1.3.28     |  1.3.35   |    1.3.8     |    3.2    |
++   +-----------+---------------+-----------------+-----------+--------------+-----------+
+ 
+ Pre-Installation Configuration
+ ------------------------------
+diff --git a/dpdk/doc/guides/nics/ixgbe.rst b/dpdk/doc/guides/nics/ixgbe.rst
+index 82fa453fa2..ad1a3da610 100644
+--- a/dpdk/doc/guides/nics/ixgbe.rst
++++ b/dpdk/doc/guides/nics/ixgbe.rst
+@@ -101,6 +101,23 @@ To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be ch
+ 
+ fdir_conf->mode will also be checked.
+ 
++Disable SDP3 TX_DISABLE for Fiber Links
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++The following ``devargs`` option can be enabled at runtime. It must
++be passed as part of the EAL arguments. For example,
++
++.. code-block:: console
++
++   dpdk-testpmd -a fiber_sdp3_no_tx_disable=1 -- -i
++
++- ``fiber_sdp3_no_tx_disable`` (default **0**)
++
++  Not all IXGBE implementations with SFP cages use the SDP3 signal as
++  TX_DISABLE as a means to disable the laser on fiber SFP modules.
++  This option informs the driver that SDP3 should not be used to check
++  for link up by testing whether the laser is on or off.
++
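
The same devarg can also be supplied programmatically through EAL
initialization; a sketch follows, where the PCI address is a placeholder
assumption.

.. code-block:: c

   #include <rte_common.h>
   #include <rte_eal.h>

   int main(void)
   {
       /* Replace the PCI address with the ixgbe device the devarg
        * should apply to. */
       char *eal_argv[] = {
           "app",
           "-a", "0000:01:00.0,fiber_sdp3_no_tx_disable=1",
       };

       if (rte_eal_init(RTE_DIM(eal_argv), eal_argv) < 0)
           return 1;
       return 0;
   }
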
+ VF Runtime Options
+ ^^^^^^^^^^^^^^^^^^
+ 
+diff --git a/dpdk/doc/guides/nics/kni.rst b/dpdk/doc/guides/nics/kni.rst
+index 37c5411a32..2a23bb3f3b 100644
+--- a/dpdk/doc/guides/nics/kni.rst
++++ b/dpdk/doc/guides/nics/kni.rst
+@@ -33,7 +33,7 @@ Usage
+ 
+ EAL ``--vdev`` argument can be used to create KNI device instance, like::
+ 
+-        dpdk-testpmd --vdev=net_kni0 --vdev=net_kn1 -- -i
++        dpdk-testpmd --vdev=net_kni0 --vdev=net_kni1 -- -i
+ 
+ Above command will create ``kni0`` and ``kni1`` Linux network interfaces,
+ those interfaces can be controlled by standard Linux tools.
+diff --git a/dpdk/doc/guides/nics/mlx4.rst b/dpdk/doc/guides/nics/mlx4.rst
+index a25add7c47..66493a1157 100644
+--- a/dpdk/doc/guides/nics/mlx4.rst
++++ b/dpdk/doc/guides/nics/mlx4.rst
+@@ -14,7 +14,7 @@ the `Mellanox website <http://www.mellanox.com>`_. Help is also provided by
+ the `Mellanox community <http://community.mellanox.com/welcome>`_.
+ 
+ There is also a `section dedicated to this poll mode driver
+-<http://www.mellanox.com/page/products_dyn?product_family=209&mtag=pmd_for_dpdk>`_.
++<https://developer.nvidia.com/networking/dpdk>`_.
+ 
+ 
+ Implementation details
+@@ -178,7 +178,7 @@ DPDK and must be installed separately:
+ 
+   - mlx4_core: hardware driver managing Mellanox ConnectX-3 devices.
+   - mlx4_en: Ethernet device driver that provides kernel network interfaces.
+-  - mlx4_ib: InifiniBand device driver.
++  - mlx4_ib: InfiniBand device driver.
+   - ib_uverbs: user space driver for verbs (entry point for libibverbs).
+ 
+ - **Firmware update**
+@@ -219,7 +219,7 @@ Mellanox OFED as a fallback
+ - `Mellanox OFED`_ version: **4.4, 4.5, 4.6**.
+ - firmware version: **2.42.5000** and above.
+ 
+-.. _`Mellanox OFED`: http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers
++.. _`Mellanox OFED`: https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/
+ 
+ .. note::
+ 
+diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst
+index feb2e57cee..ce40d1cdac 100644
+--- a/dpdk/doc/guides/nics/mlx5.rst
++++ b/dpdk/doc/guides/nics/mlx5.rst
+@@ -19,7 +19,7 @@ Information and documentation about these adapters can be found on the
+ `Mellanox community <http://community.mellanox.com/welcome>`__.
+ 
+ There is also a `section dedicated to this poll mode driver
+-<http://www.mellanox.com/page/products_dyn?product_family=209&mtag=pmd_for_dpdk>`__.
++<https://developer.nvidia.com/networking/dpdk>`__.
+ 
+ 
+ Design
+@@ -250,7 +250,7 @@ Limitations
+ 
+ - Flex item:
+ 
+-  - Hardware support: BlueField 2.
++  - Hardware support: BlueField-2.
+   - Flex item is supported on PF only.
+   - Hardware limits ``header_length_mask_width`` up to 6 bits.
+   - Firmware supports 8 global sample fields.
+@@ -360,6 +360,12 @@ Limitations
+   - can be applied to VF ports only.
+   - must specify PF port action (packet redirection from VF to PF).
+ 
++- E-Switch Manager matching:
++
++  - For BlueField with old FW
++    which doesn't expose the E-Switch Manager vport ID in the capability,
++    matching E-Switch Manager should be used only in BlueField embedded CPU mode.
++
+ - Raw encapsulation:
+ 
+   - The input buffer, used as outer header, is not validated.
+@@ -420,6 +426,8 @@ Limitations
+     sample actions list.
+   - For E-Switch mirroring flow, supports ``RAW ENCAP``, ``Port ID``,
+     ``VXLAN ENCAP``, ``NVGRE ENCAP`` in the sample actions list.
++  - For a ConnectX-5 trusted device, application metadata with SET_TAG index 0
++    is not supported before the ``RTE_FLOW_ACTION_TYPE_SAMPLE`` action.
+ 
+ - Modify Field flow:
+ 
+@@ -428,6 +436,12 @@ Limitations
+   - Modification of the 802.1Q Tag, VXLAN Network or GENEVE Network ID's is not supported.
+   - Encapsulation levels are not supported, can modify outermost header fields only.
+   - Offsets must be 32-bits aligned, cannot skip past the boundary of a field.
++  - If the field type is ``RTE_FLOW_FIELD_MAC_TYPE``
++    and the packet contains one or more VLAN headers,
++    the meaningful type field following the last VLAN header
++    is used as the modify field operation argument.
++    The modify field action is not intended to modify the VLAN header type field;
++    dedicated VLAN push and pop actions should be used instead.
+ 
+ - IPv6 header item 'proto' field, indicating the next header protocol, should
+   not be set as extension header.
+@@ -462,17 +476,18 @@ Limitations
+ 
+ - Integrity:
+ 
+-  - Integrity offload is enabled for **ConnectX-6** family.
++  - Integrity offload is enabled starting from **ConnectX-6 Dx**.
+   - Verification bits provided by the hardware are ``l3_ok``, ``ipv4_csum_ok``, ``l4_ok``, ``l4_csum_ok``.
+   - ``level`` value 0 references outer headers.
++  - Negative integrity item verification is not supported.
+   - Multiple integrity items not supported in a single flow rule.
+   - Flow rule items supplied by application must explicitly specify network headers referred by integrity item.
+     For example, if integrity item mask sets ``l4_ok`` or ``l4_csum_ok`` bits, reference to L4 network header,
+     TCP or UDP, must be in the rule pattern as well::
+ 
+       flow create 0 ingress pattern integrity level is 0 value mask l3_ok value spec l3_ok / eth / ipv6 / end …
+-      or
+-      flow create 0 ingress pattern integrity level is 0 value mask l4_ok value spec 0 / eth / ipv4 proto is udp / end …
++
++      flow create 0 ingress pattern integrity level is 0 value mask l4_ok value spec l4_ok / eth / ipv4 proto is udp / end …
+ 
+ - Connection tracking:
+ 
+@@ -508,6 +523,8 @@ Limitations
+     from the reference "Clock Queue" completions,
+     the scheduled send timestamps should not be specified with non-zero MSB.
+ 
++- The NIC egress flow rules on representor port are not supported.
++
+ Statistics
+ ----------
+ 
+@@ -554,15 +571,6 @@ Environment variables
+   The register would be flushed to HW usually when the write-combining buffer
+   becomes full, but it depends on CPU design.
+ 
+-  Except for vectorized Tx burst routines, a write memory barrier is enforced
+-  after updating the register so that the update can be immediately visible to
+-  HW.
+-
+-  When vectorized Tx burst is called, the barrier is set only if the burst size
+-  is not aligned to MLX5_VPMD_TX_MAX_BURST. However, setting this environmental
+-  variable will bring better latency even though the maximum throughput can
+-  slightly decline.
+-
+ Run-time configuration
+ ~~~~~~~~~~~~~~~~~~~~~~
+ 
+@@ -649,7 +657,7 @@ Driver options
+ 
+   A timeout value is set in the driver to control the waiting time before
+   dropping a packet. Once the timer is expired, the delay drop will be
+-  deactivated for all the Rx queues with this feature enable. To re-activeate
++  deactivated for all the Rx queues with this feature enabled. To re-activate
+   it, a rearming is needed and it is part of the kernel driver starting from
+   OFED 5.5.
+ 
+@@ -1033,7 +1041,7 @@ Driver options
+ 
+   For the MARK action the last 16 values in the full range are reserved for
+   internal PMD purposes (to emulate FLAG action). The valid range for the
+-  MARK action values is 0-0xFFEF for the 16-bit mode and 0-xFFFFEF
++  MARK action values is 0-0xFFEF for the 16-bit mode and 0-0xFFFFEF
+   for the 24-bit mode, the flows with the MARK action value outside
+   the specified range will be rejected.
+ 
+@@ -1317,7 +1325,7 @@ DPDK and must be installed separately:
+   - mlx5_core: hardware driver managing Mellanox
+     ConnectX-4/ConnectX-5/ConnectX-6/BlueField devices and related Ethernet kernel
+     network devices.
+-  - mlx5_ib: InifiniBand device driver.
++  - mlx5_ib: InfiniBand device driver.
+   - ib_uverbs: user space driver for Verbs (entry point for libibverbs).
+ 
+ - **Firmware update**
+@@ -1382,9 +1390,9 @@ managers on most distributions, this PMD requires Ethernet extensions that
+ may not be supported at the moment (this is a work in progress).
+ 
+ `Mellanox OFED
+-<http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux>`__ and
++<https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/>`__ and
+ `Mellanox EN
+-<http://www.mellanox.com/page/products_dyn?product_family=27&mtag=linux>`__
++<https://network.nvidia.com/products/ethernet-drivers/linux/mlnx_en/>`__
+ include the necessary support and should be used in the meantime. For DPDK,
+ only libibverbs, libmlx5, mlnx-ofed-kernel packages and firmware updates are
+ required from that distribution.
+diff --git a/dpdk/doc/guides/prog_guide/bpf_lib.rst b/dpdk/doc/guides/prog_guide/bpf_lib.rst
+index 1feb7734a3..1cf2d59429 100644
+--- a/dpdk/doc/guides/prog_guide/bpf_lib.rst
++++ b/dpdk/doc/guides/prog_guide/bpf_lib.rst
+@@ -10,7 +10,7 @@ user-space dpdk application.
+ 
+ It supports basic set of features from eBPF spec.
+ Please refer to the
+-`eBPF spec <https://www.kernel.org/doc/Documentation/networking/filter.txt>`
++`eBPF spec <https://www.kernel.org/doc/Documentation/networking/filter.txt>`_
+ for more information.
+ Also it introduces basic framework to load/unload BPF-based filters
+ on eth devices (right now only via SW RX/TX callbacks).
+@@ -48,9 +48,9 @@ For example, ``(BPF_IND | BPF_W | BPF_LD)`` means:
+ .. code-block:: c
+ 
+     uint32_t tmp;
+-    R0 = rte_pktmbuf_read((const struct rte_mbuf *)R6,  src_reg + imm32,
+-	sizeof(tmp), &tmp);
+-    if (R0 == NULL) return FAILED;
++    R0 = rte_pktmbuf_read((const struct rte_mbuf *)R6,  src_reg + imm32, sizeof(tmp), &tmp);
++    if (R0 == NULL)
++        return FAILED;
+     R0 = ntohl(*(uint32_t *)R0);
+ 
+ and ``R1-R5`` were scratched.
+diff --git a/dpdk/doc/guides/prog_guide/compressdev.rst b/dpdk/doc/guides/prog_guide/compressdev.rst
+index 07d1a62a63..2a59c434c1 100644
+--- a/dpdk/doc/guides/prog_guide/compressdev.rst
++++ b/dpdk/doc/guides/prog_guide/compressdev.rst
+@@ -2,7 +2,7 @@
+     Copyright(c) 2017-2018 Cavium Networks.
+ 
+ Compression Device Library
+-===========================
++==========================
+ 
+ The compression framework provides a generic set of APIs to perform compression services
+ as well as to query and configure compression devices both physical(hardware) and virtual(software)
+@@ -32,10 +32,10 @@ From the command line using the --vdev EAL option
+ 
+ .. Note::
+ 
+-   * If DPDK application requires multiple software compression PMD devices then required
+-     number of ``--vdev`` with appropriate libraries are to be added.
++   * If a DPDK application requires multiple software compression PMD devices then the
++     required number of ``--vdev`` args with appropriate libraries are to be added.
+ 
+-   * An Application with multiple compression device instances exposed by the same PMD must
++   * An application with multiple compression device instances exposed by the same PMD must
+      specify a unique name for each device.
+ 
+    Example: ``--vdev  'pmd0' --vdev  'pmd1'``
+@@ -53,7 +53,7 @@ All virtual compression devices support the following initialization parameters:
+ Device Identification
+ ~~~~~~~~~~~~~~~~~~~~~
+ 
+-Each device, whether virtual or physical is uniquely designated by two
++Each device, whether virtual or physical, is uniquely designated by two
+ identifiers:
+ 
+ - A unique device index used to designate the compression device in all functions
+@@ -76,7 +76,7 @@ The ``rte_compressdev_configure`` API is used to configure a compression device.
+ The ``rte_compressdev_config`` structure is used to pass the configuration
+ parameters.
+ 
+-See *DPDK API Reference* for details.
++See the `DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_ for details.
+ 
+ Configuration of Queue Pairs
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+@@ -85,87 +85,88 @@ Each compression device queue pair is individually configured through the
+ ``rte_compressdev_queue_pair_setup`` API.
+ 
+ The ``max_inflight_ops`` is used to pass maximum number of
+-rte_comp_op that could be present in a queue at-a-time.
+-PMD then can allocate resources accordingly on a specified socket.
++``rte_comp_op`` that could be present in a queue at a time.
++The PMD can then allocate resources accordingly on a specified socket.
+ 
+-See *DPDK API Reference* for details.
++See the `DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_ for details.
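
A sketch of device configuration plus queue pair setup follows; the device id,
queue count and in-flight depth are illustrative values.

.. code-block:: c

   #include <rte_compressdev.h>
   #include <rte_lcore.h>

   static int
   setup_compressdev(uint8_t dev_id)
   {
       struct rte_compressdev_config cfg = {
           .socket_id = rte_socket_id(),
           .nb_queue_pairs = 1,
           .max_nb_priv_xforms = 1,
           .max_nb_streams = 0,
       };

       if (rte_compressdev_configure(dev_id, &cfg) < 0)
           return -1;

       /* 512 is max_inflight_ops; the PMD sizes its resources from it. */
       return rte_compressdev_queue_pair_setup(dev_id, 0, 512,
                                               rte_socket_id());
   }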
+ 
+-Logical Cores, Memory and Queues Pair Relationships
+-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++Logical Cores, Memory and Queue Pair Relationships
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-Library supports NUMA similarly as described in Cryptodev library section.
++The Compressdev library supports NUMA similarly as described in Cryptodev library section.
+ 
+-A queue pair cannot be shared and should be exclusively used by a single processing
+-context for enqueuing operations or dequeuing operations on the same compression device
++A queue pair cannot be shared, and should be exclusively used by a single processing
++context for enqueuing operations or dequeuing operations on the same compression device,
+ since sharing would require global locks and hinder performance. It is however possible
+ to use a different logical core to dequeue an operation on a queue pair from the logical
+-core on which it was enqueued. This means that a compression burst enqueue/dequeue
++core on which it was enqueued. This means that for a compression burst, enqueue/dequeue
+ APIs are a logical place to transition from one logical core to another in a
+ data processing pipeline.
+ 
+ Device Features and Capabilities
+----------------------------------
++--------------------------------
+ 
+ Compression devices define their functionality through two mechanisms, global device
+-features and algorithm features. Global devices features identify device
+-wide level features which are applicable to the whole device such as supported hardware
++features and algorithm features. Global device features identify device
++wide level features which are applicable to the whole device, such as supported hardware
+ acceleration and CPU features. List of compression device features can be seen in the
+ RTE_COMPDEV_FF_XXX macros.
+ 
+-The algorithm features lists individual algo feature which device supports per-algorithm,
+-such as a stateful compression/decompression, checksums operation etc. List of algorithm
+-features can be seen in the RTE_COMP_FF_XXX macros.
++The algorithm features are features which the device supports per-algorithm,
++such as a stateful compression/decompression, checksums operation etc.
++The list of algorithm features can be seen in the RTE_COMP_FF_XXX macros.
+ 
+ Capabilities
+ ~~~~~~~~~~~~
+ Each PMD has a list of capabilities, including algorithms listed in
+-enum ``rte_comp_algorithm`` and its associated feature flag and
+-sliding window range in log base 2 value. Sliding window tells
+-the minimum and maximum size of lookup window that algorithm uses
++the enum ``rte_comp_algorithm``, its associated feature flag, and
++sliding window range in log base 2 value. The sliding window range
++defines the minimum and maximum size of a lookup window that an algorithm uses
+ to find duplicates.
+ 
+-See *DPDK API Reference* for details.
++See the `DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_ for details.
+ 
+ Each Compression poll mode driver defines its array of capabilities
+-for each algorithm it supports. See PMD implementation for capability
++for each algorithm it supports. See the PMD implementation for capability
+ initialization.
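
For instance, a sketch querying the DEFLATE capability and its window range
(the device id is illustrative):

.. code-block:: c

   #include <stdio.h>
   #include <rte_compressdev.h>

   static void
   check_deflate_cap(uint8_t dev_id)
   {
       const struct rte_compressdev_capabilities *cap =
           rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);

       if (cap == NULL) {
           printf("DEFLATE not supported on device %u\n", dev_id);
           return;
       }
       /* window_size is a log2 range: min, max and increment step. */
       printf("window log2: min %u max %u step %u\n",
              cap->window_size.min, cap->window_size.max,
              cap->window_size.increment);
   }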
+ 
+ Capabilities Discovery
+ ~~~~~~~~~~~~~~~~~~~~~~
+ 
+-PMD capability and features are discovered via ``rte_compressdev_info_get`` function.
++PMD capability and features are discovered via the ``rte_compressdev_info_get`` function.
+ 
+ The ``rte_compressdev_info`` structure contains all the relevant information for the device.
+ 
+-See *DPDK API Reference* for details.
++See the `DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_ for details.
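
A minimal discovery sketch; the feature flag tested here is one example among
the RTE_COMPDEV_FF_XXX macros.

.. code-block:: c

   #include <stdio.h>
   #include <rte_compressdev.h>

   static void
   print_dev_features(uint8_t dev_id)
   {
       struct rte_compressdev_info info;

       rte_compressdev_info_get(dev_id, &info);
       printf("driver %s, max queue pairs %u\n",
              info.driver_name, info.max_nb_queue_pairs);

       if (info.feature_flags & RTE_COMPDEV_FF_HW_ACCELERATED)
           printf("device %u is hardware accelerated\n", dev_id);
   }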
+ 
+ Compression Operation
+-----------------------
++---------------------
+ 
+ DPDK compression supports two types of compression methodologies:
+ 
+-- Stateless, data associated to a compression operation is compressed without any reference
++- Stateless - data associated with a compression operation is compressed without any reference
+   to another compression operation.
+ 
+-- Stateful, data in each compression operation is compressed with reference to previous compression
++- Stateful - data in each compression operation is compressed with reference to previous compression
+   operations in the same data stream i.e. history of data is maintained between the operations.
+ 
+-For more explanation, please refer RFC https://www.ietf.org/rfc/rfc1951.txt
++For more explanation, please refer to the RFC https://www.ietf.org/rfc/rfc1951.txt
+ 
+ Operation Representation
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-Compression operation is described via ``struct rte_comp_op``, which contains both input and
++A compression operation is described via ``struct rte_comp_op``, which contains both input and
+ output data. The operation structure includes the operation type (stateless or stateful),
+-the operation status and the priv_xform/stream handle, source, destination and checksum buffer
++the operation status, the priv_xform/stream handle, source, destination and checksum buffer
+ pointers. It also contains the source mempool from which the operation is allocated.
+-PMD updates consumed field with amount of data read from source buffer and produced
+-field with amount of data of written into destination buffer along with status of
+-operation. See section *Produced, Consumed And Operation Status* for more details.
+-
+-Compression operations mempool also has an ability to allocate private memory with the
+-operation for application's purposes. Application software is responsible for specifying
+-all the operation specific fields in the ``rte_comp_op`` structure which are then used
++The PMD updates the consumed field with the amount of data read from the source buffer,
++and the produced field with the amount of data written into the destination buffer,
++along with status of operation.
++See the section :ref:`compressdev_prod_cons_op_status` for more details.
++
++The compression operations mempool also has the ability to allocate private memory with the
++operation for the application's use. The application software is responsible for specifying
++all the operation specific fields in the ``rte_comp_op`` structure, which are then used
+ by the compression PMD to process the requested operation.
+ 
+ 
+@@ -181,27 +182,27 @@ A ``rte_comp_op`` contains a field indicating the pool it originated from.
+ 
+ ``rte_comp_op_alloc()`` and ``rte_comp_op_bulk_alloc()`` are used to allocate
+ compression operations from a given compression operation mempool.
+-The operation gets reset before being returned to a user so that operation
++The operation gets reset before being returned to a user so that the operation
+ is always in a good known state before use by the application.
+ 
+ ``rte_comp_op_free()`` is called by the application to return an operation to
+ its allocating pool.
+ 
+-See *DPDK API Reference* for details.
++See the `DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_ for details.
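
A sketch of the allocation round-trip; pool sizing values are illustrative.

.. code-block:: c

   #include <rte_comp.h>
   #include <rte_lcore.h>

   #define NB_OPS     1024
   #define CACHE_SIZE 64
   #define BURST      32

   static void
   op_pool_roundtrip(void)
   {
       struct rte_comp_op *ops[BURST];
       struct rte_mempool *pool;
       int i;

       pool = rte_comp_op_pool_create("comp_op_pool", NB_OPS, CACHE_SIZE,
                                      0 /* no private user area */,
                                      rte_socket_id());
       if (pool == NULL)
           return;

       /* Bulk allocation is all-or-nothing: 0 means nothing was taken. */
       if (rte_comp_op_bulk_alloc(pool, ops, BURST) == 0)
           return;

       /* ... fill in and enqueue the ops ... */

       for (i = 0; i < BURST; i++)
           rte_comp_op_free(ops[i]);
   }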
+ 
+ Passing source data as mbuf-chain
+-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ If input data is scattered across several different buffers, then
+-Application can either parse through all such buffers and make one
++the application can either parse through all such buffers and make one
+ mbuf-chain and enqueue it for processing or, alternatively, it can
+-make multiple sequential enqueue_burst() calls for each of them
+-processing them statefully. See *Compression API Stateful Operation*
++make multiple sequential enqueue_burst() calls for each of them,
++processing them statefully. See :ref:`compressdev_stateful_op`
+ for stateful processing of ops.
+ 
+ Operation Status
+ ~~~~~~~~~~~~~~~~
+-Each operation carries a status information updated by PMD after it is processed.
+-Following are currently supported:
++Each operation carries status information updated by the PMD after it is processed.
++The following are currently supported:
+ 
+ - RTE_COMP_OP_STATUS_SUCCESS,
+     Operation is successfully completed
+@@ -225,22 +226,25 @@ Following are currently supported:
+ - RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
+     Output buffer ran out of space before operation completed, but this
+     is not an error case. Output data up to op.produced can be used and
+-    next op in the stream should continue on from op.consumed+1.
++    the next op in the stream should continue on from op.consumed+1.
+ 
+ Operation status after enqueue / dequeue
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Some of the above values may arise in the op after an
+-``rte_compressdev_enqueue_burst()``. If number ops enqueued < number ops requested then
+-the app should check the op.status of nb_enqd+1. If status is RTE_COMP_OP_STATUS_NOT_PROCESSED,
+-it likely indicates a full-queue case for a hardware device and a retry after dequeuing some ops is likely
+-to be successful. If the op holds any other status, e.g. RTE_COMP_OP_STATUS_INVALID_ARGS, a retry with
++``rte_compressdev_enqueue_burst()``. If the number of ops enqueued < the number of ops requested
++then the app should check the op.status of nb_enqd+1.
++If the status is RTE_COMP_OP_STATUS_NOT_PROCESSED, it likely indicates a full-queue case for a
++hardware device, and a retry after dequeuing some ops is likely to be successful.
++If the op holds any other status, e.g. RTE_COMP_OP_STATUS_INVALID_ARGS, a retry with
+ the same op is unlikely to be successful.
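
Following these rules, a partial enqueue can be handled along the lines of the
sketch below; the variable names are illustrative.

.. code-block:: c

   #include <rte_compressdev.h>

   static void
   enqueue_and_check(uint8_t dev_id, uint16_t qp_id,
                     struct rte_comp_op **ops, uint16_t nb_ops)
   {
       uint16_t nb_enqd = rte_compressdev_enqueue_burst(dev_id, qp_id,
                                                        ops, nb_ops);
       if (nb_enqd < nb_ops) {
           if (ops[nb_enqd]->status == RTE_COMP_OP_STATUS_NOT_PROCESSED) {
               /* Likely a full hardware queue: dequeue some results,
                * then retry from ops[nb_enqd]. */
           } else {
               /* e.g. RTE_COMP_OP_STATUS_INVALID_ARGS: fix the op;
                * retrying it unchanged is unlikely to succeed. */
           }
       }
   }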
+ 
+ 
++.. _compressdev_prod_cons_op_status:
++
+ Produced, Consumed And Operation Status
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-- If status is RTE_COMP_OP_STATUS_SUCCESS,
++- If the status is RTE_COMP_OP_STATUS_SUCCESS,
+     consumed = amount of data read from input buffer, and
+     produced = amount of data written in destination buffer
+ - If status is RTE_COMP_OP_STATUS_ERROR,
+@@ -253,37 +257,37 @@ Produced, Consumed And Operation Status
+ - If status is RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
+     consumed = amount of data read, and
+     produced = amount of data successfully produced until
+-    out of space condition hit. PMD has ability to recover
+-    from here, so application can submit next op from
+-    consumed+1 and a destination buffer with available space.
++    out of space condition hit. The PMD has ability to recover
++    from here, so an application can submit the next op from
++    consumed+1, and a destination buffer with available space.
+ 
+ Transforms
+ ----------
+ 
+ Compression transforms (``rte_comp_xform``) are the mechanism
+ to specify the details of the compression operation such as algorithm,
+-window size and checksum.
++window size, and checksum.
+ 
+ Compression API Hash support
+ ----------------------------
+ 
+-Compression API allows application to enable digest calculation
++The compression API allows an application to enable digest calculation
+ alongside compression and decompression of data. A PMD reflects its
+ support for hash algorithms via capability algo feature flags.
+-If supported, PMD calculates digest always on plaintext i.e.
++If supported, the PMD always calculates the digest on plaintext i.e.
+ before compression and after decompression.
+ 
+ Currently supported list of hash algos are SHA-1 and SHA2 family
+ SHA256.
+ 
+-See *DPDK API Reference* for details.
++See the `DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_ for details.
+ 
+-If required, application should set valid hash algo in compress
++If required, the application should set the valid hash algo in compress
+ or decompress xforms during ``rte_compressdev_stream_create()``
+-or ``rte_compressdev_private_xform_create()`` and pass a valid
++or ``rte_compressdev_private_xform_create()``, and pass a valid
+ output buffer in ``rte_comp_op`` hash field struct to store the
+-resulting digest. Buffer passed should be contiguous and large
+-enough to store digest which is 20 bytes for SHA-1 and
++resulting digest. The buffer passed should be contiguous and large
++enough to store digest, which is 20 bytes for SHA-1 and
+ 32 bytes for SHA2-256.
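
A sketch of a compress xform requesting a SHA2-256 digest; the algorithm and
parameter choices here are illustrative.

.. code-block:: c

   #include <rte_comp.h>

   static const struct rte_comp_xform hash_xform = {
       .type = RTE_COMP_COMPRESS,
       .compress = {
           .algo = RTE_COMP_ALGO_DEFLATE,
           .deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
           .level = RTE_COMP_LEVEL_PMD_DEFAULT,
           .window_size = 15,
           .chksum = RTE_COMP_CHECKSUM_NONE,
           .hash_algo = RTE_COMP_HASH_ALGO_SHA2_256,
       },
   };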
+ 
+ Compression API Stateless operation
+@@ -295,20 +299,21 @@ An op is processed stateless if it has
+ (required only on compression side),
+ - All required input in source buffer
+ 
+-When all of the above conditions are met, PMD initiates stateless processing
++When all of the above conditions are met, the PMD initiates stateless processing
+ and releases acquired resources after processing of current operation is
+-complete. Application can enqueue multiple stateless ops in a single burst
++complete. The application can enqueue multiple stateless ops in a single burst
+ and must attach priv_xform handle to such ops.
+ 
+ priv_xform in Stateless operation
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-priv_xform is PMD internally managed private data that it maintains to do stateless processing.
+-priv_xforms are initialized provided a generic xform structure by an application via making call
+-to ``rte_compressdev_private_xform_create``, at an output PMD returns an opaque priv_xform reference.
+-If PMD support SHAREABLE priv_xform indicated via algorithm feature flag, then application can
+-attach same priv_xform with many stateless ops at-a-time. If not, then application needs to
+-create as many priv_xforms as it expects to have stateless operations in-flight.
++A priv_xform is private data managed internally by the PMD to do stateless processing.
++A priv_xform is initialized by an application providing a generic xform structure
++to ``rte_compressdev_private_xform_create``, which returns an opaque priv_xform reference.
++If the PMD supports SHAREABLE priv_xform, indicated via algorithm feature flag,
++then the application can attach the same priv_xform with many stateless ops at a time.
++If not, then the application needs to create as many priv_xforms as it expects to have
++stateless operations in-flight.
+ 
+ .. figure:: img/stateless-op.*
+ 
+@@ -320,8 +325,9 @@ create as many priv_xforms as it expects to have stateless operations in-flight.
+    Stateless Ops using Shareable priv_xform
+ 
+ 
+-Application should call ``rte_compressdev_private_xform_create()`` and attach to stateless op before
+-enqueuing them for processing and free via ``rte_compressdev_private_xform_free()`` during termination.
++The application should call ``rte_compressdev_private_xform_create()`` and attach it to a stateless
++op before enqueuing them for processing and free via ``rte_compressdev_private_xform_free()``
++during termination.
+ 
+ An example pseudocode to setup and process NUM_OPS stateless ops with each of length OP_LEN
+ using priv_xform would look like:
+@@ -399,75 +405,80 @@ using priv_xform would look like:
+ 
+ 
+ Stateless and OUT_OF_SPACE
+-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-OUT_OF_SPACE is a condition when output buffer runs out of space and where PMD
+-still has more data to produce. If PMD runs into such condition, then PMD returns
+-RTE_COMP_OP_OUT_OF_SPACE_TERMINATED error. In such case, PMD resets itself and can set
++OUT_OF_SPACE is a condition when the output buffer runs out of space and where the PMD
++still has more data to produce. If the PMD runs into such condition, then the PMD returns
++RTE_COMP_OP_OUT_OF_SPACE_TERMINATED error. In such case, the PMD resets itself and can set
+ consumed=0 and produced=amount of output it could produce before hitting out_of_space.
+-Application would need to resubmit the whole input with a larger output buffer, if it
++The application would need to resubmit the whole input with a larger output buffer, if it
+ wants the operation to be completed.
+ 
+ Hash in Stateless
+ ~~~~~~~~~~~~~~~~~
+-If hash is enabled, digest buffer will contain valid data after op is successfully
++If hash is enabled, the digest buffer will contain valid data after an op is successfully
+ processed i.e. dequeued with status = RTE_COMP_OP_STATUS_SUCCESS.
+ 
+ Checksum in Stateless
+ ~~~~~~~~~~~~~~~~~~~~~
+-If checksum is enabled, checksum will only be available after op is successfully
++If checksum is enabled, checksum will only be available after an op is successfully
+ processed i.e. dequeued with status = RTE_COMP_OP_STATUS_SUCCESS.
+ 
++.. _compressdev_stateful_op:
++
+ Compression API Stateful operation
+ -----------------------------------
+ 
+-Compression API provide RTE_COMP_FF_STATEFUL_COMPRESSION and
+-RTE_COMP_FF_STATEFUL_DECOMPRESSION feature flag for PMD to reflect
++The compression API provides RTE_COMP_FF_STATEFUL_COMPRESSION and
++RTE_COMP_FF_STATEFUL_DECOMPRESSION feature flag for the PMD to reflect
+ its support for Stateful operations.
+ 
+-A Stateful operation in DPDK compression means application invokes enqueue
+-burst() multiple times to process related chunk of data because
+-application broke data into several ops.
++A Stateful operation in DPDK compression means the application invokes enqueue
++burst() multiple times to process a related chunk of data because the
++application broke the data into several ops.
+ 
+-In such case
++In such cases
+ - ops are setup with op_type RTE_COMP_OP_STATEFUL,
+-- all ops except last set to flush value = RTE_COMP_FLUSH_NONE/SYNC
+-and last set to flush value RTE_COMP_FLUSH_FULL/FINAL.
++- all ops except the last are set with flush value = RTE_COMP_FLUSH_NONE/SYNC
++and the last is set with flush value RTE_COMP_FLUSH_FULL/FINAL.
+ 
+-In case of either one or all of the above conditions, PMD initiates
+-stateful processing and releases acquired resources after processing
++In case of either one or all of the above conditions, the PMD initiates
++stateful processing and releases acquired resources after processing the
+ operation with flush value = RTE_COMP_FLUSH_FULL/FINAL is complete.
+-Unlike stateless, application can enqueue only one stateful op from
+-a particular stream at a time and must attach stream handle
++Unlike stateless, the application can enqueue only one stateful op from
++a particular stream at a time and must attach a stream handle
+ to each op.
+ 
+ Stream in Stateful operation
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-`stream` in DPDK compression is a logical entity which identifies related set of ops, say, a one large
+-file broken into multiple chunks then file is represented by a stream and each chunk of that file is
+-represented by compression op `rte_comp_op`. Whenever application wants a stateful processing of such
+-data, then it must get a stream handle via making call to ``rte_compressdev_stream_create()``
+-with xform, at an output the target PMD will return an opaque stream handle to application which
+-it must attach to all of the ops carrying data of that stream. In stateful processing, every op
+-requires previous op data for compression/decompression. A PMD allocates and set up resources such
+-as history, states, etc. within a stream, which are maintained during the processing of the related ops.
++A stream in DPDK compression is a logical entity which identifies a related set of ops.
++For example, one large file broken into multiple chunks, then the file is represented by a stream,
++and each chunk of that file is represented by a compression op ``rte_comp_op``.
++Whenever an application wants stateful processing of such data, it must get a stream handle
++by calling ``rte_compressdev_stream_create()`` with an xform, which will return an opaque
++stream handle to attach to all of the ops carrying data of that stream.
++In stateful processing, every op requires previous op data for compression/decompression.
++A PMD allocates and sets up resources such as history, states, etc. within a stream,
++which are maintained during the processing of related ops.
+ 
+-Unlike priv_xforms, stream is always a NON_SHAREABLE entity. One stream handle must be attached to only
+-one set of related ops and cannot be reused until all of them are processed with status Success or failure.
++Unlike priv_xforms, a stream is always a NON_SHAREABLE entity. One stream handle must be attached
++to only one set of related ops and cannot be reused until all of them are processed with a
++success/failure status.
+ 
+ .. figure:: img/stateful-op.*
+ 
+    Stateful Ops
+ 
+ 
+-Application should call ``rte_compressdev_stream_create()`` and attach to op before
++An application should call ``rte_compressdev_stream_create()`` and attach it to the op before
+ enqueuing them for processing and free via ``rte_compressdev_stream_free()`` during
+-termination. All ops that are to be processed statefully should carry *same* stream.
++termination. All ops that are to be processed statefully should carry the *same* stream.
+ 
+-See *DPDK API Reference* document for details.
++See the `DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_ for details.
+ 
+-An example pseudocode to set up and process a stream having NUM_CHUNKS with each chunk size of CHUNK_LEN would look like:
++An example pseudocode to set up and process a stream having NUM_CHUNKS,
++with each chunk size of CHUNK_LEN, would look like:
+ 
+ .. code-block:: c
+ 
+@@ -549,64 +560,65 @@ An example pseudocode to set up and process a stream having NUM_CHUNKS with each
+ 
+ 
+ Stateful and OUT_OF_SPACE
+-~~~~~~~~~~~~~~~~~~~~~~~~~~~
++~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-If PMD supports stateful operation, then OUT_OF_SPACE status is not an actual
+-error for the PMD. In such case, PMD returns with status
++If a PMD supports stateful operation, then an OUT_OF_SPACE status is not an actual
++error for the PMD. In such a case, the PMD returns with status
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE with consumed = number of input bytes
+-read and produced = length of complete output buffer.
+-Application should enqueue next op with source starting at consumed+1 and an
++read, and produced = length of complete output buffer.
++The application should enqueue the next op with source starting at consumed+1, and an
+ output buffer with available space.
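
In code, the continuation could look like this sketch; the op and buffer names
are illustrative.

.. code-block:: c

   #include <rte_comp.h>
   #include <rte_mbuf.h>

   static void
   continue_after_out_of_space(struct rte_comp_op *op,
                               struct rte_comp_op *next_op,
                               struct rte_mbuf *dst_with_space)
   {
       if (op->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
           /* Resume the source right after what was already consumed. */
           next_op->src.offset = op->src.offset + op->consumed;
           next_op->src.length = op->src.length - op->consumed;
           next_op->m_dst = dst_with_space; /* output with free space */
           next_op->stream = op->stream;    /* same stream handle */
       }
   }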
+ 
+ Hash in Stateful
+ ~~~~~~~~~~~~~~~~
+-If enabled, digest buffer will contain valid digest after last op in stream
++If enabled, the digest buffer will contain valid digest after the last op in a stream
+ (having flush = RTE_COMP_FLUSH_FINAL) is successfully processed i.e. dequeued
+ with status = RTE_COMP_OP_STATUS_SUCCESS.
+ 
+ Checksum in Stateful
+ ~~~~~~~~~~~~~~~~~~~~
+-If enabled, checksum will only be available after last op in stream
++If enabled, the checksum will only be available after the last op in a stream
+ (having flush = RTE_COMP_FLUSH_FINAL) is successfully processed i.e. dequeued
+ with status = RTE_COMP_OP_STATUS_SUCCESS.
+ 
+ Burst in compression API
+--------------------------
++------------------------
+ 
+ Scheduling of compression operations on DPDK's application data path is
+ performed using a burst oriented asynchronous API set. A queue pair on a compression
+-device accepts a burst of compression operations using enqueue burst API. On physical
+-devices the enqueue burst API will place the operations to be processed
++device accepts a burst of compression operations using the enqueue burst API.
++On physical devices the enqueue burst API will place the operations to be processed
+ on the device's hardware input queue, for virtual devices the processing of the
+ operations is usually completed during the enqueue call to the compression
+ device. The dequeue burst API will retrieve any processed operations available
+ from the queue pair on the compression device, from physical devices this is usually
+-directly from the devices processed queue, and for virtual device's from a
++directly from the device's processed queue, and for virtual devices from an
+ ``rte_ring`` where processed operations are placed after being processed on the
+ enqueue call.
+ 
+-A burst in DPDK compression can be a combination of stateless and stateful operations with a condition
+-that for stateful ops only one op at-a-time should be enqueued from a particular stream i.e. no-two ops
+-should belong to same stream in a single burst. However a burst may contain multiple stateful ops as long
+-as each op is attached to a different stream i.e. a burst can look like:
++A burst in DPDK compression can be a combination of stateless and stateful operations with a
++condition that for stateful ops only one op at a time should be enqueued from a particular stream
++i.e. two ops should never belong to the same stream in a single burst.
++However, a burst may contain multiple stateful ops, as long as each op is attached to a different
++stream, i.e. a burst can look like:
+ 
+ +---------------+--------------+--------------+-----------------+--------------+--------------+
+ | enqueue_burst | op1.no_flush | op2.no_flush | op3.flush_final | op4.no_flush | op5.no_flush |
+ +---------------+--------------+--------------+-----------------+--------------+--------------+
+ 
+-Where, op1 .. op5 all belong to different independent data units. op1, op2, op4, op5 must be stateful
+-as stateless ops can only use flush full or final and op3 can be of type stateless or stateful.
+-Every op with type set to RTE_COMP_OP_STATELESS must be attached to priv_xform and
+-Every op with type set to RTE_COMP_OP_STATEFUL *must* be attached to stream.
++Where, op1 .. op5 all belong to different independent data units. op1, op2, op4, op5 must be
++stateful as stateless ops can only use flush full or final and op3 can be of type stateless or
++stateful. Every op with type set to RTE_COMP_OP_STATELESS must be attached to priv_xform and
++every op with type set to RTE_COMP_OP_STATEFUL *must* be attached to stream.
+ 
+ Since each operation in a burst is independent and thus can be completed
+-out-of-order, applications which need ordering, should setup per-op user data
+-area with reordering information so that it can determine enqueue order at
++out of order, applications which need ordering should set up a per-op user data
++area with reordering information so that they can determine enqueue order at
+ dequeue.
+ 
+-Also if multiple threads calls enqueue_burst() on same queue pair then it’s
+-application onus to use proper locking mechanism to ensure exclusive enqueuing
+-of operations.
++Also, if multiple threads call enqueue_burst() on the same queue pair then it's
++the application's responsibility to use a proper locking mechanism to ensure
++exclusive enqueuing of operations.
+ 
+ Enqueue / Dequeue Burst APIs
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+@@ -629,9 +641,10 @@ Sample code
+ -----------
+ 
+ There are unit test applications that show how to use the compressdev library inside
+-app/test/test_compressdev.c
++``app/test/test_compressdev.c``
+ 
+ Compression Device API
+ ~~~~~~~~~~~~~~~~~~~~~~
+ 
+-The compressdev Library API is described in the *DPDK API Reference* document.
++The compressdev Library API is described in the
++`DPDK API Reference <https://doc.dpdk.org/api/rte__compressdev_8h.html>`_.
+diff --git a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst b/dpdk/doc/guides/prog_guide/cryptodev_lib.rst
+index 0af35f5e74..8e403cb949 100644
+--- a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst
++++ b/dpdk/doc/guides/prog_guide/cryptodev_lib.rst
+@@ -751,7 +751,7 @@ feature is useful when the user wants to abandon partially enqueued operations
+ for a failed enqueue burst operation and try enqueuing in a whole later.
+ 
+ Similar as enqueue, there are two dequeue functions:
+-``rte_cryptodev_raw_dequeue`` for dequeing single operation, and
++``rte_cryptodev_raw_dequeue`` for dequeuing single operation, and
+ ``rte_cryptodev_raw_dequeue_burst`` for dequeuing a burst of operations (e.g.
+ all operations in a ``struct rte_crypto_sym_vec`` descriptor). The
+ ``rte_cryptodev_raw_dequeue_burst`` function allows the user to provide callback
+@@ -1309,6 +1309,7 @@ are shown below.
+      "enqueue_err_count": 0, "dequeue_err_count": 0}}
+ 
+ #. Get the capabilities of a particular Crypto device::
++
+      --> /cryptodev/caps,0
+      {"/cryptodev/caps": {"crypto_caps": [<array of serialized bytes of
+      capabilities>], "crypto_caps_n": <number of capabilities>}}
+diff --git a/dpdk/doc/guides/prog_guide/env_abstraction_layer.rst b/dpdk/doc/guides/prog_guide/env_abstraction_layer.rst
+index 29f6fefc48..c6accce701 100644
+--- a/dpdk/doc/guides/prog_guide/env_abstraction_layer.rst
++++ b/dpdk/doc/guides/prog_guide/env_abstraction_layer.rst
+@@ -433,7 +433,7 @@ and decides on a preferred IOVA mode.
+ 
+ - if all buses report RTE_IOVA_PA, then the preferred IOVA mode is RTE_IOVA_PA,
+ - if all buses report RTE_IOVA_VA, then the preferred IOVA mode is RTE_IOVA_VA,
+-- if all buses report RTE_IOVA_DC, no bus expressed a preferrence, then the
++- if all buses report RTE_IOVA_DC, no bus expressed a preference, then the
+   preferred mode is RTE_IOVA_DC,
+ - if the buses disagree (at least one wants RTE_IOVA_PA and at least one wants
+   RTE_IOVA_VA), then the preferred IOVA mode is RTE_IOVA_DC (see below with the
+@@ -658,7 +658,7 @@ Known Issues
+ + rte_ring
+ 
+   rte_ring supports multi-producer enqueue and multi-consumer dequeue.
+-  However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptable.
++  However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptible.
+ 
+   .. note::
+ 
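
The non-preemptive caveat above is easiest to see from the producer side; a minimal sketch using the standard ``rte_ring`` burst API follows (launched once per producer lcore)::

    #include <rte_ring.h>

    /* Runs on several lcores against one MP/MC ring. Because rte_ring
     * is non-preemptive, each producer should own its lcore: a thread
     * preempted mid-enqueue can stall the other producers/consumers. */
    static int
    producer(void *arg)
    {
        struct rte_ring *r = arg;
        void *objs[32] = { NULL }; /* payload elided */

        return rte_ring_enqueue_burst(r, objs, 32, NULL) == 32 ? 0 : -1;
    }
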
+diff --git a/dpdk/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/dpdk/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+index 67b11e1563..3b4ef502b2 100644
+--- a/dpdk/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
++++ b/dpdk/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+@@ -257,8 +257,8 @@ A loop processing ``rte_event_vector`` containing mbufs is shown below.
+                         /* Process each mbuf. */
+                 }
+         break;
+-        case ...
+-        ...
++        default:
++                /* Handle other event types. */
+         }
+ 
+ Rx event vectorization for SW Rx adapter
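
A self-contained sketch of the loop this hunk repairs, using the public eventdev API (``RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR`` and the ``vec`` member of ``struct rte_event``)::

    #include <rte_eventdev.h>

    static void
    poll_events(uint8_t dev_id, uint8_t port_id)
    {
        struct rte_event ev;

        while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
            switch (ev.event_type) {
            case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
                for (uint16_t i = 0; i < ev.vec->nb_elem; i++) {
                    struct rte_mbuf *m = ev.vec->mbufs[i];
                    (void)m; /* process each mbuf */
                }
                break;
            default:
                /* Handle other event types. */
                break;
            }
        }
    }
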
+diff --git a/dpdk/doc/guides/prog_guide/img/flow_tru_droppper.png b/dpdk/doc/guides/prog_guide/img/flow_tru_dropper.png
+similarity index 100%
+rename from dpdk/doc/guides/prog_guide/img/flow_tru_droppper.png
+rename to dpdk/doc/guides/prog_guide/img/flow_tru_dropper.png
+diff --git a/dpdk/doc/guides/prog_guide/img/turbo_tb_decode.svg b/dpdk/doc/guides/prog_guide/img/turbo_tb_decode.svg
+index a259f45866..95779c3642 100644
+--- a/dpdk/doc/guides/prog_guide/img/turbo_tb_decode.svg
++++ b/dpdk/doc/guides/prog_guide/img/turbo_tb_decode.svg
+@@ -460,7 +460,7 @@
+            height="14.642858"
+            x="39.285713"
+            y="287.16254" /></flowRegion><flowPara
+-         id="flowPara4817">offse</flowPara></flowRoot>    <text
++         id="flowPara4817">offset</flowPara></flowRoot>    <text
+        xml:space="preserve"
+        style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+        x="74.16684"
+diff --git a/dpdk/doc/guides/prog_guide/img/turbo_tb_encode.svg b/dpdk/doc/guides/prog_guide/img/turbo_tb_encode.svg
+index e3708a9377..98a6b83983 100644
+--- a/dpdk/doc/guides/prog_guide/img/turbo_tb_encode.svg
++++ b/dpdk/doc/guides/prog_guide/img/turbo_tb_encode.svg
+@@ -649,7 +649,7 @@
+            height="14.642858"
+            x="39.285713"
+            y="287.16254" /></flowRegion><flowPara
+-         id="flowPara4817">offse</flowPara></flowRoot>    <text
++         id="flowPara4817">offset</flowPara></flowRoot>    <text
+        xml:space="preserve"
+        style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+        x="16.351753"
+diff --git a/dpdk/doc/guides/prog_guide/qos_framework.rst b/dpdk/doc/guides/prog_guide/qos_framework.rst
+index 89ea199529..22616117cb 100644
+--- a/dpdk/doc/guides/prog_guide/qos_framework.rst
++++ b/dpdk/doc/guides/prog_guide/qos_framework.rst
+@@ -1196,12 +1196,12 @@ In the case of severe congestion, the dropper resorts to tail drop.
+ This occurs when a packet queue has reached maximum capacity and cannot store any more packets.
+ In this situation, all arriving packets are dropped.
+ 
+-The flow through the dropper is illustrated in :numref:`figure_flow_tru_droppper`.
++The flow through the dropper is illustrated in :numref:`figure_flow_tru_dropper`.
+ The RED/WRED/PIE algorithm is exercised first and tail drop second.
+ 
+-.. _figure_flow_tru_droppper:
++.. _figure_flow_tru_dropper:
+ 
+-.. figure:: img/flow_tru_droppper.*
++.. figure:: img/flow_tru_dropper.*
+ 
+    Flow Through the Dropper
+ 
+diff --git a/dpdk/doc/guides/prog_guide/rte_flow.rst b/dpdk/doc/guides/prog_guide/rte_flow.rst
+index c51ed88cfe..714769d0e4 100644
+--- a/dpdk/doc/guides/prog_guide/rte_flow.rst
++++ b/dpdk/doc/guides/prog_guide/rte_flow.rst
+@@ -60,12 +60,12 @@ Flow rules can also be grouped, the flow rule priority is specific to the
+ group they belong to. All flow rules in a given group are thus processed within
+ the context of that group. Groups are not linked by default, so the logical
+ hierarchy of groups must be explicitly defined by flow rules themselves in each
+-group using the JUMP action to define the next group to redirect too. Only flow
+-rules defined in the default group 0 are guarantee to be matched against, this
++group using the JUMP action to define the next group to redirect to. Only flow
++rules defined in the default group 0 are guaranteed to be matched against. This
+ makes group 0 the origin of any group hierarchy defined by an application.
+ 
+ Support for multiple actions per rule may be implemented internally on top
+-of non-default hardware priorities, as a result both features may not be
++of non-default hardware priorities. As a result, both features may not be
+ simultaneously available to applications.
+ 
+ Considering that allowed pattern/actions combinations cannot be known in
+@@ -1379,7 +1379,7 @@ Matches a network service header (RFC 8300).
+ - ``ttl``: maximum SFF hopes (6 bits).
+ - ``length``: total length in 4 bytes words (6 bits).
+ - ``reserved1``: reserved1 bits (4 bits).
+-- ``mdtype``: ndicates format of NSH header (4 bits).
++- ``mdtype``: indicates format of NSH header (4 bits).
+ - ``next_proto``: indicates protocol type of encap data (8 bits).
+ - ``spi``: service path identifier (3 bytes).
+ - ``sindex``: service index (1 byte).
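
As an illustration of the group-0 anchoring described above, a minimal ingress rule that matches all Ethernet traffic in group 0 and jumps to group 1 could look as follows (standard ``rte_flow`` API; the catch-all pattern is an arbitrary choice for the sketch)::

    #include <rte_flow.h>

    static struct rte_flow *
    jump_to_group_1(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_jump jump = { .group = 1 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Rules in group 0 are guaranteed to be matched against, so
         * the jump makes group 1 reachable. */
        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }
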
+diff --git a/dpdk/doc/guides/prog_guide/vhost_lib.rst b/dpdk/doc/guides/prog_guide/vhost_lib.rst
+index 76f5d303c9..8959568d8f 100644
+--- a/dpdk/doc/guides/prog_guide/vhost_lib.rst
++++ b/dpdk/doc/guides/prog_guide/vhost_lib.rst
+@@ -331,7 +331,7 @@ vhost-user implementation has two options:
+ 
+      * The vhost supported features must be exactly the same before and
+        after the restart. For example, if TSO is disabled and then enabled,
+-       nothing will work and issues undefined might happen.
++       nothing will work and undefined issues might happen.
+ 
+ No matter which mode is used, once a connection is established, DPDK
+ vhost-user will start receiving and processing vhost messages from QEMU.
+@@ -362,12 +362,12 @@ Guest memory requirement
+ 
+ * Memory pre-allocation
+ 
+-  For non-async data path, guest memory pre-allocation is not a
+-  must. This can help save of memory. If users really want the guest memory
+-  to be pre-allocated (e.g., for performance reason), we can add option
+-  ``-mem-prealloc`` when starting QEMU. Or, we can lock all memory at vhost
+-  side which will force memory to be allocated when mmap at vhost side;
+-  option --mlockall in ovs-dpdk is an example in hand.
++  For non-async data path, guest memory pre-allocation is not a
++  must but can help save memory. To do this we can add option
++  ``-mem-prealloc`` when starting QEMU, or we can lock all memory at vhost
++  side which will force memory to be allocated when it calls mmap
++  (option --mlockall in ovs-dpdk is an example in hand).
++
+ 
+   For async data path, we force the VM memory to be pre-allocated at vhost
+   lib when mapping the guest memory; and also we need to lock the memory to
+@@ -375,8 +375,8 @@ Guest memory requirement
+ 
+ * Memory sharing
+ 
+-  Make sure ``share=on`` QEMU option is given. vhost-user will not work with
+-  a QEMU version without shared memory mapping.
++  Make sure ``share=on`` QEMU option is given. vhost-user will not work with
++  a QEMU instance without shared memory mapping.
+ 
+ Vhost supported vSwitch reference
+ ---------------------------------
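
For the memory pre-allocation note above, the vhost-side alternative to QEMU's ``-mem-prealloc`` amounts to locking all mappings so pages are faulted in when the guest memory is mmapped; a minimal sketch of what an option like ``--mlockall`` boils down to (plain POSIX, not a DPDK API)::

    #include <stdio.h>
    #include <sys/mman.h>

    static int
    lock_all_memory(void)
    {
        /* MCL_FUTURE makes later mappings (e.g. the mmap of guest
         * memory) locked and therefore populated up front. */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
            perror("mlockall");
            return -1;
        }
        return 0;
    }
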
+diff --git a/dpdk/doc/guides/rawdevs/cnxk_bphy.rst b/dpdk/doc/guides/rawdevs/cnxk_bphy.rst
+index 3cb2175688..522390bf1b 100644
+--- a/dpdk/doc/guides/rawdevs/cnxk_bphy.rst
++++ b/dpdk/doc/guides/rawdevs/cnxk_bphy.rst
+@@ -37,7 +37,7 @@ using ``rte_rawdev_queue_conf_get()``.
+ 
+ To perform data transfer use standard ``rte_rawdev_enqueue_buffers()`` and
+ ``rte_rawdev_dequeue_buffers()`` APIs. Not all messages produce sensible
+-responses hence dequeueing is not always necessary.
++responses hence dequeuing is not always necessary.
+ 
+ BPHY CGX/RPM PMD
+ ----------------
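
A sketch of the enqueue/dequeue pairing described above, using the generic rawdev burst API; handling one buffer per call and always expecting a response are simplifications for the example::

    #include <rte_rawdev.h>

    static int
    send_msg(uint16_t dev_id, struct rte_rawdev_buf *buf,
             rte_rawdev_obj_t ctx)
    {
        if (rte_rawdev_enqueue_buffers(dev_id, &buf, 1, ctx) < 0)
            return -1;
        /* Not every message produces a response, so a dequeue is only
         * needed when one is expected. */
        return rte_rawdev_dequeue_buffers(dev_id, &buf, 1, ctx);
    }
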
+diff --git a/dpdk/doc/guides/regexdevs/features_overview.rst b/dpdk/doc/guides/regexdevs/features_overview.rst
+index c512bde592..3e7ab409bf 100644
+--- a/dpdk/doc/guides/regexdevs/features_overview.rst
++++ b/dpdk/doc/guides/regexdevs/features_overview.rst
+@@ -22,7 +22,7 @@ PCRE back tracking ctrl
+   Support PCRE back tracking ctrl.
+ 
+ PCRE call outs
+-  Support PCRE call outes.
++  Support PCRE call outs.
+ 
+ PCRE forward reference
+   Support Forward reference.
+diff --git a/dpdk/doc/guides/regexdevs/mlx5.rst b/dpdk/doc/guides/regexdevs/mlx5.rst
+index b2bf0afd01..fc2cacba60 100644
+--- a/dpdk/doc/guides/regexdevs/mlx5.rst
++++ b/dpdk/doc/guides/regexdevs/mlx5.rst
+@@ -7,7 +7,7 @@ MLX5 RegEx driver
+ =================
+ 
+ The MLX5 RegEx (Regular Expression) driver library
+-(**librte_regex_mlx5**) provides support for **Mellanox BlueField 2**
++(**librte_regex_mlx5**) provides support for **Mellanox BlueField-2**
+ families of 25/50/100/200 Gb/s adapters.
+ 
+ Design
+@@ -43,13 +43,13 @@ Features
+ Supported NICs
+ --------------
+ 
+-* Mellanox\ |reg| BlueField 2 SmartNIC
++* Mellanox\ |reg| BlueField-2 SmartNIC
+ 
+ Prerequisites
+ -------------
+ 
+-- BlueField 2 running Mellanox supported kernel.
+-- Enable the RegEx capabilities using system call from the BlueField 2.
++- BlueField-2 running Mellanox supported kernel.
++- Enable the RegEx capabilities using system call from the BlueField-2.
+ - Official support is not yet released.
+ 
+ Limitations
+diff --git a/dpdk/doc/guides/rel_notes/known_issues.rst b/dpdk/doc/guides/rel_notes/known_issues.rst
+index 187d9c942e..570550843a 100644
+--- a/dpdk/doc/guides/rel_notes/known_issues.rst
++++ b/dpdk/doc/guides/rel_notes/known_issues.rst
+@@ -885,14 +885,15 @@ Unsuitable IOVA mode may be picked as the default
+ **Driver/Module**:
+    ALL.
+ 
+-Vhost multi-queue reconnection failed with QEMU version >= 4.2.0
+-----------------------------------------------------------------
++Vhost multi-queue reconnection failed with QEMU version 4.2.0 to 5.1.0
++----------------------------------------------------------------------
+ 
+ **Description**
+    It's a QEMU regression bug (bad commit: c6beefd674ff). QEMU only saves
+    acked features for one vhost-net when vhost quits. When vhost reconnects
+    to virtio-net/virtio-pmd in multi-queue situations, the features been
+-   set multiple times are not consistent.
++   set multiple times are not consistent. QEMU-5.2.0 fixes this issue in commit
++   f66337bdbfda ("vhost-user: save features of multiqueues if chardev is closed").
+ 
+ **Implication**
+    Vhost cannot reconnect back to virtio-net/virtio-pmd normally.
+diff --git a/dpdk/doc/guides/rel_notes/release_16_07.rst b/dpdk/doc/guides/rel_notes/release_16_07.rst
+index 5be2d171f1..c4f2f71222 100644
+--- a/dpdk/doc/guides/rel_notes/release_16_07.rst
++++ b/dpdk/doc/guides/rel_notes/release_16_07.rst
+@@ -192,7 +192,7 @@ EAL
+ 
+ * **igb_uio: Fixed possible mmap failure for Linux >= 4.5.**
+ 
+-  The mmaping of the iomem range of the PCI device fails for kernels that
++  The mmapping of the iomem range of the PCI device fails for kernels that
+   enabled the ``CONFIG_IO_STRICT_DEVMEM`` option. The error seen by the
+   user is as similar to the following::
+ 
+diff --git a/dpdk/doc/guides/rel_notes/release_17_08.rst b/dpdk/doc/guides/rel_notes/release_17_08.rst
+index 25439dad45..1fd1755858 100644
+--- a/dpdk/doc/guides/rel_notes/release_17_08.rst
++++ b/dpdk/doc/guides/rel_notes/release_17_08.rst
+@@ -232,7 +232,7 @@ API Changes
+   * The ``rte_cryptodev_configure()`` function does not create the session
+     mempool for the device anymore.
+   * The ``rte_cryptodev_queue_pair_attach_sym_session()`` and
+-    ``rte_cryptodev_queue_pair_dettach_sym_session()`` functions require
++    ``rte_cryptodev_queue_pair_detach_sym_session()`` functions require
+     the new parameter ``device id``.
+   * Parameters of ``rte_cryptodev_sym_session_create()`` were modified to
+     accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``.
+diff --git a/dpdk/doc/guides/rel_notes/release_21_11.rst b/dpdk/doc/guides/rel_notes/release_21_11.rst
+index db09ec01ea..69199a9583 100644
+--- a/dpdk/doc/guides/rel_notes/release_21_11.rst
++++ b/dpdk/doc/guides/rel_notes/release_21_11.rst
+@@ -878,3 +878,975 @@ Tested Platforms
+ 
+     * Kernel version: 5.10
+     * Ubuntu 18.04
++
++21.11.1 Release Notes
++---------------------
 +
-+/* Safe initialization declares both iterators. */
-+#define INIT_MULTIVAR_SAFE_SHORT(VAR, MEMBER, POINTER, ITER_TYPE)             \
-+    INIT_MULTIVAR_SAFE_SHORT_EXP(VAR, MEMBER, POINTER, ITER_TYPE, (void) 0)
 +
-+#define INIT_MULTIVAR_SAFE_SHORT_EXP(VAR, MEMBER, POINTER, ITER_TYPE, ...)    \
-+    ITER_TYPE *ITER_VAR(VAR) = ( __VA_ARGS__ , (ITER_TYPE *) POINTER),        \
-+        *ITER_NEXT_VAR(VAR) = NULL
++21.11.1 Fixes
++~~~~~~~~~~~~~
++
++* acl: add missing C++ guards
++* app/compress-perf: fix cycle count operations allocation
++* app/compress-perf: fix number of queue pairs to setup
++* app/compress-perf: fix socket ID type during init
++* app/compress-perf: optimize operations pool allocation
++* app/dumpcap: check for failure to set promiscuous
++* app/fib: fix division by zero
++* app/pdump: abort on multi-core capture limit
++* app/regex: fix number of matches
++* app/testpmd: check starting port is not in bonding
++* app/testpmd: fix bonding mode set
++* app/testpmd: fix build without drivers
++* app/testpmd: fix dereference before null check
++* app/testpmd: fix external buffer allocation
++* app/testpmd: fix flow rule with flex input link
++* app/testpmd: fix GENEVE parsing in checksum mode
++* app/testpmd: fix GTP header parsing in checksum engine
++* app/testpmd: fix raw encap of GENEVE option
++* app/testpmd: fix show RSS RETA on Windows
++* app/testpmd: fix stack overflow for EEPROM display
++* app/testpmd: fix Tx scheduling interval
++* baseband/acc100: avoid out-of-bounds access
++* bpf: add missing C++ guards
++* bpf: fix build with some libpcap version on FreeBSD
++* build: fix build on FreeBSD with Meson 0.61.1
++* build: fix warnings when running external commands
++* build: hide local symbols in shared libraries
++* build: remove deprecated Meson functions
++* build: suppress rte_crypto_asym_op abi check
++* buildtools: fix AVX512 check for Python 3.5
++* bus/ifpga: remove useless check while browsing devices
++* bus/pci: assign driver pointer before mapping
++* common/cnxk: add missing checks of return values
++* common/cnxk: add workaround for vWQE flush
++* common/cnxk: always use single interrupt ID with NIX
++* common/cnxk: fix base rule merge
++* common/cnxk: fix bitmap usage for TM
++* common/cnxk: fix byte order of frag sizes and infos
++* common/cnxk: fix error checking
++* common/cnxk: fix flow deletion
++* common/cnxk: fix log level during MCAM allocation
++* common/cnxk: fix mbuf data offset for VF
++* common/cnxk: fix nibble parsing order when dumping MCAM
++* common/cnxk: fix NPC key extraction validation
++* common/cnxk: fix null pointer dereferences
++* common/cnxk: fix reset of fields
++* common/cnxk: fix shift offset for TL3 length disable
++* common/cnxk: fix uninitialized pointer read
++* common/cnxk: fix uninitialized variables
++* common/cnxk: fix unintended sign extension
++* common/cnxk: reset stale values on error debug registers
++* common/mlx5: add minimum WQE size for striding RQ
++* common/mlx5: add Netlink event helpers
++* common/mlx5: consider local functions as internal
++* common/mlx5: fix error handling in multi-class probe
++* common/mlx5: fix missing validation in devargs parsing
++* common/mlx5: fix MR lookup for non-contiguous mempool
++* common/mlx5: fix probing failure code
++* common/mlx5: fix queue pair ack timeout configuration
++* common/sfc_efx/base: add missing handler for 1-byte fields
++* common/sfc_efx/base: fix recirculation ID set in outer rules
++* compressdev: add missing C++ guards
++* compressdev: fix missing space in log macro
++* compressdev: fix socket ID type
++* compress/mlx5: support out-of-space status
++* compress/octeontx: fix null pointer dereference
++* config: add arch define for Arm
++* config: align mempool elements to 128 bytes on CN10K
++* config/arm: add values for native armv7
++* crypto/cnxk: enable allocated queues only
++* crypto/cnxk: fix extend tail calculation
++* crypto/cnxk: fix inflight count calculation
++* crypto/cnxk: fix update of number of descriptors
++* cryptodev: add missing C++ guards
++* cryptodev: fix clang C++ include
++* cryptodev: fix RSA key type name
++* crypto/dpaax_sec: fix auth/cipher xform chain checks
++* crypto/ipsec_mb: check missing operation types
++* crypto/ipsec_mb: fix buffer overrun
++* crypto/ipsec_mb: fix GCM requested digest length
++* crypto/ipsec_mb: fix GMAC parameters setting
++* crypto/ipsec_mb: fix length and offset settings
++* crypto/ipsec_mb: fix length and offset settings
++* crypto/ipsec_mb: fix premature dereference
++* crypto/ipsec_mb: fix queue cleanup null pointer dereference
++* crypto/ipsec_mb: fix queue setup null pointer dereference
++* crypto/ipsec_mb: fix tainted data for session
++* crypto/ipsec_mb: fix ZUC authentication verify
++* crypto/ipsec_mb: fix ZUC operation overwrite
++* crypto/ipsec_mb: remove useless check
++* crypto/qat: fix GEN4 AEAD job in raw data path
++* crypto/virtio: fix out-of-bounds access
++* devargs: fix crash with uninitialized parsing
++* devtools: fix comment detection in forbidden token check
++* devtools: fix symbols check
++* devtools: remove event/dlb exception in ABI check
++* distributor: fix potential overflow
++* dma/cnxk: fix installing internal headers
++* dmadev: add missing header include
++* dma/hisilicon: use common PCI device naming
++* dma/idxd: configure maximum batch size to high value
++* dma/idxd: fix burst capacity calculation
++* dma/idxd: fix paths to driver sysfs directory
++* dma/idxd: fix wrap-around in burst capacity calculation
++* doc: add CUDA driver features
++* doc: correct name of BlueField-2 in mlx5 guide
++* doc: fix dlb2 guide
++* doc: fix FIPS guide
++* doc: fix KNI PMD name typo
++* doc: fix missing note on UIO module in Linux guide
++* doc: fix modify field action description for mlx5
++* doc: fix telemetry example in cryptodev guide
++* doc: fix typos and punctuation in flow API guide
++* doc: improve configuration examples in idxd guide
++* doc: remove dependency on findutils on FreeBSD
++* doc: remove obsolete vector Tx explanations from mlx5 guide
++* doc: replace broken links in mlx guides
++* doc: replace characters for (R) symbol in Linux guide
++* doc: replace deprecated distutils version parsing
++* doc: update matching versions in ice guide
++* eal: add missing C++ guards
++* eal: fix C++ include
++* eal/freebsd: add missing C++ include guards
++* eal/linux: fix device monitor stop return
++* eal/linux: fix illegal memory access in uevent handler
++* eal/linux: log hugepage create errors with filename
++* eal/windows: fix error code for not supported API
++* efd: fix uninitialized structure
++* ethdev: add internal function to device struct from name
++* ethdev: add missing C++ guards
++* ethdev: fix cast for C++ compatibility
++* ethdev: fix doxygen comments for device info struct
++* ethdev: fix MAC address in telemetry device info
++* ethdev: fix Rx queue telemetry memory leak on failure
++* ethdev: remove unnecessary null check
++* event/cnxk: fix QoS devargs parsing
++* event/cnxk: fix Rx adapter config check
++* event/cnxk: fix sub-event clearing mask length
++* event/cnxk: fix uninitialized local variables
++* event/cnxk: fix variables casting
++* eventdev: add missing C++ guards
++* eventdev/eth_rx: fix missing internal port checks
++* eventdev/eth_rx: fix parameters parsing memory leak
++* eventdev/eth_rx: fix queue config query
++* eventdev/eth_tx: fix queue add error code
++* eventdev: fix C++ include
++* eventdev: fix clang C++ include
++* event/dlb2: add shift value check in sparse dequeue
++* event/dlb2: poll HW CQ inflights before mapping queue
++* event/dlb2: update rolling mask used for dequeue
++* examples/distributor: reduce Tx queue number to 1
++* examples/flow_classify: fix failure message
++* examples/ipsec-secgw: fix buffer freeing in vector mode
++* examples/ipsec-secgw: fix default flow rule creation
++* examples/ipsec-secgw: fix eventdev start sequence
++* examples/ipsec-secgw: fix offload flag used for TSO IPv6
++* examples/kni: add missing trailing newline in log
++* examples/l2fwd-crypto: fix port mask overflow
++* examples/l3fwd: fix buffer overflow in Tx
++* examples/l3fwd: fix Rx burst size for event mode
++* examples/l3fwd: make Rx and Tx queue size configurable
++* examples/l3fwd: share queue size variables
++* examples/qos_sched: fix core mask overflow
++* examples/vhost: fix launch with physical port
++* fix spelling in comments and strings
++* gpu/cuda: fix dependency loading path
++* gpu/cuda: fix memory list cleanup
++* graph: fix C++ include
++* ipc: end multiprocess thread during cleanup
++* ipsec: fix C++ include
++* kni: add missing C++ guards
++* kni: fix freeing order in device release
++* maintainers: update for stable branches
++* mem: check allocation in dynamic hugepage init
++* mempool/cnxk: fix batch allocation failure path
++* metrics: add missing C++ guards
++* net/af_xdp: add missing trailing newline in logs
++* net/af_xdp: ensure socket is deleted on Rx queue setup error
++* net/af_xdp: fix build with -Wunused-function
++* net/af_xdp: fix custom program loading with multiple queues
++* net/axgbe: use PCI root complex device to distinguish device
++* net/bnxt: add null check for mark table
++* net/bnxt: cap maximum number of unicast MAC addresses
++* net/bnxt: check VF representor pointer before access
++* net/bnxt: fix check for autoneg enablement
++* net/bnxt: fix crash by validating pointer
++* net/bnxt: fix flow create when RSS is disabled
++* net/bnxt: fix handling of VF configuration change
++* net/bnxt: fix memzone allocation per VNIC
++* net/bnxt: fix multicast address set
++* net/bnxt: fix multicast MAC restore during reset recovery
++* net/bnxt: fix null dereference in session cleanup
++* net/bnxt: fix PAM4 mask setting
++* net/bnxt: fix queue stop operation
++* net/bnxt: fix restoring VLAN filtering after recovery
++* net/bnxt: fix ring calculation for representors
++* net/bnxt: fix ring teardown
++* net/bnxt: fix VF resource allocation strategy
++* net/bnxt: fix xstats names query overrun
++* net/bnxt: fix xstats query
++* net/bnxt: get maximum supported multicast filters count
++* net/bnxt: handle ring cleanup in case of error
++* net/bnxt: restore dependency on kernel modules
++* net/bnxt: restore RSS configuration after reset recovery
++* net/bnxt: set fast-path pointers only if recovery succeeds
++* net/bnxt: set HW coalescing parameters
++* net/bonding: fix mode type mismatch
++* net/bonding: fix MTU set for slaves
++* net/bonding: fix offloading configuration
++* net/bonding: fix promiscuous and allmulticast state
++* net/bonding: fix reference count on mbufs
++* net/bonding: fix RSS with early configure
++* net/bonding: fix slaves initializing on MTU setting
++* net/cnxk: fix build with GCC 12
++* net/cnxk: fix build with optimization
++* net/cnxk: fix inline device RQ tag mask
++* net/cnxk: fix inline IPsec security error handling
++* net/cnxk: fix mbuf data length
++* net/cnxk: fix promiscuous mode in multicast enable flow
++* net/cnxk: fix RSS RETA table update
++* net/cnxk: fix Rx/Tx function update
++* net/cnxk: fix uninitialized local variable
++* net/cnxk: register callback early to handle initial packets
++* net/cxgbe: fix dangling pointer by mailbox access rework
++* net/dpaa2: fix null pointer dereference
++* net/dpaa2: fix timestamping for IEEE1588
++* net/dpaa2: fix unregistering interrupt handler
++* net/ena: check memory BAR before initializing LLQ
++* net/ena: fix checksum flag for L4
++* net/ena: fix meta descriptor DF flag setup
++* net/ena: fix reset reason being overwritten
++* net/ena: remove unused enumeration
++* net/ena: remove unused offload variables
++* net/ena: skip timer if reset is triggered
++* net/enic: fix dereference before null check
++* net: fix L2TPv2 common header
++* net/hns3: delete duplicated RSS type
++* net/hns3: fix double decrement of secondary count
++* net/hns3: fix insecure way to query MAC statistics
++* net/hns3: fix mailbox wait time
++* net/hns3: fix max packet size rollback in PF
++* net/hns3: fix operating queue when TCAM table is invalid
++* net/hns3: fix RSS key with null
++* net/hns3: fix RSS TC mode entry
++* net/hns3: fix Rx/Tx functions update
++* net/hns3: fix using enum as boolean
++* net/hns3: fix vector Rx/Tx when PTP enabled
++* net/hns3: fix VF RSS TC mode entry
++* net/hns3: increase time waiting for PF reset completion
++* net/hns3: remove duplicate macro definition
++* net/i40e: enable maximum frame size at port level
++* net/i40e: fix unintentional integer overflow
++* net/iavf: count continuous DD bits for Arm
++* net/iavf: count continuous DD bits for Arm in flex Rx
++* net/iavf: fix AES-GMAC IV size
++* net/iavf: fix function pointer in multi-process
++* net/iavf: fix null pointer dereference
++* net/iavf: fix potential out-of-bounds access
++* net/iavf: fix segmentation offload buffer size
++* net/iavf: fix segmentation offload condition
++* net/iavf: remove git residue symbol
++* net/iavf: reset security context pointer on stop
++* net/iavf: support NAT-T / UDP encapsulation
++* net/ice/base: add profile validation on switch filter
++* net/ice: fix build with 16-byte Rx descriptor
++* net/ice: fix link up when starting device
++* net/ice: fix mbuf offload flag for Rx timestamp
++* net/ice: fix overwriting of LSE bit by DCF
++* net/ice: fix pattern check for flow director parser
++* net/ice: fix pattern check in flow director
++* net/ice: fix Tx checksum offload
++* net/ice: fix Tx checksum offload capability
++* net/ice: fix Tx offload path choice
++* net/ice: track DCF state of PF
++* net/ixgbe: add vector Rx parameter check
++* net/ixgbe: check filter init failure
++* net/ixgbe: fix FSP check for X550EM devices
++* net/ixgbe: reset security context pointer on close
++* net/kni: fix config initialization
++* net/memif: remove pointer deference before null check
++* net/memif: remove unnecessary Rx interrupt stub
++* net/mlx5: fix ASO CT object release
++* net/mlx5: fix assertion on flags set in packet mbuf
++* net/mlx5: fix check in count action validation
++* net/mlx5: fix committed bucket size
++* net/mlx5: fix configuration without Rx queue
++* net/mlx5: fix CPU socket ID for Rx queue creation
++* net/mlx5: fix destroying empty matchers list
++* net/mlx5: fix entry in shared Rx queues list
++* net/mlx5: fix errno update in shared context creation
++* net/mlx5: fix E-Switch manager vport ID
++* net/mlx5: fix flex item availability
++* net/mlx5: fix flex item availability
++* net/mlx5: fix flex item header length translation
++* net/mlx5: fix GCC uninitialized variable warning
++* net/mlx5: fix GRE item translation in Verbs
++* net/mlx5: fix GRE protocol type translation for Verbs
++* net/mlx5: fix implicit tag insertion with sample action
++* net/mlx5: fix indexed pool fetch overlap
++* net/mlx5: fix ineffective metadata argument adjustment
++* net/mlx5: fix inet IPIP protocol type
++* net/mlx5: fix initial link status detection
++* net/mlx5: fix inline length for multi-segment TSO
++* net/mlx5: fix link status change detection
++* net/mlx5: fix mark enabling for Rx
++* net/mlx5: fix matcher priority with ICMP or ICMPv6
++* net/mlx5: fix maximum packet headers size for TSO
++* net/mlx5: fix memory socket selection in ASO management
++* net/mlx5: fix metadata endianness in modify field action
++* net/mlx5: fix meter capabilities reporting
++* net/mlx5: fix meter creation default state
++* net/mlx5: fix meter policy creation assert
++* net/mlx5: fix meter sub-policy creation
++* net/mlx5: fix modify field MAC address offset
++* net/mlx5: fix modify port action validation
++* net/mlx5: fix MPLS/GRE Verbs spec ordering
++* net/mlx5: fix MPRQ stride devargs adjustment
++* net/mlx5: fix MPRQ WQE size assertion
++* net/mlx5: fix next protocol RSS expansion
++* net/mlx5: fix NIC egress flow mismatch in switchdev mode
++* net/mlx5: fix port matching in sample flow rule
++* net/mlx5: fix RSS expansion with explicit next protocol
++* net/mlx5: fix sample flow action on trusted device
++* net/mlx5: fix shared counter flag in flow validation
++* net/mlx5: fix shared RSS destroy
++* net/mlx5: fix sibling device config check
++* net/mlx5: fix VLAN push action validation
++* net/mlx5: forbid multiple ASO actions in a single rule
++* net/mlx5: improve stride parameter names
++* net/mlx5: reduce flex item flow handle size
++* net/mlx5: reject jump to root table
++* net/mlx5: relax headroom assertion
++* net/mlx5: remove unused function
++* net/mlx5: remove unused reference counter
++* net/mlx5: set flow error for hash list create
++* net/nfb: fix array indexes in deinit functions
++* net/nfb: fix multicast/promiscuous mode switching
++* net/nfp: free HW ring memzone on queue release
++* net/nfp: remove duplicated check when setting MAC address
++* net/nfp: remove useless range checks
++* net/ngbe: fix debug logs
++* net/ngbe: fix missed link interrupt
++* net/ngbe: fix packet statistics
++* net/ngbe: fix Rx by initializing packet buffer early
++* net/ngbe: fix Tx hang on queue disable
++* net/qede: fix maximum Rx packet length
++* net/qede: fix redundant condition in debug code
++* net/qede: fix Rx bulk
++* net/qede: fix Tx completion
++* net/sfc: demand Tx fast free offload on EF10 simple datapath
++* net/sfc: do not push fast free offload to default TxQ config
++* net/sfc: fix flow tunnel support detection
++* net/sfc: fix lock releases
++* net/sfc: fix memory allocation size for cache
++* net/sfc: reduce log level of tunnel restore info error
++* net/sfc: validate queue span when parsing flow action RSS
++* net/tap: fix to populate FDs in secondary process
++* net/txgbe: fix debug logs
++* net/txgbe: fix KR auto-negotiation
++* net/txgbe: fix link up and down
++* net/txgbe: fix queue statistics mapping
++* net/txgbe: reset security context pointer on close
++* net/virtio: fix slots number when indirect feature on
++* net/virtio: fix Tx queue 0 overriden by queue 128
++* net/virtio: fix uninitialized RSS key
++* net/virtio-user: check FD flags getting failure
++* net/virtio-user: fix resource leak on probing failure
++* pcapng: handle failure of link status query
++* pflock: fix header file installation
++* pipeline: fix annotation checks
++* pipeline: fix table state memory allocation
++* raw/ifpga/base: fix port feature ID
++* raw/ifpga/base: fix SPI transaction
++* raw/ifpga: fix build with optimization
++* raw/ifpga: fix interrupt handle allocation
++* raw/ifpga: fix monitor thread
++* raw/ifpga: fix thread closing
++* raw/ifpga: fix variable initialization in probing
++* raw/ntb: clear all valid doorbell bits on init
++* regexdev: fix section attribute of symbols
++* regex/mlx5: fix memory allocation check
++* Revert "crypto/ipsec_mb: fix length and offset settings"
++* Revert "net/mlx5: fix flex item availability"
++* ring: fix error code when creating ring
++* ring: fix overflow in memory size calculation
++* sched: remove useless malloc in PIE data init
++* stack: fix stubs header export
++* table: fix C++ include
++* telemetry: add missing C++ guards
++* test/bpf: skip dump if conversion fails
++* test/crypto: fix out-of-place SGL in raw datapath
++* test/dma: fix missing checks for device capacity
++* test/efd: fix sockets mask size
++* test/mbuf: fix mbuf data content check
++* test/mem: fix error check
++* vdpa/ifc: fix log info mismatch
++* vdpa/mlx5: workaround queue stop with traffic
++* vdpa/sfc: fix null dereference during config
++* vdpa/sfc: fix null dereference during removal
++* version: 21.11.1-rc1
++* vfio: cleanup the multiprocess sync handle
++* vhost: add missing C++ guards
++* vhost: fix C++ include
++* vhost: fix FD leak with inflight messages
++* vhost: fix field naming in guest page struct
++* vhost: fix guest to host physical address mapping
++* vhost: fix linker script syntax
++* vhost: fix physical address mapping
++* vhost: fix queue number check when setting inflight FD
++* vhost: fix unsafe vring addresses modifications
++
++21.11.1 Validation
++~~~~~~~~~~~~~~~~~~
++
++* `Nvidia(R) Testing <https://mails.dpdk.org/archives/stable/2022-April/037633.html>`_
++
++   * testpmd send and receive multiple types of traffic
++   * testpmd xstats counters
++   * testpmd timestamp
++   * Changing/checking link status through testpmd
++   * RTE flow
++   * Some RSS
++   * VLAN stripping and insertion
++   * checksum and TSO
++   * ptype
++   * link_status_interrupt example application
++   * l3fwd-power example application
++   * multi-process example applications
++   * Hardware LRO
++   * Regex application
++   * Buffer Split
++   * Tx scheduling
++   * Compilation tests
++
++   * ConnectX-4 Lx
++
++      * Ubuntu 20.04
++
++      * driver MLNX_OFED_LINUX-5.5-1.0.3.2
++      * fw 14.32.1010
++
++   * ConnectX-5
++
++      * Ubuntu 20.04
++
++      * driver MLNX_OFED_LINUX-5.5-1.0.3.2
++      * fw 16.32.2004
++
++   * ConnectX-6 Dx
++
++      * Ubuntu 20.04
++
++      * driver MLNX_OFED_LINUX-5.5-1.0.3.2
++      * fw 22.32.2004
++
++   * BlueField-2
++
++      * DOCA SW version: 1.2.1
++
++
++* `Red Hat(R) Testing <https://mails.dpdk.org/archives/stable/2022-April/037650.html>`_
++
++   * RHEL 8
++   * Kernel 4.18
++   * QEMU 6.2
++   * Functionality
++
++      * PF assignment
++      * VF assignment
++      * vhost single/multi queues and cross-NUMA
++      * vhostclient reconnect
++      * vhost live migration with single/multi queues and cross-NUMA
++      * OVS PVP
++
++   * Tested NICs
++
++      * X540-AT2 NIC(ixgbe, 10G)
++
++
++* `Intel(R) Testing <https://mails.dpdk.org/archives/stable/2022-April/037680.html>`_
 +
-+/* Evaluate the condition expression and, if satisfied, update the _next_
-+ * iterator with the NEXT_EXPR.
-+ * Both EXPR and NEXT_EXPR should only use ITER_VAR(VAR) and
-+ * ITER_NEXT_VAR(VAR).
-+ */
-+#define CONDITION_MULTIVAR_SAFE_SHORT(VAR, MEMBER, EXPR, NEXT_EXPR)           \
-+    ((EXPR) ?                                                                 \
-+     (((VAR) = OBJECT_CONTAINING(ITER_VAR(VAR), VAR, MEMBER)),                \
-+      (NEXT_EXPR), 1) :                                                       \
-+     (((VAR) = NULL), 0))
++   * Compilation tests
 +
-+#define UPDATE_MULTIVAR_SAFE_SHORT(VAR)                                       \
-+    UPDATE_MULTIVAR(VAR, ITER_NEXT_VAR(VAR))
++   * Basic Intel(R) NIC(ixgbe, i40e, ice)
 +
-+/* _LONG versions of the macros. */
++      * PF (i40e, ixgbe, ice)
++      * VF (i40e, ixgbe, ice)
++      * Intel NIC single core/NIC performance
++      * IPsec test scenarios
++      * Power test scenarios
 +
-+#define INIT_MULTIVAR_SAFE_LONG(VAR, NEXT_VAR, MEMBER, POINTER, ITER_TYPE)    \
-+    INIT_MULTIVAR_SAFE_LONG_EXP(VAR, NEXT_VAR, MEMBER, POINTER, ITER_TYPE,    \
-+                                (void) 0)                                     \
++   * Basic cryptodev and virtio
 +
-+#define INIT_MULTIVAR_SAFE_LONG_EXP(VAR, NEXT_VAR, MEMBER, POINTER,           \
-+                                    ITER_TYPE, ...)                           \
-+    ITER_TYPE  *ITER_VAR(VAR) = ( __VA_ARGS__ , (ITER_TYPE *) POINTER),       \
-+        *ITER_VAR(NEXT_VAR) = NULL
++      * vhost/virtio basic loopback, PVP and performance
++      * cryptodev function
++      * cryptodev performance
++      * vhost_crypto unit test and function/performance test
 +
-+/* Evaluate the condition expression and, if satisfied, update the _next_
-+ * iterator with the NEXT_EXPR. After, evaluate the NEXT_COND and, if
-+ * satisfied, set the value to NEXT_VAR. NEXT_COND must use ITER_VAR(NEXT_VAR).
-+ *
-+ * Both EXPR and NEXT_EXPR should only use ITER_VAR(VAR) and
-+ * ITER_VAR(NEXT_VAR).
-+ */
-+#define CONDITION_MULTIVAR_SAFE_LONG(VAR, NEXT_VAR, MEMBER, EXPR, NEXT_EXPR,  \
-+                                     NEXT_COND)                               \
-+    ((EXPR) ?                                                                 \
-+     (((VAR) = OBJECT_CONTAINING(ITER_VAR(VAR), VAR, MEMBER)),                \
-+      (NEXT_EXPR), ((NEXT_COND) ?                                             \
-+       ((NEXT_VAR) =                                                          \
-+        OBJECT_CONTAINING(ITER_VAR(NEXT_VAR), NEXT_VAR, MEMBER)) :            \
-+       ((NEXT_VAR) = NULL)), 1) :                                             \
-+     (((VAR) = NULL), ((NEXT_VAR) = NULL), 0))
++* `Canonical(R) Testing <https://mails.dpdk.org/archives/stable/2022-April/037717.html>`_
 +
-+#define UPDATE_MULTIVAR_SAFE_LONG(VAR, NEXT_VAR)                              \
-+    UPDATE_MULTIVAR(VAR, ITER_VAR(NEXT_VAR))
++   * Build tests of DPDK & OVS 2.13.3 on Ubuntu 20.04 (meson based)
++   * Functional and performance tests based on OVS-DPDK on x86_64
++   * Autopkgtests for DPDK and OpenvSwitch
 +
-+/* Helpers to allow overloading the *_SAFE iterator macros and select either
-+ * the LONG or the SHORT version depending on the number of arguments.
-+ */
-+#define GET_SAFE_MACRO2(_1, _2, NAME, ...) NAME
-+#define GET_SAFE_MACRO3(_1, _2, _3, NAME, ...) NAME
-+#define GET_SAFE_MACRO4(_1, _2, _3, _4, NAME, ...) NAME
-+#define GET_SAFE_MACRO5(_1, _2, _3, _4, _5, NAME, ...) NAME
-+#define GET_SAFE_MACRO6(_1, _2, _3, _4, _5, _6, NAME, ...) NAME
-+#define GET_SAFE_MACRO(MAX_ARGS) GET_SAFE_MACRO ## MAX_ARGS
++21.11.1 Known Issues
++~~~~~~~~~~~~~~~~~~~~
 +
-+/* MSVC treats __VA_ARGS__ as a simple token in argument lists. Introduce
-+ * a level of indirection to work around that. */
-+#define EXPAND_MACRO(name, args) name args
++* DPDK 21.11.1 contains fixes up to DPDK 22.03
++* Issues identified/fixed in DPDK main branch after DPDK 22.03 may be present in DPDK 21.11.1
 +
-+/* Overload the LONG and the SHORT version of the macros. MAX_ARGS is the
-+ * maximum number of arguments (i.e: the number of arguments of the LONG
-+ * version). */
-+#define OVERLOAD_SAFE_MACRO(LONG, SHORT, MAX_ARGS, ...) \
-+        EXPAND_MACRO(GET_SAFE_MACRO(MAX_ARGS), \
-+                     (__VA_ARGS__, LONG, SHORT))(__VA_ARGS__)
++21.11.2 Release Notes
++---------------------
 +
- /* Returns the number of elements in ARRAY. */
- #define ARRAY_SIZE(ARRAY) __ARRAY_SIZE(ARRAY)
++
++21.11.2 Fixes
++~~~~~~~~~~~~~
++
++* acl: fix rules with 8-byte field size
++* app/flow-perf: fix build with GCC 12
++* app/procinfo: show all non-owned ports
++* app/regex: avoid division by zero
++* app/regex: fix mbuf size for multi-segment buffer
++* app/testpmd: add help messages for multi-process
++* app/testpmd: check statistics query before printing
++* app/testpmd: cleanup port resources after implicit close
++* app/testpmd: do not poll stopped queues
++* app/testpmd: fix bonding slave devices not released
++* app/testpmd: fix flex parser destroy command
++* app/testpmd: fix GTP PSC raw processing
++* app/testpmd: fix GTP PSC raw processing
++* app/testpmd: fix help of create meter command
++* app/testpmd: fix metering and policing command for RFC4115
++* app/testpmd: fix MTU verification
++* app/testpmd: fix multicast address pool leak
++* app/testpmd: fix packet segment allocation
++* app/testpmd: fix port status of bonding slave device
++* app/testpmd: fix supported RSS offload display
++* app/testpmd: fix use of indirect action after port close
++* app/testpmd: perform SW IP checksum for GRO/GSO packets
++* app/testpmd: remove useless pointer checks
++* app/testpmd: replace hardcoded min mbuf number with macro
++* app/testpmd: revert MAC update in checksum forwarding
++* avoid AltiVec keyword vector
++* baseband/acc100: add protection for some negative scenario
++* baseband/acc100: update companion PF configure function
++* bus/fslmc: fix VFIO setup
++* common/cnxk: allow changing PTP mode on CN10K
++* common/cnxk: fix decrypt packet count register update
++* common/cnxk: fix GRE tunnel parsing
++* common/cnxk: fix null pointer dereference
++* common/cnxk: fix SQ flush sequence
++* common/cnxk: fix unaligned access to device memory
++* common/cnxk: handle ROC model init failure
++* common/cnxk: swap zuc-256 key
++* common/cpt: fix build with GCC 12
++* common/dpaax: fix short MAC-I IV calculation for ZUC
++* common/mlx5: fix memory region range calculation
++* common/mlx5: fix non-expandable global MR cache
++* common/mlx5: remove unused lcore check
++* common/sfc_efx/base: convert EFX PCIe INTF to MCDI value
++* config: fix C++ cross compiler for Arm and PPC
++* crypto/cnxk: fix build with GCC 12
++* crypto/cnxk: swap zuc-256 iv
++* crypto/dpaa2_sec: fix buffer pool ID check
++* crypto/dpaa2_sec: fix chained FD length in raw datapath
++* crypto/dpaa2_sec: fix crypto operation pointer
++* crypto/dpaa2_sec: fix fle buffer leak
++* crypto/dpaa2_sec: fix operation status for simple FD
++* crypto/dpaa_sec: fix chained FD length in raw datapath
++* crypto/dpaa_sec: fix digest size
++* crypto/dpaa_sec: fix secondary process probing
++* crypto/ipsec_mb: fix build with GCC 12
++* crypto/mlx5: fix login cleanup
++* crypto/qat: fix DOCSIS crash
++* crypto/scheduler: fix queue pair in scheduler failover
++* devargs: fix leak on hotplug failure
++* devtools: fix null test for NUMA systems
++* dma/hisilicon: enhance CQ scan robustness
++* dma/hisilicon: fix includes in header file
++* dma/hisilicon: fix index returned when no DMA completed
++* dma/idxd: fix AVX2 in non-datapath functions
++* dma/idxd: fix error code for PCI device commands
++* dma/idxd: fix memory leak in PCI close
++* dma/idxd: fix non-AVX builds with old compilers
++* dma/idxd: fix null dereference in PCI remove
++* dma/idxd: fix partial freeing in PCI close
++* dma/skeleton: fix index returned when no memcpy completed
++* doc: add missing auth algo for IPsec example
++* doc: add more instructions for running as non-root
++* doc: fix API index Markdown syntax
++* doc: fix build with sphinx 4.5
++* doc: fix flow integrity hardware support in mlx5 guide
++* doc: fix formatting and link in BPF library guide
++* doc: fix grammar and formatting in compressdev guide
++* doc: fix grammar and parameters in l2fwd-crypto guide
++* doc: fix readability in vhost guide
++* doc: fix release note typo
++* doc: fix vhost multi-queue reconnection
++* doc: update matching versions in i40e guide
++* doc: update matching versions in ice guide
++* drivers/crypto: fix warnings for OpenSSL version
++* eal: fix C++ include for device event and DMA
++* eal/freebsd: fix use of newer cpuset macros
++* eal/ppc: fix compilation for musl
++* eal/windows: add missing C++ include guards
++* eal/windows: fix data race when creating threads
++* eal/x86: drop export of internal alignment macro
++* eal/x86: fix unaligned access for small memcpy
++* ethdev: fix build with vtune option
++* ethdev: fix memory leak in xstats telemetry
++* ethdev: fix port close in secondary process
++* ethdev: fix port state when stop
++* ethdev: fix possible null pointer access
++* ethdev: fix RSS update when RSS is disabled
++* ethdev: prohibit polling stopped queue
++* event/cnxk: fix out of bounds access in test
++* event/cnxk: fix QoS parameter handling
++* event/cnxk: fix Tx adapter enqueue return for CN10K
++* eventdev/eth_rx: fix telemetry Rx stats reset
++* eventdev/eth_tx: fix adapter creation
++* eventdev/eth_tx: fix queue delete
++* event/dlb2: fix advertized capabilities
++* event/dlb2: fix check of QID in-flight
++* event/dlb2: rework queue drain handling
++* event/octeontx: fix SSO fast path
++* examples/bond: fix invalid use of trylock
++* examples/distributor: fix distributor on Rx core
++* examples/dma: fix MTU configuration
++* examples/dma: fix Tx drop statistics
++* examples/fips_validation: handle empty payload
++* examples/ipsec-secgw: fix ESN setting
++* examples/ipsec-secgw: fix NAT-T header fields
++* examples/ipsec-secgw: fix promiscuous mode option
++* examples/ipsec-secgw: fix uninitialized memory access
++* examples/l2fwd-crypto: fix stats refresh rate
++* examples/link_status_interrupt: fix stats refresh rate
++* examples/performance-thread: fix build with GCC 12
++* examples/vhost: fix crash when no VMDq
++* examples/vhost: fix retry logic on Rx path
++* gro: fix identifying fragmented packets
++* ipsec: fix NAT-T ports and length
++* kni: fix build
++* kni: fix build with Linux 5.18
++* kni: use dedicated function to set MAC address
++* kni: use dedicated function to set random MAC address
++* malloc: fix allocation of almost hugepage size
++* malloc: fix ASan handling for unmapped memory
++* mbuf: dump outer VLAN
++* mem: skip attaching external memory in secondary process
++* net/af_xdp: make compatible with libbpf >= 0.7.0
++* net/af_xdp: use libxdp if available
++* net/axgbe: fix xstats get return if xstats is null
++* net/bnxt: allow Tx only or Rx only
++* net/bnxt: avoid unnecessary endianness conversion
++* net/bnxt: check duplicate queue IDs
++* net/bnxt: cleanup MTU setting
++* net/bnxt: disallow MTU change when device is started
++* net/bnxt: fix check for autoneg enablement in the PHY FW
++* net/bnxt: fix compatibility with some old firmwares
++* net/bnxt: fix device capability reporting
++* net/bnxt: fix freeing VNIC filters
++* net/bnxt: fix link status when port is stopped
++* net/bnxt: fix reordering in NEON Rx
++* net/bnxt: fix ring group on Rx restart
++* net/bnxt: fix RSS action
++* net/bnxt: fix Rx configuration
++* net/bnxt: fix setting forced speed
++* net/bnxt: fix speed autonegotiation
++* net/bnxt: fix switch domain allocation
++* net/bnxt: fix tunnel stateless offloads
++* net/bnxt: fix ULP parser to ignore segment offset
++* net/bnxt: force PHY update on certain configurations
++* net/bnxt: handle queue stop during RSS flow create
++* net/bnxt: recheck FW readiness if in reset process
++* net/bnxt: remove unused macro
++* net/bonding: fix mbuf fast free usage
++* net/bonding: fix RSS inconsistency between ports
++* net/bonding: fix RSS key config with extended key length
++* net/bonding: fix slave stop and remove on port close
++* net/bonding: fix stopping non-active slaves
++* net/cnxk: add barrier after meta batch free in scalar
++* net/cnxk: add message on flow parsing failure
++* net/cnxk: fix possible null dereference in telemetry
++* net/cnxk: fix uninitialized variables
++* net/cxgbe: fix port ID in Rx mbuf
++* net/cxgbe: fix Tx queue stuck with mbuf chain coalescing
++* net/dpaa2: fix dpdmux default interface
++* net/dpaa: fix event queue detach
++* net/ena: fix build with GCC 12
++* net/enetfec: fix build with GCC 12
++* net/failsafe: fix device freeing
++* net: fix GTP PSC headers
++* net/hns3: delete unused code
++* net/hns3: fix an unreasonable memset
++* net/hns3: fix code check warning
++* net/hns3: fix crash from secondary process
++* net/hns3: fix descriptors check with SVE
++* net/hns3: fix link status capability query from VF
++* net/hns3: fix MAC and queues HW statistics overflow
++* net/hns3: fix mbuf free on Tx done cleanup
++* net/hns3: fix order of clearing imissed register in PF
++* net/hns3: fix pseudo-sharing between threads
++* net/hns3: fix PTP interrupt logging
++* net/hns3: fix return value for unsupported tuple
++* net/hns3: fix rollback on RSS hash update
++* net/hns3: fix RSS disable
++* net/hns3: fix statistics locking
++* net/hns3: fix TM capability
++* net/hns3: fix xstats get return if xstats is null
++* net/hns3: remove duplicate definition
++* net/hns3: remove redundant RSS tuple field
++* net/hns3: remove unnecessary RSS switch
++* net/hns3: support backplane media type
++* net/i40e: fix max frame size config at port level
++* net/i40e: populate error in flow director parser
++* net/iavf: fix data path selection
++* net/iavf: fix device initialization without inline crypto
++* net/iavf: fix device stop
++* net/iavf: fix GTP-U extension flow
++* net/iavf: fix mbuf release in multi-process
++* net/iavf: fix NAT-T payload length
++* net/iavf: fix queue start exception handling
++* net/iavf: fix Rx queue interrupt setting
++* net/iavf: fix segfaults when calling API after VF reset failed
++* net/iavf: fix VF reset
++* net/iavf: increase reset complete wait count
++* net/iavf: remove dead code
++* net/ice: add missing Tx burst mode name
++* net/ice/base: fix build with GCC 12
++* net/ice/base: fix direction of flow that matches any
++* net/ice/base: fix getting sched node from ID type
++* net/ice: fix build with GCC 12
++* net/ice: fix MTU info for DCF
++* net/ice: fix race condition in Rx timestamp
++* net/ice: fix raw flow input pattern parsing
++* net/ice: improve performance of Rx timestamp offload
++* net/ice: refactor parser usage
++* net/igc: support multi-process
++* net/ipn3ke: fix xstats get return if xstats is null
++* net/ixgbe: add option for link up check on pin SDP3
++* net/memif: fix overwriting of head segment
++* net/mlx5: add limitation for E-Switch Manager match
++* net/mlx5: fix build with clang 14
++* net/mlx5: fix counter in non-termination meter
++* net/mlx5: fix GTP handling in header modify action
++* net/mlx5: fix LRO configuration in drop Rx queue
++* net/mlx5: fix LRO validation in Rx setup
++* net/mlx5: fix metering on E-Switch Manager
++* net/mlx5: fix no-green metering with RSS
++* net/mlx5: fix probing with secondary bonding member
++* net/mlx5: fix RSS expansion for patterns with ICMP item
++* net/mlx5: fix RSS hash types adjustment
++* net/mlx5: fix Rx queue recovery mechanism
++* net/mlx5: fix Rx/Tx stats concurrency
++* net/mlx5: fix stack buffer overflow in drop action
++* net/mlx5: fix statistics read on Linux
++* net/mlx5: fix Tx recovery
++* net/mlx5: fix Tx when inlining is impossible
++* net/mlx5: reject negative integrity item configuration
++* net/mlx5: restrict Rx queue array access to boundary
++* net/mvpp2: fix xstats get return if xstats is null
++* net/netvsc: fix calculation of checksums based on mbuf flag
++* net/netvsc: fix hot adding multiple VF PCI devices
++* net/netvsc: fix vmbus device reference in multi-process
++* net/nfp: fix disabling VLAN stripping
++* net/nfp: fix initialization
++* net/nfp: make sure MTU is never larger than mbuf size
++* net/nfp: remove unneeded header inclusion
++* net/nfp: update how max MTU is read
++* net/ngbe: add more packet statistics
++* net/ngbe: fix link speed check
++* net/ngbe: fix PCIe related operations with bus API
++* net/ngbe: fix reading PHY ID
++* net/octeontx: fix port close
++* net/qede: fix build with GCC 12
++* net/qede: fix build with GCC 13
++* net/tap: fix device freeing
++* net/tap: fix interrupt handler freeing
++* net/txgbe: fix max number of queues for SR-IOV
++* net/txgbe: fix register polling
++* net/txgbe: fix SGMII mode to link up
++* net/vhost: fix access to freed memory
++* net/vhost: fix deadlock on vring state change
++* net/vhost: fix null pointer dereference
++* net/vhost: fix TSO feature default disablement
++* net/virtio: restore some optimisations with AVX512
++* net/virtio: unmap PCI device in secondary process
++* net/virtio-user: fix Rx interrupts with multi-queue
++* net/virtio-user: fix socket non-blocking mode
++* net/vmxnet3: fix Rx data ring initialization
++* pcapng: fix timestamp wrapping in output files
++* pipeline: fix emit instruction for invalid headers
++* raw/ifpga: remove virtual devices on close
++* raw/ifpga: unregister interrupt on close
++* raw/ioat: fix build missing errno include
++* raw/ioat: fix build when ioat dmadev enabled
++* rib: fix references for IPv6 implementation
++* rib: fix traversal with /32 route
++* sched: remove unnecessary floating point
++* security: fix SA lifetime comments
++* service: fix lingering active status
++* test: avoid hang if queues are full and Tx fails
++* test/bonding: fix RSS test when disable RSS
++* test/bpf: skip test if libpcap is unavailable
++* test: check memory allocation for CRC
++* test/crypto: fix authentication IV for ZUC SGL
++* test/crypto: fix cipher offset for ZUC
++* test/crypto: fix driver name for DPAA raw API test
++* test/crypto: fix null check for ZUC authentication
++* test/crypto: fix SNOW3G vector IV format
++* test/crypto: fix ZUC vector IV format
++* test/crypto: skip oop test for raw api
++* test: drop reference to removed tests
++* test/hash: fix out of bound access
++* test/ipsec: fix build with GCC 12
++* test/ipsec: fix performance test
++* test/mem: disable ASan when accessing unallocated memory
++* test/table: fix buffer overflow on lpm entry
++* trace: fix crash when exiting
++* trace: fix init with long file prefix
++* vdpa/ifc/base: fix null pointer dereference
++* vdpa/ifc: fix build with GCC 12
++* vdpa/mlx5: fix dead loop when process interrupted
++* vdpa/mlx5: fix interrupt trash that leads to crash
++* vdpa/mlx5: fix leak on event thread creation
++* vdpa/mlx5: fix maximum number of virtqs
++* vdpa/mlx5: workaround var offset within page
++* vdpa/sfc: fix sync between QEMU and vhost-user
++* vdpa/sfc: resolve race between vhost lib and device conf
++* version: 21.11.2-rc1
++* vhost: add some trailing newline in log messages
++* vhost/crypto: fix build with GCC 12
++* vhost/crypto: fix descriptor processing
++* vhost: discard too small descriptor chains
++* vhost: fix async access
++* vhost: fix deadlock when message handling failed
++* vhost: fix header spanned across more than two descriptors
++* vhost: fix missing enqueue pseudo-header calculation
++* vhost: fix missing virtqueue lock protection
++* vhost: restore device information in log messages
++
++21.11.2 Validation
++~~~~~~~~~~~~~~~~~~
++
++* `Red Hat(R) Testing <https://mails.dpdk.org/archives/stable/2022-August/039801.html>`__
++
++   * Platform
++
++      * RHEL 8
++      * Kernel 4.18
++      * Qemu 6.2
++      * X540-AT2 NIC(ixgbe, 10G)
++
++   * Functionality
++
++      * Guest with device assignment(PF) throughput testing(1G hugepage size)
++      * Guest with device assignment(PF) throughput testing(2M hugepage size)
++      * Guest with device assignment(VF) throughput testing
++      * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
++      * PVP vhost-user 2Q throughput testing
++      * PVP vhost-user 1Q cross numa node  throughput testing
++      * Guest with vhost-user 2 queues throughput testing
++      * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect
++      * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect
++      * PVP 1Q live migration testing
++      * PVP 1Q cross numa node live migration testing
++      * Guest with ovs+dpdk+vhost-user 1Q live migration testing
++      * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M)
++      * Guest with ovs+dpdk+vhost-user 2Q live migration testing
++      * Guest with ovs+dpdk+vhost-user 4Q live migration testing
++      * Host PF + DPDK testing
++      * Host VF + DPDK testing
++
++
++* `Intel(R) Testing <https://mails.dpdk.org/archives/stable/2022-August/040006.html>`__
++
++   * Basic Intel(R) NIC(ixgbe, i40e and ice) testing
++
++      * PF (i40e)
++      * PF (ixgbe)
++      * PF (ice)
++      * VF (i40e)
++      * VF (ixgbe)
++      * VF (ice)
++      * Compile Testing
++      * Intel NIC single core/NIC performance
++      * Power and IPsec
++
++   * Basic cryptodev and virtio testing
++
++      * vhost/virtio basic loopback, PVP and performance test
++      * cryptodev Function/Performance
++
++
++* `Nvidia(R) Testing <https://mails.dpdk.org/archives/stable/2022-August/039931.html>`__
++
++   * Basic functionality with testpmd
++
++      * Tx/Rx
++      * xstats
++      * Timestamps
++      * Link status
++      * RTE flow and flow_director
++      * RSS
++      * VLAN filtering, stripping and insertion
++      * Checksum/TSO
++      * ptype
++      * link_status_interrupt example application
++      * l3fwd-power example application
++      * Multi-process example applications
++      * Hardware LRO tests
++      * Regex application
++      * Buffer Split
++      * Tx scheduling
++
++   * Build tests
++
++      * Ubuntu 20.04.4 with MLNX_OFED_LINUX-5.7-1.0.2.0.
++      * Ubuntu 20.04.4 with rdma-core master (23a0021).
++      * Ubuntu 20.04.4 with rdma-core v28.0.
++      * Ubuntu 18.04.6 with rdma-core v17.1.
++      * Ubuntu 18.04.6 with rdma-core master (23a0021) (i386).
++      * Ubuntu 16.04.7 with rdma-core v22.7.
++      * Fedora 35 with rdma-core v39.0.
++      * Fedora 37 (Rawhide) with rdma-core v39.0 (with clang only).
++      * CentOS 7 7.9.2009 with rdma-core master (23a0021).
++      * CentOS 7 7.9.2009 with MLNX_OFED_LINUX-5.7-1.0.2.0.
++      * CentOS 8 8.4.2105 with rdma-core master (23a0021).
++      * OpenSUSE Leap 15.4 with rdma-core v38.1.
++      * Windows Server 2019 with Clang 11.0.0.
++
++   * ConnectX-6 Dx
++
++      * Ubuntu 20.04
++      * Driver MLNX_OFED_LINUX-5.7-1.0.2.0
++      * fw 22.34.1002
++
++   * ConnectX-5
++
++      * Ubuntu 20.04
++      * Driver MLNX_OFED_LINUX-5.7-1.0.2.0
++      * fw 16.34.1002
++
++   * ConnectX-4 Lx
++
++      * Ubuntu 20.04
++      * Driver MLNX_OFED_LINUX-5.7-1.0.2.0
++      * fw 14.32.1010
++
++   * BlueField-2
++
++      * DOCA SW version: 1.4.0
++
++
++* `Intel(R) Testing with Open vSwitch <https://mails.dpdk.org/archives/stable/2022-August/040028.html>`__
++
++   * 21.11.2 validated by Intel for i40e, ICE, vhost and MTU for OVS with DPDK
++
++21.11.2 Known Issues
++~~~~~~~~~~~~~~~~~~~~
++
++* DPDK 21.11.2 contains fixes up to DPDK 22.07 as well as fixes for CVE-2022-28199 and CVE-2022-2132
++* Issues identified/fixed in DPDK main branch after DPDK 22.07 may be present in DPDK 21.11.2
+diff --git a/dpdk/doc/guides/rel_notes/release_2_1.rst b/dpdk/doc/guides/rel_notes/release_2_1.rst
+index 35e6c88884..d0ad99ebce 100644
+--- a/dpdk/doc/guides/rel_notes/release_2_1.rst
++++ b/dpdk/doc/guides/rel_notes/release_2_1.rst
+@@ -671,7 +671,7 @@ Resolved Issues
+   value 0.
+ 
+ 
+-  Fixes: 40b966a211ab ("ivshmem: library changes for mmaping using ivshmem")
++  Fixes: 40b966a211ab ("ivshmem: library changes for mmapping using ivshmem")
+ 
+ 
+ * **ixgbe/base: Fix SFP probing.**
+diff --git a/dpdk/doc/guides/sample_app_ug/fips_validation.rst b/dpdk/doc/guides/sample_app_ug/fips_validation.rst
+index 56df434215..39baea3346 100644
+--- a/dpdk/doc/guides/sample_app_ug/fips_validation.rst
++++ b/dpdk/doc/guides/sample_app_ug/fips_validation.rst
+@@ -77,11 +77,12 @@ Compiling the Application
+     .. code-block:: console
+ 
+          dos2unix AES/req/*
+-         dos2unix AES_GCM/req/*
++         dos2unix GCM/req/*
+          dos2unix CCM/req/*
+          dos2unix CMAC/req/*
+          dos2unix HMAC/req/*
+          dos2unix TDES/req/*
++         dos2unix SHA/req/*
+ 
+ Running the Application
+ -----------------------
+diff --git a/dpdk/doc/guides/sample_app_ug/ip_reassembly.rst b/dpdk/doc/guides/sample_app_ug/ip_reassembly.rst
+index 06289c2248..5280bf4ea0 100644
+--- a/dpdk/doc/guides/sample_app_ug/ip_reassembly.rst
++++ b/dpdk/doc/guides/sample_app_ug/ip_reassembly.rst
+@@ -154,8 +154,8 @@ each RX queue uses its own mempool.
+ 
+ .. literalinclude:: ../../../examples/ip_reassembly/main.c
+     :language: c
+-    :start-after: mbufs stored int the gragment table. 8<
+-    :end-before: >8 End of mbufs stored int the fragmentation table.
++    :start-after: mbufs stored in the fragment table. 8<
++    :end-before: >8 End of mbufs stored in the fragmentation table.
+     :dedent: 1
+ 
+ Packet Reassembly and Forwarding
+diff --git a/dpdk/doc/guides/sample_app_ug/ipsec_secgw.rst b/dpdk/doc/guides/sample_app_ug/ipsec_secgw.rst
+index c53ee7c386..468a977478 100644
+--- a/dpdk/doc/guides/sample_app_ug/ipsec_secgw.rst
++++ b/dpdk/doc/guides/sample_app_ug/ipsec_secgw.rst
+@@ -116,7 +116,8 @@ Constraints
+ *  No IPv6 options headers.
+ *  No AH mode.
+ *  Supported algorithms: AES-CBC, AES-CTR, AES-GCM, 3DES-CBC, HMAC-SHA1,
+-   AES-GMAC, AES_CTR, AES_XCBC_MAC, AES_CCM, CHACHA20_POLY1305 and NULL.
++   HMAC-SHA256, AES-GMAC, AES_CTR, AES_XCBC_MAC, AES_CCM, CHACHA20_POLY1305
++   and NULL.
+ *  Each SA must be handle by a unique lcore (*1 RX queue per port*).
  
-@@ -285,6 +429,9 @@ is_pow2(uintmax_t x)
+ Compiling the Application
+@@ -586,6 +587,7 @@ where each options means:
+ 
+     * *null*: NULL algorithm
+     * *sha1-hmac*: HMAC SHA1 algorithm
++    * *sha256-hmac*: HMAC SHA256 algorithm
+ 
+ ``<auth_key>``
+ 
+diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst
+index 440642ef7c..3ada3575ba 100644
+--- a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst
++++ b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst
+@@ -176,7 +176,7 @@ function. The value returned is the number of parsed arguments:
+ .. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c
+     :language: c
+     :start-after: Initialize the Environment Abstraction Layer (EAL). 8<
+-    :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
++    :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
+     :dedent: 1
+ 
+ The next task is to initialize the PQoS library and configure CAT. The
+diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst
+index 1b4444b7d8..ce49eab96f 100644
+--- a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst
++++ b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst
+@@ -15,7 +15,7 @@ Overview
+ The L2 Forwarding with Crypto sample application performs a crypto operation (cipher/hash)
+ specified by the user from command line (or using the default values),
+ with a crypto device capable of doing that operation,
+-for each packet that is received on a RX_PORT and performs L2 forwarding.
++for each packet that is received on an RX_PORT and performs L2 forwarding.
+ The destination port is the adjacent port from the enabled portmask, that is,
+ if the first four ports are enabled (portmask 0xf),
+ ports 0 and 1 forward into each other, and ports 2 and 3 forward into each other.
+@@ -54,37 +54,37 @@ The application requires a number of command line options:
+ 
+ where,
+ 
+-*   p PORTMASK: A hexadecimal bitmask of the ports to configure (default is all the ports)
++*   p PORTMASK: A hexadecimal bitmask of the ports to configure. (Default is all the ports.)
+ 
+-*   q NQ: A number of queues (=ports) per lcore (default is 1)
++*   q NQ: A number of queues (=ports) per lcore. (Default is 1.)
+ 
+-*   s: manage all ports from single core
++*   s: manage all ports from a single core.
+ 
+-*   T PERIOD: statistics will be refreshed each PERIOD seconds
++*   T PERIOD: statistics will be refreshed every PERIOD seconds.
+ 
+-    (0 to disable, 10 default, 86400 maximum)
++    (0 to disable, 10 default, 86400 maximum.)
+ 
+-*   cdev_type: select preferred crypto device type: HW, SW or anything (ANY)
++*   cdev_type: select preferred crypto device type: HW, SW or anything (ANY).
+ 
+-    (default is ANY)
++    (Default is ANY.)
+ 
+ *   chain: select the operation chaining to perform: Cipher->Hash (CIPHER_HASH),
+ 
+     Hash->Cipher (HASH_CIPHER), Cipher (CIPHER_ONLY), Hash (HASH_ONLY)
+ 
+-    or AEAD (AEAD)
++    or AEAD (AEAD).
+ 
+-    (default is Cipher->Hash)
++    (Default is Cipher->Hash.)
+ 
+-*   cipher_algo: select the ciphering algorithm (default is aes-cbc)
++*   cipher_algo: select the ciphering algorithm. (Default is aes-cbc.)
+ 
+-*   cipher_op: select the ciphering operation to perform: ENCRYPT or DECRYPT
++*   cipher_op: select the ciphering operation to perform: ENCRYPT or DECRYPT.
+ 
+-    (default is ENCRYPT)
++    (Default is ENCRYPT.)
+ 
+ *   cipher_dataunit_len: set the length of the cipher data-unit.
+ 
+-*   cipher_key: set the ciphering key to be used. Bytes has to be separated with ":"
++*   cipher_key: set the ciphering key to be used. Bytes have to be separated with ":".
+ 
+ *   cipher_key_random_size: set the size of the ciphering key,
+ 
+@@ -92,19 +92,19 @@ where,
+ 
+     Note that if --cipher_key is used, this will be ignored.
+ 
+-*   cipher_iv: set the cipher IV to be used. Bytes has to be separated with ":"
++*   cipher_iv: set the cipher IV to be used. Bytes have to be separated with ":".
+ 
+ *   cipher_iv_random_size: set the size of the cipher IV, which will be generated randomly.
+ 
+     Note that if --cipher_iv is used, this will be ignored.
+ 
+-*   auth_algo: select the authentication algorithm (default is sha1-hmac)
++*   auth_algo: select the authentication algorithm. (Default is sha1-hmac.)
+ 
+-*   auth_op: select the authentication operation to perform: GENERATE or VERIFY
++*   auth_op: select the authentication operation to perform: GENERATE or VERIFY.
+ 
+-    (default is GENERATE)
++    (Default is GENERATE.)
+ 
+-*   auth_key: set the authentication key to be used. Bytes has to be separated with ":"
++*   auth_key: set the authentication key to be used. Bytes have to be separated with ":".
+ 
+ *   auth_key_random_size: set the size of the authentication key,
+ 
+@@ -112,19 +112,19 @@ where,
+ 
+     Note that if --auth_key is used, this will be ignored.
+ 
+-*   auth_iv: set the auth IV to be used. Bytes has to be separated with ":"
++*   auth_iv: set the auth IV to be used. Bytes have to be separated with ":".
+ 
+ *   auth_iv_random_size: set the size of the auth IV, which will be generated randomly.
+ 
+     Note that if --auth_iv is used, this will be ignored.
+ 
+-*   aead_algo: select the AEAD algorithm (default is aes-gcm)
++*   aead_algo: select the AEAD algorithm. (Default is aes-gcm.)
+ 
+-*   aead_op: select the AEAD operation to perform: ENCRYPT or DECRYPT
++*   aead_op: select the AEAD operation to perform: ENCRYPT or DECRYPT.
+ 
+-    (default is ENCRYPT)
++    (Default is ENCRYPT.)
+ 
+-*   aead_key: set the AEAD key to be used. Bytes has to be separated with ":"
++*   aead_key: set the AEAD key to be used. Bytes have to be separated with ":".
+ 
+ *   aead_key_random_size: set the size of the AEAD key,
+ 
+@@ -132,13 +132,13 @@ where,
+ 
+     Note that if --aead_key is used, this will be ignored.
+ 
+-*   aead_iv: set the AEAD IV to be used. Bytes has to be separated with ":"
++*   aead_iv: set the AEAD IV to be used. Bytes have to be separated with ":".
+ 
+ *   aead_iv_random_size: set the size of the AEAD IV, which will be generated randomly.
+ 
+     Note that if --aead_iv is used, this will be ignored.
+ 
+-*   aad: set the AAD to be used. Bytes has to be separated with ":"
++*   aad: set the AAD to be used. Bytes have to be separated with ":".
+ 
+ *   aad_random_size: set the size of the AAD, which will be generated randomly.
+ 
+@@ -151,9 +151,9 @@ where,
+ *   cryptodev_mask: A hexadecimal bitmask of the cryptodevs to be used by the
+     application.
+ 
+-    (default is all cryptodevs).
++    (Default is all cryptodevs.)
+ 
+-*   [no-]mac-updating: Enable or disable MAC addresses updating (enabled by default).
++*   [no-]mac-updating: Enable or disable MAC address updating. (Enabled by default.)
+ 
+ 
+ The application requires that crypto devices capable of performing
+@@ -165,7 +165,7 @@ To run the application in linux environment with 2 lcores, 2 ports and 2 crypto
+ 
+ .. code-block:: console
+ 
+-    $ ./<build_dir>/examples/dpdk-l2fwd-crypto -l 0-1 -n 4 --vdev "crypto_aesni_mb0" \
++    $ ./<build_dir>/examples/dpdk-l2fwd-crypto -l 0-1 --vdev "crypto_aesni_mb0" \
+     --vdev "crypto_aesni_mb1" -- -p 0x3 --chain CIPHER_HASH \
+     --cipher_op ENCRYPT --cipher_algo aes-cbc \
+     --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
+@@ -179,7 +179,7 @@ and the Environment Abstraction Layer (EAL) options.
+ 
+     * The ``l2fwd-crypto`` sample application requires IPv4 packets for crypto operation.
+ 
+-    * If multiple Ethernet ports is passed, then equal number of crypto devices are to be passed.
++    * If multiple Ethernet ports are passed, then an equal number of crypto devices must be passed.
+ 
+     * All crypto devices shall use the same session.
+ 
+@@ -187,7 +187,7 @@ Explanation
+ -----------
+ 
+ The L2 forward with Crypto application demonstrates the performance of a crypto operation
+-on a packet received on a RX PORT before forwarding it to a TX PORT.
++on a packet received on an RX PORT before forwarding it to a TX PORT.
+ 
+ The following figure illustrates a sample flow of a packet in the application,
+ from reception until transmission.
+@@ -196,7 +196,7 @@ from reception until transmission.
+ 
+ .. figure:: img/l2_fwd_encrypt_flow.*
+ 
+-   Encryption flow Through the L2 Forwarding with Crypto Application
++   Encryption flow through the L2 Forwarding with Crypto Application
+ 
+ 
+ The following sections provide some explanation of the application.
+@@ -206,8 +206,8 @@ Crypto operation specification
+ 
+ All the packets received in all the ports get transformed by the crypto device/s
+ (ciphering and/or authentication).
+-The crypto operation to be performed on the packet is parsed from the command line
+-(go to "Running the Application" section for all the options).
++The crypto operation to be performed on the packet is parsed from the command line.
++(Go to "Running the Application" section for all the options.)
+ 
+ If no parameter is passed, the default crypto operation is:
+ 
+@@ -244,7 +244,7 @@ when running the application.
+ 
+ The initialize_cryptodevs() function performs the device initialization.
+ It iterates through the list of the available crypto devices and
+-check which ones are capable of performing the operation.
++checks which ones are capable of performing the operation.
+ Each device has a set of capabilities associated with it,
+ which are stored in the device info structure, so the function checks if the operation
+ is within the structure of each device.
+@@ -291,7 +291,7 @@ This session is created and is later attached to the crypto operation:
+ Crypto operation creation
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-Given N packets received from a RX PORT, N crypto operations are allocated
++Given N packets received from an RX PORT, N crypto operations are allocated
+ and filled:
+ 
+ .. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+diff --git a/dpdk/doc/guides/sample_app_ug/server_node_efd.rst b/dpdk/doc/guides/sample_app_ug/server_node_efd.rst
+index 605eb09a61..c6cbc3def6 100644
+--- a/dpdk/doc/guides/sample_app_ug/server_node_efd.rst
++++ b/dpdk/doc/guides/sample_app_ug/server_node_efd.rst
+@@ -191,7 +191,7 @@ flow is not handled by the node.
+ .. literalinclude:: ../../../examples/server_node_efd/node/node.c
+     :language: c
+     :start-after: Packets dequeued from the shared ring. 8<
+-    :end-before: >8 End of packets dequeueing.
++    :end-before: >8 End of packets dequeuing.
+ 
+ Finally, note that both processes updates statistics, such as transmitted, received
+ and dropped packets, which are shown and refreshed by the server app.
+diff --git a/dpdk/doc/guides/sample_app_ug/skeleton.rst b/dpdk/doc/guides/sample_app_ug/skeleton.rst
+index 6d0de64401..08ddd7aa59 100644
+--- a/dpdk/doc/guides/sample_app_ug/skeleton.rst
++++ b/dpdk/doc/guides/sample_app_ug/skeleton.rst
+@@ -54,7 +54,7 @@ function. The value returned is the number of parsed arguments:
+ .. literalinclude:: ../../../examples/skeleton/basicfwd.c
+     :language: c
+     :start-after: Initializion the Environment Abstraction Layer (EAL). 8<
+-    :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
++    :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
+     :dedent: 1
+ 
+ 
+diff --git a/dpdk/doc/guides/sample_app_ug/vm_power_management.rst b/dpdk/doc/guides/sample_app_ug/vm_power_management.rst
+index 7160b6a63a..9ce87956c9 100644
+--- a/dpdk/doc/guides/sample_app_ug/vm_power_management.rst
++++ b/dpdk/doc/guides/sample_app_ug/vm_power_management.rst
+@@ -681,7 +681,7 @@ The following is an example JSON string for a power management request.
+    "resource_id": 10
+    }}
+ 
+-To query the available frequences of an lcore, use the query_cpu_freq command.
++To query the available frequencies of an lcore, use the query_cpu_freq command.
+ Where {core_num} is the lcore to query.
+ Before using this command, please enable responses via the set_query command on the host.
+ 
+diff --git a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+index 44228cd7d2..94792d88cc 100644
+--- a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst
++++ b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+@@ -3510,7 +3510,7 @@ Tunnel offload
+ Indicate tunnel offload rule type
+ 
+ - ``tunnel_set {tunnel_id}``: mark rule as tunnel offload decap_set type.
+-- ``tunnel_match {tunnel_id}``:  mark rule as tunel offload match type.
++- ``tunnel_match {tunnel_id}``:  mark rule as tunnel offload match type.
+ 
+ Matching pattern
+ ^^^^^^^^^^^^^^^^
+diff --git a/dpdk/drivers/baseband/acc100/acc100_pf_enum.h b/dpdk/drivers/baseband/acc100/acc100_pf_enum.h
+index a1ee416d26..2fba667627 100644
+--- a/dpdk/drivers/baseband/acc100/acc100_pf_enum.h
++++ b/dpdk/drivers/baseband/acc100/acc100_pf_enum.h
+@@ -238,6 +238,24 @@ enum {
+ 	HWPfPermonBTotalLatLowBusMon          =  0x00BAC504,
+ 	HWPfPermonBTotalLatUpperBusMon        =  0x00BAC508,
+ 	HWPfPermonBTotalReqCntBusMon          =  0x00BAC50C,
++	HwPfFabI2MArbCntrlReg                 =  0x00BB0000,
++	HWPfFabricMode                        =  0x00BB1000,
++	HwPfFabI2MGrp0DebugReg                =  0x00BBF000,
++	HwPfFabI2MGrp1DebugReg                =  0x00BBF004,
++	HwPfFabI2MGrp2DebugReg                =  0x00BBF008,
++	HwPfFabI2MGrp3DebugReg                =  0x00BBF00C,
++	HwPfFabI2MBuf0DebugReg                =  0x00BBF010,
++	HwPfFabI2MBuf1DebugReg                =  0x00BBF014,
++	HwPfFabI2MBuf2DebugReg                =  0x00BBF018,
++	HwPfFabI2MBuf3DebugReg                =  0x00BBF01C,
++	HwPfFabM2IBuf0Grp0DebugReg            =  0x00BBF020,
++	HwPfFabM2IBuf1Grp0DebugReg            =  0x00BBF024,
++	HwPfFabM2IBuf0Grp1DebugReg            =  0x00BBF028,
++	HwPfFabM2IBuf1Grp1DebugReg            =  0x00BBF02C,
++	HwPfFabM2IBuf0Grp2DebugReg            =  0x00BBF030,
++	HwPfFabM2IBuf1Grp2DebugReg            =  0x00BBF034,
++	HwPfFabM2IBuf0Grp3DebugReg            =  0x00BBF038,
++	HwPfFabM2IBuf1Grp3DebugReg            =  0x00BBF03C,
+ 	HWPfFecUl5gCntrlReg                   =  0x00BC0000,
+ 	HWPfFecUl5gI2MThreshReg               =  0x00BC0004,
+ 	HWPfFecUl5gVersionReg                 =  0x00BC0100,
+diff --git a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c
+index 1c6080f2f8..6cdc6e65f7 100644
+--- a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c
++++ b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c
+@@ -141,8 +141,8 @@ aqDepth(int qg_idx, struct rte_acc100_conf *acc100_conf)
+ 	int acc_enum = accFromQgid(qg_idx, acc100_conf);
+ 	qtopFromAcc(&q_top, acc_enum, acc100_conf);
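++	/* The returned value is consumed as a log2 queue depth, so 0 must never
++	 * escape from here, even when the queue topology lookup fails.
++	 */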
+ 	if (unlikely(q_top == NULL))
+-		return 0;
+-	return q_top->aq_depth_log2;
++		return 1;
++	return RTE_MAX(1, q_top->aq_depth_log2);
+ }
+ 
+ /* Return the AQ depth for a Queue Group Index */
+@@ -1236,6 +1236,8 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
+ 			return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;
+ 	}
+ 	/* LBRM case - includes a division by N */
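++	/* n is a multiple of z_c, so z_c == 0 would make the divisions below fault. */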
++	if (unlikely(z_c == 0))
++		return 0;
+ 	if (rv_index == 1)
+ 		return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)
+ 				/ n) * z_c;
+@@ -1460,8 +1462,7 @@ acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,
+ 	next_triplet++;
+ 
+ 	while (cb_len > 0) {
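++		/* Inputs may chain at most ACC100_DMA_MAX_NUM_POINTERS_IN of the
++		 * 14 DMA triplets (see rte_acc100_pmd.h), not the full set.
++		 */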
+-		if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS &&
+-				m->next != NULL) {
++		if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS_IN && m->next != NULL) {
+ 
+ 			m = m->next;
+ 			*seg_total_left = rte_pktmbuf_data_len(m);
+@@ -1765,6 +1766,10 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
+ 
+ 	/* Soft output */
+ 	if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
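++		/* A soft-output request without a supplied buffer cannot be processed. */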
++		if (op->turbo_dec.soft_output.data == 0) {
++			rte_bbdev_log(ERR, "Soft output is not defined");
++			return -1;
++		}
+ 		if (check_bit(op->turbo_dec.op_flags,
+ 				RTE_BBDEV_TURBO_EQUALIZER))
+ 			*s_out_length = e;
+@@ -4413,7 +4418,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ {
+ 	rte_bbdev_log(INFO, "rte_acc100_configure");
+ 	uint32_t value, address, status;
+-	int qg_idx, template_idx, vf_idx, acc, i;
++	int qg_idx, template_idx, vf_idx, acc, i, j;
+ 	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
+ 
+ 	/* Compile time checks */
+@@ -4433,6 +4438,9 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	/* Store configuration */
+ 	rte_memcpy(&d->acc100_conf, conf, sizeof(d->acc100_conf));
+ 
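++	/* Detect the first configuration since reset: the bridge control
++	 * register holds ACC100_CFG_PCI_BRIDGE only once it has been programmed.
++	 */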
++	value = acc100_reg_read(d, HwPfPcieGpexBridgeControl);
++	bool firstCfg = (value != ACC100_CFG_PCI_BRIDGE);
++
+ 	/* PCIe Bridge configuration */
+ 	acc100_reg_write(d, HwPfPcieGpexBridgeControl, ACC100_CFG_PCI_BRIDGE);
+ 	for (i = 1; i < ACC100_GPEX_AXIMAP_NUM; i++)
+@@ -4453,20 +4461,9 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	value = 1;
+ 	acc100_reg_write(d, address, value);
+ 
+-	/* DDR Configuration */
+-	address = HWPfDdrBcTim6;
+-	value = acc100_reg_read(d, address);
+-	value &= 0xFFFFFFFB; /* Bit 2 */
+-#ifdef ACC100_DDR_ECC_ENABLE
+-	value |= 0x4;
+-#endif
+-	acc100_reg_write(d, address, value);
+-	address = HWPfDdrPhyDqsCountNum;
+-#ifdef ACC100_DDR_ECC_ENABLE
+-	value = 9;
+-#else
+-	value = 8;
+-#endif
++	/* Enable granular dynamic clock gating */
++	address = HWPfHiClkGateHystReg;
++	value = ACC100_CLOCK_GATING_EN;
+ 	acc100_reg_write(d, address, value);
+ 
+ 	/* Set default descriptor signature */
+@@ -4484,6 +4481,17 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	address = HWPfDmaAxcacheReg;
+ 	acc100_reg_write(d, address, value);
+ 
++	/* Adjust PCIe Lane adaptation */
++	for (i = 0; i < ACC100_QUAD_NUMS; i++)
++		for (j = 0; j < ACC100_LANES_PER_QUAD; j++)
++			acc100_reg_write(d, HwPfPcieLnAdaptctrl + i * ACC100_PCIE_QUAD_OFFSET
++					+ j * ACC100_PCIE_LANE_OFFSET, ACC100_ADAPT);
++
++	/* Enable PCIe live adaptation */
++	for (i = 0; i < ACC100_QUAD_NUMS; i++)
++		acc100_reg_write(d, HwPfPciePcsEqControl +
++				i * ACC100_PCIE_QUAD_OFFSET, ACC100_PCS_EQ);
++
+ 	/* Default DMA Configuration (Qmgr Enabled) */
+ 	address = HWPfDmaConfig0Reg;
+ 	value = 0;
+@@ -4502,6 +4510,11 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	value = HWPfQmgrEgressQueuesTemplate;
+ 	acc100_reg_write(d, address, value);
+ 
++	/* Default Fabric Mode */
++	address = HWPfFabricMode;
++	value = ACC100_FABRIC_MODE;
++	acc100_reg_write(d, address, value);
++
+ 	/* ===== Qmgr Configuration ===== */
+ 	/* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL */
+ 	int totalQgs = conf->q_ul_4g.num_qgroups +
+@@ -4520,22 +4533,17 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	}
+ 
+ 	/* Template Priority in incremental order */
+-	for (template_idx = 0; template_idx < ACC100_NUM_TMPL;
+-			template_idx++) {
+-		address = HWPfQmgrGrpTmplateReg0Indx +
+-		ACC100_BYTES_IN_WORD * (template_idx % 8);
++	for (template_idx = 0; template_idx < ACC100_NUM_TMPL; template_idx++) {
++		address = HWPfQmgrGrpTmplateReg0Indx + ACC100_BYTES_IN_WORD * template_idx;
+ 		value = ACC100_TMPL_PRI_0;
+ 		acc100_reg_write(d, address, value);
+-		address = HWPfQmgrGrpTmplateReg1Indx +
+-		ACC100_BYTES_IN_WORD * (template_idx % 8);
++		address = HWPfQmgrGrpTmplateReg1Indx + ACC100_BYTES_IN_WORD * template_idx;
+ 		value = ACC100_TMPL_PRI_1;
+ 		acc100_reg_write(d, address, value);
+-		address = HWPfQmgrGrpTmplateReg2indx +
+-		ACC100_BYTES_IN_WORD * (template_idx % 8);
++		address = HWPfQmgrGrpTmplateReg2indx + ACC100_BYTES_IN_WORD * template_idx;
+ 		value = ACC100_TMPL_PRI_2;
+ 		acc100_reg_write(d, address, value);
+-		address = HWPfQmgrGrpTmplateReg3Indx +
+-		ACC100_BYTES_IN_WORD * (template_idx % 8);
++		address = HWPfQmgrGrpTmplateReg3Indx + ACC100_BYTES_IN_WORD * template_idx;
+ 		value = ACC100_TMPL_PRI_3;
+ 		acc100_reg_write(d, address, value);
+ 	}
+@@ -4586,9 +4594,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 			numEngines++;
+ 		} else
+ 			acc100_reg_write(d, address, 0);
+-#if RTE_ACC100_SINGLE_FEC == 1
+-		value = 0;
+-#endif
+ 	}
+ 	printf("Number of 5GUL engines %d\n", numEngines);
+ 	/* 4GDL */
+@@ -4603,9 +4608,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 		address = HWPfQmgrGrpTmplateReg4Indx
+ 				+ ACC100_BYTES_IN_WORD * template_idx;
+ 		acc100_reg_write(d, address, value);
+-#if RTE_ACC100_SINGLE_FEC == 1
+-			value = 0;
+-#endif
+ 	}
+ 	/* 5GDL */
+ 	numQqsAcc += numQgs;
+@@ -4619,13 +4621,10 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 		address = HWPfQmgrGrpTmplateReg4Indx
+ 				+ ACC100_BYTES_IN_WORD * template_idx;
+ 		acc100_reg_write(d, address, value);
+-#if RTE_ACC100_SINGLE_FEC == 1
+-		value = 0;
+-#endif
+ 	}
+ 
+ 	/* Queue Group Function mapping */
+-	int qman_func_id[5] = {0, 2, 1, 3, 4};
++	int qman_func_id[8] = {0, 2, 1, 3, 4, 0, 0, 0};
+ 	address = HWPfQmgrGrpFunction0;
+ 	value = 0;
+ 	for (qg_idx = 0; qg_idx < 8; qg_idx++) {
+@@ -4656,7 +4655,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 		}
+ 	}
+ 
+-	/* This pointer to ARAM (256kB) is shifted by 2 (4B per register) */
++	/* This pointer to ARAM (128kB) is shifted by 2 (4B per register) */
+ 	uint32_t aram_address = 0;
+ 	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
+ 		for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
+@@ -4681,6 +4680,11 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 
+ 	/* ==== HI Configuration ==== */
+ 
++	/* No Info Ring/MSI by default */
++	acc100_reg_write(d, HWPfHiInfoRingIntWrEnRegPf, 0);
++	acc100_reg_write(d, HWPfHiInfoRingVf2pfLoWrEnReg, 0);
++	acc100_reg_write(d, HWPfHiCfgMsiIntWrEnRegPf, 0xFFFFFFFF);
++	acc100_reg_write(d, HWPfHiCfgMsiVf2pfLoWrEnReg, 0xFFFFFFFF);
+ 	/* Prevent Block on Transmit Error */
+ 	address = HWPfHiBlockTransmitOnErrorEn;
+ 	value = 0;
+@@ -4693,10 +4697,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	address = HWPfHiPfMode;
+ 	value = (conf->pf_mode_en) ? ACC100_PF_VAL : 0;
+ 	acc100_reg_write(d, address, value);
+-	/* Enable Error Detection in HW */
+-	address = HWPfDmaErrorDetectionEn;
+-	value = 0x3D7;
+-	acc100_reg_write(d, address, value);
+ 
+ 	/* QoS overflow init */
+ 	value = 1;
+@@ -4706,7 +4706,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	acc100_reg_write(d, address, value);
+ 
+ 	/* HARQ DDR Configuration */
+-	unsigned int ddrSizeInMb = 512; /* Fixed to 512 MB per VF for now */
++	unsigned int ddrSizeInMb = ACC100_HARQ_DDR;
+ 	for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
+ 		address = HWPfDmaVfDdrBaseRw + vf_idx
+ 				* 0x10;
+@@ -4720,6 +4720,88 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
+ 	if (numEngines < (ACC100_SIG_UL_5G_LAST + 1))
+ 		poweron_cleanup(bbdev, d, conf);
+ 
++	uint32_t version = 0;
++	for (i = 0; i < 4; i++)
++		version += acc100_reg_read(d,
++				HWPfDdrPhyIdtmFwVersion + 4 * i) << (8 * i);
++	if (version != ACC100_PRQ_DDR_VER) {
++		printf("* Note: Not on DDR PRQ version %8x != %08x\n",
++				version, ACC100_PRQ_DDR_VER);
++	} else if (firstCfg) {
++		/* ---- DDR configuration at boot up --- */
++		/* Read Clear Ddr training status */
++		acc100_reg_read(d, HWPfChaDdrStDoneStatus);
++		/* Reset PHY/IDTM/UMMC */
++		acc100_reg_write(d, HWPfChaDdrWbRstCfg, 3);
++		acc100_reg_write(d, HWPfChaDdrApbRstCfg, 2);
++		acc100_reg_write(d, HWPfChaDdrPhyRstCfg, 2);
++		acc100_reg_write(d, HWPfChaDdrCpuRstCfg, 3);
++		acc100_reg_write(d, HWPfChaDdrSifRstCfg, 2);
++		usleep(ACC100_MS_IN_US);
++		/* Reset WB and APB resets */
++		acc100_reg_write(d, HWPfChaDdrWbRstCfg, 2);
++		acc100_reg_write(d, HWPfChaDdrApbRstCfg, 3);
++		/* Configure PHY-IDTM */
++		acc100_reg_write(d, HWPfDdrPhyIdletimeout, 0x3e8);
++		/* IDTM timing registers */
++		acc100_reg_write(d, HWPfDdrPhyRdLatency, 0x13);
++		acc100_reg_write(d, HWPfDdrPhyRdLatencyDbi, 0x15);
++		acc100_reg_write(d, HWPfDdrPhyWrLatency, 0x10011);
++		/* Configure SDRAM MRS registers */
++		acc100_reg_write(d, HWPfDdrPhyMr01Dimm, 0x3030b70);
++		acc100_reg_write(d, HWPfDdrPhyMr01DimmDbi, 0x3030b50);
++		acc100_reg_write(d, HWPfDdrPhyMr23Dimm, 0x30);
++		acc100_reg_write(d, HWPfDdrPhyMr67Dimm, 0xc00);
++		acc100_reg_write(d, HWPfDdrPhyMr45Dimm, 0x4000000);
++		/* Configure active lanes */
++		acc100_reg_write(d, HWPfDdrPhyDqsCountMax, 0x9);
++		acc100_reg_write(d, HWPfDdrPhyDqsCountNum, 0x9);
++		/* Configure WR/RD leveling timing registers */
++		acc100_reg_write(d, HWPfDdrPhyWrlvlWwRdlvlRr, 0x101212);
++		/* Configure what trainings to execute */
++		acc100_reg_write(d, HWPfDdrPhyTrngType, 0x2d3c);
++		/* Releasing PHY reset */
++		acc100_reg_write(d, HWPfChaDdrPhyRstCfg, 3);
++		/* Configure Memory Controller registers */
++		acc100_reg_write(d, HWPfDdrMemInitPhyTrng0, 0x3);
++		acc100_reg_write(d, HWPfDdrBcDram, 0x3c232003);
++		acc100_reg_write(d, HWPfDdrBcAddrMap, 0x31);
++		/* Configure UMMC BC timing registers */
++		acc100_reg_write(d, HWPfDdrBcRef, 0xa22);
++		acc100_reg_write(d, HWPfDdrBcTim0, 0x4050501);
++		acc100_reg_write(d, HWPfDdrBcTim1, 0xf0b0476);
++		acc100_reg_write(d, HWPfDdrBcTim2, 0x103);
++		acc100_reg_write(d, HWPfDdrBcTim3, 0x144050a1);
++		acc100_reg_write(d, HWPfDdrBcTim4, 0x23300);
++		acc100_reg_write(d, HWPfDdrBcTim5, 0x4230276);
++		acc100_reg_write(d, HWPfDdrBcTim6, 0x857914);
++		acc100_reg_write(d, HWPfDdrBcTim7, 0x79100232);
++		acc100_reg_write(d, HWPfDdrBcTim8, 0x100007ce);
++		acc100_reg_write(d, HWPfDdrBcTim9, 0x50020);
++		acc100_reg_write(d, HWPfDdrBcTim10, 0x40ee);
++		/* Configure UMMC DFI timing registers */
++		acc100_reg_write(d, HWPfDdrDfiInit, 0x5000);
++		acc100_reg_write(d, HWPfDdrDfiTim0, 0x15030006);
++		acc100_reg_write(d, HWPfDdrDfiTim1, 0x11305);
++		acc100_reg_write(d, HWPfDdrDfiPhyUpdEn, 0x1);
++		acc100_reg_write(d, HWPfDdrUmmcIntEn, 0x1f);
++		/* Release IDTM CPU out of reset */
++		acc100_reg_write(d, HWPfChaDdrCpuRstCfg, 0x2);
++		/* Wait PHY-IDTM to finish static training */
++		for (i = 0; i < ACC100_DDR_TRAINING_MAX; i++) {
++			usleep(ACC100_MS_IN_US);
++			value = acc100_reg_read(d,
++					HWPfChaDdrStDoneStatus);
++			if (value & 1)
++				break;
++		}
++		printf("DDR Training completed in %d ms", i);
++		/* Enable Memory Controller */
++		acc100_reg_write(d, HWPfDdrUmmcCtrl, 0x401);
++		/* Release AXI interface reset */
++		acc100_reg_write(d, HWPfChaDdrSifRstCfg, 3);
++	}
++
+ 	rte_bbdev_log_debug("PF Tip configuration complete for %s", dev_name);
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h
+index 03ed0b3e1a..071b37cf9d 100644
+--- a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h
++++ b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h
+@@ -31,11 +31,6 @@
+ #define RTE_ACC100_PF_DEVICE_ID        (0x0d5c)
+ #define RTE_ACC100_VF_DEVICE_ID        (0x0d5d)
+ 
+-/* Define as 1 to use only a single FEC engine */
+-#ifndef RTE_ACC100_SINGLE_FEC
+-#define RTE_ACC100_SINGLE_FEC 0
+-#endif
+-
+ /* Values used in filling in descriptors */
+ #define ACC100_DMA_DESC_TYPE           2
+ #define ACC100_DMA_CODE_BLK_MODE       0
+@@ -113,6 +108,7 @@
+ #define ACC100_SW_RING_MEM_ALLOC_ATTEMPTS 5
+ #define ACC100_MAX_QUEUE_DEPTH            1024
+ #define ACC100_DMA_MAX_NUM_POINTERS       14
++#define ACC100_DMA_MAX_NUM_POINTERS_IN    7
+ #define ACC100_DMA_DESC_PADDING           8
+ #define ACC100_FCW_PADDING                12
+ #define ACC100_DESC_FCW_OFFSET            192
+@@ -152,6 +148,12 @@
+ #define ACC100_CFG_QMGR_HI_P    0x0F0F
+ #define ACC100_CFG_PCI_AXI      0xC003
+ #define ACC100_CFG_PCI_BRIDGE   0x40006033
++#define ACC100_QUAD_NUMS        4
++#define ACC100_LANES_PER_QUAD   4
++#define ACC100_PCIE_LANE_OFFSET 0x200
++#define ACC100_PCIE_QUAD_OFFSET 0x2000
++#define ACC100_PCS_EQ           0x6007
++#define ACC100_ADAPT            0x8400
+ #define ACC100_ENGINE_OFFSET    0x1000
+ #define ACC100_RESET_HI         0x20100
+ #define ACC100_RESET_LO         0x20000
+@@ -159,6 +161,15 @@
+ #define ACC100_ENGINES_MAX      9
+ #define ACC100_LONG_WAIT        1000
+ #define ACC100_GPEX_AXIMAP_NUM  17
++#define ACC100_CLOCK_GATING_EN  0x30000
++#define ACC100_FABRIC_MODE      0xB
++/* DDR Size per VF - 512MB by default
++ * Can be increased up to 4 GB with single PF/VF
++ */
++#define ACC100_HARQ_DDR         (512 * 1)
++#define ACC100_PRQ_DDR_VER       0x10092020
++#define ACC100_MS_IN_US         (1000)
++#define ACC100_DDR_TRAINING_MAX (5000)
+ 
+ /* ACC100 DMA Descriptor triplet */
+ struct acc100_dma_triplet {
+diff --git a/dpdk/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c b/dpdk/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
+index 92decc3e05..21d35292a3 100644
+--- a/dpdk/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
++++ b/dpdk/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
+@@ -2097,7 +2097,7 @@ dequeue_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
+ 	rte_bbdev_log_debug("DMA response desc %p", desc);
+ 
+ 	*op = desc->enc_req.op_addr;
+-	/* Check the decriptor error field, return 1 on error */
++	/* Check the descriptor error field, return 1 on error */
+ 	desc_error = check_desc_error(desc->enc_req.error);
+ 	(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
+ 
+@@ -2139,7 +2139,7 @@ dequeue_enc_one_op_tb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
+ 	for (cb_idx = 0; cb_idx < cbs_in_op; ++cb_idx) {
+ 		desc = q->ring_addr + ((q->head_free_desc + desc_offset +
+ 				cb_idx) & q->sw_ring_wrap_mask);
+-		/* Check the decriptor error field, return 1 on error */
++		/* Check the descriptor error field, return 1 on error */
+ 		desc_error = check_desc_error(desc->enc_req.error);
+ 		status |=  desc_error << RTE_BBDEV_DATA_ERROR;
+ 		rte_bbdev_log_debug("DMA response desc %p", desc);
+@@ -2177,7 +2177,7 @@ dequeue_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
+ 	(*op)->turbo_dec.iter_count = (desc->dec_req.iter + 2) >> 1;
+ 	/* crc_pass = 0 when decoder fails */
+ 	(*op)->status = !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
+-	/* Check the decriptor error field, return 1 on error */
++	/* Check the descriptor error field, return 1 on error */
+ 	desc_error = check_desc_error(desc->enc_req.error);
+ 	(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
+ 	return 1;
+@@ -2221,7 +2221,7 @@ dequeue_dec_one_op_tb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
+ 		iter_count = RTE_MAX(iter_count, (uint8_t) desc->dec_req.iter);
+ 		/* crc_pass = 0 when decoder fails, one fails all */
+ 		status |= !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
+-		/* Check the decriptor error field, return 1 on error */
++		/* Check the descriptor error field, return 1 on error */
+ 		desc_error = check_desc_error(desc->enc_req.error);
+ 		status |= desc_error << RTE_BBDEV_DATA_ERROR;
+ 		rte_bbdev_log_debug("DMA response desc %p", desc);
+diff --git a/dpdk/drivers/baseband/null/bbdev_null.c b/dpdk/drivers/baseband/null/bbdev_null.c
+index 753d920e18..08cff582b9 100644
+--- a/dpdk/drivers/baseband/null/bbdev_null.c
++++ b/dpdk/drivers/baseband/null/bbdev_null.c
+@@ -31,7 +31,7 @@ struct bbdev_null_params {
+ 	uint16_t queues_num;  /*< Null BBDEV queues number */
+ };
+ 
+-/* Accecptable params for null BBDEV devices */
++/* Acceptable params for null BBDEV devices */
+ #define BBDEV_NULL_MAX_NB_QUEUES_ARG  "max_nb_queues"
+ #define BBDEV_NULL_SOCKET_ID_ARG      "socket_id"
+ 
+diff --git a/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c
+index b234bb751a..c6b1eb8679 100644
+--- a/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c
++++ b/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c
+@@ -61,7 +61,7 @@ struct turbo_sw_params {
+ 	uint16_t queues_num;  /*< Turbo SW device queues number */
+ };
+ 
+-/* Accecptable params for Turbo SW devices */
++/* Acceptable params for Turbo SW devices */
+ #define TURBO_SW_MAX_NB_QUEUES_ARG  "max_nb_queues"
+ #define TURBO_SW_SOCKET_ID_ARG      "socket_id"
+ 
+diff --git a/dpdk/drivers/bus/auxiliary/version.map b/dpdk/drivers/bus/auxiliary/version.map
+index a52260657c..dc993e84ff 100644
+--- a/dpdk/drivers/bus/auxiliary/version.map
++++ b/dpdk/drivers/bus/auxiliary/version.map
+@@ -4,4 +4,6 @@ EXPERIMENTAL {
+ 	# added in 21.08
+ 	rte_auxiliary_register;
+ 	rte_auxiliary_unregister;
++
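++	# local: * hides every symbol that is not explicitly exported above.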
++	local: *;
+ };
+diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c
+index 737ac8d8c5..5546a9cb8d 100644
+--- a/dpdk/drivers/bus/dpaa/dpaa_bus.c
++++ b/dpdk/drivers/bus/dpaa/dpaa_bus.c
+@@ -70,7 +70,7 @@ compare_dpaa_devices(struct rte_dpaa_device *dev1,
+ {
+ 	int comp = 0;
+ 
+-	/* Segragating ETH from SEC devices */
++	/* Segregating ETH from SEC devices */
+ 	if (dev1->device_type > dev2->device_type)
+ 		comp = 1;
+ 	else if (dev1->device_type < dev2->device_type)
+diff --git a/dpdk/drivers/bus/dpaa/include/fsl_qman.h b/dpdk/drivers/bus/dpaa/include/fsl_qman.h
+index 7ef2f3b2e3..9b63e559bc 100644
+--- a/dpdk/drivers/bus/dpaa/include/fsl_qman.h
++++ b/dpdk/drivers/bus/dpaa/include/fsl_qman.h
+@@ -1353,7 +1353,7 @@ __rte_internal
+ int qman_irqsource_add(u32 bits);
+ 
+ /**
+- * qman_fq_portal_irqsource_add - samilar to qman_irqsource_add, but it
++ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
+  * takes portal (fq specific) as input rather than using the thread affined
+  * portal.
+  */
+@@ -1416,7 +1416,7 @@ __rte_internal
+ struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
+ 
+ /**
+- * qman_dqrr_consume - Consume the DQRR entriy after volatile dequeue
++ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
+  * @fq: Frame Queue on which the volatile dequeue command is issued
+  * @dq: DQRR entry to consume. This is the one which is provided by the
+  *    'qbman_dequeue' command.
+@@ -2017,7 +2017,7 @@ int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+  * @cgr: the 'cgr' object to deregister
+  *
+  * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+- * is executed. This must be excuted on the same affine portal on which it was
++ * is executed. This must be executed on the same affine portal on which it was
+  * created.
+  */
+ __rte_internal
+diff --git a/dpdk/drivers/bus/dpaa/include/fsl_usd.h b/dpdk/drivers/bus/dpaa/include/fsl_usd.h
+index dcf35e4adb..97279421ad 100644
+--- a/dpdk/drivers/bus/dpaa/include/fsl_usd.h
++++ b/dpdk/drivers/bus/dpaa/include/fsl_usd.h
+@@ -40,7 +40,7 @@ struct dpaa_raw_portal {
+ 	/* Specifies the stash request queue this portal should use */
+ 	uint8_t sdest;
+ 
+-	/* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ 	 * for don't care.  The portal index will be populated by the
+ 	 * driver when the ioctl() successfully completes.
+ 	 */
+diff --git a/dpdk/drivers/bus/dpaa/include/process.h b/dpdk/drivers/bus/dpaa/include/process.h
+index a922988607..48d6b5693f 100644
+--- a/dpdk/drivers/bus/dpaa/include/process.h
++++ b/dpdk/drivers/bus/dpaa/include/process.h
+@@ -49,7 +49,7 @@ struct dpaa_portal_map {
+ struct dpaa_ioctl_portal_map {
+ 	/* Input parameter, is a qman or bman portal required. */
+ 	enum dpaa_portal_type type;
+-	/* Specifes a specific portal index to map or 0xffffffff
++	/* Specifies a specific portal index to map or 0xffffffff
+ 	 * for don't care.
+ 	 */
+ 	uint32_t index;
+diff --git a/dpdk/drivers/bus/fslmc/fslmc_bus.c b/dpdk/drivers/bus/fslmc/fslmc_bus.c
+index a0ef24cdc8..53fd75539e 100644
+--- a/dpdk/drivers/bus/fslmc/fslmc_bus.c
++++ b/dpdk/drivers/bus/fslmc/fslmc_bus.c
+@@ -539,7 +539,7 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
+ 
+ 	fslmc_bus = driver->fslmc_bus;
+ 
+-	/* Cleanup the PA->VA Translation table; From whereever this function
++	/* Cleanup the PA->VA Translation table; From wherever this function
+ 	 * is called from.
+ 	 */
+ 	if (rte_eal_iova_mode() == RTE_IOVA_PA)
+diff --git a/dpdk/drivers/bus/fslmc/fslmc_vfio.c b/dpdk/drivers/bus/fslmc/fslmc_vfio.c
+index b4704eeae4..abe1cab2ee 100644
+--- a/dpdk/drivers/bus/fslmc/fslmc_vfio.c
++++ b/dpdk/drivers/bus/fslmc/fslmc_vfio.c
+@@ -979,6 +979,7 @@ fslmc_vfio_setup_group(void)
+ {
+ 	int groupid;
+ 	int ret;
++	int vfio_container_fd;
+ 	struct vfio_group_status status = { .argsz = sizeof(status) };
+ 
+ 	/* if already done once */
+@@ -997,8 +998,15 @@ fslmc_vfio_setup_group(void)
+ 		return 0;
+ 	}
+ 
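++	/* Bind the group to a dedicated container rather than the default
++	 * VFIO container shared with other buses.
++	 */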
++	ret = rte_vfio_container_create();
++	if (ret < 0) {
++		DPAA2_BUS_ERR("Failed to open VFIO container");
++		return ret;
++	}
++	vfio_container_fd = ret;
++
+ 	/* Get the actual group fd */
+-	ret = rte_vfio_get_group_fd(groupid);
++	ret = rte_vfio_container_group_bind(vfio_container_fd, groupid);
+ 	if (ret < 0)
+ 		return ret;
+ 	vfio_group.fd = ret;
+diff --git a/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+index 2210a0fa4a..52605ea2c3 100644
+--- a/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
++++ b/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+@@ -178,7 +178,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
+ 	dpio_epoll_fd = epoll_create(1);
+ 	ret = rte_dpaa2_intr_enable(dpio_dev->intr_handle, 0);
+ 	if (ret) {
+-		DPAA2_BUS_ERR("Interrupt registeration failed");
++		DPAA2_BUS_ERR("Interrupt registration failed");
+ 		return -1;
+ 	}
+ 
+diff --git a/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+index b1bba1ac36..957fc62d4c 100644
+--- a/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
++++ b/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+@@ -156,7 +156,7 @@ struct dpaa2_queue {
+ 		struct rte_cryptodev_data *crypto_data;
+ 	};
+ 	uint32_t fqid;		/*!< Unique ID of this queue */
+-	uint16_t flow_id;	/*!< To be used by DPAA2 frmework */
++	uint16_t flow_id;	/*!< To be used by DPAA2 framework */
+ 	uint8_t tc_index;	/*!< traffic class identifier */
+ 	uint8_t cgid;		/*! < Congestion Group id for this queue */
+ 	uint64_t rx_pkts;
+diff --git a/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+index eb68c9cab5..5375ea386d 100644
+--- a/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
++++ b/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+@@ -510,7 +510,7 @@ int qbman_result_has_new_result(struct qbman_swp *s,
+ 				struct qbman_result *dq);
+ 
+ /**
+- * qbman_check_command_complete() - Check if the previous issued dq commnd
++ * qbman_check_command_complete() - Check if the previous issued dq command
+  * is completed and results are available in memory.
+  * @s: the software portal object.
+  * @dq: the dequeue result read from the memory.
+@@ -687,7 +687,7 @@ uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
+ 
+ /**
+  * qbman_result_DQ_odpid() - Get the seqnum field in dequeue response
+- * odpid is valid only if ODPVAILD flag is TRUE.
++ * odpid is valid only if ODPVALID flag is TRUE.
+  * @dq: the dequeue result.
+  *
+  * Return odpid.
+@@ -743,7 +743,7 @@ const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
+  * qbman_result_SCN_state() - Get the state field in State-change notification
+  * @scn: the state change notification.
+  *
+- * Return the state in the notifiation.
++ * Return the state in the notification.
+  */
+ __rte_internal
+ uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
+@@ -825,7 +825,7 @@ uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
+ 
+ /* Parsing CGCU */
+ /**
+- * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
++ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
+  * @scn: the state change notification.
+  *
+  * Return the CGCU resource id.
+@@ -903,14 +903,14 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+ __rte_internal
+ void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+ /**
+- * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor
++ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
+  * @d: the enqueue descriptor.
+  * @response_success: 1 = enqueue with response always; 0 = enqueue with
+  * rejections returned on a FQ.
+  * @opr_id: the order point record id.
+  * @seqnum: the order restoration sequence number.
+- * @incomplete: indiates whether this is the last fragments using the same
+- * sequeue number.
++ * @incomplete: indicates whether this is the last fragment using the same
++ * sequence number.
+  */
+ __rte_internal
+ void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+@@ -1052,10 +1052,10 @@ __rte_internal
+ uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
+ 
+ /**
+- * qbman_result_eqresp_rc() - determines if enqueue command is sucessful.
++ * qbman_result_eqresp_rc() - determines if enqueue command is successful.
+  * @eqresp: enqueue response.
+  *
+- * Return 0 when command is sucessful.
++ * Return 0 when command is successful.
+  */
+ __rte_internal
+ uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
+@@ -1250,7 +1250,7 @@ int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
+ /**
+  * These functions change the FQ flow-control stuff between XON/XOFF. (The
+  * default is XON.) This setting doesn't affect enqueues to the FQ, just
+- * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
++ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
+  * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
+  * changed to XOFF after it had already become truly-scheduled to a channel, and
+  * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
+diff --git a/dpdk/drivers/bus/ifpga/ifpga_bus.c b/dpdk/drivers/bus/ifpga/ifpga_bus.c
+index cbc6809284..c5c8bbd572 100644
+--- a/dpdk/drivers/bus/ifpga/ifpga_bus.c
++++ b/dpdk/drivers/bus/ifpga/ifpga_bus.c
+@@ -64,8 +64,7 @@ ifpga_find_afu_dev(const struct rte_rawdev *rdev,
+ 	struct rte_afu_device *afu_dev = NULL;
+ 
+ 	TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+-		if (afu_dev &&
+-			afu_dev->rawdev == rdev &&
++		if (afu_dev->rawdev == rdev &&
+ 			!ifpga_afu_id_cmp(&afu_dev->id, afu_id))
+ 			return afu_dev;
+ 	}
+@@ -78,8 +77,7 @@ rte_ifpga_find_afu_by_name(const char *name)
+ 	struct rte_afu_device *afu_dev = NULL;
+ 
+ 	TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+-		if (afu_dev &&
+-			!strcmp(afu_dev->device.name, name))
++		if (!strcmp(afu_dev->device.name, name))
+ 			return afu_dev;
+ 	}
+ 	return NULL;
+diff --git a/dpdk/drivers/bus/pci/linux/pci_vfio.c b/dpdk/drivers/bus/pci/linux/pci_vfio.c
+index 1a5e7c2d2a..cd0d0b1670 100644
+--- a/dpdk/drivers/bus/pci/linux/pci_vfio.c
++++ b/dpdk/drivers/bus/pci/linux/pci_vfio.c
+@@ -815,7 +815,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
+ 			continue;
+ 		}
+ 
+-		/* skip non-mmapable BARs */
++		/* skip non-mmappable BARs */
+ 		if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
+ 			free(reg);
+ 			continue;
+diff --git a/dpdk/drivers/bus/pci/pci_common.c b/dpdk/drivers/bus/pci/pci_common.c
+index 4a3a87f24f..def372b67e 100644
+--- a/dpdk/drivers/bus/pci/pci_common.c
++++ b/dpdk/drivers/bus/pci/pci_common.c
+@@ -247,9 +247,12 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
+ 			return -ENOMEM;
+ 		}
+ 
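++		/* Let the mapping code see dev->driver (e.g. its drv_flags);
++		 * it is rolled back below if mapping fails.
++		 */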
++		dev->driver = dr;
++
+ 		if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+ 			ret = rte_pci_map_device(dev);
+ 			if (ret != 0) {
++				dev->driver = NULL;
+ 				rte_intr_instance_free(dev->vfio_req_intr_handle);
+ 				dev->vfio_req_intr_handle = NULL;
+ 				rte_intr_instance_free(dev->intr_handle);
+@@ -257,8 +260,6 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
+ 				return ret;
+ 			}
+ 		}
+-
+-		dev->driver = dr;
+ 	}
+ 
+ 	RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n",
+diff --git a/dpdk/drivers/bus/vdev/rte_bus_vdev.h b/dpdk/drivers/bus/vdev/rte_bus_vdev.h
+index 2856799953..5af6be009f 100644
+--- a/dpdk/drivers/bus/vdev/rte_bus_vdev.h
++++ b/dpdk/drivers/bus/vdev/rte_bus_vdev.h
+@@ -197,7 +197,7 @@ rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
+ int rte_vdev_init(const char *name, const char *args);
+ 
+ /**
+- * Uninitalize a driver specified by name.
++ * Uninitialize a driver specified by name.
+  *
+  * @param name
+  *   The pointer to a driver name to be uninitialized.
+diff --git a/dpdk/drivers/bus/vmbus/private.h b/dpdk/drivers/bus/vmbus/private.h
+index 1bca147e12..658303bc27 100644
+--- a/dpdk/drivers/bus/vmbus/private.h
++++ b/dpdk/drivers/bus/vmbus/private.h
+@@ -74,6 +74,8 @@ struct vmbus_channel {
+ 	uint16_t relid;
+ 	uint16_t subchannel_id;
+ 	uint8_t monitor_id;
++
++	struct vmbus_mon_page *monitor_page;
+ };
+ 
+ #define VMBUS_MAX_CHANNELS	64
+diff --git a/dpdk/drivers/bus/vmbus/vmbus_channel.c b/dpdk/drivers/bus/vmbus/vmbus_channel.c
+index 119b9b367e..9bd01679c3 100644
+--- a/dpdk/drivers/bus/vmbus/vmbus_channel.c
++++ b/dpdk/drivers/bus/vmbus/vmbus_channel.c
+@@ -27,7 +27,7 @@ vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+ }
+ 
+ static inline void
+-vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
++vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
+ {
+ 	uint32_t *monitor_addr, monitor_mask;
+ 	unsigned int trigger_index;
+@@ -35,15 +35,14 @@ vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
+ 	trigger_index = monitor_id / HV_MON_TRIG_LEN;
+ 	monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
+ 
+-	monitor_addr = &dev->monitor_page->trigs[trigger_index].pending;
++	monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
+ 	vmbus_sync_set_bit(monitor_addr, monitor_mask);
+ }
+ 
+ static void
+-vmbus_set_event(const struct rte_vmbus_device *dev,
+-		const struct vmbus_channel *chan)
++vmbus_set_event(const struct vmbus_channel *chan)
+ {
+-	vmbus_set_monitor(dev, chan->monitor_id);
++	vmbus_set_monitor(chan, chan->monitor_id);
+ }
+ 
+ /*
+@@ -81,7 +80,6 @@ rte_vmbus_set_latency(const struct rte_vmbus_device *dev,
+ void
+ rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
+ {
+-	const struct rte_vmbus_device *dev = chan->device;
+ 	const struct vmbus_br *tbr = &chan->txbr;
+ 
+ 	/* Make sure all updates are done before signaling host */
+@@ -91,7 +89,7 @@ rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
+ 	if (tbr->vbr->imask)
+ 		return;
+ 
+-	vmbus_set_event(dev, chan);
++	vmbus_set_event(chan);
+ }
+ 
+ 
+@@ -218,7 +216,7 @@ void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
+ 	if (write_sz <= pending_sz)
+ 		return;
+ 
+-	vmbus_set_event(chan->device, chan);
++	vmbus_set_event(chan);
+ }
+ 
+ int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len,
+@@ -325,6 +323,7 @@ int vmbus_chan_create(const struct rte_vmbus_device *device,
+ 	chan->subchannel_id = subid;
+ 	chan->relid = relid;
+ 	chan->monitor_id = monitor_id;
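++	/* Cache the monitor page so signaling works from the channel alone,
++	 * without going through chan->device.
++	 */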
++	chan->monitor_page = device->monitor_page;
+ 	*new_chan = chan;
+ 
+ 	err = vmbus_uio_map_rings(chan);
+diff --git a/dpdk/drivers/bus/vmbus/vmbus_common.c b/dpdk/drivers/bus/vmbus/vmbus_common.c
+index 519ca9c6fe..367727367e 100644
+--- a/dpdk/drivers/bus/vmbus/vmbus_common.c
++++ b/dpdk/drivers/bus/vmbus/vmbus_common.c
+@@ -134,7 +134,7 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
+ 
+ /*
+  * If device class GUID matches, call the probe function of
+- * registere drivers for the vmbus device.
++ * registered drivers for the vmbus device.
+  * Return -1 if initialization failed,
+  * and 1 if no driver found for this device.
+  */
+diff --git a/dpdk/drivers/common/cnxk/cnxk_security.c b/dpdk/drivers/common/cnxk/cnxk_security.c
+index 30562b46e3..787138b059 100644
+--- a/dpdk/drivers/common/cnxk/cnxk_security.c
++++ b/dpdk/drivers/common/cnxk/cnxk_security.c
+@@ -444,10 +444,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* Default options of DSCP and Flow label/DF */
+-	sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
+-	sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;
+-
+ skip_tunnel_info:
+ 	/* ESN */
+ 	sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;
+diff --git a/dpdk/drivers/common/cnxk/cnxk_telemetry_nix.c b/dpdk/drivers/common/cnxk/cnxk_telemetry_nix.c
+index df6458039d..4119e9ee4f 100644
+--- a/dpdk/drivers/common/cnxk/cnxk_telemetry_nix.c
++++ b/dpdk/drivers/common/cnxk/cnxk_telemetry_nix.c
+@@ -765,6 +765,9 @@ cnxk_nix_tel_handle_info_x(const char *cmd, const char *params,
+ 
+ 	plt_strlcpy(buf, params, PCI_PRI_STR_SIZE + 1);
+ 	name = strtok(buf, ",");
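++	/* strtok() returns NULL when params contains no token at all. */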
++	if (name == NULL)
++		goto exit;
++
+ 	param = strtok(NULL, "\0");
+ 
+ 	node = nix_tel_node_get_by_pcidev_name(name);
+diff --git a/dpdk/drivers/common/cnxk/hw/cpt.h b/dpdk/drivers/common/cnxk/hw/cpt.h
+index 919f8420f0..3ade4dc0c9 100644
+--- a/dpdk/drivers/common/cnxk/hw/cpt.h
++++ b/dpdk/drivers/common/cnxk/hw/cpt.h
+@@ -286,10 +286,11 @@ struct cpt_frag_info_s {
+ 	union {
+ 		uint64_t u64;
+ 		struct {
+-			union cpt_frag_info f3;
+-			union cpt_frag_info f2;
+-			union cpt_frag_info f1;
++			/* CPT HW swaps each 8B word implicitly */
+ 			union cpt_frag_info f0;
++			union cpt_frag_info f1;
++			union cpt_frag_info f2;
++			union cpt_frag_info f3;
+ 		};
+ 	} w0;
+ 
+@@ -297,10 +298,11 @@ struct cpt_frag_info_s {
+ 	union {
+ 		uint64_t u64;
+ 		struct {
+-			uint16_t frag_size3;
+-			uint16_t frag_size2;
+-			uint16_t frag_size1;
++			/* CPT HW swaps each 8B word implicitly */
+ 			uint16_t frag_size0;
++			uint16_t frag_size1;
++			uint16_t frag_size2;
++			uint16_t frag_size3;
+ 		};
+ 	} w1;
+ };
+diff --git a/dpdk/drivers/common/cnxk/roc_bphy_cgx.c b/dpdk/drivers/common/cnxk/roc_bphy_cgx.c
+index 7449cbe77a..0cd7dff655 100644
+--- a/dpdk/drivers/common/cnxk/roc_bphy_cgx.c
++++ b/dpdk/drivers/common/cnxk/roc_bphy_cgx.c
+@@ -14,7 +14,7 @@
+ #define CGX_CMRX_INT_OVERFLW	       BIT_ULL(1)
+ /*
+  * CN10K stores number of lmacs in 4 bit filed
+- * in contraty to CN9K which uses only 3 bits.
++ * contrary to CN9K, which uses only 3 bits.
+  *
+  * In theory masks should differ yet on CN9K
+  * bits beyond specified range contain zeros.
+@@ -268,9 +268,6 @@ roc_bphy_cgx_ptp_rx_ena_dis(struct roc_bphy_cgx *roc_cgx, unsigned int lmac,
+ {
+ 	uint64_t scr1, scr0;
+ 
+-	if (roc_model_is_cn10k())
+-		return -ENOTSUP;
+-
+ 	if (!roc_cgx)
+ 		return -EINVAL;
+ 
+diff --git a/dpdk/drivers/common/cnxk/roc_bphy_irq.c b/dpdk/drivers/common/cnxk/roc_bphy_irq.c
+index f4e9b341af..f4954d2a28 100644
+--- a/dpdk/drivers/common/cnxk/roc_bphy_irq.c
++++ b/dpdk/drivers/common/cnxk/roc_bphy_irq.c
+@@ -259,7 +259,7 @@ roc_bphy_irq_handler_set(struct roc_bphy_irq_chip *chip, int irq_num,
+ 
+ 	CPU_ZERO(&intr_cpuset);
+ 	CPU_SET(curr_cpu, &intr_cpuset);
+-	retval = pthread_setaffinity_np(pthread_self(), sizeof(intr_cpuset),
++	rc = pthread_setaffinity_np(pthread_self(), sizeof(intr_cpuset),
+ 					&intr_cpuset);
+ 	if (rc < 0) {
+ 		plt_err("Failed to set affinity mask");
+diff --git a/dpdk/drivers/common/cnxk/roc_cpt.c b/dpdk/drivers/common/cnxk/roc_cpt.c
+index 8f8e6d3821..6179df2f1f 100644
+--- a/dpdk/drivers/common/cnxk/roc_cpt.c
++++ b/dpdk/drivers/common/cnxk/roc_cpt.c
+@@ -385,6 +385,9 @@ cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
+ 		return -EINVAL;
+ 
+ 	req = mbox_alloc_msg_cpt_lf_alloc(mbox);
++	if (!req)
++		return -ENOSPC;
++
+ 	req->nix_pf_func = 0;
+ 	if (inl_dev_sso && nix_inl_dev_pffunc_get())
+ 		req->sso_pf_func = nix_inl_dev_pffunc_get();
+@@ -568,9 +571,6 @@ cpt_lf_init(struct roc_cpt_lf *lf)
+ 	if (lf->nb_desc == 0 || lf->nb_desc > CPT_LF_MAX_NB_DESC)
+ 		lf->nb_desc = CPT_LF_DEFAULT_NB_DESC;
+ 
+-	/* Update nb_desc to next power of 2 to aid in pending queue checks */
+-	lf->nb_desc = plt_align32pow2(lf->nb_desc);
+-
+ 	/* Allocate memory for instruction queue for CPT LF. */
+ 	iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN);
+ 	if (iq_mem == NULL)
+@@ -812,9 +812,9 @@ roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type)
+ void
+ roc_cpt_iq_disable(struct roc_cpt_lf *lf)
+ {
++	volatile union cpt_lf_q_grp_ptr grp_ptr = {.u = 0x0};
++	volatile union cpt_lf_inprog lf_inprog = {.u = 0x0};
+ 	union cpt_lf_ctl lf_ctl = {.u = 0x0};
+-	union cpt_lf_q_grp_ptr grp_ptr;
+-	union cpt_lf_inprog lf_inprog;
+ 	int timeout = 20;
+ 	int cnt;
+ 
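
This -ENOSPC guard after mbox_alloc_msg_*() recurs throughout the hunks below: the mailbox allocators return NULL once the shared message region is exhausted, and the request must not be dereferenced in that case. A condensed, self-contained sketch of the pattern with stand-in types (not the real cnxk mbox API):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the cnxk mailbox used in these hunks. */
	struct mbox_req { int qidx; };

	static struct mbox_req slot;
	static int slots_left = 1;

	static struct mbox_req *mbox_alloc_msg(void)
	{
		return slots_left-- > 0 ? &slot : NULL; /* NULL when full */
	}

	/* The pattern the hunks add: bail out with -ENOSPC instead of
	 * dereferencing a NULL request when allocation fails.
	 */
	static int queue_write(int qid)
	{
		struct mbox_req *req = mbox_alloc_msg();

		if (req == NULL)
			return -ENOSPC;
		req->qidx = qid;
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", queue_write(1), queue_write(2)); /* 0 -28 */
		return 0;
	}
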
+diff --git a/dpdk/drivers/common/cnxk/roc_cpt_debug.c b/dpdk/drivers/common/cnxk/roc_cpt_debug.c
+index 847d969268..be6ddb56aa 100644
+--- a/dpdk/drivers/common/cnxk/roc_cpt_debug.c
++++ b/dpdk/drivers/common/cnxk/roc_cpt_debug.c
+@@ -200,7 +200,7 @@ cpt_lf_print(struct roc_cpt_lf *lf)
+ 	reg_val = plt_read64(lf->rbase + CPT_LF_CTX_DEC_BYTE_CNT);
+ 	plt_print("    Decrypted byte count:\t%" PRIu64, reg_val);
+ 
+-	reg_val = plt_read64(lf->rbase + CPT_LF_CTX_ENC_PKT_CNT);
++	reg_val = plt_read64(lf->rbase + CPT_LF_CTX_DEC_PKT_CNT);
+ 	plt_print("    Decrypted packet count:\t%" PRIu64, reg_val);
+ }
+ 
+diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c
+index 926a916e44..9a869698c4 100644
+--- a/dpdk/drivers/common/cnxk/roc_dev.c
++++ b/dpdk/drivers/common/cnxk/roc_dev.c
+@@ -57,7 +57,7 @@ pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
+ 	struct mbox *mbox = dev->mbox;
+ 	struct mbox_dev *mdev = &mbox->dev[0];
+ 
+-	volatile uint64_t int_status;
++	volatile uint64_t int_status = 0;
+ 	struct mbox_msghdr *msghdr;
+ 	uint64_t off;
+ 	int rc = 0;
+@@ -152,6 +152,11 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
+ 		/* Reserve PF/VF mbox message */
+ 		size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
+ 		rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
++		if (!rsp) {
++			plt_err("Failed to reserve VF%d message", vf);
++			continue;
++		}
++
+ 		mbox_rsp_init(msg->id, rsp);
+ 
+ 		/* Copy message from AF<->PF mbox to PF<->VF mbox */
+@@ -236,6 +241,12 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf)
+ 				BIT_ULL(vf % max_bits);
+ 			rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
+ 				mbox, vf, sizeof(*rsp));
++			if (!rsp) {
++				plt_err("Failed to alloc VF%d READY message",
++					vf);
++				continue;
++			}
++
+ 			mbox_rsp_init(msg->id, rsp);
+ 
+ 			/* PF/VF function ID */
+@@ -988,6 +999,9 @@ dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
+ 	struct lmtst_tbl_setup_req *req;
+ 
+ 	req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
++	if (!req)
++		return -ENOSPC;
++
+ 	/* This pcifunc is defined with primary pcifunc whose LMT address
+ 	 * will be shared. If call contains valid IOVA, following pcifunc
+ 	 * field is of no use.
+@@ -1061,6 +1075,11 @@ dev_lmt_setup(struct dev *dev)
+ 	 */
+ 	if (!dev->disable_shared_lmt) {
+ 		idev = idev_get_cfg();
++		if (!idev) {
++			errno = EFAULT;
++			goto free;
++		}
++
+ 		if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
+ 			idev->lmt_base_addr = dev->lmt_base;
+ 			idev->lmt_pf_func = dev->pf_func;
+diff --git a/dpdk/drivers/common/cnxk/roc_irq.c b/dpdk/drivers/common/cnxk/roc_irq.c
+index 7a24297d72..010b121176 100644
+--- a/dpdk/drivers/common/cnxk/roc_irq.c
++++ b/dpdk/drivers/common/cnxk/roc_irq.c
+@@ -160,7 +160,10 @@ dev_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb,
+ 		return rc;
+ 	}
+ 
+-	plt_intr_efds_index_set(intr_handle, vec, fd);
++	rc = plt_intr_efds_index_set(intr_handle, vec, fd);
++	if (rc)
++		return rc;
++
+ 	nb_efd = (vec > (uint32_t)plt_intr_nb_efd_get(intr_handle)) ?
+ 		vec : (uint32_t)plt_intr_nb_efd_get(intr_handle);
+ 	plt_intr_nb_efd_set(intr_handle, nb_efd);
+diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h
+index b63fe108c9..9a8ae6b216 100644
+--- a/dpdk/drivers/common/cnxk/roc_mbox.h
++++ b/dpdk/drivers/common/cnxk/roc_mbox.h
+@@ -114,7 +114,7 @@ struct mbox_msghdr {
+ 	  msg_rsp)                                                             \
+ 	M(SSO_GRP_GET_PRIORITY, 0x606, sso_grp_get_priority, sso_info_req,     \
+ 	  sso_grp_priority)                                                    \
+-	M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, msg_req, msg_rsp)         \
++	M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, ssow_lf_inv_req, msg_rsp) \
+ 	M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg,      \
+ 	  msg_rsp)                                                             \
+ 	M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req,           \
+@@ -123,6 +123,9 @@ struct mbox_msghdr {
+ 	  sso_hws_stats)                                                       \
+ 	M(SSO_HW_RELEASE_XAQ, 0x611, sso_hw_release_xaq_aura,                  \
+ 	  sso_hw_xaq_release, msg_rsp)                                         \
++	M(SSO_CONFIG_LSW, 0x612, ssow_config_lsw, ssow_config_lsw, msg_rsp)    \
++	M(SSO_HWS_CHNG_MSHIP, 0x613, ssow_chng_mship, ssow_chng_mship,         \
++	  msg_rsp)                                                             \
+ 	/* TIM mbox IDs (range 0x800 - 0x9FF) */                               \
+ 	M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, tim_lf_alloc_req,                 \
+ 	  tim_lf_alloc_rsp)                                                    \
+@@ -247,7 +250,8 @@ struct mbox_msghdr {
+ 	M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req,        \
+ 	  nix_bp_cfg_rsp)                                                      \
+ 	M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req,      \
+-	  msg_rsp)
++	  msg_rsp)                                                             \
++	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)
+ 
+ /* Messages initiated by AF (range 0xC00 - 0xDFF) */
+ #define MBOX_UP_CGX_MESSAGES                                                   \
+@@ -1240,6 +1244,33 @@ struct ssow_lf_free_req {
+ 	uint16_t __io hws;
+ };
+ 
++#define SSOW_INVAL_SELECTIVE_VER 0x1000
++struct ssow_lf_inv_req {
++	struct mbox_msghdr hdr;
++	uint16_t nb_hws;		 /* Number of HWS to invalidate */
++	uint16_t hws[MAX_RVU_BLKLF_CNT]; /* Array of HWS */
++};
++
++struct ssow_config_lsw {
++	struct mbox_msghdr hdr;
++#define SSOW_LSW_DIS	 0
++#define SSOW_LSW_GW_WAIT 1
++#define SSOW_LSW_GW_IMM	 2
++	uint8_t __io lsw_mode;
++#define SSOW_WQE_REL_LSW_WAIT 0
++#define SSOW_WQE_REL_IMM      1
++	uint8_t __io wqe_release;
++};
++
++struct ssow_chng_mship {
++	struct mbox_msghdr hdr;
++	uint8_t __io set;	 /* Membership set to modify. */
++	uint8_t __io enable;	 /* Enable/Disable the hwgrps. */
++	uint8_t __io hws;	 /* HWS to modify. */
++	uint16_t __io nb_hwgrps; /* Number of hwgrps in the array */
++	uint16_t __io hwgrps[MAX_RVU_BLKLF_CNT]; /* Array of hwgrps. */
++};
++
+ struct sso_hw_setconfig {
+ 	struct mbox_msghdr hdr;
+ 	uint32_t __io npa_aura_id;
+diff --git a/dpdk/drivers/common/cnxk/roc_nix.h b/dpdk/drivers/common/cnxk/roc_nix.h
+index 69a5e8e7b4..986aac9e57 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix.h
++++ b/dpdk/drivers/common/cnxk/roc_nix.h
+@@ -808,6 +808,7 @@ int __roc_api roc_nix_ptp_sync_time_adjust(struct roc_nix *roc_nix,
+ int __roc_api roc_nix_ptp_info_cb_register(struct roc_nix *roc_nix,
+ 					   ptp_info_update_t ptp_update);
+ void __roc_api roc_nix_ptp_info_cb_unregister(struct roc_nix *roc_nix);
++bool __roc_api roc_nix_ptp_is_enable(struct roc_nix *roc_nix);
+ 
+ /* VLAN */
+ int __roc_api
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_bpf.c b/dpdk/drivers/common/cnxk/roc_nix_bpf.c
+index 6996a54be0..4941f62995 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_bpf.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_bpf.c
+@@ -138,7 +138,7 @@ nix_lf_bpf_dump(__io struct nix_band_prof_s *bpf)
+ {
+ 	plt_dump("W0: cir_mantissa  \t\t\t%d\nW0: pebs_mantissa \t\t\t0x%03x",
+ 		 bpf->cir_mantissa, bpf->pebs_mantissa);
+-	plt_dump("W0: peir_matissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
++	plt_dump("W0: peir_mantissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
+ 		 bpf->peir_mantissa, bpf->cbs_exponent);
+ 	plt_dump("W0: cir_exponent \t\t\t%d\nW0: pebs_exponent \t\t\t%d",
+ 		 bpf->cir_exponent, bpf->pebs_exponent);
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_debug.c b/dpdk/drivers/common/cnxk/roc_nix_debug.c
+index 266935a6c5..9829c13b69 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_debug.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_debug.c
+@@ -52,7 +52,9 @@ nix_bitmap_dump(struct plt_bitmap *bmp)
+ 	int i;
+ 
+ 	plt_bitmap_scan_init(bmp);
+-	plt_bitmap_scan(bmp, &pos, &slab);
++	if (!plt_bitmap_scan(bmp, &pos, &slab))
++		return;
++
+ 	start_pos = pos;
+ 
+ 	nix_dump_no_nl("  \t\t[");
+@@ -323,6 +325,9 @@ nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid, __io void **ctx_p)
+ 		int rc;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = ctype;
+ 		aq->op = NIX_AQ_INSTOP_READ;
+@@ -341,6 +346,9 @@ nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid, __io void **ctx_p)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = ctype;
+ 		aq->op = NIX_AQ_INSTOP_READ;
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_fc.c b/dpdk/drivers/common/cnxk/roc_nix_fc.c
+index ca29cd2bf9..d31137188e 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_fc.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_fc.c
+@@ -113,6 +113,9 @@ nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = fc_cfg->cq_cfg.rq;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_READ;
+@@ -120,6 +123,9 @@ nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = fc_cfg->cq_cfg.rq;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_READ;
+@@ -147,6 +153,9 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = fc_cfg->cq_cfg.rq;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -164,6 +173,9 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = fc_cfg->cq_cfg.rq;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.c b/dpdk/drivers/common/cnxk/roc_nix_inl.c
+index f0fc690417..e71774cf4a 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_inl.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_inl.c
+@@ -533,7 +533,7 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
+ 
+ 	inl_rq->flow_tag_width = 20;
+ 	/* Special tag mask */
+-	inl_rq->tag_mask = 0xFFF00000;
++	inl_rq->tag_mask = rq->tag_mask;
+ 	inl_rq->tt = SSO_TT_ORDERED;
+ 	inl_rq->hwgrp = 0;
+ 	inl_rq->wqe_skip = 1;
+@@ -595,8 +595,7 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
+ 		plt_err("Failed to disable inline device rq, rc=%d", rc);
+ 
+ 	/* Flush NIX LF for CN10K */
+-	if (roc_model_is_cn10k())
+-		plt_write64(0, inl_dev->nix_base + NIX_LF_OP_VWQE_FLUSH);
++	nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);
+ 
+ 	return rc;
+ }
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c b/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c
+index a0fe6ecd82..10912a6c93 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c
+@@ -346,6 +346,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
+ 	struct mbox *mbox = dev->mbox;
+ 	struct nix_lf_alloc_rsp *rsp;
+ 	struct nix_lf_alloc_req *req;
++	struct nix_hw_info *hw_info;
+ 	size_t inb_sa_sz;
+ 	int i, rc = -ENOSPC;
+ 	void *sa;
+@@ -382,6 +383,17 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
+ 	inl_dev->qints = rsp->qints;
+ 	inl_dev->cints = rsp->cints;
+ 
++	/* Get VWQE info if supported */
++	if (roc_model_is_cn10k()) {
++		mbox_alloc_msg_nix_get_hw_info(mbox);
++		rc = mbox_process_msg(mbox, (void *)&hw_info);
++		if (rc) {
++			plt_err("Failed to get HW info, rc=%d", rc);
++			goto lf_free;
++		}
++		inl_dev->vwqe_interval = hw_info->vwqe_delay;
++	}
++
+ 	/* Register nix interrupts */
+ 	rc = nix_inl_nix_register_irqs(inl_dev);
+ 	if (rc) {
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl_priv.h b/dpdk/drivers/common/cnxk/roc_nix_inl_priv.h
+index 3dc526f929..be53a3fa81 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_inl_priv.h
++++ b/dpdk/drivers/common/cnxk/roc_nix_inl_priv.h
+@@ -35,6 +35,7 @@ struct nix_inl_dev {
+ 	/* NIX data */
+ 	uint8_t lf_tx_stats;
+ 	uint8_t lf_rx_stats;
++	uint16_t vwqe_interval;
+ 	uint16_t cints;
+ 	uint16_t qints;
+ 	struct roc_nix_rq rq;
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_irq.c b/dpdk/drivers/common/cnxk/roc_nix_irq.c
+index a5cd9d4b02..7dcd533ea9 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_irq.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_irq.c
+@@ -202,9 +202,12 @@ nix_lf_sq_debug_reg(struct nix *nix, uint32_t off)
+ 	uint64_t reg;
+ 
+ 	reg = plt_read64(nix->base + off);
+-	if (reg & BIT_ULL(44))
++	if (reg & BIT_ULL(44)) {
+ 		plt_err("SQ=%d err_code=0x%x", (int)((reg >> 8) & 0xfffff),
+ 			(uint8_t)(reg & 0xff));
++		/* Clear valid bit */
++		plt_write64(BIT_ULL(44), nix->base + off);
++	}
+ }
+ 
+ static void
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_npc.c b/dpdk/drivers/common/cnxk/roc_nix_npc.c
+index c0666c87d5..ad8839dde8 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_npc.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_npc.c
+@@ -96,7 +96,7 @@ roc_nix_npc_mcast_config(struct roc_nix *roc_nix, bool mcast_enable,
+ 
+ 	if (mcast_enable)
+ 		req->mode = NIX_RX_MODE_ALLMULTI;
+-	else if (prom_enable)
++	if (prom_enable)
+ 		req->mode = NIX_RX_MODE_PROMISC;
+ 
+ 	return mbox_process(mbox);
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_priv.h b/dpdk/drivers/common/cnxk/roc_nix_priv.h
+index 04575af295..deb2a6ba11 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_priv.h
++++ b/dpdk/drivers/common/cnxk/roc_nix_priv.h
+@@ -377,6 +377,7 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
+ int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
+ int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
+ int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
++void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
+ 
+ /*
+  * TM priv utils.
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_ptp.c b/dpdk/drivers/common/cnxk/roc_nix_ptp.c
+index 03c4c6e5fd..05e4211de9 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_ptp.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_ptp.c
+@@ -120,3 +120,11 @@ roc_nix_ptp_info_cb_unregister(struct roc_nix *roc_nix)
+ 
+ 	dev->ops->ptp_info_update = NULL;
+ }
++
++bool
++roc_nix_ptp_is_enable(struct roc_nix *roc_nix)
++{
++	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
++
++	return nix->ptp_en;
++}
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_queue.c b/dpdk/drivers/common/cnxk/roc_nix_queue.c
+index c8c8401d81..e79a2d63e2 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_queue.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_queue.c
+@@ -28,6 +28,22 @@ nix_qsize_clampup(uint32_t val)
+ 	return i;
+ }
+ 
++void
++nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
++{
++	uint64_t wait_ns;
++
++	if (!roc_model_is_cn10k())
++		return;
++	/* Due to a HW erratum, writes to VWQE_FLUSH might hang, so instead
++	 * wait for the max VWQE timeout interval.
++	 */
++	if (rq->vwqe_ena) {
++		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
++		plt_delay_us((wait_ns / 1E3) + 1);
++	}
++}
++
+ int
+ nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
+ {
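
The wait in nix_rq_vwqe_flush() is plain arithmetic: the timeout field counts units of (vwqe_interval + 1) cycles of 100 ns each, and plt_delay_us() takes microseconds, hence the division by 1E3 plus one microsecond of slack. A worked example with assumed field values:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t vwqe_wait_tmo = 0x3FF; /* assumed max timeout value */
		uint16_t vwqe_interval = 9;	/* as reported by the AF mbox */
		uint64_t wait_ns = vwqe_wait_tmo * (vwqe_interval + 1) * 100;

		/* 1023 * 10 * 100 ns = 1023000 ns -> 1024 us with the slack */
		printf("wait %" PRIu64 " ns -> delay %" PRIu64 " us\n",
		       wait_ns, wait_ns / 1000 + 1);
		return 0;
	}
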
+@@ -38,6 +54,9 @@ nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = rq->qid;
+ 		aq->ctype = NIX_AQ_CTYPE_RQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -48,6 +67,9 @@ nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = rq->qid;
+ 		aq->ctype = NIX_AQ_CTYPE_RQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -66,9 +88,8 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
+ 	int rc;
+ 
+ 	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
++	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
+ 
+-	if (roc_model_is_cn10k())
+-		plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
+ 	return rc;
+ }
+ 
+@@ -80,6 +101,9 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
+ 	struct nix_aq_enq_req *aq;
+ 
+ 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = rq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_RQ;
+ 	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
+@@ -195,6 +219,9 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
+ 	struct mbox *mbox = dev->mbox;
+ 
+ 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = rq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_RQ;
+ 	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
+@@ -463,6 +490,9 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = cq->qid;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_INIT;
+@@ -471,6 +501,9 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = cq->qid;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_INIT;
+@@ -547,6 +580,9 @@ roc_nix_cq_fini(struct roc_nix_cq *cq)
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = cq->qid;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -558,6 +594,9 @@ roc_nix_cq_fini(struct roc_nix_cq *cq)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = cq->qid;
+ 		aq->ctype = NIX_AQ_CTYPE_CQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -649,7 +688,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
+ 	return rc;
+ }
+ 
+-static void
++static int
+ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
+ 	     uint16_t smq)
+ {
+@@ -657,6 +696,9 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
+ 	struct nix_aq_enq_req *aq;
+ 
+ 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_INIT;
+@@ -680,7 +722,12 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
+ 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
+ 
+ 	/* Many to one reduction */
+-	aq->sq.qint_idx = sq->qid % nix->qints;
++	/* Assign QINT 0 to all the SQs: an erratum exists where NIXTX can
++	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
++	 * which might result in software missing the interrupt.
++	 */
++	aq->sq.qint_idx = 0;
++	return 0;
+ }
+ 
+ static int
+@@ -694,6 +741,9 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
+ 	int rc, count;
+ 
+ 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_READ;
+@@ -707,6 +757,9 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
+ 
+ 	/* Disable sq */
+ 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -718,6 +771,9 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
+ 
+ 	/* Read SQ and free sqb's */
+ 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_READ;
+@@ -749,7 +805,7 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
+ 	return 0;
+ }
+ 
+-static void
++static int
+ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
+ 	uint16_t smq)
+ {
+@@ -757,6 +813,9 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
+ 	struct nix_cn10k_aq_enq_req *aq;
+ 
+ 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_INIT;
+@@ -779,8 +838,12 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
+ 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
+ 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
+ 
+-	/* Many to one reduction */
+-	aq->sq.qint_idx = sq->qid % nix->qints;
++	/* Assign QINT 0 to all the SQs: an erratum exists where NIXTX can
++	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
++	 * which might result in software missing the interrupt.
++	 */
++	aq->sq.qint_idx = 0;
++	return 0;
+ }
+ 
+ static int
+@@ -794,6 +857,9 @@ sq_fini(struct nix *nix, struct roc_nix_sq *sq)
+ 	int rc, count;
+ 
+ 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_READ;
+@@ -807,6 +873,9 @@ sq_fini(struct nix *nix, struct roc_nix_sq *sq)
+ 
+ 	/* Disable sq */
+ 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -818,6 +887,9 @@ sq_fini(struct nix *nix, struct roc_nix_sq *sq)
+ 
+ 	/* Read SQ and free sqb's */
+ 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++	if (!aq)
++		return -ENOSPC;
++
+ 	aq->qidx = sq->qid;
+ 	aq->ctype = NIX_AQ_CTYPE_SQ;
+ 	aq->op = NIX_AQ_INSTOP_READ;
+@@ -888,9 +960,12 @@ roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
+ 
+ 	/* Init SQ context */
+ 	if (roc_model_is_cn9k())
+-		sq_cn9k_init(nix, sq, rr_quantum, smq);
++		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
+ 	else
+-		sq_init(nix, sq, rr_quantum, smq);
++		rc = sq_init(nix, sq, rr_quantum, smq);
++
++	if (rc)
++		goto nomem;
+ 
+ 	rc = mbox_process(mbox);
+ 	if (rc)
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_stats.c b/dpdk/drivers/common/cnxk/roc_nix_stats.c
+index c50c8fa629..756111fb1c 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_stats.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_stats.c
+@@ -124,6 +124,9 @@ nix_stat_rx_queue_reset(struct nix *nix, uint16_t qid)
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = NIX_AQ_CTYPE_RQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -143,6 +146,9 @@ nix_stat_rx_queue_reset(struct nix *nix, uint16_t qid)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = NIX_AQ_CTYPE_RQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -174,6 +180,9 @@ nix_stat_tx_queue_reset(struct nix *nix, uint16_t qid)
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = NIX_AQ_CTYPE_SQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -190,6 +199,9 @@ nix_stat_tx_queue_reset(struct nix *nix, uint16_t qid)
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = NIX_AQ_CTYPE_SQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -295,6 +307,9 @@ roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats,
+ 
+ 	if (roc_model_is_cn9k()) {
+ 		req = mbox_alloc_msg_cgx_stats(mbox);
++		if (!req)
++			return -ENOSPC;
++
+ 		req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix);
+ 
+ 		rc = mbox_process_msg(mbox, (void *)&cgx_resp);
+@@ -316,6 +331,9 @@ roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats,
+ 		}
+ 	} else {
+ 		req = mbox_alloc_msg_rpm_stats(mbox);
++		if (!req)
++			return -ENOSPC;
++
+ 		req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix);
+ 
+ 		rc = mbox_process_msg(mbox, (void *)&rpm_resp);
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm.c b/dpdk/drivers/common/cnxk/roc_nix_tm.c
+index b3d8ebd3c2..506cb066ce 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_tm.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_tm.c
+@@ -424,7 +424,7 @@ nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
+ 
+ 	if (req) {
+ 		req->num_regs = k;
+-		rc = mbox_process(mbox);
++		rc = mbox_process_msg(mbox, (void **)&rsp);
+ 		if (rc)
+ 			goto err;
+ 		/* Report it as enabled only if enabled or all */
+@@ -564,6 +564,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
+ 	struct nix_tm_node *node, *sibling;
+ 	struct nix_tm_node_list *list;
+ 	enum roc_nix_tm_tree tree;
++	struct msg_req *req;
+ 	struct mbox *mbox;
+ 	struct nix *nix;
+ 	uint16_t qid;
+@@ -653,6 +654,12 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
+ 			rc);
+ 		goto cleanup;
+ 	}
++
++	req = mbox_alloc_msg_nix_rx_sw_sync(mbox);
++	if (!req)
++		return -ENOSPC;
++
++	rc = mbox_process(mbox);
+ cleanup:
+ 	/* Restore cgx state */
+ 	if (!roc_nix->io_enabled) {
+@@ -766,6 +773,9 @@ nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
+ 		struct nix_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = NIX_AQ_CTYPE_SQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+@@ -781,6 +791,9 @@ nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
+ 		struct nix_cn10k_aq_enq_req *aq;
+ 
+ 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
++		if (!aq)
++			return -ENOSPC;
++
+ 		aq->qidx = qid;
+ 		aq->ctype = NIX_AQ_CTYPE_SQ;
+ 		aq->op = NIX_AQ_INSTOP_WRITE;
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c b/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c
+index 3257fa67c7..3d81247a12 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c
+@@ -107,7 +107,7 @@ nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
+ 	if (profile->peak.rate && min_rate > profile->peak.rate)
+ 		min_rate = profile->peak.rate;
+ 
+-	/* Each packet accomulate single count, whereas HW
++	/* Each packet accumulates a single count, whereas HW
+ 	 * considers each unit as Byte, so we need convert
+ 	 * user pps to bps
+ 	 */
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c b/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c
+index 543adf9e56..9e80c2a5fe 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c
+@@ -642,6 +642,7 @@ nix_tm_shaper_reg_prep(struct nix_tm_node *node,
+ 	else if (profile)
+ 		adjust = profile->pkt_len_adj;
+ 
++	adjust &= 0x1FF;
+ 	plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
+ 		   "pir %" PRIu64 "(%" PRIu64 "B),"
+ 		   " cir %" PRIu64 "(%" PRIu64 "B)"
+@@ -708,7 +709,7 @@ nix_tm_shaper_reg_prep(struct nix_tm_node *node,
+ 		/* Configure RED algo */
+ 		reg[k] = NIX_AF_TL3X_SHAPE(schq);
+ 		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+-			     (uint64_t)node->pkt_mode);
++			     (uint64_t)node->pkt_mode << 24);
+ 		k++;
+ 
+ 		break;
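
Both fixes in this hunk are bit-packing corrections: the length adjust must be masked to its 9-bit field before being OR'd into the register, and pkt_mode lives at bit 24 rather than bit 0. A standalone sketch of the packing implied by the code (field positions are taken from the code above, not from a datasheet):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t adjust = 0x1FF & 100;	/* 9-bit length adjust */
		uint64_t red_algo = 2;		/* bits 10..9 */
		uint64_t pkt_mode = 1;		/* bit 24 */
		uint64_t regval = adjust | red_algo << 9 | pkt_mode << 24;

		printf("0x%" PRIx64 "\n", regval); /* 0x1000464 */
		return 0;
	}
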
+diff --git a/dpdk/drivers/common/cnxk/roc_npc.c b/dpdk/drivers/common/cnxk/roc_npc.c
+index 503c74748f..5ee7ff5e41 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc.c
++++ b/dpdk/drivers/common/cnxk/roc_npc.c
+@@ -969,14 +969,14 @@ npc_vtag_insert_action_configure(struct mbox *mbox, struct roc_npc_flow *flow,
+ 	vtag_cfg->cfg_type = VTAG_TX;
+ 	vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+ 	vtag_cfg->tx.vtag0 =
+-		((vlan_info[0].vlan_ethtype << 16) |
++		(((uint32_t)vlan_info[0].vlan_ethtype << 16) |
+ 		 (vlan_info[0].vlan_pcp << 13) | vlan_info[0].vlan_id);
+ 
+ 	vtag_cfg->tx.cfg_vtag0 = 1;
+ 
+ 	if (flow->vtag_insert_count == 2) {
+ 		vtag_cfg->tx.vtag1 =
+-			((vlan_info[1].vlan_ethtype << 16) |
++			(((uint32_t)vlan_info[1].vlan_ethtype << 16) |
+ 			 (vlan_info[1].vlan_pcp << 13) | vlan_info[1].vlan_id);
+ 
+ 		vtag_cfg->tx.cfg_vtag1 = 1;
+@@ -1246,6 +1246,16 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow)
+ 			return rc;
+ 	}
+ 
++	if (flow->ctr_id != NPC_COUNTER_NONE) {
++		rc = roc_npc_mcam_clear_counter(roc_npc, flow->ctr_id);
++		if (rc != 0)
++			return rc;
++
++		rc = npc_mcam_free_counter(npc, flow->ctr_id);
++		if (rc != 0)
++			return rc;
++	}
++
+ 	rc = npc_mcam_free_entry(npc, flow->mcam_id);
+ 	if (rc != 0)
+ 		return rc;
+diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam.c b/dpdk/drivers/common/cnxk/roc_npc_mcam.c
+index ba7f89b45b..a16ba3f7be 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc_mcam.c
++++ b/dpdk/drivers/common/cnxk/roc_npc_mcam.c
+@@ -234,7 +234,7 @@ npc_get_kex_capability(struct npc *npc)
+ 	/* Ethtype: Offset 12B, len 2B */
+ 	kex_cap.bit.ethtype_0 = npc_is_kex_enabled(
+ 		npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
+-	/* QINQ VLAN Ethtype: ofset 8B, len 2B */
++	/* QINQ VLAN Ethtype: offset 8B, len 2B */
+ 	kex_cap.bit.ethtype_x = npc_is_kex_enabled(
+ 		npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
+ 	/* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */
+@@ -670,7 +670,7 @@ npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc)
+ 	memcpy(pst->flow->mcam_data, key_data, key_len);
+ 	memcpy(pst->flow->mcam_mask, key_mask, key_len);
+ 
+-	if (pst->is_vf) {
++	if (pst->is_vf && pst->flow->nix_intf == NIX_INTF_RX) {
+ 		(void)mbox_alloc_msg_npc_read_base_steer_rule(npc->mbox);
+ 		rc = mbox_process_msg(npc->mbox, (void *)&base_rule_rsp);
+ 		if (rc) {
+diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c
+index 19b4901a52..278056591e 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c
++++ b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c
+@@ -159,6 +159,12 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
++	if (rx_parse->laflags) {
++		data = npc_get_nibbles(flow, 2, offset);
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LA_FLAGS:%#02X\n", data);
++		offset += 8;
++	}
++
+ 	if (rx_parse->latype) {
+ 		data = npc_get_nibbles(flow, 1, offset);
+ 		fprintf(file, "\tNPC_PARSE_NIBBLE_LA_LTYPE:%s\n",
+@@ -166,9 +172,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
+-	if (rx_parse->laflags) {
++	if (rx_parse->lbflags) {
+ 		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LA_FLAGS:%#02X\n", data);
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LB_FLAGS:%#02X\n", data);
+ 		offset += 8;
+ 	}
+ 
+@@ -179,9 +185,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
+-	if (rx_parse->lbflags) {
++	if (rx_parse->lcflags) {
+ 		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LB_FLAGS:%#02X\n", data);
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LC_FLAGS:%#02X\n", data);
+ 		offset += 8;
+ 	}
+ 
+@@ -192,9 +198,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
+-	if (rx_parse->lcflags) {
++	if (rx_parse->ldflags) {
+ 		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LC_FLAGS:%#02X\n", data);
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LD_FLAGS:%#02X\n", data);
+ 		offset += 8;
+ 	}
+ 
+@@ -205,9 +211,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
+-	if (rx_parse->ldflags) {
++	if (rx_parse->leflags) {
+ 		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LD_FLAGS:%#02X\n", data);
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LE_FLAGS:%#02X\n", data);
+ 		offset += 8;
+ 	}
+ 
+@@ -218,9 +224,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
+-	if (rx_parse->leflags) {
++	if (rx_parse->lfflags) {
+ 		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LE_FLAGS:%#02X\n", data);
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LF_FLAGS:%#02X\n", data);
+ 		offset += 8;
+ 	}
+ 
+@@ -231,9 +237,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
+-	if (rx_parse->lfflags) {
++	if (rx_parse->lgflags) {
+ 		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LF_FLAGS:%#02X\n", data);
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LG_FLAGS:%#02X\n", data);
+ 		offset += 8;
+ 	}
+ 
+@@ -244,10 +250,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 		offset += 4;
+ 	}
+ 
+-	if (rx_parse->lgflags) {
++	if (rx_parse->lhflags) {
+ 		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LG_FLAGS:%#02X\n", data);
+-		offset += 8;
++		fprintf(file, "\tNPC_PARSE_NIBBLE_LH_FLAGS:%#02X\n", data);
+ 	}
+ 
+ 	if (rx_parse->lhtype) {
+@@ -256,11 +261,6 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow,
+ 			ltype_str[NPC_LID_LH][data]);
+ 		offset += 4;
+ 	}
+-
+-	if (rx_parse->lhflags) {
+-		data = npc_get_nibbles(flow, 2, offset);
+-		fprintf(file, "\tNPC_PARSE_NIBBLE_LH_FLAGS:%#02X\n", data);
+-	}
+ }
+ 
+ static void
+diff --git a/dpdk/drivers/common/cnxk/roc_npc_parse.c b/dpdk/drivers/common/cnxk/roc_npc_parse.c
+index 8125035dd8..9742ac90f7 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc_parse.c
++++ b/dpdk/drivers/common/cnxk/roc_npc_parse.c
+@@ -38,6 +38,7 @@ npc_parse_cpt_hdr(struct npc_parse_state *pst)
+ 	info.hw_hdr_len = 0;
+ 
+ 	/* Prepare for parsing the item */
++	info.def_mask = NULL;
+ 	info.hw_mask = &hw_mask;
+ 	info.len = pst->pattern->size;
+ 	npc_get_hw_supp_mask(pst, &info, lid, lt);
+@@ -75,6 +76,7 @@ npc_parse_higig2_hdr(struct npc_parse_state *pst)
+ 	}
+ 
+ 	/* Prepare for parsing the item */
++	info.def_mask = NULL;
+ 	info.hw_mask = &hw_mask;
+ 	info.len = pst->pattern->size;
+ 	npc_get_hw_supp_mask(pst, &info, lid, lt);
+@@ -121,6 +123,7 @@ npc_parse_la(struct npc_parse_state *pst)
+ 	}
+ 
+ 	/* Prepare for parsing the item */
++	info.def_mask = NULL;
+ 	info.hw_mask = &hw_mask;
+ 	info.len = pst->pattern->size;
+ 	npc_get_hw_supp_mask(pst, &info, lid, lt);
+@@ -179,6 +182,7 @@ npc_parse_lb(struct npc_parse_state *pst)
+ 	int nr_vlans = 0;
+ 	int rc;
+ 
++	info.def_mask = NULL;
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+ 	info.def_mask = NULL;
+@@ -307,12 +311,12 @@ npc_parse_mpls_label_stack(struct npc_parse_state *pst, int *flag)
+ 	 * pst->pattern points to first MPLS label. We only check
+ 	 * that subsequent labels do not have anything to match.
+ 	 */
++	info.def_mask = NULL;
+ 	info.hw_mask = NULL;
+ 	info.len = pattern->size;
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+ 	info.hw_hdr_len = 0;
+-	info.def_mask = NULL;
+ 
+ 	while (pattern->type == ROC_NPC_ITEM_TYPE_MPLS) {
+ 		nr_labels++;
+@@ -358,6 +362,7 @@ npc_parse_mpls(struct npc_parse_state *pst, int lid)
+ 	info.len = pst->pattern->size;
+ 	info.spec = NULL;
+ 	info.mask = NULL;
++	info.def_mask = NULL;
+ 	info.hw_hdr_len = 0;
+ 
+ 	npc_get_hw_supp_mask(pst, &info, lid, lt);
+@@ -405,6 +410,7 @@ npc_parse_lc(struct npc_parse_state *pst)
+ 	if (pst->pattern->type == ROC_NPC_ITEM_TYPE_MPLS)
+ 		return npc_parse_mpls(pst, NPC_LID_LC);
+ 
++	info.def_mask = NULL;
+ 	info.hw_mask = &hw_mask;
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+@@ -492,10 +498,10 @@ npc_parse_ld(struct npc_parse_state *pst)
+ 			return npc_parse_mpls(pst, NPC_LID_LD);
+ 		return 0;
+ 	}
++	info.def_mask = NULL;
+ 	info.hw_mask = &hw_mask;
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+-	info.def_mask = NULL;
+ 	info.len = 0;
+ 	info.hw_hdr_len = 0;
+ 
+@@ -529,11 +535,13 @@ npc_parse_ld(struct npc_parse_state *pst)
+ 	case ROC_NPC_ITEM_TYPE_GRE:
+ 		lt = NPC_LT_LD_GRE;
+ 		info.len = pst->pattern->size;
++		pst->tunnel = 1;
+ 		break;
+ 	case ROC_NPC_ITEM_TYPE_GRE_KEY:
+ 		lt = NPC_LT_LD_GRE;
+ 		info.len = pst->pattern->size;
+ 		info.hw_hdr_len = 4;
++		pst->tunnel = 1;
+ 		break;
+ 	case ROC_NPC_ITEM_TYPE_NVGRE:
+ 		lt = NPC_LT_LD_NVGRE;
+@@ -651,6 +659,7 @@ npc_parse_lf(struct npc_parse_state *pst)
+ 	lflags = 0;
+ 
+ 	/* No match support for vlan tags */
++	info.def_mask = NULL;
+ 	info.hw_mask = NULL;
+ 	info.len = pst->pattern->size;
+ 	info.spec = NULL;
+@@ -709,6 +718,7 @@ npc_parse_lg(struct npc_parse_state *pst)
+ 	if (!pst->tunnel)
+ 		return 0;
+ 
++	info.def_mask = NULL;
+ 	info.hw_mask = &hw_mask;
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+@@ -745,6 +755,7 @@ npc_parse_lh(struct npc_parse_state *pst)
+ 	if (!pst->tunnel)
+ 		return 0;
+ 
++	info.def_mask = NULL;
+ 	info.hw_mask = &hw_mask;
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+diff --git a/dpdk/drivers/common/cnxk/roc_npc_priv.h b/dpdk/drivers/common/cnxk/roc_npc_priv.h
+index 712302bc5c..74e0fb2ece 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc_priv.h
++++ b/dpdk/drivers/common/cnxk/roc_npc_priv.h
+@@ -363,7 +363,7 @@ struct npc {
+ 	uint32_t rss_grps;			/* rss groups supported */
+ 	uint16_t flow_prealloc_size;		/* Pre allocated mcam size */
+ 	uint16_t flow_max_priority;		/* Max priority for flow */
+-	uint16_t switch_header_type; /* Suppprted switch header type */
++	uint16_t switch_header_type; /* Supported switch header type */
+ 	uint32_t mark_actions;	     /* Number of mark actions */
+ 	uint32_t vtag_strip_actions; /* vtag insert/strip actions */
+ 	uint16_t pf_func;	     /* pf_func of device */
+diff --git a/dpdk/drivers/common/cnxk/roc_npc_utils.c b/dpdk/drivers/common/cnxk/roc_npc_utils.c
+index ed0ef5c462..e36a312576 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc_utils.c
++++ b/dpdk/drivers/common/cnxk/roc_npc_utils.c
+@@ -145,6 +145,9 @@ npc_parse_item_basic(const struct roc_npc_item_info *item,
+ 			info->mask = item->mask;
+ 	}
+ 
++	if (info->mask == NULL)
++		return NPC_ERR_INVALID_MASK;
++
+ 	/* mask specified must be subset of hw supported mask
+ 	 * mask | hw_mask == hw_mask
+ 	 */
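
The subset rule quoted in the comment above is worth spelling out: a user-supplied match mask is acceptable only if it requests no bits the hardware cannot extract, which the OR test captures compactly. A self-contained illustration:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* A mask is a subset of hw_mask iff OR-ing it in adds no new bits. */
	static bool mask_is_supported(uint64_t mask, uint64_t hw_mask)
	{
		return (mask | hw_mask) == hw_mask;
	}

	int main(void)
	{
		printf("%d %d\n", mask_is_supported(0x00ff, 0xffff),
		       mask_is_supported(0x1ffff, 0xffff)); /* 1 0 */
		return 0;
	}
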
+@@ -166,6 +169,9 @@ npc_update_extraction_data(struct npc_parse_state *pst,
+ 	int len = 0;
+ 
+ 	x = xinfo;
++	if (x->len > NPC_MAX_EXTRACT_DATA_LEN)
++		return NPC_ERR_INVALID_SIZE;
++
+ 	len = x->len;
+ 	hdr_off = x->hdr_off;
+ 
+@@ -579,7 +585,7 @@ npc_allocate_mcam_entry(struct mbox *mbox, int prio,
+ 	if (!rsp_cmd->count)
+ 		return -ENOSPC;
+ 
+-	memcpy(rsp_local, rsp_cmd, sizeof(*rsp));
++	mbox_memcpy(rsp_local, rsp_cmd, sizeof(*rsp));
+ 
+ 	return 0;
+ }
+@@ -664,14 +670,14 @@ npc_get_free_mcam_entry(struct mbox *mbox, struct roc_npc_flow *flow,
+ 
+ 	new_entry->flow = flow;
+ 
+-	plt_info("npc: kernel allocated MCAM entry %d", rsp_local.entry);
++	plt_npc_dbg("kernel allocated MCAM entry %d", rsp_local.entry);
+ 
+ 	rc = npc_sort_mcams_by_user_prio_level(mbox, new_entry, npc,
+ 					       &rsp_local);
+ 	if (rc)
+ 		goto err;
+ 
+-	plt_info("npc: allocated MCAM entry after sorting %d", rsp_local.entry);
++	plt_npc_dbg("allocated MCAM entry after sorting %d", rsp_local.entry);
+ 	flow->mcam_id = rsp_local.entry;
+ 	npc_insert_into_flow_list(npc, new_entry);
+ 
+diff --git a/dpdk/drivers/common/cnxk/roc_platform.c b/dpdk/drivers/common/cnxk/roc_platform.c
+index 74dbdeceb9..6cf0c4113e 100644
+--- a/dpdk/drivers/common/cnxk/roc_platform.c
++++ b/dpdk/drivers/common/cnxk/roc_platform.c
+@@ -37,7 +37,11 @@ roc_plt_init(void)
+ 				plt_err("Failed to reserve mem for roc_model");
+ 				return -ENOMEM;
+ 			}
+-			roc_model_init(mz->addr);
++			if (roc_model_init(mz->addr)) {
++				plt_err("Failed to init roc_model");
++				rte_memzone_free(mz);
++				return -EINVAL;
++			}
+ 		}
+ 	} else {
+ 		if (mz == NULL) {
+diff --git a/dpdk/drivers/common/cnxk/roc_se.c b/dpdk/drivers/common/cnxk/roc_se.c
+index ffe537af30..3f0821e400 100644
+--- a/dpdk/drivers/common/cnxk/roc_se.c
++++ b/dpdk/drivers/common/cnxk/roc_se.c
+@@ -283,6 +283,8 @@ roc_se_auth_key_set(struct roc_se_ctx *se_ctx, roc_se_auth_type type,
+ 				return ret;
+ 			se_ctx->pdcp_alg_type = ROC_SE_PDCP_ALG_TYPE_ZUC;
+ 			memcpy(ci_key, key, key_len);
++			if (key_len == 32)
++				roc_se_zuc_bytes_swap(ci_key, key_len);
+ 			cpt_pdcp_update_zuc_const(zuc_const, key_len, mac_len);
+ 			se_ctx->fc_type = ROC_SE_PDCP;
+ 			se_ctx->zsk_flags = 0x1;
+@@ -459,9 +461,10 @@ roc_se_ciph_key_set(struct roc_se_ctx *se_ctx, roc_se_cipher_type type,
+ 		zs_ctx->zuc.otk_ctx.w0.s.alg_type = ROC_SE_PDCP_ALG_TYPE_ZUC;
+ 		se_ctx->pdcp_alg_type = ROC_SE_PDCP_ALG_TYPE_ZUC;
+ 		memcpy(ci_key, key, key_len);
+-		if (key_len == 32)
++		if (key_len == 32) {
++			roc_se_zuc_bytes_swap(ci_key, key_len);
+ 			memcpy(zuc_const, zuc_key256, 16);
+-		else
++		} else
+ 			memcpy(zuc_const, zuc_key128, 32);
+ 
+ 		se_ctx->zsk_flags = 0;
+diff --git a/dpdk/drivers/common/cnxk/roc_se.h b/dpdk/drivers/common/cnxk/roc_se.h
+index 5be832fa75..500f94ac11 100644
+--- a/dpdk/drivers/common/cnxk/roc_se.h
++++ b/dpdk/drivers/common/cnxk/roc_se.h
+@@ -297,6 +297,27 @@ struct roc_se_ctx {
+ 	uint8_t *auth_key;
+ };
+ 
++static inline void
++roc_se_zuc_bytes_swap(uint8_t *arr, int len)
++{
++	int start, end;
++	uint8_t tmp;
++
++	if (len <= 0)
++		return;
++
++	start = 0;
++	end = len - 1;
++
++	while (start < end) {
++		tmp = arr[start];
++		arr[start] = arr[end];
++		arr[end] = tmp;
++		start++;
++		end--;
++	}
++}
++
+ int __roc_api roc_se_auth_key_set(struct roc_se_ctx *se_ctx,
+ 				  roc_se_auth_type type, const uint8_t *key,
+ 				  uint16_t key_len, uint16_t mac_len);
+@@ -306,4 +327,5 @@ int __roc_api roc_se_ciph_key_set(struct roc_se_ctx *se_ctx,
+ 				  uint16_t key_len, uint8_t *salt);
+ 
+ void __roc_api roc_se_ctx_swap(struct roc_se_ctx *se_ctx);
++
+ #endif /* __ROC_SE_H__ */
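
roc_se_zuc_bytes_swap() added above is a plain in-place byte reversal, applied to 256-bit ZUC keys before they are handed to the engine. A standalone demonstration of the same loop on a short buffer:

	#include <stdint.h>
	#include <stdio.h>

	/* Reverse a byte buffer in place, as the new helper does for keys. */
	static void bytes_swap(uint8_t *arr, int len)
	{
		for (int start = 0, end = len - 1; start < end; start++, end--) {
			uint8_t tmp = arr[start];

			arr[start] = arr[end];
			arr[end] = tmp;
		}
	}

	int main(void)
	{
		uint8_t key[4] = { 0x11, 0x22, 0x33, 0x44 };

		bytes_swap(key, sizeof(key));
		printf("%02x %02x %02x %02x\n", key[0], key[1], key[2],
		       key[3]); /* 44 33 22 11 */
		return 0;
	}
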
+diff --git a/dpdk/drivers/common/cnxk/roc_sso.c b/dpdk/drivers/common/cnxk/roc_sso.c
+index 45ff16ca0e..ede0a8a3bb 100644
+--- a/dpdk/drivers/common/cnxk/roc_sso.c
++++ b/dpdk/drivers/common/cnxk/roc_sso.c
+@@ -378,10 +378,10 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
+ 		}
+ 		req->grp = qos[i].hwgrp;
+ 		req->xaq_limit = (nb_xaq * (xaq_prcnt ? xaq_prcnt : 100)) / 100;
+-		req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
++		req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
+ 				(iaq_prcnt ? iaq_prcnt : 100)) /
+ 			       100;
+-		req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
++		req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
+ 				(taq_prcnt ? taq_prcnt : 100)) /
+ 			       100;
+ 	}
+diff --git a/dpdk/drivers/common/cnxk/version.map b/dpdk/drivers/common/cnxk/version.map
+index 07c6720f0c..556369bc02 100644
+--- a/dpdk/drivers/common/cnxk/version.map
++++ b/dpdk/drivers/common/cnxk/version.map
+@@ -198,6 +198,7 @@ INTERNAL {
+ 	roc_nix_ptp_clock_read;
+ 	roc_nix_ptp_info_cb_register;
+ 	roc_nix_ptp_info_cb_unregister;
++	roc_nix_ptp_is_enable;
+ 	roc_nix_ptp_rx_ena_dis;
+ 	roc_nix_ptp_sync_time_adjust;
+ 	roc_nix_ptp_tx_ena_dis;
+diff --git a/dpdk/drivers/common/cpt/cpt_mcode_defines.h b/dpdk/drivers/common/cpt/cpt_mcode_defines.h
+index f16ee44297..e6dcb7674c 100644
+--- a/dpdk/drivers/common/cpt/cpt_mcode_defines.h
++++ b/dpdk/drivers/common/cpt/cpt_mcode_defines.h
+@@ -387,7 +387,7 @@ typedef struct buf_ptr {
+ /* IOV Pointer */
+ typedef struct{
+ 	int buf_cnt;
+-	buf_ptr_t bufs[0];
++	buf_ptr_t bufs[];
+ } iov_ptr_t;
+ 
+ typedef struct fc_params {
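
Replacing bufs[0] with bufs[] swaps a zero-length-array GNU extension for a C99 flexible array member; the allocation then sizes the trailing array explicitly. A self-contained sketch with simplified stand-in fields (the real buf_ptr_t also carries a DMA address):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	typedef struct { void *vaddr; size_t size; } buf_ptr_t;

	typedef struct {
		int buf_cnt;
		buf_ptr_t bufs[]; /* flexible array member */
	} iov_ptr_t;

	int main(void)
	{
		int n = 4;
		iov_ptr_t *iov = malloc(sizeof(*iov) +
					n * sizeof(iov->bufs[0]));

		if (iov == NULL)
			return 1;
		iov->buf_cnt = n;
		memset(iov->bufs, 0, n * sizeof(iov->bufs[0]));
		printf("%d buffers\n", iov->buf_cnt);
		free(iov);
		return 0;
	}
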
+diff --git a/dpdk/drivers/common/cpt/cpt_ucode.h b/dpdk/drivers/common/cpt/cpt_ucode.h
+index e015cf66a1..22aabab6ac 100644
+--- a/dpdk/drivers/common/cpt/cpt_ucode.h
++++ b/dpdk/drivers/common/cpt/cpt_ucode.h
+@@ -246,7 +246,7 @@ cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
+ 	if (cpt_ctx->fc_type == FC_GEN) {
+ 		/*
+ 		 * We need to always say IV is from DPTR as user can
+-		 * sometimes iverride IV per operation.
++		 * sometimes override IV per operation.
+ 		 */
+ 		fctx->enc.iv_source = CPT_FROM_DPTR;
+ 
+@@ -394,27 +394,26 @@ fill_sg_comp_from_iov(sg_comp_t *list,
+ 	int32_t j;
+ 	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
+ 	uint32_t size = *psize;
+-	buf_ptr_t *bufs;
+ 
+-	bufs = from->bufs;
+ 	for (j = 0; (j < from->buf_cnt) && size; j++) {
++		phys_addr_t dma_addr = from->bufs[j].dma_addr;
++		uint32_t buf_sz = from->bufs[j].size;
++		sg_comp_t *to = &list[i >> 2];
+ 		phys_addr_t e_dma_addr;
+ 		uint32_t e_len;
+-		sg_comp_t *to = &list[i >> 2];
+ 
+ 		if (unlikely(from_offset)) {
+-			if (from_offset >= bufs[j].size) {
+-				from_offset -= bufs[j].size;
++			if (from_offset >= buf_sz) {
++				from_offset -= buf_sz;
+ 				continue;
+ 			}
+-			e_dma_addr = bufs[j].dma_addr + from_offset;
+-			e_len = (size > (bufs[j].size - from_offset)) ?
+-				(bufs[j].size - from_offset) : size;
++			e_dma_addr = dma_addr + from_offset;
++			e_len = (size > (buf_sz - from_offset)) ?
++				(buf_sz - from_offset) : size;
+ 			from_offset = 0;
+ 		} else {
+-			e_dma_addr = bufs[j].dma_addr;
+-			e_len = (size > bufs[j].size) ?
+-				bufs[j].size : size;
++			e_dma_addr = dma_addr;
++			e_len = (size > buf_sz) ? buf_sz : size;
+ 		}
+ 
+ 		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+@@ -3035,7 +3034,7 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
+ 		tailroom = rte_pktmbuf_tailroom(pkt);
+ 		if (likely((headroom >= 24) &&
+ 		    (tailroom >= 8))) {
+-			/* In 83XX this is prerequivisit for Direct mode */
++			/* In 83XX this is prerequisite for Direct mode */
+ 			*flags |= SINGLE_BUF_HEADTAILROOM;
+ 		}
+ 		param->bufs[0].vaddr = seg_data;
+diff --git a/dpdk/drivers/common/cpt/cpt_ucode_asym.h b/dpdk/drivers/common/cpt/cpt_ucode_asym.h
+index a67ded642a..f0b5dddd8c 100644
+--- a/dpdk/drivers/common/cpt/cpt_ucode_asym.h
++++ b/dpdk/drivers/common/cpt/cpt_ucode_asym.h
+@@ -779,7 +779,7 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+ 	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
+ 	 * ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
+ 	 * prime len, order len)).
+-	 * Please note sign, public key and order can not excede prime length
++	 * Please note sign, public key and order can not exceed prime length
+ 	 * i.e. 6 * p_align
+ 	 */
+ 	dlen = sizeof(fpm_table_iova) + m_align + (8 * p_align);
+diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/algo.h b/dpdk/drivers/common/dpaax/caamflib/desc/algo.h
+index 6bb915054a..e0848f0940 100644
+--- a/dpdk/drivers/common/dpaax/caamflib/desc/algo.h
++++ b/dpdk/drivers/common/dpaax/caamflib/desc/algo.h
+@@ -67,7 +67,7 @@ cnstr_shdsc_zuce(uint32_t *descbuf, bool ps, bool swap,
+  * @authlen: size of digest
+  *
+  * The IV prepended before hmac payload must be 8 bytes consisting
+- * of COUNT||BEAERER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
++ * of COUNT||BEARER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
+  * direction is of 1 bit - totalling to 38 bits.
+  *
+  * Return: size of descriptor written in words or negative number on error
+diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
+index 8e8daf5ba8..2c9c631cfd 100644
+--- a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
++++ b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
+@@ -3795,7 +3795,7 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
+ 			return -ENOTSUP;
+ 		}
+ 		iv[0] = 0xFFFFFFFF;
+-		iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
++		iv[1] = swab32(0xFC000000);
+ 		iv[2] = 0x00000000; /* unused */
+ 
+ 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h
+index b2497a5424..07f55b5b40 100644
+--- a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h
++++ b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h
+@@ -492,10 +492,10 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
+ 
+ 	/* Set the variable size of data the register will write */
+ 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+-		/* We will add the interity data so add its length */
++		/* We will add the integrity data, so add its length */
+ 		MATHI(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ 	} else {
+-		/* We will check the interity data so remove its length */
++		/* We will check the integrity data, so remove its length */
+ 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ 		/* Do not take the ICV in the out-snooping configuration */
+ 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4, IMMED2);
+@@ -803,7 +803,7 @@ static inline int pdcp_sdap_insert_no_snoop_op(
+ 		     CLRW_CLR_C1MODE,
+ 		     CLRW, 0, 4, IMMED);
+ 
+-		/* Load the key for authentcation */
++		/* Load the key for authentication */
+ 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ 		    authdata->keylen, INLINE_KEY(authdata));
+ 
+diff --git a/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/dpdk/drivers/common/dpaax/dpaax_iova_table.c
+index 3d661102cc..9daac4bc03 100644
+--- a/dpdk/drivers/common/dpaax/dpaax_iova_table.c
++++ b/dpdk/drivers/common/dpaax/dpaax_iova_table.c
+@@ -261,7 +261,7 @@ dpaax_iova_table_depopulate(void)
+ 	rte_free(dpaax_iova_table_p->entries);
+ 	dpaax_iova_table_p = NULL;
+ 
+-	DPAAX_DEBUG("IOVA Table cleanedup");
++	DPAAX_DEBUG("IOVA Table cleaned");
+ }
+ 
+ int
+diff --git a/dpdk/drivers/common/iavf/iavf_type.h b/dpdk/drivers/common/iavf/iavf_type.h
+index 51267ca3b3..1cd87587d6 100644
+--- a/dpdk/drivers/common/iavf/iavf_type.h
++++ b/dpdk/drivers/common/iavf/iavf_type.h
+@@ -1006,7 +1006,7 @@ struct iavf_profile_tlv_section_record {
+ 	u8 data[12];
+ };
+ 
+-/* Generic AQ section in proflie */
++/* Generic AQ section in profile */
+ struct iavf_profile_aq_section {
+ 	u16 opcode;
+ 	u16 flags;
+diff --git a/dpdk/drivers/common/iavf/virtchnl.h b/dpdk/drivers/common/iavf/virtchnl.h
+index 269578f7c0..80e754a1b2 100644
+--- a/dpdk/drivers/common/iavf/virtchnl.h
++++ b/dpdk/drivers/common/iavf/virtchnl.h
+@@ -233,7 +233,7 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
+ 	case VIRTCHNL_OP_DCF_CMD_DESC:
+ 		return "VIRTCHNL_OP_DCF_CMD_DESC";
+ 	case VIRTCHNL_OP_DCF_CMD_BUFF:
+-		return "VIRTCHHNL_OP_DCF_CMD_BUFF";
++		return "VIRTCHNL_OP_DCF_CMD_BUFF";
+ 	case VIRTCHNL_OP_DCF_DISABLE:
+ 		return "VIRTCHNL_OP_DCF_DISABLE";
+ 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
+diff --git a/dpdk/drivers/common/iavf/virtchnl_inline_ipsec.h b/dpdk/drivers/common/iavf/virtchnl_inline_ipsec.h
+index 1e9134501e..2f4bf15725 100644
+--- a/dpdk/drivers/common/iavf/virtchnl_inline_ipsec.h
++++ b/dpdk/drivers/common/iavf/virtchnl_inline_ipsec.h
+@@ -446,6 +446,15 @@ struct virtchnl_ipsec_sp_cfg {
+ 
+ 	/* Set TC (congestion domain) if true. For future use. */
+ 	u8 set_tc;
++
++	/* 0 for NAT-T unsupported, 1 for NAT-T supported */
++	u8 is_udp;
++
++	/* reserved */
++	u8 reserved;
++
++	/* NAT-T UDP port number. Only valid when NAT-T is supported */
++	u16 udp_port;
+ } __rte_packed;
+ 
+ 
+diff --git a/dpdk/drivers/common/mlx5/linux/meson.build b/dpdk/drivers/common/mlx5/linux/meson.build
+index 7909f23e21..4c7b53b9bd 100644
+--- a/dpdk/drivers/common/mlx5/linux/meson.build
++++ b/dpdk/drivers/common/mlx5/linux/meson.build
+@@ -36,7 +36,7 @@ foreach libname:libnames
+ endforeach
+ if static_ibverbs or dlopen_ibverbs
+     # Build without adding shared libs to Requires.private
+-    ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout()
++    ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs', check: true).stdout()
+     ext_deps += declare_dependency(compile_args: ibv_cflags.split())
+ endif
+ if static_ibverbs
+diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c
+index 0d3e24e04e..eeb583a553 100644
+--- a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c
++++ b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c
+@@ -456,21 +456,33 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
+ 	int n;
+ 	struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
+ 	struct ibv_device *ibv_match = NULL;
++	uint8_t guid1[32] = {0};
++	uint8_t guid2[32] = {0};
++	int ret1, ret2 = -1;
++	struct rte_pci_addr paddr;
+ 
+-	if (ibv_list == NULL) {
++	if (ibv_list == NULL || !n) {
+ 		rte_errno = ENOSYS;
++		if (ibv_list)
++			mlx5_glue->free_device_list(ibv_list);
+ 		return NULL;
+ 	}
++	ret1 = mlx5_get_device_guid(addr, guid1, sizeof(guid1));
+ 	while (n-- > 0) {
+-		struct rte_pci_addr paddr;
+-
+ 		DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
+ 		if (mlx5_get_pci_addr(ibv_list[n]->ibdev_path, &paddr) != 0)
+ 			continue;
+-		if (rte_pci_addr_cmp(addr, &paddr) != 0)
+-			continue;
+-		ibv_match = ibv_list[n];
+-		break;
++		if (ret1 > 0)
++			ret2 = mlx5_get_device_guid(&paddr, guid2, sizeof(guid2));
++		/* Bond device can bond secondary PCIe */
++		if ((strstr(ibv_list[n]->name, "bond") &&
++		    ((ret1 > 0 && ret2 > 0 && !memcmp(guid1, guid2, sizeof(guid1))) ||
++		    (addr->domain == paddr.domain && addr->bus == paddr.bus &&
++		     addr->devid == paddr.devid))) ||
++		     !rte_pci_addr_cmp(addr, &paddr)) {
++			ibv_match = ibv_list[n];
++			break;
++		}
+ 	}
+ 	if (ibv_match == NULL) {
+ 		DRV_LOG(WARNING,
+@@ -487,7 +499,7 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
+ static int
+ mlx5_nl_roce_disable(const char *addr)
+ {
+-	int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
++	int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC, 0);
+ 	int devlink_id;
+ 	int enable;
+ 	int ret;
+diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.h b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.h
+index 83066e752d..a6190a34e6 100644
+--- a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.h
++++ b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.h
+@@ -9,6 +9,7 @@
+ #include <malloc.h>
+ 
+ #include <rte_pci.h>
++#include <rte_bus_pci.h>
+ #include <rte_debug.h>
+ #include <rte_atomic.h>
+ #include <rte_log.h>
+@@ -300,6 +301,7 @@ mlx5_set_context_attr(struct rte_device *dev, struct ibv_context *ctx);
+  *  0 if OFED doesn't support.
+  *  >0 if success.
+  */
++__rte_internal
+ int
+ mlx5_get_device_guid(const struct rte_pci_addr *dev, uint8_t *guid, size_t len);
+ 
+diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_nl.c b/dpdk/drivers/common/mlx5/linux/mlx5_nl.c
+index fd4c2d2625..5d04857b38 100644
+--- a/dpdk/drivers/common/mlx5/linux/mlx5_nl.c
++++ b/dpdk/drivers/common/mlx5/linux/mlx5_nl.c
+@@ -185,19 +185,22 @@ uint32_t atomic_sn;
+  *
+  * @param protocol
+  *   Netlink protocol (e.g. NETLINK_ROUTE, NETLINK_RDMA).
++ * @param groups
++ *   Groups to listen (e.g. RTMGRP_LINK), can be 0.
+  *
+  * @return
+  *   A file descriptor on success, a negative errno value otherwise and
+  *   rte_errno is set.
+  */
+ int
+-mlx5_nl_init(int protocol)
++mlx5_nl_init(int protocol, int groups)
+ {
+ 	int fd;
+ 	int buf_size;
+ 	socklen_t opt_size;
+ 	struct sockaddr_nl local = {
+ 		.nl_family = AF_NETLINK,
++		.nl_groups = groups,
+ 	};
+ 	int ret;
+ 
+@@ -1862,3 +1865,100 @@ mlx5_nl_enable_roce_set(int nlsk_fd, int family_id, const char *pci_addr,
+ 	/* Now, need to reload the driver. */
+ 	return mlx5_nl_driver_reload(nlsk_fd, family_id, pci_addr);
+ }
++
++/**
++ * Try to parse a Netlink message as a link status update.
++ *
++ * @param hdr
++ *  Netlink message header.
++ * @param[out] ifindex
++ *  Index of the updated interface.
++ *
++ * @return
++ *  0 on success, negative on failure.
++ */
++int
++mlx5_nl_parse_link_status_update(struct nlmsghdr *hdr, uint32_t *ifindex)
++{
++	struct ifinfomsg *info;
++
++	switch (hdr->nlmsg_type) {
++	case RTM_NEWLINK:
++	case RTM_DELLINK:
++	case RTM_GETLINK:
++	case RTM_SETLINK:
++		info = NLMSG_DATA(hdr);
++		*ifindex = info->ifi_index;
++		return 0;
++	}
++	return -1;
++}
++
++/**
++ * Read pending events from a Netlink socket.
++ *
++ * @param nlsk_fd
++ *  Netlink socket.
++ * @param cb
++ *  Callback invoked for each of the events.
++ * @param cb_arg
++ *  User data for the callback.
++ *
++ * @return
++ *  0 on success, including the case when there are no events.
++ *  Negative on failure and rte_errno is set.
++ */
++int
++mlx5_nl_read_events(int nlsk_fd, mlx5_nl_event_cb *cb, void *cb_arg)
++{
++	char buf[8192];
++	struct sockaddr_nl addr;
++	struct iovec iov = {
++		.iov_base = buf,
++		.iov_len = sizeof(buf),
++	};
++	struct msghdr msg = {
++		.msg_name = &addr,
++		.msg_namelen = sizeof(addr),
++		.msg_iov = &iov,
++		.msg_iovlen = 1,
++	};
++	struct nlmsghdr *hdr;
++	ssize_t size;
++
++	while (1) {
++		size = recvmsg(nlsk_fd, &msg, MSG_DONTWAIT);
++		if (size < 0) {
++			if (errno == EAGAIN)
++				return 0;
++			if (errno == EINTR)
++				continue;
++			DRV_LOG(DEBUG, "Failed to receive netlink message: %s",
++				strerror(errno));
++			rte_errno = errno;
++			return -rte_errno;
++		}
++		hdr = (struct nlmsghdr *)buf;
++		while (size >= (ssize_t)sizeof(*hdr)) {
++			ssize_t msg_len = hdr->nlmsg_len;
++			ssize_t data_len = msg_len - sizeof(*hdr);
++			ssize_t aligned_len;
++
++			if (data_len < 0) {
++				DRV_LOG(DEBUG, "Netlink message too short");
++				rte_errno = EINVAL;
++				return -rte_errno;
++			}
++			aligned_len = NLMSG_ALIGN(msg_len);
++			if (aligned_len > size) {
++				DRV_LOG(DEBUG, "Netlink message too long");
++				rte_errno = EINVAL;
++				return -rte_errno;
++			}
++			cb(hdr, cb_arg);
++			hdr = RTE_PTR_ADD(hdr, aligned_len);
++			size -= aligned_len;
++		}
++	}
++	return 0;
++}
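
The two helpers added above implement the standard rtnetlink listener pattern: bind an AF_NETLINK socket with nl_groups set (the purpose of the new groups argument), drain the socket, and walk the nlmsghdr chain with length checks. A minimal standalone sketch of the same flow in plain libc, not the driver's wrappers, is:

/* Minimal rtnetlink link-status listener; mirrors what mlx5_nl_init(protocol,
 * groups) plus mlx5_nl_read_events() do above. Standalone sketch, not driver
 * code; the driver variant uses MSG_DONTWAIT from an interrupt handler. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl local = {
		.nl_family = AF_NETLINK,
		.nl_groups = RTMGRP_LINK,	/* the new "groups" argument */
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
		return 1;
	for (;;) {
		ssize_t size = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *hdr = (struct nlmsghdr *)buf;

		if (size < 0)
			break;
		/* NLMSG_OK/NLMSG_NEXT perform the same bounds checks that
		 * mlx5_nl_read_events() codes by hand. */
		for (; NLMSG_OK(hdr, size); hdr = NLMSG_NEXT(hdr, size)) {
			if (hdr->nlmsg_type == RTM_NEWLINK ||
			    hdr->nlmsg_type == RTM_DELLINK) {
				struct ifinfomsg *info = NLMSG_DATA(hdr);

				printf("link event on ifindex %d\n",
				       info->ifi_index);
			}
		}
	}
	close(fd);
	return 0;
}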
+diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_nl.h b/dpdk/drivers/common/mlx5/linux/mlx5_nl.h
+index 2063c0deeb..0b7552338a 100644
+--- a/dpdk/drivers/common/mlx5/linux/mlx5_nl.h
++++ b/dpdk/drivers/common/mlx5/linux/mlx5_nl.h
+@@ -11,6 +11,7 @@
+ 
+ #include "mlx5_common.h"
+ 
++typedef void (mlx5_nl_event_cb)(struct nlmsghdr *hdr, void *user_data);
+ 
+ /* VLAN netdev for VLAN workaround. */
+ struct mlx5_nl_vlan_dev {
+@@ -30,7 +31,7 @@ struct mlx5_nl_vlan_vmwa_context {
+ };
+ 
+ __rte_internal
+-int mlx5_nl_init(int protocol);
++int mlx5_nl_init(int protocol, int groups);
+ __rte_internal
+ int mlx5_nl_mac_addr_add(int nlsk_fd, unsigned int iface_idx, uint64_t *mac_own,
+ 			 struct rte_ether_addr *mac, uint32_t index);
+@@ -75,4 +76,9 @@ int mlx5_nl_enable_roce_get(int nlsk_fd, int family_id, const char *pci_addr,
+ int mlx5_nl_enable_roce_set(int nlsk_fd, int family_id, const char *pci_addr,
+ 			    int enable);
+ 
++__rte_internal
++int mlx5_nl_read_events(int nlsk_fd, mlx5_nl_event_cb *cb, void *cb_arg);
++__rte_internal
++int mlx5_nl_parse_link_status_update(struct nlmsghdr *hdr, uint32_t *ifindex);
++
+ #endif /* RTE_PMD_MLX5_NL_H_ */
+diff --git a/dpdk/drivers/common/mlx5/mlx5_common.c b/dpdk/drivers/common/mlx5/mlx5_common.c
+index f1650f94c6..4faae6c86d 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_common.c
++++ b/dpdk/drivers/common/mlx5/mlx5_common.c
+@@ -111,6 +111,11 @@ mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
+ 	struct mlx5_common_dev_config *config = opaque;
+ 	signed long tmp;
+ 
++	if (val == NULL || *val == '\0') {
++		DRV_LOG(ERR, "Key %s is missing a value.", key);
++		rte_errno = EINVAL;
++		return -rte_errno;
++	}
+ 	errno = 0;
+ 	tmp = strtol(val, NULL, 0);
+ 	if (errno) {
+@@ -616,7 +621,6 @@ drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
+ 	unsigned int i = 0;
+ 	int ret = 0;
+ 
+-	enabled_classes &= cdev->classes_loaded;
+ 	while (enabled_classes) {
+ 		driver = driver_get(RTE_BIT64(i));
+ 		if (driver != NULL) {
+@@ -640,7 +644,7 @@ drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
+ 	struct mlx5_class_driver *driver;
+ 	uint32_t enabled_classes = 0;
+ 	bool already_loaded;
+-	int ret;
++	int ret = -EINVAL;
+ 
+ 	TAILQ_FOREACH(driver, &drivers_list, next) {
+ 		if ((driver->drv_class & user_classes) == 0)
+@@ -662,12 +666,16 @@ drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
+ 		}
+ 		enabled_classes |= driver->drv_class;
+ 	}
+-	cdev->classes_loaded |= enabled_classes;
+-	return 0;
++	if (!ret) {
++		cdev->classes_loaded |= enabled_classes;
++		return 0;
++	}
+ probe_err:
+-	/* Only unload drivers which are enabled which were enabled
+-	 * in this probe instance.
++	/*
++	 * On failure, remove only the drivers that this probe instance newly
++	 * enabled; classes loaded by earlier probe calls must stay registered.
+ 	 */
++	enabled_classes &= ~cdev->classes_loaded;
+ 	drivers_remove(cdev, enabled_classes);
+ 	return ret;
+ }
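
The corrected rollback above is pure bit arithmetic: of the classes this probe call enabled, only those not already recorded in cdev->classes_loaded may be torn down, so drivers probed by an earlier call survive the failure. A tiny standalone illustration of the mask, with invented class values:

/* Sketch of the rollback mask computed in drivers_probe() above; the class
 * bit values are made up for the example. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t classes_loaded  = 0x1;        /* ETH loaded by an earlier probe */
	uint32_t enabled_classes = 0x1 | 0x4;  /* this probe touched ETH + REGEX */

	/* On failure, remove only what this probe newly enabled: */
	uint32_t to_remove = enabled_classes & ~classes_loaded;

	assert(to_remove == 0x4);              /* REGEX goes, ETH stays loaded */
	return 0;
}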
+@@ -754,6 +762,7 @@ mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
+ 			uint64_t iova __rte_unused, size_t len)
+ {
+ 	struct mlx5_common_device *dev;
++	struct mlx5_mr_btree *bt;
+ 	struct mlx5_mr *mr;
+ 
+ 	dev = to_mlx5_device(rte_dev);
+@@ -771,7 +780,36 @@ mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
+ 		rte_errno = EINVAL;
+ 		return -1;
+ 	}
++try_insert:
+ 	rte_rwlock_write_lock(&dev->mr_scache.rwlock);
++	bt = &dev->mr_scache.cache;
++	if (bt->len == bt->size) {
++		uint32_t size;
++		int ret;
++
++		size = bt->size + 1;
++		MLX5_ASSERT(size > bt->size);
++		/*
++		 * Avoid deadlock (numbers show the sequence of events):
++		 *    mlx5_mr_create_primary():
++		 *        1) take EAL memory lock
++		 *        3) take MR lock
++		 *    this function:
++		 *        2) take MR lock
++		 *        4) take EAL memory lock while allocating the new cache
++		 * Releasing the MR lock before step 4
++		 * allows another thread to execute step 3.
++		 */
++		rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
++		ret = mlx5_mr_expand_cache(&dev->mr_scache, size,
++					   rte_dev->numa_node);
++		if (ret < 0) {
++			mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
++			rte_errno = ret;
++			return -1;
++		}
++		goto try_insert;
++	}
+ 	LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
+ 	/* Insert to the global cache table. */
+ 	mlx5_mr_insert_cache(&dev->mr_scache, mr);
+@@ -854,7 +892,7 @@ static void mlx5_common_driver_init(void)
+ static bool mlx5_common_initialized;
+ 
+ /**
+- * One time innitialization routine for run-time dependency on glue library
++ * One time initialization routine for run-time dependency on glue library
+  * for multiple PMDs. Each mlx5 PMD that depends on mlx5_common module,
+  * must invoke in its constructor.
+  */
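
The numbered deadlock comment in mlx5_common_dev_dma_map() above describes a lock-ordering inversion: one path takes the EAL memory lock and then the MR lock, while this path used to do the reverse. The fix is the usual drop-grow-retry idiom: release the inner lock before any call that may take the outer one, then re-check state after reacquiring. A generic pthreads sketch of the idiom, independent of the mlx5 structures:

/* Generic drop-grow-retry idiom used by the try_insert loop above. Minimal
 * sketch; grow() stands in for mlx5_mr_expand_cache(), which may take other
 * locks internally. */
#include <assert.h>
#include <pthread.h>

struct cache {
	pthread_rwlock_t lock;
	unsigned int len, size;
};

static int grow(struct cache *c, unsigned int want)
{
	pthread_rwlock_wrlock(&c->lock);	/* may also take "outer" locks */
	if (c->size < want)
		c->size = want;
	pthread_rwlock_unlock(&c->lock);
	return 0;
}

static int insert_with_retry(struct cache *c)
{
retry:
	pthread_rwlock_wrlock(&c->lock);
	if (c->len == c->size) {
		unsigned int want = c->size + 1;

		/* Drop our lock before calling grow(): it may block on a lock
		 * held by a thread that is itself waiting for ours. */
		pthread_rwlock_unlock(&c->lock);
		if (grow(c, want) < 0)
			return -1;
		goto retry;	/* state may have changed while unlocked */
	}
	c->len++;		/* ... insert under the lock ... */
	pthread_rwlock_unlock(&c->lock);
	return 0;
}

int main(void)
{
	struct cache c = { PTHREAD_RWLOCK_INITIALIZER, 1, 1 };

	assert(insert_with_retry(&c) == 0 && c.len == 2);
	return 0;
}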
+diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.c b/dpdk/drivers/common/mlx5/mlx5_common_mr.c
+index c694aaf28c..26fdf22386 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_common_mr.c
++++ b/dpdk/drivers/common/mlx5/mlx5_common_mr.c
+@@ -78,7 +78,7 @@ mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+  *   0 on success, -1 on failure.
+  */
+ static int
+-mr_btree_expand(struct mlx5_mr_btree *bt, int n)
++mr_btree_expand(struct mlx5_mr_btree *bt, uint32_t n)
+ {
+ 	void *mem;
+ 	int ret = 0;
+@@ -123,11 +123,11 @@ mr_btree_expand(struct mlx5_mr_btree *bt, int n)
+  *   Searched LKey on success, UINT32_MAX on no match.
+  */
+ static uint32_t
+-mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
++mr_btree_lookup(struct mlx5_mr_btree *bt, uint32_t *idx, uintptr_t addr)
+ {
+ 	struct mr_cache_entry *lkp_tbl;
+-	uint16_t n;
+-	uint16_t base = 0;
++	uint32_t n;
++	uint32_t base = 0;
+ 
+ 	MLX5_ASSERT(bt != NULL);
+ 	lkp_tbl = *bt->table;
+@@ -137,7 +137,7 @@ mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
+ 				    lkp_tbl[0].lkey == UINT32_MAX));
+ 	/* Binary search. */
+ 	do {
+-		register uint16_t delta = n >> 1;
++		register uint32_t delta = n >> 1;
+ 
+ 		if (addr < lkp_tbl[base + delta].start) {
+ 			n = delta;
+@@ -169,7 +169,7 @@ static int
+ mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
+ {
+ 	struct mr_cache_entry *lkp_tbl;
+-	uint16_t idx = 0;
++	uint32_t idx = 0;
+ 	size_t shift;
+ 
+ 	MLX5_ASSERT(bt != NULL);
+@@ -185,11 +185,8 @@ mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
+ 		/* Already exist, return. */
+ 		return 0;
+ 	}
+-	/* If table is full, return error. */
+-	if (unlikely(bt->len == bt->size)) {
+-		bt->overflow = 1;
+-		return -1;
+-	}
++	/* Caller must ensure that there is enough room for a new entry. */
++	MLX5_ASSERT(bt->len < bt->size);
+ 	/* Insert entry. */
+ 	++idx;
+ 	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
+@@ -273,7 +270,7 @@ void
+ mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
+ {
+ #ifdef RTE_LIBRTE_MLX5_DEBUG
+-	int idx;
++	uint32_t idx;
+ 	struct mr_cache_entry *lkp_tbl;
+ 
+ 	if (bt == NULL)
+@@ -409,13 +406,8 @@ mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
+ 		n = mr_find_next_chunk(mr, &entry, n);
+ 		if (!entry.end)
+ 			break;
+-		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
+-			/*
+-			 * Overflowed, but the global table cannot be expanded
+-			 * because of deadlock.
+-			 */
++		if (mr_btree_insert(&share_cache->cache, &entry) < 0)
+ 			return -1;
+-		}
+ 	}
+ 	return 0;
+ }
+@@ -477,26 +469,12 @@ static uint32_t
+ mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
+ 		     struct mr_cache_entry *entry, uintptr_t addr)
+ {
+-	uint16_t idx;
+-	uint32_t lkey = UINT32_MAX;
+-	struct mlx5_mr *mr;
++	uint32_t idx;
++	uint32_t lkey;
+ 
+-	/*
+-	 * If the global cache has overflowed since it failed to expand the
+-	 * B-tree table, it can't have all the existing MRs. Then, the address
+-	 * has to be searched by traversing the original MR list instead, which
+-	 * is very slow path. Otherwise, the global cache is all inclusive.
+-	 */
+-	if (!unlikely(share_cache->cache.overflow)) {
+-		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
+-		if (lkey != UINT32_MAX)
+-			*entry = (*share_cache->cache.table)[idx];
+-	} else {
+-		/* Falling back to the slowest path. */
+-		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
+-		if (mr != NULL)
+-			lkey = entry->lkey;
+-	}
++	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
++	if (lkey != UINT32_MAX)
++		*entry = (*share_cache->cache.table)[idx];
+ 	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
+ 					   addr < entry->end));
+ 	return lkey;
+@@ -529,7 +507,6 @@ mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
+ 	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
+ 	/* Flush cache to rebuild. */
+ 	share_cache->cache.len = 1;
+-	share_cache->cache.overflow = 0;
+ 	/* Iterate all the existing MRs. */
+ 	LIST_FOREACH(mr, &share_cache->mr_list, mr)
+ 		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
+@@ -585,6 +562,74 @@ mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ 	return 1;
+ }
+ 
++/**
++ * Get the number of virtually-contiguous chunks in the MR.
++ * HW MR does not need to be already created to use this function.
++ *
++ * @param mr
++ *   Pointer to the MR.
++ *
++ * @return
++ *   Number of chunks.
++ */
++static uint32_t
++mr_get_chunk_count(const struct mlx5_mr *mr)
++{
++	uint32_t i, count = 0;
++	bool was_in_chunk = false;
++	bool is_in_chunk;
++
++	/* There is only one chunk in case of external memory. */
++	if (mr->msl == NULL)
++		return 1;
++	for (i = 0; i < mr->ms_bmp_n; i++) {
++		is_in_chunk = rte_bitmap_get(mr->ms_bmp, i);
++		if (!was_in_chunk && is_in_chunk)
++			count++;
++		was_in_chunk = is_in_chunk;
++	}
++	return count;
++}
++
++/**
++ * Thread-safely expand the global MR cache to at least @p size slots.
++ *
++ * @param share_cache
++ *  Shared MR cache for locking.
++ * @param new_size
++ *  Desired cache size.
++ * @param socket
++ *  NUMA node.
++ *
++ * @return
++ *  0 on success, negative on failure and rte_errno is set.
++ */
++int
++mlx5_mr_expand_cache(struct mlx5_mr_share_cache *share_cache,
++		     uint32_t size, int socket)
++{
++	struct mlx5_mr_btree cache = {0};
++	struct mlx5_mr_btree *bt;
++	struct mr_cache_entry *lkp_tbl;
++	int ret;
++
++	size = rte_align32pow2(size);
++	ret = mlx5_mr_btree_init(&cache, size, socket);
++	if (ret < 0)
++		return ret;
++	rte_rwlock_write_lock(&share_cache->rwlock);
++	bt = &share_cache->cache;
++	lkp_tbl = *bt->table;
++	if (cache.size > bt->size) {
++		rte_memcpy(cache.table, lkp_tbl, bt->len * sizeof(lkp_tbl[0]));
++		RTE_SWAP(*bt, cache);
++		DRV_LOG(DEBUG, "Global MR cache expanded to %u slots", size);
++	}
++	rte_rwlock_write_unlock(&share_cache->rwlock);
++	mlx5_mr_btree_free(&cache);
++	return 0;
++}
++
+ /**
+  * Create a new global Memory Region (MR) for a missing virtual address.
+  * This API should be called on a secondary process, then a request is sent to
+@@ -660,12 +705,14 @@ mlx5_mr_create_primary(void *pd,
+ 	struct mr_find_contig_memsegs_data data_re;
+ 	const struct rte_memseg_list *msl;
+ 	const struct rte_memseg *ms;
++	struct mlx5_mr_btree *bt;
+ 	struct mlx5_mr *mr = NULL;
+ 	int ms_idx_shift = -1;
+ 	uint32_t bmp_size;
+ 	void *bmp_mem;
+ 	uint32_t ms_n;
+ 	uint32_t n;
++	uint32_t chunks_n;
+ 	size_t len;
+ 
+ 	DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
+@@ -677,6 +724,7 @@ mlx5_mr_create_primary(void *pd,
+ 	 * is quite opportunistic.
+ 	 */
+ 	mlx5_mr_garbage_collect(share_cache);
++find_range:
+ 	/*
+ 	 * If enabled, find out a contiguous virtual address chunk in use, to
+ 	 * which the given address belongs, in order to register maximum range.
+@@ -828,6 +876,33 @@ mlx5_mr_create_primary(void *pd,
+ 	len = data.end - data.start;
+ 	mr->ms_bmp_n = len / msl->page_sz;
+ 	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
++	/*
++	 * It is now known how many entries will be used in the global cache.
++	 * If there is not enough, expand the cache.
++	 * This cannot be done while holding the memory hotplug lock.
++	 * While it is released, memory layout may change,
++	 * so the process must be repeated from the beginning.
++	 */
++	bt = &share_cache->cache;
++	chunks_n = mr_get_chunk_count(mr);
++	if (bt->len + chunks_n > bt->size) {
++		struct mlx5_common_device *cdev;
++		uint32_t size;
++
++		size = bt->size + chunks_n;
++		MLX5_ASSERT(size > bt->size);
++		cdev = container_of(share_cache, struct mlx5_common_device,
++				    mr_scache);
++		rte_rwlock_write_unlock(&share_cache->rwlock);
++		rte_mcfg_mem_read_unlock();
++		if (mlx5_mr_expand_cache(share_cache, size,
++					 cdev->dev->numa_node) < 0) {
++			DRV_LOG(ERR, "Failed to expand global MR cache to %u slots",
++				size);
++			goto err_nolock;
++		}
++		goto find_range;
++	}
+ 	/*
+ 	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
+ 	 * be called with holding the memory lock because it doesn't use
+@@ -938,7 +1013,7 @@ mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
+ 		container_of(share_cache, struct mlx5_common_device, mr_scache);
+ 	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ 	uint32_t lkey;
+-	uint16_t idx;
++	uint32_t idx;
+ 
+ 	/* If local cache table is full, try to double it. */
+ 	if (unlikely(bt->len == bt->size))
+@@ -989,7 +1064,7 @@ static uint32_t
+ mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
+ {
+ 	uint32_t lkey;
+-	uint16_t bh_idx = 0;
++	uint32_t bh_idx = 0;
+ 	/* Victim in top-half cache to replace with new entry. */
+ 	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
+ 
+@@ -1086,7 +1161,6 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+ 	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ 	/* Reset the B-tree table. */
+ 	mr_ctrl->cache_bh.len = 1;
+-	mr_ctrl->cache_bh.overflow = 0;
+ 	/* Update the generation number. */
+ 	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ 	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
+@@ -1290,11 +1364,12 @@ mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
+ 			      unsigned int idx)
+ {
+ 	struct mlx5_range *ranges = opaque, *range = &ranges[idx];
++	uintptr_t start = (uintptr_t)memhdr->addr;
+ 	uint64_t page_size = rte_mem_page_size();
+ 
+ 	RTE_SET_USED(mp);
+-	range->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);
+-	range->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);
++	range->start = RTE_ALIGN_FLOOR(start, page_size);
++	range->end = RTE_ALIGN_CEIL(start + memhdr->len, page_size);
+ }
+ 
+ /**
+@@ -1541,7 +1616,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
+  * Destroy a mempool registration object.
+  *
+  * @param standalone
+- *   Whether @p mpr owns its MRs excludively, i.e. they are not shared.
++ *   Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
+  */
+ static void
+ mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
+@@ -1834,12 +1909,13 @@ mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
+ 
+ 	for (i = 0; i < mpr->mrs_n; i++) {
+ 		const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
+-		uintptr_t mr_addr = (uintptr_t)mr->addr;
++		uintptr_t mr_start = (uintptr_t)mr->addr;
++		uintptr_t mr_end = mr_start + mr->len;
+ 
+-		if (mr_addr <= addr) {
++		if (mr_start <= addr && addr < mr_end) {
+ 			lkey = rte_cpu_to_be_32(mr->lkey);
+-			entry->start = mr_addr;
+-			entry->end = mr_addr + mr->len;
++			entry->start = mr_start;
++			entry->end = mr_end;
+ 			entry->lkey = lkey;
+ 			break;
+ 		}
+@@ -1932,7 +2008,7 @@ mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
+ 		struct mlx5_mempool_mr *mr = &mpr->mrs[i];
+ 		struct mr_cache_entry entry;
+ 		uint32_t lkey;
+-		uint16_t idx;
++		uint32_t idx;
+ 
+ 		lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr);
+ 		if (lkey != UINT32_MAX)
+@@ -1970,7 +2046,7 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
+ {
+ 	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
+ 	uint32_t lkey;
+-	uint16_t bh_idx = 0;
++	uint32_t bh_idx = 0;
+ 
+ 	/* Binary-search MR translation table. */
+ 	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
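
mr_get_chunk_count() above counts virtually-contiguous chunks by scanning the memseg bitmap for 0-to-1 transitions; each rising edge starts a new chunk and therefore one future cache entry, which is what lets mlx5_mr_create_primary() size the global cache before inserting. The same edge-counting loop over a plain integer bitmap:

/* Counts contiguous runs of set bits, like mr_get_chunk_count() does over
 * the MR's memseg bitmap. Standalone illustration. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t count_runs(uint32_t bits, uint32_t n)
{
	bool was_in_chunk = false;
	uint32_t count = 0;

	for (uint32_t i = 0; i < n; i++) {
		bool is_in_chunk = (bits >> i) & 1;

		if (!was_in_chunk && is_in_chunk)	/* rising edge */
			count++;
		was_in_chunk = is_in_chunk;
	}
	return count;
}

int main(void)
{
	/* 0b0111001101: three runs of set bits -> three chunks/entries. */
	assert(count_runs(0x1CD, 10) == 3);
	return 0;
}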
+diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.h b/dpdk/drivers/common/mlx5/mlx5_common_mr.h
+index cf384b6748..213f5427cb 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_common_mr.h
++++ b/dpdk/drivers/common/mlx5/mlx5_common_mr.h
+@@ -56,9 +56,8 @@ struct mr_cache_entry {
+ 
+ /* MR Cache table for Binary search. */
+ struct mlx5_mr_btree {
+-	uint16_t len; /* Number of entries. */
+-	uint16_t size; /* Total number of entries. */
+-	int overflow; /* Mark failure of table expansion. */
++	uint32_t len; /* Number of entries. */
++	uint32_t size; /* Total number of entries. */
+ 	struct mr_cache_entry (*table)[];
+ } __rte_packed;
+ 
+@@ -218,6 +217,8 @@ void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
+ __rte_internal
+ uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
+ 			       struct rte_mempool *mp, uintptr_t addr);
++int mlx5_mr_expand_cache(struct mlx5_mr_share_cache *share_cache,
++			 uint32_t new_size, int socket);
+ void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
+ int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
+ void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
+diff --git a/dpdk/drivers/common/mlx5/mlx5_common_utils.c b/dpdk/drivers/common/mlx5/mlx5_common_utils.c
+index 775fabd478..58d744b4d4 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_common_utils.c
++++ b/dpdk/drivers/common/mlx5/mlx5_common_utils.c
+@@ -293,11 +293,9 @@ _mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
+ 			l_const->cb_clone_free(l_const->ctx, entry);
+ 		else
+ 			l_const->cb_remove(l_const->ctx, entry);
+-	} else if (likely(lcore_idx != -1)) {
++	} else {
+ 		__atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 				   1, __ATOMIC_RELAXED);
+-	} else {
+-		return 0;
+ 	}
+ 	if (!l_const->lcores_share) {
+ 		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
+diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
+index e52b995ee3..70a430f134 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
+@@ -823,6 +823,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ {
+ 	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ 	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
++	bool hca_cap_2_sup;
+ 	uint64_t general_obj_types_supported = 0;
+ 	void *hcattr;
+ 	int rc, i;
+@@ -832,6 +833,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 			MLX5_HCA_CAP_OPMOD_GET_CUR);
+ 	if (!hcattr)
+ 		return rc;
++	hca_cap_2_sup = MLX5_GET(cmd_hca_cap, hcattr, hca_cap_2);
+ 	attr->max_wqe_sz_sq = MLX5_GET(cmd_hca_cap, hcattr, max_wqe_sz_sq);
+ 	attr->flow_counter_bulk_alloc_bitmap =
+ 			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
+@@ -967,6 +969,20 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 					 general_obj_types) &
+ 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
+ 	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
++	if (hca_cap_2_sup) {
++		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
++				MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
++				MLX5_HCA_CAP_OPMOD_GET_CUR);
++		if (!hcattr) {
++			DRV_LOG(DEBUG,
++				"Failed to query DevX HCA capabilities 2.");
++			return rc;
++		}
++		attr->log_min_stride_wqe_sz = MLX5_GET(cmd_hca_cap_2, hcattr,
++						       log_min_stride_wqe_sz);
++	}
++	if (attr->log_min_stride_wqe_sz == 0)
++		attr->log_min_stride_wqe_sz = MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
+ 	if (attr->qos.sup) {
+ 		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+ 				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
+@@ -1114,6 +1130,18 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 			goto error;
+ 		}
+ 	}
++	if (attr->eswitch_manager) {
++		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
++				MLX5_SET_HCA_CAP_OP_MOD_ESW |
++				MLX5_HCA_CAP_OPMOD_GET_CUR);
++		if (!hcattr)
++			return rc;
++		attr->esw_mgr_vport_id_valid =
++			MLX5_GET(esw_cap, hcattr,
++				 esw_manager_vport_number_valid);
++		attr->esw_mgr_vport_id =
++			MLX5_GET(esw_cap, hcattr, esw_manager_vport_number);
++	}
+ 	return 0;
+ error:
+ 	rc = (rc > 0) ? -rc : rc;
+@@ -1822,7 +1850,7 @@ mlx5_devx_cmd_create_td(void *ctx)
+  *   Pointer to file stream.
+  *
+  * @return
+- *   0 on success, a nagative value otherwise.
++ *   0 on success, a negative value otherwise.
+  */
+ int
+ mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
+@@ -2263,7 +2291,7 @@ mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
+ 	case MLX5_CMD_OP_RTR2RTS_QP:
+ 		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
+ 		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
+-		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
++		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 16);
+ 		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
+ 		MLX5_SET(qpc, qpc, retry_count, 7);
+ 		MLX5_SET(qpc, qpc, rnr_retry, 7);
+diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
+index d7f71646a3..4373761c29 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
+@@ -251,6 +251,9 @@ struct mlx5_hca_attr {
+ 	uint32_t log_max_mmo_decompress:5;
+ 	uint32_t umr_modify_entity_size_disabled:1;
+ 	uint32_t umr_indirect_mkey_disabled:1;
++	uint32_t log_min_stride_wqe_sz:5;
++	uint32_t esw_mgr_vport_id_valid:1; /* E-Switch Mgr vport ID is valid. */
++	uint16_t esw_mgr_vport_id; /* E-Switch Mgr vport ID. */
+ 	uint16_t max_wqe_sz_sq;
+ };
+ 
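
The log_min_stride_wqe_sz handling above follows a common DevX pattern: read a presence bit (hca_cap_2) from the base capability page, query the extended GENERAL_DEVICE_2 page only when the bit is set, and fall back to a documented default otherwise. A compilable sketch of the control flow; query_caps() and get_field() are hypothetical stand-ins for the DevX mailbox query and MLX5_GET(), stubbed here to simulate a device without hca_cap_2:

/* Capability-gating pattern from mlx5_devx_cmd_query_hca_attr() above.
 * query_caps()/get_field() are invented stand-ins, not real DevX calls. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEF_LOG_MIN_STRIDE_WQE_SZ 14U /* MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE */

struct hca_attr { uint32_t log_min_stride_wqe_sz; };

static bool query_caps(int op_mod, void **caps)
{
	static uint32_t page;	/* all-zero capability page */
	(void)op_mod;
	*caps = &page;
	return true;
}

static uint32_t get_field(void *caps, const char *name)
{
	(void)caps; (void)name;
	return 0;	/* simulate: hca_cap_2 not supported */
}

static int fill_attr(struct hca_attr *attr)
{
	void *caps;

	if (!query_caps(0x0, &caps))		/* GENERAL_DEVICE page */
		return -1;
	if (get_field(caps, "hca_cap_2")) {	/* presence bit */
		if (!query_caps(0x20, &caps))	/* GENERAL_DEVICE_2 page */
			return -1;
		attr->log_min_stride_wqe_sz =
			get_field(caps, "log_min_stride_wqe_sz");
	}
	if (attr->log_min_stride_wqe_sz == 0)	/* unset: apply the default */
		attr->log_min_stride_wqe_sz = DEF_LOG_MIN_STRIDE_WQE_SZ;
	return 0;
}

int main(void)
{
	struct hca_attr attr = {0};

	assert(fill_attr(&attr) == 0);
	assert(attr.log_min_stride_wqe_sz == DEF_LOG_MIN_STRIDE_WQE_SZ);
	return 0;
}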
+diff --git a/dpdk/drivers/common/mlx5/mlx5_malloc.c b/dpdk/drivers/common/mlx5/mlx5_malloc.c
+index b19501e1bc..cef3b88e11 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_malloc.c
++++ b/dpdk/drivers/common/mlx5/mlx5_malloc.c
+@@ -58,7 +58,7 @@ static struct mlx5_sys_mem mlx5_sys_mem = {
+  * Check if the address belongs to memory seg list.
+  *
+  * @param addr
+- *   Memory address to be ckeced.
++ *   Memory address to be checked.
+  * @param msl
+  *   Memory seg list.
+  *
+@@ -109,7 +109,7 @@ mlx5_mem_update_msl(void *addr)
+  * Check if the address belongs to rte memory.
+  *
+  * @param addr
+- *   Memory address to be ckeced.
++ *   Memory address to be checked.
+  *
+  * @return
+  *   True if it belongs, false otherwise.
+diff --git a/dpdk/drivers/common/mlx5/mlx5_malloc.h b/dpdk/drivers/common/mlx5/mlx5_malloc.h
+index 74b7eeb26e..92149f7b92 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_malloc.h
++++ b/dpdk/drivers/common/mlx5/mlx5_malloc.h
+@@ -19,7 +19,7 @@ extern "C" {
+ 
+ enum mlx5_mem_flags {
+ 	MLX5_MEM_ANY = 0,
+-	/* Memory will be allocated dpends on sys_mem_en. */
++	/* Memory will be allocated depends on sys_mem_en. */
+ 	MLX5_MEM_SYS = 1 << 0,
+ 	/* Memory should be allocated from system. */
+ 	MLX5_MEM_RTE = 1 << 1,
+diff --git a/dpdk/drivers/common/mlx5/mlx5_prm.h b/dpdk/drivers/common/mlx5/mlx5_prm.h
+index 2ded67e85e..58aa72df64 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_prm.h
++++ b/dpdk/drivers/common/mlx5/mlx5_prm.h
+@@ -249,6 +249,9 @@
+ /* Maximum number of DS in WQE. Limited by 6-bit field. */
+ #define MLX5_DSEG_MAX 63
+ 
++/* Byte offset of the 32-bit syndrome in struct mlx5_err_cqe. */
++#define MLX5_ERROR_CQE_SYNDROME_OFFSET 52
++
+ /* The completion mode offset in the WQE control segment line 2. */
+ #define MLX5_COMP_MODE_OFFSET 2
+ 
+@@ -264,6 +267,9 @@
+ /* The maximum log value of segments per RQ WQE. */
+ #define MLX5_MAX_LOG_RQ_SEGS 5u
+ 
++/* Log 2 of the default size of a WQE for Multi-Packet RQ. */
++#define MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE 14U
++
+ /* The alignment needed for WQ buffer. */
+ #define MLX5_WQE_BUF_ALIGNMENT rte_mem_page_size()
+ 
+@@ -556,6 +562,8 @@ struct mlx5_rdma_write_wqe {
+ #define MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX 15u
+ #define MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX 15u
+ #define MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN 0u
++#define MLX5_GGA_COMP_OUT_OF_SPACE_SYNDROME_BE 0x29D0084
++#define MLX5_GGA_COMP_MISSING_BFINAL_SYNDROME_BE 0x29D0011
+ 
+ struct mlx5_wqe_metadata_seg {
+ 	uint32_t mmo_control_31_0; /* mmo_control_63_32 is in ctrl_seg.imm */
+@@ -1261,6 +1269,7 @@ enum {
+ 	MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,
+ 	MLX5_GET_HCA_CAP_OP_MOD_ROCE = 0x4 << 1,
+ 	MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
++	MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
+ 	MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
+ 	MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP = 0x1C << 1,
+ 	MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
+@@ -1342,7 +1351,9 @@ enum {
+ #define MLX5_STEERING_LOGIC_FORMAT_CONNECTX_6DX 0x1
+ 
+ struct mlx5_ifc_cmd_hca_cap_bits {
+-	u8 reserved_at_0[0x30];
++	u8 reserved_at_0[0x20];
++	u8 hca_cap_2[0x1];
++	u8 reserved_at_21[0xf];
+ 	u8 vhca_id[0x10];
+ 	u8 reserved_at_40[0x20];
+ 	u8 reserved_at_60[0x3];
+@@ -1909,7 +1920,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
+ 	u8 max_reformat_insert_offset[0x8];
+ 	u8 max_reformat_remove_size[0x8];
+ 	u8 max_reformat_remove_offset[0x8]; /* End of DW6. */
+-	u8 aso_conntrack_reg_id[0x8];
++	u8 reserved_at_c0[0x3];
++	u8 log_min_stride_wqe_sz[0x5];
+ 	u8 reserved_at_c8[0x3];
+ 	u8 log_conn_track_granularity[0x5];
+ 	u8 reserved_at_d0[0x3];
+@@ -1920,13 +1932,25 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
+ 	u8 reserved_at_100[0x700];
+ };
+ 
++struct mlx5_ifc_esw_cap_bits {
++	u8 reserved_at_0[0x60];
++
++	u8 esw_manager_vport_number_valid[0x1];
++	u8 reserved_at_61[0xf];
++	u8 esw_manager_vport_number[0x10];
++
++	u8 reserved_at_80[0x780];
++};
++
+ union mlx5_ifc_hca_cap_union_bits {
+ 	struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
++	struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
+ 	struct mlx5_ifc_per_protocol_networking_offload_caps_bits
+ 	       per_protocol_networking_offload_caps;
+ 	struct mlx5_ifc_qos_cap_bits qos_cap;
+ 	struct mlx5_ifc_virtio_emulation_cap_bits vdpa_caps;
+ 	struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
++	struct mlx5_ifc_esw_cap_bits esw_cap;
+ 	struct mlx5_ifc_roce_caps_bits roce_caps;
+ 	u8 reserved_at_0[0x8000];
+ };
+@@ -4172,7 +4196,7 @@ mlx5_flow_mark_get(uint32_t val)
+  *   timestamp format supported by the queue.
+  *
+  * @return
+- *   Converted timstamp format settings.
++ *   Converted timestamp format settings.
+  */
+ static inline uint32_t
+ mlx5_ts_format_conv(uint32_t ts_format)
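
A note on reading mlx5_ifc_esw_cap_bits above: in the mlx5_ifc convention, u8 array sizes are measured in bits, so reserved_at_0[0x60] places esw_manager_vport_number_valid at bit offset 0x60 and the 16-bit vport number at bits 0x70..0x7f of the big-endian capability page; MLX5_GET() derives its offsets from exactly this layout. A toy MSB-first bit extractor in the same spirit (illustration only, not the real macro):

/* Extracts n bits starting at MSB-first bit offset off, like a simplified
 * MLX5_GET() over the layout of mlx5_ifc_esw_cap_bits above. */
#include <assert.h>
#include <stdint.h>

static uint32_t get_bits(const uint8_t *base, unsigned int off, unsigned int n)
{
	uint32_t v = 0;

	for (unsigned int i = 0; i < n; i++) {
		unsigned int bit = off + i;

		v = (v << 1) | ((base[bit / 8] >> (7 - bit % 8)) & 1);
	}
	return v;
}

int main(void)
{
	uint8_t caps[0x100] = {0};	/* 0x800 bits, as the struct declares */

	caps[0x60 / 8] |= 0x80;		/* esw_manager_vport_number_valid = 1 */
	caps[0x78 / 8] = 0x05;		/* esw_manager_vport_number = 5 */
	assert(get_bits(caps, 0x60, 1) == 1);
	assert(get_bits(caps, 0x70, 0x10) == 5);
	return 0;
}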
+diff --git a/dpdk/drivers/common/mlx5/version.map b/dpdk/drivers/common/mlx5/version.map
+index 34e86004a0..130d47ad8c 100644
+--- a/dpdk/drivers/common/mlx5/version.map
++++ b/dpdk/drivers/common/mlx5/version.map
+@@ -79,6 +79,7 @@ INTERNAL {
+ 
+ 	mlx5_free;
+ 
++	mlx5_get_device_guid; # WINDOWS_NO_EXPORT
+ 	mlx5_get_ifname_sysfs; # WINDOWS_NO_EXPORT
+ 	mlx5_get_pci_addr; # WINDOWS_NO_EXPORT
+ 
+@@ -123,9 +124,11 @@ INTERNAL {
+ 	mlx5_nl_mac_addr_flush; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_mac_addr_remove; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_mac_addr_sync; # WINDOWS_NO_EXPORT
++	mlx5_nl_parse_link_status_update; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_port_state; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_portnum; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_promisc; # WINDOWS_NO_EXPORT
++	mlx5_nl_read_events; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_switch_info; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_vf_mac_addr_modify; # WINDOWS_NO_EXPORT
+ 	mlx5_nl_vlan_vmwa_create; # WINDOWS_NO_EXPORT
+@@ -146,4 +149,6 @@ INTERNAL {
+ 	mlx5_mp_req_mempool_reg;
+ 	mlx5_mr_mempool2mr_bh;
+ 	mlx5_mr_mempool_populate_cache;
++
++	local: *;
+ };
+diff --git a/dpdk/drivers/common/mlx5/windows/meson.build b/dpdk/drivers/common/mlx5/windows/meson.build
+index 980f76b11c..edbbaa9ae1 100644
+--- a/dpdk/drivers/common/mlx5/windows/meson.build
++++ b/dpdk/drivers/common/mlx5/windows/meson.build
+@@ -8,8 +8,8 @@ sources += files(
+         'mlx5_common_os.c',
+ )
+ 
+-res_lib = run_command(python3, '-c', 'import os; print(os.environ["DEVX_LIB_PATH"])')
+-res_inc = run_command(python3, '-c', 'import os; print(os.environ["DEVX_INC_PATH"])')
++res_lib = run_command(python3, '-c', 'import os; print(os.environ["DEVX_LIB_PATH"])', check: false)
++res_inc = run_command(python3, '-c', 'import os; print(os.environ["DEVX_INC_PATH"])', check: false)
+ 
+ if (res_lib.returncode() != 0 or res_inc.returncode() != 0)
+     build = false
+diff --git a/dpdk/drivers/common/mlx5/windows/mlx5_common_os.c b/dpdk/drivers/common/mlx5/windows/mlx5_common_os.c
+index 162c7476cc..c3cfc315f2 100644
+--- a/dpdk/drivers/common/mlx5/windows/mlx5_common_os.c
++++ b/dpdk/drivers/common/mlx5/windows/mlx5_common_os.c
+@@ -302,7 +302,7 @@ mlx5_os_umem_dereg(void *pumem)
+ }
+ 
+ /**
+- * Register mr. Given protection doamin pointer, pointer to addr and length
++ * Register mr. Given protection domain pointer, pointer to addr and length
+  * register the memory region.
+  *
+  * @param[in] pd
+@@ -310,7 +310,7 @@ mlx5_os_umem_dereg(void *pumem)
+  * @param[in] addr
+  *   Pointer to memory start address (type devx_device_ctx).
+  * @param[in] length
+- *   Lengtoh of the memory to register.
++ *   Length of the memory to register.
+  * @param[out] pmd_mr
+  *   pmd_mr struct set with lkey, address, length, pointer to mr object, mkey
+  *
+diff --git a/dpdk/drivers/common/mlx5/windows/mlx5_common_os.h b/dpdk/drivers/common/mlx5/windows/mlx5_common_os.h
+index 3afce56cd9..61fc8dd761 100644
+--- a/dpdk/drivers/common/mlx5/windows/mlx5_common_os.h
++++ b/dpdk/drivers/common/mlx5/windows/mlx5_common_os.h
+@@ -21,7 +21,7 @@
+ /**
+  * This API allocates aligned or non-aligned memory.  The free can be on either
+  * aligned or nonaligned memory.  To be protected - even though there may be no
+- * alignment - in Windows this API will unconditioanlly call _aligned_malloc()
++ * alignment - in Windows this API will unconditionally call _aligned_malloc()
+  * with at least a minimal alignment size.
+  *
+  * @param[in] align
+diff --git a/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+index a6d403fac3..12a7258c60 100644
+--- a/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h
++++ b/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+@@ -72,7 +72,7 @@
+ #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+ #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+ 
+-/* Minimum ring bufer size for memory allocation */
++/* Minimum ring buffer size for memory allocation */
+ #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+ 				ADF_RING_SIZE_4K : SIZE)
+ #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx_impl.h b/dpdk/drivers/common/sfc_efx/base/efx_impl.h
+index e2802e6672..ba00eeeb47 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx_impl.h
++++ b/dpdk/drivers/common/sfc_efx/base/efx_impl.h
+@@ -1555,6 +1555,12 @@ efx_mcdi_intf_from_pcie(
+ 	__in			uint32_t pcie_intf,
+ 	__out			efx_pcie_interface_t *efx_intf);
+ 
++LIBEFX_INTERNAL
++extern	__checkReturn		efx_rc_t
++efx_mcdi_intf_to_pcie(
++	__in			efx_pcie_interface_t efx_intf,
++	__out			uint32_t *pcie_intf);
++
+ LIBEFX_INTERNAL
+ extern	__checkReturn	efx_rc_t
+ efx_mcdi_init_evq(
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx_mae.c b/dpdk/drivers/common/sfc_efx/base/efx_mae.c
+index 7b24e3fee4..31f51b5548 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx_mae.c
++++ b/dpdk/drivers/common/sfc_efx/base/efx_mae.c
+@@ -1027,6 +1027,10 @@ efx_mae_match_spec_field_set(
+ 			memcpy(mvp + descp->emmd_value_offset,
+ 			    &dword, sizeof (dword));
+ 			break;
++		case 1:
++			memcpy(mvp + descp->emmd_value_offset,
++			    value, 1);
++			break;
+ 		default:
+ 			EFSYS_ASSERT(B_FALSE);
+ 		}
+@@ -1039,6 +1043,10 @@ efx_mae_match_spec_field_set(
+ 			memcpy(mvp + descp->emmd_mask_offset,
+ 			    &dword, sizeof (dword));
+ 			break;
++		case 1:
++			memcpy(mvp + descp->emmd_mask_offset,
++			    mask, 1);
++			break;
+ 		default:
+ 			EFSYS_ASSERT(B_FALSE);
+ 		}
+@@ -2242,7 +2250,8 @@ efx_mae_outer_rule_insert(
+ 	memcpy(payload + offset, spec->emms_mask_value_pairs.outer,
+ 	    MAE_ENC_FIELD_PAIRS_LEN);
+ 
+-	MCDI_IN_SET_BYTE(req, MAE_OUTER_RULE_INSERT_IN_RECIRC_ID,
++	MCDI_IN_SET_DWORD_FIELD(req, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL,
++	    MAE_OUTER_RULE_INSERT_IN_RECIRC_ID,
+ 	    spec->emms_outer_rule_recirc_id);
+ 
+ 	efx_mcdi_execute(enp, &req);
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx_mcdi.c b/dpdk/drivers/common/sfc_efx/base/efx_mcdi.c
+index 9189a7a8b3..404ca23d58 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx_mcdi.c
++++ b/dpdk/drivers/common/sfc_efx/base/efx_mcdi.c
+@@ -659,6 +659,7 @@ efx_mcdi_get_client_handle(
+ 	EFX_MCDI_DECLARE_BUF(payload,
+ 	    MC_CMD_GET_CLIENT_HANDLE_IN_LEN,
+ 	    MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);
++	uint32_t pcie_intf;
+ 	efx_rc_t rc;
+ 
+ 	if (handle == NULL) {
+@@ -666,6 +667,10 @@ efx_mcdi_get_client_handle(
+ 		goto fail1;
+ 	}
+ 
++	rc = efx_mcdi_intf_to_pcie(intf, &pcie_intf);
++	if (rc != 0)
++		goto fail2;
++
+ 	req.emr_cmd = MC_CMD_GET_CLIENT_HANDLE;
+ 	req.emr_in_buf = payload;
+ 	req.emr_in_length = MC_CMD_GET_CLIENT_HANDLE_IN_LEN;
+@@ -676,23 +681,25 @@ efx_mcdi_get_client_handle(
+ 	    MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
+ 	MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_PF, pf);
+ 	MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_VF, vf);
+-	MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_FUNC_INTF, intf);
++	MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_FUNC_INTF, pcie_intf);
+ 
+ 	efx_mcdi_execute(enp, &req);
+ 
+ 	if (req.emr_rc != 0) {
+ 		rc = req.emr_rc;
+-		goto fail2;
++		goto fail3;
+ 	}
+ 
+ 	if (req.emr_out_length_used < MC_CMD_GET_CLIENT_HANDLE_OUT_LEN) {
+ 		rc = EMSGSIZE;
+-		goto fail3;
++		goto fail4;
+ 	}
+ 
+ 	*handle = MCDI_OUT_DWORD(req, GET_CLIENT_HANDLE_OUT_HANDLE);
+ 
+ 	return 0;
++fail4:
++	EFSYS_PROBE(fail4);
+ fail3:
+ 	EFSYS_PROBE(fail3);
+ fail2:
+@@ -709,7 +716,7 @@ efx_mcdi_get_own_client_handle(
+ {
+ 	efx_rc_t rc;
+ 
+-	rc = efx_mcdi_get_client_handle(enp, PCIE_INTERFACE_CALLER,
++	rc = efx_mcdi_get_client_handle(enp, EFX_PCIE_INTERFACE_CALLER,
+ 	    PCIE_FUNCTION_PF_NULL, PCIE_FUNCTION_VF_NULL, handle);
+ 	if (rc != 0)
+ 		goto fail1;
+@@ -2233,6 +2240,35 @@ efx_mcdi_intf_from_pcie(
+ 	return (rc);
+ }
+ 
++	__checkReturn		efx_rc_t
++efx_mcdi_intf_to_pcie(
++	__in			efx_pcie_interface_t efx_intf,
++	__out			uint32_t *pcie_intf)
++{
++	efx_rc_t rc;
++
++	switch (efx_intf) {
++	case EFX_PCIE_INTERFACE_CALLER:
++		*pcie_intf = PCIE_INTERFACE_CALLER;
++		break;
++	case EFX_PCIE_INTERFACE_HOST_PRIMARY:
++		*pcie_intf = PCIE_INTERFACE_HOST_PRIMARY;
++		break;
++	case EFX_PCIE_INTERFACE_NIC_EMBEDDED:
++		*pcie_intf = PCIE_INTERFACE_NIC_EMBEDDED;
++		break;
++	default:
++		rc = EINVAL;
++		goto fail1;
++	}
++
++	return (0);
++
++fail1:
++	EFSYS_PROBE1(fail1, efx_rc_t, rc);
++	return (rc);
++}
++
+ /*
+  * This function returns the pf and vf number of a function.  If it is a pf the
+  * vf number is 0xffff.  The vf number is the index of the vf on that
+diff --git a/dpdk/drivers/common/sfc_efx/efsys.h b/dpdk/drivers/common/sfc_efx/efsys.h
+index 3860c2835a..224254bee7 100644
+--- a/dpdk/drivers/common/sfc_efx/efsys.h
++++ b/dpdk/drivers/common/sfc_efx/efsys.h
+@@ -616,7 +616,7 @@ typedef struct efsys_bar_s {
+ 
+ #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)	((void)0)
+ 
+-/* Just avoid store and compiler (impliciltly) reordering */
++/* Just avoid store and compiler (implicitly) reordering */
+ #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)	rte_wmb()
+ 
+ /* TIMESTAMP */
+diff --git a/dpdk/drivers/compress/mlx5/mlx5_compress.c b/dpdk/drivers/compress/mlx5/mlx5_compress.c
+index 82b871bd86..a18ec8a6cf 100644
+--- a/dpdk/drivers/compress/mlx5/mlx5_compress.c
++++ b/dpdk/drivers/compress/mlx5/mlx5_compress.c
+@@ -563,7 +563,18 @@ mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,
+ 								    qp->qp.wqes;
+ 	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
+ 
+-	op->status = RTE_COMP_OP_STATUS_ERROR;
++	volatile uint32_t *synd_word = RTE_PTR_ADD(cqe, MLX5_ERROR_CQE_SYNDROME_OFFSET);
++	switch (*synd_word) {
++	case MLX5_GGA_COMP_OUT_OF_SPACE_SYNDROME_BE:
++		op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
++		DRV_LOG(DEBUG, "OUT OF SPACE error, output is bigger than the destination buffer.");
++		break;
++	case MLX5_GGA_COMP_MISSING_BFINAL_SYNDROME_BE:
++		DRV_LOG(DEBUG, "The last compressed block is missing the B-final flag; the compressed data may be incomplete or corrupted.");
++		/* fallthrough */
++	default:
++		op->status = RTE_COMP_OP_STATUS_ERROR;
++	}
+ 	op->consumed = 0;
+ 	op->produced = 0;
+ 	op->output_chksum = 0;
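
With the change above, the PMD stops collapsing every error CQE into RTE_COMP_OP_STATUS_ERROR: it reads the 32-bit syndrome at MLX5_ERROR_CQE_SYNDROME_OFFSET and reports the recoverable out-of-space case separately (the _BE suffix on the constants signals that the comparison is done on the big-endian word, with no byte swap). On the application side the distinction is actionable; a hedged sketch against the standard compressdev API:

/* Possible application-side dispatch on the refined status; how the dst
 * buffer is regrown and the op resubmitted is left to the caller. */
#include <rte_comp.h>

static void handle_completed_op(struct rte_comp_op *op)
{
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		/* op->produced bytes of valid output are available. */
		break;
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		/* Destination too small: resubmit with a larger buffer. */
		break;
	default:
		/* Unrecoverable, e.g. truncated input missing BFINAL. */
		break;
	}
}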
+diff --git a/dpdk/drivers/compress/octeontx/include/zip_regs.h b/dpdk/drivers/compress/octeontx/include/zip_regs.h
+index 96e538bb75..94a48cde66 100644
+--- a/dpdk/drivers/compress/octeontx/include/zip_regs.h
++++ b/dpdk/drivers/compress/octeontx/include/zip_regs.h
+@@ -195,7 +195,7 @@ union zip_inst_s {
+ 		uint64_t bf                    : 1;
+ 		/** Comp/decomp operation */
+ 		uint64_t op                    : 2;
+-		/** Data sactter */
++		/** Data scatter */
+ 		uint64_t ds                    : 1;
+ 		/** Data gather */
+ 		uint64_t dg                    : 1;
+@@ -376,7 +376,7 @@ union zip_inst_s {
+ 		uint64_t bf                    : 1;
+ 		/** Comp/decomp operation */
+ 		uint64_t op                    : 2;
+-		/** Data sactter */
++		/** Data scatter */
+ 		uint64_t ds                    : 1;
+ 		/** Data gather */
+ 		uint64_t dg                    : 1;
+diff --git a/dpdk/drivers/compress/octeontx/otx_zip.h b/dpdk/drivers/compress/octeontx/otx_zip.h
+index e43f7f5c3e..118a95d738 100644
+--- a/dpdk/drivers/compress/octeontx/otx_zip.h
++++ b/dpdk/drivers/compress/octeontx/otx_zip.h
+@@ -31,7 +31,7 @@ extern int octtx_zip_logtype_driver;
+ /**< PCI device id of ZIP VF */
+ #define PCI_DEVICE_ID_OCTEONTX_ZIPVF	0xA037
+ 
+-/* maxmum number of zip vf devices */
++/* maximum number of zip vf devices */
+ #define ZIP_MAX_VFS 8
+ 
+ /* max size of one chunk */
+diff --git a/dpdk/drivers/compress/octeontx/otx_zip_pmd.c b/dpdk/drivers/compress/octeontx/otx_zip_pmd.c
+index dd62285b86..1b6178f661 100644
+--- a/dpdk/drivers/compress/octeontx/otx_zip_pmd.c
++++ b/dpdk/drivers/compress/octeontx/otx_zip_pmd.c
+@@ -392,6 +392,8 @@ zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ 	}
+ 
+ 	name =  rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
++	if (name == NULL)
++		return (-ENOMEM);
+ 	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ 		 "zip_pmd_%u_qp_%u",
+ 		 dev->data->dev_id, qp_id);
+@@ -399,8 +401,10 @@ zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ 	/* Allocate the queue pair data structure. */
+ 	qp = rte_zmalloc_socket(name, sizeof(*qp),
+ 				RTE_CACHE_LINE_SIZE, socket_id);
+-	if (qp == NULL)
++	if (qp == NULL) {
++		rte_free(name);
+ 		return (-ENOMEM);
++	}
+ 
+ 	qp->name = name;
+ 
+diff --git a/dpdk/drivers/compress/qat/qat_comp_pmd.c b/dpdk/drivers/compress/qat/qat_comp_pmd.c
+index 9b24d46e97..da6404c017 100644
+--- a/dpdk/drivers/compress/qat/qat_comp_pmd.c
++++ b/dpdk/drivers/compress/qat/qat_comp_pmd.c
+@@ -463,7 +463,7 @@ qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
+ 		} else if (info.error) {
+ 			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+ 			QAT_LOG(ERR,
+-			     "Destoying mempool %s as at least one element failed initialisation",
++			     "Destroying mempool %s as at least one element failed initialisation",
+ 			     stream_pool_name);
+ 			rte_mempool_free(mp);
+ 			mp = NULL;
+diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_device.h b/dpdk/drivers/crypto/bcmfs/bcmfs_device.h
+index e5ca866977..4901a6cfd9 100644
+--- a/dpdk/drivers/crypto/bcmfs/bcmfs_device.h
++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_device.h
+@@ -32,7 +32,7 @@ enum bcmfs_device_type {
+ 	BCMFS_UNKNOWN
+ };
+ 
+-/* A table to store registered queue pair opertations */
++/* A table to store registered queue pair operations */
+ struct bcmfs_hw_queue_pair_ops_table {
+ 	rte_spinlock_t tl;
+ 	/* Number of used ops structs in the table. */
+diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c b/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c
+index cb5ff6c61b..61d457f4e0 100644
+--- a/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c
++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c
+@@ -212,7 +212,7 @@ bcmfs_qp_setup(struct bcmfs_qp **qp_addr,
+ 		nb_descriptors = FS_RM_MAX_REQS;
+ 
+ 	if (qp_conf->iobase == NULL) {
+-		BCMFS_LOG(ERR, "IO onfig space null");
++		BCMFS_LOG(ERR, "IO config space null");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_defs.h b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_defs.h
+index eaefe97e26..9bb8a695a0 100644
+--- a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_defs.h
++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_defs.h
+@@ -20,11 +20,11 @@ struct bcmfs_sym_request;
+ 
+ /** Crypto Request processing successful. */
+ #define BCMFS_SYM_RESPONSE_SUCCESS               (0)
+-/** Crypot Request processing protocol failure. */
++/** Crypto Request processing protocol failure. */
+ #define BCMFS_SYM_RESPONSE_PROTO_FAILURE         (1)
+-/** Crypot Request processing completion failure. */
++/** Crypto Request processing completion failure. */
+ #define BCMFS_SYM_RESPONSE_COMPL_ERROR           (2)
+-/** Crypot Request processing hash tag check error. */
++/** Crypto Request processing hash tag check error. */
+ #define BCMFS_SYM_RESPONSE_HASH_TAG_ERROR        (3)
+ 
+ /** Maximum threshold length to adjust AAD in continuation
+diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_engine.h b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_engine.h
+index d9594246b5..51ff9f75ed 100644
+--- a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_engine.h
++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_engine.h
+@@ -12,7 +12,7 @@
+ #include "bcmfs_sym_defs.h"
+ #include "bcmfs_sym_req.h"
+ 
+-/* structure to hold element's arrtibutes */
++/* structure to hold element's attributes */
+ struct fsattr {
+ 	void *va;
+ 	uint64_t pa;
+diff --git a/dpdk/drivers/crypto/bcmfs/hw/bcmfs5_rm.c b/dpdk/drivers/crypto/bcmfs/hw/bcmfs5_rm.c
+index 86e53051dd..c677c0cd9b 100644
+--- a/dpdk/drivers/crypto/bcmfs/hw/bcmfs5_rm.c
++++ b/dpdk/drivers/crypto/bcmfs/hw/bcmfs5_rm.c
+@@ -441,7 +441,7 @@ static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)
+ {
+ 	struct bcmfs_queue *txq = &qp->tx_q;
+ 
+-	/* sync in bfeore ringing the door-bell */
++	/* sync in before ringing the door-bell */
+ 	rte_wmb();
+ 
+ 	FS_MMIO_WRITE32(txq->descs_inflight,
+diff --git a/dpdk/drivers/crypto/caam_jr/caam_jr_hw_specific.h b/dpdk/drivers/crypto/caam_jr/caam_jr_hw_specific.h
+index bbe8bc3f90..6ee7f7cef3 100644
+--- a/dpdk/drivers/crypto/caam_jr/caam_jr_hw_specific.h
++++ b/dpdk/drivers/crypto/caam_jr/caam_jr_hw_specific.h
+@@ -376,7 +376,7 @@ struct sec_job_ring_t {
+ 	void *register_base_addr;	/* Base address for SEC's
+ 					 * register memory for this job ring.
+ 					 */
+-	uint8_t coalescing_en;		/* notifies if coelescing is
++	uint8_t coalescing_en;		/* notifies if coalescing is
+ 					 * enabled for the job ring
+ 					 */
+ 	sec_job_ring_state_t jr_state;	/* The state of this job ring */
+@@ -479,7 +479,7 @@ void hw_job_ring_error_print(struct sec_job_ring_t *job_ring, int code);
+ 
+ /* @brief Set interrupt coalescing parameters on the Job Ring.
+  * @param [in]  job_ring		The job ring
+- * @param [in]  irq_coalesing_timer     Interrupt coalescing timer threshold.
++ * @param [in]  irq_coalescing_timer    Interrupt coalescing timer threshold.
+  *					This value determines the maximum
+  *					amount of time after processing a
+  *					descriptor before raising an interrupt.
+diff --git a/dpdk/drivers/crypto/caam_jr/caam_jr_pvt.h b/dpdk/drivers/crypto/caam_jr/caam_jr_pvt.h
+index 552d6b9b1b..52f872bcd0 100644
+--- a/dpdk/drivers/crypto/caam_jr/caam_jr_pvt.h
++++ b/dpdk/drivers/crypto/caam_jr/caam_jr_pvt.h
+@@ -169,7 +169,7 @@ struct sec4_sg_entry {
+ 
+ /* Structure encompassing a job descriptor which is to be processed
+  * by SEC. User should also initialise this structure with the callback
+- * function pointer which will be called by driver after recieving proccessed
++ * function pointer which will be called by driver after receiving processed
+  * descriptor from SEC. User data is also passed in this data structure which
+  * will be sent as an argument to the user callback function.
+  */
+@@ -288,7 +288,7 @@ int caam_jr_enable_irqs(int uio_fd);
+  *  value that indicates an IRQ disable action into UIO file descriptor
+  *  of this job ring.
+  *
+- * @param [in]  uio_fd    UIO File descripto
++ * @param [in]  uio_fd    UIO File descriptor
+  * @retval 0 for success
+  * @retval -1 value for error
+  *
+diff --git a/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c b/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c
+index e4ee102344..583ba3b523 100644
+--- a/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c
++++ b/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c
+@@ -227,7 +227,7 @@ caam_jr_enable_irqs(int uio_fd)
+  *  value that indicates an IRQ disable action into UIO file descriptor
+  *  of this job ring.
+  *
+- * @param [in]  uio_fd    UIO File descripto
++ * @param [in]  uio_fd    UIO File descriptor
+  * @retval 0 for success
+  * @retval -1 value for error
+  *
+diff --git a/dpdk/drivers/crypto/ccp/ccp_crypto.c b/dpdk/drivers/crypto/ccp/ccp_crypto.c
+index 70daed791e..4bab18323b 100644
+--- a/dpdk/drivers/crypto/ccp/ccp_crypto.c
++++ b/dpdk/drivers/crypto/ccp/ccp_crypto.c
+@@ -2,6 +2,8 @@
+  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+  */
+ 
++#define OPENSSL_API_COMPAT 0x10100000L
++
+ #include <dirent.h>
+ #include <fcntl.h>
+ #include <stdio.h>
+@@ -1299,7 +1301,7 @@ ccp_auth_slot(struct ccp_session *session)
+ 	case CCP_AUTH_ALGO_SHA512_HMAC:
+ 		/**
+ 		 * 1. Load PHash1 = H(k ^ ipad); to LSB
+-		 * 2. generate IHash = H(hash on meassage with PHash1
++		 * 2. generate IHash = H(hash on message with PHash1
+ 		 * as init values);
+ 		 * 3. Retrieve IHash 2 slots for 384/512
+ 		 * 4. Load Phash2 = H(k ^ opad); to LSB
+diff --git a/dpdk/drivers/crypto/ccp/ccp_crypto.h b/dpdk/drivers/crypto/ccp/ccp_crypto.h
+index 8e6d03efc8..d307f73ee4 100644
+--- a/dpdk/drivers/crypto/ccp/ccp_crypto.h
++++ b/dpdk/drivers/crypto/ccp/ccp_crypto.h
+@@ -70,7 +70,7 @@
+ /* Maximum length for digest */
+ #define DIGEST_LENGTH_MAX	64
+ 
+-/* SHA LSB intialiazation values */
++/* SHA LSB initialization values */
+ 
+ #define SHA1_H0		0x67452301UL
+ #define SHA1_H1		0xefcdab89UL
+diff --git a/dpdk/drivers/crypto/ccp/ccp_dev.h b/dpdk/drivers/crypto/ccp/ccp_dev.h
+index 85c8fc47a2..2a205cd446 100644
+--- a/dpdk/drivers/crypto/ccp/ccp_dev.h
++++ b/dpdk/drivers/crypto/ccp/ccp_dev.h
+@@ -19,7 +19,7 @@
+ #include <rte_crypto_sym.h>
+ #include <cryptodev_pmd.h>
+ 
+-/**< CCP sspecific */
++/**< CCP specific */
+ #define MAX_HW_QUEUES                   5
+ #define CCP_MAX_TRNG_RETRIES		10
+ #define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
+diff --git a/dpdk/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/dpdk/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
+index 2dc8913feb..2b0261e057 100644
+--- a/dpdk/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
++++ b/dpdk/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
+@@ -77,9 +77,10 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
+ 	const unsigned int hdr_len = sizeof(struct roc_ie_on_outb_hdr);
+ 	struct rte_crypto_sym_op *sym_op = cop->sym;
+ 	struct rte_mbuf *m_src = sym_op->m_src;
+-	uint32_t dlen, rlen, extend_tail;
+ 	struct roc_ie_on_outb_sa *out_sa;
+ 	struct roc_ie_on_outb_hdr *hdr;
++	uint32_t dlen, rlen;
++	int32_t extend_tail;
+ 
+ 	out_sa = &sa->out_sa;
+ 
+@@ -88,7 +89,8 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
+ 
+ 	extend_tail = rlen - dlen;
+ 	if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
+-		plt_dp_err("Not enough tail room");
++		plt_dp_err("Not enough tail room (required: %d, available: %d)",
++			   extend_tail, rte_pktmbuf_tailroom(m_src));
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+index a2281fb8de..0d99c891d9 100644
+--- a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
++++ b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+@@ -100,8 +100,13 @@ cnxk_cpt_dev_start(struct rte_cryptodev *dev)
+ 	uint16_t nb_lf = roc_cpt->nb_lf;
+ 	uint16_t qp_id;
+ 
+-	for (qp_id = 0; qp_id < nb_lf; qp_id++)
++	for (qp_id = 0; qp_id < nb_lf; qp_id++) {
++		/* The application may not have set up all queue pairs. */
++		if (roc_cpt->lf[qp_id] == NULL)
++			continue;
++
+ 		roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
++	}
+ 
+ 	return 0;
+ }
+@@ -114,8 +119,12 @@ cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
+ 	uint16_t nb_lf = roc_cpt->nb_lf;
+ 	uint16_t qp_id;
+ 
+-	for (qp_id = 0; qp_id < nb_lf; qp_id++)
++	for (qp_id = 0; qp_id < nb_lf; qp_id++) {
++		if (roc_cpt->lf[qp_id] == NULL)
++			continue;
++
+ 		roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
++	}
+ }
+ 
+ int
+@@ -352,6 +361,7 @@ cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ 	struct roc_cpt *roc_cpt = &vf->cpt;
+ 	struct rte_pci_device *pci_dev;
+ 	struct cnxk_cpt_qp *qp;
++	uint32_t nb_desc;
+ 	int ret;
+ 
+ 	if (dev->data->queue_pairs[qp_id] != NULL)
+@@ -364,14 +374,17 @@ cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ 		return -EIO;
+ 	}
+ 
+-	qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
++	/* Update nb_desc to next power of 2 to aid in pending queue checks */
++	nb_desc = plt_align32pow2(conf->nb_descriptors);
++
++	qp = cnxk_cpt_qp_create(dev, qp_id, nb_desc);
+ 	if (qp == NULL) {
+ 		plt_err("Could not create queue pair %d", qp_id);
+ 		return -ENOMEM;
+ 	}
+ 
+ 	qp->lf.lf_id = qp_id;
+-	qp->lf.nb_desc = conf->nb_descriptors;
++	qp->lf.nb_desc = nb_desc;
+ 
+ 	ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
+ 	if (ret < 0) {
+diff --git a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+index 0d363651ff..1d1b1fc94b 100644
+--- a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
++++ b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+@@ -166,7 +166,11 @@ pending_queue_retreat(uint64_t *index, const uint64_t mask, uint64_t nb_entry)
+ static __rte_always_inline uint64_t
+ pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
+ {
+-	return (head - tail) & mask;
++	/*
++	 * Mask is nb_desc - 1. Add nb_desc (i.e. mask + 1) to head before
++	 * subtracting tail and mask the result, so the count stays correct
++	 * when tail > head after a wrap-around.
++	 */
++	return ((head + mask + 1) - tail) & mask;
+ }
+ 
+ static __rte_always_inline uint64_t
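
The corrected in-flight count above is easiest to check with small numbers; note that cnxk_cpt_queue_pair_setup() now rounds nb_desc up with plt_align32pow2(), so mask = nb_desc - 1 is a valid modulo mask. With nb_desc = 8:

/* Worked example of pending_queue_infl_cnt() above; head/tail are ring
 * indices in [0, nb_desc). */
#include <assert.h>
#include <stdint.h>

static uint64_t infl_cnt(uint64_t head, uint64_t tail, uint64_t mask)
{
	return ((head + mask + 1) - tail) & mask;
}

int main(void)
{
	/* No wrap: head is 3 slots ahead of tail. */
	assert(infl_cnt(5, 2, 7) == 3);
	/* Wrap-around: head already wrapped to 2 while tail is at 6;
	 * 4 descriptors (slots 6, 7, 0, 1) are still in flight. */
	assert(infl_cnt(2, 6, 7) == 4);
	return 0;
}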
+diff --git a/dpdk/drivers/crypto/cnxk/cnxk_se.h b/dpdk/drivers/crypto/cnxk/cnxk_se.h
+index 37237de21a..af86ef18d8 100644
+--- a/dpdk/drivers/crypto/cnxk/cnxk_se.h
++++ b/dpdk/drivers/crypto/cnxk/cnxk_se.h
+@@ -39,17 +39,16 @@ struct cnxk_se_sess {
+ static inline void
+ cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst)
+ {
+-	iv_dst[16] = iv_src[16];
+-	/* pack the last 8 bytes of IV to 6 bytes.
++	/* pack the first 8 bytes of IV to 6 bytes.
+ 	 * discard the 2 MSB bits of each byte
+ 	 */
+-	iv_dst[17] = (((iv_src[17] & 0x3f) << 2) | ((iv_src[18] >> 4) & 0x3));
+-	iv_dst[18] = (((iv_src[18] & 0xf) << 4) | ((iv_src[19] >> 2) & 0xf));
+-	iv_dst[19] = (((iv_src[19] & 0x3) << 6) | (iv_src[20] & 0x3f));
++	iv_dst[0] = (((iv_src[0] & 0x3f) << 2) | ((iv_src[1] >> 4) & 0x3));
++	iv_dst[1] = (((iv_src[1] & 0xf) << 4) | ((iv_src[2] >> 2) & 0xf));
++	iv_dst[2] = (((iv_src[2] & 0x3) << 6) | (iv_src[3] & 0x3f));
+ 
+-	iv_dst[20] = (((iv_src[21] & 0x3f) << 2) | ((iv_src[22] >> 4) & 0x3));
+-	iv_dst[21] = (((iv_src[22] & 0xf) << 4) | ((iv_src[23] >> 2) & 0xf));
+-	iv_dst[22] = (((iv_src[23] & 0x3) << 6) | (iv_src[24] & 0x3f));
++	iv_dst[3] = (((iv_src[4] & 0x3f) << 2) | ((iv_src[5] >> 4) & 0x3));
++	iv_dst[4] = (((iv_src[5] & 0xf) << 4) | ((iv_src[6] >> 2) & 0xf));
++	iv_dst[5] = (((iv_src[6] & 0x3) << 6) | (iv_src[7] & 0x3f));
+ }
+ 
+ static inline void
+@@ -71,10 +70,11 @@ pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type,
+ 			iv_temp[j] = iv_s_temp[3 - j];
+ 		memcpy(iv_d, iv_temp, 16);
+ 	} else {
+-		/* ZUC doesn't need a swap */
+-		memcpy(iv_d, iv_s, 16);
+-		if (pack_iv)
++		if (pack_iv) {
+ 			cpt_pack_iv(iv_s, iv_d);
++			memcpy(iv_d + 6, iv_s + 8, 17);
++		} else
++			memcpy(iv_d, iv_s, 16);
+ 	}
+ }
+ 
+@@ -179,27 +179,27 @@ fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
+ 	int32_t j;
+ 	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
+ 	uint32_t size = *psize;
+-	struct roc_se_buf_ptr *bufs;
+ 
+-	bufs = from->bufs;
+ 	for (j = 0; (j < from->buf_cnt) && size; j++) {
++		struct roc_se_sglist_comp *to = &list[i >> 2];
++		uint32_t buf_sz = from->bufs[j].size;
++		void *vaddr = from->bufs[j].vaddr;
+ 		uint64_t e_vaddr;
+ 		uint32_t e_len;
+-		struct roc_se_sglist_comp *to = &list[i >> 2];
+ 
+ 		if (unlikely(from_offset)) {
+-			if (from_offset >= bufs[j].size) {
+-				from_offset -= bufs[j].size;
++			if (from_offset >= buf_sz) {
++				from_offset -= buf_sz;
+ 				continue;
+ 			}
+-			e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
+-			e_len = (size > (bufs[j].size - from_offset)) ?
+-					(bufs[j].size - from_offset) :
++			e_vaddr = (uint64_t)vaddr + from_offset;
++			e_len = (size > (buf_sz - from_offset)) ?
++					(buf_sz - from_offset) :
+ 					size;
+ 			from_offset = 0;
+ 		} else {
+-			e_vaddr = (uint64_t)bufs[j].vaddr;
+-			e_len = (size > bufs[j].size) ? bufs[j].size : size;
++			e_vaddr = (uint64_t)vaddr;
++			e_len = (size > buf_sz) ? buf_sz : size;
+ 		}
+ 
+ 		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+@@ -1020,6 +1020,7 @@ cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
+ 		iv_len = params->auth_iv_len;
+ 
+ 		if (iv_len == 25) {
++			roc_se_zuc_bytes_swap(iv_s, iv_len);
+ 			iv_len -= 2;
+ 			pack_iv = 1;
+ 		}
+@@ -1049,6 +1050,7 @@ cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
+ 		iv_len = params->cipher_iv_len;
+ 
+ 		if (iv_len == 25) {
++			roc_se_zuc_bytes_swap(iv_s, iv_len);
+ 			iv_len -= 2;
+ 			pack_iv = 1;
+ 		}
+diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+index a5b052375d..c9745f1db0 100644
+--- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
++++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause
+  *
+  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+- *   Copyright 2016-2021 NXP
++ *   Copyright 2016-2022 NXP
+  *
+  */
+ 
+@@ -52,6 +52,27 @@
+ 
+ uint8_t cryptodev_driver_id;
+ 
++static inline void
++free_fle(const struct qbman_fd *fd)
++{
++	struct qbman_fle *fle;
++	struct rte_crypto_op *op;
++	struct ctxt_priv *priv;
++
++#ifdef RTE_LIB_SECURITY
++	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
++		return;
++#endif
++	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
++	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
++	/* free the fle memory */
++	if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src))) {
++		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
++		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
++	} else
++		rte_free((void *)(fle-1));
++}
++
+ #ifdef RTE_LIB_SECURITY
+ static inline int
+ build_proto_compound_sg_fd(dpaa2_sec_session *sess,
+@@ -1501,6 +1522,12 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+ 					num_tx += loop;
+ 					nb_ops -= loop;
++					DPAA2_SEC_DP_DEBUG("Enqueue failed\n");
++					/* Free the FLE buffers. */
++					while (loop < frames_to_send) {
++						free_fle(&fd_arr[loop]);
++						loop++;
++					}
+ 					goto skip_tx;
+ 				}
+ 			} else {
+@@ -1527,6 +1554,10 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
+ 	int16_t diff = 0;
+ 	dpaa2_sec_session *sess_priv __rte_unused;
+ 
++	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
++		DPAA2_SEC_ERR("error: non inline buffer");
++		return NULL;
++	}
+ 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+@@ -1545,6 +1576,14 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
+ 	else
+ 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
+ 
++	if (unlikely(fd->simple.frc)) {
++		DPAA2_SEC_ERR("SEC returned Error - %x",
++				fd->simple.frc);
++		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
++	} else {
++		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++	}
++
+ 	return op;
+ }
+ #endif
+@@ -1573,11 +1612,6 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
+ 	 * We can have a better approach to use the inline Mbuf
+ 	 */
+ 
+-	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+-		/* TODO complete it. */
+-		DPAA2_SEC_ERR("error: non inline buffer");
+-		return NULL;
+-	}
+ 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+ 
+ 	/* Prefeth op */
+@@ -3138,13 +3172,15 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
+ 	/* find xfrm types */
+ 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ 		cipher_xform = &xform->cipher;
+-		if (xform->next != NULL) {
++		if (xform->next != NULL &&
++			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ 			session->ext_params.aead_ctxt.auth_cipher_text = true;
+ 			auth_xform = &xform->next->auth;
+ 		}
+ 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ 		auth_xform = &xform->auth;
+-		if (xform->next != NULL) {
++		if (xform->next != NULL &&
++			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ 			session->ext_params.aead_ctxt.auth_cipher_text = false;
+ 			cipher_xform = &xform->next->cipher;
+ 		}
+@@ -3723,7 +3759,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
+ 				 struct rte_event *ev)
+ {
+ 	uint8_t dqrr_index;
+-	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
++	struct rte_crypto_op *crypto_op;
+ 	/* Prefetching mbuf */
+ 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+ 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+@@ -3739,12 +3775,13 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
+ 	ev->queue_id = rxq->ev.queue_id;
+ 	ev->priority = rxq->ev.priority;
+ 
+-	ev->event_ptr = sec_fd_to_mbuf(fd);
++	crypto_op = sec_fd_to_mbuf(fd);
+ 	dqrr_index = qbman_get_dqrr_idx(dq);
+ 	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
+ 	DPAA2_PER_LCORE_DQRR_SIZE++;
+ 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
++	ev->event_ptr = crypto_op;
+ }
+ 
+ int
+diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+index 74f2045637..e68a4875dd 100644
+--- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
++++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: BSD-3-Clause
+- * Copyright 2021 NXP
++ * Copyright 2021-2022 NXP
+  */
+ 
+ #include <cryptodev_pmd.h>
+@@ -44,8 +44,8 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+ 				ofs.ofs.auth.head;
+ 
+-	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+-	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
++	uint16_t auth_tail_len;
++	uint32_t auth_only_len;
+ 	int icv_len = sess->digest_length;
+ 	uint8_t *old_icv;
+ 	uint8_t *iv_ptr = iv->va;
+@@ -55,6 +55,8 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 
+ 	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+ 	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
++	auth_tail_len = auth_len - cipher_len - auth_hdr_len;
++	auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+ 	/* first FLE entry used to store session ctxt */
+ 	fle = (struct qbman_fle *)rte_malloc(NULL,
+ 			FLE_SG_MEM_SIZE(2 * sgl->num),
+@@ -104,6 +106,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 			DPAA2_SET_FLE_OFFSET(sge, 0);
+ 			sge->length = dest_sgl->vec[i].len;
+ 		}
++		sge->length -= ofs.ofs.cipher.tail;
+ 	} else {
+ 		/* Configure Output SGE for Encap/Decap */
+ 		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+@@ -117,6 +120,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 			DPAA2_SET_FLE_OFFSET(sge, 0);
+ 			sge->length = sgl->vec[i].len;
+ 		}
++		sge->length -= ofs.ofs.cipher.tail;
+ 	}
+ 
+ 	if (sess->dir == DIR_ENC) {
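The build_raw_dp_chain_fd() fix above derives auth_only_len from the actual region lengths instead of trusting ofs.ofs.auth.tail. A worked example with made-up offsets, showing the packed value (tail length in the upper 16 bits, header length in the lower 16):

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* illustrative numbers, not taken from the patch */
	uint32_t data_len = 128;
	uint32_t auth_head = 0, auth_tail = 0;
	uint32_t cipher_head = 16, cipher_tail = 12;

	uint32_t cipher_len = data_len - cipher_head - cipher_tail;	/* 100 */
	uint32_t auth_len = data_len - auth_head - auth_tail;		/* 128 */
	uint16_t auth_hdr_len = cipher_head - auth_head;		/* 16 */
	/* authenticated-but-not-ciphered bytes after the cipher region */
	uint16_t auth_tail_len = auth_len - cipher_len - auth_hdr_len;	/* 12 */
	uint32_t auth_only_len = ((uint32_t)auth_tail_len << 16) | auth_hdr_len;

	printf("auth_only_len = 0x%08x\n", auth_only_len);	/* 0x000c0010 */
	return 0;
}
```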
+diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
+index a552e64506..0a6126ad97 100644
+--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
+@@ -723,7 +723,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+ 		}
+ 		ops[pkts++] = op;
+ 
+-		/* report op status to sym->op and then free the ctx memeory */
++		/* report op status to sym->op and then free the ctx memory */
+ 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+ 
+ 		qman_dqrr_consume(fq, dq);
+@@ -2986,11 +2986,13 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
+ 	/* find xfrm types */
+ 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ 		cipher_xform = &xform->cipher;
+-		if (xform->next != NULL)
++		if (xform->next != NULL &&
++			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ 			auth_xform = &xform->next->auth;
+ 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ 		auth_xform = &xform->auth;
+-		if (xform->next != NULL)
++		if (xform->next != NULL &&
++			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ 			cipher_xform = &xform->next->cipher;
+ 	} else {
+ 		DPAA_SEC_ERR("Invalid crypto type");
+@@ -3552,23 +3554,24 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
+ 
+ 	int retval;
+ 
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++		return 0;
++
+ 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
+ 
+ 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ 	if (cryptodev == NULL)
+ 		return -ENOMEM;
+ 
+-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+-		cryptodev->data->dev_private = rte_zmalloc_socket(
+-					"cryptodev private structure",
+-					sizeof(struct dpaa_sec_dev_private),
+-					RTE_CACHE_LINE_SIZE,
+-					rte_socket_id());
++	cryptodev->data->dev_private = rte_zmalloc_socket(
++				"cryptodev private structure",
++				sizeof(struct dpaa_sec_dev_private),
++				RTE_CACHE_LINE_SIZE,
++				rte_socket_id());
+ 
+-		if (cryptodev->data->dev_private == NULL)
+-			rte_panic("Cannot allocate memzone for private "
+-					"device data");
+-	}
++	if (cryptodev->data->dev_private == NULL)
++		rte_panic("Cannot allocate memzone for private "
++				"device data");
+ 
+ 	dpaa_dev->crypto_dev = cryptodev;
+ 	cryptodev->device = &dpaa_dev->device;
+@@ -3610,8 +3613,7 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
+ 	retval = -ENXIO;
+ out:
+ 	/* In case of error, cleanup is done */
+-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+-		rte_free(cryptodev->data->dev_private);
++	rte_free(cryptodev->data->dev_private);
+ 
+ 	rte_cryptodev_pmd_release_device(cryptodev);
+ 
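The probe rework above hoists the primary-process check to the top of the function, so the allocation and its error-path free no longer need per-branch process-type guards. A minimal sketch of the resulting shape, with illustrative names:

```c
#include <errno.h>
#include <rte_eal.h>
#include <rte_malloc.h>

static int
probe_sketch(void)
{
	void *priv;

	/* secondary processes bail out before any allocation happens */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = rte_zmalloc("priv-sketch", 64, 0);
	if (priv == NULL)
		return -ENOMEM;

	/* ... device init; on failure the free is unconditional, since
	 * only the primary can reach this point ...
	 */
	rte_free(priv);
	return 0;
}
```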
+diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h
+index 7890687828..b3f2258ead 100644
+--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h
++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: BSD-3-Clause
+  *
+- *   Copyright 2016-2021 NXP
++ *   Copyright 2016-2022 NXP
+  *
+  */
+ 
+@@ -231,7 +231,7 @@ struct dpaa_sec_job {
+ 	struct qm_sg_entry sg[MAX_JOB_SG_ENTRIES];
+ };
+ 
+-#define DPAA_MAX_NB_MAX_DIGEST	32
++#define DPAA_MAX_NB_MAX_DIGEST	64
+ struct dpaa_sec_op_ctx {
+ 	struct dpaa_sec_job job;
+ 	union {
+diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+index 522685f8cf..29f4e6d40b 100644
+--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: BSD-3-Clause
+- * Copyright 2021 NXP
++ * Copyright 2021-2022 NXP
+  */
+ 
+ #include <rte_byteorder.h>
+@@ -397,8 +397,8 @@ build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 	unsigned int i;
+ 	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+ 				ofs.ofs.auth.head;
+-	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+-	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
++	uint16_t auth_tail_len;
++	uint32_t auth_only_len;
+ 	int data_len = 0, auth_len = 0, cipher_len = 0;
+ 
+ 	for (i = 0; i < sgl->num; i++)
+@@ -406,6 +406,8 @@ build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 
+ 	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+ 	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
++	auth_tail_len = auth_len - cipher_len - auth_hdr_len;
++	auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+ 
+ 	if (sgl->num > MAX_SG_ENTRIES) {
+ 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+@@ -448,6 +450,7 @@ build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+ 			sg->length = dest_sgl->vec[i].len;
+ 		}
++		sg->length -= ofs.ofs.cipher.tail;
+ 	} else {
+ 		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+ 		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+@@ -460,6 +463,7 @@ build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+ 			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+ 			sg->length = sgl->vec[i].len;
+ 		}
++		sg->length -= ofs.ofs.cipher.tail;
+ 	}
+ 
+ 	if (is_encode(ses)) {
+diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
+index 189262c4ad..58ea4ee476 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
+@@ -221,8 +221,11 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ 				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
+ 		return -EINVAL;
+ #endif
+-		if (dev->data->queue_pairs[qp_id] != NULL)
+-			qp = dev->data->queue_pairs[qp_id];
++		qp = dev->data->queue_pairs[qp_id];
++		if (qp == NULL) {
++			IPSEC_MB_LOG(ERR, "Primary process hasn't configured device qp.");
++			return -EINVAL;
++		}
+ 	} else {
+ 		/* Free memory prior to re-allocation if needed. */
+ 		if (dev->data->queue_pairs[qp_id] != NULL)
+@@ -291,8 +294,7 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ 	if (qp->mb_mgr_mz)
+ 		rte_memzone_free(qp->mb_mgr_mz);
+ #endif
+-	if (qp)
+-		rte_free(qp);
++	rte_free(qp);
+ 	return ret;
+ }
+ 
+diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h
+index 866722d6f4..e53101acf1 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h
++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h
+@@ -191,13 +191,13 @@ ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
+ 			const struct rte_crypto_sym_xform **cipher_xform,
+ 			const struct rte_crypto_sym_xform **aead_xform)
+ {
+-	const struct rte_crypto_sym_xform *next = xform->next;
+-
+ 	if (xform == NULL) {
+ 		*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+ 		return -ENOTSUP;
+ 	}
+ 
++	const struct rte_crypto_sym_xform *next = xform->next;
++
+ 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ 		if (next == NULL) {
+ 			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
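The ipsec_mb_private.h hunk above exists because the old code read xform->next before checking xform for NULL. Reduced to a sketch, the hazard and its fix look like this:

```c
#include <stddef.h>

struct node { struct node *next; };

static int
broken(const struct node *n)
{
	const struct node *next = n->next;	/* UB when n == NULL */

	if (n == NULL)				/* may be optimized away */
		return -1;
	return next != NULL;
}

static int
fixed(const struct node *n)
{
	if (n == NULL)
		return -1;

	const struct node *next = n->next;	/* safe: n already checked */
	return next != NULL;
}
```

Because the dereference is undefined behavior, a compiler is entitled to assume the pointer is non-NULL and delete the check entirely, which is why the fix moves the read below the test.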
+diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
+index 2c203795ab..2c033c6f28 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
+@@ -96,7 +96,9 @@ aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
+ 		sess->iv.length = auth_xform->auth.iv.length;
+ 		key_length = auth_xform->auth.key.length;
+ 		key = auth_xform->auth.key.data;
+-		sess->req_digest_length = auth_xform->auth.digest_length;
++		sess->req_digest_length =
++		    RTE_MIN(auth_xform->auth.digest_length,
++				DIGEST_LENGTH_MAX);
+ 		break;
+ 	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+ 	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+@@ -116,7 +118,9 @@ aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
+ 		key_length = aead_xform->aead.key.length;
+ 		key = aead_xform->aead.key.data;
+ 		sess->aad_length = aead_xform->aead.aad_length;
+-		sess->req_digest_length = aead_xform->aead.digest_length;
++		sess->req_digest_length =
++			RTE_MIN(aead_xform->aead.digest_length,
++				DIGEST_LENGTH_MAX);
+ 		break;
+ 	default:
+ 		IPSEC_MB_LOG(
+@@ -713,19 +717,17 @@ aesni_gcm_process_bulk(struct rte_cryptodev *dev,
+ 			__rte_unused union rte_crypto_sym_ofs ofs,
+ 			struct rte_crypto_sym_vec *vec)
+ {
+-	void *sess_priv;
+ 	struct aesni_gcm_session *s;
+ 	struct gcm_context_data gdata_ctx;
+ 	IMB_MGR *mb_mgr;
+ 
+-	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+-	if (unlikely(sess_priv == NULL)) {
++	s = (struct aesni_gcm_session *) get_sym_session_private_data(sess,
++		dev->driver_id);
++	if (unlikely(s == NULL)) {
+ 		aesni_gcm_fill_error_code(vec, EINVAL);
+ 		return 0;
+ 	}
+ 
+-	s = sess_priv;
+-
+ 	/* get per-thread MB MGR, create one if needed */
+ 	mb_mgr = get_per_thread_mb_mgr();
+ 	if (unlikely(mb_mgr == NULL))
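Clamping the requested digest length, as the two pmd_aesni_gcm.c hunks above do, keeps a user-supplied value from indexing past the session's fixed digest buffer. A sketch of the idiom, assuming a 16-byte cap in the spirit of DIGEST_LENGTH_MAX:

```c
#include <stdint.h>
#include <rte_common.h>	/* RTE_MIN */

#define DIGEST_LEN_MAX_SKETCH 16	/* stands in for DIGEST_LENGTH_MAX */

static uint16_t
clamp_digest_len(uint16_t requested)
{
	/* an oversized request can no longer overrun the session's
	 * fixed digest buffer
	 */
	return RTE_MIN(requested, (uint16_t)DIGEST_LEN_MAX_SKETCH);
}
```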
+diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+index a308d42ffa..536a586e98 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+@@ -918,7 +918,9 @@ aesni_mb_set_docsis_sec_session_parameters(
+ 
+ static inline uint64_t
+ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
+-		uint32_t oop)
++		uint32_t oop, const uint32_t auth_offset,
++		const uint32_t cipher_offset, const uint32_t auth_length,
++		const uint32_t cipher_length)
+ {
+ 	struct rte_mbuf *m_src, *m_dst;
+ 	uint8_t *p_src, *p_dst;
+@@ -927,7 +929,7 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
+ 
+ 	/* Only cipher then hash needs special calculation. */
+ 	if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH)
+-		return op->sym->auth.data.offset;
++		return auth_offset;
+ 
+ 	m_src = op->sym->m_src;
+ 	m_dst = op->sym->m_dst;
+@@ -935,24 +937,24 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
+ 	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ 	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
+ 	u_src = (uintptr_t)p_src;
+-	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
++	u_dst = (uintptr_t)p_dst + auth_offset;
+ 
+ 	/**
+ 	 * Copy the content between cipher offset and auth offset for generating
+ 	 * correct digest.
+ 	 */
+-	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
+-		memcpy(p_dst + op->sym->auth.data.offset,
+-				p_src + op->sym->auth.data.offset,
+-				op->sym->cipher.data.offset -
+-				op->sym->auth.data.offset);
++	if (cipher_offset > auth_offset)
++		memcpy(p_dst + auth_offset,
++				p_src + auth_offset,
++				cipher_offset -
++				auth_offset);
+ 
+ 	/**
+ 	 * Copy the content between (cipher offset + length) and (auth offset +
+ 	 * length) for generating correct digest
+ 	 */
+-	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
+-	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
++	cipher_end = cipher_offset + cipher_length;
++	auth_end = auth_offset + auth_length;
+ 	if (cipher_end < auth_end)
+ 		memcpy(p_dst + cipher_end, p_src + cipher_end,
+ 				auth_end - cipher_end);
+@@ -1099,6 +1101,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
+ 	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+ 	struct aesni_mb_session *session;
+ 	uint32_t m_offset, oop;
++	uint32_t auth_off_in_bytes;
++	uint32_t ciph_off_in_bytes;
++	uint32_t auth_len_in_bytes;
++	uint32_t ciph_len_in_bytes;
+ 
+ 	session = ipsec_mb_get_session_private(qp, op);
+ 	if (session == NULL) {
+@@ -1207,6 +1213,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
+ 	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
+ 		job->enc_keys = session->cipher.zuc_cipher_key;
+ 		job->dec_keys = session->cipher.zuc_cipher_key;
++		m_offset >>= 3;
+ 	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
+ 		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
+ 		m_offset = 0;
+@@ -1264,9 +1271,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
+ 
+ 	switch (job->hash_alg) {
+ 	case IMB_AUTH_AES_CCM:
+-		job->cipher_start_src_offset_in_bytes =
+-				op->sym->aead.data.offset;
+-		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+ 		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+ 		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
+ 
+@@ -1276,21 +1280,13 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
+ 
+ 	case IMB_AUTH_AES_GMAC:
+ 		if (session->cipher.mode == IMB_CIPHER_GCM) {
+-			job->cipher_start_src_offset_in_bytes =
+-					op->sym->aead.data.offset;
+ 			job->hash_start_src_offset_in_bytes =
+ 					op->sym->aead.data.offset;
+-			job->msg_len_to_cipher_in_bytes =
+-					op->sym->aead.data.length;
+ 			job->msg_len_to_hash_in_bytes =
+ 					op->sym->aead.data.length;
+-		} else {
+-			job->cipher_start_src_offset_in_bytes =
+-					op->sym->auth.data.offset;
+-			job->hash_start_src_offset_in_bytes =
+-					op->sym->auth.data.offset;
+-			job->msg_len_to_cipher_in_bytes = 0;
++		} else { /* AES-GMAC only, only AAD used */
+ 			job->msg_len_to_hash_in_bytes = 0;
++			job->hash_start_src_offset_in_bytes = 0;
+ 		}
+ 
+ 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+@@ -1298,36 +1294,100 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
+ 		break;
+ 
+ 	case IMB_AUTH_CHACHA20_POLY1305:
+-		job->cipher_start_src_offset_in_bytes =
+-			op->sym->aead.data.offset;
+ 		job->hash_start_src_offset_in_bytes =
+ 			op->sym->aead.data.offset;
+-		job->msg_len_to_cipher_in_bytes =
+-				op->sym->aead.data.length;
+ 		job->msg_len_to_hash_in_bytes =
+ 					op->sym->aead.data.length;
+ 
+ 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ 				session->iv.offset);
+ 		break;
+-	default:
+-		/* For SNOW3G, length and offsets are already in bits */
+-		job->cipher_start_src_offset_in_bytes =
+-				op->sym->cipher.data.offset;
+-		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
++	/* ZUC and SNOW3G require length in bits and offset in bytes */
++	case IMB_AUTH_ZUC_EIA3_BITLEN:
++	case IMB_AUTH_ZUC256_EIA3_BITLEN:
++	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
++		auth_off_in_bytes = op->sym->auth.data.offset >> 3;
++		ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
++		auth_len_in_bytes = op->sym->auth.data.length >> 3;
++		ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
++
++		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
++				session, oop, auth_off_in_bytes,
++				ciph_off_in_bytes, auth_len_in_bytes,
++				ciph_len_in_bytes);
++		job->msg_len_to_hash_in_bits = op->sym->auth.data.length;
++
++		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
++			session->iv.offset);
++		break;
++
++	/* KASUMI requires length and offset in bytes */
++	case IMB_AUTH_KASUMI_UIA1:
++		auth_off_in_bytes = op->sym->auth.data.offset >> 3;
++		ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
++		auth_len_in_bytes = op->sym->auth.data.length >> 3;
++		ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
+ 
+ 		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+-				session, oop);
++				session, oop, auth_off_in_bytes,
++				ciph_off_in_bytes, auth_len_in_bytes,
++				ciph_len_in_bytes);
++		job->msg_len_to_hash_in_bytes = auth_len_in_bytes;
++
++		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
++			session->iv.offset);
++		break;
++
++	default:
++		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
++				session, oop, op->sym->auth.data.offset,
++				op->sym->cipher.data.offset,
++				op->sym->auth.data.length,
++				op->sym->cipher.data.length);
+ 		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+ 
+ 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ 			session->iv.offset);
+ 	}
+ 
+-	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
+-		job->msg_len_to_cipher_in_bytes >>= 3;
+-	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
+-		job->msg_len_to_hash_in_bytes >>= 3;
++	switch (job->cipher_mode) {
++	/* ZUC requires length and offset in bytes */
++	case IMB_CIPHER_ZUC_EEA3:
++		job->cipher_start_src_offset_in_bytes =
++					op->sym->cipher.data.offset >> 3;
++		job->msg_len_to_cipher_in_bytes =
++					op->sym->cipher.data.length >> 3;
++		break;
++	/* SNOW3G and KASUMI require length and offset in bits */
++	case IMB_CIPHER_SNOW3G_UEA2_BITLEN:
++	case IMB_CIPHER_KASUMI_UEA1_BITLEN:
++		job->cipher_start_src_offset_in_bits =
++					op->sym->cipher.data.offset;
++		job->msg_len_to_cipher_in_bits =
++					op->sym->cipher.data.length;
++		break;
++	case IMB_CIPHER_GCM:
++		if (session->cipher.mode == IMB_CIPHER_NULL) {
++			/* AES-GMAC only (only AAD used) */
++			job->msg_len_to_cipher_in_bytes = 0;
++			job->cipher_start_src_offset_in_bytes = 0;
++		} else {
++			job->cipher_start_src_offset_in_bytes =
++					op->sym->aead.data.offset;
++			job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
++		}
++		break;
++	case IMB_CIPHER_CCM:
++	case IMB_CIPHER_CHACHA20_POLY1305:
++		job->cipher_start_src_offset_in_bytes =
++				op->sym->aead.data.offset;
++		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
++		break;
++	default:
++		job->cipher_start_src_offset_in_bytes =
++					op->sym->cipher.data.offset;
++		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
++	}
+ 
+ 	/* Set user data to be crypto operation data struct */
+ 	job->user_data = op;
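Several of the hunks above convert SNOW3G/ZUC/KASUMI offsets and lengths between bits and bytes with a right shift by 3, since the cryptodev API expresses them in bits for these algorithms while parts of the IPsec-MB job expect bytes. A tiny self-checking example of the conversion:

```c
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* made-up values standing in for op->sym->auth.data.* */
	uint32_t auth_offset_bits = 64;
	uint32_t auth_length_bits = 1024;

	uint32_t auth_offset_bytes = auth_offset_bits >> 3;	/* 8 */
	uint32_t auth_length_bytes = auth_length_bits >> 3;	/* 128 */

	assert(auth_offset_bytes == 8 && auth_length_bytes == 128);
	return 0;
}
```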
+diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+index d37cc787a0..d177961ea5 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+@@ -848,7 +848,7 @@ struct aesni_mb_session {
+ 
+ 			struct gcm_key_data gcm_key;
+ 			/* *< Expanded GCM key */
+-			uint8_t zuc_cipher_key[16];
++			uint8_t zuc_cipher_key[32];
+ 			/* *< ZUC cipher key */
+ 			snow3g_key_schedule_t pKeySched_snow3g_cipher;
+ 			/* *< SNOW3G scheduled cipher key */
+@@ -893,7 +893,7 @@ struct aesni_mb_session {
+ 				/* *< k3. */
+ 			} cmac;
+ 			/* *< Expanded XCBC authentication keys */
+-			uint8_t zuc_auth_key[16];
++			uint8_t zuc_auth_key[32];
+ 			/* *< ZUC authentication key */
+ 			snow3g_key_schedule_t pKeySched_snow3g_auth;
+ 			/* *< SNOW3G scheduled authentication key */
+diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c
+index ebc9a0b562..9a85f46721 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c
++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c
+@@ -422,12 +422,13 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+ 		op->sym->session = NULL;
+ 	}
+ 
+-	enqueued_op = rte_ring_enqueue_burst(qp->ingress_queue,
+-			(void **)&op, processed_op, NULL);
++	if (unlikely(processed_op != 1))
++		return 0;
++	enqueued_op = rte_ring_enqueue(qp->ingress_queue, op);
+ 	qp->stats.enqueued_count += enqueued_op;
+ 	*accumulated_enqueued_ops += enqueued_op;
+ 
+-	return enqueued_op;
++	return 1;
+ }
+ 
+ static uint16_t
+diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_zuc.c b/dpdk/drivers/crypto/ipsec_mb/pmd_zuc.c
+index 2eae1d1ec7..e36c7092d6 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/pmd_zuc.c
++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_zuc.c
+@@ -166,7 +166,7 @@ process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
+ 
+ 		hash_keys[i] = sess->pKey_hash;
+ 		if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
+-			dst[i] = (uint32_t *)qp_data->temp_digest;
++			dst[i] = (uint32_t *)qp_data->temp_digest[i];
+ 		else
+ 			dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
+ 
+@@ -198,7 +198,7 @@ process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
+ 		struct ipsec_mb_qp *qp, uint8_t num_ops)
+ {
+ 	unsigned int i;
+-	unsigned int processed_ops;
++	unsigned int processed_ops = 0;
+ 
+ 	switch (op_type) {
+ 	case IPSEC_MB_OP_ENCRYPT_ONLY:
+@@ -212,18 +212,21 @@ process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
+ 				num_ops);
+ 		break;
+ 	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
++	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
+ 		processed_ops = process_zuc_cipher_op(qp, ops, sessions,
+ 				num_ops);
+ 		process_zuc_hash_op(qp, ops, sessions, processed_ops);
+ 		break;
+ 	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
++	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
+ 		processed_ops = process_zuc_hash_op(qp, ops, sessions,
+ 				num_ops);
+ 		process_zuc_cipher_op(qp, ops, sessions, processed_ops);
+ 		break;
+ 	default:
+ 		/* Operation not supported. */
+-		processed_ops = 0;
++		for (i = 0; i < num_ops; i++)
++			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ 	}
+ 
+ 	for (i = 0; i < num_ops; i++) {
+@@ -256,6 +259,7 @@ zuc_pmd_dequeue_burst(void *queue_pair,
+ 
+ 	struct zuc_session *curr_sess;
+ 	struct zuc_session *sessions[ZUC_MAX_BURST];
++	struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
+ 	enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
+ 	enum ipsec_mb_operation curr_zuc_op;
+ 	struct ipsec_mb_qp *qp = queue_pair;
+@@ -287,11 +291,11 @@ zuc_pmd_dequeue_burst(void *queue_pair,
+ 		 */
+ 		if (burst_size == 0) {
+ 			prev_zuc_op = curr_zuc_op;
+-			c_ops[0] = curr_c_op;
++			int_c_ops[0] = curr_c_op;
+ 			sessions[0] = curr_sess;
+ 			burst_size++;
+ 		} else if (curr_zuc_op == prev_zuc_op) {
+-			c_ops[burst_size] = curr_c_op;
++			int_c_ops[burst_size] = curr_c_op;
+ 			sessions[burst_size] = curr_sess;
+ 			burst_size++;
+ 			/*
+@@ -299,7 +303,7 @@ zuc_pmd_dequeue_burst(void *queue_pair,
+ 			 * process them, and start a new batch.
+ 			 */
+ 			if (burst_size == ZUC_MAX_BURST) {
+-				processed_ops = process_ops(c_ops, curr_zuc_op,
++				processed_ops = process_ops(int_c_ops, curr_zuc_op,
+ 						sessions, qp, burst_size);
+ 				if (processed_ops < burst_size) {
+ 					burst_size = 0;
+@@ -313,7 +317,7 @@ zuc_pmd_dequeue_burst(void *queue_pair,
+ 			 * Different operation type, process the ops
+ 			 * of the previous type.
+ 			 */
+-			processed_ops = process_ops(c_ops, prev_zuc_op,
++			processed_ops = process_ops(int_c_ops, prev_zuc_op,
+ 					sessions, qp, burst_size);
+ 			if (processed_ops < burst_size) {
+ 				burst_size = 0;
+@@ -323,7 +327,7 @@ zuc_pmd_dequeue_burst(void *queue_pair,
+ 			burst_size = 0;
+ 			prev_zuc_op = curr_zuc_op;
+ 
+-			c_ops[0] = curr_c_op;
++			int_c_ops[0] = curr_c_op;
+ 			sessions[0] = curr_sess;
+ 			burst_size++;
+ 		}
+@@ -331,7 +335,7 @@ zuc_pmd_dequeue_burst(void *queue_pair,
+ 
+ 	if (burst_size != 0) {
+ 		/* Process the crypto ops of the last operation type. */
+-		processed_ops = process_ops(c_ops, prev_zuc_op,
++		processed_ops = process_ops(int_c_ops, prev_zuc_op,
+ 				sessions, qp, burst_size);
+ 	}
+ 
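The pmd_zuc.c change relies on the queue pair keeping one scratch digest per burst slot (see the pmd_zuc_priv.h hunk below); with a single shared buffer, each verify op in a burst overwrote the digest computed for the previous one before it could be compared. In miniature, assuming illustrative sizes:

```c
#include <stdint.h>

#define BURST_MAX_SKETCH 16	/* stands in for ZUC_MAX_BURST */
#define DIGEST_LEN_SKETCH 4	/* ZUC-EIA3 produces a 32-bit MAC */

struct qp_scratch_sketch {
	/* one scratch digest per burst slot, so op i's computed MAC
	 * survives until it is checked against the user's digest
	 */
	uint8_t temp_digest[BURST_MAX_SKETCH][DIGEST_LEN_SKETCH];
};
```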
+diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_zuc_priv.h b/dpdk/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
+index 46d5bfae37..76fd6758c2 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
+@@ -75,7 +75,7 @@ struct zuc_session {
+ 
+ struct zuc_qp_data {
+ 
+-	uint8_t temp_digest[ZUC_DIGEST_LENGTH];
++	uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
+ 	/* *< Buffers used to store the digest generated
+ 	 * by the driver when verifying a digest provided
+ 	 * by the user (using authentication verify operation)
+diff --git a/dpdk/drivers/crypto/mlx5/mlx5_crypto.c b/dpdk/drivers/crypto/mlx5/mlx5_crypto.c
+index 421c23748a..36db31aae5 100644
+--- a/dpdk/drivers/crypto/mlx5/mlx5_crypto.c
++++ b/dpdk/drivers/crypto/mlx5/mlx5_crypto.c
+@@ -952,6 +952,7 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
+ 	ret = mlx5_crypto_configure_wqe_size(priv,
+ 		cdev->config.hca_attr.max_wqe_sz_sq, devarg_prms.max_segs_num);
+ 	if (ret) {
++		claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
+ 		mlx5_devx_uar_release(&priv->uar);
+ 		rte_cryptodev_pmd_destroy(priv->crypto_dev);
+ 		return -1;
+diff --git a/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c b/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
+index 20b288334a..27604459e4 100644
+--- a/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
++++ b/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
+@@ -296,7 +296,7 @@ cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
+ 	/* CPT VF device initialization */
+ 	otx_cpt_vfvq_init(cptvf);
+ 
+-	/* Send msg to PF to assign currnet Q to required group */
++	/* Send msg to PF to assign current Q to required group */
+ 	cptvf->vfgrp = group;
+ 	err = otx_cpt_send_vf_grp_msg(cptvf, group);
+ 	if (err) {
+diff --git a/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.h b/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.h
+index 508f3afd47..c1eedc1b9e 100644
+--- a/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.h
++++ b/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.h
+@@ -70,7 +70,7 @@ void
+ otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf);
+ 
+ /*
+- * Checks if VF is able to comminicate with PF
++ * Checks if VF is able to communicate with PF
+  * and also gets the CPT number this VF is associated to.
+  */
+ int
+diff --git a/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c b/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c
+index 9e8fd495cf..f7ca8a8a8e 100644
+--- a/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c
++++ b/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c
+@@ -558,7 +558,7 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
+ 					 &mdata, (void **)&prep_req);
+ 
+ 	if (unlikely(ret)) {
+-		CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
++		CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
+ 			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
+ 		return NULL;
+ 	}
+diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
+index 5794ed8159..5977bc746c 100644
+--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
+@@ -2,6 +2,8 @@
+  * Copyright(c) 2016-2017 Intel Corporation
+  */
+ 
++#define OPENSSL_API_COMPAT 0x10100000L
++
+ #include <rte_common.h>
+ #include <rte_hexdump.h>
+ #include <rte_cryptodev.h>
+diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+index 52715f86f8..35c4ad13ba 100644
+--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+@@ -2,6 +2,8 @@
+  * Copyright(c) 2016-2017 Intel Corporation
+  */
+ 
++#define OPENSSL_API_COMPAT 0x10100000L
++
+ #include <string.h>
+ 
+ #include <rte_common.h>
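The OPENSSL_API_COMPAT definitions added above (and repeated in the QAT files below) must precede every OpenSSL include in the translation unit: they tell OpenSSL 3.0 headers to keep exposing the 1.1.0-era API without deprecation warnings. In isolation:

```c
/* must appear before any OpenSSL header in the translation unit */
#define OPENSSL_API_COMPAT 0x10100000L

#include <openssl/evp.h>

/* 1.1.0-era EVP_* calls now compile warning-free against
 * OpenSSL 3.0 headers
 */
```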
+diff --git a/dpdk/drivers/crypto/qat/qat_asym.c b/dpdk/drivers/crypto/qat/qat_asym.c
+index f893508030..bd0bf5f0cb 100644
+--- a/dpdk/drivers/crypto/qat/qat_asym.c
++++ b/dpdk/drivers/crypto/qat/qat_asym.c
+@@ -97,7 +97,7 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+ 		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
+ 				QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size);
+ 	else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+-		if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT)
++		if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT)
+ 			qat_clear_arrays_crt(cookie, alg_size);
+ 		else {
+ 			qat_clear_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS,
+@@ -109,7 +109,7 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+ static int qat_asym_check_nonzero(rte_crypto_param n)
+ {
+ 	if (n.length < 8) {
+-		/* Not a case for any cryptograpic function except for DH
++		/* Not a case for any cryptographic function except for DH
+ 		 * generator which very often can be of one byte length
+ 		 */
+ 		size_t i;
+@@ -370,7 +370,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
+ 					return -(EINVAL);
+ 				}
+ 			}
+-			if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
++			if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {
+ 
+ 				qat_req->input_param_count =
+ 						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
+diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c
+index 93b257522b..0dd83ee2ee 100644
+--- a/dpdk/drivers/crypto/qat/qat_sym.c
++++ b/dpdk/drivers/crypto/qat/qat_sym.c
+@@ -2,6 +2,8 @@
+  * Copyright(c) 2015-2019 Intel Corporation
+  */
+ 
++#define OPENSSL_API_COMPAT 0x10100000L
++
+ #include <openssl/evp.h>
+ 
+ #include <rte_mempool.h>
+@@ -419,7 +421,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ 				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+ 
+ 			/* In case of AES-CCM this may point to user selected
+-			 * memory or iv offset in cypto_op
++			 * memory or iv offset in crypto_op
+ 			 */
+ 			uint8_t *aad_data = op->sym->aead.aad.data;
+ 			/* This is true AAD length, it not includes 18 bytes of
+diff --git a/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c b/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c
+index 12825e448b..792ad2b213 100644
+--- a/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c
++++ b/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c
+@@ -533,8 +533,20 @@ enqueue_one_aead_job(struct qat_sym_session *ctx,
+ 	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
+ 	if (ctx->is_single_pass) {
+ 		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
+-		cipher_param->spc_aad_addr = aad->iova;
+-		cipher_param->spc_auth_res_addr = digest->iova;
++
++		if (ctx->is_ucs) {
++			/* QAT GEN4 uses single pass to treat AEAD as cipher
++			 * operation
++			 */
++			struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
++				(void *)&req->serv_specif_rqpars;
++			cipher_param_20->spc_aad_addr = aad->iova;
++			cipher_param_20->spc_auth_res_addr = digest->iova;
++		} else {
++			cipher_param->spc_aad_addr = aad->iova;
++			cipher_param->spc_auth_res_addr = digest->iova;
++		}
++
+ 		return;
+ 	}
+ 
+diff --git a/dpdk/drivers/crypto/qat/qat_sym_session.c b/dpdk/drivers/crypto/qat/qat_sym_session.c
+index 8ca475ca8b..80d6fbfa46 100644
+--- a/dpdk/drivers/crypto/qat/qat_sym_session.c
++++ b/dpdk/drivers/crypto/qat/qat_sym_session.c
+@@ -2,6 +2,8 @@
+  * Copyright(c) 2015-2019 Intel Corporation
+  */
+ 
++#define OPENSSL_API_COMPAT 0x10100000L
++
+ #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
+ #include <openssl/aes.h>	/* Needed to calculate pre-compute values */
+ #include <openssl/md5.h>	/* Needed to calculate pre-compute values */
+@@ -124,8 +126,10 @@ bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ 	return 0;
+ 
+ ctx_init_err:
+-	if (*ctx != NULL)
++	if (*ctx != NULL) {
+ 		EVP_CIPHER_CTX_free(*ctx);
++		*ctx = NULL;
++	}
+ 	return ret;
+ }
+ 
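The bpi_cipher_ctx_init() error path above now clears the caller's pointer after freeing it, so a later cleanup pass cannot double-free the context. The idiom, reduced to a sketch:

```c
#include <openssl/evp.h>

static void
ctx_destroy_sketch(EVP_CIPHER_CTX **ctx)
{
	if (*ctx != NULL) {
		EVP_CIPHER_CTX_free(*ctx);
		*ctx = NULL;	/* a second cleanup pass now does nothing */
	}
}
```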
+diff --git a/dpdk/drivers/crypto/scheduler/scheduler_failover.c b/dpdk/drivers/crypto/scheduler/scheduler_failover.c
+index 5023577ef8..2a0e29fa72 100644
+--- a/dpdk/drivers/crypto/scheduler/scheduler_failover.c
++++ b/dpdk/drivers/crypto/scheduler/scheduler_failover.c
+@@ -157,6 +157,9 @@ scheduler_start(struct rte_cryptodev *dev)
+ 			((struct scheduler_qp_ctx *)
+ 				dev->data->queue_pairs[i])->private_qp_ctx;
+ 
++		sched_ctx->workers[PRIMARY_WORKER_IDX].qp_id = i;
++		sched_ctx->workers[SECONDARY_WORKER_IDX].qp_id = i;
++
+ 		rte_memcpy(&qp_ctx->primary_worker,
+ 				&sched_ctx->workers[PRIMARY_WORKER_IDX],
+ 				sizeof(struct scheduler_worker));
+diff --git a/dpdk/drivers/crypto/virtio/virtio_rxtx.c b/dpdk/drivers/crypto/virtio/virtio_rxtx.c
+index a65524a306..08359b3a39 100644
+--- a/dpdk/drivers/crypto/virtio/virtio_rxtx.c
++++ b/dpdk/drivers/crypto/virtio/virtio_rxtx.c
+@@ -264,6 +264,9 @@ virtqueue_crypto_sym_enqueue_xmit(
+ 		if (cop->phys_addr)
+ 			desc[idx].addr = cop->phys_addr + session->iv.offset;
+ 		else {
++			if (session->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE)
++				return -ENOMEM;
++
+ 			rte_memcpy(crypto_op_cookie->iv,
+ 					rte_crypto_op_ctod_offset(cop,
+ 					uint8_t *, session->iv.offset),
+diff --git a/dpdk/drivers/crypto/virtio/virtqueue.h b/dpdk/drivers/crypto/virtio/virtqueue.h
+index bf10c6579b..c96ca62992 100644
+--- a/dpdk/drivers/crypto/virtio/virtqueue.h
++++ b/dpdk/drivers/crypto/virtio/virtqueue.h
+@@ -145,7 +145,7 @@ virtqueue_notify(struct virtqueue *vq)
+ {
+ 	/*
+ 	 * Ensure updated avail->idx is visible to host.
+-	 * For virtio on IA, the notificaiton is through io port operation
++	 * For virtio on IA, the notification is through io port operation
+ 	 * which is a serialization instruction itself.
+ 	 */
+ 	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+diff --git a/dpdk/drivers/dma/cnxk/meson.build b/dpdk/drivers/dma/cnxk/meson.build
+index 633e92a20d..d4be4ee860 100644
+--- a/dpdk/drivers/dma/cnxk/meson.build
++++ b/dpdk/drivers/dma/cnxk/meson.build
+@@ -3,4 +3,3 @@
+ 
+ deps += ['bus_pci', 'common_cnxk', 'dmadev']
+ sources = files('cnxk_dmadev.c')
+-headers = files('cnxk_dmadev.h')
+diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c
+index 05066b4d0e..ec687ef67e 100644
+--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c
++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c
+@@ -581,7 +581,7 @@ hisi_dma_scan_cq(struct hisi_dma_dev *hw)
+ 	uint16_t count = 0;
+ 	uint64_t misc;
+ 
+-	while (true) {
++	while (count < hw->cq_depth) {
+ 		cqe = &hw->cqe[cq_head];
+ 		misc = cqe->misc;
+ 		misc = rte_le_to_cpu_64(misc);
+@@ -589,6 +589,16 @@ hisi_dma_scan_cq(struct hisi_dma_dev *hw)
+ 			break;
+ 
+ 		csq_head = FIELD_GET(CQE_SQ_HEAD_MASK, misc);
++		if (unlikely(csq_head > hw->sq_depth_mask)) {
++			/**
++			 * Defensive programming to prevent overflow of the
++			 * status array indexed by csq_head. Only error logs
++			 * status array indexed by csq_head. Only an error
++			 * log is emitted to flag the condition.
++			HISI_DMA_ERR(hw, "invalid csq_head:%u!\n", csq_head);
++			count = 0;
++			break;
++		}
+ 		if (unlikely(misc & CQE_STATUS_MASK))
+ 			hw->status[csq_head] = FIELD_GET(CQE_STATUS_MASK,
+ 							 misc);
+@@ -649,12 +659,12 @@ hisi_dma_completed(void *dev_private,
+ 		}
+ 		sq_head = (sq_head + 1) & hw->sq_depth_mask;
+ 	}
++	*last_idx = hw->cridx + i - 1;
+ 	if (i > 0) {
+ 		hw->cridx += i;
+-		*last_idx = hw->cridx - 1;
+ 		hw->sq_head = sq_head;
++		hw->completed += i;
+ 	}
+-	hw->completed += i;
+ 
+ 	return i;
+ }
+@@ -708,12 +718,12 @@ hisi_dma_completed_status(void *dev_private,
+ 		hw->status[sq_head] = HISI_DMA_STATUS_SUCCESS;
+ 		sq_head = (sq_head + 1) & hw->sq_depth_mask;
+ 	}
++	*last_idx = hw->cridx + cpl_num - 1;
+ 	if (likely(cpl_num > 0)) {
+ 		hw->cridx += cpl_num;
+-		*last_idx = hw->cridx - 1;
+ 		hw->sq_head = sq_head;
++		hw->completed += cpl_num;
+ 	}
+-	hw->completed += cpl_num;
+ 
+ 	return cpl_num;
+ }
+@@ -731,24 +741,15 @@ hisi_dma_burst_capacity(const void *dev_private, uint16_t vchan)
+ 				      sq_head - 1 - sq_tail;
+ }
+ 
+-static void
+-hisi_dma_gen_pci_device_name(const struct rte_pci_device *pci_dev,
+-			     char *name, size_t size)
+-{
+-	memset(name, 0, size);
+-	(void)snprintf(name, size, "%x:%x.%x",
+-		 pci_dev->addr.bus, pci_dev->addr.devid,
+-		 pci_dev->addr.function);
+-}
+-
+ static void
+ hisi_dma_gen_dev_name(const struct rte_pci_device *pci_dev,
+-		      uint8_t queue_id, char *name, size_t size)
++		      uint8_t queue_id, char *dev_name, size_t size)
+ {
+-	memset(name, 0, size);
+-	(void)snprintf(name, size, "%x:%x.%x-ch%u",
+-		 pci_dev->addr.bus, pci_dev->addr.devid,
+-		 pci_dev->addr.function, queue_id);
++	char name[RTE_DEV_NAME_MAX_LEN] = { 0 };
++
++	memset(dev_name, 0, size);
++	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
++	(void)snprintf(dev_name, size, "%s-ch%u", name, queue_id);
+ }
+ 
+ /**
+@@ -864,7 +865,7 @@ hisi_dma_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ 	uint8_t i;
+ 	int ret;
+ 
+-	hisi_dma_gen_pci_device_name(pci_dev, name, sizeof(name));
++	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ 
+ 	if (pci_dev->mem_resource[2].addr == NULL) {
+ 		HISI_DMA_LOG(ERR, "%s BAR2 is NULL!\n", name);
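Two behavioral fixes in the hisi_dmadev.c hunks above are worth noting: the CQ scan is now bounded by cq_depth rather than looping unconditionally, and *last_idx is written even when a poll completes nothing, so callers can keep tracking the ring position. The second convention, sketched with illustrative names:

```c
#include <stdint.h>

/* 'cridx' is the driver's running completion index, 'done' the number
 * of descriptors completed by this poll
 */
static uint16_t
completed_sketch(uint16_t *cridx, uint16_t done, uint16_t *last_idx)
{
	/* written unconditionally: with done == 0 this re-reports the
	 * previously completed descriptor instead of leaving *last_idx
	 * stale
	 */
	*last_idx = (uint16_t)(*cridx + done - 1);
	if (done > 0)
		*cridx += done;
	return done;
}
```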
+diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h
+index 12e209c86e..f06c851825 100644
+--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h
++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h
+@@ -7,6 +7,8 @@
+ 
+ #include <rte_byteorder.h>
+ #include <rte_common.h>
++#include <rte_memzone.h>
++#include <rte_dmadev_pmd.h>
+ 
+ #define BIT(x)	(1ul << (x))
+ #define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
+diff --git a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py
+index fcc27822ef..3f5d5ee752 100755
+--- a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py
++++ b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py
+@@ -29,9 +29,17 @@ def write_values(self, values):
+                 f.write(str(contents))
+ 
+ 
++def get_drv_dir(dtype):
++    "Get the sysfs path for the driver, either 'idxd' or 'user'"
++    drv_dir = "/sys/bus/dsa/drivers/" + dtype
++    if not os.path.exists(drv_dir):
++        return "/sys/bus/dsa/drivers/dsa"
++    return drv_dir
++
++
+ def reset_device(dsa_id):
+     "Reset the DSA device and all its queues"
+-    drv_dir = SysfsDir("/sys/bus/dsa/drivers/dsa")
++    drv_dir = SysfsDir(get_drv_dir("idxd"))
+     drv_dir.write_values({"unbind": f"dsa{dsa_id}"})
+ 
+ 
+@@ -58,7 +66,6 @@ def get_dsa_id(pci):
+ def configure_dsa(dsa_id, queues, prefix):
+     "Configure the DSA instance with appropriate number of queues"
+     dsa_dir = SysfsDir(f"/sys/bus/dsa/devices/dsa{dsa_id}")
+-    drv_dir = SysfsDir("/sys/bus/dsa/drivers/dsa")
+ 
+     max_groups = dsa_dir.read_int("max_groups")
+     max_engines = dsa_dir.read_int("max_engines")
+@@ -82,12 +89,16 @@ def configure_dsa(dsa_id, queues, prefix):
+                              "mode": "dedicated",
+                              "name": f"{prefix}_wq{dsa_id}.{q}",
+                              "priority": 1,
++                             "max_batch_size": 1024,
+                              "size": int(max_work_queues_size / nb_queues)})
+ 
+     # enable device and then queues
+-    drv_dir.write_values({"bind": f"dsa{dsa_id}"})
++    idxd_dir = SysfsDir(get_drv_dir("idxd"))
++    idxd_dir.write_values({"bind": f"dsa{dsa_id}"})
++
++    user_dir = SysfsDir(get_drv_dir("user"))
+     for q in range(nb_queues):
+-        drv_dir.write_values({"bind": f"wq{dsa_id}.{q}"})
++        user_dir.write_values({"bind": f"wq{dsa_id}.{q}"})
+ 
+ 
+ def main(args):
+diff --git a/dpdk/drivers/dma/idxd/idxd_common.c b/dpdk/drivers/dma/idxd/idxd_common.c
+index fc11b11337..c77200a457 100644
+--- a/dpdk/drivers/dma/idxd/idxd_common.c
++++ b/dpdk/drivers/dma/idxd/idxd_common.c
+@@ -13,12 +13,23 @@
+ 
+ #define IDXD_PMD_NAME_STR "dmadev_idxd"
+ 
++/* systems with DSA all support AVX2 so allow our data-path functions to
++ * always use at least that instruction set
++ */
++#ifndef __AVX2__
++#define __use_avx2 __attribute__((target("avx2")))
++#else
++#define __use_avx2
++#endif
++
++__use_avx2
+ static __rte_always_inline rte_iova_t
+ __desc_idx_to_iova(struct idxd_dmadev *idxd, uint16_t n)
+ {
+ 	return idxd->desc_iova + (n * sizeof(struct idxd_hw_desc));
+ }
+ 
++__use_avx2
+ static __rte_always_inline void
+ __idxd_movdir64b(volatile void *dst, const struct idxd_hw_desc *src)
+ {
+@@ -28,6 +39,7 @@ __idxd_movdir64b(volatile void *dst, const struct idxd_hw_desc *src)
+ 			: "memory");
+ }
+ 
++__use_avx2
+ static __rte_always_inline void
+ __submit(struct idxd_dmadev *idxd)
+ {
+@@ -74,6 +86,7 @@ __submit(struct idxd_dmadev *idxd)
+ 			_mm256_setzero_si256());
+ }
+ 
++__use_avx2
+ static __rte_always_inline int
+ __idxd_write_desc(struct idxd_dmadev *idxd,
+ 		const uint32_t op_flags,
+@@ -112,6 +125,7 @@ __idxd_write_desc(struct idxd_dmadev *idxd,
+ 	return job_id;
+ }
+ 
++__use_avx2
+ int
+ idxd_enqueue_copy(void *dev_private, uint16_t qid __rte_unused, rte_iova_t src,
+ 		rte_iova_t dst, unsigned int length, uint64_t flags)
+@@ -126,6 +140,7 @@ idxd_enqueue_copy(void *dev_private, uint16_t qid __rte_unused, rte_iova_t src,
+ 			flags);
+ }
+ 
++__use_avx2
+ int
+ idxd_enqueue_fill(void *dev_private, uint16_t qid __rte_unused, uint64_t pattern,
+ 		rte_iova_t dst, unsigned int length, uint64_t flags)
+@@ -136,6 +151,7 @@ idxd_enqueue_fill(void *dev_private, uint16_t qid __rte_unused, uint64_t pattern
+ 			flags);
+ }
+ 
++__use_avx2
+ int
+ idxd_submit(void *dev_private, uint16_t qid __rte_unused)
+ {
+@@ -143,6 +159,7 @@ idxd_submit(void *dev_private, uint16_t qid __rte_unused)
+ 	return 0;
+ }
+ 
++__use_avx2
+ static enum rte_dma_status_code
+ get_comp_status(struct idxd_completion *c)
+ {
+@@ -163,6 +180,7 @@ get_comp_status(struct idxd_completion *c)
+ 	}
+ }
+ 
++__use_avx2
+ int
+ idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+ 		enum rte_dma_vchan_status *status)
+@@ -180,6 +198,7 @@ idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+ 	return 0;
+ }
+ 
++__use_avx2
+ static __rte_always_inline int
+ batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
+ {
+@@ -224,6 +243,7 @@ batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *s
+ 	return -1; /* error case */
+ }
+ 
++__use_avx2
+ static inline uint16_t
+ batch_completed(struct idxd_dmadev *idxd, uint16_t max_ops, bool *has_error)
+ {
+@@ -275,6 +295,7 @@ batch_completed(struct idxd_dmadev *idxd, uint16_t max_ops, bool *has_error)
+ 	return ret;
+ }
+ 
++__use_avx2
+ static uint16_t
+ batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
+ {
+@@ -366,6 +387,7 @@ batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_
+ 	return ret;
+ }
+ 
++__use_avx2
+ uint16_t
+ idxd_completed(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
+ 		uint16_t *last_idx, bool *has_error)
+@@ -383,6 +405,7 @@ idxd_completed(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
+ 	return ret;
+ }
+ 
++__use_avx2
+ uint16_t
+ idxd_completed_status(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
+ 		uint16_t *last_idx, enum rte_dma_status_code *status)
+@@ -480,12 +503,12 @@ idxd_burst_capacity(const void *dev_private, uint16_t vchan __rte_unused)
+ 			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+ 		return 0;
+ 
+-	/* For descriptors, check for wrap-around on write but not read */
+-	if (idxd->ids_returned > write_idx)
+-		write_idx += idxd->desc_ring_mask + 1;
+-	used_space = write_idx - idxd->ids_returned;
++	/* Subtract and mask to get in correct range */
++	used_space = (write_idx - idxd->ids_returned) & idxd->desc_ring_mask;
+ 
+-	return RTE_MIN((idxd->desc_ring_mask - used_space), idxd->max_batch_size);
++	const int ret = RTE_MIN((idxd->desc_ring_mask - used_space),
++			(idxd->max_batch_size - idxd->batch_size));
++	return ret < 0 ? 0 : (uint16_t)ret;
+ }
+ 
+ int
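The idxd hunks above replace a file-wide -mavx2 flag (see the meson.build hunk below) with a per-function target attribute, so only the data-path functions require AVX2 while the file stays linkable into a baseline build. The pattern, for GCC/clang on x86:

```c
#include <x86intrin.h>

/* compile only the annotated function for AVX2; the rest of the file
 * keeps the build's baseline ISA (mirrors the __use_avx2 macro above)
 */
#ifndef __AVX2__
#define use_avx2_sketch __attribute__((target("avx2")))
#else
#define use_avx2_sketch
#endif

use_avx2_sketch
static __m256i
load256_sketch(const void *p)
{
	return _mm256_loadu_si256((const __m256i *)p);
}
```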
+diff --git a/dpdk/drivers/dma/idxd/idxd_internal.h b/dpdk/drivers/dma/idxd/idxd_internal.h
+index 3375600217..180a8587c6 100644
+--- a/dpdk/drivers/dma/idxd/idxd_internal.h
++++ b/dpdk/drivers/dma/idxd/idxd_internal.h
+@@ -7,6 +7,7 @@
+ 
+ #include <rte_dmadev_pmd.h>
+ #include <rte_spinlock.h>
++#include <rte_atomic.h>
+ 
+ #include "idxd_hw_defs.h"
+ 
+@@ -33,6 +34,7 @@ struct idxd_pci_common {
+ 	rte_spinlock_t lk;
+ 
+ 	uint8_t wq_cfg_sz;
++	rte_atomic16_t ref_count;
+ 	volatile struct rte_idxd_bar0 *regs;
+ 	volatile uint32_t *wq_regs_base;
+ 	volatile struct rte_idxd_grpcfg *grp_regs;
+diff --git a/dpdk/drivers/dma/idxd/idxd_pci.c b/dpdk/drivers/dma/idxd/idxd_pci.c
+index 9ca1ec64e9..2f8ec06d9e 100644
+--- a/dpdk/drivers/dma/idxd/idxd_pci.c
++++ b/dpdk/drivers/dma/idxd/idxd_pci.c
+@@ -6,6 +6,7 @@
+ #include <rte_devargs.h>
+ #include <rte_dmadev_pmd.h>
+ #include <rte_malloc.h>
++#include <rte_atomic.h>
+ 
+ #include "idxd_internal.h"
+ 
+@@ -38,13 +39,13 @@ idxd_pci_dev_command(struct idxd_dmadev *idxd, enum rte_idxd_cmds command)
+ 			IDXD_PMD_ERR("Timeout waiting for command response from HW");
+ 			rte_spinlock_unlock(&idxd->u.pci->lk);
+ 			err_code &= CMDSTATUS_ERR_MASK;
+-			return -err_code;
++			return err_code;
+ 		}
+ 	} while (err_code & CMDSTATUS_ACTIVE_MASK);
+ 	rte_spinlock_unlock(&idxd->u.pci->lk);
+ 
+ 	err_code &= CMDSTATUS_ERR_MASK;
+-	return -err_code;
++	return err_code;
+ }
+ 
+ static uint32_t *
+@@ -115,20 +116,38 @@ idxd_pci_dev_close(struct rte_dma_dev *dev)
+ {
+ 	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+ 	uint8_t err_code;
++	int is_last_wq;
+ 
+-	/* disable the device */
+-	err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
+-	if (err_code) {
+-		IDXD_PMD_ERR("Error disabling device: code %#x", err_code);
+-		return err_code;
++	if (idxd_is_wq_enabled(idxd)) {
++		/* disable the wq */
++		err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
++		if (err_code) {
++			IDXD_PMD_ERR("Error disabling wq: code %#x", err_code);
++			return err_code;
++		}
++		IDXD_PMD_DEBUG("IDXD WQ disabled OK");
+ 	}
+-	IDXD_PMD_DEBUG("IDXD Device disabled OK");
+ 
+ 	/* free device memory */
+ 	IDXD_PMD_DEBUG("Freeing device driver memory");
+-	rte_free(idxd->batch_idx_ring);
++	rte_free(idxd->batch_comp_ring);
+ 	rte_free(idxd->desc_ring);
+ 
++	/* if this is the last WQ on the device, disable the device and free
++	 * the PCI struct
++	 */
++	is_last_wq = rte_atomic16_dec_and_test(&idxd->u.pci->ref_count);
++	if (is_last_wq) {
++		/* disable the device */
++		err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
++		if (err_code) {
++			IDXD_PMD_ERR("Error disabling device: code %#x", err_code);
++			return err_code;
++		}
++		IDXD_PMD_DEBUG("IDXD device disabled OK");
++		rte_free(idxd->u.pci);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -159,12 +178,13 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
+ 	uint8_t lg2_max_batch, lg2_max_copy_size;
+ 	unsigned int i, err_code;
+ 
+-	pci = malloc(sizeof(*pci));
++	pci = rte_malloc(NULL, sizeof(*pci), 0);
+ 	if (pci == NULL) {
+ 		IDXD_PMD_ERR("%s: Can't allocate memory", __func__);
+ 		err_code = -1;
+ 		goto err;
+ 	}
++	memset(pci, 0, sizeof(*pci));
+ 	rte_spinlock_init(&pci->lk);
+ 
+ 	/* assign the bar registers, and then configure device */
+@@ -330,6 +350,7 @@ idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
+ 				free(idxd.u.pci);
+ 			return ret;
+ 		}
++		rte_atomic16_inc(&idxd.u.pci->ref_count);
+ 	}
+ 
+ 	return 0;
+@@ -359,10 +380,10 @@ idxd_dmadev_remove_pci(struct rte_pci_device *dev)
+ 	IDXD_PMD_INFO("Closing %s on NUMA node %d", name, dev->device.numa_node);
+ 
+ 	RTE_DMA_FOREACH_DEV(i) {
+-		struct rte_dma_info *info = {0};
+-		rte_dma_info_get(i, info);
+-		if (strncmp(name, info->dev_name, strlen(name)) == 0)
+-			idxd_dmadev_destroy(info->dev_name);
++		struct rte_dma_info info;
++		rte_dma_info_get(i, &info);
++		if (strncmp(name, info.dev_name, strlen(name)) == 0)
++			idxd_dmadev_destroy(info.dev_name);
+ 	}
+ 
+ 	return 0;
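The close path above uses a reference count so that only the last work queue on a device disables the hardware and frees the shared PCI state. The last-close pattern with the same rte_atomic16 API, names illustrative:

```c
#include <rte_atomic.h>
#include <rte_malloc.h>

/* stand-in for the shared per-PCI-device state */
struct pci_shared_sketch {
	rte_atomic16_t ref_count;	/* one reference per work queue */
};

static void
wq_close_sketch(struct pci_shared_sketch *s)
{
	/* rte_atomic16_dec_and_test() returns true only for the caller
	 * that drops the count to zero, i.e. the last open work queue
	 */
	if (rte_atomic16_dec_and_test(&s->ref_count)) {
		/* last user: disable the device here, then release the
		 * rte_malloc'd shared state
		 */
		rte_free(s);
	}
}
```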
+diff --git a/dpdk/drivers/dma/idxd/meson.build b/dpdk/drivers/dma/idxd/meson.build
+index f1396be945..c5403b431c 100644
+--- a/dpdk/drivers/dma/idxd/meson.build
++++ b/dpdk/drivers/dma/idxd/meson.build
+@@ -4,8 +4,18 @@
+ build = dpdk_conf.has('RTE_ARCH_X86')
+ reason = 'only supported on x86'
+ 
++test_avx2_code = '''
++#include <x86intrin.h>
++__attribute__((target("avx2")))
++__m256i fn(void *x) { return _mm256_loadu_si256(x); }
++'''
++if build and not cc.compiles(test_avx2_code, args:machine_args)
++    build = false
++    reason = 'missing support for AVX2 function attribute'
++    subdir_done()
++endif
++
+ deps += ['bus_pci']
+-cflags += '-mavx2' # all platforms with idxd HW support AVX
+ sources = files(
+         'idxd_common.c',
+         'idxd_pci.c',
+diff --git a/dpdk/drivers/dma/skeleton/skeleton_dmadev.c b/dpdk/drivers/dma/skeleton/skeleton_dmadev.c
+index d9e4f731d7..6b0bb14e2c 100644
+--- a/dpdk/drivers/dma/skeleton/skeleton_dmadev.c
++++ b/dpdk/drivers/dma/skeleton/skeleton_dmadev.c
+@@ -118,6 +118,7 @@ skeldma_start(struct rte_dma_dev *dev)
+ 	fflush_ring(hw, hw->desc_running);
+ 	fflush_ring(hw, hw->desc_completed);
+ 	hw->ridx = 0;
++	hw->last_ridx = hw->ridx - 1;
+ 	hw->submitted_count = 0;
+ 	hw->zero_req_count = 0;
+ 	hw->completed_count = 0;
+@@ -169,7 +170,7 @@ vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+ 	struct rte_ring *completed;
+ 	uint16_t i;
+ 
+-	desc = rte_zmalloc_socket("dma_skelteon_desc",
++	desc = rte_zmalloc_socket("dma_skeleton_desc",
+ 				  nb_desc * sizeof(struct skeldma_desc),
+ 				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+ 	if (desc == NULL) {
+@@ -322,9 +323,11 @@ skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
+ 		GET_RING_COUNT(hw->desc_completed));
+ 	(void)fprintf(f,
+ 		"    next_ring_idx: %u\n"
++		"    last_ring_idx: %u\n"
+ 		"    submitted_count: %" PRIu64 "\n"
+ 		"    completed_count: %" PRIu64 "\n",
+-		hw->ridx, hw->submitted_count, hw->completed_count);
++		hw->ridx, hw->last_ridx,
++		hw->submitted_count, hw->completed_count);
+ 
+ 	return 0;
+ }
+@@ -398,11 +401,15 @@ skeldma_completed(void *dev_private,
+ 	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+ 	while (index < count) {
+ 		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+-		if (index == count - 1)
++		if (index == count - 1) {
++			hw->last_ridx = desc->ridx;
+ 			*last_idx = desc->ridx;
++		}
+ 		index++;
+ 		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+ 	}
++	if (unlikely(count == 0))
++		*last_idx = hw->last_ridx;
+ 
+ 	return count;
+ }
+@@ -422,11 +429,15 @@ skeldma_completed_status(void *dev_private,
+ 	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+ 	while (index < count) {
+ 		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+-		if (index == count - 1)
++		if (index == count - 1) {
++			hw->last_ridx = desc->ridx;
+ 			*last_idx = desc->ridx;
++		}
+ 		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+ 		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+ 	}
++	if (unlikely(count == 0))
++		*last_idx = hw->last_ridx;
+ 
+ 	return count;
+ }
+diff --git a/dpdk/drivers/dma/skeleton/skeleton_dmadev.h b/dpdk/drivers/dma/skeleton/skeleton_dmadev.h
+index 91eb5460fc..6f89400480 100644
+--- a/dpdk/drivers/dma/skeleton/skeleton_dmadev.h
++++ b/dpdk/drivers/dma/skeleton/skeleton_dmadev.h
+@@ -50,6 +50,7 @@ struct skeldma_hw {
+ 	/* Cache delimiter for dataplane API's operation data */
+ 	char cache1 __rte_cache_aligned;
+ 	uint16_t ridx;  /* ring idx */
++	uint16_t last_ridx;
+ 	uint64_t submitted_count;
+ 
+ 	/* Cache delimiter for cpucopy thread's operation data */
+diff --git a/dpdk/drivers/event/cnxk/cn10k_eventdev.c b/dpdk/drivers/event/cnxk/cn10k_eventdev.c
+index c5a8c1ae8f..4d878fc2b7 100644
+--- a/dpdk/drivers/event/cnxk/cn10k_eventdev.c
++++ b/dpdk/drivers/event/cnxk/cn10k_eventdev.c
+@@ -111,10 +111,10 @@ cn10k_sso_hws_release(void *arg, void *hws)
+ {
+ 	struct cnxk_sso_evdev *dev = arg;
+ 	struct cn10k_sso_hws *ws = hws;
+-	int i;
++	uint16_t i;
+ 
+ 	for (i = 0; i < dev->nb_event_queues; i++)
+-		roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
++		roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
+ 	memset(ws, 0, sizeof(*ws));
+ }
+ 
+diff --git a/dpdk/drivers/event/cnxk/cn10k_worker.h b/dpdk/drivers/event/cnxk/cn10k_worker.h
+index f8331e88d7..f67c36f888 100644
+--- a/dpdk/drivers/event/cnxk/cn10k_worker.h
++++ b/dpdk/drivers/event/cnxk/cn10k_worker.h
+@@ -169,7 +169,6 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
+ 					   CNXK_SSO_WQE_SG_PTR);
+ 		cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
+ 					flags & NIX_RX_OFFLOAD_TSTAMP_F,
+-					flags & NIX_RX_MULTI_SEG_F,
+ 					(uint64_t *)tstamp_ptr);
+ 		wqe[0] = (uint64_t *)mbuf;
+ 		non_vec--;
+@@ -261,7 +260,6 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
+ 			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
+ 						ws->tstamp,
+ 						flags & NIX_RX_OFFLOAD_TSTAMP_F,
+-						flags & NIX_RX_MULTI_SEG_F,
+ 						(uint64_t *)tstamp_ptr);
+ 			gw.u64[1] = mbuf;
+ 		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+@@ -526,7 +524,7 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
+ 				ev->sched_type, ws->tx_base, txq_data, flags);
+ 		}
+ 		rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
+-		return (meta & 0xFFFF);
++		return 1;
+ 	}
+ 
+ 	m = ev->mbuf;
+diff --git a/dpdk/drivers/event/cnxk/cn9k_eventdev.c b/dpdk/drivers/event/cnxk/cn9k_eventdev.c
+index b68ce6c0a4..d2ec4aedd7 100644
+--- a/dpdk/drivers/event/cnxk/cn9k_eventdev.c
++++ b/dpdk/drivers/event/cnxk/cn9k_eventdev.c
+@@ -109,24 +109,21 @@ cn9k_sso_hws_release(void *arg, void *hws)
+ 	struct cnxk_sso_evdev *dev = arg;
+ 	struct cn9k_sso_hws_dual *dws;
+ 	struct cn9k_sso_hws *ws;
+-	int i;
++	uint16_t i;
+ 
+ 	if (dev->dual_ws) {
+ 		dws = hws;
+ 		for (i = 0; i < dev->nb_event_queues; i++) {
+ 			roc_sso_hws_unlink(&dev->sso,
+-					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
+-					   (uint16_t *)&i, 1);
++					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), &i, 1);
+ 			roc_sso_hws_unlink(&dev->sso,
+-					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
+-					   (uint16_t *)&i, 1);
++					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), &i, 1);
+ 		}
+ 		memset(dws, 0, sizeof(*dws));
+ 	} else {
+ 		ws = hws;
+ 		for (i = 0; i < dev->nb_event_queues; i++)
+-			roc_sso_hws_unlink(&dev->sso, ws->hws_id,
+-					   (uint16_t *)&i, 1);
++			roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
+ 		memset(ws, 0, sizeof(*ws));
+ 	}
+ }
+diff --git a/dpdk/drivers/event/cnxk/cn9k_worker.h b/dpdk/drivers/event/cnxk/cn9k_worker.h
+index 9377fa50e7..8abdd13b66 100644
+--- a/dpdk/drivers/event/cnxk/cn9k_worker.h
++++ b/dpdk/drivers/event/cnxk/cn9k_worker.h
+@@ -209,7 +209,6 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
+ 						   CNXK_SSO_WQE_SG_PTR);
+ 			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
+ 						flags & NIX_RX_OFFLOAD_TSTAMP_F,
+-						flags & NIX_RX_MULTI_SEG_F,
+ 						(uint64_t *)tstamp_ptr);
+ 			gw.u64[1] = mbuf;
+ 		}
+@@ -288,7 +287,6 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
+ 			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
+ 						ws->tstamp,
+ 						flags & NIX_RX_OFFLOAD_TSTAMP_F,
+-						flags & NIX_RX_MULTI_SEG_F,
+ 						(uint64_t *)tstamp_ptr);
+ 			gw.u64[1] = mbuf;
+ 		}
+diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c
+index f7a5026250..46a788ef4e 100644
+--- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c
++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c
+@@ -417,10 +417,10 @@ cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
+ 
+ 	plt_sso_dbg();
+ 	for (i = 0; i < dev->qos_queue_cnt; i++) {
+-		qos->hwgrp = dev->qos_parse_data[i].queue;
+-		qos->iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
+-		qos->taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
+-		qos->xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
++		qos[i].hwgrp = dev->qos_parse_data[i].queue;
++		qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
++		qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
++		qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
+ 	}
+ 	rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
+ 				      dev->xae_cnt);
+@@ -482,7 +482,7 @@ static void
+ parse_queue_param(char *value, void *opaque)
+ {
+ 	struct cnxk_sso_qos queue_qos = {0};
+-	uint8_t *val = (uint8_t *)&queue_qos;
++	uint16_t *val = (uint16_t *)&queue_qos;
+ 	struct cnxk_sso_evdev *dev = opaque;
+ 	char *tok = strtok(value, "-");
+ 	struct cnxk_sso_qos *old_ptr;
+diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.h b/dpdk/drivers/event/cnxk/cnxk_eventdev.h
+index 305c6a3b9e..39c13b02fc 100644
+--- a/dpdk/drivers/event/cnxk/cnxk_eventdev.h
++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.h
+@@ -44,7 +44,7 @@
+ #define CNXK_TT_FROM_EVENT(x)	    (((x) >> 38) & SSO_TT_EMPTY)
+ #define CNXK_EVENT_TYPE_FROM_TAG(x) (((x) >> 28) & 0xf)
+ #define CNXK_SUB_EVENT_FROM_TAG(x)  (((x) >> 20) & 0xff)
+-#define CNXK_CLR_SUB_EVENT(x)	    (~(0xffu << 20) & x)
++#define CNXK_CLR_SUB_EVENT(x)	    (~(0xffull << 20) & x)
+ #define CNXK_GRP_FROM_TAG(x)	    (((x) >> 36) & 0x3ff)
+ #define CNXK_SWTAG_PEND(x)	    (BIT_ULL(62) & x)
+ 
+@@ -74,9 +74,9 @@ typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
+ 
+ struct cnxk_sso_qos {
+ 	uint16_t queue;
+-	uint8_t xaq_prcnt;
+-	uint8_t taq_prcnt;
+-	uint8_t iaq_prcnt;
++	uint16_t xaq_prcnt;
++	uint16_t taq_prcnt;
++	uint16_t iaq_prcnt;
+ };
+ 
+ struct cnxk_sso_evdev {
+diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c
+index fdcd68ca63..54c3d6a3cb 100644
+--- a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c
++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c
+@@ -230,7 +230,7 @@ cnxk_sso_rx_adapter_queue_add(
+ 			cnxk_eth_dev, (uint16_t)rx_queue_id, port,
+ 			&queue_conf->ev,
+ 			!!(queue_conf->rx_queue_flags &
+-			   RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
++			   RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
+ 		if (queue_conf->rx_queue_flags &
+ 		    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+ 			cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
+diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev_selftest.c b/dpdk/drivers/event/cnxk/cnxk_eventdev_selftest.c
+index 69c15b1d0a..3aa6f081a7 100644
+--- a/dpdk/drivers/event/cnxk/cnxk_eventdev_selftest.c
++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev_selftest.c
+@@ -140,7 +140,7 @@ _eventdev_setup(int mode)
+ 	struct rte_event_dev_info info;
+ 	int i, ret;
+ 
+-	/* Create and destrory pool for each test case to make it standalone */
++	/* Create and destroy pool for each test case to make it standalone */
+ 	eventdev_test_mempool = rte_pktmbuf_pool_create(
+ 		pool_name, MAX_EVENTS, 0, 0, 512, rte_socket_id());
+ 	if (!eventdev_test_mempool) {
+@@ -626,6 +626,12 @@ launch_workers_and_wait(int (*main_thread)(void *),
+ 		/* start core */ -1,
+ 		/* skip main */ 1,
+ 		/* wrap */ 0);
++	if (w_lcore == RTE_MAX_LCORE) {
++		plt_err("Failed to get next available lcore");
++		free(param);
++		return -1;
++	}
++
+ 	rte_eal_remote_launch(main_thread, &param[0], w_lcore);
+ 
+ 	for (port = 1; port < nb_workers; port++) {
+@@ -635,6 +641,12 @@ launch_workers_and_wait(int (*main_thread)(void *),
+ 		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ 		rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ 		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
++		if (w_lcore == RTE_MAX_LCORE) {
++			plt_err("Failed to get next available lcore");
++			free(param);
++			return -1;
++		}
++
+ 		rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
+ 	}
+ 
+@@ -1543,7 +1555,7 @@ cnxk_sso_selftest(const char *dev_name)
+ 		cn9k_sso_set_rsrc(dev);
+ 		if (cnxk_sso_testsuite_run(dev_name))
+ 			return rc;
+-		/* Verift dual ws mode. */
++		/* Verify dual ws mode. */
+ 		printf("Verifying CN9K Dual workslot mode\n");
+ 		dev->dual_ws = 1;
+ 		cn9k_sso_set_rsrc(dev);
+diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_worker.c b/dpdk/drivers/event/cnxk/cnxk_tim_worker.c
+index 3ce99864a6..dfcfbdc797 100644
+--- a/dpdk/drivers/event/cnxk/cnxk_tim_worker.c
++++ b/dpdk/drivers/event/cnxk/cnxk_tim_worker.c
+@@ -63,7 +63,7 @@ cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
+ 	struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
+ 	struct cnxk_tim_ent entry;
+ 	uint16_t index;
+-	int ret;
++	int ret = 0;
+ 
+ 	cnxk_tim_sync_start_cyc(tim_ring);
+ 	for (index = 0; index < nb_timers; index++) {
+diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_worker.h b/dpdk/drivers/event/cnxk/cnxk_tim_worker.h
+index 78e36ffafe..0c9f29cfbe 100644
+--- a/dpdk/drivers/event/cnxk/cnxk_tim_worker.h
++++ b/dpdk/drivers/event/cnxk/cnxk_tim_worker.h
+@@ -233,8 +233,8 @@ cnxk_tim_add_entry_sp(struct cnxk_tim_ring *const tim_ring,
+ 		      const struct cnxk_tim_ent *const pent,
+ 		      const uint8_t flags)
+ {
++	struct cnxk_tim_ent *chunk = NULL;
+ 	struct cnxk_tim_bkt *mirr_bkt;
+-	struct cnxk_tim_ent *chunk;
+ 	struct cnxk_tim_bkt *bkt;
+ 	uint64_t lock_sema;
+ 	int16_t rem;
+@@ -316,8 +316,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring,
+ 		      const struct cnxk_tim_ent *const pent,
+ 		      const uint8_t flags)
+ {
++	struct cnxk_tim_ent *chunk = NULL;
+ 	struct cnxk_tim_bkt *mirr_bkt;
+-	struct cnxk_tim_ent *chunk;
+ 	struct cnxk_tim_bkt *bkt;
+ 	uint64_t lock_sema;
+ 	int64_t rem;
+diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c
+index 16e9764dbf..543f793ed1 100644
+--- a/dpdk/drivers/event/dlb2/dlb2.c
++++ b/dpdk/drivers/event/dlb2/dlb2.c
+@@ -61,12 +61,13 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
+ 	.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
+ 	.max_single_link_event_port_queue_pairs =
+ 		DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
+-	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+-			  RTE_EVENT_DEV_CAP_EVENT_QOS |
+-			  RTE_EVENT_DEV_CAP_BURST_MODE |
++	.event_dev_cap = (RTE_EVENT_DEV_CAP_EVENT_QOS |
+ 			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+-			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
+ 			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
++			  RTE_EVENT_DEV_CAP_BURST_MODE |
++			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
++			  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
++			  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ 			  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
+ };
+ 
+@@ -2145,7 +2146,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
+ 	}
+ 
+ 	/* This is expected with eventdev API!
+-	 * It blindly attemmpts to unmap all queues.
++	 * It blindly attempts to unmap all queues.
+ 	 */
+ 	if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ 		DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
+@@ -3897,31 +3898,47 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
+ 	while (num < max_num) {
+ 		struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
+ 		int num_avail;
++
+ 		if (use_scalar) {
++			int n_iter = 0;
++			uint64_t m_rshift, m_lshift, m2_rshift, m2_lshift;
++
+ 			num_avail = dlb2_recv_qe_sparse(qm_port, qes);
+ 			num_avail = RTE_MIN(num_avail, max_num - num);
+ 			dlb2_inc_cq_idx(qm_port, num_avail << 2);
+ 			if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
+-				num += dlb2_process_dequeue_four_qes(ev_port,
+-								  qm_port,
+-								  &events[num],
+-								  &qes[0]);
++				n_iter = dlb2_process_dequeue_four_qes(ev_port,
++								qm_port,
++								&events[num],
++								&qes[0]);
+ 			else if (num_avail)
+-				num += dlb2_process_dequeue_qes(ev_port,
++				n_iter = dlb2_process_dequeue_qes(ev_port,
+ 								qm_port,
+ 								&events[num],
+ 								&qes[0],
+ 								num_avail);
++			if (n_iter != 0) {
++				num += n_iter;
++				/* update rolling_mask for vector code support */
++				m_rshift = qm_port->cq_rolling_mask >> n_iter;
++				m_lshift = qm_port->cq_rolling_mask << (64 - n_iter);
++				m2_rshift = qm_port->cq_rolling_mask_2 >> n_iter;
++				m2_lshift = qm_port->cq_rolling_mask_2 <<
++					(64 - n_iter);
++				qm_port->cq_rolling_mask = (m_rshift | m2_lshift);
++				qm_port->cq_rolling_mask_2 = (m2_rshift | m_lshift);
++			}
+ 		} else { /* !use_scalar */
+ 			num_avail = dlb2_recv_qe_sparse_vec(qm_port,
+ 							    &events[num],
+ 							    max_num - num);
+-			num += num_avail;
+ 			dlb2_inc_cq_idx(qm_port, num_avail << 2);
++			num += num_avail;
+ 			DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_avail);
+ 		}
+ 		if (!num_avail) {
+-			if (num > 0)
++			if ((timeout == 0) || (num > 0))
++				/* Not waiting in any form or 1+ events received */
+ 				break;
+ 			else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
+ 						   timeout, start_ticks))
+diff --git a/dpdk/drivers/event/dlb2/dlb2_priv.h b/dpdk/drivers/event/dlb2/dlb2_priv.h
+index a5e2f8e46b..7837ae8733 100644
+--- a/dpdk/drivers/event/dlb2/dlb2_priv.h
++++ b/dpdk/drivers/event/dlb2/dlb2_priv.h
+@@ -519,7 +519,7 @@ struct dlb2_eventdev_port {
+ 	bool setup_done;
+ 	/* enq_configured is set when the qm port is created */
+ 	bool enq_configured;
+-	uint8_t implicit_release; /* release events before dequeueing */
++	uint8_t implicit_release; /* release events before dequeuing */
+ }  __rte_cache_aligned;
+ 
+ struct dlb2_queue {
+diff --git a/dpdk/drivers/event/dlb2/dlb2_selftest.c b/dpdk/drivers/event/dlb2/dlb2_selftest.c
+index 2113bc2c99..1863ffe049 100644
+--- a/dpdk/drivers/event/dlb2/dlb2_selftest.c
++++ b/dpdk/drivers/event/dlb2/dlb2_selftest.c
+@@ -223,7 +223,7 @@ test_stop_flush(struct test *t) /* test to check we can properly flush events */
+ 				    0,
+ 				    RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
+ 				    &dequeue_depth)) {
+-		printf("%d: Error retrieveing dequeue depth\n", __LINE__);
++		printf("%d: Error retrieving dequeue depth\n", __LINE__);
+ 		goto err;
+ 	}
+ 
+diff --git a/dpdk/drivers/event/dlb2/pf/base/dlb2_hw_types.h b/dpdk/drivers/event/dlb2/pf/base/dlb2_hw_types.h
+index 6b8fee3416..9511521e67 100644
+--- a/dpdk/drivers/event/dlb2/pf/base/dlb2_hw_types.h
++++ b/dpdk/drivers/event/dlb2/pf/base/dlb2_hw_types.h
+@@ -27,7 +27,7 @@
+ #define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS	2
+ #define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES	5
+ #define DLB2_MAX_CQ_COMP_CHECK_LOOPS		409600
+-#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS		(32 * 64 * 1024 * (800 / 30))
++#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS		(4 * DLB2_MAX_NUM_LDB_CREDITS)
+ 
+ #define DLB2_FUNC_BAR				0
+ #define DLB2_CSR_BAR				2
+diff --git a/dpdk/drivers/event/dlb2/pf/base/dlb2_resource.c b/dpdk/drivers/event/dlb2/pf/base/dlb2_resource.c
+index 3661b940c3..4011c24aef 100644
+--- a/dpdk/drivers/event/dlb2/pf/base/dlb2_resource.c
++++ b/dpdk/drivers/event/dlb2/pf/base/dlb2_resource.c
+@@ -1057,7 +1057,7 @@ static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
+ 	       port->init_tkn_cnt;
+ }
+ 
+-static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
++static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
+ 			      struct dlb2_dir_pq_pair *port)
+ {
+ 	unsigned int port_id = port->id.phys_id;
+@@ -1089,6 +1089,8 @@ static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
+ 
+ 		os_unmap_producer_port(hw, pp_addr);
+ 	}
++
++	return cnt;
+ }
+ 
+ static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
+@@ -1107,6 +1109,7 @@ static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
+ {
+ 	struct dlb2_list_entry *iter;
+ 	struct dlb2_dir_pq_pair *port;
++	int drain_cnt = 0;
+ 	RTE_SET_USED(iter);
+ 
+ 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+@@ -1120,13 +1123,13 @@ static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
+ 		if (toggle_port)
+ 			dlb2_dir_port_cq_disable(hw, port);
+ 
+-		dlb2_drain_dir_cq(hw, port);
++		drain_cnt = dlb2_drain_dir_cq(hw, port);
+ 
+ 		if (toggle_port)
+ 			dlb2_dir_port_cq_enable(hw, port);
+ 	}
+ 
+-	return 0;
++	return drain_cnt;
+ }
+ 
+ static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
+@@ -1170,10 +1173,20 @@ static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
+ 		return 0;
+ 
+ 	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
+-		dlb2_domain_drain_dir_cqs(hw, domain, true);
++		int drain_cnt;
++
++		drain_cnt = dlb2_domain_drain_dir_cqs(hw, domain, false);
+ 
+ 		if (dlb2_domain_dir_queues_empty(hw, domain))
+ 			break;
++
++		/*
++		 * Allow time for DLB to schedule QEs before draining
++		 * the CQs again.
++		 */
++		if (!drain_cnt)
++			rte_delay_us(1);
++
+ 	}
+ 
+ 	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
+@@ -1249,7 +1262,7 @@ static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
+ 		port->init_tkn_cnt;
+ }
+ 
+-static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
++static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
+ {
+ 	u32 infl_cnt, tkn_cnt;
+ 	unsigned int i;
+@@ -1289,32 +1302,37 @@ static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
+ 
+ 		os_unmap_producer_port(hw, pp_addr);
+ 	}
++
++	return tkn_cnt;
+ }
+ 
+-static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
++static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
+ 				      struct dlb2_hw_domain *domain,
+ 				      bool toggle_port)
+ {
+ 	struct dlb2_list_entry *iter;
+ 	struct dlb2_ldb_port *port;
++	int drain_cnt = 0;
+ 	int i;
+ 	RTE_SET_USED(iter);
+ 
+ 	/* If the domain hasn't been started, there's no traffic to drain */
+ 	if (!domain->started)
+-		return;
++		return 0;
+ 
+ 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ 			if (toggle_port)
+ 				dlb2_ldb_port_cq_disable(hw, port);
+ 
+-			dlb2_drain_ldb_cq(hw, port);
++			drain_cnt = dlb2_drain_ldb_cq(hw, port);
+ 
+ 			if (toggle_port)
+ 				dlb2_ldb_port_cq_enable(hw, port);
+ 		}
+ 	}
++
++	return drain_cnt;
+ }
+ 
+ static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
+@@ -1375,10 +1393,19 @@ static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
+ 	}
+ 
+ 	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
+-		dlb2_domain_drain_ldb_cqs(hw, domain, true);
++		int drain_cnt;
++
++		drain_cnt = dlb2_domain_drain_ldb_cqs(hw, domain, false);
+ 
+ 		if (dlb2_domain_mapped_queues_empty(hw, domain))
+ 			break;
++
++		/*
++		 * Allow time for DLB to schedule QEs before draining
++		 * the CQs again.
++		 */
++		if (!drain_cnt)
++			rte_delay_us(1);
+ 	}
+ 
+ 	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
+@@ -2356,16 +2383,26 @@ static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
+ {
+ 	u32 infl_cnt;
+ 	int i;
++	const int max_iters = 1000;
++	const int iter_poll_us = 100;
+ 
+ 	if (port->num_pending_removals == 0)
+ 		return false;
+ 
+ 	/*
+ 	 * The unmap requires all the CQ's outstanding inflights to be
+-	 * completed.
++	 * completed. Poll up to 100ms.
+ 	 */
+-	infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
++	for (i = 0; i < max_iters; i++) {
++		infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
+ 						       port->id.phys_id));
++
++		if (DLB2_BITS_GET(infl_cnt,
++				  DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) == 0)
++			break;
++		rte_delay_us_sleep(iter_poll_us);
++	}
++
+ 	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
+ 		return false;
+ 
+@@ -3691,7 +3728,7 @@ dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
+ 		}
+ 	}
+ 
+-	if (args->num_qid_inflights > 4096) {
++	if (args->num_qid_inflights < 1 || args->num_qid_inflights > 2048) {
+ 		resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
+ 		return -EINVAL;
+ 	}
+@@ -5316,6 +5353,7 @@ static void dlb2_log_map_qid(struct dlb2_hw *hw,
+  * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
+  *	    the domain is not configured.
+  * EFAULT - Internal error (resp->status not set).
++ * EBUSY  - The requested port has outstanding detach operations.
+  */
+ int dlb2_hw_map_qid(struct dlb2_hw *hw,
+ 		    u32 domain_id,
+@@ -5356,8 +5394,12 @@ int dlb2_hw_map_qid(struct dlb2_hw *hw,
+ 	 * attempt to complete them. This may be necessary to free up a QID
+ 	 * slot for this requested mapping.
+ 	 */
+-	if (port->num_pending_removals)
+-		dlb2_domain_finish_unmap_port(hw, domain, port);
++	if (port->num_pending_removals) {
++		bool bool_ret;
++		bool_ret = dlb2_domain_finish_unmap_port(hw, domain, port);
++		if (!bool_ret)
++			return -EBUSY;
++	}
+ 
+ 	ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
+ 	if (ret)
+diff --git a/dpdk/drivers/event/dlb2/rte_pmd_dlb2.h b/dpdk/drivers/event/dlb2/rte_pmd_dlb2.h
+index 74399db018..1dbd885a16 100644
+--- a/dpdk/drivers/event/dlb2/rte_pmd_dlb2.h
++++ b/dpdk/drivers/event/dlb2/rte_pmd_dlb2.h
+@@ -24,7 +24,7 @@ extern "C" {
+  * Selects the token pop mode for a DLB2 port.
+  */
+ enum dlb2_token_pop_mode {
+-	/* Pop the CQ tokens immediately after dequeueing. */
++	/* Pop the CQ tokens immediately after dequeuing. */
+ 	AUTO_POP,
+ 	/* Pop CQ tokens after (dequeue_depth - 1) events are released.
+ 	 * Supported on load-balanced ports only.
+diff --git a/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
+index bbbd20951f..b549bdfcbb 100644
+--- a/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
++++ b/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
+@@ -118,7 +118,7 @@ _eventdev_setup(int mode)
+ 	struct rte_event_dev_info info;
+ 	const char *pool_name = "evdev_dpaa2_test_pool";
+ 
+-	/* Create and destrory pool for each test case to make it standalone */
++	/* Create and destroy pool for each test case to make it standalone */
+ 	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+ 					MAX_EVENTS,
+ 					0 /*MBUF_CACHE_SIZE*/,
+diff --git a/dpdk/drivers/event/dsw/dsw_evdev.h b/dpdk/drivers/event/dsw/dsw_evdev.h
+index e64ae26f6e..c907c00c78 100644
+--- a/dpdk/drivers/event/dsw/dsw_evdev.h
++++ b/dpdk/drivers/event/dsw/dsw_evdev.h
+@@ -24,7 +24,7 @@
+ /* Multiple 24-bit flow ids will map to the same DSW-level flow. The
+  * number of DSW flows should be high enough make it unlikely that
+  * flow ids of several large flows hash to the same DSW-level flow.
+- * Such collisions will limit parallism and thus the number of cores
++ * Such collisions will limit parallelism and thus the number of cores
+  * that may be utilized. However, configuring a large number of DSW
+  * flows might potentially, depending on traffic and actual
+  * application flow id value range, result in each such DSW-level flow
+@@ -104,7 +104,7 @@
+ /* Only one outstanding migration per port is allowed */
+ #define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS*DSW_MAX_FLOWS_PER_MIGRATION)
+ 
+-/* Enough room for paus request/confirm and unpaus request/confirm for
++/* Enough room for pause request/confirm and unpause request/confirm for
+  * all possible senders.
+  */
+ #define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)
+diff --git a/dpdk/drivers/event/dsw/dsw_event.c b/dpdk/drivers/event/dsw/dsw_event.c
+index c6ed470286..e209cd5b00 100644
+--- a/dpdk/drivers/event/dsw/dsw_event.c
++++ b/dpdk/drivers/event/dsw/dsw_event.c
+@@ -1096,7 +1096,7 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)
+ static void
+ dsw_port_note_op(struct dsw_port *port, uint16_t num_events)
+ {
+-	/* To pull the control ring reasonbly often on busy ports,
++	/* To pull the control ring reasonably often on busy ports,
+ 	 * each dequeued/enqueued event is considered an 'op' too.
+ 	 */
+ 	port->ops_since_bg_task += (num_events+1);
+@@ -1180,7 +1180,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
+ 	 * addition, a port cannot be left "unattended" (e.g. unused)
+ 	 * for long periods of time, since that would stall
+ 	 * migration. Eventdev API extensions to provide a cleaner way
+-	 * to archieve both of these functions should be
++	 * to achieve both of these functions should be
+ 	 * considered.
+ 	 */
+ 	if (unlikely(events_len == 0)) {
+diff --git a/dpdk/drivers/event/octeontx/ssovf_evdev.h b/dpdk/drivers/event/octeontx/ssovf_evdev.h
+index bb1056a955..e46dc055eb 100644
+--- a/dpdk/drivers/event/octeontx/ssovf_evdev.h
++++ b/dpdk/drivers/event/octeontx/ssovf_evdev.h
+@@ -88,7 +88,7 @@
+ 
+ /*
+  * In Cavium OCTEON TX SoC, all accesses to the device registers are
+- * implictly strongly ordered. So, The relaxed version of IO operation is
++ * implicitly strongly ordered. So, the relaxed version of IO operation is
+  * safe to use with out any IO memory barriers.
+  */
+ #define ssovf_read64 rte_read64_relaxed
+diff --git a/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c b/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c
+index d7b0d22111..b55523632a 100644
+--- a/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c
++++ b/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c
+@@ -151,7 +151,7 @@ _eventdev_setup(int mode)
+ 	struct rte_event_dev_info info;
+ 	const char *pool_name = "evdev_octeontx_test_pool";
+ 
+-	/* Create and destrory pool for each test case to make it standalone */
++	/* Create and destroy pool for each test case to make it standalone */
+ 	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+ 					MAX_EVENTS,
+ 					0 /*MBUF_CACHE_SIZE*/,
+diff --git a/dpdk/drivers/event/octeontx/ssovf_worker.h b/dpdk/drivers/event/octeontx/ssovf_worker.h
+index e6ee292688..57be476394 100644
+--- a/dpdk/drivers/event/octeontx/ssovf_worker.h
++++ b/dpdk/drivers/event/octeontx/ssovf_worker.h
+@@ -179,16 +179,22 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
+ 	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
+ 
+ 	if (get_work1) {
+-		if (ev->event_type == RTE_EVENT_TYPE_ETHDEV)
+-			get_work1 = (uintptr_t)ssovf_octeontx_wqe_to_pkt(
+-				get_work1, (ev->event >> 20) & 0x7F, flag,
+-				ws->lookup_mem);
+-		else if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV)
++		if (ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
++			uint16_t port = (ev->event >> 20) & 0x7F;
++
++			ev->sub_event_type = 0;
++			ev->mbuf = ssovf_octeontx_wqe_to_pkt(
++				get_work1, port, flag, ws->lookup_mem);
++		} else if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV) {
+ 			get_work1 = otx_crypto_adapter_dequeue(get_work1);
+-		ev->u64 = get_work1;
+-	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
+-		ssovf_octeontx_wqe_free(get_work1);
+-		return 0;
++			ev->u64 = get_work1;
++		} else {
++			if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
++				ssovf_octeontx_wqe_free(get_work1);
++				return 0;
++			}
++			ev->u64 = get_work1;
++		}
+ 	}
+ 
+ 	return !!get_work1;
+diff --git a/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c b/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c
+index 48bfaf893d..a89637d60f 100644
+--- a/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c
++++ b/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c
+@@ -139,7 +139,7 @@ _eventdev_setup(int mode)
+ 	struct rte_event_dev_info info;
+ 	int i, ret;
+ 
+-	/* Create and destrory pool for each test case to make it standalone */
++	/* Create and destroy pool for each test case to make it standalone */
+ 	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
+ 							0, 0, 512,
+ 							rte_socket_id());
+diff --git a/dpdk/drivers/event/octeontx2/otx2_worker_dual.h b/dpdk/drivers/event/octeontx2/otx2_worker_dual.h
+index 36ae4dd88f..ca06d51c8a 100644
+--- a/dpdk/drivers/event/octeontx2/otx2_worker_dual.h
++++ b/dpdk/drivers/event/octeontx2/otx2_worker_dual.h
+@@ -74,7 +74,7 @@ otx2_ssogws_dual_get_work(struct otx2_ssogws_state *ws,
+ 					 event.flow_id, flags, lookup_mem);
+ 			/* Extracting tstamp, if PTP enabled. CGX will prepend
+ 			 * the timestamp at starting of packet data and it can
+-			 * be derieved from WQE 9 dword which corresponds to SG
++			 * be derived from WQE 9 dword which corresponds to SG
+ 			 * iova.
+ 			 * rte_pktmbuf_mtod_offset can be used for this purpose
+ 			 * but it brings down the performance as it reads
+diff --git a/dpdk/drivers/event/opdl/opdl_evdev.c b/dpdk/drivers/event/opdl/opdl_evdev.c
+index 15c10240b0..8b6890b220 100644
+--- a/dpdk/drivers/event/opdl/opdl_evdev.c
++++ b/dpdk/drivers/event/opdl/opdl_evdev.c
+@@ -703,7 +703,7 @@ opdl_probe(struct rte_vdev_device *vdev)
+ 	}
+ 
+ 	PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
+-		      "Success - creating eventdev device %s, numa_node:[%d], do_valdation:[%s]"
++		      "Success - creating eventdev device %s, numa_node:[%d], do_validation:[%s]"
+ 			  " , self_test:[%s]\n",
+ 		      dev->data->dev_id,
+ 		      name,
+diff --git a/dpdk/drivers/event/opdl/opdl_test.c b/dpdk/drivers/event/opdl/opdl_test.c
+index e4fc70a440..24b92df476 100644
+--- a/dpdk/drivers/event/opdl/opdl_test.c
++++ b/dpdk/drivers/event/opdl/opdl_test.c
+@@ -864,7 +864,7 @@ qid_basic(struct test *t)
+ 	}
+ 
+ 
+-	/* Start the devicea */
++	/* Start the device */
+ 	if (!err) {
+ 		if (rte_event_dev_start(evdev) < 0) {
+ 			PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
+diff --git a/dpdk/drivers/event/sw/sw_evdev.h b/dpdk/drivers/event/sw/sw_evdev.h
+index 33645bd1df..4fd1054470 100644
+--- a/dpdk/drivers/event/sw/sw_evdev.h
++++ b/dpdk/drivers/event/sw/sw_evdev.h
+@@ -180,7 +180,7 @@ struct sw_port {
+ 	uint16_t outstanding_releases __rte_cache_aligned;
+ 	uint16_t inflight_max; /* app requested max inflights for this port */
+ 	uint16_t inflight_credits; /* num credits this port has right now */
+-	uint8_t implicit_release; /* release events before dequeueing */
++	uint8_t implicit_release; /* release events before dequeuing */
+ 
+ 	uint16_t last_dequeue_burst_sz; /* how big the burst was */
+ 	uint64_t last_dequeue_ticks; /* used to track burst processing time */
+diff --git a/dpdk/drivers/event/sw/sw_evdev_selftest.c b/dpdk/drivers/event/sw/sw_evdev_selftest.c
+index 9768d3a0c7..cb97a4d615 100644
+--- a/dpdk/drivers/event/sw/sw_evdev_selftest.c
++++ b/dpdk/drivers/event/sw/sw_evdev_selftest.c
+@@ -1109,7 +1109,7 @@ xstats_tests(struct test *t)
+ 					NULL,
+ 					0);
+ 
+-	/* Verify that the resetable stats are reset, and others are not */
++	/* Verify that the resettable stats are reset, and others are not */
+ 	static const uint64_t queue_expected_zero[] = {
+ 		0 /* rx */,
+ 		0 /* tx */,
+diff --git a/dpdk/drivers/gpu/cuda/cuda.c b/dpdk/drivers/gpu/cuda/cuda.c
+index 882df08e56..fd577f7167 100644
+--- a/dpdk/drivers/gpu/cuda/cuda.c
++++ b/dpdk/drivers/gpu/cuda/cuda.c
+@@ -163,7 +163,7 @@ cuda_loader(void)
+ 	if (getenv("CUDA_PATH_L") == NULL)
+ 		snprintf(cuda_path, 1024, "%s", "libcuda.so");
+ 	else
+-		snprintf(cuda_path, 1024, "%s%s", getenv("CUDA_PATH_L"), "libcuda.so");
++		snprintf(cuda_path, 1024, "%s/%s", getenv("CUDA_PATH_L"), "libcuda.so");
+ 
+ 	cudalib = dlopen(cuda_path, RTLD_LAZY);
+ 	if (cudalib == NULL) {
+@@ -437,9 +437,11 @@ mem_list_del_item(cuda_ptr_key pk)
+ 		return -EINVAL;
+ 
+ 	/* if key is in head */
+-	if (mem_alloc_list_cur->prev == NULL)
++	if (mem_alloc_list_cur->prev == NULL) {
+ 		mem_alloc_list_head = mem_alloc_list_cur->next;
+-	else {
++		if (mem_alloc_list_head != NULL)
++			mem_alloc_list_head->prev = NULL;
++	} else {
+ 		mem_alloc_list_cur->prev->next = mem_alloc_list_cur->next;
+ 		if (mem_alloc_list_cur->next != NULL)
+ 			mem_alloc_list_cur->next->prev = mem_alloc_list_cur->prev;
+diff --git a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c
+index 4c669b878f..6ebbf91de5 100644
+--- a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c
++++ b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c
+@@ -202,7 +202,7 @@ cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n)
+ 						    BATCH_ALLOC_SZ, 0, 1);
+ 		/* If issue fails, try falling back to default alloc */
+ 		if (unlikely(rc))
+-			return cn10k_mempool_enq(mp, obj_table, n);
++			return cnxk_mempool_deq(mp, obj_table, n);
+ 		mem->status = BATCH_ALLOC_OP_ISSUED;
+ 	}
+ 
+diff --git a/dpdk/drivers/mempool/dpaa/dpaa_mempool.c b/dpdk/drivers/mempool/dpaa/dpaa_mempool.c
+index f17aff9655..32639a3bfd 100644
+--- a/dpdk/drivers/mempool/dpaa/dpaa_mempool.c
++++ b/dpdk/drivers/mempool/dpaa/dpaa_mempool.c
+@@ -258,7 +258,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
+ 		}
+ 		/* assigning mbuf from the acquired objects */
+ 		for (i = 0; (i < ret) && bufs[i].addr; i++) {
+-			/* TODO-errata - objerved that bufs may be null
++			/* TODO-errata - observed that bufs may be null
+ 			 * i.e. first buffer is valid, remaining 6 buffers
+ 			 * may be null.
+ 			 */
+diff --git a/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c b/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c
+index 94dc5cd815..8fd9edced2 100644
+--- a/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c
++++ b/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c
+@@ -669,7 +669,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+ 			break;
+ 		}
+ 
+-		/* Imsert it into an ordered linked list */
++		/* Insert it into an ordered linked list */
+ 		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+ 			if ((uintptr_t)node <= (uintptr_t)curr[0])
+ 				break;
+@@ -705,7 +705,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+ 
+ 	ret = octeontx_fpapf_aura_detach(gpool);
+ 	if (ret) {
+-		fpavf_log_err("Failed to dettach gaura %u. error code=%d\n",
++		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
+ 			      gpool, ret);
+ 	}
+ 
+diff --git a/dpdk/drivers/net/af_xdp/compat.h b/dpdk/drivers/net/af_xdp/compat.h
+index 3880dc7dd7..28ea64aeaa 100644
+--- a/dpdk/drivers/net/af_xdp/compat.h
++++ b/dpdk/drivers/net/af_xdp/compat.h
+@@ -2,12 +2,17 @@
+  * Copyright(c) 2020 Intel Corporation.
+  */
+ 
++#ifdef RTE_NET_AF_XDP_LIBXDP
++#include <xdp/xsk.h>
++#else
+ #include <bpf/xsk.h>
++#endif
++#include <bpf/bpf.h>
+ #include <linux/version.h>
+ #include <poll.h>
+ 
+ #if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE && \
+-	defined(RTE_LIBRTE_AF_XDP_PMD_SHARED_UMEM)
++	defined(RTE_NET_AF_XDP_SHARED_UMEM)
+ #define ETH_AF_XDP_SHARED_UMEM 1
+ #endif
+ 
+@@ -54,3 +59,41 @@ tx_syscall_needed(struct xsk_ring_prod *q __rte_unused)
+ 	return 1;
+ }
+ #endif
++
++#ifdef RTE_NET_AF_XDP_LIBBPF_OBJ_OPEN
++static int load_program(const char *prog_path, struct bpf_object **obj)
++{
++	struct bpf_program *prog;
++	int err;
++
++	*obj = bpf_object__open_file(prog_path, NULL);
++	err = libbpf_get_error(*obj);
++	if (err)
++		return -1;
++
++	err = bpf_object__load(*obj);
++	if (err)
++		goto out;
++
++	prog = bpf_object__next_program(*obj, NULL);
++	if (!prog)
++		goto out;
++
++	return bpf_program__fd(prog);
++
++out:
++	bpf_object__close(*obj);
++	return -1;
++}
++#else
++static int load_program(const char *prog_path, struct bpf_object **obj)
++{
++	int ret, prog_fd;
++
++	ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, obj, &prog_fd);
++	if (ret)
++		return -1;
++
++	return prog_fd;
++}
++#endif
+diff --git a/dpdk/drivers/net/af_xdp/meson.build b/dpdk/drivers/net/af_xdp/meson.build
+index 3ed2b29784..1e0de23705 100644
+--- a/dpdk/drivers/net/af_xdp/meson.build
++++ b/dpdk/drivers/net/af_xdp/meson.build
+@@ -9,19 +9,49 @@ endif
+ 
+ sources = files('rte_eth_af_xdp.c')
+ 
++xdp_dep = dependency('libxdp', version : '>=1.2.2', required: false, method: 'pkg-config')
+ bpf_dep = dependency('libbpf', required: false, method: 'pkg-config')
+ if not bpf_dep.found()
+     bpf_dep = cc.find_library('bpf', required: false)
+ endif
+ 
+-if bpf_dep.found() and cc.has_header('bpf/xsk.h') and cc.has_header('linux/if_xdp.h')
+-    ext_deps += bpf_dep
+-    bpf_ver_dep = dependency('libbpf', version : '>=0.2.0',
+-            required: false, method: 'pkg-config')
+-    if bpf_ver_dep.found()
+-        dpdk_conf.set('RTE_LIBRTE_AF_XDP_PMD_SHARED_UMEM', 1)
++if cc.has_header('linux/if_xdp.h')
++    if xdp_dep.found() and cc.has_header('xdp/xsk.h')
++        if bpf_dep.found() and cc.has_header('bpf/bpf.h')
++            cflags += ['-DRTE_NET_AF_XDP_LIBXDP']
++            cflags += ['-DRTE_NET_AF_XDP_SHARED_UMEM']
++            ext_deps += xdp_dep
++            ext_deps += bpf_dep
++            bpf_ver_dep = dependency('libbpf', version : '>=0.7.0',
++                                 required: false, method: 'pkg-config')
++            if bpf_ver_dep.found()
++                cflags += ['-DRTE_NET_AF_XDP_LIBBPF_OBJ_OPEN']
++            endif
++        else
++            build = false
++            reason = 'missing dependency, libbpf'
++        endif
++    elif bpf_dep.found() and cc.has_header('bpf/xsk.h') and cc.has_header('bpf/bpf.h')
++        # libxdp not found. Rely solely on libbpf for xsk functionality
++        # which is only available in versions <= v0.6.0.
++        bpf_ver_dep = dependency('libbpf', version : '<=0.6.0',
++                                 required: false, method: 'pkg-config')
++        if bpf_ver_dep.found()
++            ext_deps += bpf_dep
++            bpf_shumem_ver_dep = dependency('libbpf', version : '>=0.2.0',
++                            required: false, method: 'pkg-config')
++            if bpf_shumem_ver_dep.found()
++                cflags += ['-DRTE_NET_AF_XDP_SHARED_UMEM']
++            endif
++        else
++            build = false
++            reason = 'missing dependency, "libxdp" or "libbpf <= v0.6.0"'
++        endif
++    else
++        build = false
++        reason = 'missing dependency, "libxdp" and "libbpf"'
+     endif
+ else
+     build = false
+-    reason = 'missing dependency, "libbpf"'
++    reason = 'missing header, "linux/if_xdp.h"'
+ endif
+diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c
+index 96c2c9d939..9db76d4562 100644
+--- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c
++++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c
+@@ -15,8 +15,6 @@
+ #include <linux/ethtool.h>
+ #include <linux/sockios.h>
+ #include "af_xdp_deps.h"
+-#include <bpf/bpf.h>
+-#include <bpf/xsk.h>
+ 
+ #include <rte_ethdev.h>
+ #include <ethdev_driver.h>
+@@ -697,67 +695,6 @@ find_internal_resource(struct pmd_internals *port_int)
+ 	return list;
+ }
+ 
+-/* Check if the netdev,qid context already exists */
+-static inline bool
+-ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+-		struct pkt_rx_queue *list_rxq, const char *list_ifname)
+-{
+-	bool exists = false;
+-
+-	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+-			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
+-		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+-					ifname, rxq->xsk_queue_idx);
+-		exists = true;
+-	}
+-
+-	return exists;
+-}
+-
+-/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+-static inline int
+-get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+-			struct xsk_umem_info **umem)
+-{
+-	struct internal_list *list;
+-	struct pmd_internals *internals;
+-	int i = 0, ret = 0;
+-	struct rte_mempool *mb_pool = rxq->mb_pool;
+-
+-	if (mb_pool == NULL)
+-		return ret;
+-
+-	pthread_mutex_lock(&internal_list_lock);
+-
+-	TAILQ_FOREACH(list, &internal_list, next) {
+-		internals = list->eth_dev->data->dev_private;
+-		for (i = 0; i < internals->queue_cnt; i++) {
+-			struct pkt_rx_queue *list_rxq =
+-						&internals->rx_queues[i];
+-			if (rxq == list_rxq)
+-				continue;
+-			if (mb_pool == internals->rx_queues[i].mb_pool) {
+-				if (ctx_exists(rxq, ifname, list_rxq,
+-						internals->if_name)) {
+-					ret = -1;
+-					goto out;
+-				}
+-				if (__atomic_load_n(
+-					&internals->rx_queues[i].umem->refcnt,
+-							__ATOMIC_ACQUIRE)) {
+-					*umem = internals->rx_queues[i].umem;
+-					goto out;
+-				}
+-			}
+-		}
+-	}
+-
+-out:
+-	pthread_mutex_unlock(&internal_list_lock);
+-
+-	return ret;
+-}
+-
+ static int
+ eth_dev_configure(struct rte_eth_dev *dev)
+ {
+@@ -1013,6 +950,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
+ 	return aligned_addr;
+ }
+ 
++/* Check if the netdev,qid context already exists */
++static inline bool
++ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
++		struct pkt_rx_queue *list_rxq, const char *list_ifname)
++{
++	bool exists = false;
++
++	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
++			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
++		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
++					ifname, rxq->xsk_queue_idx);
++		exists = true;
++	}
++
++	return exists;
++}
++
++/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
++static inline int
++get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
++			struct xsk_umem_info **umem)
++{
++	struct internal_list *list;
++	struct pmd_internals *internals;
++	int i = 0, ret = 0;
++	struct rte_mempool *mb_pool = rxq->mb_pool;
++
++	if (mb_pool == NULL)
++		return ret;
++
++	pthread_mutex_lock(&internal_list_lock);
++
++	TAILQ_FOREACH(list, &internal_list, next) {
++		internals = list->eth_dev->data->dev_private;
++		for (i = 0; i < internals->queue_cnt; i++) {
++			struct pkt_rx_queue *list_rxq =
++						&internals->rx_queues[i];
++			if (rxq == list_rxq)
++				continue;
++			if (mb_pool == internals->rx_queues[i].mb_pool) {
++				if (ctx_exists(rxq, ifname, list_rxq,
++						internals->if_name)) {
++					ret = -1;
++					goto out;
++				}
++				if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
++						    __ATOMIC_ACQUIRE)) {
++					*umem = internals->rx_queues[i].umem;
++					goto out;
++				}
++			}
++		}
++	}
++
++out:
++	pthread_mutex_unlock(&internal_list_lock);
++
++	return ret;
++}
++
+ static struct
+ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ 				  struct pkt_rx_queue *rxq)
+@@ -1052,7 +1049,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ 		umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
+ 					  rte_socket_id());
+ 		if (umem == NULL) {
+-			AF_XDP_LOG(ERR, "Failed to allocate umem info");
++			AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
+ 			return NULL;
+ 		}
+ 
+@@ -1065,7 +1062,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ 		ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
+ 				&rxq->fq, &rxq->cq, &usr_config);
+ 		if (ret) {
+-			AF_XDP_LOG(ERR, "Failed to create umem");
++			AF_XDP_LOG(ERR, "Failed to create umem\n");
+ 			goto err;
+ 		}
+ 		umem->buffer = base_addr;
+@@ -1099,7 +1096,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ 
+ 	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
+ 	if (umem == NULL) {
+-		AF_XDP_LOG(ERR, "Failed to allocate umem info");
++		AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
+ 		return NULL;
+ 	}
+ 
+@@ -1135,7 +1132,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ 			       &usr_config);
+ 
+ 	if (ret) {
+-		AF_XDP_LOG(ERR, "Failed to create umem");
++		AF_XDP_LOG(ERR, "Failed to create umem\n");
+ 		goto err;
+ 	}
+ 	umem->mz = mz;
+@@ -1151,13 +1148,13 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ static int
+ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
+ {
+-	int ret, prog_fd = -1;
++	int ret, prog_fd;
+ 	struct bpf_object *obj;
+ 
+-	ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+-	if (ret) {
++	prog_fd = load_program(prog_path, &obj);
++	if (prog_fd < 0) {
+ 		AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
+-		return ret;
++		return -1;
+ 	}
+ 
+ 	/*
+@@ -1269,18 +1266,19 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
+ 	cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
+ #endif
+ 
+-	if (strnlen(internals->prog_path, PATH_MAX) &&
+-				!internals->custom_prog_configured) {
+-		ret = load_custom_xdp_prog(internals->prog_path,
+-					   internals->if_index,
+-					   &internals->map);
+-		if (ret) {
+-			AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
+-					internals->prog_path);
+-			goto err;
++	if (strnlen(internals->prog_path, PATH_MAX)) {
++		if (!internals->custom_prog_configured) {
++			ret = load_custom_xdp_prog(internals->prog_path,
++							internals->if_index,
++							&internals->map);
++			if (ret) {
++				AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
++						internals->prog_path);
++				goto out_umem;
++			}
++			internals->custom_prog_configured = 1;
+ 		}
+-		internals->custom_prog_configured = 1;
+-		cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
++		cfg.libbpf_flags |= XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
+ 	}
+ 
+ 	if (internals->shared_umem)
+@@ -1294,7 +1292,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
+ 
+ 	if (ret) {
+ 		AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
+-		goto err;
++		goto out_umem;
+ 	}
+ 
+ 	/* insert the xsk into the xsks_map */
+@@ -1306,7 +1304,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
+ 					  &rxq->xsk_queue_idx, &fd, 0);
+ 		if (err) {
+ 			AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
+-			goto err;
++			goto out_xsk;
+ 		}
+ 	}
+ 
+@@ -1314,7 +1312,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
+ 	ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
+ 	if (ret) {
+ 		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
+-		goto err;
++		goto out_xsk;
+ 	}
+ #endif
+ 
+@@ -1322,20 +1320,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
+ 		ret = configure_preferred_busy_poll(rxq);
+ 		if (ret) {
+ 			AF_XDP_LOG(ERR, "Failed configure busy polling.\n");
+-			goto err;
++			goto out_xsk;
+ 		}
+ 	}
+ 
+ 	ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
+ 	if (ret) {
+-		xsk_socket__delete(rxq->xsk);
+ 		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
+-		goto err;
++		goto out_xsk;
+ 	}
+ 
+ 	return 0;
+ 
+-err:
++out_xsk:
++	xsk_socket__delete(rxq->xsk);
++out_umem:
+ 	if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
+ 		xdp_umem_destroy(rxq->umem);
+ 
+diff --git a/dpdk/drivers/net/ark/ark_global.h b/dpdk/drivers/net/ark/ark_global.h
+index 6f9b3013d8..49193ac5b3 100644
+--- a/dpdk/drivers/net/ark/ark_global.h
++++ b/dpdk/drivers/net/ark/ark_global.h
+@@ -67,7 +67,7 @@
+ typedef void (*rx_user_meta_hook_fn)(struct rte_mbuf *mbuf,
+ 				     const uint32_t *meta,
+ 				     void *ext_user_data);
+-/* TX hook poplulate *meta, with up to 20 bytes.  meta_cnt
++/* TX hook populates *meta, with up to 20 bytes.  meta_cnt
+  * returns the number of uint32_t words populated, 0 to 5
+  */
+ typedef void (*tx_user_meta_hook_fn)(const struct rte_mbuf *mbuf,
+diff --git a/dpdk/drivers/net/atlantic/atl_ethdev.c b/dpdk/drivers/net/atlantic/atl_ethdev.c
+index 1c03e8bfa1..3a028f4290 100644
+--- a/dpdk/drivers/net/atlantic/atl_ethdev.c
++++ b/dpdk/drivers/net/atlantic/atl_ethdev.c
+@@ -1423,7 +1423,7 @@ atl_dev_interrupt_action(struct rte_eth_dev *dev,
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+diff --git a/dpdk/drivers/net/atlantic/atl_rxtx.c b/dpdk/drivers/net/atlantic/atl_rxtx.c
+index e3f57ded73..aeb79bf5a2 100644
+--- a/dpdk/drivers/net/atlantic/atl_rxtx.c
++++ b/dpdk/drivers/net/atlantic/atl_rxtx.c
+@@ -1094,7 +1094,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 	 * register.
+ 	 * Update the RDT with the value of the last processed RX descriptor
+ 	 * minus 1, to guarantee that the RDT register is never equal to the
+-	 * RDH register, which creates a "full" ring situtation from the
++	 * RDH register, which creates a "full" ring situation from the
+ 	 * hardware point of view...
+ 	 */
+ 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+diff --git a/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c
+index 7d0e724019..d0eb4af928 100644
+--- a/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c
++++ b/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c
+@@ -281,7 +281,7 @@ int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
+ 	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ 	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+ 
+-	/* VLAN proimisc bu defauld */
++	/* VLAN promisc by default */
+ 	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+ 
+ 	/* Rx Interrupts */
+diff --git a/dpdk/drivers/net/axgbe/axgbe_dev.c b/dpdk/drivers/net/axgbe/axgbe_dev.c
+index daeb3308f4..6a7fddffca 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_dev.c
++++ b/dpdk/drivers/net/axgbe/axgbe_dev.c
+@@ -1046,7 +1046,7 @@ static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+ 	return 0;
+ }
+ 
+-/*Distrubting fifo size  */
++/* Distributing FIFO size */
+ static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+ {
+ 	unsigned int fifo_size;
+diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c
+index 7d40c18a86..5add403235 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c
++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c
+@@ -10,6 +10,8 @@
+ #include "axgbe_regs.h"
+ #include "rte_time.h"
+ 
++#include "eal_filesystem.h"
++
+ static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+ static int  axgbe_dev_configure(struct rte_eth_dev *dev);
+ static int  axgbe_dev_start(struct rte_eth_dev *dev);
+@@ -284,7 +286,7 @@ static int axgbe_phy_reset(struct axgbe_port *pdata)
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -1009,18 +1011,18 @@ axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ 	struct axgbe_port *pdata = dev->data->dev_private;
+ 	unsigned int i;
+ 
+-	if (!stats)
+-		return 0;
++	if (n < AXGBE_XSTATS_COUNT)
++		return AXGBE_XSTATS_COUNT;
+ 
+ 	axgbe_read_mmc_stats(pdata);
+ 
+-	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
++	for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
+ 		stats[i].id = i;
+ 		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
+ 				axgbe_xstats_strings[i].offset);
+ 	}
+ 
+-	return i;
++	return AXGBE_XSTATS_COUNT;
+ }
+ 
+ static int
+@@ -2117,28 +2119,27 @@ static void axgbe_default_config(struct axgbe_port *pdata)
+ 	pdata->power_down = 0;
+ }
+ 
+-static int
+-pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
++/*
++ * Return PCI root complex device id on success else 0
++ */
++static uint16_t
++get_pci_rc_devid(void)
+ {
+-	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
+-	const struct rte_pci_id *pcid = _pci_id;
++	char pci_sysfs[PATH_MAX];
++	const struct rte_pci_addr pci_rc_addr = {0, 0, 0, 0};
++	unsigned long device_id;
+ 
+-	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
+-			pdev->id.device_id == pcid->device_id)
+-		return 0;
+-	return 1;
+-}
++	snprintf(pci_sysfs, sizeof(pci_sysfs), "%s/" PCI_PRI_FMT "/device",
++		 rte_pci_get_sysfs_path(), pci_rc_addr.domain,
++		 pci_rc_addr.bus, pci_rc_addr.devid, pci_rc_addr.function);
+ 
+-static bool
+-pci_search_device(int device_id)
+-{
+-	struct rte_bus *pci_bus;
+-	struct rte_pci_id dev_id;
++	/* get device id */
++	if (eal_parse_sysfs_value(pci_sysfs, &device_id) < 0) {
++		PMD_INIT_LOG(ERR, "Error in reading PCI sysfs\n");
++		return 0;
++	}
+ 
+-	dev_id.device_id = device_id;
+-	pci_bus = rte_bus_find_by_name("pci");
+-	return (pci_bus != NULL) &&
+-		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
++	return (uint16_t)device_id;
+ }
+ 
+ /*
+@@ -2180,7 +2181,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ 	/*
+ 	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
+ 	 */
+-	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
++	if ((get_pci_rc_devid()) == AMD_PCI_RV_ROOT_COMPLEX_ID) {
+ 		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ 		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ 	} else {
+diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/dpdk/drivers/net/axgbe/axgbe_ethdev.h
+index a207f2ae1b..e06d40f9eb 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.h
++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.h
+@@ -641,7 +641,7 @@ struct axgbe_port {
+ 
+ 	unsigned int kr_redrv;
+ 
+-	/* Auto-negotiation atate machine support */
++	/* Auto-negotiation state machine support */
+ 	unsigned int an_int;
+ 	unsigned int an_status;
+ 	enum axgbe_an an_result;
+diff --git a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
+index 02236ec192..72104f8a3f 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
++++ b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
+@@ -347,7 +347,7 @@ static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+ 
+ 	retry = 1;
+ again2:
+-	/* Read the specfied register */
++	/* Read the specified register */
+ 	i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ 	i2c_op.target = target;
+ 	i2c_op.len = val_len;
+@@ -1093,7 +1093,7 @@ static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
+ {
+ 	return 0;
+ 	/* Dummy API since there is no case to support
+-	 * external phy devices registred through kerenl apis
++	 * external phy devices registered through kernel APIs
+ 	 */
+ }
+ 
+diff --git a/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+index 816371cd79..d95a446bef 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
++++ b/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+@@ -11,7 +11,7 @@
+ #include <rte_mempool.h>
+ #include <rte_mbuf.h>
+ 
+-/* Useful to avoid shifting for every descriptor prepration*/
++/* Useful to avoid shifting for every descriptor preparation */
+ #define TX_DESC_CTRL_FLAGS 0xb000000000000000
+ #define TX_DESC_CTRL_FLAG_TMST 0x40000000
+ #define TX_FREE_BULK	   8
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c
+index f67db015b5..74e3018eab 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x.c
++++ b/dpdk/drivers/net/bnx2x/bnx2x.c
+@@ -926,7 +926,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
+  *   block.
+  *
+  * RAMROD_CMD_ID_ETH_UPDATE
+- *   Used to update the state of the leading connection, usually to udpate
++ *   Used to update the state of the leading connection, usually to update
+  *   the RSS indirection table.  Completes on the RCQ of the leading
+  *   connection. (Not currently used under FreeBSD until OS support becomes
+  *   available.)
+@@ -941,7 +941,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
+  *   the RCQ of the leading connection.
+  *
+  * RAMROD_CMD_ID_ETH_CFC_DEL
+- *   Used when tearing down a conneciton prior to driver unload.  Completes
++ *   Used when tearing down a connection prior to driver unload.  Completes
+  *   on the RCQ of the leading connection (since the current connection
+  *   has been completely removed from controller memory).
+  *
+@@ -1072,7 +1072,7 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
+ 
+ 	/*
+ 	 * It's ok if the actual decrement is issued towards the memory
+-	 * somewhere between the lock and unlock. Thus no more explict
++	 * somewhere between the lock and unlock. Thus no more explicit
+ 	 * memory barrier is needed.
+ 	 */
+ 	if (common) {
+@@ -1190,7 +1190,7 @@ bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ 		break;
+ 
+ 	case (RAMROD_CMD_ID_ETH_TERMINATE):
+-		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] teminate ramrod", cid);
++		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] terminate ramrod", cid);
+ 		drv_cmd = ECORE_Q_CMD_TERMINATE;
+ 		break;
+ 
+@@ -1476,7 +1476,7 @@ bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
+ 	case BNX2X_RX_MODE_ALLMULTI_PROMISC:
+ 	case BNX2X_RX_MODE_PROMISC:
+ 		/*
+-		 * According to deffinition of SI mode, iface in promisc mode
++		 * According to definition of SI mode, iface in promisc mode
+ 		 * should receive matched and unmatched (in resolution of port)
+ 		 * unicast packets.
+ 		 */
+@@ -1944,7 +1944,7 @@ static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc)
+ 
+ /*
+  * Cleans the object that have internal lists without sending
+- * ramrods. Should be run when interrutps are disabled.
++ * ramrods. Should be run when interrupts are disabled.
+  */
+ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
+ {
+@@ -2043,7 +2043,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
+ 
+ 	/*
+ 	 * Nothing to do during unload if previous bnx2x_nic_load()
+-	 * did not completed successfully - all resourses are released.
++	 * did not complete successfully - all resources are released.
+ 	 */
+ 	if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
+ 		return 0;
+@@ -2084,7 +2084,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
+ 		/*
+ 		 * Prevent transactions to host from the functions on the
+ 		 * engine that doesn't reset global blocks in case of global
+-		 * attention once gloabl blocks are reset and gates are opened
++		 * attention once global blocks are reset and gates are opened
+ 		 * (the engine which leader will perform the recovery
+ 		 * last).
+ 		 */
+@@ -2101,7 +2101,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
+ 
+ 	/*
+ 	 * At this stage no more interrupts will arrive so we may safely clean
+-	 * the queue'able objects here in case they failed to get cleaned so far.
++	 * the queueable objects here in case they failed to get cleaned so far.
+ 	 */
+ 	if (IS_PF(sc)) {
+ 		bnx2x_squeeze_objects(sc);
+@@ -2151,7 +2151,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
+ }
+ 
+ /*
+- * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
++ * Encapsulates an mbuf cluster into the Tx BD chain and makes the memory
+  * visible to the controller.
+  *
+  * If an mbuf is submitted to this routine and cannot be given to the
+@@ -2719,7 +2719,7 @@ static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc)
+ 	return val1 != 0;
+ }
+ 
+-/* send load requrest to mcp and analyze response */
++/* send load request to MCP and analyze response */
+ static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
+ {
+ 	PMD_INIT_FUNC_TRACE(sc);
+@@ -5325,7 +5325,7 @@ static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_param
+  *   sum of vn_min_rates.
+  *     or
+  *   0 - if all the min_rates are 0.
+- * In the later case fainess algorithm should be deactivated.
++ * In the latter case the fairness algorithm should be deactivated.
+  * If all min rates are not zero then those that are zeroes will be set to 1.
+  */
+ static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input)
+@@ -6564,7 +6564,7 @@ bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ 	txq_init->fw_sb_id = fp->fw_sb_id;
+ 
+ 	/*
+-	 * set the TSS leading client id for TX classfication to the
++	 * set the TSS leading client id for Tx classification to the
+ 	 * leading RSS client id
+ 	 */
+ 	txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id);
+@@ -7634,8 +7634,8 @@ static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
+ }
+ 
+ /*
+-* Walk the PCI capabiites list for the device to find what features are
+-* supported. These capabilites may be enabled/disabled by firmware so it's
++* Walk the PCI capabilities list for the device to find what features are
++* supported. These capabilities may be enabled/disabled by firmware so it's
+ * best to walk the list rather than make assumptions.
+ */
+ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
+@@ -8425,7 +8425,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
+ 	} else {
+ 		sc->devinfo.int_block = INT_BLOCK_IGU;
+ 
+-/* do not allow device reset during IGU info preocessing */
++/* do not allow device reset during IGU info processing */
+ 		bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
+ 
+ 		val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
+@@ -9765,7 +9765,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
+ 
+ 	sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
+ 
+-	/* get PCI capabilites */
++	/* get PCI capabilities */
+ 	bnx2x_probe_pci_caps(sc);
+ 
+ 	if (sc->devinfo.pcie_msix_cap_reg != 0) {
+@@ -10284,7 +10284,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
+  *          stay set)
+  *      f.  If this is VNIC 3 of a port then also init
+  *          first_timers_ilt_entry to zero and last_timers_ilt_entry
+- *          to the last enrty in the ILT.
++ *          to the last entry in the ILT.
+  *
+  *      Notes:
+  *      Currently the PF error in the PGLC is non recoverable.
+@@ -11090,7 +11090,7 @@ static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
+ /**
+  *	bnx2x_pf_flr_clnup
+  *	a. re-enable target read on the PF
+- *	b. poll cfc per function usgae counter
++ *	b. poll cfc per function usage counter
+ *	c. poll the qm per function usage counter
+  *	d. poll the tm per function usage counter
+  *	e. poll the tm per function scan-done indication
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x.h b/dpdk/drivers/net/bnx2x/bnx2x.h
+index 80d19cbfd6..d7e1729e68 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x.h
++++ b/dpdk/drivers/net/bnx2x/bnx2x.h
+@@ -681,13 +681,13 @@ struct bnx2x_slowpath {
+ }; /* struct bnx2x_slowpath */
+ 
+ /*
+- * Port specifc data structure.
++ * Port specific data structure.
+  */
+ struct bnx2x_port {
+     /*
+      * Port Management Function (for 57711E only).
+      * When this field is set the driver instance is
+-     * responsible for managing port specifc
++     * responsible for managing port specific
+      * configurations such as handling link attentions.
+      */
+     uint32_t pmf;
+@@ -732,7 +732,7 @@ struct bnx2x_port {
+ 
+     /*
+      * MCP scratchpad address for port specific statistics.
+-     * The device is responsible for writing statistcss
++     * The device is responsible for writing statistics
+      * back to the MCP for use with management firmware such
+      * as UMP/NC-SI.
+      */
+@@ -937,8 +937,8 @@ struct bnx2x_devinfo {
+  * already registered for this port (which means that the user wants storage
+  * services).
+  * 2. During cnic-related load, to know if offload mode is already configured
+- * in the HW or needs to be configrued. Since the transition from nic-mode to
+- * offload-mode in HW causes traffic coruption, nic-mode is configured only
++ * in the HW or needs to be configured. Since the transition from nic-mode to
++ * offload-mode in HW causes traffic corruption, nic-mode is configured only
+ * in ports on which storage services were never requested.
+  */
+ #define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc))
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c
+index 1cd972591a..c07b01510a 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c
++++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.c
+@@ -1358,7 +1358,7 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
+ 
+     /*
+      * Prepare the first stats ramrod (will be completed with
+-     * the counters equal to zero) - init counters to somethig different.
++     * the counters equal to zero) - init counters to something different.
+      */
+     memset(&sc->fw_stats_data->storm_counters, 0xff,
+ 	   sizeof(struct stats_counter));
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.h b/dpdk/drivers/net/bnx2x/bnx2x_stats.h
+index 635412bdd3..11ddab5039 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x_stats.h
++++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.h
+@@ -314,7 +314,7 @@ struct bnx2x_eth_stats_old {
+ };
+ 
+ struct bnx2x_eth_q_stats_old {
+-    /* Fields to perserve over fw reset*/
++    /* Fields to preserve over FW reset */
+     uint32_t total_unicast_bytes_received_hi;
+     uint32_t total_unicast_bytes_received_lo;
+     uint32_t total_broadcast_bytes_received_hi;
+@@ -328,7 +328,7 @@ struct bnx2x_eth_q_stats_old {
+     uint32_t total_multicast_bytes_transmitted_hi;
+     uint32_t total_multicast_bytes_transmitted_lo;
+ 
+-    /* Fields to perserve last of */
++    /* Fields to preserve the last values of */
+     uint32_t total_bytes_received_hi;
+     uint32_t total_bytes_received_lo;
+     uint32_t total_bytes_transmitted_hi;
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
+index 945e3df84f..63953c2979 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
++++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
+@@ -73,7 +73,7 @@ bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list,
+ 	tl->length = length;
+ }
+ 
+-/* Initiliaze header of the first tlv and clear mailbox*/
++/* Initialize header of the first TLV and clear mailbox */
+ static void
+ bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
+ 	      uint16_t type, uint16_t length)
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h
+index 9577341266..d71e81c005 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h
++++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h
+@@ -241,7 +241,7 @@ struct vf_close_tlv {
+ 	uint8_t pad[2];
+ };
+ 
+-/* rlease the VF's acquired resources */
++/* release the VF's acquired resources */
+ struct vf_release_tlv {
+ 	struct vf_first_tlv   first_tlv;
+ 	uint16_t		vf_id;  /* for debug */
+diff --git a/dpdk/drivers/net/bnx2x/ecore_fw_defs.h b/dpdk/drivers/net/bnx2x/ecore_fw_defs.h
+index 93bca8ad33..6fc1fce7e2 100644
+--- a/dpdk/drivers/net/bnx2x/ecore_fw_defs.h
++++ b/dpdk/drivers/net/bnx2x/ecore_fw_defs.h
+@@ -379,7 +379,7 @@
+ /* temporarily used for RTT */
+ #define XSEMI_CLK1_RESUL_CHIP (1e-3)
+ 
+-/* used for Host Coallescing */
++/* used for Host Coalescing */
+ #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+ #define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
+ 
+diff --git a/dpdk/drivers/net/bnx2x/ecore_hsi.h b/dpdk/drivers/net/bnx2x/ecore_hsi.h
+index 5508c53639..eda79408e9 100644
+--- a/dpdk/drivers/net/bnx2x/ecore_hsi.h
++++ b/dpdk/drivers/net/bnx2x/ecore_hsi.h
+@@ -1062,7 +1062,7 @@ struct port_feat_cfg {		    /* port 0: 0x454  port 1: 0x4c8 */
+ 		#define PORT_FEATURE_MBA_LINK_SPEED_20G              0x20000000
+ 
+ 	/* Secondary MBA configuration,
+-	 * see mba_config for the fileds defination.
++	 * see mba_config for the field definitions.
+ 	 */
+ 	uint32_t mba_config2;
+ 
+@@ -1075,7 +1075,7 @@ struct port_feat_cfg {		    /* port 0: 0x454  port 1: 0x4c8 */
+ 	#define PORT_FEATURE_BOFM_CFGD_VEN                  0x00080000
+ 
+ 	/* Secondary MBA configuration,
+-	 * see mba_vlan_cfg for the fileds defination.
++	 * see mba_vlan_cfg for the field definitions.
+ 	 */
+ 	uint32_t mba_vlan_cfg2;
+ 
+@@ -1429,7 +1429,7 @@ struct extended_dev_info_shared_cfg {             /* NVRAM OFFSET */
+ 	#define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA      0x00080000
+ 
+ 	/*  Override Rx signal detect threshold when enabled the threshold
+-	 * will be set staticaly
++	 * will be set statically
+ 	 */
+ 	#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_MASK     0x00100000
+ 	#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_SHIFT    20
+@@ -2189,9 +2189,9 @@ struct eee_remote_vals {
+  * elements on a per byte or word boundary.
+  *
+  * example: an array with 8 entries each 4 bit wide. This array will fit into
+- * a single dword. The diagrmas below show the array order of the nibbles.
++ * a single dword. The diagrams below show the array order of the nibbles.
+  *
+- * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering:
++ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+  *
+  *                |                |                |               |
+  *   0    |   1   |   2    |   3   |   4    |   5   |   6   |   7   |
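The nibble diagram reads more easily next to a tiny standalone example. This is illustrative only, not the shmem macro itself: with 8 entries of 4 bits packed into one 32-bit dword, the standard ordering places entry i at bit position (i % 8) * 4.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Fetch entry i of an 8-entry, 4-bit-wide array packed into a dword. */
static uint32_t get_nibble(uint32_t dword, unsigned int i)
{
	return (dword >> ((i % 8) * 4)) & 0xF;
}

int main(void)
{
	uint32_t packed = 0x76543210; /* nibble i holds the value i */
	unsigned int i;

	for (i = 0; i < 8; i++)
		printf("entry %u = %" PRIu32 "\n", i, get_nibble(packed, i));
	return 0;
}
```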
+@@ -2519,17 +2519,17 @@ struct shmem_lfa {
+ };
+ 
+ /*
+- * Used to suppoert NSCI get OS driver version
++ * Used to support NSCI get OS driver version
+  * On driver load the version value will be set
+  * On driver unload driver value of 0x0 will be set
+  */
+ struct os_drv_ver {
+ 	#define DRV_VER_NOT_LOADED                      0
+-	/*personalites orrder is importent */
++	/* personalities order is important */
+ 	#define DRV_PERS_ETHERNET                       0
+ 	#define DRV_PERS_ISCSI                          1
+ 	#define DRV_PERS_FCOE                           2
+-	/*shmem2 struct is constatnt can't add more personalites here*/
++	/* shmem2 struct is constant; can't add more personalities here */
+ 	#define MAX_DRV_PERS                            3
+ 	uint32_t  versions[MAX_DRV_PERS];
+ };
+@@ -2821,7 +2821,7 @@ struct shmem2_region {
+ 	/* Flag to the driver that PF's drv_info_host_addr buffer was read */
+ 	uint32_t mfw_drv_indication;			/* Offset 0x19c */
+ 
+-	/* We use inidcation for each PF (0..3) */
++	/* We use indication for each PF (0..3) */
+ 	#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_)  (1 << (_pf_))
+ 
+ 	union { /* For various OEMs */			/* Offset 0x1a0 */
+@@ -6195,7 +6195,7 @@ struct hc_sb_data {
+ 
+ 
+ /*
+- * Segment types for host coaslescing
++ * Segment types for host coalescing
+  */
+ enum hc_segment {
+ 	HC_REGULAR_SEGMENT,
+@@ -6242,7 +6242,7 @@ struct hc_status_block_data_e2 {
+ 
+ 
+ /*
+- * IGU block operartion modes (in Everest2)
++ * IGU block operation modes (in Everest2)
+  */
+ enum igu_mode {
+ 	HC_IGU_BC_MODE,
+@@ -6508,7 +6508,7 @@ struct stats_query_header {
+ 
+ 
+ /*
+- * Types of statistcis query entry
++ * Types of statistics query entry
+  */
+ enum stats_query_type {
+ 	STATS_TYPE_QUEUE,
+@@ -6542,7 +6542,7 @@ enum storm_id {
+ 
+ 
+ /*
+- * Taffic types used in ETS and flow control algorithms
++ * Traffic types used in ETS and flow control algorithms
+  */
+ enum traffic_type {
+ 	LLFC_TRAFFIC_TYPE_NW,
+diff --git a/dpdk/drivers/net/bnx2x/ecore_init_ops.h b/dpdk/drivers/net/bnx2x/ecore_init_ops.h
+index 0945e79993..4ed811fdd4 100644
+--- a/dpdk/drivers/net/bnx2x/ecore_init_ops.h
++++ b/dpdk/drivers/net/bnx2x/ecore_init_ops.h
+@@ -534,7 +534,7 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
+ 		REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
+ 	}
+ 
+-	/* Validate number of tags suppoted by device */
++	/* Validate number of tags supported by device */
+ #define PCIE_REG_PCIER_TL_HDR_FC_ST		0x2980
+ 	val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
+ 	val &= 0xFF;
+@@ -714,7 +714,7 @@ static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
+ 	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+ 		ecore_ilt_line_init_op(sc, ilt, i, initop);
+ 
+-	/* init/clear the ILT boundries */
++	/* init/clear the ILT boundaries */
+ 	ecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop);
+ }
+ 
+@@ -765,7 +765,7 @@ static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,
+ 
+ /*
+  * called during init common stage, ilt clients should be initialized
+- * prioir to calling this function
++ * prior to calling this function
+  */
+ static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
+ {
+diff --git a/dpdk/drivers/net/bnx2x/ecore_reg.h b/dpdk/drivers/net/bnx2x/ecore_reg.h
+index bb92d131f8..6f7b0522f2 100644
+--- a/dpdk/drivers/net/bnx2x/ecore_reg.h
++++ b/dpdk/drivers/net/bnx2x/ecore_reg.h
+@@ -19,7 +19,7 @@
+ #define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT		 (0x1 << 3)
+ #define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR			 (0x1 << 4)
+ #define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND		 (0x1 << 1)
+-/* [R 1] ATC initalization done */
++/* [R 1] ATC initialization done */
+ #define ATC_REG_ATC_INIT_DONE					 0x1100bc
+ /* [RW 6] Interrupt mask register #0 read/write */
+ #define ATC_REG_ATC_INT_MASK					 0x1101c8
+@@ -56,7 +56,7 @@
+ #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0				 0x60078
+ /* [RW 10] Write client 0: Assert pause threshold. Not Functional */
+ #define BRB1_REG_PAUSE_LOW_THRESHOLD_0				 0x60068
+-/* [R 24] The number of full blocks occpied by port. */
++/* [R 24] The number of full blocks occupied by port. */
+ #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0				 0x60094
+ /* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+ #define CCM_REG_CAM_OCCUP					 0xd0188
+@@ -456,7 +456,7 @@
+ #define IGU_REG_PCI_PF_MSIX_FUNC_MASK				 0x130148
+ #define IGU_REG_PCI_PF_MSI_EN					 0x130140
+ /* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
+- * pending; 1 = pending. Pendings means interrupt was asserted; and write
++ * pending; 1 = pending. Pending means interrupt was asserted and write
+  * done was not received. Data valid only in addresses 0-4. all the rest are
+  * zero.
+  */
+@@ -1059,14 +1059,14 @@
+ /* [R 28] this field hold the last information that caused reserved
+  * attention. bits [19:0] - address; [22:20] function; [23] reserved;
+  * [27:24] the master that caused the attention - according to the following
+- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
++ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+  * dbu; 8 = dmae
+  */
+ #define MISC_REG_GRC_RSV_ATTN					 0xa3c0
+ /* [R 28] this field hold the last information that caused timeout
+  * attention. bits [19:0] - address; [22:20] function; [23] reserved;
+  * [27:24] the master that caused the attention - according to the following
+- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
++ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+  * dbu; 8 = dmae
+  */
+ #define MISC_REG_GRC_TIMEOUT_ATTN				 0xa3c4
+@@ -1567,7 +1567,7 @@
+  * MAC DA 2. The reset default is set to mask out all parameters.
+  */
+ #define NIG_REG_P0_LLH_PTP_PARAM_MASK				 0x187a0
+-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
++/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+@@ -1672,7 +1672,7 @@
+  * MAC DA 2. The reset default is set to mask out all parameters.
+  */
+ #define NIG_REG_P0_TLLH_PTP_PARAM_MASK				 0x187f0
+-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
++/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+@@ -1839,7 +1839,7 @@
+  * MAC DA 2. The reset default is set to mask out all parameters.
+  */
+ #define NIG_REG_P1_LLH_PTP_PARAM_MASK				 0x187c8
+-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
++/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+@@ -1926,7 +1926,7 @@
+  * MAC DA 2. The reset default is set to mask out all parameters.
+  */
+ #define NIG_REG_P1_TLLH_PTP_PARAM_MASK				 0x187f8
+-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
++/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+@@ -2306,7 +2306,7 @@
+ #define PBF_REG_HDRS_AFTER_BASIC				 0x15c0a8
+ /* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+ #define PBF_REG_HDRS_AFTER_TAG_0				 0x15c0b8
+-/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest
++/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+  * priority in the command arbiter.
+  */
+ #define PBF_REG_HIGH_PRIORITY_COS_NUM				 0x15c04c
+@@ -2366,7 +2366,7 @@
+  */
+ #define PBF_REG_NUM_STRICT_ARB_SLOTS				 0x15c064
+ /* [R 11] Removed for E3 B0 - Port 0 threshold used by arbiter in 16 byte
+- * lines used when pause not suppoterd.
++ * lines used when pause not supported.
+  */
+ #define PBF_REG_P0_ARB_THRSH					 0x1400e4
+ /* [R 11] Removed for E3 B0 - Current credit for port 0 in the tx port
+@@ -3503,7 +3503,7 @@
+  * queues.
+  */
+ #define QM_REG_OVFERROR						 0x16805c
+-/* [RC 6] the Q were the qverflow occurs */
++/* [RC 6] the Q where the overflow occurs */
+ #define QM_REG_OVFQNUM						 0x168058
+ /* [R 16] Pause state for physical queues 15-0 */
+ #define QM_REG_PAUSESTATE0					 0x168410
+@@ -4890,7 +4890,7 @@
+ 	if set, generate pcie_err_attn output when this error is seen. WC \
+ 	*/
+ #define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \
+-	(1 << 3) /* Receive UR Statusfor Function 2. If set, generate \
++	(1 << 3) /* Receive UR Status for Function 2. If set, generate \
+ 	pcie_err_attn output when this error is seen. WC */
+ #define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \
+ 	(1 << 2) /* Completer Timeout Status for Function 2, if \
+@@ -4986,7 +4986,7 @@
+ 	if set, generate pcie_err_attn output when this error is seen. WC \
+ 	*/
+ #define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \
+-	(1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
++	(1 << 3) /* Receive UR Status for Function 5. If set, generate \
+ 	pcie_err_attn output when this error is seen. WC */
+ #define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \
+ 	(1 << 2) /* Completer Timeout Status for Function 5, if \
+diff --git a/dpdk/drivers/net/bnx2x/ecore_sp.c b/dpdk/drivers/net/bnx2x/ecore_sp.c
+index 0075422eee..c6c3857778 100644
+--- a/dpdk/drivers/net/bnx2x/ecore_sp.c
++++ b/dpdk/drivers/net/bnx2x/ecore_sp.c
+@@ -1338,7 +1338,7 @@ static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
+ 	if (rc != ECORE_SUCCESS) {
+ 		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
+ 
+-		/** Calling function should not diffrentiate between this case
++		/** Calling function should not differentiate between this case
+ 		 *  and the case in which there is already a pending ramrod
+ 		 */
+ 		rc = ECORE_PENDING;
+@@ -2246,7 +2246,7 @@ struct ecore_pending_mcast_cmd {
+ 	union {
+ 		ecore_list_t macs_head;
+ 		uint32_t macs_num;	/* Needed for DEL command */
+-		int next_bin;	/* Needed for RESTORE flow with aprox match */
++		int next_bin;	/* Needed for RESTORE flow with approx match */
+ 	} data;
+ 
+ 	int done;		/* set to TRUE, when the command has been handled,
+@@ -3424,7 +3424,7 @@ void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
+ 	} else {
+ 
+ 		/*
+-		 * CAM credit is equaly divided between all active functions
++		 * CAM credit is equally divided between all active functions
+ 		 * on the PATH.
+ 		 */
+ 		if (func_num > 0) {
+diff --git a/dpdk/drivers/net/bnx2x/ecore_sp.h b/dpdk/drivers/net/bnx2x/ecore_sp.h
+index d58072dac0..1f4d5a3ebe 100644
+--- a/dpdk/drivers/net/bnx2x/ecore_sp.h
++++ b/dpdk/drivers/net/bnx2x/ecore_sp.h
+@@ -430,7 +430,7 @@ enum {
+ 	RAMROD_RESTORE,
+ 	 /* Execute the next command now */
+ 	RAMROD_EXEC,
+-	/* Don't add a new command and continue execution of posponed
++	/* Don't add a new command and continue execution of postponed
+ 	 * commands. If not set a new command will be added to the
+ 	 * pending commands list.
+ 	 */
+@@ -1173,7 +1173,7 @@ struct ecore_rss_config_obj {
+ 	/* Last configured indirection table */
+ 	uint8_t			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ 
+-	/* flags for enabling 4-tupple hash on UDP */
++	/* flags for enabling 4-tuple hash on UDP */
+ 	uint8_t			udp_rss_v4;
+ 	uint8_t			udp_rss_v6;
+ 
+@@ -1285,7 +1285,7 @@ enum ecore_q_type {
+ #define ECORE_MULTI_TX_COS_E3B0			3
+ #define ECORE_MULTI_TX_COS			3 /* Maximum possible */
+ #define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
+-/* DMAE channel to be used by FW for timesync workaroun. A driver that sends
++/* DMAE channel to be used by FW for timesync workaround. A driver that sends
+  * timesync-related ramrods must not use this DMAE command ID.
+  */
+ #define FW_DMAE_CMD_ID 6
+diff --git a/dpdk/drivers/net/bnx2x/elink.c b/dpdk/drivers/net/bnx2x/elink.c
+index 2093d8f373..43fbf04ece 100644
+--- a/dpdk/drivers/net/bnx2x/elink.c
++++ b/dpdk/drivers/net/bnx2x/elink.c
+@@ -1460,7 +1460,7 @@ static void elink_ets_e3b0_pbf_disabled(const struct elink_params *params)
+ }
+ /******************************************************************************
+  * Description:
+- *	E3B0 disable will return basicly the values to init values.
++ *	E3B0 disable will return basically the values to init values.
+  *.
+  ******************************************************************************/
+ static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
+@@ -1483,7 +1483,7 @@ static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
+ 
+ /******************************************************************************
+  * Description:
+- *	Disable will return basicly the values to init values.
++ *	Disable will return basically the values to init values.
+  *
+  ******************************************************************************/
+ elink_status_t elink_ets_disabled(struct elink_params *params,
+@@ -1506,7 +1506,7 @@ elink_status_t elink_ets_disabled(struct elink_params *params,
+ 
+ /******************************************************************************
+  * Description
+- *	Set the COS mappimg to SP and BW until this point all the COS are not
++ *	Set the COS mapping to SP and BW; until this point all the COS are not
+  *	set as SP or BW.
+  ******************************************************************************/
+ static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,
+@@ -1652,7 +1652,7 @@ static elink_status_t elink_ets_e3b0_get_total_bw(
+ 		}
+ 		ELINK_DEBUG_P0(sc,
+ 		   "elink_ets_E3B0_config total BW should be 100");
+-		/* We can handle a case whre the BW isn't 100 this can happen
++		/* We can handle a case where the BW isn't 100; this can happen
+ 		 * if the TC are joined.
+ 		 */
+ 	}
+@@ -2608,7 +2608,7 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
+ 	REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1);
+ 
+ #ifdef ELINK_INCLUDE_EMUL
+-	/* for paladium */
++	/* for palladium */
+ 	if (CHIP_REV_IS_EMUL(sc)) {
+ 		/* Use lane 1 (of lanes 0-3) */
+ 		REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);
+@@ -2850,7 +2850,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
+ 
+ 	/* Set Time (based unit is 512 bit time) between automatic
+ 	 * re-sending of PP packets and enable automatic re-send of
+-	 * Per-Priroity Packet as long as pp_gen is asserted and
++	 * Per-Priority Packet as long as pp_gen is asserted and
+ 	 * pp_disable is low.
+ 	 */
+ 	val = 0x8000;
+@@ -3369,7 +3369,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
+ }
+ 
+ /**
+- * elink_get_emac_base - retrive emac base address
++ * elink_get_emac_base - retrieve emac base address
+  *
+  * @bp:			driver handle
+  * @mdc_mdio_access:	access type
+@@ -4518,7 +4518,7 @@ static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
+ 		elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
+ 				 reg_set[i].val);
+ 
+-	/* Start KR2 work-around timer which handles BNX2X8073 link-parner */
++	/* Start KR2 work-around timer which handles BNX2X8073 link-partner */
+ 	params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ 	elink_update_link_attr(params, params->link_attr_sync);
+ }
+@@ -7824,7 +7824,7 @@ elink_status_t elink_link_update(struct elink_params *params,
+ 			 * hence its link is expected to be down
+ 			 * - SECOND_PHY means that first phy should not be able
+ 			 * to link up by itself (using configuration)
+-			 * - DEFAULT should be overridden during initialiazation
++			 * - DEFAULT should be overridden during initialization
+ 			 */
+ 				ELINK_DEBUG_P1(sc, "Invalid link indication"
+ 					       " mpc=0x%x. DISABLING LINK !!!",
+@@ -10991,7 +10991,7 @@ static elink_status_t elink_84858_cmd_hdlr(struct elink_phy *phy,
+ 		ELINK_DEBUG_P0(sc, "FW cmd failed.");
+ 		return ELINK_STATUS_ERROR;
+ 	}
+-	/* Step5: Once the command has completed, read the specficied DATA
++	/* Step5: Once the command has completed, read the specified DATA
+ 	 * registers for any saved results for the command, if applicable
+ 	 */
+ 
+diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h
+index 234161053f..76783eb3a1 100644
+--- a/dpdk/drivers/net/bnxt/bnxt.h
++++ b/dpdk/drivers/net/bnxt/bnxt.h
+@@ -72,8 +72,7 @@
+ #define BROADCOM_DEV_ID_58818_VF	0xd82e
+ 
+ #define BROADCOM_DEV_957508_N2100	0x5208
+-#define IS_BNXT_DEV_957508_N2100(bp)	\
+-	((bp)->pdev->id.subsystem_device_id == BROADCOM_DEV_957508_N2100)
++#define BROADCOM_DEV_957414_N225	0x4145
+ 
+ #define BNXT_MAX_MTU		9574
+ #define BNXT_NUM_VLANS		2
+@@ -297,7 +296,7 @@ struct bnxt_link_info {
+ 	uint8_t			link_signal_mode;
+ 	uint16_t		force_pam4_link_speed;
+ 	uint16_t		support_pam4_speeds;
+-	uint16_t		auto_pam4_link_speeds;
++	uint16_t		auto_pam4_link_speed_mask;
+ 	uint16_t		support_pam4_auto_speeds;
+ 	uint8_t			req_signal_mode;
+ 	uint8_t			module_status;
+@@ -580,30 +579,6 @@ struct bnxt_rep_info {
+ 	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
+ 	RTE_ETH_RSS_LEVEL_MASK)
+ 
+-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+-				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+-				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+-				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+-				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+-				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+-				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+-				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+-				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+-				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+-				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+-
+-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+-				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+-				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+-				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+-				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+-				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+-				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+-				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+-				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+-				     RTE_ETH_RX_OFFLOAD_SCATTER | \
+-				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
+-
+ #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
+ 
+ struct bnxt_flow_stat_info {
+@@ -672,7 +647,6 @@ struct bnxt {
+ #define BNXT_FLAG_PORT_STATS		BIT(2)
+ #define BNXT_FLAG_JUMBO			BIT(3)
+ #define BNXT_FLAG_SHORT_CMD		BIT(4)
+-#define BNXT_FLAG_UPDATE_HASH		BIT(5)
+ #define BNXT_FLAG_PTP_SUPPORTED		BIT(6)
+ #define BNXT_FLAG_MULTI_HOST    	BIT(7)
+ #define BNXT_FLAG_EXT_RX_PORT_STATS	BIT(8)
+@@ -695,9 +669,6 @@ struct bnxt {
+ #define BNXT_FLAG_FLOW_XSTATS_EN		BIT(25)
+ #define BNXT_FLAG_DFLT_MAC_SET			BIT(26)
+ #define BNXT_FLAG_GFID_ENABLE			BIT(27)
+-#define BNXT_FLAG_RFS_NEEDS_VNIC		BIT(28)
+-#define BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2	BIT(29)
+-#define BNXT_RFS_NEEDS_VNIC(bp)	((bp)->flags & BNXT_FLAG_RFS_NEEDS_VNIC)
+ #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
+ #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
+ #define BNXT_NPAR(bp)		((bp)->flags & BNXT_FLAG_NPAR_PF)
+@@ -834,7 +805,7 @@ struct bnxt {
+ 	uint16_t		max_tx_rings;
+ 	uint16_t		max_rx_rings;
+ #define MAX_STINGRAY_RINGS		236U
+-#define BNXT_MAX_VF_REP_RINGS	8
++#define BNXT_MAX_VF_REP_RINGS	8U
+ 
+ 	uint16_t		max_nq_rings;
+ 	uint16_t		max_l2_ctx;
+@@ -891,6 +862,15 @@ struct bnxt {
+ 	uint16_t		tx_cfa_action;
+ 	struct bnxt_ring_stats	*prev_rx_ring_stats;
+ 	struct bnxt_ring_stats	*prev_tx_ring_stats;
++
++#define BNXT_MAX_MC_ADDRS	((bp)->max_mcast_addr)
++	struct rte_ether_addr	*mcast_addr_list;
++	rte_iova_t		mc_list_dma_addr;
++	uint32_t		nb_mc_addr;
++	uint32_t		max_mcast_addr; /* maximum number of mcast filters supported */
++
++	struct rte_eth_rss_conf	rss_conf; /* RSS configuration. */
++	uint16_t		tunnel_disable_flag; /* tunnel stateless offloads status */
+ };
+ 
+ static
+@@ -1063,5 +1043,8 @@ int bnxt_flow_stats_cnt(struct bnxt *bp);
+ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp);
+ int bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
+ 			 const struct rte_flow_ops **ops);
++int bnxt_dev_start_op(struct rte_eth_dev *eth_dev);
++int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev);
++void bnxt_handle_vf_cfg_change(void *arg);
+ 
+ #endif
+diff --git a/dpdk/drivers/net/bnxt/bnxt_cpr.c b/dpdk/drivers/net/bnxt/bnxt_cpr.c
+index a43b22a8f8..e1dcf3ac2f 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_cpr.c
++++ b/dpdk/drivers/net/bnxt/bnxt_cpr.c
+@@ -107,6 +107,26 @@ static void bnxt_handle_event_error_report(struct bnxt *bp,
+ 	}
+ }
+ 
++void bnxt_handle_vf_cfg_change(void *arg)
++{
++	struct bnxt *bp = arg;
++	struct rte_eth_dev *eth_dev = bp->eth_dev;
++	int rc;
++
++	/* Free and recreate filters with default VLAN */
++	if (eth_dev->data->dev_started) {
++		rc = bnxt_dev_stop_op(eth_dev);
++		if (rc != 0) {
++			PMD_DRV_LOG(ERR, "Failed to stop Port:%u\n", eth_dev->data->port_id);
++			return;
++		}
++
++		rc = bnxt_dev_start_op(eth_dev);
++		if (rc != 0)
++			PMD_DRV_LOG(ERR, "Failed to start Port:%u\n", eth_dev->data->port_id);
++	}
++}
++
+ /*
+  * Async event handling
+  */
+@@ -138,8 +158,11 @@ void bnxt_handle_async_event(struct bnxt *bp,
+ 		PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ 		break;
+ 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+-		PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
++		PMD_DRV_LOG(INFO, "Port %u: VF config change async event\n", port_id);
++		PMD_DRV_LOG(INFO, "event: data1 %#x data2 %#x\n", data1, data2);
+ 		bnxt_hwrm_func_qcfg(bp, NULL);
++		if (BNXT_VF(bp))
++			rte_eal_alarm_set(1, bnxt_handle_vf_cfg_change, (void *)bp);
+ 		break;
+ 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+ 		PMD_DRV_LOG(INFO, "Port conn async event\n");
+diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c
+index f79f33ab4e..517e4b3898 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c
++++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c
+@@ -177,6 +177,7 @@ static int bnxt_restore_vlan_filters(struct bnxt *bp);
+ static void bnxt_dev_recover(void *arg);
+ static void bnxt_free_error_recovery_info(struct bnxt *bp);
+ static void bnxt_free_rep_info(struct bnxt *bp);
++static int bnxt_check_fw_ready(struct bnxt *bp);
+ 
+ int is_bnxt_in_error(struct bnxt *bp)
+ {
+@@ -368,7 +369,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
+ 	if (rc)
+ 		goto alloc_mem_err;
+ 
+-	rc = bnxt_alloc_vnic_attributes(bp);
++	rc = bnxt_alloc_vnic_attributes(bp, reconfig);
+ 	if (rc)
+ 		goto alloc_mem_err;
+ 
+@@ -659,6 +660,19 @@ static int bnxt_init_ctx_mem(struct bnxt *bp)
+ 	return rc;
+ }
+ 
++static inline bool bnxt_force_link_config(struct bnxt *bp)
++{
++	uint16_t subsystem_device_id = bp->pdev->id.subsystem_device_id;
++
++	switch (subsystem_device_id) {
++	case BROADCOM_DEV_957508_N2100:
++	case BROADCOM_DEV_957414_N225:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ static int bnxt_update_phy_setting(struct bnxt *bp)
+ {
+ 	struct rte_eth_link new;
+@@ -671,11 +685,12 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
+ 	}
+ 
+ 	/*
+-	 * On BCM957508-N2100 adapters, FW will not allow any user other
+-	 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call
+-	 * always returns link up. Force phy update always in that case.
++	 * The device is not obliged to bring the link down in certain
++	 * scenarios, even when forced. When FW does not allow any user
++	 * other than the BMC to shut down the port, bnxt_get_hwrm_link_config()
++	 * always returns link up. Always force a phy update in that case.
+ 	 */
+-	if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
++	if (!new.link_status || bnxt_force_link_config(bp)) {
+ 		rc = bnxt_set_hwrm_link_config(bp, true);
+ 		if (rc) {
+ 			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
+@@ -708,7 +723,7 @@ static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
+ 					     sizeof(struct bnxt_ring_stats) *
+ 					     bp->tx_cp_nr_rings,
+ 					     0);
+-	if (bp->prev_tx_ring_stats == NULL)
++	if (bp->tx_cp_nr_rings > 0 && bp->prev_tx_ring_stats == NULL)
+ 		goto error;
+ 
+ 	return 0;
+@@ -786,17 +801,11 @@ static int bnxt_start_nic(struct bnxt *bp)
+ 		}
+ 	}
+ 
+-	/* default vnic 0 */
+-	rc = bnxt_setup_one_vnic(bp, 0);
+-	if (rc)
+-		goto err_out;
+ 	/* VNIC configuration */
+-	if (BNXT_RFS_NEEDS_VNIC(bp)) {
+-		for (i = 1; i < bp->nr_vnics; i++) {
+-			rc = bnxt_setup_one_vnic(bp, i);
+-			if (rc)
+-				goto err_out;
+-		}
++	for (i = 0; i < bp->nr_vnics; i++) {
++		rc = bnxt_setup_one_vnic(bp, i);
++		if (rc)
++			goto err_out;
+ 	}
+ 
+ 	for (j = 0; j < bp->tx_nr_rings; j++) {
+@@ -894,6 +903,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp)
+ 
+ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
+ {
++	uint32_t pam4_link_speed = 0;
+ 	uint32_t link_speed = 0;
+ 	uint32_t speed_capa = 0;
+ 
+@@ -903,8 +913,8 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
+ 	link_speed = bp->link_info->support_speeds;
+ 
+ 	/* If PAM4 is configured, use PAM4 supported speed */
+-	if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0)
+-		link_speed = bp->link_info->support_pam4_speeds;
++	if (bp->link_info->support_pam4_speeds > 0)
++		pam4_link_speed = bp->link_info->support_pam4_speeds;
+ 
+ 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
+ 		speed_capa |= RTE_ETH_LINK_SPEED_100M;
+@@ -926,11 +936,11 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
+ 		speed_capa |= RTE_ETH_LINK_SPEED_50G;
+ 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
+ 		speed_capa |= RTE_ETH_LINK_SPEED_100G;
+-	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
++	if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
+ 		speed_capa |= RTE_ETH_LINK_SPEED_50G;
+-	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
++	if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
+ 		speed_capa |= RTE_ETH_LINK_SPEED_100G;
+-	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
++	if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
+ 		speed_capa |= RTE_ETH_LINK_SPEED_200G;
+ 
+ 	if (bp->link_info->auto_mode ==
+@@ -954,7 +964,7 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ 		return rc;
+ 
+ 	/* MAC Specifics */
+-	dev_info->max_mac_addrs = bp->max_l2_ctx;
++	dev_info->max_mac_addrs = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
+ 	dev_info->max_hash_mac_addrs = 0;
+ 
+ 	/* PF/VF specifics */
+@@ -977,16 +987,10 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ 	dev_info->min_rx_bufsize = 1;
+ 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
+ 
+-	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
+-	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+-		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+-	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
+-		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
++	dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp);
+ 	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+-	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
++	dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) |
+ 				    dev_info->tx_queue_offload_capa;
+-	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+-		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
+ 
+ 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
+@@ -1067,6 +1071,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
+ {
+ 	struct bnxt *bp = eth_dev->data->dev_private;
+ 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
++	struct rte_eth_rss_conf *rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ 	int rc;
+ 
+ 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
+@@ -1141,6 +1146,17 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
+ 		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
+ 
++	/* application provides the hash key to program */
++	if (rss_conf->rss_key != NULL) {
++		if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE)
++			PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long",
++				    eth_dev->data->port_id, HW_HASH_KEY_SIZE);
++		else
++			memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
++	}
++	bp->rss_conf.rss_key_len = HW_HASH_KEY_SIZE;
++	bp->rss_conf.rss_hf = rss_conf->rss_hf;
++
+ 	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+ 
+ 	return 0;
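The configure-time key handling added above accepts a user-supplied RSS key only when its length matches the hardware key size, and otherwise warns and keeps the previous key. A hedged sketch of that rule, with a placeholder constant standing in for the driver's HW_HASH_KEY_SIZE:

```c
#include <stdint.h>
#include <string.h>

#define HW_KEY_SIZE 40 /* placeholder for HW_HASH_KEY_SIZE */

/* Copy the user's key into the cached configuration only when it is
 * exactly the size the hardware expects; otherwise keep the old key. */
static void cache_rss_key(uint8_t cached[HW_KEY_SIZE],
			  const uint8_t *user_key, uint32_t user_len)
{
	if (user_key != NULL && user_len == HW_KEY_SIZE)
		memcpy(cached, user_key, HW_KEY_SIZE);
}
```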
+@@ -1336,6 +1352,11 @@ static int bnxt_handle_if_change_status(struct bnxt *bp)
+ 
+ 	/* clear fatal flag so that re-init happens */
+ 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
++
++	rc = bnxt_check_fw_ready(bp);
++	if (rc)
++		return rc;
++
+ 	rc = bnxt_init_resources(bp, true);
+ 
+ 	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
+@@ -1525,7 +1546,7 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev)
+ }
+ 
+ /* Unload the driver, release resources */
+-static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
++int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+ {
+ 	struct bnxt *bp = eth_dev->data->dev_private;
+ 
+@@ -1541,18 +1562,13 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+ 	return bnxt_dev_stop(eth_dev);
+ }
+ 
+-static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
++int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
+ {
+ 	struct bnxt *bp = eth_dev->data->dev_private;
+ 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ 	int vlan_mask = 0;
+ 	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;
+ 
+-	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
+-		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
+-		return -EINVAL;
+-	}
+-
+ 	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ 		PMD_DRV_LOG(ERR,
+ 			    "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
+@@ -1673,6 +1689,7 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
+ 	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
+ 	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
+ 	bnxt_cancel_fc_thread(bp);
++	rte_eal_alarm_cancel(bnxt_handle_vf_cfg_change, (void *)bp);
+ 
+ 	if (eth_dev->data->dev_started)
+ 		ret = bnxt_dev_stop(eth_dev);
+@@ -1812,6 +1829,14 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
+ 	if (bp->link_info == NULL)
+ 		goto out;
+ 
++	/* Only a single function PF can bring the phy down.
++	 * In certain scenarios, the device is not obliged to bring the link
++	 * down even when forced. When the port is stopped, report link down in those cases.
++	 */
++	if (!eth_dev->data->dev_started &&
++	    (!BNXT_SINGLE_PF(bp) || bnxt_force_link_config(bp)))
++		goto out;
++
+ 	do {
+ 		/* Retrieve link info from hardware */
+ 		rc = bnxt_get_hwrm_link_config(bp, &new);
+@@ -1829,12 +1854,6 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
+ 		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+ 	} while (cnt--);
+ 
+-	/* Only single function PF can bring phy down.
+-	 * When port is stopped, report link down for VF/MH/NPAR functions.
+-	 */
+-	if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
+-		memset(&new, 0, sizeof(new));
+-
+ out:
+ 	/* Timed out or success */
+ 	if (new.link_status != eth_dev->data->dev_link.link_status ||
+@@ -2125,11 +2144,6 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
+ 			return -EINVAL;
+ 	}
+ 
+-	bp->flags |= BNXT_FLAG_UPDATE_HASH;
+-	memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
+-	       rss_conf,
+-	       sizeof(*rss_conf));
+-
+ 	/* Update the default RSS VNIC(s) */
+ 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
+@@ -2137,6 +2151,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
+ 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
+ 					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
+ 
++	/* Cache the hash function */
++	bp->rss_conf.rss_hf = rss_conf->rss_hf;
++
+ 	/*
+ 	 * If hashkey is not specified, use the previously configured
+ 	 * hashkey
+@@ -2152,6 +2169,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
+ 	}
+ 	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
+ 
++	/* Cache the hash key */
++	memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
++
+ rss_config:
+ 	rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ 	return rc;
+@@ -2831,9 +2851,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+ 			  uint32_t nb_mc_addr)
+ {
+ 	struct bnxt *bp = eth_dev->data->dev_private;
+-	char *mc_addr_list = (char *)mc_addr_set;
+ 	struct bnxt_vnic_info *vnic;
+-	uint32_t off = 0, i = 0;
++	uint32_t i = 0;
+ 	int rc;
+ 
+ 	rc = is_bnxt_in_error(bp);
+@@ -2842,6 +2861,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+ 
+ 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ 
++	bp->nb_mc_addr = nb_mc_addr;
++
+ 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
+ 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ 		goto allmulti;
+@@ -2849,14 +2870,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+ 
+ 	/* TODO Check for Duplicate mcast addresses */
+ 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+-	for (i = 0; i < nb_mc_addr; i++) {
+-		memcpy(vnic->mc_list + off, &mc_addr_list[i],
+-			RTE_ETHER_ADDR_LEN);
+-		off += RTE_ETHER_ADDR_LEN;
+-	}
++	for (i = 0; i < nb_mc_addr; i++)
++		rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]);
+ 
+-	vnic->mc_addr_cnt = i;
+-	if (vnic->mc_addr_cnt)
++	if (bp->nb_mc_addr)
+ 		vnic->flags |= BNXT_VNIC_INFO_MCAST;
+ 	else
+ 		vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
+@@ -3003,9 +3020,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+ 
+ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+ {
+-	uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
+ 	struct bnxt *bp = eth_dev->data->dev_private;
+-	uint32_t new_pkt_size;
+ 	uint32_t rc;
+ 	uint32_t i;
+ 
+@@ -3013,35 +3028,25 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+ 	if (rc)
+ 		return rc;
+ 
++	/* Return if port is active */
++	if (eth_dev->data->dev_started) {
++		PMD_DRV_LOG(ERR, "Stop port before changing MTU\n");
++		return -EPERM;
++	}
++
+ 	/* Exit if receive queues are not configured yet */
+ 	if (!eth_dev->data->nb_rx_queues)
+-		return rc;
+-
+-	new_pkt_size = new_mtu + overhead;
++		return -ENOTSUP;
+ 
+-	/*
+-	 * Disallow any MTU change that would require scattered receive support
+-	 * if it is not already enabled.
+-	 */
+-	if (eth_dev->data->dev_started &&
+-	    !eth_dev->data->scattered_rx &&
+-	    (new_pkt_size >
+-	     eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+-		PMD_DRV_LOG(ERR,
+-			    "MTU change would require scattered rx support. ");
+-		PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
+-		return -EINVAL;
+-	}
++	/* Is there a change in mtu setting? */
++	if (eth_dev->data->mtu == new_mtu)
++		return 0;
+ 
+ 	if (new_mtu > RTE_ETHER_MTU)
+ 		bp->flags |= BNXT_FLAG_JUMBO;
+ 	else
+ 		bp->flags &= ~BNXT_FLAG_JUMBO;
+ 
+-	/* Is there a change in mtu setting? */
+-	if (eth_dev->data->mtu == new_mtu)
+-		return rc;
+-
+ 	for (i = 0; i < bp->nr_vnics; i++) {
+ 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ 		uint16_t size = 0;
+@@ -4264,6 +4269,18 @@ static int bnxt_restore_mac_filters(struct bnxt *bp)
+ 	return 0;
+ }
+ 
++static int bnxt_restore_mcast_mac_filters(struct bnxt *bp)
++{
++	int ret = 0;
++
++	ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list,
++					   bp->nb_mc_addr);
++	if (ret)
++		PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addreeses\n");
++
++	return ret;
++}
++
+ static int bnxt_restore_filters(struct bnxt *bp)
+ {
+ 	struct rte_eth_dev *dev = bp->eth_dev;
+@@ -4284,14 +4301,21 @@ static int bnxt_restore_filters(struct bnxt *bp)
+ 	if (ret)
+ 		return ret;
+ 
++	/* if vlans are already programmed, this can fail with -EEXIST */
+ 	ret = bnxt_restore_vlan_filters(bp);
+-	/* TODO restore other filters as well */
++	if (ret && ret != -EEXIST)
++		return ret;
++
++	ret = bnxt_restore_mcast_mac_filters(bp);
++	if (ret)
++		return ret;
++
+ 	return ret;
+ }
+ 
+ static int bnxt_check_fw_ready(struct bnxt *bp)
+ {
+-	int timeout = bp->fw_reset_max_msecs;
++	int timeout = bp->fw_reset_max_msecs ? : BNXT_MAX_FW_RESET_TIMEOUT;
+ 	int rc = 0;
+ 
+ 	do {
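The timeout initialization above uses the GNU C conditional with an omitted middle operand, a ? : b, which yields a when a is non-zero and b otherwise. A portable spelling of the same fallback (the helper is illustrative; BNXT_MAX_FW_RESET_TIMEOUT's value is not shown in this hunk):

```c
/* GNU extension:  timeout = msecs ? : fallback;
 * Portable form: */
static int pick_timeout(int msecs, int fallback)
{
	return msecs ? msecs : fallback;
}
```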
+@@ -4345,16 +4369,16 @@ static void bnxt_dev_recover(void *arg)
+ 		goto err_start;
+ 	}
+ 
++	rc = bnxt_restore_filters(bp);
++	if (rc)
++		goto err_start;
++
+ 	rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst =
+ 		bp->eth_dev->rx_pkt_burst;
+ 	rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst =
+ 		bp->eth_dev->tx_pkt_burst;
+ 	rte_mb();
+ 
+-	rc = bnxt_restore_filters(bp);
+-	if (rc)
+-		goto err_start;
+-
+ 	PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n",
+ 		    bp->eth_dev->data->port_id);
+ 	pthread_mutex_unlock(&bp->err_recovery_lock);
+@@ -4985,11 +5009,15 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp)
+ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
+ {
+ 	struct bnxt *bp = eth_dev->data->dev_private;
++	size_t max_mac_addr = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
+ 	int rc = 0;
+ 
++	if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR)
++		PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n",
++			    bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
++
+ 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+-					       RTE_ETHER_ADDR_LEN *
+-					       bp->max_l2_ctx,
++					       RTE_ETHER_ADDR_LEN * max_mac_addr,
+ 					       0);
+ 	if (eth_dev->data->mac_addrs == NULL) {
+ 		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
+@@ -5016,6 +5044,23 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
+ 	/* Copy the permanent MAC from the FUNC_QCAPS response */
+ 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
+ 
++	/*
++	 *  Allocate memory to hold multicast mac addresses added.
++	 *  Used to restore them during reset recovery
++	 */
++	bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl",
++					  sizeof(struct rte_ether_addr) *
++					  BNXT_MAX_MC_ADDRS, 0);
++	if (bp->mcast_addr_list == NULL) {
++		PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n");
++		return -ENOMEM;
++	}
++	bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list);
++	if (bp->mc_list_dma_addr == RTE_BAD_IOVA) {
++		PMD_DRV_LOG(ERR, "Fail to map mcast_addr_list to physical memory\n");
++		return -ENOMEM;
++	}
++
+ 	return rc;
+ }
+ 
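The multicast-table setup above pairs rte_zmalloc() with rte_malloc_virt2iova() so the firmware can DMA the address list directly, treating RTE_BAD_IOVA as failure. A self-contained sketch of the same pattern (the table name and entry count are illustrative):

```c
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memory.h>

static struct rte_ether_addr *
alloc_dma_mc_table(unsigned int entries, rte_iova_t *iova)
{
	struct rte_ether_addr *tbl;

	/* zeroed allocation from the DPDK heap */
	tbl = rte_zmalloc("example_mc_tbl",
			  sizeof(struct rte_ether_addr) * entries, 0);
	if (tbl == NULL)
		return NULL;

	/* resolve the IO (bus) address the device will DMA from */
	*iova = rte_malloc_virt2iova(tbl);
	if (*iova == RTE_BAD_IOVA) {
		rte_free(tbl);
		return NULL;
	}
	return tbl;
}
```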
+@@ -5178,10 +5223,6 @@ static int bnxt_get_config(struct bnxt *bp)
+ 	if (rc)
+ 		return rc;
+ 
+-	rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
+-	if (rc)
+-		return rc;
+-
+ 	bnxt_hwrm_port_mac_qcfg(bp);
+ 
+ 	bnxt_hwrm_parent_pf_qcfg(bp);
+@@ -5229,6 +5270,25 @@ bnxt_init_locks(struct bnxt *bp)
+ 	return err;
+ }
+ 
++/* This should be called after we have queried trusted VF cap */
++static int bnxt_alloc_switch_domain(struct bnxt *bp)
++{
++	int rc = 0;
++
++	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
++		rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
++		if (rc)
++			PMD_DRV_LOG(ERR,
++				    "Failed to alloc switch domain: %d\n", rc);
++		else
++			PMD_DRV_LOG(INFO,
++				    "Switch domain allocated %d\n",
++				    bp->switch_domain_id);
++	}
++
++	return rc;
++}
++
+ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
+ {
+ 	int rc = 0;
+@@ -5237,6 +5297,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
+ 	if (rc)
+ 		return rc;
+ 
++	rc = bnxt_alloc_switch_domain(bp);
++	if (rc)
++		return rc;
++
+ 	if (!reconfig_dev) {
+ 		rc = bnxt_setup_mac_addr(bp->eth_dev);
+ 		if (rc)
+@@ -5272,6 +5336,16 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
+ 		}
+ 	}
+ 
++	if (!reconfig_dev) {
++		bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key",
++						   HW_HASH_KEY_SIZE, 0);
++		if (bp->rss_conf.rss_key == NULL) {
++			PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory",
++				    bp->eth_dev->data->port_id);
++			return -ENOMEM;
++		}
++	}
++
+ 	rc = bnxt_alloc_mem(bp, reconfig_dev);
+ 	if (rc)
+ 		return rc;
+@@ -5666,24 +5740,6 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
+ 	return ret;
+ }
+ 
+-static int bnxt_alloc_switch_domain(struct bnxt *bp)
+-{
+-	int rc = 0;
+-
+-	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
+-		rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
+-		if (rc)
+-			PMD_DRV_LOG(ERR,
+-				    "Failed to alloc switch domain: %d\n", rc);
+-		else
+-			PMD_DRV_LOG(INFO,
+-				    "Switch domain allocated %d\n",
+-				    bp->switch_domain_id);
+-	}
+-
+-	return rc;
+-}
+-
+ /* Allocate and initialize various fields in bnxt struct that
+  * need to be allocated/destroyed only once in the lifetime of the driver
+  */
+@@ -5760,10 +5816,6 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
+ 	if (rc)
+ 		return rc;
+ 
+-	rc = bnxt_alloc_switch_domain(bp);
+-	if (rc)
+-		return rc;
+-
+ 	return rc;
+ }
+ 
+@@ -5916,6 +5968,10 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
+ 	if (!reconfig_dev) {
+ 		bnxt_free_hwrm_resources(bp);
+ 		bnxt_free_error_recovery_info(bp);
++		rte_free(bp->mcast_addr_list);
++		bp->mcast_addr_list = NULL;
++		rte_free(bp->rss_conf.rss_key);
++		bp->rss_conf.rss_key = NULL;
+ 	}
+ 
+ 	bnxt_uninit_ctx_mem(bp);
+@@ -6302,4 +6358,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
+ RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
+ RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
+ RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
+-
++RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
+diff --git a/dpdk/drivers/net/bnxt/bnxt_filter.c b/dpdk/drivers/net/bnxt/bnxt_filter.c
+index 1d08e03b2f..b0c3bbd1b2 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_filter.c
++++ b/dpdk/drivers/net/bnxt/bnxt_filter.c
+@@ -99,6 +99,8 @@ void bnxt_free_all_filters(struct bnxt *bp)
+ 					bnxt_filter_info, next);
+ 			STAILQ_INSERT_TAIL(&bp->free_filter_list,
+ 					filter, next);
++			if (filter->vnic)
++				filter->vnic = NULL;
+ 			filter = temp_filter;
+ 		}
+ 		STAILQ_INIT(&vnic->filter);
+diff --git a/dpdk/drivers/net/bnxt/bnxt_flow.c b/dpdk/drivers/net/bnxt/bnxt_flow.c
+index d062be5525..8bdf2405f0 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_flow.c
++++ b/dpdk/drivers/net/bnxt/bnxt_flow.c
+@@ -1074,7 +1074,6 @@ bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
+ 		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
+ }
+ 
+-/* Valid actions supported along with RSS are count and mark. */
+ static int
+ bnxt_validate_rss_action(const struct rte_flow_action actions[])
+ {
+@@ -1116,13 +1115,55 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
+ 			 struct rte_flow_error *error)
+ {
+ 	const struct rte_flow_action_rss *rss;
+-	unsigned int rss_idx, i;
++	unsigned int rss_idx, i, j, fw_idx;
+ 	uint16_t hash_type;
+ 	uint64_t types;
+ 	int rc;
+ 
+ 	rss = (const struct rte_flow_action_rss *)act->conf;
+ 
++	/* must specify either all the Rx queues created by the application or zero queues */
++	if (rss->queue_num && vnic->rx_queue_cnt != rss->queue_num) {
++		rte_flow_error_set(error,
++				   EINVAL,
++				   RTE_FLOW_ERROR_TYPE_ACTION,
++				   act,
++				   "Incorrect RXQ count");
++		rc = -rte_errno;
++		goto ret;
++	}
++
++	/* Validate Rx queues */
++	for (i = 0; i < rss->queue_num; i++) {
++		PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", rss->queue[i]);
++
++		if (rss->queue[i] >= bp->rx_nr_rings ||
++		    !bp->rx_queues[rss->queue[i]]) {
++			rte_flow_error_set(error,
++					   EINVAL,
++					   RTE_FLOW_ERROR_TYPE_ACTION,
++					   act,
++					   "Invalid queue ID for RSS");
++			rc = -rte_errno;
++			goto ret;
++		}
++	}
++
++	/* Duplicate queue ids are not supported. */
++	for (i = 0; i < rss->queue_num; i++) {
++		for (j = i + 1; j < rss->queue_num; j++) {
++			if (rss->queue[i] == rss->queue[j]) {
++				rte_flow_error_set(error,
++						   EINVAL,
++						   RTE_FLOW_ERROR_TYPE_ACTION,
++						   act,
++						   "Duplicate queue ID for RSS");
++				rc = -rte_errno;
++				goto ret;
++			}
++		}
++	}
++
+ 	/* Currently only Toeplitz hash is supported. */
+ 	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ 	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
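The checks added above amount to range validation plus O(n^2) duplicate detection over the flow's queue list. Extracted into a standalone helper (names are illustrative), the logic is:

```c
#include <errno.h>
#include <stdint.h>

/* Return 0 when every queue id is in range and unique, else -EINVAL. */
static int validate_rss_queues(const uint16_t *queue, unsigned int queue_num,
			       unsigned int rx_nr_rings)
{
	unsigned int i, j;

	for (i = 0; i < queue_num; i++) {
		if (queue[i] >= rx_nr_rings)
			return -EINVAL; /* out-of-range queue id */
		for (j = i + 1; j < queue_num; j++)
			if (queue[i] == queue[j])
				return -EINVAL; /* duplicate queue id */
	}
	return 0;
}
```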
+@@ -1190,28 +1231,22 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
+ 	if (rss->queue_num == 0)
+ 		goto skip_rss_table;
+ 
+-	/* Validate Rx queues */
+-	for (i = 0; i < rss->queue_num; i++) {
+-		PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", rss->queue[i]);
+-
+-		if (rss->queue[i] >= bp->rx_nr_rings ||
+-		    !bp->rx_queues[rss->queue[i]]) {
+-			rte_flow_error_set(error,
+-					   EINVAL,
+-					   RTE_FLOW_ERROR_TYPE_ACTION,
+-					   act,
+-					   "Invalid queue ID for RSS");
+-			rc = -rte_errno;
+-			goto ret;
+-		}
+-	}
+-
+ 	/* Prepare the indirection table */
+-	for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; rss_idx++) {
++	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
++	     rss_idx++, fw_idx++) {
++		uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state;
+ 		struct bnxt_rx_queue *rxq;
+ 		uint32_t idx;
+ 
+-		idx = rss->queue[rss_idx % rss->queue_num];
++		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
++			idx = rss->queue[fw_idx % rss->queue_num];
++			if (rxq_state[idx] != RTE_ETH_QUEUE_STATE_STOPPED)
++				break;
++			fw_idx++;
++		}
++
++		if (i == bp->rx_cp_nr_rings)
++			return 0;
+ 
+ 		if (BNXT_CHIP_P5(bp)) {
+ 			rxq = bp->rx_queues[idx];
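The rewritten indirection-table loop above walks the flow's queue list round-robin but advances past queues that are not started, bailing out if a full scan finds none. A simplified standalone version of that logic (array names and the started-state representation are assumptions):

```c
#include <stdbool.h>
#include <stdint.h>

static void fill_ind_table(uint16_t *tbl, unsigned int tbl_size,
			   const uint16_t *queue, unsigned int queue_num,
			   const bool *queue_started, unsigned int nr_rings)
{
	unsigned int rss_idx, fw_idx = 0, scan;

	for (rss_idx = 0; rss_idx < tbl_size; rss_idx++, fw_idx++) {
		uint16_t idx = 0;

		/* skip list entries whose queue is stopped */
		for (scan = 0; scan < nr_rings; scan++) {
			idx = queue[fw_idx % queue_num];
			if (queue_started[idx])
				break;
			fw_idx++;
		}
		if (scan == nr_rings)
			return; /* no started queue at all; give up */

		tbl[rss_idx] = idx;
	}
}
```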
+@@ -1293,13 +1328,6 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
+ 		}
+ 		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
+ 
+-		if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
+-			filter->flags =
+-				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
+-			filter->dst_id = act_q->index;
+-			goto skip_vnic_alloc;
+-		}
+-
+ 		vnic_id = attr->group;
+ 		if (!vnic_id) {
+ 			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
+@@ -1364,7 +1392,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
+ 		PMD_DRV_LOG(DEBUG,
+ 			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
+ 		filter->dst_id = vnic->fw_vnic_id;
+-skip_vnic_alloc:
++
+ 		/* For ntuple filter, create the L2 filter with default VNIC.
+ 		 * The user specified redirect queue will be set while creating
+ 		 * the ntuple filter in hardware.
+@@ -2063,10 +2091,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
+ 		}
+ 	}
+ 
+-	if (BNXT_RFS_NEEDS_VNIC(bp))
+-		vnic = find_matching_vnic(bp, filter);
+-	else
+-		vnic = BNXT_GET_DEFAULT_VNIC(bp);
++	vnic = find_matching_vnic(bp, filter);
+ done:
+ 	if (!ret || update_flow) {
+ 		flow->filter = filter;
+diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c
+index f53f8632fe..9c5257309a 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c
++++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c
+@@ -506,8 +506,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
+ 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ 	} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
+ 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+-		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
+-		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
++		req.num_mc_entries = rte_cpu_to_le_32(bp->nb_mc_addr);
++		req.mc_tbl_addr = rte_cpu_to_le_64(bp->mc_list_dma_addr);
+ 	}
+ 	if (vlan_table) {
+ 		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
+@@ -902,18 +902,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
+ 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ 	if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
+ 		bp->max_l2_ctx += bp->max_rx_em_flows;
+-	/* TODO: For now, do not support VMDq/RFS on VFs. */
+-	if (BNXT_PF(bp)) {
+-		if (bp->pf->max_vfs)
+-			bp->max_vnics = 1;
+-		else
+-			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+-	} else {
+-		bp->max_vnics = 1;
+-	}
++	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ 	PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
+ 		    bp->max_l2_ctx, bp->max_vnics);
+ 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
++	bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters);
++
+ 	if (BNXT_PF(bp)) {
+ 		bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+@@ -945,6 +939,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
+ 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+ 		PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n");
+ 	}
++
++	bp->tunnel_disable_flag = rte_le_to_cpu_16(resp->tunnel_disable_flag);
++	if (bp->tunnel_disable_flag)
++		PMD_DRV_LOG(DEBUG, "Tunnel parsing capability is disabled, flags : %#x\n",
++			    bp->tunnel_disable_flag);
+ unlock:
+ 	HWRM_UNLOCK();
+ 
+@@ -1250,11 +1249,6 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
+ 	else
+ 		HWRM_CHECK_RESULT();
+ 
+-	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) {
+-		rc = -EAGAIN;
+-		goto error;
+-	}
+-
+ 	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
+ 		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ 		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
+@@ -1430,20 +1424,21 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
+ 			}
+ 		}
+ 		/* AutoNeg - Advertise speeds specified. */
+-		if (conf->auto_link_speed_mask &&
++		if ((conf->auto_link_speed_mask || conf->auto_pam4_link_speed_mask) &&
+ 		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
+ 			req.auto_mode =
+ 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+-			req.auto_link_speed_mask =
+-				conf->auto_link_speed_mask;
+-			if (conf->auto_pam4_link_speeds) {
++			if (conf->auto_pam4_link_speed_mask) {
+ 				enables |=
+ 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
+ 				req.auto_link_pam4_speed_mask =
+-					conf->auto_pam4_link_speeds;
+-			} else {
++				rte_cpu_to_le_16(conf->auto_pam4_link_speed_mask);
++			}
++			if (conf->auto_link_speed_mask) {
+ 				enables |=
+ 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
++				req.auto_link_speed_mask =
++				rte_cpu_to_le_16(conf->auto_link_speed_mask);
+ 			}
+ 		}
+ 		if (conf->auto_link_speed &&
+@@ -1511,12 +1506,12 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
+ 	link_info->phy_ver[1] = resp->phy_min;
+ 	link_info->phy_ver[2] = resp->phy_bld;
+ 	link_info->link_signal_mode =
+-		rte_le_to_cpu_16(resp->active_fec_signal_mode);
++		resp->active_fec_signal_mode & HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
+ 	link_info->force_pam4_link_speed =
+ 			rte_le_to_cpu_16(resp->force_pam4_link_speed);
+ 	link_info->support_pam4_speeds =
+ 			rte_le_to_cpu_16(resp->support_pam4_speeds);
+-	link_info->auto_pam4_link_speeds =
++	link_info->auto_pam4_link_speed_mask =
+ 			rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
+ 	link_info->module_status = resp->module_status;
+ 	HWRM_UNLOCK();
+@@ -1527,7 +1522,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
+ 		    link_info->support_speeds, link_info->force_link_speed);
+ 	PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
+ 		    link_info->link_signal_mode,
+-		    link_info->auto_pam4_link_speeds,
++		    link_info->auto_pam4_link_speed_mask,
+ 		    link_info->support_pam4_speeds,
+ 		    link_info->force_pam4_link_speed);
+ 	return rc;
+@@ -2975,7 +2970,7 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+ }
+ 
+ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
+-					  uint16_t pam4_link)
++					  struct bnxt_link_info *link_info)
+ {
+ 	uint16_t eth_link_speed = 0;
+ 
+@@ -3014,18 +3009,29 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
+ 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
+ 		break;
+ 	case RTE_ETH_LINK_SPEED_50G:
+-		eth_link_speed = pam4_link ?
+-			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
+-			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
++		if (link_info->support_pam4_speeds &
++		    HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
++			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
++			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
++		} else {
++			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
++			link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
++		}
+ 		break;
+ 	case RTE_ETH_LINK_SPEED_100G:
+-		eth_link_speed = pam4_link ?
+-			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
+-			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
++		if (link_info->support_pam4_speeds &
++		    HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
++			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
++			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
++		} else {
++			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
++			link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
++		}
+ 		break;
+ 	case RTE_ETH_LINK_SPEED_200G:
+ 		eth_link_speed =
+ 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
++		link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
+ 		break;
+ 	default:
+ 		PMD_DRV_LOG(ERR,
+@@ -3229,9 +3235,11 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
+ 	if (!link_up)
+ 		goto port_phy_cfg;
+ 
++	/* Get user requested autoneg setting */
+ 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
++
+ 	if (BNXT_CHIP_P5(bp) &&
+-	    dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
++	    dev_conf->link_speeds & RTE_ETH_LINK_SPEED_40G) {
+ 		/* 40G is not supported as part of media auto detect.
+ 		 * The speed should be forced and autoneg disabled
+ 		 * to configure 40G speed.
+@@ -3240,24 +3248,27 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
+ 		autoneg = 0;
+ 	}
+ 
+-	/* No auto speeds and no auto_pam4_link. Disable autoneg */
+-	if (bp->link_info->auto_link_speed == 0 &&
+-	    bp->link_info->link_signal_mode &&
+-	    bp->link_info->auto_pam4_link_speeds == 0)
++	/* Override based on current Autoneg setting in PHY for 200G */
++	if (autoneg == 1 && BNXT_CHIP_P5(bp) && bp->link_info->auto_mode == 0 &&
++	    bp->link_info->force_pam4_link_speed ==
++	    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB) {
+ 		autoneg = 0;
++		PMD_DRV_LOG(DEBUG, "Disabling autoneg for 200G\n");
++	}
+ 
+ 	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
+-					  bp->link_info->link_signal_mode);
++					  bp->link_info);
+ 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
+ 	/* Autoneg can be done only when the FW allows. */
+-	if (autoneg == 1 && bp->link_info->support_auto_speeds) {
++	if (autoneg == 1 &&
++	    (bp->link_info->support_auto_speeds || bp->link_info->support_pam4_auto_speeds)) {
+ 		link_req.phy_flags |=
+ 				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
+ 		link_req.auto_link_speed_mask =
+ 			bnxt_parse_eth_link_speed_mask(bp,
+ 						       dev_conf->link_speeds);
+-		link_req.auto_pam4_link_speeds =
+-			bp->link_info->auto_pam4_link_speeds;
++		link_req.auto_pam4_link_speed_mask =
++			bp->link_info->auto_pam4_link_speed_mask;
+ 	} else {
+ 		if (bp->link_info->phy_type ==
+ 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+@@ -3276,21 +3287,21 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
+ 		else if (bp->link_info->force_pam4_link_speed)
+ 			link_req.link_speed =
+ 				bp->link_info->force_pam4_link_speed;
+-		else if (bp->link_info->auto_pam4_link_speeds)
++		else if (bp->link_info->force_link_speed)
++			link_req.link_speed = bp->link_info->force_link_speed;
++		else if (bp->link_info->auto_pam4_link_speed_mask)
+ 			link_req.link_speed =
+-				bp->link_info->auto_pam4_link_speeds;
++				bp->link_info->auto_pam4_link_speed_mask;
+ 		else if (bp->link_info->support_pam4_speeds)
+ 			link_req.link_speed =
+ 				bp->link_info->support_pam4_speeds;
+-		else if (bp->link_info->force_link_speed)
+-			link_req.link_speed = bp->link_info->force_link_speed;
+ 		else
+ 			link_req.link_speed = bp->link_info->auto_link_speed;
+ 		/* Auto PAM4 link speed is zero, but auto_link_speed is not
+ 		 * zero. Use the auto_link_speed.
+ 		 */
+ 		if (bp->link_info->auto_link_speed != 0 &&
+-		    bp->link_info->auto_pam4_link_speeds == 0)
++		    bp->link_info->auto_pam4_link_speed_mask == 0)
+ 			link_req.link_speed = bp->link_info->auto_link_speed;
+ 	}
+ 	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
+@@ -3491,7 +3502,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+ 			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
+ 	} else if (BNXT_HAS_NQ(bp)) {
+ 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
+-		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
++		req.num_msix = rte_cpu_to_le_16(pf_resc->num_nq_rings);
+ 	}
+ 
+ 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
+@@ -3504,7 +3515,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+ 	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
+ 	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
+ 	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
+-	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
++	req.num_vnics = rte_cpu_to_le_16(pf_resc->num_vnics);
+ 	req.fid = rte_cpu_to_le_16(0xffff);
+ 	req.enables = rte_cpu_to_le_32(enables);
+ 
+@@ -3541,14 +3552,12 @@ bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
+ 	req->min_rx_rings = req->max_rx_rings;
+ 	req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+ 	req->min_l2_ctxs = req->max_l2_ctxs;
+-	/* TODO: For now, do not support VMDq/RFS on VFs. */
+-	req->max_vnics = rte_cpu_to_le_16(1);
++	req->max_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
+ 	req->min_vnics = req->max_vnics;
+ 	req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
+ 						 (num_vfs + 1));
+ 	req->min_hw_ring_grps = req->max_hw_ring_grps;
+-	req->flags =
+-	 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
++	req->max_msix = rte_cpu_to_le_16(bp->max_nq_rings / (num_vfs + 1));
+ }
+ 
+ static void
+@@ -3608,6 +3617,8 @@ static int bnxt_update_max_resources(struct bnxt *bp,
+ 	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
+ 	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
+ 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
++	bp->max_nq_rings -= rte_le_to_cpu_16(resp->alloc_msix);
++	bp->max_vnics -= rte_le_to_cpu_16(resp->alloc_vnics);
+ 
+ 	HWRM_UNLOCK();
+ 
+@@ -3681,6 +3692,8 @@ static int bnxt_query_pf_resources(struct bnxt *bp,
+ 	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+ 	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+ 	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
++	pf_resc->num_nq_rings = rte_le_to_cpu_32(resp->alloc_msix);
++	pf_resc->num_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
+ 	bp->pf->evb_mode = resp->evb_mode;
+ 
+ 	HWRM_UNLOCK();
+@@ -3701,6 +3714,8 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
+ 		pf_resc->num_rx_rings = bp->max_rx_rings;
+ 		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
+ 		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
++		pf_resc->num_nq_rings = bp->max_nq_rings;
++		pf_resc->num_vnics = bp->max_vnics;
+ 
+ 		return;
+ 	}
+@@ -3719,6 +3734,10 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
+ 			       bp->max_l2_ctx % (num_vfs + 1);
+ 	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
+ 				    bp->max_ring_grps % (num_vfs + 1);
++	pf_resc->num_nq_rings = bp->max_nq_rings / (num_vfs + 1) +
++				bp->max_nq_rings % (num_vfs + 1);
++	pf_resc->num_vnics = bp->max_vnics / (num_vfs + 1) +
++				bp->max_vnics % (num_vfs + 1);
+ }
+ 
+ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
+@@ -3727,7 +3746,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
+ 	int rc;
+ 
+ 	if (!BNXT_PF(bp)) {
+-		PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
++		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -3894,6 +3913,8 @@ bnxt_update_pf_resources(struct bnxt *bp,
+ 	bp->max_tx_rings = pf_resc->num_tx_rings;
+ 	bp->max_rx_rings = pf_resc->num_rx_rings;
+ 	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
++	bp->max_nq_rings = pf_resc->num_nq_rings;
++	bp->max_vnics = pf_resc->num_vnics;
+ }
+ 
+ static int32_t
+@@ -6106,38 +6127,6 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
+ 	return rc;
+ }
+ 
+-int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+-{
+-	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+-					bp->hwrm_cmd_resp_addr;
+-	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+-	uint32_t flags = 0;
+-	int rc = 0;
+-
+-	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
+-		return 0;
+-
+-	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+-		PMD_DRV_LOG(DEBUG,
+-			    "Not a PF or trusted VF. Command not supported\n");
+-		return 0;
+-	}
+-
+-	HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
+-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+-
+-	HWRM_CHECK_RESULT();
+-	flags = rte_le_to_cpu_32(resp->flags);
+-	HWRM_UNLOCK();
+-
+-	if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
+-		bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
+-	else
+-		bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;
+-
+-	return rc;
+-}
+-
+ int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
+ 			    uint32_t echo_req_data2)
+ {
+@@ -6175,10 +6164,6 @@ int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
+ 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ 
+ 	HWRM_CHECK_RESULT_SILENT();
+-
+-	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
+-		rc = -EAGAIN;
+-
+ 	HWRM_UNLOCK();
+ 
+ 	return rc;
+@@ -6262,3 +6247,26 @@ int bnxt_hwrm_config_host_mtu(struct bnxt *bp)
+ 
+ 	return rc;
+ }
++
++int
++bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
++{
++	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
++	struct hwrm_vnic_rss_cfg_input req = {0};
++	int nr_ctxs = vnic->num_lb_ctxts;
++	int i, rc = 0;
++
++	for (i = 0; i < nr_ctxs; i++) {
++		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
++
++		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
++		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
++
++		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
++
++		HWRM_CHECK_RESULT();
++		HWRM_UNLOCK();
++	}
++
++	return rc;
++}
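With the changes above, bnxt_parse_eth_link_speed() consults the PHY's reported PAM4 support mask rather than a caller-supplied flag when choosing between NRZ and PAM4 encodings of the same nominal speed. A condensed sketch of that decision, using placeholder bit values in place of the HWRM encodings:

#include <stdint.h>
#include <stdio.h>

#define SUPPORT_PAM4_50G  0x1
#define SUPPORT_PAM4_100G 0x2

enum sig_mode { SIG_MODE_NRZ, SIG_MODE_PAM4 };

/* Prefer the PAM4 encoding of a forced speed only when the PHY reports
 * PAM4 support for it; 200G exists only as PAM4. */
static enum sig_mode pick_signal_mode(uint16_t pam4_support, int gbps)
{
    if (gbps == 200)
        return SIG_MODE_PAM4;
    if (gbps == 50 && (pam4_support & SUPPORT_PAM4_50G))
        return SIG_MODE_PAM4;
    if (gbps == 100 && (pam4_support & SUPPORT_PAM4_100G))
        return SIG_MODE_PAM4;
    return SIG_MODE_NRZ;
}

int main(void)
{
    printf("%s\n", pick_signal_mode(SUPPORT_PAM4_50G, 50) == SIG_MODE_PAM4 ?
           "50G: PAM4" : "50G: NRZ");
    printf("%s\n", pick_signal_mode(SUPPORT_PAM4_50G, 100) == SIG_MODE_PAM4 ?
           "100G: PAM4" : "100G: NRZ");
    return 0;
}
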
+diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.h b/dpdk/drivers/net/bnxt/bnxt_hwrm.h
+index f8f0556201..a82d9fb3ef 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_hwrm.h
++++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.h
+@@ -58,9 +58,6 @@ struct hwrm_func_qstats_output;
+ #define HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK \
+ 	HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK
+ 
+-#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED \
+-	HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED
+-
+ #define HWRM_SPEC_CODE_1_8_4		0x10804
+ #define HWRM_SPEC_CODE_1_9_0		0x10900
+ #define HWRM_SPEC_CODE_1_9_2		0x10902
+@@ -117,11 +114,36 @@ struct bnxt_pf_resource_info {
+ 	uint16_t num_rx_rings;
+ 	uint16_t num_cp_rings;
+ 	uint16_t num_l2_ctxs;
++	uint16_t num_nq_rings;
++	uint16_t num_vnics;
+ 	uint32_t num_hw_ring_grps;
+ };
+ 
+ #define BNXT_CTX_VAL_INVAL	0xFFFF
+ 
++#define BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp)		\
++	(!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN))
++#define BNXT_TUNNELED_OFFLOADS_CAP_NGE_EN(bp)		\
++	(!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_NGE))
++#define BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp)		\
++	(!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_GRE))
++#define BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp)	\
++	(!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP))
++
++/*
++ * If the device supports VXLAN, GRE, IPIP and GENEVE tunnel parsing, then report
++ * RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM and
++ * RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM in the Rx/Tx offload capabilities of the device.
++ */
++#define BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp)			\
++	(BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp) &&		\
++	 BNXT_TUNNELED_OFFLOADS_CAP_NGE_EN(bp)   &&		\
++	 BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp)   &&		\
++	 BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp))
++
++#define BNXT_SIG_MODE_NRZ	HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_NRZ
++#define BNXT_SIG_MODE_PAM4	HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4
++
+ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
+ 				   struct bnxt_vnic_info *vnic);
+ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+@@ -296,7 +318,6 @@ int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
+ int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, struct bnxt_representor *rep_bp);
+ int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep);
+ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep);
+-int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp);
+ int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
+ 			    uint32_t echo_req_data2);
+ int bnxt_hwrm_poll_ver_get(struct bnxt *bp);
+@@ -310,4 +331,5 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+ void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index);
+ int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index);
+ int bnxt_hwrm_config_host_mtu(struct bnxt *bp);
++int bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+ #endif
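The new BNXT_TUNNELED_OFFLOADS_CAP_* helpers invert the firmware's disable bits, and the ALL_EN() form gates the outer-checksum offloads. A toy model of that gating, assuming placeholder bit values in place of the HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_* defines:

#include <stdint.h>
#include <stdio.h>

#define DISABLE_VXLAN  0x1
#define DISABLE_NGE    0x2
#define DISABLE_GRE    0x10
#define DISABLE_IPINIP 0x20

/* A set bit means the parser is *disabled*, so "enabled" is the
 * negation; outer-checksum offloads are advertised only when every
 * relevant parser is enabled. */
static int all_tunnel_parsers_enabled(uint16_t disable_flag)
{
    return !(disable_flag &
             (DISABLE_VXLAN | DISABLE_NGE | DISABLE_GRE | DISABLE_IPINIP));
}

int main(void)
{
    printf("%d\n", all_tunnel_parsers_enabled(0));            /* 1 */
    printf("%d\n", all_tunnel_parsers_enabled(DISABLE_GRE));  /* 0 */
    return 0;
}
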
+diff --git a/dpdk/drivers/net/bnxt/bnxt_reps.c b/dpdk/drivers/net/bnxt/bnxt_reps.c
+index 22b76b72b9..299b4c24a8 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_reps.c
++++ b/dpdk/drivers/net/bnxt/bnxt_reps.c
+@@ -35,16 +35,20 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = {
+ uint16_t
+ bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
+ {
+-	struct rte_mbuf **prod_rx_buf;
++	struct bnxt_representor *vfr_bp = NULL;
+ 	struct bnxt_rx_ring_info *rep_rxr;
+-	struct bnxt_rx_queue *rep_rxq;
+ 	struct rte_eth_dev *vfr_eth_dev;
+-	struct bnxt_representor *vfr_bp;
++	struct rte_mbuf **prod_rx_buf;
++	struct bnxt_rx_queue *rep_rxq;
+ 	uint16_t mask;
+ 	uint8_t que;
+ 
+ 	vfr_eth_dev = &rte_eth_devices[port_id];
+-	vfr_bp = vfr_eth_dev->data->dev_private;
++	vfr_bp = vfr_eth_dev ? vfr_eth_dev->data->dev_private : NULL;
++
++	if (unlikely(vfr_bp == NULL))
++		return 1;
++
+ 	/* If rxq_id happens to be > nr_rings, use ring 0 */
+ 	que = queue_id < vfr_bp->rx_nr_rings ? queue_id : 0;
+ 	rep_rxq = vfr_bp->rx_queues[que];
+@@ -545,7 +549,10 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ 	dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
+ 	dev_info->max_hash_mac_addrs = 0;
+ 
+-	max_rx_rings = BNXT_MAX_VF_REP_RINGS;
++	max_rx_rings = parent_bp->rx_nr_rings ?
++		RTE_MIN(parent_bp->rx_nr_rings, BNXT_MAX_VF_REP_RINGS) :
++		BNXT_MAX_VF_REP_RINGS;
++
+ 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
+ 	dev_info->max_rx_queues = max_rx_rings;
+ 	dev_info->max_tx_queues = max_rx_rings;
+@@ -561,10 +568,8 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ 	dev_info->min_rx_bufsize = 1;
+ 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
+ 
+-	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
+-	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+-		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+-	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
++	dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(parent_bp);
++	dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(parent_bp);
+ 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
+ 
+ 	dev_info->switch_info.name = eth_dev->device->name;
+@@ -626,10 +631,10 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ 	struct rte_mbuf **buf_ring;
+ 	int rc = 0;
+ 
+-	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
++	if (queue_idx >= rep_bp->rx_nr_rings) {
+ 		PMD_DRV_LOG(ERR,
+ 			    "Cannot create Rx ring %d. %d rings available\n",
+-			    queue_idx, BNXT_MAX_VF_REP_RINGS);
++			    queue_idx, rep_bp->rx_nr_rings);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -726,10 +731,10 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ 	struct bnxt_tx_queue *parent_txq, *txq;
+ 	struct bnxt_vf_rep_tx_queue *vfr_txq;
+ 
+-	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
++	if (queue_idx >= rep_bp->rx_nr_rings) {
+ 		PMD_DRV_LOG(ERR,
+ 			    "Cannot create Tx rings %d. %d rings available\n",
+-			    queue_idx, BNXT_MAX_VF_REP_RINGS);
++			    queue_idx, rep_bp->rx_nr_rings);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -802,10 +807,10 @@ int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
+ 			     struct rte_eth_stats *stats)
+ {
+ 	struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
+-	int i;
++	unsigned int i;
+ 
+ 	memset(stats, 0, sizeof(*stats));
+-	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
++	for (i = 0; i < rep_bp->rx_nr_rings; i++) {
+ 		stats->obytes += rep_bp->tx_bytes[i];
+ 		stats->opackets += rep_bp->tx_pkts[i];
+ 		stats->ibytes += rep_bp->rx_bytes[i];
+@@ -825,9 +830,9 @@ int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
+ int bnxt_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
+ {
+ 	struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
+-	int i;
++	unsigned int i;
+ 
+-	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
++	for (i = 0; i < rep_bp->rx_nr_rings; i++) {
+ 		rep_bp->tx_pkts[i] = 0;
+ 		rep_bp->tx_bytes[i] = 0;
+ 		rep_bp->rx_pkts[i] = 0;
+diff --git a/dpdk/drivers/net/bnxt/bnxt_ring.c b/dpdk/drivers/net/bnxt/bnxt_ring.c
+index dc437f314e..4cdbb177d9 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_ring.c
++++ b/dpdk/drivers/net/bnxt/bnxt_ring.c
+@@ -752,6 +752,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
+ 		rc = bnxt_alloc_hwrm_rx_ring(bp, i);
+ 		if (rc)
+ 			goto err_out;
++		bnxt_hwrm_set_ring_coal(bp, &coal,
++					rxq->cp_ring->cp_ring_struct->fw_ring_id);
+ 	}
+ 
+ 	/* If something is wrong with Rx ring alloc, skip Tx ring alloc */
+@@ -851,6 +853,7 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
+ 	ring->ring_mask = ring->ring_size - 1;
+ 	ring->vmem_size = 0;
+ 	ring->vmem = NULL;
++	ring->fw_ring_id = INVALID_HW_RING_ID;
+ 
+ 	bp->async_cp_ring = cpr;
+ 	cpr->cp_ring_struct = ring;
+diff --git a/dpdk/drivers/net/bnxt/bnxt_rxq.c b/dpdk/drivers/net/bnxt/bnxt_rxq.c
+index 1456f8b54f..4f2e0e7376 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_rxq.c
++++ b/dpdk/drivers/net/bnxt/bnxt_rxq.c
+@@ -20,6 +20,32 @@
+  * RX Queues
+  */
+ 
++uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
++{
++	uint64_t rx_offload_capa;
++
++	rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
++			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
++			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
++			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
++			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
++			  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
++			  RTE_ETH_RX_OFFLOAD_TCP_LRO |
++			  RTE_ETH_RX_OFFLOAD_SCATTER |
++			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
++
++	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
++		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
++	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
++		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
++
++	if (BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp))
++		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
++					RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;
++
++	return rx_offload_capa;
++}
++
+ /* Determine whether the current configuration needs aggregation ring in HW. */
+ int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
+ {
+@@ -40,6 +66,7 @@ void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
+ int bnxt_mq_rx_configure(struct bnxt *bp)
+ {
+ 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
++	struct rte_eth_rss_conf *rss = &bp->rss_conf;
+ 	const struct rte_eth_vmdq_rx_conf *conf =
+ 		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
+ 	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
+@@ -147,32 +174,19 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
+ 
+ 	bp->rx_num_qs_per_vnic = nb_q_per_grp;
+ 
+-	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+-		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
+-
+-		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
+-			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
+-
+-		for (i = 0; i < bp->nr_vnics; i++) {
+-			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
+-
+-			vnic = &bp->vnic_info[i];
+-			vnic->hash_type =
+-				bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
+-			vnic->hash_mode =
+-				bnxt_rte_to_hwrm_hash_level(bp,
+-							    rss->rss_hf,
+-							    lvl);
+-
+-			/*
+-			 * Use the supplied key if the key length is
+-			 * acceptable and the rss_key is not NULL
+-			 */
+-			if (rss->rss_key &&
+-			    rss->rss_key_len <= HW_HASH_KEY_SIZE)
+-				memcpy(vnic->rss_hash_key,
+-				       rss->rss_key, rss->rss_key_len);
+-		}
++	for (i = 0; i < bp->nr_vnics; i++) {
++		uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
++
++		vnic = &bp->vnic_info[i];
++		vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
++		vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss->rss_hf, lvl);
++
++		/*
++		 * Use the supplied key if the key length is
++		 * acceptable and the rss_key is not NULL
++		 */
++		if (rss->rss_key && rss->rss_key_len <= HW_HASH_KEY_SIZE)
++			memcpy(vnic->rss_hash_key, rss->rss_key, rss->rss_key_len);
+ 	}
+ 
+ 	return rc;
+@@ -214,7 +228,9 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+ 		}
+ 	}
+ 	/* Free up mbufs in Agg ring */
+-	if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
++	if (rxq->bp == NULL ||
++	    rxq->bp->eth_dev == NULL ||
++	    !bnxt_need_agg_ring(rxq->bp->eth_dev))
+ 		return;
+ 
+ 	sw_ring = rxq->rx_ring->ag_buf_ring;
+@@ -387,10 +403,6 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ 	rxq->rx_started = rxq->rx_deferred_start ? false : true;
+ 	rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ 
+-	/* Configure mtu if it is different from what was configured before */
+-	if (!queue_idx)
+-		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+-
+ 	return 0;
+ err:
+ 	bnxt_rx_queue_release_op(eth_dev, queue_idx);
+@@ -472,10 +484,11 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ 	if (rc)
+ 		return rc;
+ 
+-	if (BNXT_CHIP_P5(bp)) {
+-		/* Reconfigure default receive ring and MRU. */
+-		bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
+-	}
++	if (BNXT_HAS_RING_GRPS(bp))
++		rxq->vnic->dflt_ring_grp = bp->grp_info[rx_queue_id].fw_grp_id;
++	/* Reconfigure default receive ring and MRU. */
++	bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
++
+ 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ 
+ 	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+@@ -574,6 +587,9 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ 		if (active_queue_cnt == 0) {
+ 			uint16_t saved_mru = vnic->mru;
+ 
++			/* clear RSS setting on vnic. */
++			bnxt_vnic_rss_clear_p5(bp, vnic);
++
+ 			vnic->mru = 0;
+ 			/* Reconfigure default receive ring and MRU. */
+ 			bnxt_hwrm_vnic_cfg(bp, vnic);
+diff --git a/dpdk/drivers/net/bnxt/bnxt_rxq.h b/dpdk/drivers/net/bnxt/bnxt_rxq.h
+index 0331c23810..287df8dff3 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_rxq.h
++++ b/dpdk/drivers/net/bnxt/bnxt_rxq.h
+@@ -65,4 +65,5 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
+ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
+ int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev);
+ void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq);
++uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp);
+ #endif
+diff --git a/dpdk/drivers/net/bnxt/bnxt_rxr.c b/dpdk/drivers/net/bnxt/bnxt_rxr.c
+index 44247d7200..b60c2470f3 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_rxr.c
++++ b/dpdk/drivers/net/bnxt/bnxt_rxr.c
+@@ -824,6 +824,9 @@ void bnxt_set_mark_in_mbuf(struct bnxt *bp,
+ {
+ 	uint32_t cfa_code = 0;
+ 
++	if (unlikely(bp->mark_table == NULL))
++		return;
++
+ 	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+ 	if (!cfa_code)
+ 		return;
+@@ -1408,6 +1411,9 @@ int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
+ 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+ 
++		if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, ring_mask + 1))
++			break;
++
+ 		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
+ 			return 1;
+ 
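bnxt_flush_rx_cmp() (and bnxt_flush_tx_cmp() later in this patch) now bail out once bnxt_cpr_cmp_valid() reports a stale entry. A rough model of that phase check, assuming a power-of-two ring and an illustrative field layout:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8  /* power of two, stands in for ring_mask + 1 */

/* An entry is valid when its V bit matches the phase expected for the
 * current raw cursor; the expected phase flips on every ring wrap. */
static int cmp_valid(uint32_t info3_v, uint32_t raw_cons, uint32_t ring_size)
{
    uint32_t expected = !(raw_cons & ring_size);

    return (info3_v & 1) == expected;
}

int main(void)
{
    printf("%d\n", cmp_valid(1, 3, RING_SIZE));              /* 1: first lap */
    printf("%d\n", cmp_valid(1, RING_SIZE + 3, RING_SIZE));  /* 0: stale */
    return 0;
}
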
+diff --git a/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+index f15e2d3b4e..611fbadb08 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
++++ b/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+@@ -235,25 +235,38 @@ recv_burst_vec_neon(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 		}
+ 
+ 		/*
+-		 * Load the four current descriptors into SSE registers in
+-		 * reverse order to ensure consistent state.
++		 * Load the four current descriptors into NEON registers.
++		 * IO barriers are used to ensure consistent state.
+ 		 */
+ 		rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
+ 		rte_io_rmb();
++		/* Reload lower 64b of descriptors to make it ordered after info3_v. */
++		rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
++				((void *)&cpr->cp_desc_ring[cons + 7],
++				vreinterpretq_u64_u32(rxcmp1[3]), 0));
+ 		rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);
+ 
+ 		rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
+ 		rte_io_rmb();
++		rxcmp1[2] = vreinterpretq_u32_u64(vld1q_lane_u64
++				((void *)&cpr->cp_desc_ring[cons + 5],
++				vreinterpretq_u64_u32(rxcmp1[2]), 0));
+ 		rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);
+ 
+ 		t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));
+ 
+ 		rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
+ 		rte_io_rmb();
++		rxcmp1[1] = vreinterpretq_u32_u64(vld1q_lane_u64
++				((void *)&cpr->cp_desc_ring[cons + 3],
++				vreinterpretq_u64_u32(rxcmp1[1]), 0));
+ 		rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);
+ 
+ 		rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
+ 		rte_io_rmb();
++		rxcmp1[0] = vreinterpretq_u32_u64(vld1q_lane_u64
++				((void *)&cpr->cp_desc_ring[cons + 1],
++				vreinterpretq_u64_u32(rxcmp1[0]), 0));
+ 		rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);
+ 
+ 		t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));
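The NEON loop above now reloads the low 64 bits of each descriptor after rte_io_rmb() so that the payload read is ordered after the valid-bit read on weakly ordered CPUs. A plain C11 stand-in for that load/fence/reload pattern (the struct layout is illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct desc {
    _Atomic uint32_t info3_v;   /* valid bit lives here, written last */
    uint32_t payload;
};

/* Read the word holding the valid bit, fence, then read the payload so
 * it cannot be older than the valid bit that was observed. */
static int read_desc(struct desc *d, uint32_t *out)
{
    uint32_t v = atomic_load_explicit(&d->info3_v, memory_order_relaxed);

    if (!(v & 1))
        return 0;                                   /* not yet valid */
    atomic_thread_fence(memory_order_acquire);      /* like rte_io_rmb() */
    *out = d->payload;                              /* ordered after fence */
    return 1;
}

int main(void)
{
    struct desc d = { .payload = 42 };
    uint32_t out = 0;

    atomic_store_explicit(&d.info3_v, 1, memory_order_release);
    return read_desc(&d, &out) && out == 42 ? 0 : 1;
}
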
+diff --git a/dpdk/drivers/net/bnxt/bnxt_stats.c b/dpdk/drivers/net/bnxt/bnxt_stats.c
+index 991eafc644..208aa5616d 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_stats.c
++++ b/dpdk/drivers/net/bnxt/bnxt_stats.c
+@@ -741,7 +741,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+ 					(bp->fw_tx_port_stats_ext_size /
+ 					 stat_size));
+ 
+-	memset(xstats, 0, sizeof(*xstats));
++	memset(xstats, 0, sizeof(*xstats) * n);
+ 
+ 	count = 0;
+ 	for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+@@ -846,7 +846,7 @@ int bnxt_flow_stats_cnt(struct bnxt *bp)
+ 
+ int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
+ 		struct rte_eth_xstat_name *xstats_names,
+-		__rte_unused unsigned int limit)
++		unsigned int size)
+ {
+ 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ 	const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+@@ -862,63 +862,62 @@ int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
+ 	if (rc)
+ 		return rc;
+ 
+-	if (xstats_names != NULL) {
+-		count = 0;
++	if (xstats_names == NULL || size < stat_cnt)
++		return stat_cnt;
+ 
+-		for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+-			strlcpy(xstats_names[count].name,
+-				bnxt_rx_stats_strings[i].name,
+-				sizeof(xstats_names[count].name));
+-			count++;
+-		}
++	for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
++		strlcpy(xstats_names[count].name,
++			bnxt_rx_stats_strings[i].name,
++			sizeof(xstats_names[count].name));
++		count++;
++	}
+ 
+-		for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+-			strlcpy(xstats_names[count].name,
+-				bnxt_tx_stats_strings[i].name,
+-				sizeof(xstats_names[count].name));
+-			count++;
+-		}
++	for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
++		strlcpy(xstats_names[count].name,
++			bnxt_tx_stats_strings[i].name,
++			sizeof(xstats_names[count].name));
++		count++;
++	}
+ 
+-		for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
+-			strlcpy(xstats_names[count].name,
+-				bnxt_func_stats_strings[i].name,
+-				sizeof(xstats_names[count].name));
+-			count++;
+-		}
++	for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
++		strlcpy(xstats_names[count].name,
++			bnxt_func_stats_strings[i].name,
++			sizeof(xstats_names[count].name));
++		count++;
++	}
+ 
+-		for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) {
+-			strlcpy(xstats_names[count].name,
+-				bnxt_rx_ext_stats_strings[i].name,
+-				sizeof(xstats_names[count].name));
++	for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) {
++		strlcpy(xstats_names[count].name,
++			bnxt_rx_ext_stats_strings[i].name,
++			sizeof(xstats_names[count].name));
+ 
+-			count++;
+-		}
++		count++;
++	}
+ 
+-		for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) {
+-			strlcpy(xstats_names[count].name,
+-				bnxt_tx_ext_stats_strings[i].name,
+-				sizeof(xstats_names[count].name));
++	for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) {
++		strlcpy(xstats_names[count].name,
++			bnxt_tx_ext_stats_strings[i].name,
++			sizeof(xstats_names[count].name));
+ 
+-			count++;
+-		}
++		count++;
++	}
+ 
+-		if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
+-		    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
+-		    BNXT_FLOW_XSTATS_EN(bp)) {
+-			for (i = 0; i < bp->max_l2_ctx; i++) {
+-				char buf[RTE_ETH_XSTATS_NAME_SIZE];
++	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
++	    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
++	    BNXT_FLOW_XSTATS_EN(bp)) {
++		for (i = 0; i < bp->max_l2_ctx; i++) {
++			char buf[RTE_ETH_XSTATS_NAME_SIZE];
+ 
+-				sprintf(buf, "flow_%d_bytes", i);
+-				strlcpy(xstats_names[count].name, buf,
+-					sizeof(xstats_names[count].name));
+-				count++;
++			sprintf(buf, "flow_%d_bytes", i);
++			strlcpy(xstats_names[count].name, buf,
++				sizeof(xstats_names[count].name));
++			count++;
+ 
+-				sprintf(buf, "flow_%d_packets", i);
+-				strlcpy(xstats_names[count].name, buf,
+-					sizeof(xstats_names[count].name));
++			sprintf(buf, "flow_%d_packets", i);
++			strlcpy(xstats_names[count].name, buf,
++				sizeof(xstats_names[count].name));
+ 
+-				count++;
+-			}
++			count++;
+ 		}
+ 	}
+ 
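The rework above makes the names callback honor the usual ethdev two-call contract. A sketch of that contract from the caller's side; get_names() is a stand-in for the real callback, not the driver's symbol:

#include <stdio.h>
#include <stdlib.h>

struct name { char str[64]; };

static const char *table[] = { "rx_64b_frames", "rx_crc_errors" };

/* First call with a NULL array (or one that is too small) returns the
 * required entry count; a sufficiently sized second call fills it. */
static int get_names(struct name *names, unsigned int size)
{
    unsigned int cnt = sizeof(table) / sizeof(table[0]);

    if (names == NULL || size < cnt)
        return (int)cnt;          /* report required size only */
    for (unsigned int i = 0; i < cnt; i++)
        snprintf(names[i].str, sizeof(names[i].str), "%s", table[i]);
    return (int)cnt;
}

int main(void)
{
    int n = get_names(NULL, 0);                /* query size */
    struct name *names = malloc(sizeof(*names) * n);

    if (names == NULL)
        return 1;
    get_names(names, n);                       /* fill */
    for (int i = 0; i < n; i++)
        puts(names[i].str);
    free(names);
    return 0;
}
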
+diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c
+index 72a55ea643..c8745add5e 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_txq.c
++++ b/dpdk/drivers/net/bnxt/bnxt_txq.c
+@@ -17,6 +17,35 @@
+  * TX Queues
+  */
+ 
++uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp)
++{
++	uint64_t tx_offload_capa;
++
++	tx_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
++			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
++			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
++			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
++			  RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
++			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
++
++	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
++		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
++
++	if (BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp))
++		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
++
++	if (BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp))
++		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
++	if (BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp))
++		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
++	if (BNXT_TUNNELED_OFFLOADS_CAP_NGE_EN(bp))
++		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
++	if (BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp))
++		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO;
++
++	return tx_offload_capa;
++}
++
+ void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
+ {
+ 	if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
+diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.h b/dpdk/drivers/net/bnxt/bnxt_txq.h
+index 67fd4cbebb..f3a03812ad 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_txq.h
++++ b/dpdk/drivers/net/bnxt/bnxt_txq.h
+@@ -43,4 +43,5 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ 			       uint16_t nb_desc,
+ 			       unsigned int socket_id,
+ 			       const struct rte_eth_txconf *tx_conf);
++uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp);
+ #endif
+diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c
+index e2b7e40571..3b8f2382f9 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_txr.c
++++ b/dpdk/drivers/net/bnxt/bnxt_txr.c
+@@ -602,6 +602,9 @@ int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr)
+ 		cons = RING_CMPL(ring_mask, raw_cons);
+ 		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
+ 
++		if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
++			break;
++
+ 		opaque = rte_cpu_to_le_32(txcmp->opaque);
+ 		raw_cons = NEXT_RAW_CMP(raw_cons);
+ 
+diff --git a/dpdk/drivers/net/bnxt/bnxt_vnic.c b/dpdk/drivers/net/bnxt/bnxt_vnic.c
+index c63cf4b943..b3c03a2af5 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_vnic.c
++++ b/dpdk/drivers/net/bnxt/bnxt_vnic.c
+@@ -98,23 +98,16 @@ void bnxt_free_vnic_attributes(struct bnxt *bp)
+ 
+ 	for (i = 0; i < bp->max_vnics; i++) {
+ 		vnic = &bp->vnic_info[i];
+-		if (vnic->rss_table) {
+-			/* 'Unreserve' the rss_table */
+-			/* N/A */
+-
+-			vnic->rss_table = NULL;
+-		}
+-
+-		if (vnic->rss_hash_key) {
+-			/* 'Unreserve' the rss_hash_key */
+-			/* N/A */
+-
++		if (vnic->rss_mz != NULL) {
++			rte_memzone_free(vnic->rss_mz);
++			vnic->rss_mz = NULL;
+ 			vnic->rss_hash_key = NULL;
++			vnic->rss_table = NULL;
+ 		}
+ 	}
+ }
+ 
+-int bnxt_alloc_vnic_attributes(struct bnxt *bp)
++int bnxt_alloc_vnic_attributes(struct bnxt *bp, bool reconfig)
+ {
+ 	struct bnxt_vnic_info *vnic;
+ 	struct rte_pci_device *pdev = bp->pdev;
+@@ -122,12 +115,10 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+ 	char mz_name[RTE_MEMZONE_NAMESIZE];
+ 	uint32_t entry_length;
+ 	size_t rss_table_size;
+-	uint16_t max_vnics;
+ 	int i;
+ 	rte_iova_t mz_phys_addr;
+ 
+-	entry_length = HW_HASH_KEY_SIZE +
+-		       BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN;
++	entry_length = HW_HASH_KEY_SIZE;
+ 
+ 	if (BNXT_CHIP_P5(bp))
+ 		rss_table_size = BNXT_RSS_TBL_SIZE_P5 *
+@@ -137,43 +128,42 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+ 
+ 	entry_length = RTE_CACHE_LINE_ROUNDUP(entry_length + rss_table_size);
+ 
+-	max_vnics = bp->max_vnics;
+-	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+-		 "bnxt_" PCI_PRI_FMT "_vnicattr", pdev->addr.domain,
+-		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+-	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+-	mz = rte_memzone_lookup(mz_name);
+-	if (!mz) {
+-		mz = rte_memzone_reserve(mz_name,
+-				entry_length * max_vnics,
+-				bp->eth_dev->device->numa_node,
+-				RTE_MEMZONE_2MB |
+-				RTE_MEMZONE_SIZE_HINT_ONLY |
+-				RTE_MEMZONE_IOVA_CONTIG);
+-		if (!mz)
+-			return -ENOMEM;
+-	}
+-	mz_phys_addr = mz->iova;
+-
+-	for (i = 0; i < max_vnics; i++) {
++	for (i = 0; i < bp->max_vnics; i++) {
+ 		vnic = &bp->vnic_info[i];
+ 
++		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
++			 "bnxt_" PCI_PRI_FMT "_vnicattr_%d", pdev->addr.domain,
++			 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, i);
++		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
++		mz = rte_memzone_lookup(mz_name);
++		if (mz == NULL) {
++			mz = rte_memzone_reserve(mz_name,
++						 entry_length,
++						 bp->eth_dev->device->numa_node,
++						 RTE_MEMZONE_2MB |
++						 RTE_MEMZONE_SIZE_HINT_ONLY |
++						 RTE_MEMZONE_IOVA_CONTIG);
++			if (mz == NULL) {
++				PMD_DRV_LOG(ERR, "Cannot allocate bnxt vnic_attributes memory\n");
++				return -ENOMEM;
++			}
++		}
++		vnic->rss_mz = mz;
++		mz_phys_addr = mz->iova;
++
+ 		/* Allocate rss table and hash key */
+-		vnic->rss_table =
+-			(void *)((char *)mz->addr + (entry_length * i));
++		vnic->rss_table = (void *)((char *)mz->addr);
++		vnic->rss_table_dma_addr = mz_phys_addr;
+ 		memset(vnic->rss_table, -1, entry_length);
+ 
+-		vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i);
+-		vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
+-					      rss_table_size);
+-
+-		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr +
+-					      rss_table_size;
+-		vnic->mc_list = (void *)((char *)vnic->rss_hash_key +
+-				HW_HASH_KEY_SIZE);
+-		vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr +
+-				HW_HASH_KEY_SIZE;
+-		bnxt_prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
++		vnic->rss_hash_key = (void *)((char *)vnic->rss_table + rss_table_size);
++		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + rss_table_size;
++		if (!reconfig) {
++			bnxt_prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
++			memcpy(bp->rss_conf.rss_key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
++		} else {
++			memcpy(vnic->rss_hash_key, bp->rss_conf.rss_key, HW_HASH_KEY_SIZE);
++		}
+ 	}
+ 
+ 	return 0;
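Besides moving to one memzone per VNIC, the hunk above makes hash-key generation reconfig-aware: a random key is rolled only on first allocation and cached, and a reconfiguration restores the cached key. A small sketch of that behavior; rand_bytes() stands in for bnxt_prandom_bytes() and the key size is illustrative:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define KEY_SIZE 40  /* stands in for HW_HASH_KEY_SIZE */

static void rand_bytes(unsigned char *buf, size_t len)
{
    for (size_t i = 0; i < len; i++)
        buf[i] = (unsigned char)(rand() & 0xff);
}

static void setup_key(unsigned char *vnic_key, unsigned char *saved_key,
                      bool reconfig)
{
    if (!reconfig) {
        rand_bytes(vnic_key, KEY_SIZE);          /* first-time setup */
        memcpy(saved_key, vnic_key, KEY_SIZE);   /* cache for later */
    } else {
        memcpy(vnic_key, saved_key, KEY_SIZE);   /* restore, don't reroll */
    }
}

int main(void)
{
    unsigned char vnic_key[KEY_SIZE], saved[KEY_SIZE];

    setup_key(vnic_key, saved, false);
    setup_key(vnic_key, saved, true);
    return memcmp(vnic_key, saved, KEY_SIZE) != 0;  /* 0: key preserved */
}
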
+diff --git a/dpdk/drivers/net/bnxt/bnxt_vnic.h b/dpdk/drivers/net/bnxt/bnxt_vnic.h
+index 37b452f281..9055b93c4b 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_vnic.h
++++ b/dpdk/drivers/net/bnxt/bnxt_vnic.h
+@@ -28,14 +28,11 @@ struct bnxt_vnic_info {
+ 	uint16_t	mru;
+ 	uint16_t	hash_type;
+ 	uint8_t		hash_mode;
++	const struct rte_memzone *rss_mz;
+ 	rte_iova_t	rss_table_dma_addr;
+ 	uint16_t	*rss_table;
+ 	rte_iova_t	rss_hash_key_dma_addr;
+ 	void		*rss_hash_key;
+-	rte_iova_t	mc_list_dma_addr;
+-	char		*mc_list;
+-	uint32_t	mc_addr_cnt;
+-#define BNXT_MAX_MC_ADDRS		16
+ 	uint32_t	flags;
+ #define BNXT_VNIC_INFO_PROMISC			(1 << 0)
+ #define BNXT_VNIC_INFO_ALLMULTI			(1 << 1)
+@@ -64,7 +61,7 @@ int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp);
+ void bnxt_free_all_vnics(struct bnxt *bp);
+ void bnxt_free_vnic_attributes(struct bnxt *bp);
+-int bnxt_alloc_vnic_attributes(struct bnxt *bp);
++int bnxt_alloc_vnic_attributes(struct bnxt *bp, bool reconfig);
+ void bnxt_free_vnic_mem(struct bnxt *bp);
+ int bnxt_alloc_vnic_mem(struct bnxt *bp);
+ int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+diff --git a/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h b/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h
+index 88624f8129..f1fd5c8c6e 100644
+--- a/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h
++++ b/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h
+@@ -12221,7 +12221,66 @@ struct hwrm_func_qcaps_output {
+ 	 * function call for allocating Key Contexts.
+ 	 */
+ 	uint16_t	max_key_ctxs_alloc;
+-	uint8_t	unused_1[7];
++	uint32_t	flags_ext2;
++	/*
++	 * When this bit is '1', it indicates that FW will support
++	 * timestamping on all RX packets, not just PTP type packets.
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED \
++		UINT32_C(0x1)
++	/* When this bit is '1', it indicates that HW and FW support QUIC. */
++	#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_QUIC_SUPPORTED \
++		UINT32_C(0x2)
++	uint16_t	tunnel_disable_flag;
++	/*
++	 * When this bit is '1', it indicates that the VXLAN parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN \
++		UINT32_C(0x1)
++	/*
++	 * When this bit is '1', it indicates that the NGE parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_NGE \
++		UINT32_C(0x2)
++	/*
++	 * When this bit is '1', it indicates that the NVGRE parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE \
++		UINT32_C(0x4)
++	/*
++	 * When this bit is '1', it indicates that the L2GRE parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE \
++		UINT32_C(0x8)
++	/*
++	 * When this bit is '1', it indicates that the GRE parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_GRE \
++		UINT32_C(0x10)
++	/*
++	 * When this bit is '1', it indicates that the IPINIP parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP \
++		UINT32_C(0x20)
++	/*
++	 * When this bit is '1', it indicates that the MPLS parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_MPLS \
++		UINT32_C(0x40)
++	/*
++	 * When this bit is '1', it indicates that the PPPOE parsing
++	 * is disabled in hardware
++	 */
++	#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE \
++		UINT32_C(0x80)
++	uint8_t	unused_1;
+ 	/*
+ 	 * This field is used in Output records to indicate that the output
+ 	 * is completely written to RAM.  This field should be read as '1'
+diff --git a/dpdk/drivers/net/bnxt/tf_core/tf_session.c b/dpdk/drivers/net/bnxt/tf_core/tf_session.c
+index 9f849a0a76..c30c0e7029 100644
+--- a/dpdk/drivers/net/bnxt/tf_core/tf_session.c
++++ b/dpdk/drivers/net/bnxt/tf_core/tf_session.c
+@@ -230,10 +230,12 @@ tf_session_create(struct tf *tfp,
+ 			    "FW Session close failed, rc:%s\n",
+ 			    strerror(-rc));
+ 	}
++	if (tfp->session) {
++		tfp_free(tfp->session->core_data);
++		tfp_free(tfp->session);
++		tfp->session = NULL;
++	}
+ 
+-	tfp_free(tfp->session->core_data);
+-	tfp_free(tfp->session);
+-	tfp->session = NULL;
+ 	return rc;
+ }
+ 
+diff --git a/dpdk/drivers/net/bnxt/tf_core/tfp.c b/dpdk/drivers/net/bnxt/tf_core/tfp.c
+index a4b0934610..a967a9ccf2 100644
+--- a/dpdk/drivers/net/bnxt/tf_core/tfp.c
++++ b/dpdk/drivers/net/bnxt/tf_core/tfp.c
+@@ -52,7 +52,7 @@ tfp_send_msg_direct(struct bnxt *bp,
+ }
+ 
+ /**
+- * Allocates zero'ed memory from the heap.
++ * Allocates zeroed memory from the heap.
+  *
+  * Returns success or failure code.
+  */
+diff --git a/dpdk/drivers/net/bnxt/tf_core/tfp.h b/dpdk/drivers/net/bnxt/tf_core/tfp.h
+index dd0a347058..5a99c7a06e 100644
+--- a/dpdk/drivers/net/bnxt/tf_core/tfp.h
++++ b/dpdk/drivers/net/bnxt/tf_core/tfp.h
+@@ -150,7 +150,7 @@ tfp_msg_hwrm_oem_cmd(struct tf *tfp,
+ 		     uint32_t max_flows);
+ 
+ /**
+- * Allocates zero'ed memory from the heap.
++ * Allocates zeroed memory from the heap.
+  *
+  * NOTE: Also performs virt2phy address conversion by default thus is
+  * can be expensive to invoke.
+diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+index f4274dd634..9edf3e8799 100644
+--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+@@ -1096,7 +1096,7 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
+ 						   hdr.fragment_offset),
+ 			      ulp_deference_struct(ipv4_mask,
+ 						   hdr.fragment_offset),
+-			      ULP_PRSR_ACT_DEFAULT);
++			      ULP_PRSR_ACT_MASK_IGNORE);
+ 
+ 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
+ 	ulp_rte_prsr_fld_mask(params, &idx, size,
+diff --git a/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h b/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h
+index 9b5738afee..a5e1fffea1 100644
+--- a/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h
++++ b/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h
+@@ -20,7 +20,7 @@
+ /** Maximum number of LACP packets from one slave queued in TX ring. */
+ #define BOND_MODE_8023AX_SLAVE_TX_PKTS        1
+ /**
+- * Timeouts deffinitions (5.4.4 in 802.1AX documentation).
++ * Timeouts definitions (5.4.4 in 802.1AX documentation).
+  */
+ #define BOND_8023AD_FAST_PERIODIC_MS                900
+ #define BOND_8023AD_SLOW_PERIODIC_MS              29000
+diff --git a/dpdk/drivers/net/bonding/eth_bond_private.h b/dpdk/drivers/net/bonding/eth_bond_private.h
+index 8b104b6391..8222e3cd38 100644
+--- a/dpdk/drivers/net/bonding/eth_bond_private.h
++++ b/dpdk/drivers/net/bonding/eth_bond_private.h
+@@ -139,7 +139,7 @@ struct bond_dev_private {
+ 
+ 	uint16_t slave_count;			/**< Number of bonded slaves */
+ 	struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
+-	/**< Arary of bonded slaves details */
++	/**< Array of bonded slaves details */
+ 
+ 	struct mode8023ad_private mode4;
+ 	uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
+@@ -240,12 +240,16 @@ slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ 		uint16_t slave_port_id);
+ 
+ int
+-bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
++bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, uint8_t mode);
+ 
+ int
+ slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ 		struct rte_eth_dev *slave_eth_dev);
+ 
++int
++slave_start(struct rte_eth_dev *bonded_eth_dev,
++		struct rte_eth_dev *slave_eth_dev);
++
+ void
+ slave_remove(struct bond_dev_private *internals,
+ 		struct rte_eth_dev *slave_eth_dev);
+diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
+index ca50583d62..b3cddd8a20 100644
+--- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
++++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
+@@ -243,7 +243,7 @@ record_default(struct port *port)
+ {
+ 	/* Record default parameters for partner. Partner admin parameters
+ 	 * are not implemented so set them to arbitrary default (last known) and
+-	 * mark actor that parner is in defaulted state. */
++	 * mark actor that partner is in defaulted state. */
+ 	port->partner_state = STATE_LACP_ACTIVE;
+ 	ACTOR_STATE_SET(port, DEFAULTED);
+ }
+@@ -300,7 +300,7 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
+ 		MODE4_DEBUG("LACP -> CURRENT\n");
+ 		BOND_PRINT_LACP(lacp);
+ 		/* Update selected flag. If partner parameters are defaulted assume they
+-		 * are match. If not defaulted  compare LACP actor with ports parner
++		 * match. If not defaulted, compare LACP actor with the port's partner
+ 		 * params. */
+ 		if (!ACTOR_STATE(port, DEFAULTED) &&
+ 			(ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
+@@ -399,16 +399,16 @@ periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
+ 		PARTNER_STATE(port, LACP_ACTIVE);
+ 
+ 	uint8_t is_partner_fast, was_partner_fast;
+-	/* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */
++	/* No periodic is on BEGIN, LACP DISABLE or when both sides are passive */
+ 	if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
+ 		timer_cancel(&port->periodic_timer);
+ 		timer_force_expired(&port->tx_machine_timer);
+ 		SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
+ 
+ 		MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
+-			SM_FLAG(port, BEGIN) ? "begind " : "",
++			SM_FLAG(port, BEGIN) ? "begin " : "",
+ 			SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
+-			active ? "LACP active " : "LACP pasive ");
++			active ? "LACP active " : "LACP passive ");
+ 		return;
+ 	}
+ 
+@@ -495,10 +495,10 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
+ 	if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
+ 		!PARTNER_STATE(port, SYNCHRONIZATION)) {
+ 		/* If in COLLECTING or DISTRIBUTING state and partner becomes out of
+-		 * sync transit to ATACHED state.  */
++		 * sync, transition to ATTACHED state. */
+ 		ACTOR_STATE_CLR(port, DISTRIBUTING);
+ 		ACTOR_STATE_CLR(port, COLLECTING);
+-		/* Clear actor sync to activate transit ATACHED in condition bellow */
++		/* Clear actor sync to activate transition to ATTACHED in the condition below */
+ 		ACTOR_STATE_CLR(port, SYNCHRONIZATION);
+ 		MODE4_DEBUG("Out of sync -> ATTACHED\n");
+ 	}
+@@ -696,7 +696,7 @@ selection_logic(struct bond_dev_private *internals, uint16_t slave_id)
+ 	/* Search for aggregator suitable for this port */
+ 	for (i = 0; i < slaves_count; ++i) {
+ 		agg = &bond_mode_8023ad_ports[slaves[i]];
+-		/* Skip ports that are not aggreagators */
++		/* Skip ports that are not aggregators */
+ 		if (agg->aggregator_port_id != slaves[i])
+ 			continue;
+ 
+@@ -921,7 +921,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
+ 
+ 			SM_FLAG_SET(port, BEGIN);
+ 
+-			/* LACP is disabled on half duples or link is down */
++			/* LACP is disabled on half duplex or link is down */
+ 			if (SM_FLAG(port, LACP_ENABLED)) {
+ 				/* If port was enabled set it to BEGIN state */
+ 				SM_FLAG_CLR(port, LACP_ENABLED);
+@@ -1069,7 +1069,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
+ 	port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
+ 	port->sm_flags = SM_FLAGS_BEGIN;
+ 
+-	/* use this port as agregator */
++	/* use this port as aggregator */
+ 	port->aggregator_port_id = slave_id;
+ 
+ 	if (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) {
+diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h
+index 11a71a55e5..7eb392f8c8 100644
+--- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h
++++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h
+@@ -68,7 +68,7 @@ struct port_params {
+ 	struct rte_ether_addr system;
+ 	/**< System ID - Slave MAC address, same as bonding MAC address */
+ 	uint16_t key;
+-	/**< Speed information (implementation dependednt) and duplex. */
++	/**< Speed information (implementation dependent) and duplex. */
+ 	uint16_t port_priority;
+ 	/**< Priority of this (unused in current implementation) */
+ 	uint16_t port_number;
+@@ -317,7 +317,7 @@ rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id);
+  * @param port_id Bonding device id
+  *
+  * @return
+- *   agregator mode on success, negative value otherwise
++ *   aggregator mode on success, negative value otherwise
+  */
+ int
+ rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);
+diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_alb.h b/dpdk/drivers/net/bonding/rte_eth_bond_alb.h
+index 386e70c594..4e9aeda9bc 100644
+--- a/dpdk/drivers/net/bonding/rte_eth_bond_alb.h
++++ b/dpdk/drivers/net/bonding/rte_eth_bond_alb.h
+@@ -96,7 +96,7 @@ bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset,
+  * @param internals		Bonding data.
+  *
+  * @return
+- * Index of slawe on which packet should be sent.
++ * Index of slave on which packet should be sent.
+  */
+ uint16_t
+ bond_mode_alb_arp_upd(struct client_data *client_info,
+diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_api.c b/dpdk/drivers/net/bonding/rte_eth_bond_api.c
+index 84943cffe2..919c580fb8 100644
+--- a/dpdk/drivers/net/bonding/rte_eth_bond_api.c
++++ b/dpdk/drivers/net/bonding/rte_eth_bond_api.c
+@@ -375,7 +375,7 @@ eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
+ 	 * value. Thus, the new internal value of default Rx queue offloads
+ 	 * has to be masked by rx_queue_offload_capa to make sure that only
+ 	 * commonly supported offloads are preserved from both the previous
+-	 * value and the value being inhereted from the new slave device.
++	 * value and the value being inherited from the new slave device.
+ 	 */
+ 	rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
+ 			     internals->rx_queue_offload_capa;
+@@ -413,7 +413,7 @@ eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
+ 	 * value. Thus, the new internal value of default Tx queue offloads
+ 	 * has to be masked by tx_queue_offload_capa to make sure that only
+ 	 * commonly supported offloads are preserved from both the previous
+-	 * value and the value being inhereted from the new slave device.
++	 * value and the value being inherited from the new slave device.
+ 	 */
+ 	txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
+ 			     internals->tx_queue_offload_capa;
+@@ -566,6 +566,12 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
+ 					slave_port_id);
+ 			return -1;
+ 		}
++		if (slave_start(bonded_eth_dev, slave_eth_dev) != 0) {
++			internals->slave_count--;
++			RTE_BOND_LOG(ERR, "rte_bond_slaves_start: port=%d",
++					slave_port_id);
++			return -1;
++		}
+ 	}
+ 
+ 	/* Update all slave devices MACs */
+@@ -668,7 +674,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
+ 		}
+ 
+ 	if (slave_idx < 0) {
+-		RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
++		RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %u",
+ 				internals->slave_count);
+ 		return -1;
+ 	}
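The hunk above depends on the configure/start split introduced further down in this patch: device setup is divided into a configure phase and a start phase (queue setup and flow rules), and hot-adding a slave to a running bond must run both, unwinding on failure. A toy model of that sequencing; all names are illustrative, not the bonding driver's:

#include <stdio.h>

struct dev { int configured; int started; };

static int dev_configure(struct dev *d) { d->configured = 1; return 0; }

static int dev_start(struct dev *d)
{
    if (!d->configured)
        return -1;             /* start before configure is a bug */
    d->started = 1;
    return 0;
}

static int slave_add(struct dev *slave, int bond_running)
{
    if (dev_configure(slave) != 0)
        return -1;
    /* Only a running bond starts the new slave immediately. */
    if (bond_running && dev_start(slave) != 0)
        return -1;             /* caller rolls back the slave count */
    return 0;
}

int main(void)
{
    struct dev s = { 0, 0 };
    return slave_add(&s, 1) == 0 && s.started ? 0 : 1;
}
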
+diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c
+index 84f4900ee5..9b3acde46c 100644
+--- a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c
++++ b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c
+@@ -1318,7 +1318,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
+ 
+ 	/* Increment reference count on mbufs */
+ 	for (i = 0; i < nb_pkts; i++)
+-		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
++		rte_pktmbuf_refcnt_update(bufs[i], num_of_slaves - 1);
+ 
+ 	/* Transmit burst on each active slave */
+ 	for (i = 0; i < num_of_slaves; i++) {
+@@ -1554,7 +1554,7 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
+ }
+ 
+ int
+-bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
++bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, uint8_t mode)
+ {
+ 	struct bond_dev_private *internals;
+ 
+@@ -1678,14 +1678,10 @@ int
+ slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ 		struct rte_eth_dev *slave_eth_dev)
+ {
+-	struct bond_rx_queue *bd_rx_q;
+-	struct bond_tx_queue *bd_tx_q;
+ 	uint16_t nb_rx_queues;
+ 	uint16_t nb_tx_queues;
+ 
+ 	int errval;
+-	uint16_t q_id;
+-	struct rte_flow_error flow_error;
+ 
+ 	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ 
+@@ -1711,19 +1707,32 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ 				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ 		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
+ 				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
++	} else {
++		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
++		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
++		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
++		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
++				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
+ 	}
+ 
+-	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
+-			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+-		slave_eth_dev->data->dev_conf.rxmode.offloads |=
+-				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+-	else
+-		slave_eth_dev->data->dev_conf.rxmode.offloads &=
+-				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+-
+ 	slave_eth_dev->data->dev_conf.rxmode.mtu =
+ 			bonded_eth_dev->data->dev_conf.rxmode.mtu;
+ 
++	slave_eth_dev->data->dev_conf.txmode.offloads |=
++		bonded_eth_dev->data->dev_conf.txmode.offloads;
++
++	slave_eth_dev->data->dev_conf.txmode.offloads &=
++		(bonded_eth_dev->data->dev_conf.txmode.offloads |
++		~internals->tx_offload_capa);
++
++	slave_eth_dev->data->dev_conf.rxmode.offloads |=
++		bonded_eth_dev->data->dev_conf.rxmode.offloads;
++
++	slave_eth_dev->data->dev_conf.rxmode.offloads &=
++		(bonded_eth_dev->data->dev_conf.rxmode.offloads |
++		~internals->rx_offload_capa);
++
++
+ 	nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
+ 	nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
+ 
+@@ -1734,14 +1743,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ 		}
+ 	}
+ 
+-	errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
+-				     bonded_eth_dev->data->mtu);
+-	if (errval != 0 && errval != -ENOTSUP) {
+-		RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
+-				slave_eth_dev->data->port_id, errval);
+-		return errval;
+-	}
+-
+ 	/* Configure device */
+ 	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
+ 			nb_rx_queues, nb_tx_queues,
+@@ -1752,6 +1753,27 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ 		return errval;
+ 	}
+ 
++	errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
++				     bonded_eth_dev->data->mtu);
++	if (errval != 0 && errval != -ENOTSUP) {
++		RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
++				slave_eth_dev->data->port_id, errval);
++		return errval;
++	}
++	return 0;
++}
++
++int
++slave_start(struct rte_eth_dev *bonded_eth_dev,
++		struct rte_eth_dev *slave_eth_dev)
++{
++	int errval = 0;
++	struct bond_rx_queue *bd_rx_q;
++	struct bond_tx_queue *bd_tx_q;
++	uint16_t q_id;
++	struct rte_flow_error flow_error;
++	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
++
+ 	/* Setup Rx Queues */
+ 	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
+ 		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
+@@ -1799,10 +1821,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ 			return errval;
+ 		}
+ 
+-		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
+-			rte_flow_destroy(slave_eth_dev->data->port_id,
++		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL) {
++			errval = rte_flow_destroy(slave_eth_dev->data->port_id,
+ 					internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
+ 					&flow_error);
++			RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_destroy: port=%d, err (%d)",
++				slave_eth_dev->data->port_id, errval);
++		}
+ 
+ 		errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
+ 				slave_eth_dev->data->port_id);
+@@ -1994,6 +2019,13 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
+ 				internals->slaves[i].port_id);
+ 			goto out_err;
+ 		}
++		if (slave_start(eth_dev, slave_ethdev) != 0) {
++			RTE_BOND_LOG(ERR,
++				"bonded port (%d) failed to start slave device (%d)",
++				eth_dev->data->port_id,
++				internals->slaves[i].port_id);
++			goto out_err;
++		}
+ 		/* We will need to poll for link status if any slave doesn't
+ 		 * support interrupts
+ 		 */
+@@ -2092,18 +2124,20 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
+ 	internals->link_status_polling_enabled = 0;
+ 	for (i = 0; i < internals->slave_count; i++) {
+ 		uint16_t slave_id = internals->slaves[i].port_id;
++
++		internals->slaves[i].last_link_status = 0;
++		ret = rte_eth_dev_stop(slave_id);
++		if (ret != 0) {
++			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
++				     slave_id);
++			return ret;
++		}
++
++		/* Active slaves need to be deactivated. */
+ 		if (find_slave_by_id(internals->active_slaves,
+ 				internals->active_slave_count, slave_id) !=
+-						internals->active_slave_count) {
+-			internals->slaves[i].last_link_status = 0;
+-			ret = rte_eth_dev_stop(slave_id);
+-			if (ret != 0) {
+-				RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
+-					     slave_id);
+-				return ret;
+-			}
++					internals->active_slave_count)
+ 			deactivate_slave(eth_dev, slave_id);
+-		}
+ 	}
+ 
+ 	return 0;
+@@ -2128,6 +2162,7 @@ bond_ethdev_close(struct rte_eth_dev *dev)
+ 			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
+ 				     port_id);
+ 			skipped++;
++			continue;
+ 		}
+ 
+ 		if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
+@@ -2684,6 +2719,39 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
+ 	return ret;
+ }
+ 
++static int
++bond_ethdev_promiscuous_update(struct rte_eth_dev *dev)
++{
++	struct bond_dev_private *internals = dev->data->dev_private;
++	uint16_t port_id = internals->current_primary_port;
++
++	switch (internals->mode) {
++	case BONDING_MODE_ROUND_ROBIN:
++	case BONDING_MODE_BALANCE:
++	case BONDING_MODE_BROADCAST:
++	case BONDING_MODE_8023AD:
++		/* As promiscuous mode is propagated to all slaves for these
++		 * modes, no need to update the bonding device.
++		 */
++		break;
++	case BONDING_MODE_ACTIVE_BACKUP:
++	case BONDING_MODE_TLB:
++	case BONDING_MODE_ALB:
++	default:
++		/* As promiscuous mode is propagated only to the primary slave
++		 * for these modes, on an active/standby switchover the
++		 * promiscuous mode of the new primary slave must be updated
++		 * to match the bonding device.
++		 */
++		if (rte_eth_promiscuous_get(internals->port_id) == 1)
++			rte_eth_promiscuous_enable(port_id);
++		else
++			rte_eth_promiscuous_disable(port_id);
++	}
++
++	return 0;
++}
++
+ static int
+ bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+ {
+@@ -2797,6 +2865,39 @@ bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+ 	return ret;
+ }
+ 
++static int
++bond_ethdev_allmulticast_update(struct rte_eth_dev *dev)
++{
++	struct bond_dev_private *internals = dev->data->dev_private;
++	uint16_t port_id = internals->current_primary_port;
++
++	switch (internals->mode) {
++	case BONDING_MODE_ROUND_ROBIN:
++	case BONDING_MODE_BALANCE:
++	case BONDING_MODE_BROADCAST:
++	case BONDING_MODE_8023AD:
++		/* As allmulticast mode is propagated to all slaves for these
++		 * modes, no need to update the bonding device.
++		 */
++		break;
++	case BONDING_MODE_ACTIVE_BACKUP:
++	case BONDING_MODE_TLB:
++	case BONDING_MODE_ALB:
++	default:
++		/* As allmulticast mode is propagated only to the primary slave
++		 * for these modes, on an active/standby switchover the
++		 * allmulticast mode of the new primary slave must be updated
++		 * to match the bonding device.
++		 */
++		if (rte_eth_allmulticast_get(internals->port_id) == 1)
++			rte_eth_allmulticast_enable(port_id);
++		else
++			rte_eth_allmulticast_disable(port_id);
++	}
++
++	return 0;
++}
++
+ static void
+ bond_ethdev_delayed_lsc_propagation(void *arg)
+ {
+@@ -2886,6 +2987,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ 			lsc_flag = 1;
+ 
+ 			mac_address_slaves_update(bonded_eth_dev);
++			bond_ethdev_promiscuous_update(bonded_eth_dev);
++			bond_ethdev_allmulticast_update(bonded_eth_dev);
+ 		}
+ 
+ 		activate_slave(bonded_eth_dev, port_id);
+@@ -2915,6 +3018,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ 			else
+ 				internals->current_primary_port = internals->primary_port;
+ 			mac_address_slaves_update(bonded_eth_dev);
++			bond_ethdev_promiscuous_update(bonded_eth_dev);
++			bond_ethdev_allmulticast_update(bonded_eth_dev);
+ 		}
+ 	}
+ 
+@@ -3293,7 +3398,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
+ 	/* Set mode 4 default configuration */
+ 	bond_mode_8023ad_setup(eth_dev, NULL);
+ 	if (bond_ethdev_mode_set(eth_dev, mode)) {
+-		RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
++		RTE_BOND_LOG(ERR, "Failed to set bonded device %u mode to %u",
+ 				 eth_dev->data->port_id, mode);
+ 		goto err;
+ 	}
+@@ -3483,6 +3588,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
+ 	const char *name = dev->device->name;
+ 	struct bond_dev_private *internals = dev->data->dev_private;
+ 	struct rte_kvargs *kvlist = internals->kvlist;
++	uint64_t offloads;
+ 	int arg_count;
+ 	uint16_t port_id = dev - rte_eth_devices;
+ 	uint8_t agg_mode;
+@@ -3504,6 +3610,11 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
+ 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
+ 		struct rte_eth_rss_conf *rss_conf =
+ 			&dev->data->dev_conf.rx_adv_conf.rss_conf;
++
++		if (internals->rss_key_len == 0) {
++			internals->rss_key_len = sizeof(default_rss_key);
++		}
++
+ 		if (rss_conf->rss_key != NULL) {
+ 			if (internals->rss_key_len > rss_conf->rss_key_len) {
+ 				RTE_BOND_LOG(ERR, "Invalid rss key length(%u)",
+@@ -3515,13 +3626,18 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
+ 			       internals->rss_key_len);
+ 		} else {
+ 			if (internals->rss_key_len > sizeof(default_rss_key)) {
+-				RTE_BOND_LOG(ERR,
+-				       "There is no suitable default hash key");
+-				return -EINVAL;
++				/*
++				 * If the rss_key includes standard_rss_key and
++				 * extended_hash_key, the rss key length will be
++				 * larger than the default rss key length, so the
++				 * hash key must be re-calculated.
++				 */
++				for (i = 0; i < internals->rss_key_len; i++)
++					internals->rss_key[i] = (uint8_t)rte_rand();
++			} else {
++				memcpy(internals->rss_key, default_rss_key,
++					internals->rss_key_len);
+ 			}
+-
+-			memcpy(internals->rss_key, default_rss_key,
+-			       internals->rss_key_len);
+ 		}
+ 
+ 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
+@@ -3533,6 +3649,16 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
+ 		}
+ 	}
+ 
++	offloads = dev->data->dev_conf.txmode.offloads;
++	if ((offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
++			(internals->mode == BONDING_MODE_8023AD ||
++			internals->mode == BONDING_MODE_BROADCAST)) {
++		RTE_BOND_LOG(WARNING,
++			"bond mode broadcast & 8023AD don't support MBUF_FAST_FREE offload, force disable it.");
++		offloads &= ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
++		dev->data->dev_conf.txmode.offloads = offloads;
++	}
++
+ 	/* set the max_rx_pktlen */
+ 	internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
+ 
+@@ -3765,6 +3891,18 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
+ 		return -1;
+ 	}
+ 
++	/* Configure slaves so we can pass the MTU setting */
++	for (i = 0; i < internals->slave_count; i++) {
++		struct rte_eth_dev *slave_ethdev =
++				&(rte_eth_devices[internals->slaves[i].port_id]);
++		if (slave_configure(dev, slave_ethdev) != 0) {
++			RTE_BOND_LOG(ERR,
++				"bonded port (%d) failed to configure slave device (%d)",
++				dev->data->port_id,
++				internals->slaves[i].port_id);
++			return -1;
++		}
++	}
+ 	return 0;
+ }
+ 
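/* Hedged sketch, not part of the applied patch: the broadcast Tx fix
 * above swaps rte_mbuf_refcnt_update() (first segment only) for
 * rte_pktmbuf_refcnt_update(), which walks every segment of a chained
 * mbuf. Broadcast mode hands each packet to every active slave, and
 * each slave's Tx path frees its own reference, so the count must be
 * raised by (slaves - 1) up front. Queue id 0 below is an assumption.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>

static uint16_t
broadcast_tx(const uint16_t *slaves, uint16_t num_slaves,
	     struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	uint16_t i, sent, max_sent = 0;

	/* One extra reference per additional slave, on all segments. */
	for (i = 0; i < nb_pkts; i++)
		rte_pktmbuf_refcnt_update(bufs[i], num_slaves - 1);

	for (i = 0; i < num_slaves; i++) {
		sent = rte_eth_tx_burst(slaves[i], 0, bufs, nb_pkts);
		if (sent > max_sent)
			max_sent = sent;
		/* The real PMD also drops the references it took for
		 * packets a slave failed to send; omitted here. */
	}
	return max_sent;
}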
+diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev.c b/dpdk/drivers/net/cnxk/cn10k_ethdev.c
+index 8378cbffc2..c36b858110 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_ethdev.c
++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev.c
+@@ -313,6 +313,12 @@ cn10k_nix_configure(struct rte_eth_dev *eth_dev)
+ 	if (rc)
+ 		return rc;
+ 
++	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
++	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
++		/* Register callback to handle security error work */
++		roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
++	}
++
+ 	/* Update offload flags */
+ 	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
+ 	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);
+diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev.h b/dpdk/drivers/net/cnxk/cn10k_ethdev.h
+index c2a46ad7ec..13403e14c4 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_ethdev.h
++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev.h
+@@ -53,7 +53,7 @@ struct cn10k_outb_priv_data {
+ 	void *userdata;
+ 	/* Rlen computation data */
+ 	struct cnxk_ipsec_outb_rlens rlens;
+-	/* Back pinter to eth sec session */
++	/* Back pointer to eth sec session */
+ 	struct cnxk_eth_sec_sess *eth_sec;
+ 	/* SA index */
+ 	uint32_t sa_idx;
+@@ -82,6 +82,9 @@ void cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
+ /* Security context setup */
+ void cn10k_eth_sec_ops_override(void);
+ 
++/* SSO Work callback */
++void cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args);
++
+ #define LMT_OFF(lmt_addr, lmt_num, offset)                                     \
+ 	(void *)((uintptr_t)(lmt_addr) +                                       \
+ 		 ((uint64_t)(lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
+diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c
+index 235c16840e..f84a0fe80c 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c
++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c
+@@ -138,7 +138,21 @@ static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
+ 	}
+ };
+ 
+-static void
++static inline void
++cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
++{
++	struct rte_mbuf *next;
++
++	if (!mbuf)
++		return;
++	do {
++		next = mbuf->next;
++		roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
++		mbuf = next;
++	} while (mbuf != NULL);
++}
++
++void
+ cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
+ {
+ 	struct rte_eth_event_ipsec_desc desc;
+@@ -148,6 +162,7 @@ cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
+ 	struct cpt_cn10k_res_s *res;
+ 	struct rte_eth_dev *eth_dev;
+ 	struct cnxk_eth_dev *dev;
++	static uint64_t warn_cnt;
+ 	uint16_t dlen_adj, rlen;
+ 	struct rte_mbuf *mbuf;
+ 	uintptr_t sa_base;
+@@ -161,7 +176,7 @@ cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
+ 		/* Event from inbound inline dev due to IPSEC packet bad L4 */
+ 		mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
+ 		plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
+-		rte_pktmbuf_free(mbuf);
++		cnxk_pktmbuf_free_no_cache(mbuf);
+ 		return;
+ 	case RTE_EVENT_TYPE_CPU:
+ 		/* Check for subtype */
+@@ -212,17 +227,29 @@ cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
+ 	case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
+ 		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
+ 		break;
++	case ROC_IE_OT_UCC_ERR_PKT_IP:
++		warn_cnt++;
++		if (warn_cnt % 10000 == 0)
++			plt_warn("Outbound error, bad ip pkt, mbuf %p,"
++				 " sa_index %u (total warnings %" PRIu64 ")",
++				 mbuf, sess_priv.sa_idx, warn_cnt);
++		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
++		break;
+ 	default:
+-		plt_warn("Outbound error, mbuf %p, sa_index %u, "
+-			 "compcode %x uc %x", mbuf, sess_priv.sa_idx,
+-			 res->compcode, res->uc_compcode);
++		warn_cnt++;
++		if (warn_cnt % 10000 == 0)
++			plt_warn("Outbound error, mbuf %p, sa_index %u,"
++				 " compcode %x uc %x,"
++				 " (total warnings %" PRIu64 ")",
++				 mbuf, sess_priv.sa_idx, res->compcode,
++				 res->uc_compcode, warn_cnt);
+ 		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
+ 		break;
+ 	}
+ 
+ 	desc.metadata = (uint64_t)priv->userdata;
+ 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
+-	rte_pktmbuf_free(mbuf);
++	cnxk_pktmbuf_free_no_cache(mbuf);
+ }
+ 
+ static int
+@@ -249,9 +276,6 @@ cn10k_eth_sec_session_create(void *device,
+ 	if (rte_security_dynfield_register() < 0)
+ 		return -ENOTSUP;
+ 
+-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+-		roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
+-
+ 	ipsec = &conf->ipsec;
+ 	crypto = conf->crypto_xform;
+ 	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
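/* Hedged sketch, not part of the applied patch: two ideas from the hunk
 * above. First, warnings from the SSO error-work callback are
 * rate-limited with a static counter, so an error storm produces one
 * log line per 10000 events (note the first 9999 are silent). Second,
 * mbufs are returned straight to the NPA aura rather than through
 * rte_pktmbuf_free(), because the callback may run on a thread with no
 * valid mempool per-lcore cache. The rate limiter alone looks like:
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static void
report_outbound_error(uint32_t sa_idx)
{
	static uint64_t warn_cnt;

	warn_cnt++;
	if (warn_cnt % 10000 == 0)
		fprintf(stderr,
			"outbound error, sa_index %u (total %" PRIu64 ")\n",
			sa_idx, warn_cnt);
}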
+diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.c b/dpdk/drivers/net/cnxk/cn10k_rx.c
+index 5d603514c0..94b0dfcde7 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_rx.c
++++ b/dpdk/drivers/net/cnxk/cn10k_rx.c
+@@ -31,6 +31,10 @@ pick_rx_func(struct rte_eth_dev *eth_dev,
+ 		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)]
+ 		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_RSS_F)];
+ 
++	if (eth_dev->data->dev_started)
++		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
++		    eth_dev->rx_pkt_burst;
++
+ 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ }
+ 
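/* Hedged sketch, not part of the applied patch: since DPDK 21.11 the
 * application fast path calls through rte_eth_fp_ops[], a per-port
 * snapshot of the burst functions. A driver that swaps
 * eth_dev->rx_pkt_burst while the port is running must refresh that
 * snapshot as well, which is what the hunk above adds:
 */
#include <ethdev_driver.h>	/* struct rte_eth_dev, rte_eth_fp_ops */
#include <rte_atomic.h>

static void
set_rx_burst(struct rte_eth_dev *eth_dev, eth_rx_burst_t burst)
{
	eth_dev->rx_pkt_burst = burst;

	/* A started port is already being polled through the snapshot,
	 * so update it in place before publishing. */
	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
			eth_dev->rx_pkt_burst;

	rte_atomic_thread_fence(__ATOMIC_RELEASE);
}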
+diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h
+index fe408907a6..5806392322 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_rx.h
++++ b/dpdk/drivers/net/cnxk/cn10k_rx.h
+@@ -363,7 +363,13 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
+ 	*(uint64_t *)(&mbuf->rearm_data) = val;
+ 
+ 	if (flag & NIX_RX_MULTI_SEG_F)
+-		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
++		/*
++		 * For multi-segment packets, mbuf length correction for the
++		 * Rx timestamp length is handled later, during timestamp
++		 * data processing.
++		 * Hence, the flag argument is not required.
++		 */
++		nix_cqe_xtract_mseg(rx, mbuf, val, 0);
+ 	else
+ 		mbuf->next = NULL;
+ }
+@@ -451,7 +457,6 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
+ 				      flags);
+ 		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
+ 					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
+-					(flags & NIX_RX_MULTI_SEG_F),
+ 					(uint64_t *)((uint8_t *)mbuf
+ 								+ data_off));
+ 		rx_pkts[packets++] = mbuf;
+@@ -481,10 +486,11 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
+ 	plt_write64((wdata | nb_pkts), rxq->cq_door);
+ 
+ 	/* Free remaining meta buffers if any */
+-	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
++	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff)
+ 		nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);
+-		plt_io_wmb();
+-	}
++
++	if (flags & NIX_RX_OFFLOAD_SECURITY_F)
++		rte_io_wmb();
+ 
+ 	return nb_pkts;
+ }
+diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.c b/dpdk/drivers/net/cnxk/cn10k_tx.c
+index 5e6c5ee111..4e1abf7804 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_tx.c
++++ b/dpdk/drivers/net/cnxk/cn10k_tx.c
+@@ -37,6 +37,10 @@ pick_tx_func(struct rte_eth_dev *eth_dev,
+ 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+ 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
++
++	if (eth_dev->data->dev_started)
++		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst =
++		    eth_dev->tx_pkt_burst;
+ }
+ 
+ void
+diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h
+index 873e1871f9..6704d2d655 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_tx.h
++++ b/dpdk/drivers/net/cnxk/cn10k_tx.h
+@@ -736,7 +736,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
+ 			/* Retrieving the default desc values */
+ 			lmt[off] = cmd[2];
+ 
+-			/* Using compiler barier to avoid voilation of C
++			/* Using compiler barrier to avoid violation of C
+ 			 * aliasing rules.
+ 			 */
+ 			rte_compiler_barrier();
+@@ -745,7 +745,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
+ 		/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
+ 		 * should not be recorded, hence changing the alg type to
+ 		 * NIX_SENDMEMALG_SET and also changing send mem addr field to
+-		 * next 8 bytes as it corrpt the actual tx tstamp registered
++		 * next 8 bytes as it corrupts the actual Tx tstamp registered
+ 		 * address.
+ 		 */
+ 		send_mem->w0.subdc = NIX_SUBDC_MEM;
+@@ -849,8 +849,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
+ 	uintptr_t pa, lbase = txq->lmt_base;
+ 	uint16_t lmt_id, burst, left, i;
+ 	uintptr_t c_lbase = lbase;
++	uint64_t lso_tun_fmt = 0;
+ 	rte_iova_t c_io_addr;
+-	uint64_t lso_tun_fmt;
+ 	uint16_t c_lmt_id;
+ 	uint64_t sa_base;
+ 	uintptr_t laddr;
+@@ -976,9 +976,9 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 	uint16_t segdw, lmt_id, burst, left, i;
+ 	uint8_t lnum, c_lnum, c_loff;
+ 	uintptr_t c_lbase = lbase;
++	uint64_t lso_tun_fmt = 0;
+ 	uint64_t data0, data1;
+ 	rte_iova_t c_io_addr;
+-	uint64_t lso_tun_fmt;
+ 	uint8_t shft, c_shft;
+ 	__uint128_t data128;
+ 	uint16_t c_lmt_id;
+@@ -2254,7 +2254,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 		}
+ 
+ 		if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+-			/* Tx ol_flag for timestam. */
++			/* Tx ol_flag for timestamp. */
+ 			const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
+ 						RTE_MBUF_F_TX_IEEE1588_TMST};
+ 			/* Set send mem alg to SUB. */
+diff --git a/dpdk/drivers/net/cnxk/cn9k_rx.c b/dpdk/drivers/net/cnxk/cn9k_rx.c
+index 8d504c4a6d..60baf10b39 100644
+--- a/dpdk/drivers/net/cnxk/cn9k_rx.c
++++ b/dpdk/drivers/net/cnxk/cn9k_rx.c
+@@ -31,6 +31,10 @@ pick_rx_func(struct rte_eth_dev *eth_dev,
+ 		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)]
+ 		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_RSS_F)];
+ 
++	if (eth_dev->data->dev_started)
++		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
++		    eth_dev->rx_pkt_burst;
++
+ 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ }
+ 
+diff --git a/dpdk/drivers/net/cnxk/cn9k_rx.h b/dpdk/drivers/net/cnxk/cn9k_rx.h
+index 225bb4197c..848df4190c 100644
+--- a/dpdk/drivers/net/cnxk/cn9k_rx.h
++++ b/dpdk/drivers/net/cnxk/cn9k_rx.h
+@@ -341,14 +341,19 @@ cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
+ 		ol_flags =
+ 			nix_update_match_id(rx->cn9k.match_id, ol_flags, mbuf);
+ 
++	mbuf->ol_flags = ol_flags;
++	*(uint64_t *)(&mbuf->rearm_data) = val;
+ 	mbuf->pkt_len = len;
+ 	mbuf->data_len = len;
+-	*(uint64_t *)(&mbuf->rearm_data) = val;
+-
+-	mbuf->ol_flags = ol_flags;
+ 
+ 	if (flag & NIX_RX_MULTI_SEG_F)
+-		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
++		/*
++		 * For multi-segment packets, mbuf length correction for the
++		 * Rx timestamp length is handled later, during timestamp
++		 * data processing.
++		 * Hence, the flag argument is not required.
++		 */
++		nix_cqe_xtract_mseg(rx, mbuf, val, 0);
+ 	else
+ 		mbuf->next = NULL;
+ }
+@@ -413,7 +418,6 @@ cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
+ 				     flags);
+ 		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
+ 					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
+-					(flags & NIX_RX_MULTI_SEG_F),
+ 					(uint64_t *)((uint8_t *)mbuf
+ 								+ data_off));
+ 		rx_pkts[packets++] = mbuf;
+diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.c b/dpdk/drivers/net/cnxk/cn9k_tx.c
+index f3f19fed97..f560286c97 100644
+--- a/dpdk/drivers/net/cnxk/cn9k_tx.c
++++ b/dpdk/drivers/net/cnxk/cn9k_tx.c
+@@ -36,6 +36,10 @@ pick_tx_func(struct rte_eth_dev *eth_dev,
+ 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+ 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
++
++	if (eth_dev->data->dev_started)
++		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst =
++		    eth_dev->tx_pkt_burst;
+ }
+ 
+ void
+diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.h b/dpdk/drivers/net/cnxk/cn9k_tx.h
+index 435dde1317..8167313a15 100644
+--- a/dpdk/drivers/net/cnxk/cn9k_tx.h
++++ b/dpdk/drivers/net/cnxk/cn9k_tx.h
+@@ -304,7 +304,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
+ 			/* Retrieving the default desc values */
+ 			cmd[off] = send_mem_desc[6];
+ 
+-			/* Using compiler barier to avoid voilation of C
++			/* Using compiler barrier to avoid violation of C
+ 			 * aliasing rules.
+ 			 */
+ 			rte_compiler_barrier();
+@@ -313,7 +313,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
+ 		/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
+ 		 * should not be recorded, hence changing the alg type to
+ 		 * NIX_SENDMEMALG_SET and also changing send mem addr field to
+-		 * next 8 bytes as it corrpt the actual tx tstamp registered
++		 * next 8 bytes as it corrupts the actual Tx tstamp registered
+ 		 * address.
+ 		 */
+ 		send_mem->w0.cn9k.alg =
+@@ -465,8 +465,8 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
+ {
+ 	struct cn9k_eth_txq *txq = tx_queue;
+ 	const rte_iova_t io_addr = txq->io_addr;
++	uint64_t lso_tun_fmt = 0;
+ 	void *lmt_addr = txq->lmt_addr;
+-	uint64_t lso_tun_fmt;
+ 	uint16_t i;
+ 
+ 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
+@@ -506,8 +506,8 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ {
+ 	struct cn9k_eth_txq *txq = tx_queue;
+ 	const rte_iova_t io_addr = txq->io_addr;
++	uint64_t lso_tun_fmt = 0;
+ 	void *lmt_addr = txq->lmt_addr;
+-	uint64_t lso_tun_fmt;
+ 	uint16_t segdw;
+ 	uint64_t i;
+ 
+@@ -1531,7 +1531,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 		}
+ 
+ 		if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+-			/* Tx ol_flag for timestam. */
++			/* Tx ol_flag for timestamp. */
+ 			const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
+ 						RTE_MBUF_F_TX_IEEE1588_TMST};
+ 			/* Set send mem alg to SUB. */
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c
+index 74f625553d..a1da90be57 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c
+@@ -3,6 +3,8 @@
+  */
+ #include <cnxk_ethdev.h>
+ 
++#include <rte_eventdev.h>
++
+ static inline uint64_t
+ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
+ {
+@@ -177,7 +179,7 @@ nix_meter_fini(struct cnxk_eth_dev *dev)
+ 	struct roc_nix *nix = &dev->nix;
+ 	struct roc_nix_rq *rq;
+ 	uint32_t i;
+-	int rc;
++	int rc = 0;
+ 
+ 	RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
+ 		for (i = 0; i < mtr->rq_num; i++) {
+@@ -597,6 +599,13 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ 	rxq_sp->qconf.mp = mp;
+ 
+ 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
++		/* Pass a tag mask used to handle error packets in the inline
++		 * device. The ethdev rq's tag_mask field will be overwritten
++		 * later when the SSO is set up.
++		 */
++		rq->tag_mask =
++			0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
++
+ 		/* Setup rq reference for inline dev if present */
+ 		rc = roc_nix_inl_dev_rq_get(rq);
+ 		if (rc)
+@@ -1122,6 +1131,10 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
+ 		goto fail_configure;
+ 	}
+ 
++	/* Check if PTP is enabled in the PF owning this VF */
++	if (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix)))
++		dev->ptp_en = roc_nix_ptp_is_enable(nix);
++
+ 	dev->npc.channel = roc_nix_get_base_chan(nix);
+ 
+ 	nb_rxq = data->nb_rx_queues;
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.h b/dpdk/drivers/net/cnxk/cnxk_ethdev.h
+index 5bfda3d815..480cc6dfa4 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev.h
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.h
+@@ -685,14 +685,11 @@ cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf,
+ static __rte_always_inline void
+ cnxk_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
+ 			struct cnxk_timesync_info *tstamp,
+-			const uint8_t ts_enable, const uint8_t mseg_enable,
+-			uint64_t *tstamp_ptr)
++			const uint8_t ts_enable, uint64_t *tstamp_ptr)
+ {
+ 	if (ts_enable) {
+-		if (!mseg_enable) {
+-			mbuf->pkt_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
+-			mbuf->data_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
+-		}
++		mbuf->pkt_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
++		mbuf->data_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
+ 
+ 		/* Reading the rx timestamp inserted by CGX, viz at
+ 		 * starting of the packet data.
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_mtr.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_mtr.c
+index 39d8563826..b6ccccdc39 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_mtr.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_mtr.c
+@@ -277,15 +277,54 @@ cnxk_nix_mtr_profile_delete(struct rte_eth_dev *eth_dev, uint32_t profile_id,
+ 	return 0;
+ }
+ 
++static int
++update_mtr_err(uint32_t act_color, struct rte_mtr_error *error, bool action)
++{
++	const char *str = NULL;
++	switch (act_color) {
++	case RTE_COLOR_GREEN:
++		if (action) {
++			str = "Green action is not valid";
++			goto notsup;
++		} else {
++			str = "Green action is null";
++			goto notvalid;
++		}
++		break;
++	case RTE_COLOR_YELLOW:
++		if (action) {
++			str = "Yellow action is not valid";
++			goto notsup;
++		} else {
++			str = "Yellow action is null";
++			goto notvalid;
++		}
++		break;
++	case RTE_COLOR_RED:
++		if (action) {
++			str = "Red action is not valid";
++			goto notsup;
++		} else {
++			str = "Red action is null";
++			goto notvalid;
++		}
++		break;
++	}
++notsup:
++	return -rte_mtr_error_set(error, ENOTSUP,
++				  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL, str);
++notvalid:
++	return -rte_mtr_error_set(error, EINVAL,
++				  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL, str);
++}
++
+ static int
+ cnxk_nix_mtr_policy_validate(struct rte_eth_dev *dev,
+ 			     struct rte_mtr_meter_policy_params *policy,
+ 			     struct rte_mtr_error *error)
+ {
+-	static const char *const action_color[] = {"Green", "Yellow", "Red"};
+ 	bool supported[RTE_COLORS] = {false, false, false};
+ 	const struct rte_flow_action *action;
+-	char message[1024];
+ 	uint32_t i;
+ 
+ 	RTE_SET_USED(dev);
+@@ -304,21 +343,11 @@ cnxk_nix_mtr_policy_validate(struct rte_eth_dev *dev,
+ 				if (action->type == RTE_FLOW_ACTION_TYPE_DROP)
+ 					supported[i] = true;
+ 
+-				if (!supported[i]) {
+-					sprintf(message,
+-						"%s action is not valid",
+-						action_color[i]);
+-					return -rte_mtr_error_set(error,
+-					  ENOTSUP,
+-					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+-					  message);
+-				}
++				if (!supported[i])
++					return update_mtr_err(i, error, true);
+ 			}
+ 		} else {
+-			sprintf(message, "%s action is null", action_color[i]);
+-			return -rte_mtr_error_set(error, EINVAL,
+-				RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+-				message);
++			return update_mtr_err(i, error, false);
+ 		}
+ 	}
+ 
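/* Hedged sketch, not part of the applied patch: the rework above
 * replaces sprintf() into a 1 KiB stack buffer with a helper that
 * returns constant strings through rte_mtr_error_set(), trimming stack
 * use on the validation path. The shape of such a helper:
 */
#include <errno.h>
#include <stdbool.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>

static int
mtr_err(struct rte_mtr_error *error, bool not_supported, const char *msg)
{
	return -rte_mtr_error_set(error,
				  not_supported ? ENOTSUP : EINVAL,
				  RTE_MTR_ERROR_TYPE_METER_POLICY,
				  NULL, msg);
}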
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c
+index ce5f1f7240..f1d13c5004 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c
+@@ -517,7 +517,8 @@ cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
+ {
+ 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ 
+-	return roc_nix_npc_mcast_config(&dev->nix, true, false);
++	return roc_nix_npc_mcast_config(&dev->nix, true,
++					eth_dev->data->promiscuous);
+ }
+ 
+ int
+@@ -746,6 +747,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
+ 		goto fail;
+ 	}
+ 
++	roc_nix_rss_reta_get(nix, 0, reta);
++
+ 	/* Copy RETA table */
+ 	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ 		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_telemetry.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_telemetry.c
+index 83bc65848c..4fd9048643 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_telemetry.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_telemetry.c
+@@ -49,6 +49,8 @@ ethdev_tel_handle_info(const char *cmd __rte_unused,
+ 	rte_tel_data_add_dict_int(d, "n_ports", n_ports);
+ 
+ 	i_data = rte_tel_data_alloc();
++	if (i_data == NULL)
++		return -ENOMEM;
+ 	rte_tel_data_start_array(i_data, RTE_TEL_U64_VAL);
+ 
+ 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
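/* Hedged sketch, not part of the applied patch: rte_tel_data_alloc()
 * can return NULL, and the telemetry handler above now bails out with
 * -ENOMEM instead of dereferencing the result:
 */
#include <errno.h>
#include <rte_telemetry.h>

static int
fill_port_array(void)
{
	struct rte_tel_data *arr = rte_tel_data_alloc();

	if (arr == NULL)
		return -ENOMEM;
	rte_tel_data_start_array(arr, RTE_TEL_U64_VAL);
	/* ... populate the array and attach it to the reply ... */
	return 0;
}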
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ptp.c b/dpdk/drivers/net/cnxk/cnxk_ptp.c
+index 139fea256c..359f9a30ae 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ptp.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ptp.c
+@@ -12,7 +12,7 @@ cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)
+ 	/* This API returns the raw PTP HI clock value. Since LFs do not
+ 	 * have direct access to PTP registers and it requires mbox msg
+ 	 * to AF for this value. In fastpath reading this value for every
+-	 * packet (which involes mbox call) becomes very expensive, hence
++	 * packet (which involves mbox call) becomes very expensive, hence
+ 	 * we should be able to derive PTP HI clock value from tsc by
+ 	 * using freq_mult and clk_delta calculated during configure stage.
+ 	 */
+diff --git a/dpdk/drivers/net/cnxk/cnxk_rte_flow.c b/dpdk/drivers/net/cnxk/cnxk_rte_flow.c
+index b08d7c34fa..32166ae764 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_rte_flow.c
++++ b/dpdk/drivers/net/cnxk/cnxk_rte_flow.c
+@@ -297,7 +297,14 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev,
+ 		return rc;
+ 	}
+ 
+-	return roc_npc_flow_parse(npc, &in_attr, in_pattern, in_actions, &flow);
++	rc = roc_npc_flow_parse(npc, &in_attr, in_pattern, in_actions, &flow);
++
++	if (rc) {
++		rte_flow_error_set(error, 0, rc, NULL,
++				   "Flow validation failed");
++		return rc;
++	}
++	return 0;
+ }
+ 
+ struct roc_npc_flow *
+diff --git a/dpdk/drivers/net/cxgbe/base/adapter.h b/dpdk/drivers/net/cxgbe/base/adapter.h
+index 1c7c8afe16..97963422bf 100644
+--- a/dpdk/drivers/net/cxgbe/base/adapter.h
++++ b/dpdk/drivers/net/cxgbe/base/adapter.h
+@@ -291,8 +291,6 @@ struct sge {
+ 	u32 fl_starve_thres;        /* Free List starvation threshold */
+ };
+ 
+-#define T4_OS_NEEDS_MBOX_LOCKING 1
+-
+ /*
+  * OS Lock/List primitives for those interfaces in the Common Code which
+  * need this.
+diff --git a/dpdk/drivers/net/cxgbe/base/t4_hw.c b/dpdk/drivers/net/cxgbe/base/t4_hw.c
+index cdcd7e5510..645833765a 100644
+--- a/dpdk/drivers/net/cxgbe/base/t4_hw.c
++++ b/dpdk/drivers/net/cxgbe/base/t4_hw.c
+@@ -263,17 +263,6 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+ 
+ #define X_CIM_PF_NOACCESS 0xeeeeeeee
+ 
+-/*
+- * If the Host OS Driver needs locking arround accesses to the mailbox, this
+- * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
+- */
+-/* makes single-statement usage a bit cleaner ... */
+-#ifdef T4_OS_NEEDS_MBOX_LOCKING
+-#define T4_OS_MBOX_LOCKING(x) x
+-#else
+-#define T4_OS_MBOX_LOCKING(x) do {} while (0)
+-#endif
+-
+ /**
+  * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
+  * @adap: the adapter
+@@ -314,28 +303,17 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 		1, 1, 3, 5, 10, 10, 20, 50, 100
+ 	};
+ 
+-	u32 v;
+-	u64 res;
+-	int i, ms;
+-	unsigned int delay_idx;
+-	__be64 *temp = (__be64 *)malloc(size * sizeof(char));
+-	__be64 *p = temp;
+ 	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
+ 	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+-	u32 ctl;
+-	struct mbox_entry entry;
+-	u32 pcie_fw = 0;
+-
+-	if (!temp)
+-		return -ENOMEM;
++	struct mbox_entry *entry;
++	u32 v, ctl, pcie_fw = 0;
++	unsigned int delay_idx;
++	const __be64 *p;
++	int i, ms, ret;
++	u64 res;
+ 
+-	if ((size & 15) || size > MBOX_LEN) {
+-		free(temp);
++	if ((size & 15) != 0 || size > MBOX_LEN)
+ 		return -EINVAL;
+-	}
+-
+-	memset(p, 0, size);
+-	memcpy(p, (const __be64 *)cmd, size);
+ 
+ 	/*
+ 	 * If we have a negative timeout, that implies that we can't sleep.
+@@ -345,14 +323,17 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 		timeout = -timeout;
+ 	}
+ 
+-#ifdef T4_OS_NEEDS_MBOX_LOCKING
++	entry = t4_os_alloc(sizeof(*entry));
++	if (entry == NULL)
++		return -ENOMEM;
++
+ 	/*
+ 	 * Queue ourselves onto the mailbox access list.  When our entry is at
+ 	 * the front of the list, we have rights to access the mailbox.  So we
+ 	 * wait [for a while] till we're at the front [or bail out with an
+ 	 * EBUSY] ...
+ 	 */
+-	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
++	t4_os_atomic_add_tail(entry, &adap->mbox_list, &adap->mbox_lock);
+ 
+ 	delay_idx = 0;
+ 	ms = delay[0];
+@@ -367,18 +348,18 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 		 */
+ 		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ 		if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
+-			t4_os_atomic_list_del(&entry, &adap->mbox_list,
++			t4_os_atomic_list_del(entry, &adap->mbox_list,
+ 					      &adap->mbox_lock);
+ 			t4_report_fw_error(adap);
+-			free(temp);
+-			return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
++			ret = ((pcie_fw & F_PCIE_FW_ERR) != 0) ? -ENXIO : -EBUSY;
++			goto out_free;
+ 		}
+ 
+ 		/*
+ 		 * If we're at the head, break out and start the mailbox
+ 		 * protocol.
+ 		 */
+-		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
++		if (t4_os_list_first_entry(&adap->mbox_list) == entry)
+ 			break;
+ 
+ 		/*
+@@ -393,7 +374,6 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 			rte_delay_ms(ms);
+ 		}
+ 	}
+-#endif /* T4_OS_NEEDS_MBOX_LOCKING */
+ 
+ 	/*
+ 	 * Attempt to gain access to the mailbox.
+@@ -410,12 +390,11 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 	 * mailbox atomic access list and report the error to our caller.
+ 	 */
+ 	if (v != X_MBOWNER_PL) {
+-		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+-							 &adap->mbox_list,
+-							 &adap->mbox_lock));
++		t4_os_atomic_list_del(entry, &adap->mbox_list,
++				      &adap->mbox_lock);
+ 		t4_report_fw_error(adap);
+-		free(temp);
+-		return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
++		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
++		goto out_free;
+ 	}
+ 
+ 	/*
+@@ -441,7 +420,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 	/*
+ 	 * Copy in the new mailbox command and send it on its way ...
+ 	 */
+-	for (i = 0; i < size; i += 8, p++)
++	for (i = 0, p = cmd; i < size; i += 8, p++)
+ 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+ 
+ 	CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
+@@ -512,11 +491,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 				get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ 			}
+ 			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+-			T4_OS_MBOX_LOCKING(
+-				t4_os_atomic_list_del(&entry, &adap->mbox_list,
+-						      &adap->mbox_lock));
+-			free(temp);
+-			return -G_FW_CMD_RETVAL((int)res);
++			t4_os_atomic_list_del(entry, &adap->mbox_list,
++					      &adap->mbox_lock);
++			ret = -G_FW_CMD_RETVAL((int)res);
++			goto out_free;
+ 		}
+ 	}
+ 
+@@ -527,12 +505,13 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ 	 */
+ 	dev_err(adap, "command %#x in mailbox %d timed out\n",
+ 		*(const u8 *)cmd, mbox);
+-	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+-						 &adap->mbox_list,
+-						 &adap->mbox_lock));
++	t4_os_atomic_list_del(entry, &adap->mbox_list, &adap->mbox_lock);
+ 	t4_report_fw_error(adap);
+-	free(temp);
+-	return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
++	ret = ((pcie_fw & F_PCIE_FW_ERR) != 0) ? -ENXIO : -ETIMEDOUT;
++
++out_free:
++	t4_os_free(entry);
++	return ret;
+ }
+ 
+ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+diff --git a/dpdk/drivers/net/cxgbe/base/t4vf_hw.c b/dpdk/drivers/net/cxgbe/base/t4vf_hw.c
+index 561d759dbc..7dbd4deb79 100644
+--- a/dpdk/drivers/net/cxgbe/base/t4vf_hw.c
++++ b/dpdk/drivers/net/cxgbe/base/t4vf_hw.c
+@@ -83,7 +83,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
+ 
+ 	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+ 	__be64 cmd_rpl[MBOX_LEN / 8];
+-	struct mbox_entry entry;
++	struct mbox_entry *entry;
+ 	unsigned int delay_idx;
+ 	u32 v, mbox_data;
+ 	const __be64 *p;
+@@ -106,13 +106,17 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
+ 			size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ 		return -EINVAL;
+ 
++	entry = t4_os_alloc(sizeof(*entry));
++	if (entry == NULL)
++		return -ENOMEM;
++
+ 	/*
+ 	 * Queue ourselves onto the mailbox access list.  When our entry is at
+ 	 * the front of the list, we have rights to access the mailbox.  So we
+ 	 * wait [for a while] till we're at the front [or bail out with an
+ 	 * EBUSY] ...
+ 	 */
+-	t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock);
++	t4_os_atomic_add_tail(entry, &adapter->mbox_list, &adapter->mbox_lock);
+ 
+ 	delay_idx = 0;
+ 	ms = delay[0];
+@@ -125,17 +129,17 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
+ 		 * contend on access to the mailbox ...
+ 		 */
+ 		if (i > (2 * FW_CMD_MAX_TIMEOUT)) {
+-			t4_os_atomic_list_del(&entry, &adapter->mbox_list,
++			t4_os_atomic_list_del(entry, &adapter->mbox_list,
+ 					      &adapter->mbox_lock);
+ 			ret = -EBUSY;
+-			return ret;
++			goto out_free;
+ 		}
+ 
+ 		/*
+ 		 * If we're at the head, break out and start the mailbox
+ 		 * protocol.
+ 		 */
+-		if (t4_os_list_first_entry(&adapter->mbox_list) == &entry)
++		if (t4_os_list_first_entry(&adapter->mbox_list) == entry)
+ 			break;
+ 
+ 		/*
+@@ -160,10 +164,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
+ 		v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+ 
+ 	if (v != X_MBOWNER_PL) {
+-		t4_os_atomic_list_del(&entry, &adapter->mbox_list,
++		t4_os_atomic_list_del(entry, &adapter->mbox_list,
+ 				      &adapter->mbox_lock);
+ 		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
+-		return ret;
++		goto out_free;
+ 	}
+ 
+ 	/*
+@@ -224,7 +228,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
+ 			get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data);
+ 			t4_write_reg(adapter, mbox_ctl,
+ 				     V_MBOWNER(X_MBOWNER_NONE));
+-			t4_os_atomic_list_del(&entry, &adapter->mbox_list,
++			t4_os_atomic_list_del(entry, &adapter->mbox_list,
+ 					      &adapter->mbox_lock);
+ 
+ 			/* return value in high-order host-endian word */
+@@ -236,7 +240,8 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
+ 					 & F_FW_CMD_REQUEST) == 0);
+ 				memcpy(rpl, cmd_rpl, size);
+ 			}
+-			return -((int)G_FW_CMD_RETVAL(v));
++			ret = -((int)G_FW_CMD_RETVAL(v));
++			goto out_free;
+ 		}
+ 	}
+ 
+@@ -246,8 +251,11 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
+ 	dev_err(adapter, "command %#x timed out\n",
+ 		*(const u8 *)cmd);
+ 	dev_err(adapter, "    Control = %#x\n", t4_read_reg(adapter, mbox_ctl));
+-	t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock);
++	t4_os_atomic_list_del(entry, &adapter->mbox_list, &adapter->mbox_lock);
+ 	ret = -ETIMEDOUT;
++
++out_free:
++	t4_os_free(entry);
+ 	return ret;
+ }
+ 
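/* Hedged sketch, not part of the applied patch: both mailbox paths
 * above stop placing struct mbox_entry on the stack. The entry is
 * linked into an adapter-wide wait list, so a heap allocation plus a
 * single cleanup point removes any risk of the list referencing a dead
 * stack frame and collapses the per-exit free() calls. Generic shape:
 */
#include <errno.h>
#include <stdlib.h>

struct entry { struct entry *next; };

static int
do_locked_op(struct entry **list_head)
{
	struct entry *e;
	int ret;

	e = calloc(1, sizeof(*e));
	if (e == NULL)
		return -ENOMEM;

	e->next = *list_head;	/* enqueue ourselves on the wait list */
	*list_head = e;

	ret = 0;		/* ... mailbox protocol would run here ... */

	*list_head = e->next;	/* dequeue on every exit path */
	free(e);
	return ret;
}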
+diff --git a/dpdk/drivers/net/cxgbe/cxgbe_flow.c b/dpdk/drivers/net/cxgbe/cxgbe_flow.c
+index edcbba9d7c..6e460dfe2e 100644
+--- a/dpdk/drivers/net/cxgbe/cxgbe_flow.c
++++ b/dpdk/drivers/net/cxgbe/cxgbe_flow.c
+@@ -1378,7 +1378,7 @@ cxgbe_flow_validate(struct rte_eth_dev *dev,
+ }
+ 
+ /*
+- * @ret : > 0 filter destroyed succsesfully
++ * @ret : > 0 filter destroyed successfully
+  *        < 0 error destroying filter
+  *        == 1 filter not active / not found
+  */
+diff --git a/dpdk/drivers/net/cxgbe/cxgbevf_main.c b/dpdk/drivers/net/cxgbe/cxgbevf_main.c
+index f639612ae4..d0c93f8ac3 100644
+--- a/dpdk/drivers/net/cxgbe/cxgbevf_main.c
++++ b/dpdk/drivers/net/cxgbe/cxgbevf_main.c
+@@ -44,7 +44,7 @@ static void size_nports_qsets(struct adapter *adapter)
+ 	 */
+ 	pmask_nports = hweight32(adapter->params.vfres.pmask);
+ 	if (pmask_nports < adapter->params.nports) {
+-		dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
++		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
+ 			 " virtual interfaces; limited by Port Access Rights"
+ 			 " mask %#x\n", pmask_nports, adapter->params.nports,
+ 			 adapter->params.vfres.pmask);
+diff --git a/dpdk/drivers/net/cxgbe/sge.c b/dpdk/drivers/net/cxgbe/sge.c
+index f623f3e684..566cd48406 100644
+--- a/dpdk/drivers/net/cxgbe/sge.c
++++ b/dpdk/drivers/net/cxgbe/sge.c
+@@ -211,7 +211,7 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
+  * @fl: the Free List
+  *
+  * Tests specified Free List to see whether the number of buffers
+- * available to the hardware has falled below our "starvation"
++ * available to the hardware has fallen below our "starvation"
+  * threshold.
+  */
+ static inline bool fl_starving(const struct adapter *adapter,
+@@ -678,7 +678,7 @@ static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
+  * @q: the Tx queue
+  * @n: number of new descriptors to give to HW
+  *
+- * Ring the doorbel for a Tx queue.
++ * Ring the doorbell for a Tx queue.
+  */
+ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
+ {
+@@ -789,9 +789,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
+ 
+ #define MAX_COALESCE_LEN 64000
+ 
+-static inline int wraps_around(struct sge_txq *q, int ndesc)
++static inline bool wraps_around(struct sge_txq *q, int ndesc)
+ {
+-	return (q->pidx + ndesc) > q->size ? 1 : 0;
++	return (q->pidx + ndesc) > q->size ? true : false;
+ }
+ 
+ static void tx_timer_cb(void *data)
+@@ -842,7 +842,6 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ 
+ 	/* fill the pkts WR header */
+ 	wr = (void *)&q->desc[q->pidx];
+-	wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ 	vmwr = (void *)&q->desc[q->pidx];
+ 
+ 	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
+@@ -852,8 +851,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ 	wr->npkt = q->coalesce.idx;
+ 	wr->r3 = 0;
+ 	if (is_pf4(adap)) {
+-		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ 		wr->type = q->coalesce.type;
++		if (likely(wr->type != 0))
++			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
++		else
++			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
+ 	} else {
+ 		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
+ 		vmwr->r4 = 0;
+@@ -877,7 +879,7 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ }
+ 
+ /**
+- * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not
++ * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
+  * @txq: tx queue where the mbuf is sent
+  * @mbuf: mbuf to be sent
+  * @nflits: return value for number of flits needed
+@@ -932,13 +934,16 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
+ 		ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
+ 		credits = txq_avail(q) - ndesc;
+ 
++		if (unlikely(wraps_around(q, ndesc)))
++			return 0;
++
+ 		/* If we are wrapping or this is last mbuf then, send the
+ 		 * already coalesced mbufs and let the non-coalesce pass
+ 		 * handle the mbuf.
+ 		 */
+-		if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
++		if (unlikely(credits < 0)) {
+ 			ship_tx_pkt_coalesce_wr(adap, txq);
+-			return 0;
++			return -EBUSY;
+ 		}
+ 
+ 		/* If the max coalesce len or the max WR len is reached
+@@ -962,8 +967,12 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
+ 	ndesc = flits_to_desc(q->coalesce.flits + flits);
+ 	credits = txq_avail(q) - ndesc;
+ 
+-	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
++	if (unlikely(wraps_around(q, ndesc)))
+ 		return 0;
++
++	if (unlikely(credits < 0))
++		return -EBUSY;
++
+ 	q->coalesce.flits += wr_size / sizeof(__be64);
+ 	q->coalesce.type = type;
+ 	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
+@@ -1106,7 +1115,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ 	unsigned int flits, ndesc, cflits;
+ 	int l3hdr_len, l4hdr_len, eth_xtra_len;
+ 	int len, last_desc;
+-	int credits;
++	int should_coal, credits;
+ 	u32 wr_mid;
+ 	u64 cntrl, *end;
+ 	bool v6;
+@@ -1138,9 +1147,9 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ 	/* align the end of coalesce WR to a 512 byte boundary */
+ 	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
+ 
+-	if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
+-			m->pkt_len > RTE_ETHER_MAX_LEN)) {
+-		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
++	if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {
++		should_coal = should_tx_packet_coalesce(txq, mbuf, &cflits, adap);
++		if (should_coal > 0) {
+ 			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
+ 				dev_warn(adap, "%s: mapping err for coalesce\n",
+ 					 __func__);
+@@ -1149,8 +1158,8 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ 			}
+ 			return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
+ 						     pi, addr, nb_pkts);
+-		} else {
+-			return -EBUSY;
++		} else if (should_coal < 0) {
++			return should_coal;
+ 		}
+ 	}
+ 
+@@ -1197,8 +1206,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ 		end = (u64 *)vmwr + flits;
+ 	}
+ 
+-	len = 0;
+-	len += sizeof(*cpl);
++	len = sizeof(*cpl);
+ 
+ 	/* Coalescing skipped and we send through normal path */
+ 	if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+@@ -1846,7 +1854,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ 		 * for its status page) along with the associated software
+ 		 * descriptor ring.  The free list size needs to be a multiple
+ 		 * of the Egress Queue Unit and at least 2 Egress Units larger
+-		 * than the SGE's Egress Congrestion Threshold
++		 * than the SGE's Egress Congestion Threshold
+ 		 * (fl_starve_thres - 1).
+ 		 */
+ 		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+@@ -1910,7 +1918,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ 	iq->stat = (void *)&iq->desc[iq->size * 8];
+ 	iq->eth_dev = eth_dev;
+ 	iq->handler = hnd;
+-	iq->port_id = pi->pidx;
++	iq->port_id = eth_dev->data->port_id;
+ 	iq->mb_pool = mp;
+ 
+ 	/* set offset to -1 to distinguish ingress queues without FL */
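/* Hedged sketch, not part of the applied patch: should_tx_packet_coalesce()
 * now distinguishes "do not coalesce" (0: fall through to the normal
 * send path) from "no credits" (-EBUSY: push back-pressure to the
 * caller), instead of returning the same value for both. The caller's
 * shape, with stand-in helpers:
 */
#include <errno.h>

static int send_normal_path(void *pkt) { (void)pkt; return 0; }
static int coalesce_and_send(void *pkt) { (void)pkt; return 0; }
static int should_coalesce(void *pkt) { (void)pkt; return 1; }

static int
xmit(void *pkt)
{
	int should_coal = should_coalesce(pkt);

	if (should_coal > 0)
		return coalesce_and_send(pkt);	/* coalesce WR path */
	else if (should_coal < 0)
		return should_coal;		/* e.g. -EBUSY: retry later */

	return send_normal_path(pkt);		/* plain Tx descriptor */
}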
+diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c
+index e49f765434..9847ca1be1 100644
+--- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c
++++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c
+@@ -1030,7 +1030,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ 				   QM_FQCTRL_CTXASTASHING |
+ 				   QM_FQCTRL_PREFERINCACHE;
+ 		opts.fqd.context_a.stashing.exclusive = 0;
+-		/* In muticore scenario stashing becomes a bottleneck on LS1046.
++		/* In multicore scenario stashing becomes a bottleneck on LS1046.
+ 		 * So do not enable stashing in this case
+ 		 */
+ 		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+@@ -1201,23 +1201,17 @@ int
+ dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+ 		int eth_rx_queue_id)
+ {
+-	struct qm_mcc_initfq opts;
++	struct qm_mcc_initfq opts = {0};
+ 	int ret;
+ 	u32 flags = 0;
+ 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ 	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+ 
+-	dpaa_poll_queue_default_config(&opts);
+-
+-	if (dpaa_intf->cgr_rx) {
+-		opts.we_mask |= QM_INITFQ_WE_CGID;
+-		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+-		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+-	}
+-
++	qman_retire_fq(rxq, NULL);
++	qman_oos_fq(rxq);
+ 	ret = qman_init_fq(rxq, flags, &opts);
+ 	if (ret) {
+-		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
++		DPAA_PMD_ERR("detach rx fqid %d failed with ret: %d",
+ 			     rxq->fqid, ret);
+ 	}
+ 
+@@ -1866,7 +1860,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
+ 
+ 	dpaa_intf->name = dpaa_device->name;
+ 
+-	/* save fman_if & cfg in the interface struture */
++	/* save fman_if & cfg in the interface structure */
+ 	eth_dev->process_private = fman_intf;
+ 	dpaa_intf->ifid = dev_id;
+ 	dpaa_intf->cfg = cfg;
+@@ -2169,7 +2163,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+ 		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ 			dpaa_push_mode_max_queue = 0;
+ 
+-		/* if push mode queues to be enabled. Currenly we are allowing
++		/* if push mode queues to be enabled. Currently we are allowing
+ 		 * only one queue per thread.
+ 		 */
+ 		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
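/* Hedged sketch, not part of the applied patch: the eventq detach fix
 * above follows the QMan frame-queue state machine: a scheduled FQ
 * cannot be re-initialised directly, it must first be retired and taken
 * out of service. Helper shape, using the DPAA bus calls visible in the
 * hunk:
 */
#include <fsl_qman.h>

static int
reset_fq(struct qman_fq *fq)
{
	struct qm_mcc_initfq opts = {0};

	qman_retire_fq(fq, NULL);		/* scheduled -> retired */
	qman_oos_fq(fq);			/* retired -> out of service */
	return qman_init_fq(fq, 0, &opts);	/* back to a clean init */
}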
+diff --git a/dpdk/drivers/net/dpaa/dpaa_rxtx.c b/dpdk/drivers/net/dpaa/dpaa_rxtx.c
+index ffac6ce3e2..956fe946fa 100644
+--- a/dpdk/drivers/net/dpaa/dpaa_rxtx.c
++++ b/dpdk/drivers/net/dpaa/dpaa_rxtx.c
+@@ -600,8 +600,8 @@ void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
+ 	void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
+ 
+ 	/* In case of LS1046, annotation stashing is disabled due to L2 cache
+-	 * being bottleneck in case of multicore scanario for this platform.
+-	 * So we prefetch the annoation beforehand, so that it is available
++	 * being bottleneck in case of multicore scenario for this platform.
++	 * So we prefetch the annotation beforehand, so that it is available
+ 	 * in cache when accessed.
+ 	 */
+ 	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+diff --git a/dpdk/drivers/net/dpaa/fmlib/fm_ext.h b/dpdk/drivers/net/dpaa/fmlib/fm_ext.h
+index 27c9fb471e..8e7153bdaf 100644
+--- a/dpdk/drivers/net/dpaa/fmlib/fm_ext.h
++++ b/dpdk/drivers/net/dpaa/fmlib/fm_ext.h
+@@ -176,7 +176,7 @@ typedef struct t_fm_prs_result {
+ #define FM_FD_ERR_PRS_HDR_ERR	0x00000020
+ 		/**< Header error was identified during parsing */
+ #define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED  0x00000008
+-			/**< Frame parsed beyind 256 first bytes */
++			/**< Frame parsed beyond 256 first bytes */
+ 
+ #define FM_FD_TX_STATUS_ERR_MASK	(FM_FD_ERR_UNSUPPORTED_FORMAT   | \
+ 					FM_FD_ERR_LENGTH		| \
+diff --git a/dpdk/drivers/net/dpaa/fmlib/fm_pcd_ext.h b/dpdk/drivers/net/dpaa/fmlib/fm_pcd_ext.h
+index 8be3885fbc..3802b42916 100644
+--- a/dpdk/drivers/net/dpaa/fmlib/fm_pcd_ext.h
++++ b/dpdk/drivers/net/dpaa/fmlib/fm_pcd_ext.h
+@@ -276,7 +276,7 @@ typedef struct ioc_fm_pcd_counters_params_t {
+ } ioc_fm_pcd_counters_params_t;
+ 
+ /*
+- * @Description   structure for FM exception definitios
++ * @Description   structure for FM exception definitions
+  */
+ typedef struct ioc_fm_pcd_exception_params_t {
+ 	ioc_fm_pcd_exceptions exception;	/**< The requested exception */
+@@ -883,7 +883,7 @@ typedef enum ioc_fm_pcd_manip_hdr_rmv_specific_l2 {
+ 	e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET,	/**< Ethernet/802.3 MAC */
+ 	e_IOC_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS,	/**< stacked QTags */
+ 	e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS,
+-			/**< MPLS and Ethernet/802.3 MAC header unitl the header
++			/**< MPLS and Ethernet/802.3 MAC header until the header
+ 			 * which follows the MPLS header
+ 			 */
+ 	e_IOC_FM_PCD_MANIP_HDR_RMV_MPLS
+@@ -3293,7 +3293,7 @@ typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {
+ /*
+  * @Function	  fm_pcd_net_env_characteristics_delete
+  *
+- * @Description   Deletes a set of Network Environment Charecteristics.
++ * @Description   Deletes a set of Network Environment Characteristics.
+  *
+  * @Param[in]	  ioc_fm_obj_t		The id of a Network Environment object.
+  *
+@@ -3493,7 +3493,7 @@ typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {
+  * @Return	  0 on success; Error code otherwise.
+  *
+  * @Cautions	  Allowed only following fm_pcd_match_table_set() not only of
+- *		  the relevnt node but also the node that points to this node.
++ *		  the relevant node but also the node that points to this node.
+  */
+ #define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE \
+ 		_IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), \
+diff --git a/dpdk/drivers/net/dpaa/fmlib/fm_port_ext.h b/dpdk/drivers/net/dpaa/fmlib/fm_port_ext.h
+index 6f5479fbe1..bb2e00222e 100644
+--- a/dpdk/drivers/net/dpaa/fmlib/fm_port_ext.h
++++ b/dpdk/drivers/net/dpaa/fmlib/fm_port_ext.h
+@@ -498,7 +498,7 @@ typedef struct ioc_fm_port_pcd_prs_params_t {
+ 		/**< Number of bytes from beginning of packet to start parsing
+ 		 */
+ 	ioc_net_header_type	first_prs_hdr;
+-		/**< The type of the first header axpected at 'parsing_offset'
++		/**< The type of the first header expected at 'parsing_offset'
+ 		 */
+ 	bool		include_in_prs_statistics;
+ 		/**< TRUE to include this port in the parser statistics */
+@@ -524,7 +524,7 @@ typedef struct ioc_fm_port_pcd_prs_params_t {
+ } ioc_fm_port_pcd_prs_params_t;
+ 
+ /*
+- * @Description   A structure for defining coarse alassification parameters
++ * @Description   A structure for defining coarse classification parameters
+  *		  (Must match t_fm_portPcdCcParams defined in fm_port_ext.h)
+  */
+ typedef struct ioc_fm_port_pcd_cc_params_t {
+@@ -602,7 +602,7 @@ typedef struct ioc_fm_pcd_prs_start_t {
+ 		/**< Number of bytes from beginning of packet to start parsing
+ 		 */
+ 	ioc_net_header_type first_prs_hdr;
+-		/**< The type of the first header axpected at 'parsing_offset'
++		/**< The type of the first header expected at 'parsing_offset'
+ 		 */
+ } ioc_fm_pcd_prs_start_t;
+ 
+@@ -1356,7 +1356,7 @@ typedef uint32_t	fm_port_frame_err_select_t;
+ #define FM_PORT_FRM_ERR_PRS_HDR_ERR	FM_FD_ERR_PRS_HDR_ERR
+ 			/**< Header error was identified during parsing */
+ #define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED	FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
+-			/**< Frame parsed beyind 256 first bytes */
++			/**< Frame parsed beyond 256 first bytes */
+ #define FM_PORT_FRM_ERR_PROCESS_TIMEOUT	0x00000001
+ 			/**< FPM Frame Processing Timeout Exceeded */
+ /* @} */
+@@ -1390,7 +1390,7 @@ typedef void (t_fm_port_exception_callback) (t_handle h_app,
+  * @Param[in]	  length	length of received data
+  * @Param[in]	  status	receive status and errors
+  * @Param[in]	  position	position of buffer in frame
+- * @Param[in]	  h_buf_context	A handle of the user acossiated with this buffer
++ * @Param[in]	  h_buf_context	A handle of the user associated with this buffer
+  *
+  * @Retval	  e_RX_STORE_RESPONSE_CONTINUE
+  *		  order the driver to continue Rx operation for all ready data.
+@@ -1414,7 +1414,7 @@ typedef e_rx_store_response(t_fm_port_im_rx_store_callback) (t_handle h_app,
+  * @Param[in]	  p_data	A pointer to data received
+  * @Param[in]	  status	transmit status and errors
+  * @Param[in]	  last_buffer	is last buffer in frame
+- * @Param[in]	  h_buf_context	A handle of the user acossiated with this buffer
++ * @Param[in]	  h_buf_context	A handle of the user associated with this buffer
+  */
+ typedef void (t_fm_port_im_tx_conf_callback) (t_handle   h_app,
+ 				uint8_t	*p_data,
+@@ -2585,7 +2585,7 @@ typedef struct t_fm_port_congestion_grps {
+ 	bool	pfc_prio_enable[FM_NUM_CONG_GRPS][FM_MAX_PFC_PRIO];
+ 			/**< a matrix that represents the map between the CG ids
+ 			 * defined in 'congestion_grps_to_consider' to the
+-			 * priorties mapping array.
++			 * priorities mapping array.
+ 			 */
+ } t_fm_port_congestion_grps;
+ 
+diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
+index a3706439d5..b875139689 100644
+--- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
++++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
+@@ -143,7 +143,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ 	PMD_INIT_FUNC_TRACE();
+ 
+ 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+-		/* VLAN Filter not avaialble */
++		/* VLAN Filter not available */
+ 		if (!priv->max_vlan_filters) {
+ 			DPAA2_PMD_INFO("VLAN filter not available");
+ 			return -ENOTSUP;
+@@ -395,6 +395,8 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
+ 	if (dpaa2_enable_err_queue) {
+ 		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
+ 			sizeof(struct dpaa2_queue), 0);
++		if (!priv->rx_err_vq)
++			goto fail;
+ 
+ 		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+ 		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
+@@ -916,7 +918,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ 		cong_notif_cfg.threshold_entry = nb_tx_desc;
+ 		/* Notify that the queue is not congested when the data in
+-		 * the queue is below this thershold.(90% of value)
++		 * the queue is below this threshold. (90% of value)
+ 		 */
+ 		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
+ 		cong_notif_cfg.message_ctx = 0;
+@@ -1058,7 +1060,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
+  * Dpaa2 link Interrupt handler
+  *
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -1252,7 +1254,12 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
+ 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ 	int ret;
+ 	struct rte_eth_link link;
+-	struct rte_intr_handle *intr_handle = dev->intr_handle;
++	struct rte_device *rdev = dev->device;
++	struct rte_intr_handle *intr_handle;
++	struct rte_dpaa2_device *dpaa2_dev;
++
++	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
++	intr_handle = dpaa2_dev->intr_handle;
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
+@@ -2236,7 +2243,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+ 		ocfg.oa = 1;
+ 		/* Late arrival window size disabled */
+ 		ocfg.olws = 0;
+-		/* ORL resource exhaustaion advance NESN disabled */
++		/* ORL resource exhaustion advance NESN disabled */
+ 		ocfg.oeane = 0;
+ 		/* Loose ordering enabled */
+ 		ocfg.oloe = 1;
+@@ -2720,13 +2727,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
+ 	}
+ 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
+ 
+-	/*Init fields w.r.t. classficaition*/
++	/* Init fields w.r.t. classification */
+ 	memset(&priv->extract.qos_key_extract, 0,
+ 		sizeof(struct dpaa2_key_extract));
+ 	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
+ 	if (!priv->extract.qos_extract_param) {
+ 		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
+-			    " classificaiton ", ret);
++			    " classification ", ret);
+ 		goto init_err;
+ 	}
+ 	priv->extract.qos_key_extract.key_info.ipv4_src_offset =
+@@ -2744,7 +2751,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
+ 		priv->extract.tc_extract_param[i] =
+ 			(size_t)rte_malloc(NULL, 256, 64);
+ 		if (!priv->extract.tc_extract_param[i]) {
+-			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
++			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
+ 				     ret);
+ 			goto init_err;
+ 		}
+diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h
+index c5e9267bf0..fd4eabed4e 100644
+--- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h
++++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h
+@@ -62,7 +62,7 @@
+ /* Disable RX tail drop, default is enable */
+ #define DPAA2_RX_TAILDROP_OFF	0x04
+ /* Tx confirmation enabled */
+-#define DPAA2_TX_CONF_ENABLE	0x08
++#define DPAA2_TX_CONF_ENABLE	0x06
+ 
+ #define DPAA2_RSS_OFFLOAD_ALL ( \
+ 	RTE_ETH_RSS_L2_PAYLOAD | \
+@@ -117,7 +117,7 @@ extern int dpaa2_timestamp_dynfield_offset;
+ 
+ #define DPAA2_FLOW_MAX_KEY_SIZE		16
+ 
+-/*Externaly defined*/
++/* Externally defined */
+ extern const struct rte_flow_ops dpaa2_flow_ops;
+ 
+ extern const struct rte_tm_ops dpaa2_tm_ops;
+diff --git a/dpdk/drivers/net/dpaa2/dpaa2_flow.c b/dpdk/drivers/net/dpaa2/dpaa2_flow.c
+index 84fe37a7c0..bf55eb70a3 100644
+--- a/dpdk/drivers/net/dpaa2/dpaa2_flow.c
++++ b/dpdk/drivers/net/dpaa2/dpaa2_flow.c
+@@ -1451,7 +1451,7 @@ dpaa2_configure_flow_generic_ip(
+ 			flow, pattern, &local_cfg,
+ 			device_configured, group);
+ 	if (ret) {
+-		DPAA2_PMD_ERR("IP discrimation failed!");
++		DPAA2_PMD_ERR("IP discrimination failed!");
+ 		return -1;
+ 	}
+ 
+@@ -3349,7 +3349,7 @@ dpaa2_flow_verify_action(
+ 					(actions[j].conf);
+ 			if (rss_conf->queue_num > priv->dist_queues) {
+ 				DPAA2_PMD_ERR(
+-					"RSS number exceeds the distrbution size");
++					"RSS number exceeds the distribution size");
+ 				return -ENOTSUP;
+ 			}
+ 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
+@@ -3596,7 +3596,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
+ 				qos_cfg.keep_entries = true;
+ 				qos_cfg.key_cfg_iova =
+ 					(size_t)priv->extract.qos_extract_param;
+-				/* QoS table is effecitive for multiple TCs.*/
++				/* QoS table is effective for multiple TCs. */
+ 				if (priv->num_rx_tc > 1) {
+ 					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
+ 						priv->token, &qos_cfg);
+@@ -3655,7 +3655,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
+ 						0, 0);
+ 				if (ret < 0) {
+ 					DPAA2_PMD_ERR(
+-						"Error in addnig entry to QoS table(%d)", ret);
++						"Error in adding entry to QoS table(%d)", ret);
+ 					return ret;
+ 				}
+ 			}
+diff --git a/dpdk/drivers/net/dpaa2/dpaa2_mux.c b/dpdk/drivers/net/dpaa2/dpaa2_mux.c
+index d347f4df51..54f53b7ea0 100644
+--- a/dpdk/drivers/net/dpaa2/dpaa2_mux.c
++++ b/dpdk/drivers/net/dpaa2/dpaa2_mux.c
+@@ -95,7 +95,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
+ 	mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
+ 
+ 	/* Currently taking only IP protocol as an extract type.
+-	 * This can be exended to other fields using pattern->type.
++	 * This can be extended to other fields using pattern->type.
+ 	 */
+ 	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
+ 
+@@ -296,7 +296,7 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
+ 	}
+ 
+ 	ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
+-				    dpdmux_dev->token, 1);
++				    dpdmux_dev->token, attr.default_if);
+ 	if (ret) {
+ 		DPAA2_PMD_ERR("setting default interface failed in %s",
+ 			      __func__);
+diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ptp.c b/dpdk/drivers/net/dpaa2/dpaa2_ptp.c
+index 8d79e39244..3a4536dd69 100644
+--- a/dpdk/drivers/net/dpaa2/dpaa2_ptp.c
++++ b/dpdk/drivers/net/dpaa2/dpaa2_ptp.c
+@@ -111,10 +111,12 @@ int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ {
+ 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ 
+-	if (priv->next_tx_conf_queue)
+-		dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
+-	else
++	if (priv->next_tx_conf_queue) {
++		while (!priv->tx_timestamp)
++			dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
++	} else {
+ 		return -1;
++	}
+ 	*timestamp = rte_ns_to_timespec(priv->tx_timestamp);
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c
+index c65589a5f3..9fb6c5f91d 100644
+--- a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c
++++ b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c
+@@ -140,8 +140,10 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
+ 			annotation->word3, annotation->word4);
+ 
+ #if defined(RTE_LIBRTE_IEEE1588)
+-	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
++	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
+ 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
++		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
++	}
+ #endif
+ 
+ 	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
+@@ -714,7 +716,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
+ 
+ 	/* Prepare next pull descriptor. This will give space for the
+-	 * prefething done on DQRR entries
++	 * prefetching done on DQRR entries
+ 	 */
+ 	q_storage->toggle ^= 1;
+ 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+@@ -769,7 +771,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 		else
+ 			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
+ #if defined(RTE_LIBRTE_IEEE1588)
+-		priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
++		if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
++			priv->rx_timestamp =
++				*dpaa2_timestamp_dynfield(bufs[num_rx]);
++		}
+ #endif
+ 
+ 		if (eth_data->dev_conf.rxmode.offloads &
+@@ -986,6 +991,13 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 				bufs[num_rx] = eth_fd_to_mbuf(fd,
+ 							eth_data->port_id);
+ 
++#if defined(RTE_LIBRTE_IEEE1588)
++		if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
++			priv->rx_timestamp =
++				*dpaa2_timestamp_dynfield(bufs[num_rx]);
++		}
++#endif
++
+ 		if (eth_data->dev_conf.rxmode.offloads &
+ 				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+ 			rte_vlan_strip(bufs[num_rx]);
+@@ -1021,6 +1033,8 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
+ 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+ 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
+ 	struct dpaa2_annot_hdr *annotation;
++	void *v_addr;
++	struct rte_mbuf *mbuf;
+ #endif
+ 
+ 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+@@ -1105,10 +1119,16 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
+ 			num_tx_conf++;
+ 			num_pulled++;
+ #if defined(RTE_LIBRTE_IEEE1588)
+-			annotation = (struct dpaa2_annot_hdr *)((size_t)
+-				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
+-				DPAA2_FD_PTA_SIZE);
+-			priv->tx_timestamp = annotation->word2;
++			v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
++			mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
++				rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
++
++			if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST) {
++				annotation = (struct dpaa2_annot_hdr *)((size_t)
++					DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
++					DPAA2_FD_PTA_SIZE);
++				priv->tx_timestamp = annotation->word2;
++			}
+ #endif
+ 		} while (pending);
+ 
+@@ -1184,8 +1204,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 	 * corresponding to last packet transmitted for reading
+ 	 * the timestamp
+ 	 */
+-	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
+-	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
++	if ((*bufs)->ol_flags & PKT_TX_IEEE1588_TMST) {
++		priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
++		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
++		priv->tx_timestamp = 0;
++	}
+ #endif
+ 
+ 	/*Prepare enqueue descriptor*/
+@@ -1510,7 +1533,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 			if (*dpaa2_seqn(*bufs)) {
+ 				/* Use only queue 0 for Tx in case of atomic/
+ 				 * ordered packets as packets can get unordered
+-				 * when being tranmitted out from the interface
++				 * when being transmitted out from the interface
+ 				 */
+ 				dpaa2_set_enqueue_descriptor(order_sendq,
+ 							     (*bufs),
+@@ -1738,7 +1761,7 @@ dpaa2_dev_loopback_rx(void *queue,
+ 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
+ 
+ 	/* Prepare next pull descriptor. This will give space for the
+-	 * prefething done on DQRR entries
++	 * prefetching done on DQRR entries
+ 	 */
+ 	q_storage->toggle ^= 1;
+ 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+diff --git a/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h b/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h
+index 469ab9b3d4..3b9bffeed7 100644
+--- a/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h
++++ b/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h
+@@ -93,7 +93,7 @@ struct fsl_mc_io;
+  */
+ #define DPNI_OPT_OPR_PER_TC				0x000080
+ /**
+- * All Tx traffic classes will use a single sender (ignore num_queueus for tx)
++ * All Tx traffic classes will use a single sender (ignore num_queues for tx)
+  */
+ #define DPNI_OPT_SINGLE_SENDER			0x000100
+ /**
+@@ -617,7 +617,7 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+  * @page_3.ceetm_reject_bytes: Cumulative count of the number of bytes in all
+  *	frames whose enqueue was rejected
+  * @page_3.ceetm_reject_frames: Cumulative count of all frame enqueues rejected
+- * @page_4: congestion point drops for seleted TC
++ * @page_4: congestion point drops for selected TC
+  * @page_4.cgr_reject_frames: number of rejected frames due to congestion point
+  * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point
+  * @page_5: policer statistics per TC
+@@ -1417,7 +1417,7 @@ int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+  *		dpkg_prepare_key_cfg()
+  * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+  *		'0' to use the 'default_tc' in such cases
+- * @keep_entries: if set to one will not delele existing table entries. This
++ * @keep_entries: if set to one will not delete existing table entries. This
+  *		option will work properly only for dpni objects created with
+  *		DPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must
+  *		be compatible with new key composition rule.
+@@ -1516,7 +1516,7 @@ int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+  * @flow_id: Identifies the Rx queue used for matching traffic.  Supported
+  *     values are in range 0 to num_queue-1.
+  * @redirect_obj_token: token that identifies the object where frame is
+- * redirected when this rule is hit. This paraneter is used only when one of the
++ * redirected when this rule is hit. This parameter is used only when one of the
+  * flags DPNI_FS_OPT_REDIRECT_TO_DPNI_RX or DPNI_FS_OPT_REDIRECT_TO_DPNI_TX is
+  * set.
+  * The token is obtained using dpni_open() API call. The object must stay
+@@ -1797,7 +1797,7 @@ int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
+ 		  struct dpni_load_ss_cfg *cfg);
+ 
+ /**
+- * dpni_eanble_sw_sequence() - Enables a software sequence in the parser
++ * dpni_enable_sw_sequence() - Enables a software sequence in the parser
+  *				profile
+  * corresponding to the ingress or egress of the DPNI.
+  * @mc_io:	Pointer to MC portal's I/O object
+diff --git a/dpdk/drivers/net/e1000/e1000_ethdev.h b/dpdk/drivers/net/e1000/e1000_ethdev.h
+index a548ae2ccb..718a9746ed 100644
+--- a/dpdk/drivers/net/e1000/e1000_ethdev.h
++++ b/dpdk/drivers/net/e1000/e1000_ethdev.h
+@@ -103,7 +103,7 @@
+  * Maximum number of Ring Descriptors.
+  *
+  * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+- * desscriptors should meet the following condition:
++ * descriptors should meet the following condition:
+  * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+  */
+ #define	E1000_MIN_RING_DESC	32
+@@ -252,7 +252,7 @@ struct igb_rte_flow_rss_conf {
+ };
+ 
+ /*
+- * Structure to store filters'info.
++ * Structure to store filters' info.
+  */
+ struct e1000_filter_info {
+ 	uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
+diff --git a/dpdk/drivers/net/e1000/em_ethdev.c b/dpdk/drivers/net/e1000/em_ethdev.c
+index 31c4870086..794496abfc 100644
+--- a/dpdk/drivers/net/e1000/em_ethdev.c
++++ b/dpdk/drivers/net/e1000/em_ethdev.c
+@@ -1058,8 +1058,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 
+ 	/*
+ 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
+-	 * Unfortunatelly, all these nics have just one TX context.
+-	 * So we have few choises for TX:
++	 * Unfortunately, all these nics have just one TX context.
++	 * So we have few choices for TX:
+ 	 * - Use just one TX queue.
+ 	 * - Allow cksum offload only for one TX queue.
+ 	 * - Don't allow TX cksum offload at all.
+@@ -1068,7 +1068,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 	 * (Multiple Receive Queues are mutually exclusive with UDP
+ 	 * fragmentation and are not supported when a legacy receive
+ 	 * descriptor format is used).
+-	 * Which means separate RX routinies - as legacy nics (82540, 82545)
++	 * Which means separate RX routines - as legacy nics (82540, 82545)
+ 	 * don't support extended RXD.
+ 	 * To avoid it we support just one RX queue for now (no RSS).
+ 	 */
+@@ -1558,7 +1558,7 @@ eth_em_interrupt_get_status(struct rte_eth_dev *dev)
+ }
+ 
+ /*
+- * It executes link_update after knowing an interrupt is prsent.
++ * It executes link_update after knowing an interrupt is present.
+  *
+  * @param dev
+  *  Pointer to struct rte_eth_dev.
+@@ -1616,7 +1616,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+diff --git a/dpdk/drivers/net/e1000/em_rxtx.c b/dpdk/drivers/net/e1000/em_rxtx.c
+index 39262502bb..cea5b490ba 100644
+--- a/dpdk/drivers/net/e1000/em_rxtx.c
++++ b/dpdk/drivers/net/e1000/em_rxtx.c
+@@ -141,7 +141,7 @@ union em_vlan_macip {
+ struct em_ctx_info {
+ 	uint64_t flags;              /**< ol_flags related to context build. */
+ 	uint32_t cmp_mask;           /**< compare mask */
+-	union em_vlan_macip hdrlen;  /**< L2 and L3 header lenghts */
++	union em_vlan_macip hdrlen;  /**< L2 and L3 header lengths */
+ };
+ 
+ /**
+@@ -829,7 +829,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	 * register.
+ 	 * Update the RDT with the value of the last processed RX descriptor
+ 	 * minus 1, to guarantee that the RDT register is never equal to the
+-	 * RDH register, which creates a "full" ring situtation from the
++	 * RDH register, which creates a "full" ring situation from the
+ 	 * hardware point of view...
+ 	 */
+ 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+@@ -1074,7 +1074,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	 * register.
+ 	 * Update the RDT with the value of the last processed RX descriptor
+ 	 * minus 1, to guarantee that the RDT register is never equal to the
+-	 * RDH register, which creates a "full" ring situtation from the
++	 * RDH register, which creates a "full" ring situation from the
+ 	 * hardware point of view...
+ 	 */
+ 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+diff --git a/dpdk/drivers/net/e1000/igb_ethdev.c b/dpdk/drivers/net/e1000/igb_ethdev.c
+index 3ee16c15fe..a9c18b27e8 100644
+--- a/dpdk/drivers/net/e1000/igb_ethdev.c
++++ b/dpdk/drivers/net/e1000/igb_ethdev.c
+@@ -1149,7 +1149,7 @@ eth_igb_configure(struct rte_eth_dev *dev)
+ 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 
+-	/* multipe queue mode checking */
++	/* multiple queue mode checking */
+ 	ret  = igb_check_mq_mode(dev);
+ 	if (ret != 0) {
+ 		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
+@@ -1265,7 +1265,7 @@ eth_igb_start(struct rte_eth_dev *dev)
+ 		}
+ 	}
+ 
+-	/* confiugre msix for rx interrupt */
++	/* configure MSI-X for Rx interrupt */
+ 	eth_igb_configure_msix_intr(dev);
+ 
+ 	/* Configure for OS presence */
+@@ -2819,7 +2819,7 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
+ }
+ 
+ /*
+- * It executes link_update after knowing an interrupt is prsent.
++ * It executes link_update after knowing an interrupt is present.
+  *
+  * @param dev
+  *  Pointer to struct rte_eth_dev.
+@@ -2889,7 +2889,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -3787,7 +3787,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev,
+  *
+  * @param
+  * dev: Pointer to struct rte_eth_dev.
+- * ntuple_filter: ponter to the filter that will be added.
++ * ntuple_filter: pointer to the filter that will be added.
+  *
+  * @return
+  *    - On success, zero.
+@@ -3868,7 +3868,7 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+  *
+  * @param
+  * dev: Pointer to struct rte_eth_dev.
+- * ntuple_filter: ponter to the filter that will be removed.
++ * ntuple_filter: pointer to the filter that will be removed.
+  *
+  * @return
+  *    - On success, zero.
+@@ -4226,7 +4226,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
+  *
+  * @param
+  * dev: Pointer to struct rte_eth_dev.
+- * ntuple_filter: ponter to the filter that will be added.
++ * ntuple_filter: pointer to the filter that will be added.
+  *
+  * @return
+  *    - On success, zero.
+@@ -4313,7 +4313,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+  *
+  * @param
+  * dev: Pointer to struct rte_eth_dev.
+- * ntuple_filter: ponter to the filter that will be removed.
++ * ntuple_filter: pointer to the filter that will be removed.
+  *
+  * @return
+  *    - On success, zero.
+@@ -4831,7 +4831,7 @@ igb_timesync_disable(struct rte_eth_dev *dev)
+ 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ 	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
+ 
+-	/* Stop incrementating the System Time registers. */
++	/* Stop incrementing the System Time registers. */
+ 	E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/e1000/igb_flow.c b/dpdk/drivers/net/e1000/igb_flow.c
+index e72376f69c..e46697b6a1 100644
+--- a/dpdk/drivers/net/e1000/igb_flow.c
++++ b/dpdk/drivers/net/e1000/igb_flow.c
+@@ -57,7 +57,7 @@ struct igb_flex_filter_list igb_filter_flex_list;
+ struct igb_rss_filter_list igb_filter_rss_list;
+ 
+ /**
+- * Please aware there's an asumption for all the parsers.
++ * Please be aware there's an assumption for all the parsers.
+  * rte_flow_item is using big endian, rte_flow_attr and
+  * rte_flow_action are using CPU order.
+  * Because the pattern is used to describe the packets,
+@@ -1608,7 +1608,7 @@ igb_flow_create(struct rte_eth_dev *dev,
+ 
+ /**
+  * Check if the flow rule is supported by igb.
+- * It only checkes the format. Don't guarantee the rule can be programmed into
++ * It only checks the format. It doesn't guarantee the rule can be programmed into
+  * the HW. Because there can be no enough room for the rule.
+  */
+ static int
+diff --git a/dpdk/drivers/net/e1000/igb_pf.c b/dpdk/drivers/net/e1000/igb_pf.c
+index fe355ef6b3..3f3fd0d61e 100644
+--- a/dpdk/drivers/net/e1000/igb_pf.c
++++ b/dpdk/drivers/net/e1000/igb_pf.c
+@@ -155,7 +155,7 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
+ 	else
+ 		E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
+ 
+-	/* clear VMDq map to perment rar 0 */
++	/* clear VMDq map to permanent rar 0 */
+ 	rah = E1000_READ_REG(hw, E1000_RAH(0));
+ 	rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);
+ 	E1000_WRITE_REG(hw, E1000_RAH(0), rah);
+diff --git a/dpdk/drivers/net/e1000/igb_rxtx.c b/dpdk/drivers/net/e1000/igb_rxtx.c
+index 4a311a7b18..f32dee46df 100644
+--- a/dpdk/drivers/net/e1000/igb_rxtx.c
++++ b/dpdk/drivers/net/e1000/igb_rxtx.c
+@@ -150,7 +150,7 @@ union igb_tx_offload {
+ 	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
+ 
+ /**
+- * Strucutre to check if new context need be built
++ * Structure to check if new context need be built
+  */
+ struct igb_advctx_info {
+ 	uint64_t flags;           /**< ol_flags related to context build. */
+@@ -967,7 +967,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	 * register.
+ 	 * Update the RDT with the value of the last processed RX descriptor
+ 	 * minus 1, to guarantee that the RDT register is never equal to the
+-	 * RDH register, which creates a "full" ring situtation from the
++	 * RDH register, which creates a "full" ring situation from the
+ 	 * hardware point of view...
+ 	 */
+ 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+@@ -1229,7 +1229,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	 * register.
+ 	 * Update the RDT with the value of the last processed RX descriptor
+ 	 * minus 1, to guarantee that the RDT register is never equal to the
+-	 * RDH register, which creates a "full" ring situtation from the
++	 * RDH register, which creates a "full" ring situation from the
+ 	 * hardware point of view...
+ 	 */
+ 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+@@ -1252,7 +1252,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+  * Maximum number of Ring Descriptors.
+  *
+  * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
+- * desscriptors should meet the following condition:
++ * descriptors should meet the following condition:
+  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+  */
+ 
+@@ -1350,7 +1350,7 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
+ 						sw_ring[tx_id].last_id = tx_id;
+ 					}
+ 
+-					/* Move to next segemnt. */
++					/* Move to next segment. */
+ 					tx_id = sw_ring[tx_id].next_id;
+ 
+ 				} while (tx_id != tx_next);
+@@ -1383,7 +1383,7 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
+ 
+ 			/* Walk the list and find the next mbuf, if any. */
+ 			do {
+-				/* Move to next segemnt. */
++				/* Move to next segment. */
+ 				tx_id = sw_ring[tx_id].next_id;
+ 
+ 				if (sw_ring[tx_id].mbuf)
+@@ -2146,7 +2146,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+ 
+ 	igb_rss_disable(dev);
+ 
+-	/* RCTL: eanble VLAN filter */
++	/* RCTL: enable VLAN filter */
+ 	rctl = E1000_READ_REG(hw, E1000_RCTL);
+ 	rctl |= E1000_RCTL_VFE;
+ 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c
+index 634c97acf6..770b101688 100644
+--- a/dpdk/drivers/net/ena/ena_ethdev.c
++++ b/dpdk/drivers/net/ena/ena_ethdev.c
+@@ -38,11 +38,6 @@
+ 
+ #define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)
+ 
+-enum ethtool_stringset {
+-	ETH_SS_TEST             = 0,
+-	ETH_SS_STATS,
+-};
+-
+ struct ena_stats {
+ 	char name[ETH_GSTRING_LEN];
+ 	int stat_offset;
+@@ -280,6 +275,15 @@ static const struct eth_dev_ops ena_dev_ops = {
+ 	.rss_hash_conf_get    = ena_rss_hash_conf_get,
+ };
+ 
++static inline void ena_trigger_reset(struct ena_adapter *adapter,
++				     enum ena_regs_reset_reason_types reason)
++{
++	if (likely(!adapter->trigger_reset)) {
++		adapter->reset_reason = reason;
++		adapter->trigger_reset = true;
++	}
++}
++
+ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
+ 				       struct ena_com_rx_ctx *ena_rx_ctx,
+ 				       bool fill_hash)
+@@ -306,7 +310,13 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
+ 		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
+ 	else
+ 		if (unlikely(ena_rx_ctx->l4_csum_err))
+-			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
++			/*
++			 * For the L4 Rx checksum offload the HW may indicate
++			 * bad checksum although it's valid. Because of that,
++			 * we're setting the UNKNOWN flag to let the app
++			 * re-verify the checksum.
++			 */
++			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
+ 		else
+ 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+ 
+@@ -344,6 +354,8 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
+ 
+ 		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
+ 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
++			/* For the IPv6 packets, DF always needs to be true. */
++			ena_tx_ctx->df = 1;
+ 		} else {
+ 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
+ 
+@@ -351,7 +363,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
+ 			if (mbuf->packet_type &
+ 				(RTE_PTYPE_L4_NONFRAG
+ 				 | RTE_PTYPE_INNER_L4_NONFRAG))
+-				ena_tx_ctx->df = true;
++				ena_tx_ctx->df = 1;
+ 		}
+ 
+ 		/* check if L4 checksum is needed */
+@@ -399,8 +411,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+ 
+ 	/* Trigger device reset */
+ 	++tx_ring->tx_stats.bad_req_id;
+-	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+-	tx_ring->adapter->trigger_reset	= true;
++	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
+ 	return -EFAULT;
+ }
+ 
+@@ -1408,7 +1419,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+ 		++rxq->rx_stats.refill_partial;
+ 	}
+ 
+-	/* When we submitted free recources to device... */
++	/* When we submitted free resources to device... */
+ 	if (likely(i > 0)) {
+ 		/* ...let HW know that it can fill buffers with data. */
+ 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
+@@ -1529,8 +1540,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+ 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
+ 	    adapter->keep_alive_timeout)) {
+ 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
+-		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
+-		adapter->trigger_reset = true;
++		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
+ 		++adapter->dev_stats.wd_expired;
+ 	}
+ }
+@@ -1540,8 +1550,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
+ {
+ 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
+ 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n");
+-		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
+-		adapter->trigger_reset = true;
++		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
+ 	}
+ }
+ 
+@@ -1632,6 +1641,9 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
+ 	struct rte_eth_dev *dev = arg;
+ 	struct ena_adapter *adapter = dev->data->dev_private;
+ 
++	if (unlikely(adapter->trigger_reset))
++		return;
++
+ 	check_for_missing_keep_alive(adapter);
+ 	check_for_admin_com_state(adapter);
+ 	check_for_tx_completions(adapter);
+@@ -1682,6 +1694,13 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter,
+ 		return 0;
+ 	}
+ 
++	if (adapter->dev_mem_base == NULL) {
++		PMD_DRV_LOG(ERR,
++			"LLQ is advertised as supported, but device doesn't expose mem bar\n");
++		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
++		return 0;
++	}
++
+ 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+ 	if (unlikely(rc)) {
+ 		PMD_INIT_LOG(WARNING,
+@@ -1694,13 +1713,6 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter,
+ 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ 		return 0;
+ 
+-	if (!adapter->dev_mem_base) {
+-		PMD_DRV_LOG(ERR,
+-			"Unable to access LLQ BAR resource. Fallback to host mode policy.\n");
+-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+-		return 0;
+-	}
+-
+ 	ena_dev->mem_bar = adapter->dev_mem_base;
+ 
+ 	return 0;
+@@ -2028,9 +2040,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
+ 	 */
+ 	adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
+ 
+-	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+-	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+-
+ 	return 0;
+ }
+ 
+@@ -2325,14 +2334,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 				rc);
+ 			if (rc == ENA_COM_NO_SPACE) {
+ 				++rx_ring->rx_stats.bad_desc_num;
+-				rx_ring->adapter->reset_reason =
+-					ENA_REGS_RESET_TOO_MANY_RX_DESCS;
++				ena_trigger_reset(rx_ring->adapter,
++					ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ 			} else {
+ 				++rx_ring->rx_stats.bad_req_id;
+-				rx_ring->adapter->reset_reason =
+-					ENA_REGS_RESET_INV_RX_REQ_ID;
++				ena_trigger_reset(rx_ring->adapter,
++					ENA_REGS_RESET_INV_RX_REQ_ID);
+ 			}
+-			rx_ring->adapter->trigger_reset = true;
+ 			return 0;
+ 		}
+ 
+@@ -2732,9 +2740,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
+ 	if (unlikely(rc)) {
+ 		PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
+ 		++tx_ring->tx_stats.prepare_ctx_err;
+-		tx_ring->adapter->reset_reason =
+-		    ENA_REGS_RESET_DRIVER_INVALID_STATE;
+-		tx_ring->adapter->trigger_reset = true;
++		ena_trigger_reset(tx_ring->adapter,
++			ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ 		return rc;
+ 	}
+ 
+diff --git a/dpdk/drivers/net/ena/ena_ethdev.h b/dpdk/drivers/net/ena/ena_ethdev.h
+index 865e1241e0..0632f793d0 100644
+--- a/dpdk/drivers/net/ena/ena_ethdev.h
++++ b/dpdk/drivers/net/ena/ena_ethdev.h
+@@ -42,7 +42,7 @@
+ 
+ /* While processing submitted and completed descriptors (rx and tx path
+  * respectively) in a loop it is desired to:
+- *  - perform batch submissions while populating sumbissmion queue
++ *  - perform batch submissions while populating submission queue
+  *  - avoid blocking transmission of other packets during cleanup phase
+  * Hence the utilization ratio of 1/8 of a queue size or max value if the size
+  * of the ring is very big - like 8k Rx rings.
+@@ -280,11 +280,6 @@ struct ena_adapter {
+ 	struct ena_driver_stats *drv_stats;
+ 	enum ena_adapter_state state;
+ 
+-	uint64_t tx_supported_offloads;
+-	uint64_t tx_selected_offloads;
+-	uint64_t rx_supported_offloads;
+-	uint64_t rx_selected_offloads;
+-
+ 	bool link_status;
+ 
+ 	enum ena_regs_reset_reason_types reset_reason;
+diff --git a/dpdk/drivers/net/ena/ena_rss.c b/dpdk/drivers/net/ena/ena_rss.c
+index be4007e3f3..8193eaf6fc 100644
+--- a/dpdk/drivers/net/ena/ena_rss.c
++++ b/dpdk/drivers/net/ena/ena_rss.c
+@@ -51,15 +51,14 @@ void ena_rss_key_fill(void *key, size_t size)
+ 	static uint8_t default_key[ENA_HASH_KEY_SIZE];
+ 	size_t i;
+ 
+-	RTE_ASSERT(size <= ENA_HASH_KEY_SIZE);
+-
+ 	if (!key_generated) {
+-		for (i = 0; i < ENA_HASH_KEY_SIZE; ++i)
++		for (i = 0; i < RTE_DIM(default_key); ++i)
+ 			default_key[i] = rte_rand() & 0xff;
+ 		key_generated = true;
+ 	}
+ 
+-	rte_memcpy(key, default_key, size);
++	RTE_ASSERT(size <= sizeof(default_key));
++	rte_memcpy(key, default_key, RTE_MIN(size, sizeof(default_key)));
+ }
+ 
+ int ena_rss_reta_update(struct rte_eth_dev *dev,
+diff --git a/dpdk/drivers/net/enetfec/enet_ethdev.c b/dpdk/drivers/net/enetfec/enet_ethdev.c
+index 714f8ac7ec..c938e58204 100644
+--- a/dpdk/drivers/net/enetfec/enet_ethdev.c
++++ b/dpdk/drivers/net/enetfec/enet_ethdev.c
+@@ -2,9 +2,12 @@
+  * Copyright 2020-2021 NXP
+  */
+ 
++#include <inttypes.h>
++
+ #include <ethdev_vdev.h>
+ #include <ethdev_driver.h>
+ #include <rte_io.h>
++
+ #include "enet_pmd_logs.h"
+ #include "enet_ethdev.h"
+ #include "enet_regs.h"
+@@ -454,6 +457,12 @@ enetfec_rx_queue_setup(struct rte_eth_dev *dev,
+ 		return -EINVAL;
+ 	}
+ 
++	if (queue_idx >= ENETFEC_MAX_Q) {
++		ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d\n",
++			queue_idx, ENETFEC_MAX_Q);
++		return -EINVAL;
++	}
++
+ 	/* allocate receive queue */
+ 	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
+ 	if (rxq == NULL) {
+diff --git a/dpdk/drivers/net/enetfec/enet_regs.h b/dpdk/drivers/net/enetfec/enet_regs.h
+index a300c6f8bc..c9400957f8 100644
+--- a/dpdk/drivers/net/enetfec/enet_regs.h
++++ b/dpdk/drivers/net/enetfec/enet_regs.h
+@@ -12,7 +12,7 @@
+ #define RX_BD_CR	((ushort)0x0004) /* CRC or Frame error */
+ #define RX_BD_SH	((ushort)0x0008) /* Reserved */
+ #define RX_BD_NO	((ushort)0x0010) /* Rcvd non-octet aligned frame */
+-#define RX_BD_LG	((ushort)0x0020) /* Rcvd frame length voilation */
++#define RX_BD_LG	((ushort)0x0020) /* Rcvd frame length violation */
+ #define RX_BD_FIRST	((ushort)0x0400) /* Reserved */
+ #define RX_BD_LAST	((ushort)0x0800) /* last buffer in the frame */
+ #define RX_BD_INT	0x00800000
+diff --git a/dpdk/drivers/net/enic/enic_flow.c b/dpdk/drivers/net/enic/enic_flow.c
+index 33147169ba..cf51793cfe 100644
+--- a/dpdk/drivers/net/enic/enic_flow.c
++++ b/dpdk/drivers/net/enic/enic_flow.c
+@@ -405,7 +405,7 @@ enic_copy_item_ipv4_v1(struct copy_item_args *arg)
+ 		return ENOTSUP;
+ 	}
+ 
+-	/* check that the suppied mask exactly matches capabilty */
++	/* check that the supplied mask exactly matches capability */
+ 	if (!mask_exact_match((const uint8_t *)&supported_mask,
+ 			      (const uint8_t *)item->mask, sizeof(*mask))) {
+ 		ENICPMD_LOG(ERR, "IPv4 exact match mask");
+@@ -443,7 +443,7 @@ enic_copy_item_udp_v1(struct copy_item_args *arg)
+ 		return ENOTSUP;
+ 	}
+ 
+-	/* check that the suppied mask exactly matches capabilty */
++	/* check that the supplied mask exactly matches capability */
+ 	if (!mask_exact_match((const uint8_t *)&supported_mask,
+ 			      (const uint8_t *)item->mask, sizeof(*mask))) {
+ 		ENICPMD_LOG(ERR, "UDP exact match mask");
+@@ -482,7 +482,7 @@ enic_copy_item_tcp_v1(struct copy_item_args *arg)
+ 		return ENOTSUP;
+ 	}
+ 
+-	/* check that the suppied mask exactly matches capabilty */
++	/* check that the supplied mask exactly matches capability */
+ 	if (!mask_exact_match((const uint8_t *)&supported_mask,
+ 			     (const uint8_t *)item->mask, sizeof(*mask))) {
+ 		ENICPMD_LOG(ERR, "TCP exact match mask");
+@@ -1044,14 +1044,14 @@ fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
+ }
+ 
+ /**
+- * Build the intenal enic filter structure from the provided pattern. The
++ * Build the internal enic filter structure from the provided pattern. The
+  * pattern is validated as the items are copied.
+  *
+  * @param pattern[in]
+  * @param items_info[in]
+  *   Info about this NICs item support, like valid previous items.
+  * @param enic_filter[out]
+- *   NIC specfilc filters derived from the pattern.
++ *   NIC specific filters derived from the pattern.
+  * @param error[out]
+  */
+ static int
+@@ -1123,12 +1123,12 @@ enic_copy_filter(const struct rte_flow_item pattern[],
+ }
+ 
+ /**
+- * Build the intenal version 1 NIC action structure from the provided pattern.
++ * Build the internal version 1 NIC action structure from the provided pattern.
+  * The pattern is validated as the items are copied.
+  *
+  * @param actions[in]
+  * @param enic_action[out]
+- *   NIC specfilc actions derived from the actions.
++ *   NIC specific actions derived from the actions.
+  * @param error[out]
+  */
+ static int
+@@ -1170,12 +1170,12 @@ enic_copy_action_v1(__rte_unused struct enic *enic,
+ }
+ 
+ /**
+- * Build the intenal version 2 NIC action structure from the provided pattern.
++ * Build the internal version 2 NIC action structure from the provided pattern.
+  * The pattern is validated as the items are copied.
+  *
+  * @param actions[in]
+  * @param enic_action[out]
+- *   NIC specfilc actions derived from the actions.
++ *   NIC specific actions derived from the actions.
+  * @param error[out]
+  */
+ static int
+diff --git a/dpdk/drivers/net/enic/enic_fm_flow.c b/dpdk/drivers/net/enic/enic_fm_flow.c
+index ae43f36bc0..ab73cd8530 100644
+--- a/dpdk/drivers/net/enic/enic_fm_flow.c
++++ b/dpdk/drivers/net/enic/enic_fm_flow.c
+@@ -721,7 +721,7 @@ enic_fm_copy_item_gtp(struct copy_item_args *arg)
+ 	}
+ 
+ 	/* NIC does not support GTP tunnels. No Items are allowed after this.
+-	 * This prevents the specificaiton of further items.
++	 * This prevents the specification of further items.
+ 	 */
+ 	arg->header_level = 0;
+ 
+@@ -733,7 +733,7 @@ enic_fm_copy_item_gtp(struct copy_item_args *arg)
+ 
+ 	/*
+ 	 * Use the raw L4 buffer to match GTP as fm_header_set does not have
+-	 * GTP header. UDP dst port must be specifiec. Using the raw buffer
++	 * GTP header. UDP dst port must be specific. Using the raw buffer
+ 	 * does not affect such UDP item, since we skip UDP in the raw buffer.
+ 	 */
+ 	fm_data->fk_header_select |= FKH_L4RAW;
+@@ -1846,7 +1846,7 @@ enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
+ 	/* Remove trailing comma */
+ 	if (buf[0])
+ 		*(bp - 1) = '\0';
+-	ENICPMD_LOG(DEBUG, "       Acions: %s", buf);
++	ENICPMD_LOG(DEBUG, "       Actions: %s", buf);
+ }
+ 
+ static int
+@@ -2364,7 +2364,7 @@ enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
+ 	if (ret < 0 && ret != -ENOENT)
+ 		return rte_flow_error_set(error, -ret,
+ 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+-				   NULL, "enic: rte_hash_lookup(aciton)");
++				   NULL, "enic: rte_hash_lookup(action)");
+ 
+ 	if (ret == -ENOENT) {
+ 		/* Allocate a new action on the NIC. */
+@@ -2372,11 +2372,11 @@ enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
+ 		memcpy(fma, action_in, sizeof(*fma));
+ 
+ 		ah = calloc(1, sizeof(*ah));
+-		memcpy(&ah->key, action_in, sizeof(struct fm_action));
+ 		if (ah == NULL)
+ 			return rte_flow_error_set(error, ENOMEM,
+ 					   RTE_FLOW_ERROR_TYPE_HANDLE,
+ 					   NULL, "enic: calloc(fm-action)");
++		memcpy(&ah->key, action_in, sizeof(struct fm_action));
+ 		args[0] = FM_ACTION_ALLOC;
+ 		args[1] = fm->cmd.pa;
+ 		ret = flowman_cmd(fm, args, 2);
+@@ -2435,7 +2435,7 @@ __enic_fm_flow_add_entry(struct enic_flowman *fm,
+ 
+ 	ENICPMD_FUNC_TRACE();
+ 
+-	/* Get or create an aciton handle. */
++	/* Get or create an action handle. */
+ 	ret = enic_action_handle_get(fm, action_in, error, &ah);
+ 	if (ret)
+ 		return ret;
+diff --git a/dpdk/drivers/net/enic/enic_main.c b/dpdk/drivers/net/enic/enic_main.c
+index 7f84b5f935..97d97ea793 100644
+--- a/dpdk/drivers/net/enic/enic_main.c
++++ b/dpdk/drivers/net/enic/enic_main.c
+@@ -1137,7 +1137,7 @@ int enic_disable(struct enic *enic)
+ 	}
+ 
+ 	/* If we were using interrupts, set the interrupt vector to -1
+-	 * to disable interrupts.  We are not disabling link notifcations,
++	 * to disable interrupts.  We are not disabling link notifications,
+ 	 * though, as we want the polling of link status to continue working.
+ 	 */
+ 	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
+diff --git a/dpdk/drivers/net/enic/enic_rxtx.c b/dpdk/drivers/net/enic/enic_rxtx.c
+index c44715bfd0..33e96b480e 100644
+--- a/dpdk/drivers/net/enic/enic_rxtx.c
++++ b/dpdk/drivers/net/enic/enic_rxtx.c
+@@ -653,7 +653,7 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
+ 		 * The app should not send oversized
+ 		 * packets. tx_pkt_prepare includes a check as
+ 		 * well. But some apps ignore the device max size and
+-		 * tx_pkt_prepare. Oversized packets cause WQ errrors
++		 * tx_pkt_prepare. Oversized packets cause WQ errors
+ 		 * and the NIC ends up disabling the whole WQ. So
+ 		 * truncate packets..
+ 		 */
+diff --git a/dpdk/drivers/net/failsafe/failsafe.c b/dpdk/drivers/net/failsafe/failsafe.c
+index 3c754a5f66..05cf533896 100644
+--- a/dpdk/drivers/net/failsafe/failsafe.c
++++ b/dpdk/drivers/net/failsafe/failsafe.c
+@@ -308,8 +308,8 @@ fs_rte_eth_free(const char *name)
+ 	if (dev == NULL)
+ 		return 0; /* port already released */
+ 	ret = failsafe_eth_dev_close(dev);
+-	rte_eth_dev_release_port(dev);
+ 	rte_intr_instance_free(PRIV(dev)->intr_handle);
++	rte_eth_dev_release_port(dev);
+ 	return ret;
+ }
+ 
+diff --git a/dpdk/drivers/net/fm10k/fm10k.h b/dpdk/drivers/net/fm10k/fm10k.h
+index 7cfa29faa8..17a7056c45 100644
+--- a/dpdk/drivers/net/fm10k/fm10k.h
++++ b/dpdk/drivers/net/fm10k/fm10k.h
+@@ -44,7 +44,7 @@
+ #define FM10K_TX_MAX_MTU_SEG UINT8_MAX
+ 
+ /*
+- * byte aligment for HW RX data buffer
++ * byte alignment for HW RX data buffer
+  * Datasheet requires RX buffer addresses shall either be 512-byte aligned or
+  * be 8-byte aligned but without crossing host memory pages (4KB alignment
+  * boundaries). Satisfy first option.
+diff --git a/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/dpdk/drivers/net/fm10k/fm10k_ethdev.c
+index 43e1d13431..8bbd8b445d 100644
+--- a/dpdk/drivers/net/fm10k/fm10k_ethdev.c
++++ b/dpdk/drivers/net/fm10k/fm10k_ethdev.c
+@@ -290,7 +290,7 @@ rx_queue_free(struct fm10k_rx_queue *q)
+ }
+ 
+ /*
+- * disable RX queue, wait unitl HW finished necessary flush operation
++ * disable RX queue, wait until HW finishes necessary flush operation
+  */
+ static inline int
+ rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+@@ -379,7 +379,7 @@ tx_queue_free(struct fm10k_tx_queue *q)
+ }
+ 
+ /*
+- * disable TX queue, wait unitl HW finished necessary flush operation
++ * disable TX queue, wait until HW finishes necessary flush operation
+  */
+ static inline int
+ tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+@@ -453,7 +453,7 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
+ 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 
+-	/* multipe queue mode checking */
++	/* multiple queue mode checking */
+ 	ret  = fm10k_check_mq_mode(dev);
+ 	if (ret != 0) {
+ 		PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
+@@ -2553,7 +2553,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -2676,7 +2676,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -3034,7 +3034,7 @@ fm10k_params_init(struct rte_eth_dev *dev)
+ 	struct fm10k_dev_info *info =
+ 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ 
+-	/* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
++	/* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
+ 	 * there is no way to get link status without reading BAR4.  Until this
+ 	 * works, assume we have maximum bandwidth.
+ 	 * @todo - fix bus info
+diff --git a/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c b/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c
+index 1269250e23..10ce5a7582 100644
+--- a/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c
++++ b/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c
+@@ -212,7 +212,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
+ 	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ 
+ #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
+-	/* whithout rx ol_flags, no VP flag report */
++	/* without rx ol_flags, no VP flag report */
+ 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+ 		return -1;
+ #endif
+@@ -239,7 +239,7 @@ fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
+ 	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+ 
+ 	mb_def.nb_segs = 1;
+-	/* data_off will be ajusted after new mbuf allocated for 512-byte
++	/* data_off will be adjusted after new mbuf allocated for 512-byte
+ 	 * alignment.
+ 	 */
+ 	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+@@ -410,7 +410,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
+ 		return 0;
+ 
+-	/* Vecotr RX will process 4 packets at a time, strip the unaligned
++	/* Vector RX will process 4 packets at a time, strip the unaligned
+ 	 * tails in case it's not multiple of 4.
+ 	 */
+ 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
+@@ -481,7 +481,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+ 
+ #if defined(RTE_ARCH_X86_64)
+-		/* B.1 load 2 64 bit mbuf poitns */
++		/* B.1 load 2 64-bit mbuf pointers */
+ 		mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+ #endif
+ 
+@@ -573,7 +573,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 
+ 		fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
+ 
+-		/* C.4 calc avaialbe number of desc */
++		/* C.4 calc available number of desc */
+ 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ 		nb_pkts_recd += var;
+ 		if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
+diff --git a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c
+index 1853511c3b..e8d9aaba84 100644
+--- a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c
++++ b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c
+@@ -255,7 +255,7 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+  * Interrupt handler triggered by NIC  for handling
+  * specific event.
+  *
+- * @param: The address of parameter (struct rte_eth_dev *) regsitered before.
++ * @param: The address of parameter (struct rte_eth_dev *) registered before.
+  */
+ static void hinic_dev_interrupt_handler(void *param)
+ {
+@@ -336,7 +336,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
+ 		return err;
+ 	}
+ 
+-	/* init vlan offoad */
++	/* init VLAN offload */
+ 	err = hinic_vlan_offload_set(dev,
+ 				RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
+ 	if (err) {
+diff --git a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h
+index 5eca8b10b9..8e6251f69f 100644
+--- a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h
++++ b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h
+@@ -170,7 +170,7 @@ struct tag_tcam_key_mem {
+ 		/*
+ 		 * tunnel packet, mask must be 0xff, spec value is 1;
+ 		 * normal packet, mask must be 0, spec value is 0;
+-		 * if tunnal packet, ucode use
++		 * if tunnel packet, ucode use
+ 		 * sip/dip/protocol/src_port/dst_dport from inner packet
+ 		 */
+ 		u32 tunnel_flag:8;
+diff --git a/dpdk/drivers/net/hinic/hinic_pmd_flow.c b/dpdk/drivers/net/hinic/hinic_pmd_flow.c
+index d71a42afbd..2cf24ebcf6 100644
+--- a/dpdk/drivers/net/hinic/hinic_pmd_flow.c
++++ b/dpdk/drivers/net/hinic/hinic_pmd_flow.c
+@@ -734,7 +734,7 @@ static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
+  * END
+  * other members in mask and spec should set to 0x00.
+  * item->last should be NULL.
+- * Please aware there's an asumption for all the parsers.
++ * Please be aware there's an assumption for all the parsers.
+  * rte_flow_item is using big endian, rte_flow_attr and
+  * rte_flow_action are using CPU order.
+  * Because the pattern is used to describe the packets,
+@@ -1630,7 +1630,7 @@ static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
+ 
+ /**
+  * Check if the flow rule is supported by nic.
+- * It only checkes the format. Don't guarantee the rule can be programmed into
++ * It only checks the format. It doesn't guarantee the rule can be programmed into
+  * the HW. Because there can be no enough room for the rule.
+  */
+ static int hinic_flow_validate(struct rte_eth_dev *dev,
+diff --git a/dpdk/drivers/net/hinic/hinic_pmd_tx.c b/dpdk/drivers/net/hinic/hinic_pmd_tx.c
+index 2688817f37..f09b1a6e1e 100644
+--- a/dpdk/drivers/net/hinic/hinic_pmd_tx.c
++++ b/dpdk/drivers/net/hinic/hinic_pmd_tx.c
+@@ -1144,7 +1144,7 @@ u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts)
+ 		mbuf_pkt = *tx_pkts++;
+ 		queue_info = 0;
+ 
+-		/* 1. parse sge and tx offlod info from mbuf */
++		/* 1. parse sge and tx offload info from mbuf */
+ 		if (unlikely(!hinic_get_sge_txoff_info(mbuf_pkt,
+ 						       &sqe_info, &off_info))) {
+ 			txq->txq_stats.off_errs++;
+diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c
+index 2ce59d8de6..3495e2acc1 100644
+--- a/dpdk/drivers/net/hns3/hns3_cmd.c
++++ b/dpdk/drivers/net/hns3/hns3_cmd.c
+@@ -466,7 +466,7 @@ hns3_mask_capability(struct hns3_hw *hw,
+ 	for (i = 0; i < MAX_CAPS_BIT; i++) {
+ 		if (!(caps_masked & BIT_ULL(i)))
+ 			continue;
+-		hns3_info(hw, "mask capabiliy: id-%u, name-%s.",
++		hns3_info(hw, "mask capability: id-%u, name-%s.",
+ 			  i, hns3_get_caps_name(i));
+ 	}
+ }
+@@ -635,39 +635,6 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
+ 	struct hns3_cmd_desc desc;
+ 	uint32_t compat = 0;
+ 
+-#if defined(RTE_HNS3_ONLY_1630_FPGA)
+-	/* If resv reg enabled phy driver of imp is not configured, driver
+-	 * will use temporary phy driver.
+-	 */
+-	struct rte_pci_device *pci_dev;
+-	struct rte_eth_dev *eth_dev;
+-	uint8_t revision;
+-	int ret;
+-
+-	eth_dev = &rte_eth_devices[hw->data->port_id];
+-	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+-	/* Get PCI revision id */
+-	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
+-				  HNS3_PCI_REVISION_ID);
+-	if (ret != HNS3_PCI_REVISION_ID_LEN) {
+-		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
+-			     ret);
+-		return -EIO;
+-	}
+-	if (revision == PCI_REVISION_ID_HIP09_A) {
+-		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+-		if (hns3_dev_get_support(hw, COPPER) == 0 || pf->is_tmp_phy) {
+-			PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
+-			pf->is_tmp_phy = true;
+-			hns3_set_bit(hw->capability,
+-				     HNS3_DEV_SUPPORT_COPPER_B, 1);
+-			return 0;
+-		}
+-
+-		PMD_INIT_LOG(ERR, "***use phy driver in imp***");
+-	}
+-#endif
+-
+ 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
+ 	req = (struct hns3_firmware_compat_cmd *)desc.data;
+ 
+@@ -736,7 +703,7 @@ hns3_cmd_init(struct hns3_hw *hw)
+ 		return 0;
+ 
+ 	/*
+-	 * Requiring firmware to enable some features, firber port can still
++	 * Requiring firmware to enable some features, fiber port can still
+ 	 * work without it, but copper port can't work because the firmware
+ 	 * fails to take over the PHY.
+ 	 */
+diff --git a/dpdk/drivers/net/hns3/hns3_cmd.h b/dpdk/drivers/net/hns3/hns3_cmd.h
+index 81bc9e9d98..82c999061d 100644
+--- a/dpdk/drivers/net/hns3/hns3_cmd.h
++++ b/dpdk/drivers/net/hns3/hns3_cmd.h
+@@ -323,7 +323,7 @@ enum HNS3_CAPS_BITS {
+ 	HNS3_CAPS_UDP_TUNNEL_CSUM_B,
+ 	HNS3_CAPS_RAS_IMP_B,
+ 	HNS3_CAPS_RXD_ADV_LAYOUT_B = 15,
+-	HNS3_CAPS_TM_B = 17,
++	HNS3_CAPS_TM_B = 19,
+ };
+ 
+ /* Capabilities of VF dependent on the PF */
+@@ -603,7 +603,6 @@ struct hns3_cfg_gro_status_cmd {
+ 
+ #define HNS3_RSS_HASH_KEY_OFFSET_B	4
+ 
+-#define HNS3_RSS_CFG_TBL_SIZE	16
+ #define HNS3_RSS_HASH_KEY_NUM	16
+ /* Configure the algorithm mode and Hash Key, opcode:0x0D01 */
+ struct hns3_rss_generic_config_cmd {
+diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c
+index eac2aa1040..78158401f2 100644
+--- a/dpdk/drivers/net/hns3/hns3_common.c
++++ b/dpdk/drivers/net/hns3/hns3_common.c
+@@ -216,7 +216,7 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
+ 
+ 	/*
+ 	 * 500ms is empirical value in process of mailbox communication. If
+-	 * the delay value is set to one lower thanthe empirical value, mailbox
++	 * the delay value is set to one lower than the empirical value, mailbox
+ 	 * communication may fail.
+ 	 */
+ 	if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
+@@ -236,6 +236,12 @@ hns3_parse_devargs(struct rte_eth_dev *dev)
+ 	uint64_t dev_caps_mask = 0;
+ 	struct rte_kvargs *kvlist;
+ 
++	/* Set default value of runtime config parameters. */
++	hns->rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
++	hns->tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
++	hns->dev_caps_mask = 0;
++	hns->mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
++
+ 	if (dev->device->devargs == NULL)
+ 		return;
+ 
+@@ -603,7 +609,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
+ 	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
+ 	for (i = 0; i < hw->intr_tqps_num; i++) {
+ 		/*
+-		 * Set gap limiter/rate limiter/quanity limiter algorithm
++		 * Set gap limiter/rate limiter/quantity limiter algorithm
+ 		 * configuration for interrupt coalesce of queue's interrupt.
+ 		 */
+ 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c
+index 3d0159d787..e4417e87fd 100644
+--- a/dpdk/drivers/net/hns3/hns3_dcb.c
++++ b/dpdk/drivers/net/hns3/hns3_dcb.c
+@@ -25,7 +25,7 @@
+  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
+  *		Tick * (2 ^ IR_s)
+  *
+- * @return: 0: calculate sucessful, negative: fail
++ * @return: 0: calculate successful, negative: fail
+  */
+ static int
+ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
+@@ -36,8 +36,8 @@ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
+ #define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)
+ 
+ 	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
+-		6 * 256,    /* Prioriy level */
+-		6 * 32,     /* Prioriy group level */
++		6 * 256,    /* Priority level */
++		6 * 32,     /* Priority group level */
+ 		6 * 8,      /* Port level */
+ 		6 * 256     /* Qset level */
+ 	};
+@@ -1532,7 +1532,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
+ 
+ 	ret = hns3_dcb_schd_setup_hw(hw);
+ 	if (ret) {
+-		hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
++		hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
+ 		return ret;
+ 	}
+ 
+@@ -1737,7 +1737,7 @@ hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
+  * hns3_dcb_pfc_enable - Enable priority flow control
+  * @dev: pointer to ethernet device
+  *
+- * Configures the pfc settings for one porority.
++ * Configures the pfc settings for one priority.
+  */
+ int
+ hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
+diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c
+index 0bd12907d8..40a33549e0 100644
+--- a/dpdk/drivers/net/hns3/hns3_ethdev.c
++++ b/dpdk/drivers/net/hns3/hns3_ethdev.c
+@@ -227,17 +227,11 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
+ 	return ret;
+ }
+ 
+-static bool
+-hns3_is_1588_event_type(uint32_t event_type)
+-{
+-	return (event_type == HNS3_VECTOR0_EVENT_PTP);
+-}
+-
+ static void
+ hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
+ {
+ 	if (event_type == HNS3_VECTOR0_EVENT_RST ||
+-	    hns3_is_1588_event_type(event_type))
++	    event_type == HNS3_VECTOR0_EVENT_PTP)
+ 		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
+ 	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
+ 		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
+@@ -324,7 +318,7 @@ hns3_interrupt_handler(void *param)
+ 		hns3_schedule_reset(hns);
+ 	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
+ 		hns3_dev_handle_mbx_msg(hw);
+-	} else {
++	} else if (event_cause != HNS3_VECTOR0_EVENT_PTP) {
+ 		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
+ 			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
+ 			  vector0_int, ras_int, cmdq_int);
+@@ -574,7 +568,7 @@ hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
+ 	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
+ 		     vcfg->vlan2_vlan_prionly ? 1 : 0);
+ 
+-	/* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
++	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ 	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
+ 		     vcfg->strip_tag1_discard_en ? 1 : 0);
+ 	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
+@@ -784,7 +778,7 @@ hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
+ 		     vcfg->insert_tag2_en ? 1 : 0);
+ 	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
+ 
+-	/* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
++	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ 	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
+ 		     vcfg->tag_shift_mode_en ? 1 : 0);
+ 
+@@ -2033,11 +2027,9 @@ hns3_dev_configure(struct rte_eth_dev *dev)
+ 			goto cfg_err;
+ 	}
+ 
+-	/* When RSS is not configured, redirect the packet queue 0 */
+ 	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ 		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 		rss_conf = conf->rx_adv_conf.rss_conf;
+-		hw->rss_dis_flag = false;
+ 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
+ 		if (ret)
+ 			goto cfg_err;
+@@ -2093,7 +2085,6 @@ static int
+ hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
+ {
+ 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+-	uint16_t original_mps = hns->pf.mps;
+ 	int err;
+ 	int ret;
+ 
+@@ -2103,22 +2094,20 @@ hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
+ 		return ret;
+ 	}
+ 
+-	hns->pf.mps = mps;
+ 	ret = hns3_buffer_alloc(hw);
+ 	if (ret) {
+ 		hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
+ 		goto rollback;
+ 	}
+ 
++	hns->pf.mps = mps;
++
+ 	return 0;
+ 
+ rollback:
+-	err = hns3_set_mac_mtu(hw, original_mps);
+-	if (err) {
++	err = hns3_set_mac_mtu(hw, hns->pf.mps);
++	if (err)
+ 		hns3_err(hw, "fail to rollback MTU, err = %d", err);
+-		return ret;
+-	}
+-	hns->pf.mps = original_mps;
+ 
+ 	return ret;
+ }
+@@ -2767,6 +2756,10 @@ hns3_get_capability(struct hns3_hw *hw)
+ 	}
+ 	hw->revision = revision;
+ 
++	ret = hns3_query_mac_stats_reg_num(hw);
++	if (ret)
++		return ret;
++
+ 	if (revision < PCI_REVISION_ID_HIP09_A) {
+ 		hns3_set_default_dev_specifications(hw);
+ 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
+@@ -2820,11 +2813,8 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
+ 		}
+ 		break;
+ 	case HNS3_MEDIA_TYPE_FIBER:
+-		ret = 0;
+-		break;
+ 	case HNS3_MEDIA_TYPE_BACKPLANE:
+-		PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
+-		ret = -EOPNOTSUPP;
++		ret = 0;
+ 		break;
+ 	default:
+ 		PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
+@@ -2855,7 +2845,6 @@ hns3_get_board_configuration(struct hns3_hw *hw)
+ 
+ 	hw->mac.media_type = cfg.media_type;
+ 	hw->rss_size_max = cfg.rss_size_max;
+-	hw->rss_dis_flag = false;
+ 	memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
+ 	hw->mac.phy_addr = cfg.phy_addr;
+ 	hw->num_tx_desc = cfg.tqp_desc_num;
+@@ -3420,7 +3409,7 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw,
+  * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
+  * @hw: pointer to struct hns3_hw
+  * @buf_alloc: pointer to buffer calculation data
+- * @return: 0: calculate sucessful, negative: fail
++ * @return: 0: calculate successful, negative: fail
+  */
+ static int
+ hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
+@@ -4280,14 +4269,11 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev)
+ {
+ 	struct hns3_adapter *hns = eth_dev->data->dev_private;
+ 	struct hns3_hw *hw = &hns->hw;
+-	int ret = 0;
+ 
+ 	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
+-		ret = hns3_update_copper_link_info(hw);
+-	else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
+-		ret = hns3_update_fiber_link_info(hw);
++		return hns3_update_copper_link_info(hw);
+ 
+-	return ret;
++	return hns3_update_fiber_link_info(hw);
+ }
+ 
+ static int
+@@ -4396,10 +4382,12 @@ hns3_service_handler(void *param)
+ 	struct hns3_adapter *hns = eth_dev->data->dev_private;
+ 	struct hns3_hw *hw = &hns->hw;
+ 
+-	if (!hns3_is_reset_pending(hns))
++	if (!hns3_is_reset_pending(hns)) {
+ 		hns3_update_linkstatus_and_event(hw, true);
+-	else
++		hns3_update_hw_stats(hw);
++	} else {
+ 		hns3_warn(hw, "Cancel the query when reset is pending");
++	}
+ 
+ 	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
+ }
+@@ -4410,6 +4398,10 @@ hns3_init_hardware(struct hns3_adapter *hns)
+ 	struct hns3_hw *hw = &hns->hw;
+ 	int ret;
+ 
++	/*
++	 * All queue-related HW operations must be performed after the TCAM
++	 * table is configured.
++	 */
+ 	ret = hns3_map_tqp(hw);
+ 	if (ret) {
+ 		PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
+@@ -4550,14 +4542,14 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
+ }
+ 
+ /*
+- * Validity of supported_speed for firber and copper media type can be
++ * Validity of supported_speed for fiber and copper media type can be
+  * guaranteed by the following policy:
+  * Copper:
+  *       Although the initialization of the phy in the firmware may not be
+  *       completed, the firmware can guarantees that the supported_speed is
+  *       an valid value.
+  * Firber:
+- *       If the version of firmware supports the acitive query way of the
++ *       If the version of firmware supports the active query way of the
+  *       HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
+  *       through it. If unsupported, use the SFP's speed as the value of the
+  *       supported_speed.
+@@ -4574,11 +4566,13 @@ hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
++	if (mac->media_type == HNS3_MEDIA_TYPE_FIBER ||
++	    mac->media_type == HNS3_MEDIA_TYPE_BACKPLANE) {
+ 		/*
+ 		 * Some firmware does not support the report of supported_speed,
+-		 * and only report the effective speed of SFP. In this case, it
+-		 * is necessary to use the SFP's speed as the supported_speed.
++		 * and only report the effective speed of SFP/backplane. In this
++		 * case, it is necessary to use the SFP/backplane's speed as the
++		 * supported_speed.
+ 		 */
+ 		if (mac->supported_speed == 0)
+ 			mac->supported_speed =
+@@ -4650,13 +4644,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
+ 		goto err_cmd_init;
+ 	}
+ 
+-	/* Hardware statistics of imissed registers cleared. */
+-	ret = hns3_update_imissed_stats(hw, true);
+-	if (ret) {
+-		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+-		goto err_cmd_init;
+-	}
+-
+ 	hns3_config_all_msix_error(hw, true);
+ 
+ 	ret = rte_intr_callback_register(pci_dev->intr_handle,
+@@ -4682,7 +4669,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
+ 		goto err_get_config;
+ 	}
+ 
+-	ret = hns3_tqp_stats_init(hw);
++	ret = hns3_stats_init(hw);
+ 	if (ret)
+ 		goto err_get_config;
+ 
+@@ -4728,7 +4715,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
+ err_fdir:
+ 	hns3_uninit_umv_space(hw);
+ err_init_hw:
+-	hns3_tqp_stats_uninit(hw);
++	hns3_stats_uninit(hw);
+ err_get_config:
+ 	hns3_pf_disable_irq0(hw);
+ 	rte_intr_disable(pci_dev->intr_handle);
+@@ -4762,7 +4749,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
+ 	hns3_flow_uninit(eth_dev);
+ 	hns3_fdir_filter_uninit(hns);
+ 	hns3_uninit_umv_space(hw);
+-	hns3_tqp_stats_uninit(hw);
++	hns3_stats_uninit(hw);
+ 	hns3_config_mac_tnl_int(hw, false);
+ 	hns3_pf_disable_irq0(hw);
+ 	rte_intr_disable(pci_dev->intr_handle);
+@@ -4847,7 +4834,7 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
+ 
+ 	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
+ 		speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds);
+-	else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER)
++	else
+ 		speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds);
+ 
+ 	if (!(speed_bit & supported_speed)) {
+@@ -4991,32 +4978,35 @@ hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
+ 	return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
+ }
+ 
++static const char *
++hns3_get_media_type_name(uint8_t media_type)
++{
++	if (media_type == HNS3_MEDIA_TYPE_FIBER)
++		return "fiber";
++	else if (media_type == HNS3_MEDIA_TYPE_COPPER)
++		return "copper";
++	else if (media_type == HNS3_MEDIA_TYPE_BACKPLANE)
++		return "backplane";
++	else
++		return "unknown";
++}
++
+ static int
+ hns3_set_port_link_speed(struct hns3_hw *hw,
+ 			 struct hns3_set_link_speed_cfg *cfg)
+ {
+ 	int ret;
+ 
+-	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
+-#if defined(RTE_HNS3_ONLY_1630_FPGA)
+-		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+-		if (pf->is_tmp_phy)
+-			return 0;
+-#endif
+-
++	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
+ 		ret = hns3_set_copper_port_link_speed(hw, cfg);
+-		if (ret) {
+-			hns3_err(hw, "failed to set copper port link speed,"
+-				 "ret = %d.", ret);
+-			return ret;
+-		}
+-	} else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
++	else
+ 		ret = hns3_set_fiber_port_link_speed(hw, cfg);
+-		if (ret) {
+-			hns3_err(hw, "failed to set fiber port link speed,"
+-				 "ret = %d.", ret);
+-			return ret;
+-		}
++
++	if (ret) {
++		hns3_err(hw, "failed to set %s port link speed, ret = %d.",
++			 hns3_get_media_type_name(hw->mac.media_type),
++			 ret);
++		return ret;
+ 	}
+ 
+ 	return 0;
+@@ -5327,7 +5317,7 @@ hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
+ 
+ 	/*
+ 	 * Flow control auto-negotiation is not supported for fiber and
+-	 * backpalne media type.
++	 * backplane media type.
+ 	 */
+ 	case HNS3_MEDIA_TYPE_FIBER:
+ 	case HNS3_MEDIA_TYPE_BACKPLANE:
+@@ -5579,15 +5569,15 @@ hns3_reinit_dev(struct hns3_adapter *hns)
+ 		return ret;
+ 	}
+ 
+-	ret = hns3_reset_all_tqps(hns);
++	ret = hns3_init_hardware(hns);
+ 	if (ret) {
+-		hns3_err(hw, "Failed to reset all queues: %d", ret);
++		hns3_err(hw, "Failed to init hardware: %d", ret);
+ 		return ret;
+ 	}
+ 
+-	ret = hns3_init_hardware(hns);
++	ret = hns3_reset_all_tqps(hns);
+ 	if (ret) {
+-		hns3_err(hw, "Failed to init hardware: %d", ret);
++		hns3_err(hw, "Failed to reset all queues: %d", ret);
+ 		return ret;
+ 	}
+ 
+@@ -6191,7 +6181,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
+ 	}
+ 
+ 	/*
+-	 * FEC mode order defined in hns3 hardware is inconsistend with
++	 * FEC mode order defined in hns3 hardware is inconsistent with
+ 	 * that defined in the ethdev library. So the sequence needs
+ 	 * to be converted.
+ 	 */
+diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h
+index aa45b31261..134a33ee2f 100644
+--- a/dpdk/drivers/net/hns3/hns3_ethdev.h
++++ b/dpdk/drivers/net/hns3/hns3_ethdev.h
+@@ -126,7 +126,7 @@ struct hns3_tc_info {
+ 	uint8_t tc_sch_mode;  /* 0: sp; 1: dwrr */
+ 	uint8_t pgid;
+ 	uint32_t bw_limit;
+-	uint8_t up_to_tc_map; /* user priority maping on the TC */
++	uint8_t up_to_tc_map; /* user priority mapping on the TC */
+ };
+ 
+ struct hns3_dcb_info {
+@@ -502,8 +502,15 @@ struct hns3_hw {
+ 	struct hns3_tqp_stats tqp_stats;
+ 	/* Include Mac stats | Rx stats | Tx stats */
+ 	struct hns3_mac_stats mac_stats;
++	uint32_t mac_stats_reg_num;
+ 	struct hns3_rx_missed_stats imissed_stats;
+ 	uint64_t oerror_stats;
++	/*
++	 * The lock protects statistics updates between the stats APIs and
++	 * the periodic task.
++	 */
++	rte_spinlock_t stats_lock;
++
+ 	uint32_t fw_version;
+ 	uint16_t pf_vf_if_version;  /* version of communication interface */
+ 
+@@ -523,7 +530,6 @@ struct hns3_hw {
+ 
+ 	/* The configuration info of RSS */
+ 	struct hns3_rss_conf rss_info;
+-	bool rss_dis_flag; /* disable rss flag. true: disable, false: enable */
+ 	uint16_t rss_ind_tbl_size;
+ 	uint16_t rss_key_size;
+ 
+@@ -571,12 +577,12 @@ struct hns3_hw {
+ 	/*
+ 	 * vlan mode.
+ 	 * value range:
+-	 *      HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHFIT_AND_DISCARD_MODE
++	 *      HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE
+ 	 *
+ 	 *  - HNS3_SW_SHIFT_AND_DISCARD_MODE
+ 	 *     For some versions of hardware network engine, because of the
+ 	 *     hardware limitation, PMD needs to detect the PVID status
+-	 *     to work with haredware to implement PVID-related functions.
++	 *     to work with hardware to implement PVID-related functions.
+ 	 *     For example, driver need discard the stripped PVID tag to ensure
+ 	 *     the PVID will not report to mbuf and shift the inserted VLAN tag
+ 	 *     to avoid port based VLAN covering it.
+@@ -724,7 +730,7 @@ enum hns3_mp_req_type {
+ 	HNS3_MP_REQ_MAX
+ };
+ 
+-/* Pameters for IPC. */
++/* Parameters for IPC. */
+ struct hns3_mp_param {
+ 	enum hns3_mp_req_type type;
+ 	int port_id;
+diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c
+index 805abd4543..0af4dcb324 100644
+--- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c
++++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c
+@@ -242,7 +242,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
+ 		if (ret == -EPERM) {
+ 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ 					      old_addr);
+-			hns3_warn(hw, "Has permanet mac addr(%s) for vf",
++			hns3_warn(hw, "Has permanent mac addr(%s) for vf",
+ 				  mac_str);
+ 		} else {
+ 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+@@ -318,7 +318,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
+ 	 * 1. The promiscuous/allmulticast mode can be configured successfully
+ 	 *    only based on the trusted VF device. If based on the non trusted
+ 	 *    VF device, configuring promiscuous/allmulticast mode will fail.
+-	 *    The hns3 VF device can be confiruged as trusted device by hns3 PF
++	 *    The hns3 VF device can be configured as trusted device by hns3 PF
+ 	 *    kernel ethdev driver on the host by the following command:
+ 	 *      "ip link set <eth num> vf <vf id> turst on"
+ 	 * 2. After the promiscuous mode is configured successfully, hns3 VF PMD
+@@ -330,7 +330,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
+ 	 *    filter is still effective even in promiscuous mode. If upper
+ 	 *    applications don't call rte_eth_dev_vlan_filter API function to
+ 	 *    set vlan based on VF device, hns3 VF PMD will can't receive
+-	 *    the packets with vlan tag in promiscuoue mode.
++	 *    the packets with vlan tag in promiscuous mode.
+ 	 */
+ 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
+ 	req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
+@@ -496,7 +496,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
+ 	/* When RSS is not configured, redirect the packet queue 0 */
+ 	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ 		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+-		hw->rss_dis_flag = false;
+ 		rss_conf = conf->rx_adv_conf.rss_conf;
+ 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
+ 		if (ret)
+@@ -780,6 +779,14 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
+ 
+ 	while (remain_ms > 0) {
+ 		rte_delay_ms(HNS3_POLL_RESPONE_MS);
++		/*
++		 * The probe process may run in the interrupt thread context,
++		 * e.g. when a user attaches a device in the secondary process.
++		 * At that moment the mailbox handling task is blocked, so the
++		 * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE
++		 * mailbox from the PF driver to get this capability.
++		 */
++		hns3_dev_handle_mbx_msg(hw);
+ 		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
+ 			break;
+@@ -1031,7 +1038,6 @@ hns3vf_get_configuration(struct hns3_hw *hw)
+ 	int ret;
+ 
+ 	hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
+-	hw->rss_dis_flag = false;
+ 
+ 	/* Get device capability */
+ 	ret = hns3vf_get_capability(hw);
+@@ -1385,10 +1391,12 @@ hns3vf_service_handler(void *param)
+ 	 * Before querying the link status, check whether there is a reset
+ 	 * pending, and if so, abandon the query.
+ 	 */
+-	if (!hns3vf_is_reset_pending(hns))
++	if (!hns3vf_is_reset_pending(hns)) {
+ 		hns3vf_request_link_info(hw);
+-	else
++		hns3_update_hw_stats(hw);
++	} else {
+ 		hns3_warn(hw, "Cancel the query when reset is pending");
++	}
+ 
+ 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
+ 			  eth_dev);
+@@ -1558,17 +1566,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
+ 		goto err_get_config;
+ 	}
+ 
+-	ret = hns3_tqp_stats_init(hw);
++	ret = hns3_stats_init(hw);
+ 	if (ret)
+ 		goto err_get_config;
+ 
+-	/* Hardware statistics of imissed registers cleared. */
+-	ret = hns3_update_imissed_stats(hw, true);
+-	if (ret) {
+-		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+-		goto err_set_tc_queue;
+-	}
+-
+ 	ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
+ 	if (ret) {
+ 		PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
+@@ -1596,7 +1597,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
+ 	return 0;
+ 
+ err_set_tc_queue:
+-	hns3_tqp_stats_uninit(hw);
++	hns3_stats_uninit(hw);
+ 
+ err_get_config:
+ 	hns3vf_disable_irq0(hw);
+@@ -1627,7 +1628,7 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
+ 	(void)hns3vf_set_alive(hw, false);
+ 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
+ 	hns3_flow_uninit(eth_dev);
+-	hns3_tqp_stats_uninit(hw);
++	hns3_stats_uninit(hw);
+ 	hns3vf_disable_irq0(hw);
+ 	rte_intr_disable(pci_dev->intr_handle);
+ 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
+@@ -1925,6 +1926,7 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns)
+ static int
+ hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
+ {
++#define HNS3_WAIT_PF_RESET_READY_TIME 5
+ 	struct hns3_hw *hw = &hns->hw;
+ 	struct hns3_wait_data *wait_data = hw->reset.wait_data;
+ 	struct timeval tv;
+@@ -1945,12 +1947,14 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
+ 			return 0;
+ 
+ 		wait_data->check_completion = NULL;
+-		wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
++		wait_data->interval = HNS3_WAIT_PF_RESET_READY_TIME *
++			MSEC_PER_SEC * USEC_PER_MSEC;
+ 		wait_data->count = 1;
+ 		wait_data->result = HNS3_WAIT_REQUEST;
+ 		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
+ 				  wait_data);
+-		hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
++		hns3_warn(hw, "hardware is ready, delay %d sec for PF reset complete",
++				HNS3_WAIT_PF_RESET_READY_TIME);
+ 		return -EAGAIN;
+ 	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+ 		hns3_clock_gettime(&tv);
+@@ -2472,7 +2476,6 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
+ 	PMD_INIT_FUNC_TRACE();
+ 
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+-		__atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ 		hns3_mp_uninit(eth_dev);
+ 		return 0;
+ 	}
+diff --git a/dpdk/drivers/net/hns3/hns3_fdir.c b/dpdk/drivers/net/hns3/hns3_fdir.c
+index d043f5786d..2426264138 100644
+--- a/dpdk/drivers/net/hns3/hns3_fdir.c
++++ b/dpdk/drivers/net/hns3/hns3_fdir.c
+@@ -631,7 +631,7 @@ static bool hns3_fd_convert_tuple(struct hns3_hw *hw,
+ 		break;
+ 	default:
+ 		hns3_warn(hw, "not support tuple of (%u)", tuple);
+-		break;
++		return false;
+ 	}
+ 	return true;
+ }
+diff --git a/dpdk/drivers/net/hns3/hns3_fdir.h b/dpdk/drivers/net/hns3/hns3_fdir.h
+index f9efff3b52..07b393393d 100644
+--- a/dpdk/drivers/net/hns3/hns3_fdir.h
++++ b/dpdk/drivers/net/hns3/hns3_fdir.h
+@@ -139,7 +139,7 @@ struct hns3_fdir_rule {
+ 	uint32_t flags;
+ 	uint32_t fd_id; /* APP marked unique value for this rule. */
+ 	uint8_t action;
+-	/* VF id, avaiblable when flags with HNS3_RULE_FLAG_VF_ID. */
++	/* VF id, available when flags with HNS3_RULE_FLAG_VF_ID. */
+ 	uint8_t vf_id;
+ 	/*
+ 	 * equal 0 when action is drop.
+diff --git a/dpdk/drivers/net/hns3/hns3_flow.c b/dpdk/drivers/net/hns3/hns3_flow.c
+index 9f2f9cb6cd..24caf8e870 100644
+--- a/dpdk/drivers/net/hns3/hns3_flow.c
++++ b/dpdk/drivers/net/hns3/hns3_flow.c
+@@ -10,15 +10,6 @@
+ #include "hns3_logs.h"
+ #include "hns3_flow.h"
+ 
+-/* Default default keys */
+-static uint8_t hns3_hash_key[] = {
+-	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+-	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+-	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+-	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+-	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+-};
+-
+ static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
+ static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
+ 
+@@ -338,7 +329,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev,
+  *
+  * @param actions[in]
+  * @param rule[out]
+- *   NIC specfilc actions derived from the actions.
++ *   NIC specific actions derived from the actions.
+  * @param error[out]
+  */
+ static int
+@@ -369,7 +360,7 @@ hns3_handle_actions(struct rte_eth_dev *dev,
+ 		 * Queue region is implemented by FDIR + RSS in hns3 hardware,
+ 		 * the FDIR's action is one queue region (start_queue_id and
+ 		 * queue_num), then RSS spread packets to the queue region by
+-		 * RSS algorigthm.
++		 * RSS algorithm.
+ 		 */
+ 		case RTE_FLOW_ACTION_TYPE_RSS:
+ 			ret = hns3_handle_action_queue_region(dev, actions,
+@@ -940,7 +931,7 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+ 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+-					  "Ver/protocal is not supported in NVGRE");
++					  "Ver/protocol is not supported in NVGRE");
+ 
+ 	/* TNI must be totally masked or not. */
+ 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
+@@ -985,7 +976,7 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+ 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+-					  "Ver/protocal is not supported in GENEVE");
++					  "Ver/protocol is not supported in GENEVE");
+ 	/* VNI must be totally masked or not. */
+ 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
+ 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
+@@ -1238,6 +1229,7 @@ static bool
+ hns3_action_rss_same(const struct rte_flow_action_rss *comp,
+ 		     const struct rte_flow_action_rss *with)
+ {
++	bool rss_key_is_same;
+ 	bool func_is_same;
+ 
+ 	/*
+@@ -1251,13 +1243,19 @@ hns3_action_rss_same(const struct rte_flow_action_rss *comp,
+ 	if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
+ 		func_is_same = false;
+ 	else
+-		func_is_same = with->func ? (comp->func == with->func) : true;
++		func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ?
++				(comp->func == with->func) : true;
+ 
+-	return (func_is_same &&
++	if (with->key_len == 0 || with->key == NULL)
++		rss_key_is_same = 1;
++	else
++		rss_key_is_same = comp->key_len == with->key_len &&
++		!memcmp(comp->key, with->key, with->key_len);
++
++	return (func_is_same && rss_key_is_same &&
+ 		comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
+-		comp->level == with->level && comp->key_len == with->key_len &&
++		comp->level == with->level &&
+ 		comp->queue_num == with->queue_num &&
+-		!memcmp(comp->key, with->key, with->key_len) &&
+ 		!memcmp(comp->queue, with->queue,
+ 			sizeof(*with->queue) * with->queue_num));
+ }
+@@ -1309,7 +1307,7 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
+ }
+ 
+ /*
+- * This function is used to parse rss action validatation.
++ * This function is used to parse rss action validation.
+  */
+ static int
+ hns3_parse_rss_filter(struct rte_eth_dev *dev,
+@@ -1391,15 +1389,10 @@ hns3_disable_rss(struct hns3_hw *hw)
+ {
+ 	int ret;
+ 
+-	/* Redirected the redirection table to queue 0 */
+-	ret = hns3_rss_reset_indir_table(hw);
++	ret = hns3_set_rss_tuple_by_rss_hf(hw, 0);
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Disable RSS */
+-	hw->rss_info.conf.types = 0;
+-	hw->rss_dis_flag = true;
+-
+ 	return 0;
+ }
+ 
+@@ -1445,7 +1438,6 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
+ static int
+ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
+ {
+-	struct hns3_rss_tuple_cfg *tuple;
+ 	int ret;
+ 
+ 	hns3_parse_rss_key(hw, rss_config);
+@@ -1461,8 +1453,7 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
+ 
+ 	hw->rss_info.conf.func = rss_config->func;
+ 
+-	tuple = &hw->rss_info.rss_tuple_sets;
+-	ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
++	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_config->types);
+ 	if (ret)
+ 		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
+ 
+@@ -1682,7 +1673,7 @@ hns3_flow_args_check(const struct rte_flow_attr *attr,
+ 
+ /*
+  * Check if the flow rule is supported by hns3.
+- * It only checkes the format. Don't guarantee the rule can be programmed into
++ * It only checks the format. Don't guarantee the rule can be programmed into
+  * the HW. Because there can be no enough room for the rule.
+  */
+ static int
+diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c
+index b3563d4694..02028dcd9c 100644
+--- a/dpdk/drivers/net/hns3/hns3_mbx.c
++++ b/dpdk/drivers/net/hns3/hns3_mbx.c
+@@ -78,14 +78,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
+ 	mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
+ 	while (wait_time < mbx_time_limit) {
+ 		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+-			hns3_err(hw, "Don't wait for mbx respone because of "
++			hns3_err(hw, "Don't wait for mbx response because of "
+ 				 "disable_cmd");
+ 			return -EBUSY;
+ 		}
+ 
+ 		if (is_reset_pending(hns)) {
+ 			hw->mbx_resp.req_msg_data = 0;
+-			hns3_err(hw, "Don't wait for mbx respone because of "
++			hns3_err(hw, "Don't wait for mbx response because of "
+ 				 "reset pending");
+ 			return -EIO;
+ 		}
+diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h
+index d637bd2b23..0172a2e288 100644
+--- a/dpdk/drivers/net/hns3/hns3_mbx.h
++++ b/dpdk/drivers/net/hns3/hns3_mbx.h
+@@ -22,7 +22,7 @@ enum HNS3_MBX_OPCODE {
+ 	HNS3_MBX_GET_RETA,              /* (VF -> PF) get RETA */
+ 	HNS3_MBX_GET_RSS_KEY,           /* (VF -> PF) get RSS key */
+ 	HNS3_MBX_GET_MAC_ADDR,          /* (VF -> PF) get MAC addr */
+-	HNS3_MBX_PF_VF_RESP,            /* (PF -> VF) generate respone to VF */
++	HNS3_MBX_PF_VF_RESP,            /* (PF -> VF) generate response to VF */
+ 	HNS3_MBX_GET_BDNUM,             /* (VF -> PF) get BD num */
+ 	HNS3_MBX_GET_BUFSIZE,           /* (VF -> PF) get buffer size */
+ 	HNS3_MBX_GET_STREAMID,          /* (VF -> PF) get stream id */
+diff --git a/dpdk/drivers/net/hns3/hns3_mp.c b/dpdk/drivers/net/hns3/hns3_mp.c
+index 999b407f7d..e74ddea195 100644
+--- a/dpdk/drivers/net/hns3/hns3_mp.c
++++ b/dpdk/drivers/net/hns3/hns3_mp.c
+@@ -74,7 +74,6 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
+ 	struct hns3_mp_param *res = (struct hns3_mp_param *)mp_res.param;
+ 	const struct hns3_mp_param *param =
+ 		(const struct hns3_mp_param *)mp_msg->param;
+-	eth_tx_prep_t prep = NULL;
+ 	struct rte_eth_dev *dev;
+ 	int ret;
+ 
+@@ -98,14 +97,12 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
+ 	case HNS3_MP_REQ_START_TX:
+ 		PMD_INIT_LOG(INFO, "port %u starting Tx datapath",
+ 			     dev->data->port_id);
+-		dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
+-		dev->tx_pkt_prepare = prep;
++		hns3_start_tx_datapath(dev);
+ 		break;
+ 	case HNS3_MP_REQ_STOP_TX:
+ 		PMD_INIT_LOG(INFO, "port %u stopping Tx datapath",
+ 			     dev->data->port_id);
+-		dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
+-		dev->tx_pkt_prepare = NULL;
++		hns3_stop_tx_datapath(dev);
+ 		break;
+ 	default:
+ 		rte_errno = EINVAL;
+diff --git a/dpdk/drivers/net/hns3/hns3_ptp.c b/dpdk/drivers/net/hns3/hns3_ptp.c
+index 9a829d7011..0b0061bba5 100644
+--- a/dpdk/drivers/net/hns3/hns3_ptp.c
++++ b/dpdk/drivers/net/hns3/hns3_ptp.c
+@@ -81,7 +81,7 @@ hns3_timesync_configure(struct hns3_adapter *hns, bool en)
+ 	struct hns3_hw *hw = &hns->hw;
+ 	struct hns3_pf *pf = &hns->pf;
+ 	struct hns3_cmd_desc desc;
+-	int val;
++	uint32_t val;
+ 	int ret;
+ 
+ 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PTP_MODE, false);
+@@ -125,6 +125,7 @@ hns3_timesync_enable(struct rte_eth_dev *dev)
+ 
+ 	if (pf->ptp_enable)
+ 		return 0;
++	hns3_warn(hw, "note: please ensure Rx/Tx burst mode is simple or common when enabling PTP!");
+ 
+ 	rte_spinlock_lock(&hw->lock);
+ 	ret = hns3_timesync_configure(hns, true);
+diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c
+index 3a4b699ae2..980fbe74e8 100644
+--- a/dpdk/drivers/net/hns3/hns3_rss.c
++++ b/dpdk/drivers/net/hns3/hns3_rss.c
+@@ -9,10 +9,8 @@
+ #include "hns3_ethdev.h"
+ #include "hns3_logs.h"
+ 
+-/*
+- * The hash key used for rss initialization.
+- */
+-static const uint8_t hns3_hash_key[] = {
++/* Default hash key */
++const uint8_t hns3_hash_key[] = {
+ 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+@@ -152,10 +150,6 @@ static const struct {
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
+-	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
+-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
+-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
+ 	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
+@@ -241,31 +235,6 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key)
+ 	return 0;
+ }
+ 
+-/*
+- * Used to configure the tuple selection for RSS hash input.
+- */
+-static int
+-hns3_rss_set_input_tuple(struct hns3_hw *hw)
+-{
+-	struct hns3_rss_conf *rss_config = &hw->rss_info;
+-	struct hns3_rss_input_tuple_cmd *req;
+-	struct hns3_cmd_desc desc_tuple;
+-	int ret;
+-
+-	hns3_cmd_setup_basic_desc(&desc_tuple, HNS3_OPC_RSS_INPUT_TUPLE, false);
+-
+-	req = (struct hns3_rss_input_tuple_cmd *)desc_tuple.data;
+-
+-	req->tuple_field =
+-		rte_cpu_to_le_64(rss_config->rss_tuple_sets.rss_tuple_fields);
+-
+-	ret = hns3_cmd_send(hw, &desc_tuple, 1);
+-	if (ret)
+-		hns3_err(hw, "Configure RSS input tuple mode failed %d", ret);
+-
+-	return ret;
+-}
+-
+ /*
+  * rss_indirection_table command function, opcode:0x0D07.
+  * Used to configure the indirection table of rss.
+@@ -339,8 +308,7 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw)
+ }
+ 
+ int
+-hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
+-			     struct hns3_rss_tuple_cfg *tuple, uint64_t rss_hf)
++hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf)
+ {
+ 	struct hns3_rss_input_tuple_cmd *req;
+ 	struct hns3_cmd_desc desc;
+@@ -385,7 +353,8 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
+ 		return ret;
+ 	}
+ 
+-	tuple->rss_tuple_fields = rte_le_to_cpu_64(req->tuple_field);
++	/* Update the supported flow types once the tuple is set successfully */
++	hw->rss_info.conf.types = rss_hf;
+ 
+ 	return 0;
+ }
+@@ -403,55 +372,36 @@ int
+ hns3_dev_rss_hash_update(struct rte_eth_dev *dev,
+ 			 struct rte_eth_rss_conf *rss_conf)
+ {
+-	struct hns3_adapter *hns = dev->data->dev_private;
+-	struct hns3_hw *hw = &hns->hw;
+-	struct hns3_rss_tuple_cfg *tuple = &hw->rss_info.rss_tuple_sets;
+-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
++	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++	uint64_t rss_hf_bk = hw->rss_info.conf.types;
+ 	uint8_t key_len = rss_conf->rss_key_len;
+ 	uint64_t rss_hf = rss_conf->rss_hf;
+ 	uint8_t *key = rss_conf->rss_key;
+ 	int ret;
+ 
+-	if (hw->rss_dis_flag)
++	if (key && key_len != HNS3_RSS_KEY_SIZE) {
++		hns3_err(hw, "the hash key len(%u) is invalid, must be %u",
++			 key_len, HNS3_RSS_KEY_SIZE);
+ 		return -EINVAL;
++	}
+ 
+ 	rte_spinlock_lock(&hw->lock);
+-	ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_hf);
++	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_hf);
+ 	if (ret)
+-		goto conf_err;
+-
+-	if (rss_cfg->conf.types && rss_hf == 0) {
+-		/* Disable RSS, reset indirection table by local variable */
+-		ret = hns3_rss_reset_indir_table(hw);
+-		if (ret)
+-			goto conf_err;
+-	} else if (rss_hf && rss_cfg->conf.types == 0) {
+-		/* Enable RSS, restore indirection table by hw's config */
+-		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
+-					       hw->rss_ind_tbl_size);
+-		if (ret)
+-			goto conf_err;
+-	}
+-
+-	/* Update supported flow types when set tuple success */
+-	rss_cfg->conf.types = rss_hf;
++		goto set_tuple_fail;
+ 
+ 	if (key) {
+-		if (key_len != HNS3_RSS_KEY_SIZE) {
+-			hns3_err(hw, "The hash key len(%u) is invalid",
+-				 key_len);
+-			ret = -EINVAL;
+-			goto conf_err;
+-		}
+ 		ret = hns3_rss_set_algo_key(hw, key);
+ 		if (ret)
+-			goto conf_err;
++			goto set_algo_key_fail;
+ 	}
+ 	rte_spinlock_unlock(&hw->lock);
+ 
+ 	return 0;
+ 
+-conf_err:
++set_algo_key_fail:
++	(void)hns3_set_rss_tuple_by_rss_hf(hw, rss_hf_bk);
++set_tuple_fail:
+ 	rte_spinlock_unlock(&hw->lock);
+ 	return ret;
+ }
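The reworked update path validates the key length up front (it must be exactly HNS3_RSS_KEY_SIZE, 40 bytes) and, if programming the key fails, rolls the tuple configuration back to rss_hf_bk so driver and hardware state stay consistent. A possible caller through the standard ethdev API; the port id and key bytes are placeholders:

    #include <stdint.h>
    #include <rte_ethdev.h>

    static int
    enable_ip_rss(uint16_t port_id, uint8_t key[40])
    {
    	struct rte_eth_rss_conf conf = {
    		.rss_key = key,
    		.rss_key_len = 40,	/* must equal HNS3_RSS_KEY_SIZE */
    		.rss_hf = RTE_ETH_RSS_IP,
    	};

    	return rte_eth_dev_rss_hash_update(port_id, &conf);
    }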
+@@ -582,33 +532,59 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
+ 	return 0;
+ }
+ 
+-/*
+- * Used to configure the tc_size and tc_offset.
+- */
++static void
++hns3_set_rss_tc_mode_entry(struct hns3_hw *hw, uint8_t *tc_valid,
++			   uint16_t *tc_size, uint16_t *tc_offset,
++			   uint8_t tc_num)
++{
++	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
++	uint16_t rss_size = hw->alloc_rss_size;
++	uint16_t roundup_size;
++	uint16_t i;
++
++	roundup_size = roundup_pow_of_two(rss_size);
++	roundup_size = ilog2(roundup_size);
++
++	for (i = 0; i < tc_num; i++) {
++		if (hns->is_vf) {
++			/*
++			 * For packets with VLAN priorities destined for the VF,
++			 * hardware still assigns the Rx queue based on the
++			 * UP-to-TC mapping configured by the PF. But the VF has
++			 * only one TC. If the other TCs are not enabled, the
++			 * priority packets that are not destined for TC0 bypass
++			 * the RSS hash and are steered to queue 0. So the driver
++			 * has to enable the unused TCs by reusing the TC0 queue
++			 * mapping configuration.
++			 */
++			tc_valid[i] = (hw->hw_tc_map & BIT(i)) ?
++					!!(hw->hw_tc_map & BIT(i)) : 1;
++			tc_size[i] = roundup_size;
++			tc_offset[i] = (hw->hw_tc_map & BIT(i)) ?
++					rss_size * i : 0;
++		} else {
++			tc_valid[i] = !!(hw->hw_tc_map & BIT(i));
++			tc_size[i] = tc_valid[i] ? roundup_size : 0;
++			tc_offset[i] = tc_valid[i] ? rss_size * i : 0;
++		}
++	}
++}
++
+ static int
+ hns3_set_rss_tc_mode(struct hns3_hw *hw)
+ {
+-	uint16_t rss_size = hw->alloc_rss_size;
+ 	struct hns3_rss_tc_mode_cmd *req;
+ 	uint16_t tc_offset[HNS3_MAX_TC_NUM];
+ 	uint8_t tc_valid[HNS3_MAX_TC_NUM];
+ 	uint16_t tc_size[HNS3_MAX_TC_NUM];
+ 	struct hns3_cmd_desc desc;
+-	uint16_t roundup_size;
+ 	uint16_t i;
+ 	int ret;
+ 
+-	req = (struct hns3_rss_tc_mode_cmd *)desc.data;
+-
+-	roundup_size = roundup_pow_of_two(rss_size);
+-	roundup_size = ilog2(roundup_size);
+-
+-	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+-		tc_valid[i] = !!(hw->hw_tc_map & BIT(i));
+-		tc_size[i] = roundup_size;
+-		tc_offset[i] = rss_size * i;
+-	}
++	hns3_set_rss_tc_mode_entry(hw, tc_valid, tc_size,
++				   tc_offset, HNS3_MAX_TC_NUM);
+ 
++	req = (struct hns3_rss_tc_mode_cmd *)desc.data;
+ 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_TC_MODE, false);
+ 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+ 		uint16_t mode = 0;
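A worked example of the tc_size arithmetic in hns3_set_rss_tc_mode_entry() above: the hardware field stores log2 of the rounded-up queue count, so alloc_rss_size = 24 rounds up to 32 and is programmed as 5, while tc_offset stays a multiple of the real rss_size. A self-checking sketch; the clz-based helper assumes x >= 2, mirroring the fls()-based inline in hns3_rss.h:

    #include <assert.h>
    #include <stdint.h>

    /* Same result as the fls()-based helper in hns3_rss.h, for x >= 2. */
    static inline uint32_t
    roundup_pow_of_two(uint32_t x)
    {
    	return 1UL << (32 - __builtin_clz(x - 1));
    }

    int
    main(void)
    {
    	uint32_t rounded = roundup_pow_of_two(24);

    	assert(rounded == 32);
    	/* ilog2 of a power of two == count of trailing zero bits */
    	assert(__builtin_ctz(rounded) == 5);	/* programmed tc_size */
    	return 0;
    }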
+@@ -675,7 +651,8 @@ hns3_config_rss(struct hns3_adapter *hns)
+ 	struct hns3_hw *hw = &hns->hw;
+ 	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+ 	uint8_t *hash_key = rss_cfg->key;
+-	int ret, ret1;
++	uint64_t rss_hf;
++	int ret;
+ 
+ 	enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
+ 
+@@ -691,51 +668,30 @@ hns3_config_rss(struct hns3_adapter *hns)
+ 		break;
+ 	}
+ 
+-	/* When RSS is off, redirect the packet queue 0 */
+-	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
+-		hns3_rss_uninit(hns);
+-
+ 	/* Configure RSS hash algorithm and hash key offset */
+ 	ret = hns3_rss_set_algo_key(hw, hash_key);
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Configure the tuple selection for RSS hash input */
+-	ret = hns3_rss_set_input_tuple(hw);
++	ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
++				       hw->rss_ind_tbl_size);
+ 	if (ret)
+ 		return ret;
+ 
+-	/*
+-	 * When RSS is off, it doesn't need to configure rss redirection table
+-	 * to hardware.
+-	 */
+-	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
+-		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
+-					       hw->rss_ind_tbl_size);
+-		if (ret)
+-			goto rss_tuple_uninit;
+-	}
+-
+ 	ret = hns3_set_rss_tc_mode(hw);
+ 	if (ret)
+-		goto rss_indir_table_uninit;
+-
+-	return ret;
+-
+-rss_indir_table_uninit:
+-	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
+-		ret1 = hns3_rss_reset_indir_table(hw);
+-		if (ret1 != 0)
+-			return ret;
+-	}
+-
+-rss_tuple_uninit:
+-	hns3_rss_tuple_uninit(hw);
++		return ret;
+ 
+-	/* Disable RSS */
+-	hw->rss_info.conf.types = 0;
++	/*
++	 * When the multi-queue RSS mode flag is not set or unsupported tuples
++	 * are set, disable all tuples.
++	 */
++	rss_hf = hw->rss_info.conf.types;
++	if (!((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) ||
++	    !(rss_hf & HNS3_ETH_RSS_SUPPORT))
++		rss_hf = 0;
+ 
+-	return ret;
++	return hns3_set_rss_tuple_by_rss_hf(hw, rss_hf);
+ }
+ 
+ /*
+diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h
+index 6f153a1b7b..56627cbd4c 100644
+--- a/dpdk/drivers/net/hns3/hns3_rss.h
++++ b/dpdk/drivers/net/hns3/hns3_rss.h
+@@ -41,9 +41,8 @@ struct hns3_rss_tuple_cfg {
+ struct hns3_rss_conf {
+ 	/* RSS parameters :algorithm, flow_types,  key, queue */
+ 	struct rte_flow_action_rss conf;
+-	uint8_t hash_algo; /* hash function type definited by hardware */
++	uint8_t hash_algo; /* hash function type defined by hardware */
+ 	uint8_t key[HNS3_RSS_KEY_SIZE];  /* Hash key */
+-	struct hns3_rss_tuple_cfg rss_tuple_sets;
+ 	uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
+ 	uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */
+ 	bool valid; /* check if RSS rule is valid */
+@@ -89,6 +88,8 @@ static inline uint32_t roundup_pow_of_two(uint32_t x)
+ 	return 1UL << fls(x - 1);
+ }
+ 
++extern const uint8_t hns3_hash_key[];
++
+ struct hns3_adapter;
+ 
+ int hns3_dev_rss_hash_update(struct rte_eth_dev *dev,
+@@ -107,9 +108,7 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir,
+ int hns3_rss_reset_indir_table(struct hns3_hw *hw);
+ int hns3_config_rss(struct hns3_adapter *hns);
+ void hns3_rss_uninit(struct hns3_adapter *hns);
+-int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
+-				 struct hns3_rss_tuple_cfg *tuple,
+-				 uint64_t rss_hf);
++int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf);
+ int hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key);
+ int hns3_restore_rss_filter(struct rte_eth_dev *dev);
+ 
+diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c
+index f365daadf8..403f811a51 100644
+--- a/dpdk/drivers/net/hns3/hns3_rxtx.c
++++ b/dpdk/drivers/net/hns3/hns3_rxtx.c
+@@ -776,7 +776,7 @@ hns3vf_reset_all_tqps(struct hns3_hw *hw)
+ 	int ret;
+ 	int i;
+ 
+-	memset(msg_data, 0, sizeof(uint16_t));
++	memset(msg_data, 0, sizeof(msg_data));
+ 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+ 				sizeof(msg_data), true, &reset_status,
+ 				sizeof(reset_status));
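The memset fix above is the classic sizeof pitfall: sizeof(uint16_t) zeroes only two bytes, while sizeof(msg_data) zeroes the whole array regardless of its length. A stand-alone illustration; the array size here is arbitrary, not the driver's:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int
    main(void)
    {
    	uint8_t msg_data[4] = { 0xff, 0xff, 0xff, 0xff };

    	memset(msg_data, 0, sizeof(uint16_t));	/* old bug: clears 2 of 4 bytes */
    	assert(msg_data[2] == 0xff);		/* stale bytes survive */

    	memset(msg_data, 0, sizeof(msg_data));	/* fix: clears the whole array */
    	assert(msg_data[2] == 0);
    	return 0;
    }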
+@@ -1763,7 +1763,8 @@ hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (pkt_burst == hns3_recv_pkts_vec) {
++	if (pkt_burst == hns3_recv_pkts_vec ||
++	    pkt_burst == hns3_recv_pkts_vec_sve) {
+ 		min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
+ 			      HNS3_DEFAULT_RX_BURST;
+ 		if (nb_desc < min_vec_bds ||
+@@ -1903,7 +1904,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
+ 	 * For hns3 VF device, whether it needs to process PVID depends
+ 	 * on the configuration of PF kernel mode netdevice driver. And the
+ 	 * related PF configuration is delivered through the mailbox and finally
+-	 * reflectd in port_base_vlan_cfg.
++	 * reflected in port_base_vlan_cfg.
+ 	 */
+ 	if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+ 		rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
+@@ -2388,14 +2389,14 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
+ 		return rte_mbuf_raw_alloc(rxq->mb_pool);
+ }
+ 
+-static inline void
++static void
+ hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
+-		  volatile struct hns3_desc *rxd)
++			     uint64_t timestamp)
+ {
+ 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
+-	uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
+ 
+-	mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
++	mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
++			  RTE_MBUF_F_RX_IEEE1588_TMST;
+ 	if (hns3_timestamp_rx_dynflag > 0) {
+ 		*RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
+ 			rte_mbuf_timestamp_t *) = timestamp;
+@@ -2469,7 +2470,8 @@ hns3_recv_pkts_simple(void *rx_queue,
+ 		rxe->mbuf = nmb;
+ 
+ 		if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+-			hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp);
++			hns3_rx_ptp_timestamp_handle(rxq, rxm,
++				rte_le_to_cpu_64(rxdp->timestamp));
+ 
+ 		dma_addr = rte_mbuf_data_iova_default(nmb);
+ 		rxdp->addr = rte_cpu_to_le_64(dma_addr);
+@@ -2540,6 +2542,7 @@ hns3_recv_scattered_pkts(void *rx_queue,
+ 	struct rte_mbuf *rxm;
+ 	struct rte_eth_dev *dev;
+ 	uint32_t bd_base_info;
++	uint64_t timestamp;
+ 	uint32_t l234_info;
+ 	uint32_t gro_size;
+ 	uint32_t ol_info;
+@@ -2649,6 +2652,9 @@ hns3_recv_scattered_pkts(void *rx_queue,
+ 		rxm = rxe->mbuf;
+ 		rxe->mbuf = nmb;
+ 
++		if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
++			timestamp = rte_le_to_cpu_64(rxdp->timestamp);
++
+ 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ 		rxdp->rx.bd_base_info = 0;
+ 		rxdp->addr = dma_addr;
+@@ -2671,7 +2677,7 @@ hns3_recv_scattered_pkts(void *rx_queue,
+ 		}
+ 
+ 		if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+-			hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp);
++			hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp);
+ 
+ 		/*
+ 		 * The last buffer of the received packet. packet len from
+@@ -3043,7 +3049,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
+ 	 * For hns3 VF device, whether it needs to process PVID depends
+ 	 * on the configuration of PF kernel mode netdev driver. And the
+ 	 * related PF configuration is delivered through the mailbox and finally
+-	 * reflectd in port_base_vlan_cfg.
++	 * reflected in port_base_vlan_cfg.
+ 	 */
+ 	if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+ 		txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
+@@ -3208,7 +3214,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
+ 	 * in Tx direction based on hns3 network engine. So when the number of
+ 	 * VLANs in the packets represented by rxm plus the number of VLAN
+ 	 * offload by hardware such as PVID etc, exceeds two, the packets will
+-	 * be discarded or the original VLAN of the packets will be overwitted
++	 * be discarded or the original VLAN of the packets will be overwritten
+ 	 * by hardware. When the PF PVID is enabled by calling the API function
+ 	 * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3
+ 	 * PF kernel ether driver, the outer VLAN tag will always be the PVID.
+@@ -3393,7 +3399,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
+ 		/*
+ 		 * The inner l2 length of mbuf is the sum of outer l4 length,
+ 		 * tunneling header length and inner l2 length for a tunnel
+-		 * packect. But in hns3 tx descriptor, the tunneling header
++		 * packet. But in hns3 tx descriptor, the tunneling header
+ 		 * length is contained in the field of outer L4 length.
+ 		 * Therefore, driver need to calculate the outer L4 length and
+ 		 * inner L2 length.
+@@ -3409,7 +3415,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
+ 		tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+ 					HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
+ 		/*
+-		 * For NVGRE tunnel packect, the outer L4 is empty. So only
++		 * For NVGRE tunnel packet, the outer L4 is empty. So only
+ 		 * fill the NVGRE header length to the outer L4 field.
+ 		 */
+ 		tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+@@ -3452,7 +3458,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+ 	 * mbuf, but for hns3 descriptor, it is contained in the outer L4. So,
+ 	 * there is a need that switching between them. To avoid multiple
+ 	 * calculations, the length of the L2 header include the outer and
+-	 * inner, will be filled during the parsing of tunnel packects.
++	 * inner, will be filled during the parsing of tunnel packets.
+ 	 */
+ 	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
+ 		/*
+@@ -3632,7 +3638,7 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ 	if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+ 		struct rte_udp_hdr *udp_hdr;
+ 		/*
+-		 * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo
++		 * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo
+ 		 * header for TSO packets
+ 		 */
+ 		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+@@ -3657,7 +3663,7 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ 	if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+ 		struct rte_udp_hdr *udp_hdr;
+ 		/*
+-		 * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo
++		 * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo
+ 		 * header for TSO packets
+ 		 */
+ 		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+@@ -4044,7 +4050,7 @@ static inline void
+ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
+ {
+ #define PER_LOOP_NUM	4
+-	const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
++	uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
+ 	uint64_t dma_addr;
+ 	uint32_t i;
+ 
+@@ -4055,6 +4061,8 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
+ 		txdp->tx.paylen_fd_dop_ol4cs = 0;
+ 		txdp->tx.type_cs_vlan_tso_len = 0;
+ 		txdp->tx.ol_type_vlan_len_msec = 0;
++		if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST))
++			bd_flag |= BIT(HNS3_TXD_TSYN_B);
+ 		txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
+ 	}
+ }
+@@ -4062,7 +4070,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
+ static inline void
+ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
+ {
+-	const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
++	uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
+ 	uint64_t dma_addr;
+ 
+ 	dma_addr = rte_mbuf_data_iova(*pkts);
+@@ -4071,6 +4079,8 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
+ 	txdp->tx.paylen_fd_dop_ol4cs = 0;
+ 	txdp->tx.type_cs_vlan_tso_len = 0;
+ 	txdp->tx.ol_type_vlan_len_msec = 0;
++	if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST))
++		bd_flag |= BIT(HNS3_TXD_TSYN_B);
+ 	txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
+ }
+ 
+@@ -4312,10 +4322,6 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
+ {
+ 	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+ 
+-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+-	if (hns3_dev_get_support(hw, PTP))
+-		return false;
+-
+ 	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
+ }
+ 
+@@ -4408,7 +4414,23 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev)
+ 		 rx_mode.info, tx_mode.info);
+ }
+ 
+-void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
++static void
++hns3_eth_dev_fp_ops_config(const struct rte_eth_dev *dev)
++{
++	struct rte_eth_fp_ops *fpo = rte_eth_fp_ops;
++	uint16_t port_id = dev->data->port_id;
++
++	fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst;
++	fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst;
++	fpo[port_id].tx_pkt_prepare = dev->tx_pkt_prepare;
++	fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status;
++	fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status;
++	fpo[port_id].rxq.data = dev->data->rx_queues;
++	fpo[port_id].txq.data = dev->data->tx_queues;
++}
++
++void
++hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
+ {
+ 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ 	struct hns3_adapter *hns = eth_dev->data->dev_private;
+@@ -4429,6 +4451,8 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
+ 		eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
+ 		eth_dev->tx_pkt_prepare = NULL;
+ 	}
++
++	hns3_eth_dev_fp_ops_config(eth_dev);
+ }
+ 
+ void
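hns3_eth_dev_fp_ops_config() exists because, since DPDK 21.11, the rx/tx burst inlines dispatch through the rte_eth_fp_ops table rather than through struct rte_eth_dev, so swapping dev->rx_pkt_burst alone no longer changes what applications actually call. A hedged paraphrase of that dispatch path (simplified, not the exact ethdev code):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static inline uint16_t
    rx_burst_sketch(uint16_t port_id, uint16_t queue_id,
    		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
    {
    	/* applications read this table, so the driver must keep it in sync */
    	struct rte_eth_fp_ops *p = &rte_eth_fp_ops[port_id];

    	return p->rx_pkt_burst(p->rxq.data[queue_id], rx_pkts, nb_pkts);
    }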
+@@ -4591,7 +4615,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+ static int
+ hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
+ {
+-	uint16_t round_free_cnt;
++	uint16_t round_cnt;
+ 	uint32_t idx;
+ 
+ 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+@@ -4600,13 +4624,13 @@ hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
+ 	if (txq->tx_rs_thresh == 0)
+ 		return 0;
+ 
+-	round_free_cnt = roundup(free_cnt, txq->tx_rs_thresh);
+-	for (idx = 0; idx < round_free_cnt; idx += txq->tx_rs_thresh) {
++	round_cnt = rounddown(free_cnt, txq->tx_rs_thresh);
++	for (idx = 0; idx < round_cnt; idx += txq->tx_rs_thresh) {
+ 		if (hns3_tx_free_useless_buffer(txq) != 0)
+ 			break;
+ 	}
+ 
+-	return RTE_MIN(idx, free_cnt);
++	return idx;
+ }
+ 
+ int
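rounddown is the right operator in the cleanup hunk above because buffers are reclaimed in whole tx_rs_thresh batches, so only complete batches that fit inside the caller's free_cnt budget may run; the old roundup could overshoot it. Worked numbers, illustrative only:

    #include <assert.h>

    #define ROUNDDOWN(x, y) ((x) / (y) * (y))	/* mirrors the kernel-style rounddown() */

    int
    main(void)
    {
    	unsigned int free_cnt = 100, tx_rs_thresh = 32;

    	/* only 3 whole batches (96 buffers) fit the caller's budget ... */
    	assert(ROUNDDOWN(free_cnt, tx_rs_thresh) == 96);
    	/* ... while the old roundup(100, 32) == 128 would overshoot it */
    	return 0;
    }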
+@@ -4729,6 +4753,11 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev)
+ {
+ 	dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
+ 	dev->tx_pkt_prepare = NULL;
++	hns3_eth_dev_fp_ops_config(dev);
++
++	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
++		return;
++
+ 	rte_wmb();
+ 	/* Disable tx datapath on secondary process. */
+ 	hns3_mp_req_stop_tx(dev);
+@@ -4743,5 +4772,10 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev)
+ 
+ 	dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
+ 	dev->tx_pkt_prepare = prep;
++	hns3_eth_dev_fp_ops_config(dev);
++
++	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
++		return;
++
+ 	hns3_mp_req_start_tx(dev);
+ }
+diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.h b/dpdk/drivers/net/hns3/hns3_rxtx.h
+index 5423568cd0..0e412d07b3 100644
+--- a/dpdk/drivers/net/hns3/hns3_rxtx.h
++++ b/dpdk/drivers/net/hns3/hns3_rxtx.h
+@@ -349,7 +349,7 @@ struct hns3_rx_queue {
+ 	 * The following fields are not accessed in the I/O path, so they are
+ 	 * placed at the end.
+ 	 */
+-	void *io_base;
++	void *io_base __rte_cache_aligned;
+ 	struct hns3_adapter *hns;
+ 	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
+ 	const struct rte_memzone *mz;
+@@ -523,7 +523,7 @@ struct hns3_tx_queue {
+ 	 * The following fields are not accessed in the I/O path, so they are
+ 	 * placed at the end.
+ 	 */
+-	void *io_base;
++	void *io_base __rte_cache_aligned;
+ 	struct hns3_adapter *hns;
+ 	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
+ 	const struct rte_memzone *mz;
+@@ -611,7 +611,7 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
+ 
+ 	/*
+ 	 * If packet len bigger than mtu when recv with no-scattered algorithm,
+-	 * the first n bd will without FE bit, we need process this sisution.
++	 * the first n BDs arrive without the FE bit and this situation must be handled.
+ 	 * Note: we don't need add statistic counter because latest BD which
+ 	 *       with FE bit will mark HNS3_RXD_L2E_B bit.
+ 	 */
+diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec.c b/dpdk/drivers/net/hns3/hns3_rxtx_vec.c
+index 455110361a..73f0ab6bc8 100644
+--- a/dpdk/drivers/net/hns3/hns3_rxtx_vec.c
++++ b/dpdk/drivers/net/hns3/hns3_rxtx_vec.c
+@@ -17,15 +17,17 @@ int
+ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
+ {
+ 	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+-
+-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+-	if (hns3_dev_get_support(hw, PTP))
+-		return -ENOTSUP;
++	struct hns3_adapter *hns = dev->data->dev_private;
++	struct hns3_pf *pf = &hns->pf;
+ 
+ 	/* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+ 	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+ 		return -ENOTSUP;
+ 
++	/* Vec is not supported when PTP is enabled */
++	if (pf->ptp_enable)
++		return -ENOTSUP;
++
+ 	return 0;
+ }
+ 
+@@ -232,10 +234,8 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev)
+ 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ 	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ 				 RTE_ETH_RX_OFFLOAD_VLAN;
+-
+-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+-	if (hns3_dev_get_support(hw, PTP))
+-		return -ENOTSUP;
++	struct hns3_adapter *hns = dev->data->dev_private;
++	struct hns3_pf *pf = &hns->pf;
+ 
+ 	if (dev->data->scattered_rx)
+ 		return -ENOTSUP;
+@@ -249,5 +249,9 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev)
+ 	if (hns3_rxq_iterate(dev, hns3_rxq_vec_check, NULL) != 0)
+ 		return -ENOTSUP;
+ 
++	/* Vec is not supported when PTP is enabled */
++	if (pf->ptp_enable)
++		return -ENOTSUP;
++
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/net/hns3/hns3_stats.c b/dpdk/drivers/net/hns3/hns3_stats.c
+index 0fe853d626..1b0464f3f7 100644
+--- a/dpdk/drivers/net/hns3/hns3_stats.c
++++ b/dpdk/drivers/net/hns3/hns3_stats.c
+@@ -307,24 +307,21 @@ static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
+ 
+ static void hns3_tqp_stats_clear(struct hns3_hw *hw);
+ 
+-/*
+- * Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
+- * This command is used before send 'query_mac_stat command', the descriptor
+- * number of 'query_mac_stat command' must match with reg_num in this command.
+- * @praram hw
+- *   Pointer to structure hns3_hw.
+- * @return
+- *   0 on success.
+- */
+ static int
+-hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
++hns3_update_mac_stats(struct hns3_hw *hw)
+ {
++#define HNS3_MAC_STATS_REG_NUM_PER_DESC	4
++
+ 	uint64_t *data = (uint64_t *)(&hw->mac_stats);
+ 	struct hns3_cmd_desc *desc;
++	uint32_t stats_iterms;
+ 	uint64_t *desc_data;
+-	uint16_t i, k, n;
++	uint32_t desc_num;
++	uint16_t i;
+ 	int ret;
+ 
++	/* The first desc has a 64-bit header, so it must be taken into account. */
++	desc_num = hw->mac_stats_reg_num / HNS3_MAC_STATS_REG_NUM_PER_DESC + 1;
+ 	desc = rte_malloc("hns3_mac_desc",
+ 			  desc_num * sizeof(struct hns3_cmd_desc), 0);
+ 	if (desc == NULL) {
+@@ -340,65 +337,71 @@ hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
+ 		return ret;
+ 	}
+ 
+-	for (i = 0; i < desc_num; i++) {
+-		/* For special opcode 0034, only the first desc has the head */
+-		if (i == 0) {
+-			desc_data = (uint64_t *)(&desc[i].data[0]);
+-			n = HNS3_RD_FIRST_STATS_NUM;
+-		} else {
+-			desc_data = (uint64_t *)(&desc[i]);
+-			n = HNS3_RD_OTHER_STATS_NUM;
+-		}
+-
+-		for (k = 0; k < n; k++) {
+-			*data += rte_le_to_cpu_64(*desc_data);
+-			data++;
+-			desc_data++;
+-		}
++	stats_iterms = RTE_MIN(sizeof(hw->mac_stats) / sizeof(uint64_t),
++			       hw->mac_stats_reg_num);
++	desc_data = (uint64_t *)(&desc[0].data[0]);
++	for (i = 0; i < stats_iterms; i++) {
++		/*
++		 * Data memory is continuous and only the first descriptor has a
++		 * header in this command.
++		 */
++		*data += rte_le_to_cpu_64(*desc_data);
++		data++;
++		desc_data++;
+ 	}
+ 	rte_free(desc);
+ 
+ 	return 0;
+ }
+ 
+-/*
+- * Query Mac stat reg num command ,opcode id: 0x0033.
+- * This command is used before send 'query_mac_stat command', the descriptor
+- * number of 'query_mac_stat command' must match with reg_num in this command.
+- * @praram rte_stats
+- *   Pointer to structure rte_eth_stats.
+- * @return
+- *   0 on success.
+- */
+ static int
+-hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num)
++hns3_mac_query_reg_num(struct hns3_hw *hw, uint32_t *reg_num)
+ {
+-	struct hns3_adapter *hns = dev->data->dev_private;
+-	struct hns3_hw *hw = &hns->hw;
++#define HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B	3
+ 	struct hns3_cmd_desc desc;
+-	uint32_t *desc_data;
+-	uint32_t reg_num;
+ 	int ret;
+ 
+ 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
+ 	ret = hns3_cmd_send(hw, &desc, 1);
+-	if (ret)
++	if (ret) {
++		hns3_err(hw, "failed to query MAC statistic reg number, ret = %d",
++			 ret);
+ 		return ret;
++	}
+ 
+-	/*
+-	 * The num of MAC statistics registers that are provided by IMP in this
+-	 * version.
+-	 */
+-	desc_data = (uint32_t *)(&desc.data[0]);
+-	reg_num = rte_le_to_cpu_32(*desc_data);
++	/* The number of MAC statistics registers is provided by firmware. */
++	*reg_num = rte_le_to_cpu_32(desc.data[0]);
++	if (*reg_num == 0) {
++		hns3_err(hw, "MAC statistic reg number is invalid!");
++		return -ENODATA;
++	}
+ 
+ 	/*
+-	 * The descriptor number of 'query_additional_mac_stat command' is
+-	 * '1 + (reg_num-3)/4 + ((reg_num-3)%4 !=0)';
+-	 * This value is 83 in this version
++	 * If the driver doesn't request the firmware to report more MAC
++	 * statistics items and the total register number using the new
++	 * method, the firmware reports only the number of valid statistics
++	 * registers. However, structure hns3_mac_stats in the driver contains
++	 * both valid and reserved statistics items. In this case, the three
++	 * reserved statistics registers must be added to the total number.
+ 	 */
+-	*desc_num = 1 + ((reg_num - 3) >> 2) +
+-		    (uint32_t)(((reg_num - 3) & 0x3) ? 1 : 0);
++	*reg_num += HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B;
++
++	return 0;
++}
++
++int
++hns3_query_mac_stats_reg_num(struct hns3_hw *hw)
++{
++	uint32_t mac_stats_reg_num = 0;
++	int ret;
++
++	ret = hns3_mac_query_reg_num(hw, &mac_stats_reg_num);
++	if (ret)
++		return ret;
++
++	hw->mac_stats_reg_num = mac_stats_reg_num;
++	if (hw->mac_stats_reg_num > sizeof(hw->mac_stats) / sizeof(uint64_t))
++		hns3_warn(hw, "MAC stats reg number from firmware is greater than stats iterms in driver.");
+ 
+ 	return 0;
+ }
+@@ -408,15 +411,8 @@ hns3_query_update_mac_stats(struct rte_eth_dev *dev)
+ {
+ 	struct hns3_adapter *hns = dev->data->dev_private;
+ 	struct hns3_hw *hw = &hns->hw;
+-	uint32_t desc_num;
+-	int ret;
+ 
+-	ret = hns3_mac_query_reg_num(dev, &desc_num);
+-	if (ret == 0)
+-		ret = hns3_update_mac_stats(hw, desc_num);
+-	else
+-		hns3_err(hw, "Query mac reg num fail : %d", ret);
+-	return ret;
++	return hns3_update_mac_stats(hw);
+ }
+ 
+ static int
+@@ -544,7 +540,7 @@ hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw)
+ 	return 0;
+ }
+ 
+-int
++static int
+ hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
+ {
+ 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+@@ -588,6 +584,28 @@ hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
+ 	return 0;
+ }
+ 
++static void
++hns3_rcb_rx_ring_stats_get(struct hns3_rx_queue *rxq,
++			   struct hns3_tqp_stats *stats)
++{
++	uint32_t cnt;
++
++	cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
++	stats->rcb_rx_ring_pktnum_rcd += cnt;
++	stats->rcb_rx_ring_pktnum[rxq->queue_id] += cnt;
++}
++
++static void
++hns3_rcb_tx_ring_stats_get(struct hns3_tx_queue *txq,
++			   struct hns3_tqp_stats *stats)
++{
++	uint32_t cnt;
++
++	cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
++	stats->rcb_tx_ring_pktnum_rcd += cnt;
++	stats->rcb_tx_ring_pktnum[txq->queue_id] += cnt;
++}
++
+ /*
+  * Query tqp tx queue statistics ,opcode id: 0x0B03.
+  * Query tqp rx queue statistics ,opcode id: 0x0B13.
+@@ -608,16 +626,15 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
+ 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
+ 	struct hns3_rx_queue *rxq;
+ 	struct hns3_tx_queue *txq;
+-	uint64_t cnt;
+ 	uint16_t i;
+ 	int ret;
+ 
++	rte_spinlock_lock(&hw->stats_lock);
+ 	/* Update imissed stats */
+ 	ret = hns3_update_imissed_stats(hw, false);
+ 	if (ret) {
+-		hns3_err(hw, "update imissed stats failed, ret = %d",
+-			 ret);
+-		return ret;
++		hns3_err(hw, "update imissed stats failed, ret = %d", ret);
++		goto out;
+ 	}
+ 	rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
+ 				imissed_stats->ssu_rx_drop_cnt;
+@@ -628,15 +645,9 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
+ 		if (rxq == NULL)
+ 			continue;
+ 
+-		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+-		/*
+-		 * Read hardware and software in adjacent positions to minumize
+-		 * the timing variance.
+-		 */
++		hns3_rcb_rx_ring_stats_get(rxq, stats);
+ 		rte_stats->ierrors += rxq->err_stats.l2_errors +
+ 				      rxq->err_stats.pkt_len_errors;
+-		stats->rcb_rx_ring_pktnum_rcd += cnt;
+-		stats->rcb_rx_ring_pktnum[i] += cnt;
+ 		rte_stats->ibytes += rxq->basic_stats.bytes;
+ 	}
+ 
+@@ -646,17 +657,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
+ 		if (txq == NULL)
+ 			continue;
+ 
+-		cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
+-		stats->rcb_tx_ring_pktnum_rcd += cnt;
+-		stats->rcb_tx_ring_pktnum[i] += cnt;
++		hns3_rcb_tx_ring_stats_get(txq, stats);
+ 		rte_stats->obytes += txq->basic_stats.bytes;
+ 	}
+ 
+ 	ret = hns3_update_oerror_stats(hw, false);
+ 	if (ret) {
+-		hns3_err(hw, "update oerror stats failed, ret = %d",
+-			 ret);
+-		return ret;
++		hns3_err(hw, "update oerror stats failed, ret = %d", ret);
++		goto out;
+ 	}
+ 	rte_stats->oerrors = hw->oerror_stats;
+ 
+@@ -672,7 +680,10 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
+ 		rte_stats->oerrors;
+ 	rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
+ 
+-	return 0;
++out:
++	rte_spinlock_unlock(&hw->stats_lock);
++
++	return ret;
+ }
+ 
+ int
+@@ -685,6 +696,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
+ 	uint16_t i;
+ 	int ret;
+ 
++	rte_spinlock_lock(&hw->stats_lock);
+ 	/*
+ 	 * Note: Reading hardware statistics of imissed registers will
+ 	 * clear them.
+@@ -692,7 +704,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
+ 	ret = hns3_update_imissed_stats(hw, true);
+ 	if (ret) {
+ 		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -701,9 +713,8 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
+ 	 */
+ 	ret = hns3_update_oerror_stats(hw, true);
+ 	if (ret) {
+-		hns3_err(hw, "clear oerror stats failed, ret = %d",
+-			 ret);
+-		return ret;
++		hns3_err(hw, "clear oerror stats failed, ret = %d", ret);
++		goto out;
+ 	}
+ 
+ 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+@@ -745,7 +756,10 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
+ 
+ 	hns3_tqp_stats_clear(hw);
+ 
+-	return 0;
++out:
++	rte_spinlock_unlock(&hw->stats_lock);
++
++	return ret;
+ }
+ 
+ static int
+@@ -912,7 +926,6 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 	struct hns3_rx_basic_stats *rxq_stats;
+ 	struct hns3_rx_queue *rxq;
+ 	uint16_t i, j;
+-	uint32_t cnt;
+ 	char *val;
+ 
+ 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+@@ -920,16 +933,10 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 		if (rxq == NULL)
+ 			continue;
+ 
+-		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+-		/*
+-		 * Read hardware and software in adjacent positions to minimize
+-		 * the time difference.
+-		 */
++		hns3_rcb_rx_ring_stats_get(rxq, stats);
+ 		rxq_stats = &rxq->basic_stats;
+ 		rxq_stats->errors = rxq->err_stats.l2_errors +
+ 					rxq->err_stats.pkt_len_errors;
+-		stats->rcb_rx_ring_pktnum_rcd += cnt;
+-		stats->rcb_rx_ring_pktnum[i] += cnt;
+ 
+ 		/*
+ 		 * If HW statistics are reset by stats_reset, but a lot of
+@@ -959,7 +966,6 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 	struct hns3_tx_basic_stats *txq_stats;
+ 	struct hns3_tx_queue *txq;
+ 	uint16_t i, j;
+-	uint32_t cnt;
+ 	char *val;
+ 
+ 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+@@ -967,9 +973,7 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 		if (txq == NULL)
+ 			continue;
+ 
+-		cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
+-		stats->rcb_tx_ring_pktnum_rcd += cnt;
+-		stats->rcb_tx_ring_pktnum[i] += cnt;
++		hns3_rcb_tx_ring_stats_get(txq, stats);
+ 
+ 		txq_stats = &txq->basic_stats;
+ 		txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
+@@ -1024,9 +1028,13 @@ hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+  * @praram xstats
+  *   A pointer to a table of structure of type *rte_eth_xstat*
+  *   to be filled with device statistics ids and values.
+- *   This parameter can be set to NULL if n is 0.
++ *   This parameter can be set to NULL if and only if n is 0.
+  * @param n
+  *   The size of the xstats array (number of elements).
++ *   If lower than the required number of elements, the function returns the
++ *   required number of elements.
++ *   If equal to zero, the xstats parameter must be NULL and the function
++ *   returns the required number of elements.
+  * @return
+  *   0 on fail, count(The size of the statistics elements) on success.
+  */
+@@ -1045,15 +1053,13 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 	int count;
+ 	int ret;
+ 
+-	if (xstats == NULL)
+-		return 0;
+-
+ 	count = hns3_xstats_calc_num(dev);
+ 	if ((int)n < count)
+ 		return count;
+ 
+ 	count = 0;
+ 
++	rte_spinlock_lock(&hw->stats_lock);
+ 	hns3_tqp_basic_stats_get(dev, xstats, &count);
+ 
+ 	if (!hns->is_vf) {
+@@ -1061,6 +1067,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 		ret = hns3_query_update_mac_stats(dev);
+ 		if (ret < 0) {
+ 			hns3_err(hw, "Update Mac stats fail : %d", ret);
++			rte_spinlock_unlock(&hw->stats_lock);
+ 			return ret;
+ 		}
+ 
+@@ -1075,8 +1082,8 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 
+ 	ret = hns3_update_imissed_stats(hw, false);
+ 	if (ret) {
+-		hns3_err(hw, "update imissed stats failed, ret = %d",
+-			 ret);
++		hns3_err(hw, "update imissed stats failed, ret = %d", ret);
++		rte_spinlock_unlock(&hw->stats_lock);
+ 		return ret;
+ 	}
+ 
+@@ -1107,6 +1114,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 
+ 	hns3_tqp_dfx_stats_get(dev, xstats, &count);
+ 	hns3_queue_stats_get(dev, xstats, &count);
++	rte_spinlock_unlock(&hw->stats_lock);
+ 
+ 	return count;
+ }
+@@ -1289,7 +1297,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
+  *   A pointer to an ids array passed by application. This tells which
+  *   statistics values function should retrieve. This parameter
+  *   can be set to NULL if size is 0. In this case function will retrieve
+- *   all avalible statistics.
++ *   all available statistics.
+  * @param values
+  *   A pointer to a table to be filled with device statistics values.
+  * @param size
+@@ -1457,6 +1465,7 @@ int
+ hns3_dev_xstats_reset(struct rte_eth_dev *dev)
+ {
+ 	struct hns3_adapter *hns = dev->data->dev_private;
++	struct hns3_hw *hw = &hns->hw;
+ 	int ret;
+ 
+ 	/* Clear tqp stats */
+@@ -1464,23 +1473,25 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	rte_spinlock_lock(&hw->stats_lock);
+ 	hns3_tqp_dfx_stats_clear(dev);
+ 
+ 	/* Clear reset stats */
+ 	memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
+ 
+ 	if (hns->is_vf)
+-		return 0;
++		goto out;
+ 
+ 	/* HW registers are cleared on read */
+ 	ret = hns3_mac_stats_reset(dev);
+-	if (ret)
+-		return ret;
+ 
+-	return 0;
++out:
++	rte_spinlock_unlock(&hw->stats_lock);
++
++	return ret;
+ }
+ 
+-int
++static int
+ hns3_tqp_stats_init(struct hns3_hw *hw)
+ {
+ 	struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+@@ -1504,7 +1515,7 @@ hns3_tqp_stats_init(struct hns3_hw *hw)
+ 	return 0;
+ }
+ 
+-void
++static void
+ hns3_tqp_stats_uninit(struct hns3_hw *hw)
+ {
+ 	struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+@@ -1525,3 +1536,64 @@ hns3_tqp_stats_clear(struct hns3_hw *hw)
+ 	memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+ 	memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+ }
++
++int
++hns3_stats_init(struct hns3_hw *hw)
++{
++	int ret;
++
++	rte_spinlock_init(&hw->stats_lock);
++	/* Clear the hardware statistics of the imissed registers. */
++	ret = hns3_update_imissed_stats(hw, true);
++	if (ret) {
++		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
++		return ret;
++	}
++
++	return hns3_tqp_stats_init(hw);
++}
++
++void
++hns3_stats_uninit(struct hns3_hw *hw)
++{
++	hns3_tqp_stats_uninit(hw);
++}
++
++static void
++hns3_update_queues_stats(struct hns3_hw *hw)
++{
++	struct rte_eth_dev_data *data = hw->data;
++	struct hns3_rx_queue *rxq;
++	struct hns3_tx_queue *txq;
++	uint16_t i;
++
++	for (i = 0; i < data->nb_rx_queues; i++) {
++		rxq = data->rx_queues[i];
++		if (rxq != NULL)
++			hns3_rcb_rx_ring_stats_get(rxq, &hw->tqp_stats);
++	}
++
++	for (i = 0; i < data->nb_tx_queues; i++) {
++		txq = data->tx_queues[i];
++		if (txq != NULL)
++			hns3_rcb_tx_ring_stats_get(txq, &hw->tqp_stats);
++	}
++}
++
++/*
++ * Some hardware statistics registers are not 64-bit. If hardware statistics
++ * are not read for a long time, these counters may wrap around. This function
++ * is used to update these hardware statistics in the periodic task.
++ */
++void
++hns3_update_hw_stats(struct hns3_hw *hw)
++{
++	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
++
++	rte_spinlock_lock(&hw->stats_lock);
++	if (!hns->is_vf)
++		hns3_update_mac_stats(hw);
++
++	hns3_update_queues_stats(hw);
++	rte_spinlock_unlock(&hw->stats_lock);
++}
+diff --git a/dpdk/drivers/net/hns3/hns3_stats.h b/dpdk/drivers/net/hns3/hns3_stats.h
+index d1230f94cb..b5cd6188b4 100644
+--- a/dpdk/drivers/net/hns3/hns3_stats.h
++++ b/dpdk/drivers/net/hns3/hns3_stats.h
+@@ -5,11 +5,6 @@
+ #ifndef _HNS3_STATS_H_
+ #define _HNS3_STATS_H_
+ 
+-/* stats macro */
+-#define HNS3_MAC_CMD_NUM		21
+-#define HNS3_RD_FIRST_STATS_NUM		2
+-#define HNS3_RD_OTHER_STATS_NUM		4
+-
+ /* TQP stats */
+ struct hns3_tqp_stats {
+ 	uint64_t rcb_tx_ring_pktnum_rcd; /* Total num of transmitted packets */
+@@ -22,6 +17,7 @@ struct hns3_tqp_stats {
+ struct hns3_mac_stats {
+ 	uint64_t mac_tx_mac_pause_num;
+ 	uint64_t mac_rx_mac_pause_num;
++	uint64_t rsv0;
+ 	uint64_t mac_tx_pfc_pri0_pkt_num;
+ 	uint64_t mac_tx_pfc_pri1_pkt_num;
+ 	uint64_t mac_tx_pfc_pri2_pkt_num;
+@@ -58,7 +54,7 @@ struct hns3_mac_stats {
+ 	uint64_t mac_tx_1519_2047_oct_pkt_num;
+ 	uint64_t mac_tx_2048_4095_oct_pkt_num;
+ 	uint64_t mac_tx_4096_8191_oct_pkt_num;
+-	uint64_t rsv0;
++	uint64_t rsv1;
+ 	uint64_t mac_tx_8192_9216_oct_pkt_num;
+ 	uint64_t mac_tx_9217_12287_oct_pkt_num;
+ 	uint64_t mac_tx_12288_16383_oct_pkt_num;
+@@ -85,7 +81,7 @@ struct hns3_mac_stats {
+ 	uint64_t mac_rx_1519_2047_oct_pkt_num;
+ 	uint64_t mac_rx_2048_4095_oct_pkt_num;
+ 	uint64_t mac_rx_4096_8191_oct_pkt_num;
+-	uint64_t rsv1;
++	uint64_t rsv2;
+ 	uint64_t mac_rx_8192_9216_oct_pkt_num;
+ 	uint64_t mac_rx_9217_12287_oct_pkt_num;
+ 	uint64_t mac_rx_12288_16383_oct_pkt_num;
+@@ -165,8 +161,9 @@ int hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ 				    struct rte_eth_xstat_name *xstats_names,
+ 				    uint32_t size);
+ int hns3_stats_reset(struct rte_eth_dev *dev);
+-int hns3_tqp_stats_init(struct hns3_hw *hw);
+-void hns3_tqp_stats_uninit(struct hns3_hw *hw);
+-int hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear);
++int hns3_stats_init(struct hns3_hw *hw);
++void hns3_stats_uninit(struct hns3_hw *hw);
++int hns3_query_mac_stats_reg_num(struct hns3_hw *hw);
++void hns3_update_hw_stats(struct hns3_hw *hw);
+ 
+ #endif /* _HNS3_STATS_H_ */
+diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c
+index c0bfff43ee..bad27355fc 100644
+--- a/dpdk/drivers/net/i40e/i40e_ethdev.c
++++ b/dpdk/drivers/net/i40e/i40e_ethdev.c
+@@ -386,6 +386,7 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ 				      struct rte_ether_addr *mac_addr);
+ 
+ static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
++static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
+ 
+ static int i40e_ethertype_filter_convert(
+ 	const struct rte_eth_ethertype_filter *input,
+@@ -1709,11 +1710,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
+ 	 */
+ 	i40e_add_tx_flow_control_drop_filter(pf);
+ 
+-	/* Set the max frame size to 0x2600 by default,
+-	 * in case other drivers changed the default value.
+-	 */
+-	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
+-
+ 	/* initialize RSS rule list */
+ 	TAILQ_INIT(&pf->rss_config_list);
+ 
+@@ -2364,6 +2360,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
+ 	uint32_t intr_vector = 0;
+ 	struct i40e_vsi *vsi;
+ 	uint16_t nb_rxq, nb_txq;
++	uint16_t max_frame_size;
+ 
+ 	hw->adapter_stopped = 0;
+ 
+@@ -2483,7 +2480,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
+ 		if (ret != I40E_SUCCESS)
+ 			PMD_DRV_LOG(WARNING, "Fail to set phy mask");
+ 
+-		/* Call get_link_info aq commond to enable/disable LSE */
++		/* Call get_link_info aq command to enable/disable LSE */
+ 		i40e_dev_link_update(dev, 0);
+ 	}
+ 
+@@ -2502,6 +2499,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
+ 			    "please call hierarchy_commit() "
+ 			    "before starting the port");
+ 
++	max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
++	i40e_set_mac_max_frame(dev, max_frame_size);
++
+ 	return I40E_SUCCESS;
+ 
+ tx_err:
+@@ -2848,6 +2848,9 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
+ 	return i40e_phy_conf_link(hw, abilities, speed, false);
+ }
+ 
++#define CHECK_INTERVAL             100  /* 100ms */
++#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
++
+ static __rte_always_inline void
+ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
+ {
+@@ -2914,8 +2917,6 @@ static __rte_always_inline void
+ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
+ 	bool enable_lse, int wait_to_complete)
+ {
+-#define CHECK_INTERVAL             100  /* 100ms */
+-#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
+ 	uint32_t rep_cnt = MAX_REPEAT_TIME;
+ 	struct i40e_link_status link_status;
+ 	int status;
+@@ -3555,7 +3556,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from i40e_hw_port struct */
++	/* Get individual stats from i40e_hw_port struct */
+ 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ 		strlcpy(xstats_names[count].name,
+ 			rte_i40e_hw_port_strings[i].name,
+@@ -3613,7 +3614,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from i40e_hw_port struct */
++	/* Get individual stats from i40e_hw_port struct */
+ 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ 			rte_i40e_hw_port_strings[i].offset);
+@@ -5544,7 +5545,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
+ 					&ets_sla_config, NULL);
+ 	if (ret != I40E_SUCCESS) {
+ 		PMD_DRV_LOG(ERR,
+-			"VSI failed to get TC bandwdith configuration %u",
++			"VSI failed to get TC bandwidth configuration %u",
+ 			hw->aq.asq_last_status);
+ 		return ret;
+ 	}
+@@ -6719,6 +6720,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
+ 			if (!ret)
+ 				rte_eth_dev_callback_process(dev,
+ 					RTE_ETH_EVENT_INTR_LSC, NULL);
++
+ 			break;
+ 		default:
+ 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
+@@ -6822,7 +6824,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev)
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -9719,7 +9721,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ 	return 0;
+ }
+ 
+-/* Check if there exists the ehtertype filter */
++/* Check if there exists the ethertype filter */
+ struct i40e_ethertype_filter *
+ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ 				const struct i40e_ethertype_filter_input *input)
+@@ -12103,6 +12105,35 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+ 	return ret;
+ }
+ 
++static void
++i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
++{
++	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++	uint32_t rep_cnt = MAX_REPEAT_TIME;
++	struct rte_eth_link link;
++	enum i40e_status_code status;
++	bool can_be_set = true;
++
++	/* For I40E_MEDIA_TYPE_BASET, waiting for link up can be skipped */
++	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
++		do {
++			update_link_reg(hw, &link);
++			if (link.link_status)
++				break;
++			rte_delay_ms(CHECK_INTERVAL);
++		} while (--rep_cnt);
++		can_be_set = !!link.link_status;
++	}
++
++	if (can_be_set) {
++		status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
++		if (status != I40E_SUCCESS)
++			PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
++	} else {
++		PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
++	}
++}
++
+ RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
+ RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
+ #ifdef RTE_ETHDEV_DEBUG_RX
+diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.h b/dpdk/drivers/net/i40e/i40e_ethdev.h
+index 2d182f8000..a1ebdc093c 100644
+--- a/dpdk/drivers/net/i40e/i40e_ethdev.h
++++ b/dpdk/drivers/net/i40e/i40e_ethdev.h
+@@ -897,7 +897,7 @@ struct i40e_tunnel_filter {
+ 	TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ 	struct i40e_tunnel_filter_input input;
+ 	uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */
+-	uint16_t vf_id;   /* VF id, avaiblable when is_to_vf is 1. */
++	uint16_t vf_id;   /* VF id, available when is_to_vf is 1. */
+ 	uint16_t queue; /* Queue assigned to when match */
+ };
+ 
+@@ -966,7 +966,7 @@ struct i40e_tunnel_filter_conf {
+ 	uint32_t tenant_id;     /**< Tenant ID to match. VNI, GRE key... */
+ 	uint16_t queue_id;      /**< Queue assigned to if match. */
+ 	uint8_t is_to_vf;       /**< 0 - to PF, 1 - to VF */
+-	uint16_t vf_id;         /**< VF id, avaiblable when is_to_vf is 1. */
++	uint16_t vf_id;         /**< VF id, available when is_to_vf is 1. */
+ };
+ 
+ TAILQ_HEAD(i40e_flow_list, rte_flow);
+@@ -1100,7 +1100,7 @@ struct i40e_vf_msg_cfg {
+ 	/*
+ 	 * If message statistics from a VF exceed the maximal limitation,
+ 	 * the PF will ignore any new message from that VF for
+-	 * 'ignor_second' time.
++	 * 'ignore_second' time.
+ 	 */
+ 	uint32_t ignore_second;
+ };
+@@ -1257,7 +1257,7 @@ struct i40e_adapter {
+ };
+ 
+ /**
+- * Strucute to store private data for each VF representor instance
++ * Structure to store private data for each VF representor instance
+  */
+ struct i40e_vf_representor {
+ 	uint16_t switch_domain_id;
+@@ -1265,7 +1265,7 @@ struct i40e_vf_representor {
+ 	uint16_t vf_id;
+ 	/**< Virtual Function ID */
+ 	struct i40e_adapter *adapter;
+-	/**< Private data store of assocaiated physical function */
++	/**< Private data store of associated physical function */
+ 	struct i40e_eth_stats stats_offset;
+ 	/**< Zero-point of VF statistics*/
+ };
+diff --git a/dpdk/drivers/net/i40e/i40e_fdir.c b/dpdk/drivers/net/i40e/i40e_fdir.c
+index df2a5aaecc..8caedea14e 100644
+--- a/dpdk/drivers/net/i40e/i40e_fdir.c
++++ b/dpdk/drivers/net/i40e/i40e_fdir.c
+@@ -142,7 +142,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
+ 		I40E_QRX_TAIL(rxq->vsi->base_queue);
+ 
+ 	rte_wmb();
+-	/* Init the RX tail regieter. */
++	/* Init the RX tail register. */
+ 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ 
+ 	return err;
+@@ -430,7 +430,7 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
+ 
+ 	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
+ 		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
+-			PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
++			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
+ 			return -EINVAL;
+ 		}
+ 	}
+@@ -438,7 +438,7 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
+ 	memset(flex_pit, 0, sizeof(flex_pit));
+ 	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
+ 	if (num > I40E_MAX_FLXPLD_FIED) {
+-		PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
++		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
+ 		return -EINVAL;
+ 	}
+ 	for (i = 0; i < num; i++) {
+@@ -948,7 +948,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+ 	uint8_t pctype = fdir_input->pctype;
+ 	struct i40e_customized_pctype *cus_pctype;
+ 
+-	/* raw pcket template - just copy contents of the raw packet */
++	/* raw packet template - just copy contents of the raw packet */
+ 	if (fdir_input->flow_ext.pkt_template) {
+ 		memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
+ 		       fdir_input->flow.raw_flow.length);
+@@ -1831,7 +1831,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ 				&check_filter.fdir.input);
+ 		if (!node) {
+ 			PMD_DRV_LOG(ERR,
+-				    "There's no corresponding flow firector filter!");
++				    "There's no corresponding flow director filter!");
+ 			return -EINVAL;
+ 		}
+ 
+diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c
+index c9676caab5..4f3808cb5f 100644
+--- a/dpdk/drivers/net/i40e/i40e_flow.c
++++ b/dpdk/drivers/net/i40e/i40e_flow.c
+@@ -3043,7 +3043,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ 				rte_flow_error_set(error, EINVAL,
+ 					   RTE_FLOW_ERROR_TYPE_ITEM,
+ 					   item,
+-					   "Exceeds maxmial payload limit.");
++					   "Exceeds maximal payload limit.");
+ 				return -rte_errno;
+ 			}
+ 
+@@ -3142,8 +3142,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ 		/* Check if the input set is valid */
+ 		if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
+ 						input_set) != 0) {
+-			PMD_DRV_LOG(ERR, "Invalid input set");
+-			return -EINVAL;
++			rte_flow_error_set(error, EINVAL,
++					   RTE_FLOW_ERROR_TYPE_ITEM,
++					   item,
++					   "Invalid input set");
++			return -rte_errno;
+ 		}
+ 
+ 		filter->input.flow_ext.input_set = input_set;
+diff --git a/dpdk/drivers/net/i40e/i40e_pf.c b/dpdk/drivers/net/i40e/i40e_pf.c
+index ccb3924a5f..15d9ff868f 100644
+--- a/dpdk/drivers/net/i40e/i40e_pf.c
++++ b/dpdk/drivers/net/i40e/i40e_pf.c
+@@ -343,7 +343,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg,
+ 		vf->request_caps = *(uint32_t *)msg;
+ 
+ 	/* enable all RSS by default,
+-	 * doesn't support hena setting by virtchnnl yet.
++	 * doesn't support hena setting by virtchnl yet.
+ 	 */
+ 	if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ 		I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),
+@@ -597,14 +597,14 @@ i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
+ 	tempmap = vvm->rxq_map;
+ 	for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
+ 		if (tempmap & 0x1)
+-			linklistmap |= (1 << (2 * i));
++			linklistmap |= RTE_BIT64(2 * i);
+ 		tempmap >>= 1;
+ 	}
+ 
+ 	tempmap = vvm->txq_map;
+ 	for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
+ 		if (tempmap & 0x1)
+-			linklistmap |= (1 << (2 * i + 1));
++			linklistmap |= RTE_BIT64(2 * i + 1);
+ 		tempmap >>= 1;
+ 	}
+ 
+@@ -725,7 +725,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
+ 		if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
+ 			i40e_pf_config_irq_link_list(vf, map);
+ 		} else {
+-			/* configured queue size excceed limit */
++			/* configured queue size exceed limit */
+ 			ret = I40E_ERR_PARAM;
+ 			goto send_msg;
+ 		}
+diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c
+index e4cb33dc3c..9a00a9b71e 100644
+--- a/dpdk/drivers/net/i40e/i40e_rxtx.c
++++ b/dpdk/drivers/net/i40e/i40e_rxtx.c
+@@ -609,7 +609,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
+ 		rxdp[i].read.pkt_addr = dma_addr;
+ 	}
+ 
+-	/* Update rx tail regsiter */
++	/* Update rx tail register */
+ 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+ 
+ 	rxq->rx_free_trigger =
+@@ -995,7 +995,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
+ 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ 	 * register. Update the RDT with the value of the last processed RX
+ 	 * descriptor minus 1, to guarantee that the RDT register is never
+-	 * equal to the RDH register, which creates a "full" ring situtation
++	 * equal to the RDH register, which creates a "full" ring situation
+ 	 * from the hardware point of view.
+ 	 */
+ 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+@@ -1467,7 +1467,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
+ 	i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+ 
+-	/* Determin if RS bit needs to be set */
++	/* Determine if RS bit needs to be set */
+ 	if (txq->tx_tail > txq->tx_next_rs) {
+ 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ 			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+@@ -1697,7 +1697,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ 	}
+ 
+ 	if (rxq->rx_deferred_start)
+-		PMD_DRV_LOG(WARNING, "RX queue %u is deferrd start",
++		PMD_DRV_LOG(WARNING, "RX queue %u is deferred start",
+ 			    rx_queue_id);
+ 
+ 	err = i40e_alloc_rx_queue_mbufs(rxq);
+@@ -1706,7 +1706,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ 		return err;
+ 	}
+ 
+-	/* Init the RX tail regieter. */
++	/* Init the RX tail register. */
+ 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ 
+ 	err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+@@ -1771,7 +1771,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+ 	}
+ 
+ 	if (txq->tx_deferred_start)
+-		PMD_DRV_LOG(WARNING, "TX queue %u is deferrd start",
++		PMD_DRV_LOG(WARNING, "TX queue %u is deferred start",
+ 			    tx_queue_id);
+ 
+ 	/*
+@@ -1930,7 +1930,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+ 		PMD_DRV_LOG(ERR, "Can't use default burst.");
+ 		return -EINVAL;
+ 	}
+-	/* check scatterred conflict */
++	/* check scattered conflict */
+ 	if (!dev->data->scattered_rx && use_scattered_rx) {
+ 		PMD_DRV_LOG(ERR, "Scattered rx is required.");
+ 		return -EINVAL;
+@@ -2014,7 +2014,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ 	rxq->offloads = offloads;
+ 
+-	/* Allocate the maximun number of RX ring hardware descriptor. */
++	/* Allocate the maximum number of RX ring hardware descriptor. */
+ 	len = I40E_MAX_RING_DESC;
+ 
+ 	/**
+@@ -2322,7 +2322,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 	 */
+ 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ 		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+-	/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
++	/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
+ 	tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+ 		nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+ 	if (tx_conf->tx_rs_thresh > 0)
+@@ -2991,7 +2991,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
+ 	if (rxq->max_pkt_len > buf_size)
+ 		dev_data->scattered_rx = 1;
+ 
+-	/* Init the RX tail regieter. */
++	/* Init the RX tail register. */
+ 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+index d0bf86dfba..00a015013e 100644
+--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c
++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+@@ -27,10 +27,10 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+ 	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ 	struct rte_mbuf *mb0, *mb1;
+ 
+-	vector unsigned long hdr_room = (vector unsigned long){
++	__vector unsigned long hdr_room = (__vector unsigned long){
+ 						RTE_PKTMBUF_HEADROOM,
+ 						RTE_PKTMBUF_HEADROOM};
+-	vector unsigned long dma_addr0, dma_addr1;
++	__vector unsigned long dma_addr0, dma_addr1;
+ 
+ 	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+ 
+@@ -40,11 +40,11 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+ 				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ 		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ 		    rxq->nb_rx_desc) {
+-			dma_addr0 = (vector unsigned long){};
++			dma_addr0 = (__vector unsigned long){};
+ 			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ 				rxep[i].mbuf = &rxq->fake_mbuf;
+ 				vec_st(dma_addr0, 0,
+-				       (vector unsigned long *)&rxdp[i].read);
++				       (__vector unsigned long *)&rxdp[i].read);
+ 			}
+ 		}
+ 		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+@@ -54,7 +54,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+ 
+ 	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ 	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+-		vector unsigned long vaddr0, vaddr1;
++		__vector unsigned long vaddr0, vaddr1;
+ 		uintptr_t p0, p1;
+ 
+ 		mb0 = rxep[0].mbuf;
+@@ -72,8 +72,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+ 		*(uint64_t *)p1 = rxq->mbuf_initializer;
+ 
+ 		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+-		vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+-		vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
++		vaddr0 = vec_ld(0, (__vector unsigned long *)&mb0->buf_addr);
++		vaddr1 = vec_ld(0, (__vector unsigned long *)&mb1->buf_addr);
+ 
+ 		/* convert pa to dma_addr hdr/data */
+ 		dma_addr0 = vec_mergel(vaddr0, vaddr0);
+@@ -84,8 +84,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+ 		dma_addr1 = vec_add(dma_addr1, hdr_room);
+ 
+ 		/* flush desc with pa dma_addr */
+-		vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+-		vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
++		vec_st(dma_addr0, 0, (__vector unsigned long *)&rxdp++->read);
++		vec_st(dma_addr1, 0, (__vector unsigned long *)&rxdp++->read);
+ 	}
+ 
+ 	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+@@ -102,32 +102,32 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+ }
+ 
+ static inline void
+-desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
++desc_to_olflags_v(__vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+ {
+-	vector unsigned int vlan0, vlan1, rss, l3_l4e;
++	__vector unsigned int vlan0, vlan1, rss, l3_l4e;
+ 
+ 	/* mask everything except RSS, flow director and VLAN flags
+ 	 * bit2 is for VLAN tag, bit11 for flow director indication
+ 	 * bit13:12 for RSS indication.
+ 	 */
+-	const vector unsigned int rss_vlan_msk = (vector unsigned int){
++	const __vector unsigned int rss_vlan_msk = (__vector unsigned int){
+ 			(int32_t)0x1c03804, (int32_t)0x1c03804,
+ 			(int32_t)0x1c03804, (int32_t)0x1c03804};
+ 
+ 	/* map rss and vlan type to rss hash and vlan flag */
+-	const vector unsigned char vlan_flags = (vector unsigned char){
++	const __vector unsigned char vlan_flags = (__vector unsigned char){
+ 			0, 0, 0, 0,
+ 			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
+ 			0, 0, 0, 0,
+ 			0, 0, 0, 0};
+ 
+-	const vector unsigned char rss_flags = (vector unsigned char){
++	const __vector unsigned char rss_flags = (__vector unsigned char){
+ 			0, RTE_MBUF_F_RX_FDIR, 0, 0,
+ 			0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ 			0, 0, 0, 0,
+ 			0, 0, 0, 0};
+ 
+-	const vector unsigned char l3_l4e_flags = (vector unsigned char){
++	const __vector unsigned char l3_l4e_flags = (__vector unsigned char){
+ 			0,
+ 			RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ 			RTE_MBUF_F_RX_L4_CKSUM_BAD,
+@@ -139,23 +139,23 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+ 					     | RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ 			0, 0, 0, 0, 0, 0, 0, 0};
+ 
+-	vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+-	vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+-	vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
++	vlan0 = (__vector unsigned int)vec_mergel(descs[0], descs[1]);
++	vlan1 = (__vector unsigned int)vec_mergel(descs[2], descs[3]);
++	vlan0 = (__vector unsigned int)vec_mergeh(vlan0, vlan1);
+ 
+ 	vlan1 = vec_and(vlan0, rss_vlan_msk);
+-	vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+-					(vector unsigned char){},
+-					*(vector unsigned char *)&vlan1);
++	vlan0 = (__vector unsigned int)vec_perm(vlan_flags,
++				(__vector unsigned char){},
++				*(__vector unsigned char *)&vlan1);
+ 
+-	rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+-	rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+-					*(vector unsigned char *)&rss);
++	rss = vec_sr(vlan1, (__vector unsigned int){11, 11, 11, 11});
++	rss = (__vector unsigned int)vec_perm(rss_flags, (__vector unsigned char){},
++				*(__vector unsigned char *)&rss);
+ 
+-	l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+-	l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+-					(vector unsigned char){},
+-					*(vector unsigned char *)&l3_l4e);
++	l3_l4e = vec_sr(vlan1, (__vector unsigned int){22, 22, 22, 22});
++	l3_l4e = (__vector unsigned int)vec_perm(l3_l4e_flags,
++				(__vector unsigned char){},
++				*(__vector unsigned char *)&l3_l4e);
+ 
+ 	vlan0 = vec_or(vlan0, rss);
+ 	vlan0 = vec_or(vlan0, l3_l4e);
+@@ -169,23 +169,23 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+ #define PKTLEN_SHIFT     10
+ 
+ static inline void
+-desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
++desc_to_ptype_v(__vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+ 		uint32_t *ptype_tbl)
+ {
+-	vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+-	vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
++	__vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
++	__vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+ 
+-	ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+-	ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
++	ptype0 = vec_sr(ptype0, (__vector unsigned long){30, 30});
++	ptype1 = vec_sr(ptype1, (__vector unsigned long){30, 30});
+ 
+ 	rx_pkts[0]->packet_type =
+-		ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
++		ptype_tbl[(*(__vector unsigned char *)&ptype0)[0]];
+ 	rx_pkts[1]->packet_type =
+-		ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
++		ptype_tbl[(*(__vector unsigned char *)&ptype0)[8]];
+ 	rx_pkts[2]->packet_type =
+-		ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
++		ptype_tbl[(*(__vector unsigned char *)&ptype1)[0]];
+ 	rx_pkts[3]->packet_type =
+-		ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
++		ptype_tbl[(*(__vector unsigned char *)&ptype1)[8]];
+ }
+ 
+ /**
+@@ -204,17 +204,17 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 	uint16_t nb_pkts_recd;
+ 	int pos;
+ 	uint64_t var;
+-	vector unsigned char shuf_msk;
++	__vector unsigned char shuf_msk;
+ 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ 
+-	vector unsigned short crc_adjust = (vector unsigned short){
++	__vector unsigned short crc_adjust = (__vector unsigned short){
+ 		0, 0,         /* ignore pkt_type field */
+ 		rxq->crc_len, /* sub crc on pkt_len */
+ 		0,            /* ignore high-16bits of pkt_len */
+ 		rxq->crc_len, /* sub crc on data_len */
+ 		0, 0, 0       /* ignore non-length fields */
+ 		};
+-	vector unsigned long dd_check, eop_check;
++	__vector unsigned long dd_check, eop_check;
+ 
+ 	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+@@ -240,15 +240,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 		return 0;
+ 
+ 	/* 4 packets DD mask */
+-	dd_check = (vector unsigned long){0x0000000100000001ULL,
++	dd_check = (__vector unsigned long){0x0000000100000001ULL,
+ 					  0x0000000100000001ULL};
+ 
+ 	/* 4 packets EOP mask */
+-	eop_check = (vector unsigned long){0x0000000200000002ULL,
++	eop_check = (__vector unsigned long){0x0000000200000002ULL,
+ 					   0x0000000200000002ULL};
+ 
+ 	/* mask to shuffle from desc. to mbuf */
+-	shuf_msk = (vector unsigned char){
++	shuf_msk = (__vector unsigned char){
+ 		0xFF, 0xFF,   /* pkt_type set as unknown */
+ 		0xFF, 0xFF,   /* pkt_type set as unknown */
+ 		14, 15,       /* octet 15~14, low 16 bits pkt_len */
+@@ -274,35 +274,35 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ 			pos += RTE_I40E_DESCS_PER_LOOP,
+ 			rxdp += RTE_I40E_DESCS_PER_LOOP) {
+-		vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+-		vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+-		vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+-		vector unsigned long mbp1, mbp2; /* two mbuf pointer
++		__vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
++		__vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
++		__vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
++		__vector unsigned long mbp1, mbp2; /* two mbuf pointer
+ 						  * in one XMM reg.
+ 						  */
+ 
+ 		/* B.1 load 2 mbuf point */
+-		mbp1 = *(vector unsigned long *)&sw_ring[pos];
++		mbp1 = *(__vector unsigned long *)&sw_ring[pos];
+ 		/* Read desc statuses backwards to avoid race condition */
+ 		/* A.1 load desc[3] */
+-		descs[3] = *(vector unsigned long *)(rxdp + 3);
++		descs[3] = *(__vector unsigned long *)(rxdp + 3);
+ 		rte_compiler_barrier();
+ 
+ 		/* B.2 copy 2 mbuf point into rx_pkts  */
+-		*(vector unsigned long *)&rx_pkts[pos] = mbp1;
++		*(__vector unsigned long *)&rx_pkts[pos] = mbp1;
+ 
+ 		/* B.1 load 2 mbuf point */
+-		mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
++		mbp2 = *(__vector unsigned long *)&sw_ring[pos + 2];
+ 
+ 		/* A.1 load desc[2-0] */
+-		descs[2] = *(vector unsigned long *)(rxdp + 2);
++		descs[2] = *(__vector unsigned long *)(rxdp + 2);
+ 		rte_compiler_barrier();
+-		descs[1] = *(vector unsigned long *)(rxdp + 1);
++		descs[1] = *(__vector unsigned long *)(rxdp + 1);
+ 		rte_compiler_barrier();
+-		descs[0] = *(vector unsigned long *)(rxdp);
++		descs[0] = *(__vector unsigned long *)(rxdp);
+ 
+ 		/* B.2 copy 2 mbuf point into rx_pkts  */
+-		*(vector unsigned long *)&rx_pkts[pos + 2] =  mbp2;
++		*(__vector unsigned long *)&rx_pkts[pos + 2] =  mbp2;
+ 
+ 		if (split_packet) {
+ 			rte_mbuf_prefetch_part2(rx_pkts[pos]);
+@@ -315,78 +315,78 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 		rte_compiler_barrier();
+ 
+ 		/* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+-		const vector unsigned int len3 = vec_sl(
+-			vec_ld(0, (vector unsigned int *)&descs[3]),
+-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
++		const __vector unsigned int len3 = vec_sl(
++			vec_ld(0, (__vector unsigned int *)&descs[3]),
++			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ 
+-		const vector unsigned int len2 = vec_sl(
+-			vec_ld(0, (vector unsigned int *)&descs[2]),
+-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
++		const __vector unsigned int len2 = vec_sl(
++			vec_ld(0, (__vector unsigned int *)&descs[2]),
++			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ 
+ 		/* merge the now-aligned packet length fields back in */
+-		descs[3] = (vector unsigned long)len3;
+-		descs[2] = (vector unsigned long)len2;
++		descs[3] = (__vector unsigned long)len3;
++		descs[2] = (__vector unsigned long)len2;
+ 
+ 		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
+-		pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+-				  (vector unsigned char){}, shuf_msk);
+-		pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+-				  (vector unsigned char){}, shuf_msk);
++		pkt_mb4 = vec_perm((__vector unsigned char)descs[3],
++				  (__vector unsigned char){}, shuf_msk);
++		pkt_mb3 = vec_perm((__vector unsigned char)descs[2],
++				  (__vector unsigned char){}, shuf_msk);
+ 
+ 		/* C.1 4=>2 filter staterr info only */
+-		sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+-					(vector unsigned short)descs[2]);
++		sterr_tmp2 = vec_mergel((__vector unsigned short)descs[3],
++					(__vector unsigned short)descs[2]);
+ 		/* C.1 4=>2 filter staterr info only */
+-		sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+-					(vector unsigned short)descs[0]);
++		sterr_tmp1 = vec_mergel((__vector unsigned short)descs[1],
++					(__vector unsigned short)descs[0]);
+ 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+-		pkt_mb4 = (vector unsigned char)vec_sub(
+-				(vector unsigned short)pkt_mb4, crc_adjust);
+-		pkt_mb3 = (vector unsigned char)vec_sub(
+-				(vector unsigned short)pkt_mb3, crc_adjust);
++		pkt_mb4 = (__vector unsigned char)vec_sub(
++				(__vector unsigned short)pkt_mb4, crc_adjust);
++		pkt_mb3 = (__vector unsigned char)vec_sub(
++				(__vector unsigned short)pkt_mb3, crc_adjust);
+ 
+ 		/* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+-		const vector unsigned int len1 = vec_sl(
+-			vec_ld(0, (vector unsigned int *)&descs[1]),
+-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+-		const vector unsigned int len0 = vec_sl(
+-			vec_ld(0, (vector unsigned int *)&descs[0]),
+-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
++		const __vector unsigned int len1 = vec_sl(
++			vec_ld(0, (__vector unsigned int *)&descs[1]),
++			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
++		const __vector unsigned int len0 = vec_sl(
++			vec_ld(0, (__vector unsigned int *)&descs[0]),
++			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ 
+ 		/* merge the now-aligned packet length fields back in */
+-		descs[1] = (vector unsigned long)len1;
+-		descs[0] = (vector unsigned long)len0;
++		descs[1] = (__vector unsigned long)len1;
++		descs[0] = (__vector unsigned long)len0;
+ 
+ 		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
+-		pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+-				   (vector unsigned char){}, shuf_msk);
+-		pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+-				   (vector unsigned char){}, shuf_msk);
++		pkt_mb2 = vec_perm((__vector unsigned char)descs[1],
++				(__vector unsigned char){}, shuf_msk);
++		pkt_mb1 = vec_perm((__vector unsigned char)descs[0],
++				(__vector unsigned char){}, shuf_msk);
+ 
+ 		/* C.2 get 4 pkts staterr value  */
+-		staterr = (vector unsigned short)vec_mergeh(
++		staterr = (__vector unsigned short)vec_mergeh(
+ 				sterr_tmp1, sterr_tmp2);
+ 
+ 		/* D.3 copy final 3,4 data to rx_pkts */
+ 		vec_st(pkt_mb4, 0,
+-		 (vector unsigned char *)&rx_pkts[pos + 3]
++		 (__vector unsigned char *)&rx_pkts[pos + 3]
+ 			->rx_descriptor_fields1
+ 		);
+ 		vec_st(pkt_mb3, 0,
+-		 (vector unsigned char *)&rx_pkts[pos + 2]
++		 (__vector unsigned char *)&rx_pkts[pos + 2]
+ 			->rx_descriptor_fields1
+ 		);
+ 
+ 		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+-		pkt_mb2 = (vector unsigned char)vec_sub(
+-				(vector unsigned short)pkt_mb2, crc_adjust);
+-		pkt_mb1 = (vector unsigned char)vec_sub(
+-				(vector unsigned short)pkt_mb1,	crc_adjust);
++		pkt_mb2 = (__vector unsigned char)vec_sub(
++				(__vector unsigned short)pkt_mb2, crc_adjust);
++		pkt_mb1 = (__vector unsigned char)vec_sub(
++				(__vector unsigned short)pkt_mb1,	crc_adjust);
+ 
+ 		/* C* extract and record EOP bit */
+ 		if (split_packet) {
+-			vector unsigned char eop_shuf_mask =
+-				(vector unsigned char){
++			__vector unsigned char eop_shuf_mask =
++				(__vector unsigned char){
+ 					0xFF, 0xFF, 0xFF, 0xFF,
+ 					0xFF, 0xFF, 0xFF, 0xFF,
+ 					0xFF, 0xFF, 0xFF, 0xFF,
+@@ -394,19 +394,19 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 				};
+ 
+ 			/* and with mask to extract bits, flipping 1-0 */
+-			vector unsigned char eop_bits = vec_and(
+-				(vector unsigned char)vec_nor(staterr, staterr),
+-				(vector unsigned char)eop_check);
++			__vector unsigned char eop_bits = vec_and(
++				(__vector unsigned char)vec_nor(staterr, staterr),
++				(__vector unsigned char)eop_check);
+ 			/* the staterr values are not in order, as the count
+ 			 * of dd bits doesn't care. However, for end of
+ 			 * packet tracking, we do care, so shuffle. This also
+ 			 * compresses the 32-bit values to 8-bit
+ 			 */
+-			eop_bits = vec_perm(eop_bits, (vector unsigned char){},
++			eop_bits = vec_perm(eop_bits, (__vector unsigned char){},
+ 					    eop_shuf_mask);
+ 			/* store the resulting 32-bit value */
+ 			*split_packet = (vec_ld(0,
+-					 (vector unsigned int *)&eop_bits))[0];
++					 (__vector unsigned int *)&eop_bits))[0];
+ 			split_packet += RTE_I40E_DESCS_PER_LOOP;
+ 
+ 			/* zero-out next pointers */
+@@ -417,22 +417,22 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 		}
+ 
+ 		/* C.3 calc available number of desc */
+-		staterr = vec_and(staterr, (vector unsigned short)dd_check);
++		staterr = vec_and(staterr, (__vector unsigned short)dd_check);
+ 
+ 		/* D.3 copy final 1,2 data to rx_pkts */
+ 		vec_st(pkt_mb2, 0,
+-		 (vector unsigned char *)&rx_pkts[pos + 1]
++		 (__vector unsigned char *)&rx_pkts[pos + 1]
+ 			->rx_descriptor_fields1
+ 		);
+ 		vec_st(pkt_mb1, 0,
+-		 (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
++		 (__vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+ 		);
+ 		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ 		desc_to_olflags_v(descs, &rx_pkts[pos]);
+ 
+-		/* C.4 calc avaialbe number of desc */
++		/* C.4 calc available number of desc */
+ 		var = __builtin_popcountll((vec_ld(0,
+-			(vector unsigned long *)&staterr)[0]));
++			(__vector unsigned long *)&staterr)[0]));
+ 		nb_pkts_recd += var;
+ 		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ 			break;
+@@ -533,9 +533,9 @@ vtx1(volatile struct i40e_tx_desc *txdp,
+ 		((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
+ 		((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+ 
+-	vector unsigned long descriptor = (vector unsigned long){
++	__vector unsigned long descriptor = (__vector unsigned long){
+ 		pkt->buf_iova + pkt->data_off, high_qw};
+-	*(vector unsigned long *)txdp = descriptor;
++	*(__vector unsigned long *)txdp = descriptor;
+ }
+ 
+ static inline void
+diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c
+index b951ea2dc3..507468531f 100644
+--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c
++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c
+@@ -151,7 +151,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
+ 					      vreinterpretq_u8_u32(l3_l4e)));
+ 	/* then we shift left 1 bit */
+ 	l3_l4e = vshlq_n_u32(l3_l4e, 1);
+-	/* we need to mask out the reduntant bits */
++	/* we need to mask out the redundant bits */
+ 	l3_l4e = vandq_u32(l3_l4e, cksum_mask);
+ 
+ 	vlan0 = vorrq_u32(vlan0, rss);
+@@ -416,7 +416,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq,
+ 					    I40E_UINT16_BIT - 1));
+ 		stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+ 
+-		/* C.4 calc avaialbe number of desc */
++		/* C.4 calc available number of desc */
+ 		if (unlikely(stat == 0)) {
+ 			nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
+ 		} else {
+diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c
+index 497b2404c6..3782e8052f 100644
+--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c
++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c
+@@ -282,7 +282,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
+ 	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ 	/* then we shift left 1 bit */
+ 	l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+-	/* we need to mask out the reduntant bits */
++	/* we need to mask out the redundant bits */
+ 	l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+ 
+ 	vlan0 = _mm_or_si128(vlan0, rss);
+@@ -297,7 +297,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
+ 		__m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat,
+ 							    descs, rx_pkts);
+ #endif
+-		/* OR in ol_flag bits after descriptor speicific extraction */
++		/* OR in ol_flag bits after descriptor specific extraction */
+ 		vlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags);
+ 	}
+ 
+@@ -577,7 +577,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ 				 pkt_mb1);
+ 		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+-		/* C.4 calc avaialbe number of desc */
++		/* C.4 calc available number of desc */
+ 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ 		nb_pkts_recd += var;
+ 		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+diff --git a/dpdk/drivers/net/i40e/rte_pmd_i40e.c b/dpdk/drivers/net/i40e/rte_pmd_i40e.c
+index a492959b75..35829a1eea 100644
+--- a/dpdk/drivers/net/i40e/rte_pmd_i40e.c
++++ b/dpdk/drivers/net/i40e/rte_pmd_i40e.c
+@@ -1427,7 +1427,7 @@ rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
+ 	/* Get all TCs' bandwidth. */
+ 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ 		if (veb->enabled_tc & BIT_ULL(i)) {
+-			/* For rubust, if bandwidth is 0, use 1 instead. */
++			/* For robust, if bandwidth is 0, use 1 instead. */
+ 			if (veb->bw_info.bw_ets_share_credits[i])
+ 				ets_data.tc_bw_share_credits[i] =
+ 					veb->bw_info.bw_ets_share_credits[i];
+diff --git a/dpdk/drivers/net/iavf/iavf.h b/dpdk/drivers/net/iavf/iavf.h
+index 0bb5698583..29692e3994 100644
+--- a/dpdk/drivers/net/iavf/iavf.h
++++ b/dpdk/drivers/net/iavf/iavf.h
+@@ -18,7 +18,7 @@
+ 
+ #define IAVF_AQ_LEN               32
+ #define IAVF_AQ_BUF_SZ            4096
+-#define IAVF_RESET_WAIT_CNT       50
++#define IAVF_RESET_WAIT_CNT       500
+ #define IAVF_BUF_SIZE_MIN         1024
+ #define IAVF_FRAME_SIZE_MAX       9728
+ #define IAVF_QUEUE_BASE_ADDR_UNIT 128
+@@ -296,6 +296,7 @@ struct iavf_adapter {
+ 	bool tx_vec_allowed;
+ 	uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
+ 	bool stopped;
++	bool closed;
+ 	uint16_t fdir_ref_cnt;
+ 	struct iavf_devargs devargs;
+ };
+diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c
+index 377d7bc7a6..f835457e4f 100644
+--- a/dpdk/drivers/net/iavf/iavf_ethdev.c
++++ b/dpdk/drivers/net/iavf/iavf_ethdev.c
+@@ -229,9 +229,15 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
+ };
+ 
+ static int
+-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
++iavf_tm_ops_get(struct rte_eth_dev *dev,
+ 			void *arg)
+ {
++	struct iavf_adapter *adapter =
++		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++
++	if (adapter->closed)
++		return -EIO;
++
+ 	if (!arg)
+ 		return -EINVAL;
+ 
+@@ -342,6 +348,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
+ 		return -EINVAL;
+ 	}
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	/* flush previous addresses */
+ 	err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
+ 					false);
+@@ -516,7 +525,7 @@ iavf_init_rss(struct iavf_adapter *adapter)
+ 			j = 0;
+ 		vf->rss_lut[i] = j;
+ 	}
+-	/* send virtchnnl ops to configure rss*/
++	/* send virtchnl ops to configure RSS */
+ 	ret = iavf_configure_rss_lut(adapter);
+ 	if (ret)
+ 		return ret;
+@@ -613,6 +622,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
+ 		dev->data->nb_tx_queues);
+ 	int ret;
+ 
++	if (ad->closed)
++		return -EIO;
++
+ 	ad->rx_bulk_alloc_allowed = true;
+ 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
+ 	 * vector Rx/Tx preconditions, it will be reset.
+@@ -831,7 +843,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+ 				    "vector %u are mapping to all Rx queues",
+ 				    vf->msix_base);
+ 		} else {
+-			/* If Rx interrupt is reuquired, and we can use
++			/* If Rx interrupt is required, and we can use
+ 			 * multi interrupts, then the vec is from 1
+ 			 */
+ 			vf->nb_msix =
+@@ -896,28 +908,38 @@ iavf_start_queues(struct rte_eth_dev *dev)
+ 	struct iavf_rx_queue *rxq;
+ 	struct iavf_tx_queue *txq;
+ 	int i;
++	uint16_t nb_txq, nb_rxq;
+ 
+-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+-		txq = dev->data->tx_queues[i];
++	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
++		txq = dev->data->tx_queues[nb_txq];
+ 		if (txq->tx_deferred_start)
+ 			continue;
+-		if (iavf_dev_tx_queue_start(dev, i) != 0) {
+-			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+-			return -1;
++		if (iavf_dev_tx_queue_start(dev, nb_txq) != 0) {
++			PMD_DRV_LOG(ERR, "Fail to start tx queue %u", nb_txq);
++			goto tx_err;
+ 		}
+ 	}
+ 
+-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+-		rxq = dev->data->rx_queues[i];
++	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
++		rxq = dev->data->rx_queues[nb_rxq];
+ 		if (rxq->rx_deferred_start)
+ 			continue;
+-		if (iavf_dev_rx_queue_start(dev, i) != 0) {
+-			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+-			return -1;
++		if (iavf_dev_rx_queue_start(dev, nb_rxq) != 0) {
++			PMD_DRV_LOG(ERR, "Fail to start rx queue %u", nb_rxq);
++			goto rx_err;
+ 		}
+ 	}
+ 
+ 	return 0;
++
++rx_err:
++	for (i = 0; i < nb_rxq; i++)
++		iavf_dev_rx_queue_stop(dev, i);
++tx_err:
++	for (i = 0; i < nb_txq; i++)
++		iavf_dev_tx_queue_stop(dev, i);
++
++	return -1;
+ }
+ 
+ static int
+@@ -932,6 +954,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	if (adapter->closed)
++		return -1;
++
+ 	adapter->stopped = 0;
+ 
+ 	vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
+@@ -1009,6 +1034,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	if (adapter->closed)
++		return -1;
++
+ 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
+ 	    dev->data->dev_conf.intr_conf.rxq != 0)
+ 		rte_intr_disable(intr_handle);
+@@ -1030,9 +1058,6 @@ iavf_dev_stop(struct rte_eth_dev *dev)
+ 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
+ 				  false);
+ 
+-	/* free iAVF security device context all related resources */
+-	iavf_security_ctx_destroy(adapter);
+-
+ 	adapter->stopped = 1;
+ 	dev->data->dev_started = 0;
+ 
+@@ -1046,6 +1071,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ 	struct iavf_info *vf = &adapter->vf;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
+ 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
+ 	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
+@@ -1286,6 +1314,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ 	int err;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ 		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
+ 		if (err)
+@@ -1362,6 +1393,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ 	int err;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+ 		return iavf_dev_vlan_offload_set_v2(dev, mask);
+ 
+@@ -1394,6 +1428,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ 	uint16_t i, idx, shift;
+ 	int ret;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ 		return -ENOTSUP;
+ 
+@@ -1420,7 +1457,7 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ 	}
+ 
+ 	rte_memcpy(vf->rss_lut, lut, reta_size);
+-	/* send virtchnnl ops to configure rss*/
++	/* send virtchnl ops to configure RSS */
+ 	ret = iavf_configure_rss_lut(adapter);
+ 	if (ret) /* revert back */
+ 		rte_memcpy(vf->rss_lut, lut, reta_size);
+@@ -1439,6 +1476,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ 	uint16_t i, idx, shift;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ 		return -ENOTSUP;
+ 
+@@ -1492,6 +1532,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ 
+ 	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ 		return -ENOTSUP;
+ 
+@@ -1545,6 +1588,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ 		return -ENOTSUP;
+ 
+@@ -1792,6 +1838,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ 	uint16_t msix_intr;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
+ 						       queue_id);
+ 	if (msix_intr == IAVF_MISC_VEC_ID) {
+@@ -1833,7 +1882,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+ 
+ 	IAVF_WRITE_REG(hw,
+ 		      IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
+-		      0);
++		      IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+ 
+ 	IAVF_WRITE_FLUSH(hw);
+ 	return 0;
+@@ -2412,8 +2461,11 @@ static int
+ iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
+ 		      const struct rte_flow_ops **ops)
+ {
+-	if (!dev)
+-		return -EINVAL;
++	struct iavf_adapter *adapter =
++		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++
++	if (adapter->closed)
++		return -EIO;
+ 
+ 	*ops = &iavf_flow_ops;
+ 	return 0;
+@@ -2554,7 +2606,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
+ 
+ 	/* Start device watchdog */
+ 	iavf_dev_watchdog_enable(adapter);
+-
++	adapter->closed = false;
+ 
+ 	return 0;
+ 
+@@ -2582,7 +2634,16 @@ iavf_dev_close(struct rte_eth_dev *dev)
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return 0;
+ 
++	if (adapter->closed) {
++		ret = 0;
++		goto out;
++	}
++
+ 	ret = iavf_dev_stop(dev);
++	adapter->closed = true;
++
++	/* free iAVF security device context all related resources */
++	iavf_security_ctx_destroy(adapter);
+ 
+ 	iavf_flow_flush(dev, NULL);
+ 	iavf_flow_uninit(adapter);
+@@ -2636,6 +2697,7 @@ iavf_dev_close(struct rte_eth_dev *dev)
+ 	 * the bus master bit will not be disabled, and this call will have no
+ 	 * effect.
+ 	 */
++out:
+ 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
+ 		vf->vf_reset = false;
+ 
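The reworked iavf_start_queues() above no longer returns with the port half-started: on a queue-start failure it unwinds every queue brought up so far via the tx_err/rx_err labels. A compilable sketch of the same unwind pattern, with dummy start/stop functions standing in for the PMD calls (the failing queue index is invented for the demo):

#include <stdio.h>

#define NB_TXQ 4
#define NB_RXQ 4

/* Dummy queue ops; pretend Rx queue 2 fails to start. */
static int txq_start(int q) { (void)q; return 0; }
static int rxq_start(int q) { return q == 2 ? -1 : 0; }
static void txq_stop(int q) { printf("stop txq %d\n", q); }
static void rxq_stop(int q) { printf("stop rxq %d\n", q); }

static int start_queues(void)
{
	int nb_txq, nb_rxq, i;

	for (nb_txq = 0; nb_txq < NB_TXQ; nb_txq++)
		if (txq_start(nb_txq) != 0)
			goto tx_err;

	for (nb_rxq = 0; nb_rxq < NB_RXQ; nb_rxq++)
		if (rxq_start(nb_rxq) != 0)
			goto rx_err;

	return 0;

rx_err:	/* unwind the Rx queues started so far ... */
	for (i = 0; i < nb_rxq; i++)
		rxq_stop(i);
tx_err:	/* ... then all started Tx queues (fallthrough is intentional) */
	for (i = 0; i < nb_txq; i++)
		txq_stop(i);
	return -1;
}

int main(void) { return start_queues() ? 1 : 0; }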
+diff --git a/dpdk/drivers/net/iavf/iavf_fdir.c b/dpdk/drivers/net/iavf/iavf_fdir.c
+index b63aaca91d..6b847894d8 100644
+--- a/dpdk/drivers/net/iavf/iavf_fdir.c
++++ b/dpdk/drivers/net/iavf/iavf_fdir.c
+@@ -1185,8 +1185,22 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+ 										 GTPU_DWN, QFI);
+ 				}
+ 
+-				rte_memcpy(hdr->buffer, gtp_psc_spec,
+-					sizeof(*gtp_psc_spec));
++				/*
++				 * New structure to fix gap between kernel driver and
++				 * rte_gtp_psc_generic_hdr.
++				 */
++				struct iavf_gtp_psc_spec_hdr {
++					uint8_t len;
++					uint8_t qfi:6;
++					uint8_t type:4;
++					uint8_t next;
++				} psc;
++				psc.len = gtp_psc_spec->hdr.ext_hdr_len;
++				psc.qfi = gtp_psc_spec->hdr.qfi;
++				psc.type = gtp_psc_spec->hdr.type;
++				psc.next = 0;
++				rte_memcpy(hdr->buffer, &psc,
++					sizeof(struct iavf_gtp_psc_spec_hdr));
+ 			}
+ 
+ 			hdrs->count = ++layer;
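The fdir hunk above copies the GTP-PSC fields one by one into a locally defined header instead of memcpy'ing the whole rte_gtp_psc_generic_hdr, so a layout mismatch with the kernel driver cannot leak into the rule buffer. A standalone sketch of that repack idiom; both struct layouts here are illustrative stand-ins, not the exact kernel or DPDK definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative "generic" layout, loosely modeled on the DPDK side. */
struct generic_psc_hdr {
	uint8_t ext_hdr_len;
	uint8_t type;
	uint8_t qfi;
};

/* Illustrative target layout expected by the peer; the bitfields mirror
 * the iavf_gtp_psc_spec_hdr introduced in the hunk above. */
struct target_psc_hdr {
	uint8_t len;
	uint8_t qfi:6;
	uint8_t type:4;
	uint8_t next;
};

int main(void)
{
	struct generic_psc_hdr in = { .ext_hdr_len = 1, .type = 0, .qfi = 9 };
	struct target_psc_hdr out;
	uint8_t buffer[sizeof(out)];

	memset(&out, 0, sizeof(out));
	/* Copy field by field rather than memcpy'ing the source struct,
	 * so packing differences between the two layouts cannot leak. */
	out.len  = in.ext_hdr_len;
	out.qfi  = in.qfi;
	out.type = in.type;
	out.next = 0;
	memcpy(buffer, &out, sizeof(out));

	printf("packed %zu bytes\n", sizeof(buffer));
	return 0;
}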
+diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c
+index 884169e061..75f05ee558 100644
+--- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c
++++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c
+@@ -69,7 +69,7 @@ struct iavf_security_session {
+  *  16B - 3
+  *
+  * but we also need the IV Length for TSO to correctly calculate the total
+- * header length so placing it in the upper 6-bits here for easier reterival.
++ * header length so placing it in the upper 6-bits here for easier retrieval.
+  */
+ static inline uint8_t
+ calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+@@ -448,7 +448,7 @@ sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+ /**
+  * Send SA add virtual channel request to Inline IPsec driver.
+  *
+- * Inline IPsec driver expects SPI and destination IP adderss to be in host
++ * Inline IPsec driver expects SPI and destination IP address to be in host
+  * order, but DPDK APIs are network order, therefore we need to do a htonl
+  * conversion of these parameters.
+  */
+@@ -614,7 +614,7 @@ set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+ 		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ 			sess->block_sz = get_auth_blocksize(iavf_sctx,
+ 				conf->crypto_xform->auth.algo);
+-			sess->iv_sz = conf->crypto_xform->auth.iv.length;
++			sess->iv_sz = sizeof(uint64_t); /* IV length including salt */
+ 			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+ 		} else {
+ 			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+@@ -726,7 +726,7 @@ iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+ /**
+  * Send virtual channel security policy add request to IES driver.
+  *
+- * IES driver expects SPI and destination IP adderss to be in host
++ * IES driver expects SPI and destination IP address to be in host
+  * order, but DPDK APIs are network order, therefore we need to do a htonl
+  * conversion of these parameters.
+  */
+@@ -736,7 +736,9 @@ iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+ 	uint8_t is_v4,
+ 	rte_be32_t v4_dst_addr,
+ 	uint8_t *v6_dst_addr,
+-	uint8_t drop)
++	uint8_t drop,
++	bool is_udp,
++	uint16_t udp_port)
+ {
+ 	struct inline_ipsec_msg *request = NULL, *response = NULL;
+ 	size_t request_len, response_len;
+@@ -781,6 +783,8 @@ iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+ 	/** Traffic Class/Congestion Domain currently not support */
+ 	request->ipsec_data.sp_cfg->set_tc = 0;
+ 	request->ipsec_data.sp_cfg->cgd = 0;
++	request->ipsec_data.sp_cfg->is_udp = is_udp;
++	request->ipsec_data.sp_cfg->udp_port = htons(udp_port);
+ 
+ 	response_len = sizeof(struct inline_ipsec_msg) +
+ 			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+@@ -994,7 +998,7 @@ iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+ 	request->req_id = (uint16_t)0xDEADBEEF;
+ 
+ 	/**
+-	 * SA delete supports deletetion of 1-8 specified SA's or if the flag
++	 * SA delete supports deletion of 1-8 specified SA's or if the flag
+ 	 * field is zero, all SA's associated with VF will be deleted.
+ 	 */
+ 	if (sess) {
+@@ -1114,11 +1118,14 @@ iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+ 		 * ipv4/6 hdr + ext hdrs
+ 		 */
+ 
+-	if (s->udp_encap.enabled)
++	if (s->udp_encap.enabled) {
+ 		ol4_len = sizeof(struct rte_udp_hdr);
+-
+-	l3_len = m->l3_len;
+-	l4_len = m->l4_len;
++		l3_len = m->l3_len - ol4_len;
++		l4_len = l3_len;
++	} else {
++		l3_len = m->l3_len;
++		l4_len = m->l4_len;
++	}
+ 
+ 	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+ 			esp_hlen + l3_len + l4_len + esp_tlen);
+@@ -1147,7 +1154,7 @@ iavf_ipsec_crypto_pkt_metadata_set(void *device,
+ 	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+ 		struct iavf_ipsec_crypto_pkt_metadata *);
+ 
+-	/* Set immutatable metadata values from session template */
++	/* Set immutable metadata values from session template */
+ 	memcpy(md, &iavf_sess->pkt_metadata_template,
+ 		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+ 
+@@ -1352,10 +1359,12 @@ iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+ 	capabilities = rte_zmalloc("crypto_cap",
+ 		sizeof(struct rte_cryptodev_capabilities) *
+ 		(number_of_capabilities + 1), 0);
++	if (!capabilities)
++		return -ENOMEM;
+ 	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ 
+ 	/**
+-	 * Iterate over each virtchl crypto capability by crypto type and
++	 * Iterate over each virtchnl crypto capability by crypto type and
+ 	 * algorithm.
+ 	 */
+ 	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+@@ -1454,7 +1463,7 @@ iavf_ipsec_crypto_capabilities_get(void *device)
+ 	/**
+ 	 * Update the security capabilities struct with the runtime discovered
+ 	 * crypto capabilities, except for last element of the array which is
+-	 * the null terminatation
++	 * the null termination
+ 	 */
+ 	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+ 			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+@@ -1545,29 +1554,90 @@ iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+ 	if (iavf_sctx == NULL)
+ 		return -ENODEV;
+ 
+-	/* TODO: Add resources cleanup */
+-
+ 	/* free and reset security data structures */
+ 	rte_free(iavf_sctx);
+ 	rte_free(sctx);
+ 
+-	iavf_sctx = NULL;
+-	sctx = NULL;
++	adapter->security_ctx = NULL;
++	adapter->vf.eth_dev->security_ctx = NULL;
+ 
+ 	return 0;
+ }
+ 
++static int
++iavf_ipsec_crypto_status_get(struct iavf_adapter *adapter,
++		struct virtchnl_ipsec_status *status)
++{
++	/* Perform pf-vf comms */
++	struct inline_ipsec_msg *request = NULL, *response = NULL;
++	size_t request_len, response_len;
++	int rc;
++
++	request_len = sizeof(struct inline_ipsec_msg);
++
++	request = rte_malloc("iavf-device-status-request", request_len, 0);
++	if (request == NULL) {
++		rc = -ENOMEM;
++		goto update_cleanup;
++	}
++
++	response_len = sizeof(struct inline_ipsec_msg) +
++			sizeof(struct virtchnl_ipsec_cap);
++	response = rte_malloc("iavf-device-status-response",
++			response_len, 0);
++	if (response == NULL) {
++		rc = -ENOMEM;
++		goto update_cleanup;
++	}
++
++	/* set msg header params */
++	request->ipsec_opcode = INLINE_IPSEC_OP_GET_STATUS;
++	request->req_id = (uint16_t)0xDEADBEEF;
++
++	/* send virtual channel request to query the device's IPsec status */
++	rc = iavf_ipsec_crypto_request(adapter,
++			(uint8_t *)request, request_len,
++			(uint8_t *)response, response_len);
++	if (rc)
++		goto update_cleanup;
++
++	/* verify response id */
++	if (response->ipsec_opcode != request->ipsec_opcode ||
++		response->req_id != request->req_id) {
++		rc = -EFAULT;
++		goto update_cleanup;
++	}
++	memcpy(status, response->ipsec_data.ipsec_status, sizeof(*status));
++
++update_cleanup:
++	rte_free(response);
++	rte_free(request);
++
++	return rc;
++}
++
++
+ int
+ iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+ {
+ 	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
++	int crypto_supported = false;
+ 
+ 	/** Capability check for IPsec Crypto */
+ 	if (resources && (resources->vf_cap_flags &
+-		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+-		return true;
++		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)) {
++		struct virtchnl_ipsec_status status;
++		int rc = iavf_ipsec_crypto_status_get(adapter, &status);
++		if (rc == 0 && status.status == INLINE_IPSEC_STATUS_AVAILABLE)
++			crypto_supported = true;
++	}
+ 
+-	return false;
++	/* Clear the VF flag to return faster next call */
++	if (resources && !crypto_supported)
++		resources->vf_cap_flags &=
++				~(VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO);
++
++	return crypto_supported;
+ }
+ 
+ #define IAVF_IPSEC_INSET_ESP (\
+@@ -1623,6 +1693,7 @@ struct iavf_ipsec_flow_item {
+ 		struct rte_ipv6_hdr ipv6_hdr;
+ 	};
+ 	struct rte_udp_hdr udp_hdr;
++	uint8_t is_udp;
+ };
+ 
+ static void
+@@ -1735,6 +1806,7 @@ iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+ 		parse_udp_item((const struct rte_flow_item_udp *)
+ 				pattern[2].spec,
+ 			&ipsec_flow->udp_hdr);
++		ipsec_flow->is_udp = true;
+ 		ipsec_flow->spi =
+ 			((const struct rte_flow_item_esp *)
+ 					pattern[3].spec)->hdr.spi;
+@@ -1804,7 +1876,9 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad,
+ 			1,
+ 			ipsec_flow->ipv4_hdr.dst_addr,
+ 			NULL,
+-			0);
++			0,
++			ipsec_flow->is_udp,
++			ipsec_flow->udp_hdr.dst_port);
+ 	} else {
+ 		ipsec_flow->id =
+ 			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+@@ -1812,7 +1886,9 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad,
+ 			0,
+ 			0,
+ 			ipsec_flow->ipv6_hdr.dst_addr,
+-			0);
++			0,
++			ipsec_flow->is_udp,
++			ipsec_flow->udp_hdr.dst_port);
+ 	}
+ 
+ 	if (ipsec_flow->id < 1) {
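One hunk above changes iavf_ipsec_crypto_compute_l4_payload_length() so that, with UDP encapsulation (NAT-T) enabled, the UDP header is subtracted from m->l3_len instead of being counted on top of it. A toy version of that bookkeeping follows; the lengths are invented, and the l4_len = l3_len assignment is copied from the hunk as-is, since the driver's exact field semantics are not spelled out here:

#include <stdint.h>
#include <stdio.h>

#define UDP_HDR_LEN 8	/* sizeof(struct rte_udp_hdr) */

/* Toy mbuf-ish lengths; values are invented for illustration. */
struct toy_mbuf {
	uint32_t pkt_len;
	uint16_t l3_len;
	uint16_t l4_len;
};

static uint32_t l4_payload_len(const struct toy_mbuf *m, int udp_encap,
			       uint32_t esp_hlen, uint32_t esp_tlen,
			       uint32_t ol_len)
{
	uint32_t ol4_len = 0, l3_len, l4_len;

	if (udp_encap) {
		/* NAT-T: the UDP header belongs to the encapsulation, so
		 * peel it out of l3_len rather than charging it twice. */
		ol4_len = UDP_HDR_LEN;
		l3_len = m->l3_len - ol4_len;
		l4_len = l3_len;	/* taken verbatim from the hunk */
	} else {
		l3_len = m->l3_len;
		l4_len = m->l4_len;
	}
	return m->pkt_len - (ol_len + ol4_len + esp_hlen + l3_len +
			     l4_len + esp_tlen);
}

int main(void)
{
	struct toy_mbuf m = { .pkt_len = 1500, .l3_len = 28, .l4_len = 20 };

	printf("payload = %u\n", l4_payload_len(&m, 1, 8, 14, 14)); /* 1416 */
	return 0;
}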
+diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.h b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.h
+index 4e4c8798ec..8ea0f9540e 100644
+--- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.h
++++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.h
+@@ -73,7 +73,7 @@ enum iavf_ipsec_iv_len {
+ };
+ 
+ 
+-/* IPsec Crypto Packet Metaday offload flags */
++/* IPsec Crypto Packet Metadata offload flags */
+ #define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+ #define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+ #define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+@@ -145,7 +145,9 @@ iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+ 	uint8_t is_v4,
+ 	rte_be32_t v4_dst_addr,
+ 	uint8_t *v6_dst_addr,
+-	uint8_t drop);
++	uint8_t drop,
++	bool is_udp,
++	uint16_t udp_port);
+ 
+ /**
+  * Delete inbound security policy rule from hardware
+diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.c b/dpdk/drivers/net/iavf/iavf_rxtx.c
+index 154472c50f..3a0dfca2a7 100644
+--- a/dpdk/drivers/net/iavf/iavf_rxtx.c
++++ b/dpdk/drivers/net/iavf/iavf_rxtx.c
+@@ -363,12 +363,24 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
+ 	}
+ }
+ 
+-static const struct iavf_rxq_ops def_rxq_ops = {
+-	.release_mbufs = release_rxq_mbufs,
++static const
++struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
++	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
++#ifdef RTE_ARCH_X86
++	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
++#endif
+ };
+ 
+-static const struct iavf_txq_ops def_txq_ops = {
+-	.release_mbufs = release_txq_mbufs,
++static const
++struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
++	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
++#ifdef RTE_ARCH_X86
++	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
++#ifdef CC_AVX512_SUPPORT
++	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
++#endif
++#endif
++
+ };
+ 
+ static inline void
+@@ -476,54 +488,56 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
+ #endif
+ }
+ 
++static const
++iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[IAVF_RXDID_LAST + 1] = {
++	[IAVF_RXDID_COMMS_AUX_VLAN] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
++	[IAVF_RXDID_COMMS_AUX_IPV4] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
++	[IAVF_RXDID_COMMS_AUX_IPV6] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
++	[IAVF_RXDID_COMMS_AUX_IPV6_FLOW] =
++		iavf_rxd_to_pkt_fields_by_comms_aux_v1,
++	[IAVF_RXDID_COMMS_AUX_TCP] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
++	[IAVF_RXDID_COMMS_AUX_IP_OFFSET] =
++		iavf_rxd_to_pkt_fields_by_comms_aux_v2,
++	[IAVF_RXDID_COMMS_IPSEC_CRYPTO] =
++		iavf_rxd_to_pkt_fields_by_comms_aux_v2,
++	[IAVF_RXDID_COMMS_OVS_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
++};
++
+ static void
+ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
+ {
++	rxq->rxdid = rxdid;
++
+ 	switch (rxdid) {
+ 	case IAVF_RXDID_COMMS_AUX_VLAN:
+ 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
+-		rxq->rxd_to_pkt_fields =
+-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ 		break;
+ 	case IAVF_RXDID_COMMS_AUX_IPV4:
+ 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
+-		rxq->rxd_to_pkt_fields =
+-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ 		break;
+ 	case IAVF_RXDID_COMMS_AUX_IPV6:
+ 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
+-		rxq->rxd_to_pkt_fields =
+-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ 		break;
+ 	case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
+ 		rxq->xtr_ol_flag =
+ 			rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
+-		rxq->rxd_to_pkt_fields =
+-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ 		break;
+ 	case IAVF_RXDID_COMMS_AUX_TCP:
+ 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
+-		rxq->rxd_to_pkt_fields =
+-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ 		break;
+ 	case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
+ 		rxq->xtr_ol_flag =
+ 			rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+-		rxq->rxd_to_pkt_fields =
+-			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+ 		break;
+ 	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+ 		rxq->xtr_ol_flag =
+ 			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+-		rxq->rxd_to_pkt_fields =
+-			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+ 		break;
+ 	case IAVF_RXDID_COMMS_OVS_1:
+-		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+ 		break;
+ 	default:
+ 		/* update this according to the RXDID for FLEX_DESC_NONE */
+-		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
++		rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
+ 		break;
+ 	}
+ 
+@@ -553,6 +567,9 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	if (ad->closed)
++		return -EIO;
++
+ 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+ 
+ 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
+@@ -648,8 +665,8 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ 		return -ENOMEM;
+ 	}
+ 
+-	/* Allocate the maximun number of RX ring hardware descriptor with
+-	 * a liitle more to support bulk allocate.
++	/* Allocate the maximum number of RX ring hardware descriptor with
++	 * a little more to support bulk allocate.
+ 	 */
+ 	len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
+ 	ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
+@@ -673,7 +690,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ 	rxq->q_set = true;
+ 	dev->data->rx_queues[queue_idx] = rxq;
+ 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
+-	rxq->ops = &def_rxq_ops;
++	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
+ 
+ 	if (check_rx_bulk_allow(rxq) == true) {
+ 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+@@ -714,6 +731,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ 
+ 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
+@@ -810,7 +830,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 	txq->q_set = true;
+ 	dev->data->tx_queues[queue_idx] = txq;
+ 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
+-	txq->ops = &def_txq_ops;
++	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
+ 
+ 	if (check_tx_vec_allow(txq) == false) {
+ 		struct iavf_adapter *ad =
+@@ -942,7 +962,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ 	}
+ 
+ 	rxq = dev->data->rx_queues[rx_queue_id];
+-	rxq->ops->release_mbufs(rxq);
++	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
+ 	reset_rx_queue(rxq);
+ 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ 
+@@ -970,7 +990,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+ 	}
+ 
+ 	txq = dev->data->tx_queues[tx_queue_id];
+-	txq->ops->release_mbufs(txq);
++	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
+ 	reset_tx_queue(txq);
+ 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ 
+@@ -985,7 +1005,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+ 	if (!q)
+ 		return;
+ 
+-	q->ops->release_mbufs(q);
++	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
+ 	rte_free(q->sw_ring);
+ 	rte_memzone_free(q->mz);
+ 	rte_free(q);
+@@ -999,7 +1019,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+ 	if (!q)
+ 		return;
+ 
+-	q->ops->release_mbufs(q);
++	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
+ 	rte_free(q->sw_ring);
+ 	rte_memzone_free(q->mz);
+ 	rte_free(q);
+@@ -1033,7 +1053,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
+ 		txq = dev->data->tx_queues[i];
+ 		if (!txq)
+ 			continue;
+-		txq->ops->release_mbufs(txq);
++		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
+ 		reset_tx_queue(txq);
+ 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ 	}
+@@ -1041,7 +1061,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
+ 		rxq = dev->data->rx_queues[i];
+ 		if (!rxq)
+ 			continue;
+-		rxq->ops->release_mbufs(rxq);
++		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
+ 		reset_rx_queue(rxq);
+ 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ 	}
+@@ -1484,7 +1504,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
+ 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+ 		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+ 				&rxq->stats.ipsec_crypto);
+-		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
++		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
+ 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+ 		rxm->ol_flags |= pkt_flags;
+ 
+@@ -1628,7 +1648,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+ 		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+ 				&rxq->stats.ipsec_crypto);
+-		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
++		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
+ 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+ 
+ 		first_seg->ol_flags |= pkt_flags;
+@@ -1819,7 +1839,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
+ 	struct rte_mbuf *mb;
+ 	uint16_t stat_err0;
+ 	uint16_t pkt_len;
+-	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
++	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
+ 	int32_t i, j, nb_rx = 0;
+ 	uint64_t pkt_flags;
+ 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+@@ -1844,9 +1864,27 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
+ 
+ 		rte_smp_rmb();
+ 
+-		/* Compute how many status bits were set */
+-		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
+-			nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
++		/* Compute how many contiguous DD bits were set */
++		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
++			var = s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
++#ifdef RTE_ARCH_ARM
++			/* For Arm platforms, count only contiguous descriptors
++			 * whose DD bit is set to 1. On Arm platforms, reads of
++			 * descriptors can be reordered. Since the CPU may
++			 * be reading the descriptors as the NIC updates them
++			 * in memory, it is possible that the DD bit for a
++			 * descriptor earlier in the queue is read as not set
++			 * while the DD bit for a descriptor later in the queue
++			 * is read as set.
++			 */
++			if (var)
++				nb_dd += 1;
++			else
++				break;
++#else
++			nb_dd += var;
++#endif
++		}
+ 
+ 		nb_rx += nb_dd;
+ 
+@@ -1868,7 +1906,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
+ 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+ 			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+ 				&rxq->stats.ipsec_crypto);
+-			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
++			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
+ 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+ 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
+ 
+@@ -1898,7 +1936,7 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+ 	uint16_t pkt_len;
+ 	uint64_t qword1;
+ 	uint32_t rx_status;
+-	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
++	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
+ 	int32_t i, j, nb_rx = 0;
+ 	uint64_t pkt_flags;
+ 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+@@ -1929,9 +1967,27 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+ 
+ 		rte_smp_rmb();
+ 
+-		/* Compute how many status bits were set */
+-		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
+-			nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
++		/* Compute how many contiguous DD bits were set */
++		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
++			var = s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
++#ifdef RTE_ARCH_ARM
++			/* For Arm platforms, count only contiguous descriptors
++			 * whose DD bit is set to 1. On Arm platforms, reads of
++			 * descriptors can be reordered. Since the CPU may
++			 * be reading the descriptors as the NIC updates them
++			 * in memory, it is possible that the DD bit for a
++			 * descriptor earlier in the queue is read as not set
++			 * while the DD bit for a descriptor later in the queue
++			 * is read as set.
++			 */
++			if (var)
++				nb_dd += 1;
++			else
++				break;
++#else
++			nb_dd += var;
++#endif
++		}
+ 
+ 		nb_rx += nb_dd;
+ 
+@@ -2439,6 +2495,14 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+ 		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+ 			hdrlen += ipseclen;
+ 		bufsz = hdrlen + tlen;
++	} else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
++			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
++					RTE_MBUF_F_TX_UDP_SEG))) {
++		hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
++		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
++			hdrlen += m->l4_len;
++		bufsz = hdrlen + tlen;
++
+ 	} else {
+ 		bufsz = m->data_len;
+ 	}
+@@ -2484,12 +2548,6 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 	desc_idx = txq->tx_tail;
+ 	txe = &txe_ring[desc_idx];
+ 
+-#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+-		iavf_dump_tx_entry_ring(txq);
+-		iavf_dump_tx_desc_ring(txq);
+-#endif
+-
+-
+ 	for (idx = 0; idx < nb_pkts; idx++) {
+ 		volatile struct iavf_tx_desc *ddesc;
+ 		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+@@ -2694,6 +2752,10 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ 	struct iavf_tx_queue *txq = tx_queue;
+ 	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
++	struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++
++	if (adapter->closed)
++		return 0;
+ 
+ 	for (i = 0; i < nb_pkts; i++) {
+ 		m = tx_pkts[i];
+@@ -2750,14 +2812,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ 	struct iavf_adapter *adapter =
+ 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
++	int i;
++	struct iavf_rx_queue *rxq;
++	bool use_flex = true;
++
++	for (i = 0; i < dev->data->nb_rx_queues; i++) {
++		rxq = dev->data->rx_queues[i];
++		if (rxq->rxdid <= IAVF_RXDID_LEGACY_1) {
++			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is legacy, "
++				"set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
++			use_flex = false;
++		} else if (!(vf->supported_rxdid & BIT(rxq->rxdid))) {
++			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is not supported, "
++				"set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
++			use_flex = false;
++		}
++	}
+ 
+ #ifdef RTE_ARCH_X86
+-	struct iavf_rx_queue *rxq;
+-	int i;
+ 	int check_ret;
+ 	bool use_avx2 = false;
+ 	bool use_avx512 = false;
+-	bool use_flex = false;
+ 
+ 	check_ret = iavf_rx_vec_dev_check(dev);
+ 	if (check_ret >= 0 &&
+@@ -2774,10 +2849,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ 			use_avx512 = true;
+ #endif
+ 
+-		if (vf->vf_res->vf_cap_flags &
+-			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+-			use_flex = true;
+-
+ 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ 			rxq = dev->data->rx_queues[i];
+ 			(void)iavf_rxq_vec_setup(rxq);
+@@ -2881,7 +2952,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ 	if (dev->data->scattered_rx) {
+ 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
+ 			    dev->data->port_id);
+-		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++		if (use_flex)
+ 			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+ 		else
+ 			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+@@ -2892,7 +2963,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ 	} else {
+ 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
+ 			    dev->data->port_id);
+-		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++		if (use_flex)
+ 			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+ 		else
+ 			dev->rx_pkt_burst = iavf_recv_pkts;
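The two Rx-scan hunks above replace "count every set DD bit" with "count the contiguous run of set DD bits" on Arm, because weakly ordered loads may observe a later descriptor's DD bit before an earlier one's. A small standalone demo of why the distinction matters:

#include <stdio.h>

#define LOOK_AHEAD 8
#define DD_BIT 0x1

int main(void)
{
	/* Status words as the CPU might observe them mid-update:
	 * descriptor 2 not yet seen as done, but 3..4 already are. */
	int s[LOOK_AHEAD] = { 1, 1, 0, 1, 1, 0, 0, 0 };
	int j, popcount = 0, contiguous = 0;

	for (j = 0; j < LOOK_AHEAD; j++)
		popcount += s[j] & DD_BIT;	/* x86-style count */

	for (j = 0; j < LOOK_AHEAD; j++) {	/* Arm-safe count */
		if (s[j] & DD_BIT)
			contiguous++;
		else
			break;
	}

	/* popcount == 4 would hand descriptor 2 to the app too early;
	 * contiguous == 2 only consumes descriptors known complete. */
	printf("popcount=%d contiguous=%d\n", popcount, contiguous);
	return 0;
}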
+diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.h b/dpdk/drivers/net/iavf/iavf_rxtx.h
+index b610176b30..48cc0da6f5 100644
+--- a/dpdk/drivers/net/iavf/iavf_rxtx.h
++++ b/dpdk/drivers/net/iavf/iavf_rxtx.h
+@@ -187,6 +187,7 @@ struct iavf_rx_queue {
+ 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+ 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+ 	uint8_t rxdid;
++	uint8_t rel_mbufs_type;
+ 
+ 	/* used for VPMD */
+ 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
+@@ -217,8 +218,6 @@ struct iavf_rx_queue {
+ 	uint8_t proto_xtr; /* protocol extraction type */
+ 	uint64_t xtr_ol_flag;
+ 		/* flexible descriptor metadata extraction offload flag */
+-	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
+-				/* handle flexible descriptor by RXDID */
+ 	struct iavf_rx_queue_stats stats;
+ 	uint64_t offloads;
+ };
+@@ -248,6 +247,7 @@ struct iavf_tx_queue {
+ 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
+ 	uint16_t free_thresh;
+ 	uint16_t rs_thresh;
++	uint8_t rel_mbufs_type;
+ 
+ 	uint16_t port_id;
+ 	uint16_t queue_id;
+@@ -391,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
+ 	__le32 ipsec_said;
+ };
+ 
++enum iavf_rxtx_rel_mbufs_type {
++	IAVF_REL_MBUFS_DEFAULT		= 0,
++	IAVF_REL_MBUFS_SSE_VEC		= 1,
++	IAVF_REL_MBUFS_AVX512_VEC	= 2,
++};
++
+ /* Receive Flex Descriptor profile IDs: There are a total
+  * of 64 profiles where profile IDs 0/1 are for legacy; and
+  * profiles 2-63 are flex profiles that can be programmed
+@@ -694,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+ uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
+ 
+ void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
++void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
++void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
++void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
+ 
+ static inline
+ void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
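The iavf_rxtx.h changes above swap the per-queue `ops` function pointer for a small `rel_mbufs_type` enum that indexes a const ops table. The usual motivation for this change in DPDK PMDs (inferred here, not stated in the patch) is multi-process safety: a function pointer stored in shared queue state is only valid in the process that wrote it, while an index into each process's own const table is always safe to dereference. A sketch of the pattern:

#include <stdio.h>

struct rx_queue;	/* opaque for the sketch */

typedef void (*release_mbufs_t)(struct rx_queue *rxq);

static void release_default(struct rx_queue *rxq) { (void)rxq; puts("default"); }
static void release_sse(struct rx_queue *rxq)     { (void)rxq; puts("sse"); }

enum rel_mbufs_type { REL_DEFAULT = 0, REL_SSE = 1 };

/* Per-process const table: safe to index from any process, unlike a
 * raw function pointer stored in shared queue state. */
static const release_mbufs_t rel_mbufs_ops[] = {
	[REL_DEFAULT] = release_default,
	[REL_SSE]     = release_sse,
};

struct queue_state { enum rel_mbufs_type rel_mbufs_type; };

int main(void)
{
	struct queue_state q = { .rel_mbufs_type = REL_SSE };

	rel_mbufs_ops[q.rel_mbufs_type](NULL);	/* dispatch via index */
	return 0;
}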
+diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+index 6ff38ac368..c975a5e7d7 100644
+--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c
++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+@@ -1994,7 +1994,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
+ }
+ 
+-static inline void
++void __rte_cold
+ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
+ {
+ 	unsigned int i;
+@@ -2014,14 +2014,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
+ 	}
+ }
+ 
+-static const struct iavf_txq_ops avx512_vec_txq_ops = {
+-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+-};
+-
+ int __rte_cold
+ iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+ {
+-	txq->ops = &avx512_vec_txq_ops;
++	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
+ 	return 0;
+ }
+ 
+diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c
+index 1bac59bf0e..4b23ca8d82 100644
+--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c
++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c
+@@ -159,7 +159,7 @@ desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
+ 	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ 	/* then we shift left 1 bit */
+ 	l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+-	/* we need to mask out the reduntant bits */
++	/* we need to mask out the redundant bits */
+ 	l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+ 
+ 	vlan0 = _mm_or_si128(vlan0, rss);
+@@ -613,7 +613,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ 				 pkt_mb1);
+ 		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+-		/* C.4 calc avaialbe number of desc */
++		/* C.4 calc available number of desc */
+ 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ 		nb_pkts_recd += var;
+ 		if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
+@@ -1200,37 +1200,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 	return nb_tx;
+ }
+ 
+-static void __rte_cold
++void __rte_cold
+ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
+ {
+ 	_iavf_rx_queue_release_mbufs_vec(rxq);
+ }
+ 
+-static void __rte_cold
++void __rte_cold
+ iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
+ {
+ 	_iavf_tx_queue_release_mbufs_vec(txq);
+ }
+ 
+-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
+-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
+-};
+-
+-static const struct iavf_txq_ops sse_vec_txq_ops = {
+-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
+-};
+-
+ int __rte_cold
+ iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+ {
+-	txq->ops = &sse_vec_txq_ops;
++	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
+ 	return 0;
+ }
+ 
+ int __rte_cold
+ iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+ {
+-	rxq->ops = &sse_vec_rxq_ops;
++	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
+ 	return iavf_rxq_vec_setup_default(rxq);
+ }
+ 
+diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c
+index 145b059837..1bd3559ec2 100644
+--- a/dpdk/drivers/net/iavf/iavf_vchnl.c
++++ b/dpdk/drivers/net/iavf/iavf_vchnl.c
+@@ -265,6 +265,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
+ 	struct virtchnl_pf_event *pf_msg =
+ 			(struct virtchnl_pf_event *)msg;
+ 
++	if (adapter->closed) {
++		PMD_DRV_LOG(DEBUG, "Port closed");
++		return;
++	}
++
+ 	if (msglen < sizeof(struct virtchnl_pf_event)) {
+ 		PMD_DRV_LOG(DEBUG, "Error event");
+ 		return;
+@@ -461,7 +466,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
+ 	    (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
+ 	     vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
+ 		PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
+-			     " than (%u.%u) to support Adapative VF",
++			     " than (%u.%u) to support Adaptive VF",
+ 			     VIRTCHNL_VERSION_MAJOR_START,
+ 			     VIRTCHNL_VERSION_MAJOR_START);
+ 		return -1;
+@@ -502,7 +507,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
+ 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
+ 		VIRTCHNL_VF_OFFLOAD_QOS |
+-+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
++		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
+ 
+ 	args.in_args = (uint8_t *)&caps;
+ 	args.in_args_size = sizeof(caps);
+@@ -777,6 +782,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
+ 	struct iavf_cmd_info args;
+ 	int err;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	memset(&queue_select, 0, sizeof(queue_select));
+ 	queue_select.vsi_id = vf->vsi_res->vsi_id;
+ 	if (rx)
+@@ -1241,6 +1249,9 @@ iavf_query_stats(struct iavf_adapter *adapter,
+ 	struct iavf_cmd_info args;
+ 	int err;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	memset(&q_stats, 0, sizeof(q_stats));
+ 	q_stats.vsi_id = vf->vsi_res->vsi_id;
+ 	args.ops = VIRTCHNL_OP_GET_STATS;
+@@ -1269,6 +1280,9 @@ iavf_config_promisc(struct iavf_adapter *adapter,
+ 	struct iavf_cmd_info args;
+ 	int err;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	promisc.flags = 0;
+ 	promisc.vsi_id = vf->vsi_res->vsi_id;
+ 
+@@ -1312,6 +1326,9 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
+ 	struct iavf_cmd_info args;
+ 	int err;
+ 
++	if (adapter->closed)
++		return -EIO;
++
+ 	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ 	list->vsi_id = vf->vsi_res->vsi_id;
+ 	list->num_elements = 1;
+@@ -1487,7 +1504,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
+ 
+ 	err = iavf_execute_vf_cmd(adapter, &args, 0);
+ 	if (err) {
+-		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
++		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+ 		return err;
+ 	}
+ 
+diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c
+index 395787806b..3918169001 100644
+--- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c
++++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.c
+@@ -1785,8 +1785,12 @@ static enum ice_prof_type
+ ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+ {
+ 	u16 i;
++	bool valid_prof = false;
+ 
+ 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
++		if (fv->ew[i].off != ICE_NAN_OFFSET)
++			valid_prof = true;
++
+ 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ 		    fv->ew[i].off == ICE_VNI_OFFSET)
+@@ -1801,7 +1805,7 @@ ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+ 			return ICE_PROF_TUN_PPPOE;
+ 	}
+ 
+-	return ICE_PROF_NON_TUN;
++	return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
+ }
+ 
+ /**
+@@ -1818,11 +1822,6 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ 	struct ice_seg *ice_seg;
+ 	struct ice_fv *fv;
+ 
+-	if (req_profs == ICE_PROF_ALL) {
+-		ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+-		return;
+-	}
+-
+ 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+ 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
+ 	ice_seg = hw->seg;
+@@ -2565,7 +2564,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+  * @off: variable to receive the protocol offset
+  */
+ enum ice_status
+-ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
++ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
+ 		  u8 *prot, u16 *off)
+ {
+ 	struct ice_fv_word *fv_ext;
+diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.h b/dpdk/drivers/net/ice/base/ice_flex_pipe.h
+index 23ba45564a..ab897de4f3 100644
+--- a/dpdk/drivers/net/ice/base/ice_flex_pipe.h
++++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.h
+@@ -25,7 +25,7 @@ enum ice_status
+ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+ void ice_release_change_lock(struct ice_hw *hw);
+ enum ice_status
+-ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
++ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
+ 		  u8 *prot, u16 *off);
+ enum ice_status
+ ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+diff --git a/dpdk/drivers/net/ice/base/ice_flex_type.h b/dpdk/drivers/net/ice/base/ice_flex_type.h
+index 59eeca0a30..09a02fe9ac 100644
+--- a/dpdk/drivers/net/ice/base/ice_flex_type.h
++++ b/dpdk/drivers/net/ice/base/ice_flex_type.h
+@@ -1003,6 +1003,7 @@ struct ice_chs_chg {
+ #define ICE_FLOW_PTYPE_MAX		ICE_XLT1_CNT
+ 
+ enum ice_prof_type {
++	ICE_PROF_INVALID = 0x0,
+ 	ICE_PROF_NON_TUN = 0x1,
+ 	ICE_PROF_TUN_UDP = 0x2,
+ 	ICE_PROF_TUN_GRE = 0x4,
+diff --git a/dpdk/drivers/net/ice/base/ice_protocol_type.h b/dpdk/drivers/net/ice/base/ice_protocol_type.h
+index cef8354f77..d27ef46713 100644
+--- a/dpdk/drivers/net/ice/base/ice_protocol_type.h
++++ b/dpdk/drivers/net/ice/base/ice_protocol_type.h
+@@ -54,6 +54,7 @@ enum ice_protocol_type {
+ 	ICE_GTP_NO_PAY,
+ 	ICE_VLAN_EX,
+ 	ICE_VLAN_IN,
++	ICE_FLG_DIR,
+ 	ICE_PROTOCOL_LAST
+ };
+ 
+@@ -191,6 +192,7 @@ enum ice_prot_id {
+ 
+ #define ICE_VNI_OFFSET		12 /* offset of VNI from ICE_PROT_UDP_OF */
+ 
++#define ICE_NAN_OFFSET		511
+ #define ICE_MAC_OFOS_HW		1
+ #define ICE_MAC_IL_HW		4
+ #define ICE_ETYPE_OL_HW		9
+@@ -217,9 +219,10 @@ enum ice_prot_id {
+ #define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+ 
+ #define ICE_MDID_SIZE 2
+-#define ICE_TUN_FLAG_MDID 21
+-#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
++#define ICE_TUN_FLAG_MDID 20
++#define ICE_TUN_FLAG_MDID_OFF(word)   (ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
+ #define ICE_TUN_FLAG_MASK 0xFF
++#define ICE_DIR_FLAG_MASK 0x10
+ #define ICE_TUN_FLAG_VLAN_MASK 0x01
+ #define ICE_TUN_FLAG_FV_IND 2
+ 
+@@ -420,7 +423,7 @@ struct ice_recp_grp_entry {
+ #define ICE_INVAL_CHAIN_IND 0xFF
+ 	u16 rid;
+ 	u8 chain_idx;
+-	u16 fv_idx[ICE_NUM_WORDS_RECIPE];
++	u8 fv_idx[ICE_NUM_WORDS_RECIPE];
+ 	u16 fv_mask[ICE_NUM_WORDS_RECIPE];
+ 	struct ice_pref_recipe_group r_group;
+ };
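The ICE_TUN_FLAG_MDID_OFF macro above now takes a word index so both metadata words can be addressed: with ICE_MDID_SIZE = 2 and ICE_TUN_FLAG_MDID = 20, word 0 lands at byte offset 40 (the direction flag, mask 0x10) and word 1 at offset 42 (the tunnel flags, mask 0xFF). Offset 42 is exactly what the old single-offset form (2 * 21) produced, so existing tunnel matching is unchanged. A two-line check:

#include <stdio.h>

#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 20
#define ICE_TUN_FLAG_MDID_OFF(word) \
	(ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))

int main(void)
{
	/* word 0 -> 40 (direction bit), word 1 -> 42 (tunnel flags) */
	printf("off(0)=%d off(1)=%d\n",
	       ICE_TUN_FLAG_MDID_OFF(0), ICE_TUN_FLAG_MDID_OFF(1));
	return 0;
}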
+diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c
+index 2620892c9e..e697c579be 100644
+--- a/dpdk/drivers/net/ice/base/ice_sched.c
++++ b/dpdk/drivers/net/ice/base/ice_sched.c
+@@ -4774,12 +4774,12 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
+ 
+ 	case ICE_AGG_TYPE_Q:
+ 		/* The current implementation allows single queue to modify */
+-		node = ice_sched_get_node(pi, id);
++		node = ice_sched_find_node_by_teid(pi->root, id);
+ 		break;
+ 
+ 	case ICE_AGG_TYPE_QG:
+ 		/* The current implementation allows single qg to modify */
+-		child_node = ice_sched_get_node(pi, id);
++		child_node = ice_sched_find_node_by_teid(pi->root, id);
+ 		if (!child_node)
+ 			break;
+ 		node = child_node->parent;
+diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c
+index 1fee790c25..c0df3a1815 100644
+--- a/dpdk/drivers/net/ice/base/ice_switch.c
++++ b/dpdk/drivers/net/ice/base/ice_switch.c
+@@ -2303,7 +2303,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
+ 			lkup_exts->field_mask[fv_word_idx] =
+ 				rg_entry->fv_mask[i];
+ 			if (prot == ICE_META_DATA_ID_HW &&
+-			    off == ICE_TUN_FLAG_MDID_OFF)
++			    off == ICE_TUN_FLAG_MDID_OFF(1))
+ 				vlan = true;
+ 			fv_word_idx++;
+ 		}
+@@ -6770,6 +6770,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
+ 	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
+ 	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
+ 	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
++	{ ICE_FLG_DIR,		ICE_META_DATA_ID_HW},
+ };
+ 
+ /**
+@@ -7488,9 +7489,10 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ /**
+  * ice_tun_type_match_word - determine if tun type needs a match mask
+  * @tun_type: tunnel type
++ * @off: offset of packet flag
+  * @mask: mask to be used for the tunnel
+  */
+-static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
++static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *off, u16 *mask)
+ {
+ 	switch (tun_type) {
+ 	case ICE_SW_TUN_VXLAN_GPE:
+@@ -7506,15 +7508,23 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
+ 	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
+ 	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
+ 		*mask = ICE_TUN_FLAG_MASK;
++		*off = ICE_TUN_FLAG_MDID_OFF(1);
++		return true;
++
++	case ICE_SW_TUN_AND_NON_TUN:
++		*mask = ICE_DIR_FLAG_MASK;
++		*off = ICE_TUN_FLAG_MDID_OFF(0);
+ 		return true;
+ 
+ 	case ICE_SW_TUN_GENEVE_VLAN:
+ 	case ICE_SW_TUN_VXLAN_VLAN:
+ 		*mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
++		*off = ICE_TUN_FLAG_MDID_OFF(1);
+ 		return true;
+ 
+ 	default:
+ 		*mask = 0;
++		*off = 0;
+ 		return false;
+ 	}
+ }
+@@ -7529,16 +7539,18 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
+ 		      struct ice_prot_lkup_ext *lkup_exts)
+ {
+ 	u16 mask;
++	u16 off;
+ 
+ 	/* If this is a tunneled packet, then add recipe index to match the
+-	 * tunnel bit in the packet metadata flags.
++	 * tunnel bit in the packet metadata flags. If this is a tun_and_non_tun
++	 * packet, then add recipe index to match the direction bit in the flag.
+ 	 */
+-	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
++	if (ice_tun_type_match_word(rinfo->tun_type, &off, &mask)) {
+ 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ 			u8 word = lkup_exts->n_val_words++;
+ 
+ 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+-			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
++			lkup_exts->fv_words[word].off = off;
+ 			lkup_exts->field_mask[word] = mask;
+ 		} else {
+ 			return ICE_ERR_MAX_LIMIT;
+@@ -7779,6 +7791,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
+ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
+ {
+ 	switch (type) {
++	case ICE_SW_TUN_AND_NON_TUN:
+ 	case ICE_SW_TUN_PROFID_IPV6_ESP:
+ 	case ICE_SW_TUN_PROFID_IPV6_AH:
+ 	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+@@ -7863,6 +7876,15 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ 	 */
+ 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
+ 
++	/* If the rule must match both tunneled and non-tunneled packets, add a
++	 * lookup element to match the direction flag of the source interface.
++	 */
++	if (rinfo->tun_type == ICE_SW_TUN_AND_NON_TUN &&
++	    lkups_cnt < ICE_MAX_CHAIN_WORDS) {
++		lkups[lkups_cnt].type = ICE_FLG_DIR;
++		lkups_cnt++;
++	}
++
+ 	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ 	if (status)
+ 		goto err_unroll;
+diff --git a/dpdk/drivers/net/ice/base/ice_switch.h b/dpdk/drivers/net/ice/base/ice_switch.h
+index a2b3c80107..c67cd09d21 100644
+--- a/dpdk/drivers/net/ice/base/ice_switch.h
++++ b/dpdk/drivers/net/ice/base/ice_switch.h
+@@ -203,7 +203,7 @@ struct ice_fltr_info {
+ 
+ struct ice_update_recipe_lkup_idx_params {
+ 	u16 rid;
+-	u16 fv_idx;
++	u8 fv_idx;
+ 	bool ignore_valid;
+ 	u16 mask;
+ 	bool mask_valid;
+diff --git a/dpdk/drivers/net/ice/ice_dcf.c b/dpdk/drivers/net/ice/ice_dcf.c
+index cca1d7bf46..7f0c074b01 100644
+--- a/dpdk/drivers/net/ice/ice_dcf.c
++++ b/dpdk/drivers/net/ice/ice_dcf.c
+@@ -864,7 +864,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
+ 			j = 0;
+ 		hw->rss_lut[i] = j;
+ 	}
+-	/* send virtchnnl ops to configure rss*/
++	/* send virtchnl ops to configure RSS */
+ 	ret = ice_dcf_configure_rss_lut(hw);
+ 	if (ret)
+ 		return ret;
+diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.c b/dpdk/drivers/net/ice/ice_dcf_ethdev.c
+index 28f7f7fb72..6e9e80c1df 100644
+--- a/dpdk/drivers/net/ice/ice_dcf_ethdev.c
++++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.c
+@@ -203,7 +203,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+ 				    "vector %u are mapping to all Rx queues",
+ 				    hw->msix_base);
+ 		} else {
+-			/* If Rx interrupt is reuquired, and we can use
++			/* If Rx interrupt is required, and we can use
+ 			 * multi interrupts, then the vec is from 1
+ 			 */
+ 			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
+@@ -664,6 +664,8 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
+ 	dev_info->reta_size = hw->vf_res->rss_lut_size;
+ 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+ 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
++	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
++	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ 
+ 	dev_info->rx_offload_capa =
+ 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+@@ -681,6 +683,7 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
+ 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
++		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+ 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+@@ -956,6 +959,13 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
+ 	return rte_eth_linkstatus_set(dev, &new_link);
+ }
+ 
++bool
++ice_dcf_adminq_need_retry(struct ice_adapter *ad)
++{
++	return ad->hw.dcf_enabled &&
++	       !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
++}
++
+ /* Add UDP tunneling port */
+ static int
+ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+@@ -1105,6 +1115,7 @@ static int
+ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
+ {
+ 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
++	struct ice_adapter *parent_adapter = &adapter->parent;
+ 
+ 	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
+ 	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
+@@ -1116,9 +1127,13 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
+ 	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
+ 	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
+ 		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
++		__atomic_store_n(&parent_adapter->dcf_state_on, false,
++				 __ATOMIC_RELAXED);
+ 		return -1;
+ 	}
+ 
++	__atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
++
+ 	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
+ 		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
+ 		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
+diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.h b/dpdk/drivers/net/ice/ice_dcf_ethdev.h
+index 8510e37119..11a1305038 100644
+--- a/dpdk/drivers/net/ice/ice_dcf_ethdev.h
++++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.h
+@@ -64,5 +64,6 @@ int ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param);
+ int ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev);
+ int ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev);
+ void ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter);
++bool ice_dcf_adminq_need_retry(struct ice_adapter *ad);
+ 
+ #endif /* _ICE_DCF_ETHDEV_H_ */
+diff --git a/dpdk/drivers/net/ice/ice_dcf_parent.c b/dpdk/drivers/net/ice/ice_dcf_parent.c
+index 1ff2c47172..2f96dedcce 100644
+--- a/dpdk/drivers/net/ice/ice_dcf_parent.c
++++ b/dpdk/drivers/net/ice/ice_dcf_parent.c
+@@ -119,7 +119,9 @@ ice_dcf_vsi_update_service_handler(void *param)
+ {
+ 	struct ice_dcf_reset_event_param *reset_param = param;
+ 	struct ice_dcf_hw *hw = reset_param->dcf_hw;
+-	struct ice_dcf_adapter *adapter;
++	struct ice_dcf_adapter *adapter =
++		container_of(hw, struct ice_dcf_adapter, real_hw);
++	struct ice_adapter *parent_adapter = &adapter->parent;
+ 
+ 	pthread_detach(pthread_self());
+ 
+@@ -127,11 +129,12 @@ ice_dcf_vsi_update_service_handler(void *param)
+ 
+ 	rte_spinlock_lock(&vsi_update_lock);
+ 
+-	adapter = container_of(hw, struct ice_dcf_adapter, real_hw);
+-
+-	if (!ice_dcf_handle_vsi_update_event(hw))
++	if (!ice_dcf_handle_vsi_update_event(hw)) {
++		__atomic_store_n(&parent_adapter->dcf_state_on, true,
++				 __ATOMIC_RELAXED);
+ 		ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
+ 					  hw->num_vfs, hw->vf_vsi_map);
++	}
+ 
+ 	if (reset_param->vfr && adapter->repr_infos) {
+ 		struct rte_eth_dev *vf_rep_eth_dev =
+@@ -224,6 +227,9 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
+ 			    uint8_t *msg, uint16_t msglen)
+ {
+ 	struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
++	struct ice_dcf_adapter *adapter =
++		container_of(dcf_hw, struct ice_dcf_adapter, real_hw);
++	struct ice_adapter *parent_adapter = &adapter->parent;
+ 
+ 	if (msglen < sizeof(struct virtchnl_pf_event)) {
+ 		PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
+@@ -258,6 +264,8 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
+ 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
+ 			    pf_msg->event_data.vf_vsi_map.vf_id,
+ 			    pf_msg->event_data.vf_vsi_map.vsi_id);
++		__atomic_store_n(&parent_adapter->dcf_state_on, false,
++				 __ATOMIC_RELAXED);
+ 		start_vsi_reset_thread(dcf_hw, true,
+ 				       pf_msg->event_data.vf_vsi_map.vf_id);
+ 		break;
+@@ -332,7 +340,7 @@ ice_dcf_init_parent_hw(struct ice_hw *hw)
+ 		goto err_unroll_alloc;
+ 
+ 	/* Initialize port_info struct with link information */
+-	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
++	status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL);
+ 	if (status)
+ 		goto err_unroll_alloc;
+ 
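The ice_dcf hunks above publish a per-adapter dcf_state_on flag with relaxed atomics: the PF-event handler clears it when a VSI-map update arrives, the reset thread sets it again once the update has been handled, and ice_dcf_adminq_need_retry() polls it so admin-queue senders back off in between. A standalone sketch of that handshake using the same GCC __atomic builtins (toy adapter struct, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct toy_adapter {
	bool dcf_state_on;	/* written by event/reset paths, read by senders */
};

static void on_vsi_map_update(struct toy_adapter *ad)
{
	/* DCF is about to be reconfigured: tell senders to retry later. */
	__atomic_store_n(&ad->dcf_state_on, false, __ATOMIC_RELAXED);
}

static void on_vsi_update_done(struct toy_adapter *ad)
{
	__atomic_store_n(&ad->dcf_state_on, true, __ATOMIC_RELAXED);
}

static bool adminq_need_retry(const struct toy_adapter *ad)
{
	return !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
}

int main(void)
{
	struct toy_adapter ad = { .dcf_state_on = true };

	on_vsi_map_update(&ad);
	printf("retry? %d\n", adminq_need_retry(&ad));	/* 1 */
	on_vsi_update_done(&ad);
	printf("retry? %d\n", adminq_need_retry(&ad));	/* 0 */
	return 0;
}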
+diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c
+index 13a7a9702a..7df1b4ec19 100644
+--- a/dpdk/drivers/net/ice/ice_ethdev.c
++++ b/dpdk/drivers/net/ice/ice_ethdev.c
+@@ -1264,7 +1264,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev)
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -1627,7 +1627,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
+ 	}
+ 
+ 	/* At the beginning, only TC0. */
+-	/* What we need here is the maximam number of the TX queues.
++	/* What we need here is the maximum number of the TX queues.
+ 	 * Currently vsi->nb_qps means it.
+ 	 * Correct it if any change.
+ 	 */
+@@ -3235,7 +3235,8 @@ static int ice_init_rss(struct ice_pf *pf)
+ 			   RTE_MIN(rss_conf->rss_key_len,
+ 				   vsi->rss_key_size));
+ 
+-	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
++	rte_memcpy(key.standard_rss_key, vsi->rss_key,
++		RTE_MIN(sizeof(key.standard_rss_key), vsi->rss_key_size));
+ 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
+ 	if (ret)
+ 		goto out;
+@@ -3576,7 +3577,7 @@ ice_dev_start(struct rte_eth_dev *dev)
+ 		goto rx_err;
+ 	}
+ 
+-	/* enable Rx interrput and mapping Rx queue to interrupt vector */
++	/* enable Rx interrupt and mapping Rx queue to interrupt vector */
+ 	if (ice_rxq_intr_setup(dev))
+ 		return -EIO;
+ 
+@@ -3603,8 +3604,8 @@ ice_dev_start(struct rte_eth_dev *dev)
+ 
+ 	ice_dev_set_link_up(dev);
+ 
+-	/* Call get_link_info aq commond to enable/disable LSE */
+-	ice_link_update(dev, 0);
++	/* Call get_link_info aq command to enable/disable LSE */
++	ice_link_update(dev, 1);
+ 
+ 	pf->adapter_stopped = false;
+ 
+@@ -5395,7 +5396,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from ice_hw_port struct */
++	/* Get individual stats from ice_hw_port struct */
+ 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
+ 		xstats[count].value =
+ 			*(uint64_t *)((char *)hw_stats +
+@@ -5426,7 +5427,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from ice_hw_port struct */
++	/* Get individual stats from ice_hw_port struct */
+ 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
+ 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
+ 			sizeof(xstats_names[count].name));
+@@ -5454,6 +5455,8 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ {
+ 	int ret = 0;
+ 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++	struct ice_adapter *ad =
++		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ 
+ 	if (udp_tunnel == NULL)
+ 		return -EINVAL;
+@@ -5461,6 +5464,9 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ 	switch (udp_tunnel->prot_type) {
+ 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+ 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
++		if (!ret && ad->psr != NULL)
++			ice_parser_vxlan_tunnel_set(ad->psr,
++					udp_tunnel->udp_port, true);
+ 		break;
+ 	default:
+ 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+@@ -5478,6 +5484,8 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ {
+ 	int ret = 0;
+ 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++	struct ice_adapter *ad =
++		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ 
+ 	if (udp_tunnel == NULL)
+ 		return -EINVAL;
+@@ -5485,6 +5493,9 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ 	switch (udp_tunnel->prot_type) {
+ 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+ 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
++		if (!ret && ad->psr != NULL)
++			ice_parser_vxlan_tunnel_set(ad->psr,
++					udp_tunnel->udp_port, false);
+ 		break;
+ 	default:
+ 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+diff --git a/dpdk/drivers/net/ice/ice_ethdev.h b/dpdk/drivers/net/ice/ice_ethdev.h
+index 2e3e45f3d7..ac56c3cc60 100644
+--- a/dpdk/drivers/net/ice/ice_ethdev.h
++++ b/dpdk/drivers/net/ice/ice_ethdev.h
+@@ -531,6 +531,9 @@ struct ice_adapter {
+ 	uint64_t time_hw;
+ 	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
+ 	struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
++	/* True if DCF state of the associated PF is on */
++	bool dcf_state_on;
++	struct ice_parser *psr;
+ #ifdef RTE_ARCH_X86
+ 	bool rx_use_avx2;
+ 	bool rx_use_avx512;
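The dcf_state_on flag added here is written from the DCF event handlers above and polled from the switch-filter paths further down. A hedged sketch of the matching reader — the real ice_dcf_adminq_need_retry() body is not part of this excerpt, so its exact checks are an assumption:

    #include <stdbool.h>

    struct ice_adapter { bool dcf_state_on; /* ... */ };

    static bool dcf_adminq_need_retry(struct ice_adapter *ad)
    {
        /* Retry is needed whenever the DCF state is (still) off. */
        return !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
    }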
+diff --git a/dpdk/drivers/net/ice/ice_fdir_filter.c b/dpdk/drivers/net/ice/ice_fdir_filter.c
+index 13a2ac42df..72c8bd8f02 100644
+--- a/dpdk/drivers/net/ice/ice_fdir_filter.c
++++ b/dpdk/drivers/net/ice/ice_fdir_filter.c
+@@ -1828,7 +1828,6 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 	struct ice_fdir_v4 *p_v4 = NULL;
+ 	struct ice_fdir_v6 *p_v6 = NULL;
+ 	struct ice_parser_result rslt;
+-	struct ice_parser *psr;
+ 	uint8_t item_num = 0;
+ 
+ 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+@@ -1863,6 +1862,9 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 
+ 		switch (item_type) {
+ 		case RTE_FLOW_ITEM_TYPE_RAW: {
++			if (ad->psr == NULL)
++				return -rte_errno;
++
+ 			raw_spec = item->spec;
+ 			raw_mask = item->mask;
+ 
+@@ -1870,11 +1872,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 				break;
+ 
+ 			/* convert raw spec & mask from byte string to int */
+-			unsigned char *tmp_spec =
++			unsigned char *spec_pattern =
+ 				(uint8_t *)(uintptr_t)raw_spec->pattern;
+-			unsigned char *tmp_mask =
++			unsigned char *mask_pattern =
+ 				(uint8_t *)(uintptr_t)raw_mask->pattern;
+-			uint16_t udp_port = 0;
++			uint8_t *tmp_spec, *tmp_mask;
+ 			uint16_t tmp_val = 0;
+ 			uint8_t pkt_len = 0;
+ 			uint8_t tmp = 0;
+@@ -1885,8 +1887,18 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 				pkt_len)
+ 				return -rte_errno;
+ 
++			tmp_spec = rte_zmalloc(NULL, pkt_len / 2, 0);
++			if (!tmp_spec)
++				return -rte_errno;
++
++			tmp_mask = rte_zmalloc(NULL, pkt_len / 2, 0);
++			if (!tmp_mask) {
++				rte_free(tmp_spec);
++				return -rte_errno;
++			}
++
+ 			for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+-				tmp = tmp_spec[i];
++				tmp = spec_pattern[i];
+ 				if (tmp >= 'a' && tmp <= 'f')
+ 					tmp_val = tmp - 'a' + 10;
+ 				if (tmp >= 'A' && tmp <= 'F')
+@@ -1895,7 +1907,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 					tmp_val = tmp - '0';
+ 
+ 				tmp_val *= 16;
+-				tmp = tmp_spec[i + 1];
++				tmp = spec_pattern[i + 1];
+ 				if (tmp >= 'a' && tmp <= 'f')
+ 					tmp_spec[j] = tmp_val + tmp - 'a' + 10;
+ 				if (tmp >= 'A' && tmp <= 'F')
+@@ -1903,7 +1915,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 				if (tmp >= '0' && tmp <= '9')
+ 					tmp_spec[j] = tmp_val + tmp - '0';
+ 
+-				tmp = tmp_mask[i];
++				tmp = mask_pattern[i];
+ 				if (tmp >= 'a' && tmp <= 'f')
+ 					tmp_val = tmp - 'a' + 10;
+ 				if (tmp >= 'A' && tmp <= 'F')
+@@ -1912,7 +1924,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 					tmp_val = tmp - '0';
+ 
+ 				tmp_val *= 16;
+-				tmp = tmp_mask[i + 1];
++				tmp = mask_pattern[i + 1];
+ 				if (tmp >= 'a' && tmp <= 'f')
+ 					tmp_mask[j] = tmp_val + tmp - 'a' + 10;
+ 				if (tmp >= 'A' && tmp <= 'F')
+@@ -1923,15 +1935,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 
+ 			pkt_len /= 2;
+ 
+-			if (ice_parser_create(&ad->hw, &psr))
+-				return -rte_errno;
+-			if (ice_get_open_tunnel_port(&ad->hw, TNL_VXLAN,
+-						     &udp_port))
+-				ice_parser_vxlan_tunnel_set(psr, udp_port,
+-							    true);
+-			if (ice_parser_run(psr, tmp_spec, pkt_len, &rslt))
++			if (ice_parser_run(ad->psr, tmp_spec, pkt_len, &rslt))
+ 				return -rte_errno;
+-			ice_parser_destroy(psr);
+ 
+ 			if (!tmp_mask)
+ 				return -rte_errno;
+@@ -1955,6 +1960,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 
+ 			filter->parser_ena = true;
+ 
++			rte_free(tmp_spec);
++			rte_free(tmp_mask);
+ 			break;
+ 		}
+ 
+@@ -2037,6 +2044,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 				return -rte_errno;
+ 			}
+ 
++			/* Partial mask for IPv4 src/dst addrs not supported */
++			if (ipv4_mask->hdr.src_addr &&
++				ipv4_mask->hdr.src_addr != UINT32_MAX)
++				return -rte_errno;
++			if (ipv4_mask->hdr.dst_addr &&
++				ipv4_mask->hdr.dst_addr != UINT32_MAX)
++				return -rte_errno;
++
+ 			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ 				*input_set |= ICE_INSET_IPV4_DST;
+ 			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+@@ -2178,6 +2193,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 				return -rte_errno;
+ 			}
+ 
++			/* Partial mask for TCP src/dst ports not supported */
++			if (tcp_mask->hdr.src_port &&
++				tcp_mask->hdr.src_port != UINT16_MAX)
++				return -rte_errno;
++			if (tcp_mask->hdr.dst_port &&
++				tcp_mask->hdr.dst_port != UINT16_MAX)
++				return -rte_errno;
++
+ 			if (tcp_mask->hdr.src_port == UINT16_MAX)
+ 				*input_set |= ICE_INSET_TCP_SRC_PORT;
+ 			if (tcp_mask->hdr.dst_port == UINT16_MAX)
+@@ -2217,6 +2240,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 				return -rte_errno;
+ 			}
+ 
++			/* Partial mask for UDP src/dst ports not supported */
++			if (udp_mask->hdr.src_port &&
++				udp_mask->hdr.src_port != UINT16_MAX)
++				return -rte_errno;
++			if (udp_mask->hdr.dst_port &&
++				udp_mask->hdr.dst_port != UINT16_MAX)
++				return -rte_errno;
++
+ 			if (udp_mask->hdr.src_port == UINT16_MAX)
+ 				*input_set |= ICE_INSET_UDP_SRC_PORT;
+ 			if (udp_mask->hdr.dst_port == UINT16_MAX)
+@@ -2254,6 +2285,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ 				return -rte_errno;
+ 			}
+ 
++			/* Partial mask for SCTP src/dst ports not supported */
++			if (sctp_mask->hdr.src_port &&
++				sctp_mask->hdr.src_port != UINT16_MAX)
++				return -rte_errno;
++			if (sctp_mask->hdr.dst_port &&
++				sctp_mask->hdr.dst_port != UINT16_MAX)
++				return -rte_errno;
++
+ 			if (sctp_mask->hdr.src_port == UINT16_MAX)
+ 				*input_set |= ICE_INSET_SCTP_SRC_PORT;
+ 			if (sctp_mask->hdr.dst_port == UINT16_MAX)
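The raw-pattern rework above replaces fixed conversion buffers with rte_zmalloc'd ones sized to pkt_len / 2, since every output byte consumes two hex characters of the flow item's pattern string. A standalone equivalent of that conversion, with plain calloc()/free() standing in for rte_zmalloc()/rte_free() and a helper name (hex_nibble) that is ours:

    #include <stdint.h>
    #include <stdlib.h>

    static int hex_nibble(uint8_t c)
    {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
    }

    /* Convert a 2*n_bytes hex string into n_bytes bytes; caller frees. */
    static uint8_t *hex_to_bytes(const uint8_t *s, size_t n_bytes)
    {
        uint8_t *out = calloc(n_bytes, 1);
        if (out == NULL)
            return NULL;
        for (size_t i = 0; i < n_bytes; i++) {
            int hi = hex_nibble(s[2 * i]);
            int lo = hex_nibble(s[2 * i + 1]);
            if (hi < 0 || lo < 0) {
                free(out);
                return NULL;
            }
            out[i] = (uint8_t)(hi * 16 + lo);
        }
        return out;
    }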
+diff --git a/dpdk/drivers/net/ice/ice_generic_flow.c b/dpdk/drivers/net/ice/ice_generic_flow.c
+index c673feb7a6..f9be3a5c94 100644
+--- a/dpdk/drivers/net/ice/ice_generic_flow.c
++++ b/dpdk/drivers/net/ice/ice_generic_flow.c
+@@ -1826,6 +1826,9 @@ ice_flow_init(struct ice_adapter *ad)
+ 	TAILQ_INIT(&pf->dist_parser_list);
+ 	rte_spinlock_init(&pf->flow_ops_lock);
+ 
++	if (ice_parser_create(&ad->hw, &ad->psr) != ICE_SUCCESS)
++		PMD_INIT_LOG(WARNING, "Failed to initialize DDP parser, raw packet filter will not be supported");
++
+ 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ 		if (engine->init == NULL) {
+ 			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+@@ -1880,6 +1883,11 @@ ice_flow_uninit(struct ice_adapter *ad)
+ 		TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
+ 		rte_free(p_parser);
+ 	}
++
++	if (ad->psr != NULL) {
++		ice_parser_destroy(ad->psr);
++		ad->psr = NULL;
++	}
+ }
+ 
+ static struct ice_parser_list *
+@@ -2515,7 +2523,9 @@ ice_flow_flush(struct rte_eth_dev *dev,
+ 		ret = ice_flow_destroy(dev, p_flow, error);
+ 		if (ret) {
+ 			PMD_DRV_LOG(ERR, "Failed to flush flows");
+-			return -EINVAL;
++			if (ret != -EAGAIN)
++				ret = -EINVAL;
++			return ret;
+ 		}
+ 	}
+ 
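ice_flow_init()/ice_flow_uninit() now own the DDP parser for the whole adapter lifetime instead of each raw-pattern parse creating and destroying one, which both removes per-rule overhead and gives the VXLAN port hooks in ice_ethdev.c a stable parser to update. The lifecycle, reduced to a sketch with stub types:

    #include <stdlib.h>

    struct parser { int state; };
    struct adapter { struct parser *psr; };

    static int parser_create(struct parser **p)
    {
        *p = calloc(1, sizeof(**p));
        return *p == NULL ? -1 : 0;
    }

    static void parser_destroy(struct parser *p) { free(p); }

    /* Create once at flow-engine init; consumers test ad->psr for NULL. */
    static void flow_init(struct adapter *ad)
    {
        if (parser_create(&ad->psr) != 0)
            ad->psr = NULL;  /* raw-packet filters simply stay unsupported */
    }

    /* Destroy exactly once at uninit, never per parsed rule. */
    static void flow_uninit(struct adapter *ad)
    {
        if (ad->psr != NULL) {
            parser_destroy(ad->psr);
            ad->psr = NULL;
        }
    }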
+diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c
+index afbb357fa3..f35727856e 100644
+--- a/dpdk/drivers/net/ice/ice_hash.c
++++ b/dpdk/drivers/net/ice/ice_hash.c
+@@ -653,13 +653,15 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad,
+ 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
+ 	struct ice_parser_profile prof;
+ 	struct ice_parser_result rslt;
+-	struct ice_parser *psr;
+ 	uint8_t *pkt_buf, *msk_buf;
+ 	uint8_t spec_len, pkt_len;
+ 	uint8_t tmp_val = 0;
+ 	uint8_t tmp_c = 0;
+ 	int i, j;
+ 
++	if (ad->psr == NULL)
++		return -rte_errno;
++
+ 	raw_spec = item->spec;
+ 	raw_mask = item->mask;
+ 
+@@ -713,11 +715,8 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad,
+ 			msk_buf[j] = tmp_val * 16 + tmp_c - '0';
+ 	}
+ 
+-	if (ice_parser_create(&ad->hw, &psr))
+-		return -rte_errno;
+-	if (ice_parser_run(psr, pkt_buf, pkt_len, &rslt))
++	if (ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt))
+ 		return -rte_errno;
+-	ice_parser_destroy(psr);
+ 
+ 	if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+ 		pkt_len, ICE_BLK_RSS, true, &prof))
+diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c
+index f6d8564ab8..71e5c6f5d6 100644
+--- a/dpdk/drivers/net/ice/ice_rxtx.c
++++ b/dpdk/drivers/net/ice/ice_rxtx.c
+@@ -163,6 +163,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
+ 			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ 		}
+ 	}
++#else
++	RTE_SET_USED(rxq);
+ #endif
+ }
+ 
+@@ -201,6 +203,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
+ 			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ 		}
+ 	}
++#else
++	RTE_SET_USED(rxq);
+ #endif
+ }
+ 
+@@ -1118,7 +1122,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
+ 	rxq->proto_xtr = pf->proto_xtr != NULL ?
+ 			 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
+ 
+-	/* Allocate the maximun number of RX ring hardware descriptor. */
++	/* Allocate the maximum number of RX ring hardware descriptor. */
+ 	len = ICE_MAX_RING_DESC;
+ 
+ 	/**
+@@ -1248,7 +1252,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
+ 	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
+ 				    tx_conf->tx_free_thresh :
+ 				    ICE_DEFAULT_TX_FREE_THRESH);
+-	/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
++	/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
+ 	tx_rs_thresh =
+ 		(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
+ 			nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
+@@ -1554,6 +1558,9 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
+ #if (ICE_LOOK_AHEAD != 8)
+ #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
+ #endif
++
++#define ICE_PTP_TS_VALID 0x1
++
+ static inline int
+ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
+ {
+@@ -1567,9 +1574,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
+ 	uint64_t pkt_flags = 0;
+ 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
++	bool is_tsinit = false;
++	uint64_t ts_ns;
+ 	struct ice_vsi *vsi = rxq->vsi;
+ 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+-	uint64_t ts_ns;
+ 	struct ice_adapter *ad = rxq->vsi->adapter;
+ #endif
+ 	rxdp = &rxq->rx_ring[rxq->rx_tail];
+@@ -1581,8 +1589,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
+ 	if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
+ 		return 0;
+ 
+-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+-		rxq->hw_register_set = 1;
++#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
++	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
++		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
++
++		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
++			is_tsinit = 1;
++	}
++#endif
+ 
+ 	/**
+ 	 * Scan LOOK_AHEAD descriptors at a time to determine which
+@@ -1618,14 +1632,26 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
+ 			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
+ #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ 			if (ice_timestamp_dynflag > 0) {
+-				ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
+-					rxq->hw_register_set,
+-					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+-				rxq->hw_register_set = 0;
++				rxq->time_high =
++				rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
++				if (unlikely(is_tsinit)) {
++					ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
++									   rxq->time_high);
++					rxq->hw_time_low = (uint32_t)ts_ns;
++					rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
++					is_tsinit = false;
++				} else {
++					if (rxq->time_high < rxq->hw_time_low)
++						rxq->hw_time_high += 1;
++					ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
++					rxq->hw_time_low = rxq->time_high;
++				}
++				rxq->hw_time_update = rte_get_timer_cycles() /
++						     (rte_get_timer_hz() / 1000);
+ 				*RTE_MBUF_DYNFIELD(mb,
+-					ice_timestamp_dynfield_offset,
+-					rte_mbuf_timestamp_t *) = ts_ns;
+-				mb->ol_flags |= ice_timestamp_dynflag;
++						   ice_timestamp_dynfield_offset,
++						   rte_mbuf_timestamp_t *) = ts_ns;
++				pkt_flags |= ice_timestamp_dynflag;
+ 			}
+ 
+ 			if (ad->ptp_ena && ((mb->packet_type &
+@@ -1634,6 +1660,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
+ 				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+ 				mb->timesync = rxq->queue_id;
+ 				pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
++				if (rxdp[j].wb.time_stamp_low &
++				    ICE_PTP_TS_VALID)
++					pkt_flags |=
++						RTE_MBUF_F_RX_IEEE1588_TMST;
+ 			}
+ #endif
+ 			mb->ol_flags |= pkt_flags;
+@@ -1714,7 +1744,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
+ 		rxdp[i].read.pkt_addr = dma_addr;
+ 	}
+ 
+-	/* Update rx tail regsiter */
++	/* Update Rx tail register */
+ 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+ 
+ 	rxq->rx_free_trigger =
+@@ -1820,14 +1850,19 @@ ice_recv_scattered_pkts(void *rx_queue,
+ 	uint64_t pkt_flags;
+ 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
++	bool is_tsinit = false;
++	uint64_t ts_ns;
+ 	struct ice_vsi *vsi = rxq->vsi;
+ 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+-	uint64_t ts_ns;
+ 	struct ice_adapter *ad = rxq->vsi->adapter;
+-#endif
+ 
+-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+-		rxq->hw_register_set = 1;
++	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
++		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
++
++		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
++			is_tsinit = true;
++	}
++#endif
+ 
+ 	while (nb_rx < nb_pkts) {
+ 		rxdp = &rx_ring[rx_id];
+@@ -1940,14 +1975,25 @@ ice_recv_scattered_pkts(void *rx_queue,
+ 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+ #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ 		if (ice_timestamp_dynflag > 0) {
+-			ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
+-				rxq->hw_register_set,
+-				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+-			rxq->hw_register_set = 0;
+-			*RTE_MBUF_DYNFIELD(first_seg,
+-				ice_timestamp_dynfield_offset,
+-				rte_mbuf_timestamp_t *) = ts_ns;
+-			first_seg->ol_flags |= ice_timestamp_dynflag;
++			rxq->time_high =
++			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
++			if (unlikely(is_tsinit)) {
++				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
++				rxq->hw_time_low = (uint32_t)ts_ns;
++				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
++				is_tsinit = false;
++			} else {
++				if (rxq->time_high < rxq->hw_time_low)
++					rxq->hw_time_high += 1;
++				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
++				rxq->hw_time_low = rxq->time_high;
++			}
++			rxq->hw_time_update = rte_get_timer_cycles() /
++					     (rte_get_timer_hz() / 1000);
++			*RTE_MBUF_DYNFIELD(rxm,
++					   (ice_timestamp_dynfield_offset),
++					   rte_mbuf_timestamp_t *) = ts_ns;
++			pkt_flags |= ice_timestamp_dynflag;
+ 		}
+ 
+ 		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+@@ -1976,7 +2022,7 @@ ice_recv_scattered_pkts(void *rx_queue,
+ 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ 	 * register. Update the RDT with the value of the last processed RX
+ 	 * descriptor minus 1, to guarantee that the RDT register is never
+-	 * equal to the RDH register, which creates a "full" ring situtation
++	 * equal to the RDH register, which creates a "full" ring situation
+ 	 * from the hardware point of view.
+ 	 */
+ 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+@@ -2314,14 +2360,19 @@ ice_recv_pkts(void *rx_queue,
+ 	uint64_t pkt_flags;
+ 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
++	bool is_tsinit = false;
++	uint64_t ts_ns;
+ 	struct ice_vsi *vsi = rxq->vsi;
+ 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+-	uint64_t ts_ns;
+ 	struct ice_adapter *ad = rxq->vsi->adapter;
+-#endif
+ 
+-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+-		rxq->hw_register_set = 1;
++	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
++		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
++
++		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
++			is_tsinit = 1;
++	}
++#endif
+ 
+ 	while (nb_rx < nb_pkts) {
+ 		rxdp = &rx_ring[rx_id];
+@@ -2375,14 +2426,25 @@ ice_recv_pkts(void *rx_queue,
+ 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+ #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ 		if (ice_timestamp_dynflag > 0) {
+-			ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
+-				rxq->hw_register_set,
+-				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+-			rxq->hw_register_set = 0;
++			rxq->time_high =
++			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
++			if (unlikely(is_tsinit)) {
++				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
++				rxq->hw_time_low = (uint32_t)ts_ns;
++				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
++				is_tsinit = false;
++			} else {
++				if (rxq->time_high < rxq->hw_time_low)
++					rxq->hw_time_high += 1;
++				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
++				rxq->hw_time_low = rxq->time_high;
++			}
++			rxq->hw_time_update = rte_get_timer_cycles() /
++					     (rte_get_timer_hz() / 1000);
+ 			*RTE_MBUF_DYNFIELD(rxm,
+-				ice_timestamp_dynfield_offset,
+-				rte_mbuf_timestamp_t *) = ts_ns;
+-			rxm->ol_flags |= ice_timestamp_dynflag;
++					   (ice_timestamp_dynfield_offset),
++					   rte_mbuf_timestamp_t *) = ts_ns;
++			pkt_flags |= ice_timestamp_dynflag;
+ 		}
+ 
+ 		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+@@ -2397,6 +2459,7 @@ ice_recv_pkts(void *rx_queue,
+ 		/* copy old mbuf to rx_pkts */
+ 		rx_pkts[nb_rx++] = rxm;
+ 	}
++
+ 	rxq->rx_tail = rx_id;
+ 	/**
+ 	 * If the number of free RX descriptors is greater than the RX free
+@@ -2493,15 +2556,15 @@ ice_txd_enable_checksum(uint64_t ol_flags,
+ 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+ 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ 		*td_offset |= (tx_offload.l3_len >> 2) <<
+-			      ICE_TX_DESC_LEN_IPLEN_S;
++			ICE_TX_DESC_LEN_IPLEN_S;
+ 	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+ 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+ 		*td_offset |= (tx_offload.l3_len >> 2) <<
+-			      ICE_TX_DESC_LEN_IPLEN_S;
++			ICE_TX_DESC_LEN_IPLEN_S;
+ 	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
+ 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+ 		*td_offset |= (tx_offload.l3_len >> 2) <<
+-			      ICE_TX_DESC_LEN_IPLEN_S;
++			ICE_TX_DESC_LEN_IPLEN_S;
+ 	}
+ 
+ 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+@@ -3117,7 +3180,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq,
+ 	ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+ 
+-	/* Determin if RS bit needs to be set */
++	/* Determine if RS bit needs to be set */
+ 	if (txq->tx_tail > txq->tx_next_rs) {
+ 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
+@@ -3541,8 +3604,9 @@ static const struct {
+ 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+ 	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
+ #endif
+-	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
+-	{ ice_xmit_pkts_vec,      "Vector SSE" },
++	{ ice_xmit_pkts_vec_avx2,         "Vector AVX2" },
++	{ ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
++	{ ice_xmit_pkts_vec,              "Vector SSE" },
+ #endif
+ };
+ 
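All three ice receive paths are converted to the same 32-to-64-bit timestamp extension: a cached (hw_time_high, hw_time_low) pair is carried in the queue, the high half is bumped whenever the new low half goes backwards (a 32-bit wrap), and a full resync through ice_tstamp_convert_32b_64b() is forced once more than 4 ms of software time has passed since the last update. A condensed sketch of that logic, with the resync left as a caller-supplied hook:

    #include <stdint.h>

    struct ts_state {
        uint32_t hw_time_high;   /* cached upper 32 bits */
        uint32_t hw_time_low;    /* last observed lower 32 bits */
        uint64_t hw_time_update; /* ms timestamp of last update */
    };

    /* full_resync() would re-read the PHC, as the driver does via
     * ice_tstamp_convert_32b_64b(); here it is a caller-supplied hook. */
    static uint64_t extend_ts(struct ts_state *st, uint32_t ts_low,
                              uint64_t now_ms,
                              uint64_t (*full_resync)(uint32_t))
    {
        uint64_t ts_ns;

        if (now_ms - st->hw_time_update > 4) {
            ts_ns = full_resync(ts_low);          /* too stale: resync */
            st->hw_time_high = (uint32_t)(ts_ns >> 32);
        } else {
            if (ts_low < st->hw_time_low)         /* 32-bit counter wrapped */
                st->hw_time_high += 1;
            ts_ns = ((uint64_t)st->hw_time_high << 32) | ts_low;
        }
        st->hw_time_low = ts_low;
        st->hw_time_update = now_ms;
        return ts_ns;
    }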
+diff --git a/dpdk/drivers/net/ice/ice_rxtx.h b/dpdk/drivers/net/ice/ice_rxtx.h
+index bb18a01951..f5337d5284 100644
+--- a/dpdk/drivers/net/ice/ice_rxtx.h
++++ b/dpdk/drivers/net/ice/ice_rxtx.h
+@@ -95,6 +95,9 @@ struct ice_rx_queue {
+ 	uint32_t time_high;
+ 	uint32_t hw_register_set;
+ 	const struct rte_memzone *mz;
++	uint32_t hw_time_high; /* high 32 bits of timestamp */
++	uint32_t hw_time_low; /* low 32 bits of timestamp */
++	uint64_t hw_time_update; /* SW timestamp (ms) of the last HW record update */
+ };
+ 
+ struct ice_tx_entry {
+diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h
+index dfe60c81d9..2dd2d83650 100644
+--- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h
++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h
+@@ -250,7 +250,8 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
+ #define ICE_TX_NO_VECTOR_FLAGS (			\
+ 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+ 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+-		RTE_ETH_TX_OFFLOAD_TCP_TSO)
++		RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
++		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ 
+ #define ICE_TX_VECTOR_OFFLOAD (				\
+ 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		\
+@@ -366,7 +367,7 @@ ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+ 	/* Tx Checksum Offload */
+ 	/* SET MACLEN */
+ 	td_offset |= (tx_pkt->l2_len >> 1) <<
+-			ICE_TX_DESC_LEN_MACLEN_S;
++		ICE_TX_DESC_LEN_MACLEN_S;
+ 
+ 	/* Enable L3 checksum offload */
+ 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c b/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c
+index 6cd44c5847..fd94cedde3 100644
+--- a/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c
++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c
+@@ -202,7 +202,7 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4],
+ 	__m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6);
+ 	__m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask);
+ 	flags = _mm_or_si128(l3_l4_flags, l4_outer_flags);
+-	/* we need to mask out the reduntant bits introduced by RSS or
++	/* we need to mask out the redundant bits introduced by RSS or
+ 	 * VLAN fields.
+ 	 */
+ 	flags = _mm_and_si128(flags, cksum_mask);
+@@ -566,7 +566,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ 				 pkt_mb0);
+ 		ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+-		/* C.4 calc avaialbe number of desc */
++		/* C.4 calc available number of desc */
+ 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ 		nb_pkts_recd += var;
+ 		if (likely(var != ICE_DESCS_PER_LOOP))
+diff --git a/dpdk/drivers/net/ice/ice_switch_filter.c b/dpdk/drivers/net/ice/ice_switch_filter.c
+index ed29c00d77..bd805d9606 100644
+--- a/dpdk/drivers/net/ice/ice_switch_filter.c
++++ b/dpdk/drivers/net/ice/ice_switch_filter.c
+@@ -400,6 +400,14 @@ ice_switch_create(struct ice_adapter *ad,
+ 			"lookup list should not be NULL");
+ 		goto error;
+ 	}
++
++	if (ice_dcf_adminq_need_retry(ad)) {
++		rte_flow_error_set(error, EAGAIN,
++			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
++			"DCF is not on");
++		goto error;
++	}
++
+ 	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+ 	if (!ret) {
+ 		filter_conf_ptr = rte_zmalloc("ice_switch_filter",
+@@ -423,7 +431,12 @@ ice_switch_create(struct ice_adapter *ad,
+ 
+ 		flow->rule = filter_conf_ptr;
+ 	} else {
+-		rte_flow_error_set(error, EINVAL,
++		if (ice_dcf_adminq_need_retry(ad))
++			ret = -EAGAIN;
++		else
++			ret = -EINVAL;
++
++		rte_flow_error_set(error, -ret,
+ 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ 			"switch filter create flow fail");
+ 		goto error;
+@@ -475,9 +488,21 @@ ice_switch_destroy(struct ice_adapter *ad,
+ 		return -rte_errno;
+ 	}
+ 
++	if (ice_dcf_adminq_need_retry(ad)) {
++		rte_flow_error_set(error, EAGAIN,
++			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
++			"DCF is not on");
++		return -rte_errno;
++	}
++
+ 	ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
+ 	if (ret) {
+-		rte_flow_error_set(error, EINVAL,
++		if (ice_dcf_adminq_need_retry(ad))
++			ret = -EAGAIN;
++		else
++			ret = -EINVAL;
++
++		rte_flow_error_set(error, -ret,
+ 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ 			"fail to destroy switch filter rule");
+ 		return -rte_errno;
+@@ -2016,6 +2041,12 @@ ice_switch_redirect(struct ice_adapter *ad,
+ 	}
+ 
+ rmv_rule:
++	if (ice_dcf_adminq_need_retry(ad)) {
++		PMD_DRV_LOG(WARNING, "DCF is not on");
++		ret = -EAGAIN;
++		goto out;
++	}
++
+ 	/* Remove the old rule */
+ 	ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
+ 	if (ret) {
+@@ -2028,6 +2059,12 @@ ice_switch_redirect(struct ice_adapter *ad,
+ 	}
+ 
+ add_rule:
++	if (ice_dcf_adminq_need_retry(ad)) {
++		PMD_DRV_LOG(WARNING, "DCF is not on");
++		ret = -EAGAIN;
++		goto out;
++	}
++
+ 	/* Update VSI context */
+ 	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
+ 
+@@ -2047,6 +2084,10 @@ ice_switch_redirect(struct ice_adapter *ad,
+ 	}
+ 
+ out:
++	if (ret == -EINVAL)
++		if (ice_dcf_adminq_need_retry(ad))
++			ret = -EAGAIN;
++
+ 	ice_free(hw, lkups_dp);
+ 	return ret;
+ }
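Every switch-rule operation above gains the same guard: bail out with EAGAIN when ice_dcf_adminq_need_retry() says the DCF is down, and re-classify late ice_add_adv_rule()/ice_rem_adv_rule() failures as -EAGAIN so that callers — including the ice_flow_flush() hunk earlier in this patch — can retry instead of failing hard with -EINVAL. The shape of the guard, with stand-in types and an assumed retry predicate:

    #include <errno.h>
    #include <stdbool.h>

    struct adapter { bool dcf_state_on; };

    /* Stand-in for ice_dcf_adminq_need_retry(); semantics assumed. */
    static bool adminq_need_retry(const struct adapter *ad)
    {
        return !ad->dcf_state_on;
    }

    static int rule_op(struct adapter *ad, int (*hw_op)(void))
    {
        if (adminq_need_retry(ad))
            return -EAGAIN;              /* DCF is not on: retry later */

        if (hw_op() != 0)                /* e.g. ice_add_adv_rule() */
            return adminq_need_retry(ad) ? -EAGAIN : -EINVAL;
        return 0;
    }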
+diff --git a/dpdk/drivers/net/igc/igc_ethdev.c b/dpdk/drivers/net/igc/igc_ethdev.c
+index a1f1a9772b..8d8a0da424 100644
+--- a/dpdk/drivers/net/igc/igc_ethdev.c
++++ b/dpdk/drivers/net/igc/igc_ethdev.c
+@@ -1234,8 +1234,15 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
+ 	 * has already done this work. Only check we don't need a different
+ 	 * RX function.
+ 	 */
+-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
++		dev->rx_pkt_burst = igc_recv_pkts;
++		if (dev->data->scattered_rx)
++			dev->rx_pkt_burst = igc_recv_scattered_pkts;
++
++		dev->tx_pkt_burst = igc_xmit_pkts;
++		dev->tx_pkt_prepare = eth_igc_prep_pkts;
+ 		return 0;
++	}
+ 
+ 	rte_eth_copy_pci_info(dev, pci_dev);
+ 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
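This igc fix applies the usual multi-process rule for eth_dev init: a secondary process must not reconfigure the hardware, but function pointers are per-process, so the Rx/Tx burst handlers have to be reassigned before the early return (the igc_txrx hunks below un-static those functions to make them reachable). Generic shape of the pattern, with stand-in types:

    #include <stdbool.h>

    typedef int (*burst_fn)(void *queue);

    struct eth_dev_data { bool scattered_rx; };
    struct eth_dev {
        struct eth_dev_data *data;
        burst_fn rx_pkt_burst;
        burst_fn tx_pkt_burst;
    };

    static int rx_simple(void *q)    { (void)q; return 0; }
    static int rx_scattered(void *q) { (void)q; return 0; }
    static int tx_burst(void *q)     { (void)q; return 0; }

    /* is_secondary mirrors rte_eal_process_type() != RTE_PROC_PRIMARY */
    static int dev_init(struct eth_dev *dev, bool is_secondary)
    {
        if (is_secondary) {
            /* Function pointers are per-process: restore them, then
             * bail out before any hardware setup. */
            dev->rx_pkt_burst = dev->data->scattered_rx ?
                                rx_scattered : rx_simple;
            dev->tx_pkt_burst = tx_burst;
            return 0;
        }
        /* ... full primary-process initialization ... */
        return 0;
    }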
+diff --git a/dpdk/drivers/net/igc/igc_filter.c b/dpdk/drivers/net/igc/igc_filter.c
+index 51fcabfb59..bff98df200 100644
+--- a/dpdk/drivers/net/igc/igc_filter.c
++++ b/dpdk/drivers/net/igc/igc_filter.c
+@@ -167,7 +167,7 @@ igc_tuple_filter_lookup(const struct igc_adapter *igc,
+ 		/* search the filter array */
+ 		for (; i < IGC_MAX_NTUPLE_FILTERS; i++) {
+ 			if (igc->ntuple_filters[i].hash_val) {
+-				/* compare the hase value */
++				/* compare the hash value */
+ 				if (ntuple->hash_val ==
+ 					igc->ntuple_filters[i].hash_val)
+ 					/* filter be found, return index */
+diff --git a/dpdk/drivers/net/igc/igc_txrx.c b/dpdk/drivers/net/igc/igc_txrx.c
+index 339b0c9aa1..ffd219b0df 100644
+--- a/dpdk/drivers/net/igc/igc_txrx.c
++++ b/dpdk/drivers/net/igc/igc_txrx.c
+@@ -345,7 +345,7 @@ rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,
+ 	rxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info);
+ }
+ 
+-static uint16_t
++uint16_t
+ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ {
+ 	struct igc_rx_queue * const rxq = rx_queue;
+@@ -488,7 +488,7 @@ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 	return nb_rx;
+ }
+ 
+-static uint16_t
++uint16_t
+ igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 			uint16_t nb_pkts)
+ {
+@@ -1397,7 +1397,7 @@ eth_igc_rx_queue_setup(struct rte_eth_dev *dev,
+ }
+ 
+ /* prepare packets for transmit */
+-static uint16_t
++uint16_t
+ eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ 		uint16_t nb_pkts)
+ {
+@@ -1604,7 +1604,7 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+ 	return tmp;
+ }
+ 
+-static uint16_t
++uint16_t
+ igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ {
+ 	struct igc_tx_queue * const txq = tx_queue;
+@@ -2099,7 +2099,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)
+ 				sw_ring[tx_id].mbuf = NULL;
+ 				sw_ring[tx_id].last_id = tx_id;
+ 
+-				/* Move to next segemnt. */
++				/* Move to next segment. */
+ 				tx_id = sw_ring[tx_id].next_id;
+ 			} while (tx_id != tx_next);
+ 
+@@ -2133,7 +2133,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)
+ 			 * Walk the list and find the next mbuf, if any.
+ 			 */
+ 			do {
+-				/* Move to next segemnt. */
++				/* Move to next segment. */
+ 				tx_id = sw_ring[tx_id].next_id;
+ 
+ 				if (sw_ring[tx_id].mbuf)
+diff --git a/dpdk/drivers/net/igc/igc_txrx.h b/dpdk/drivers/net/igc/igc_txrx.h
+index 535108a868..02a0a051bb 100644
+--- a/dpdk/drivers/net/igc/igc_txrx.h
++++ b/dpdk/drivers/net/igc/igc_txrx.h
+@@ -49,6 +49,12 @@ void eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ 	struct rte_eth_txq_info *qinfo);
+ void eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ 			uint16_t rx_queue_id, int on);
++uint16_t igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
++uint16_t igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
++uint16_t eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
++	uint16_t nb_pkts);
++uint16_t igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
++	uint16_t nb_pkts);
+ #ifdef __cplusplus
+ }
+ #endif
+diff --git a/dpdk/drivers/net/ionic/ionic_if.h b/dpdk/drivers/net/ionic/ionic_if.h
+index 693b44d764..45bad9b040 100644
+--- a/dpdk/drivers/net/ionic/ionic_if.h
++++ b/dpdk/drivers/net/ionic/ionic_if.h
+@@ -2068,7 +2068,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
+  * enum ionic_fw_control_oper - FW control operations
+  * @IONIC_FW_RESET:     Reset firmware
+  * @IONIC_FW_INSTALL:   Install firmware
+- * @IONIC_FW_ACTIVATE:  Acticate firmware
++ * @IONIC_FW_ACTIVATE:  Activate firmware
+  */
+ enum ionic_fw_control_oper {
+ 	IONIC_FW_RESET		= 0,
+@@ -2091,7 +2091,7 @@ struct ionic_fw_control_cmd {
+ };
+ 
+ /**
+- * struct ionic_fw_control_comp - Firmware control copletion
++ * struct ionic_fw_control_comp - Firmware control completion
+  * @status:     Status of the command (enum ionic_status_code)
+  * @comp_index: Index in the descriptor ring for which this is the completion
+  * @slot:       Slot where the firmware was installed
+@@ -2878,7 +2878,7 @@ struct ionic_doorbell {
+  *                    and @identity->intr_coal_div to convert from
+  *                    usecs to device units:
+  *
+- *                      coal_init = coal_usecs * coal_mutl / coal_div
++ *                      coal_init = coal_usecs * coal_mult / coal_div
+  *
+  *                    When an interrupt is sent the interrupt
+  *                    coalescing timer current value
+diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c
+index 964506c6db..014e438dd5 100644
+--- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c
++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c
+@@ -483,7 +483,7 @@ static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
+ 					RTE_CACHE_LINE_SIZE,
+ 					afu_dev->device.numa_node);
+ 		if (!hw) {
+-			IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data");
++			IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
+ 				retval = -ENOMEM;
+ 				return -ENOMEM;
+ 		}
+diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h
+index 041f13d9c3..58fcc50c57 100644
+--- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h
++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h
+@@ -223,7 +223,7 @@ struct ipn3ke_hw_cap {
+ };
+ 
+ /**
+- * Strucute to store private data for each representor instance
++ * Structure to store private data for each representor instance
+  */
+ struct ipn3ke_rpst {
+ 	TAILQ_ENTRY(ipn3ke_rpst) next;       /**< Next in device list. */
+@@ -237,7 +237,7 @@ struct ipn3ke_rpst {
+ 	uint16_t i40e_pf_eth_port_id;
+ 	struct rte_eth_link ori_linfo;
+ 	struct ipn3ke_tm_internals tm;
+-	/**< Private data store of assocaiated physical function */
++	/**< Private data store of associated physical function */
+ 	struct rte_ether_addr mac_addr;
+ };
+ 
+diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c b/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c
+index f5867ca055..66ae31a5a9 100644
+--- a/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c
++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c
+@@ -1299,7 +1299,7 @@ int ipn3ke_flow_init(void *dev)
+ 	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);
+ 
+ 
+-	/* configure rx parse config, settings associatied with VxLAN */
++	/* configure rx parse config, settings associated with VxLAN */
+ 	IPN3KE_MASK_WRITE_REG(hw,
+ 			IPN3KE_CLF_RX_PARSE_CFG,
+ 			0,
+diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c
+index de325c7d29..abbecfdf2e 100644
+--- a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c
++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c
+@@ -2218,9 +2218,6 @@ ipn3ke_rpst_xstats_get
+ 	struct ipn3ke_rpst_hw_port_stats hw_stats;
+ 	struct rte_eth_stats stats;
+ 
+-	if (!xstats)
+-		return 0;
+-
+ 	if (!ethdev) {
+ 		IPN3KE_AFU_PMD_ERR("ethernet device to get statistics is NULL");
+ 		return -EINVAL;
+@@ -2282,7 +2279,7 @@ ipn3ke_rpst_xstats_get
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from ipn3ke_rpst_hw_port */
++	/* Get individual stats from ipn3ke_rpst_hw_port */
+ 	for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
+ 		xstats[count].value = *(uint64_t *)(((char *)(&hw_stats)) +
+ 			ipn3ke_rpst_hw_port_strings[i].offset);
+@@ -2290,7 +2287,7 @@ ipn3ke_rpst_xstats_get
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from ipn3ke_rpst_rxq_pri */
++	/* Get individual stats from ipn3ke_rpst_rxq_pri */
+ 	for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
+ 		for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
+ 			xstats[count].value =
+@@ -2302,7 +2299,7 @@ ipn3ke_rpst_xstats_get
+ 		}
+ 	}
+ 
+-	/* Get individiual stats from ipn3ke_rpst_txq_prio */
++	/* Get individual stats from ipn3ke_rpst_txq_prio */
+ 	for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
+ 		for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
+ 			xstats[count].value =
+@@ -2340,7 +2337,7 @@ __rte_unused unsigned int limit)
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from ipn3ke_rpst_hw_port */
++	/* Get individual stats from ipn3ke_rpst_hw_port */
+ 	for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
+ 		snprintf(xstats_names[count].name,
+ 			 sizeof(xstats_names[count].name),
+@@ -2349,7 +2346,7 @@ __rte_unused unsigned int limit)
+ 		count++;
+ 	}
+ 
+-	/* Get individiual stats from ipn3ke_rpst_rxq_pri */
++	/* Get individual stats from ipn3ke_rpst_rxq_pri */
+ 	for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
+ 		for (prio = 0; prio < 8; prio++) {
+ 			snprintf(xstats_names[count].name,
+@@ -2361,7 +2358,7 @@ __rte_unused unsigned int limit)
+ 		}
+ 	}
+ 
+-	/* Get individiual stats from ipn3ke_rpst_txq_prio */
++	/* Get individual stats from ipn3ke_rpst_txq_prio */
+ 	for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
+ 		for (prio = 0; prio < 8; prio++) {
+ 			snprintf(xstats_names[count].name,
+diff --git a/dpdk/drivers/net/ipn3ke/meson.build b/dpdk/drivers/net/ipn3ke/meson.build
+index 4bf739809e..104d2f58e5 100644
+--- a/dpdk/drivers/net/ipn3ke/meson.build
++++ b/dpdk/drivers/net/ipn3ke/meson.build
+@@ -8,7 +8,7 @@ if is_windows
+ endif
+ 
+ #
+-# Add the experimenatal APIs called from this PMD
++# Add the experimental APIs called from this PMD
+ #  rte_eth_switch_domain_alloc()
+ #  rte_eth_dev_create()
+ #  rte_eth_dev_destroy()
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_bypass.c b/dpdk/drivers/net/ixgbe/ixgbe_bypass.c
+index 67ced6c723..94f34a2996 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_bypass.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_bypass.c
+@@ -11,7 +11,7 @@
+ 
+ #define	BYPASS_STATUS_OFF_MASK	3
+ 
+-/* Macros to check for invlaid function pointers. */
++/* Macros to check for invalid function pointers. */
+ #define	FUNC_PTR_OR_ERR_RET(func, retval) do {              \
+ 	if ((func) == NULL) {                               \
+ 		PMD_DRV_LOG(ERR, "%s:%d function not supported", \
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h b/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h
+index 8eb773391b..6ef965dbb6 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h
++++ b/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h
+@@ -135,7 +135,7 @@ static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
+  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
+  *
+  * If we send a write we can't be sure it took until we can read back
+- * that same register.  It can be a problem as some of the feilds may
++ * that same register.  It can be a problem as some of the fields may
+  * for valid reasons change between the time wrote the register and
+  * we read it again to verify.  So this function check everything we
+  * can check and then assumes it worked.
+@@ -189,7 +189,7 @@ static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
+ }
+ 
+ /**
+- *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
++ *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
+  *
+  *  @hw: pointer to hardware structure
+  *  @cmd: The control word we are setting.
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
+index fe61dba81d..31d06b6110 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
+@@ -128,6 +128,13 @@
+ #define IXGBE_EXVET_VET_EXT_SHIFT              16
+ #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
+ 
++#define IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE	"fiber_sdp3_no_tx_disable"
++
++static const char * const ixgbe_valid_arguments[] = {
++	IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE,
++	NULL
++};
++
+ #define IXGBEVF_DEVARG_PFLINK_FULLCHK		"pflink_fullchk"
+ 
+ static const char * const ixgbevf_valid_arguments[] = {
+@@ -348,6 +355,8 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+ static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
+ static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);
++static int devarg_handle_int(__rte_unused const char *key, const char *value,
++			     void *extra_args);
+ 
+ /*
+  * Define VF Stats MACRO for Non "cleared on read" register
+@@ -781,6 +790,20 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
+ 	case ixgbe_phy_sfp_passive_unknown:
+ 		return 1;
+ 	default:
++		/* x550em devices may be SFP, check media type */
++		switch (hw->mac.type) {
++		case ixgbe_mac_X550EM_x:
++		case ixgbe_mac_X550EM_a:
++			switch (ixgbe_get_media_type(hw)) {
++			case ixgbe_media_type_fiber:
++			case ixgbe_media_type_fiber_qsfp:
++				return 1;
++			default:
++				break;
++			}
++		default:
++			break;
++		}
+ 		return 0;
+ 	}
+ }
+@@ -1018,6 +1041,29 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
+ 	ixgbe_release_swfw_semaphore(hw, mask);
+ }
+ 
++static void
++ixgbe_parse_devargs(struct ixgbe_adapter *adapter,
++		      struct rte_devargs *devargs)
++{
++	struct rte_kvargs *kvlist;
++	uint16_t sdp3_no_tx_disable;
++
++	if (devargs == NULL)
++		return;
++
++	kvlist = rte_kvargs_parse(devargs->args, ixgbe_valid_arguments);
++	if (kvlist == NULL)
++		return;
++
++	if (rte_kvargs_count(kvlist, IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE) == 1 &&
++	    rte_kvargs_process(kvlist, IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE,
++			       devarg_handle_int, &sdp3_no_tx_disable) == 0 &&
++	    sdp3_no_tx_disable == 1)
++		adapter->sdp3_no_tx_disable = 1;
++
++	rte_kvargs_free(kvlist);
++}
++
+ /*
+  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
+  * It returns 0 on success.
+@@ -1081,6 +1127,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 	}
+ 
+ 	rte_atomic32_clear(&ad->link_thread_running);
++	ixgbe_parse_devargs(eth_dev->data->dev_private,
++			    pci_dev->device.devargs);
+ 	rte_eth_copy_pci_info(eth_dev, pci_dev);
+ 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+ 
+@@ -1223,13 +1271,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 
+ 	/* initialize PF if max_vfs not zero */
+ 	ret = ixgbe_pf_host_init(eth_dev);
+-	if (ret) {
+-		rte_free(eth_dev->data->mac_addrs);
+-		eth_dev->data->mac_addrs = NULL;
+-		rte_free(eth_dev->data->hash_mac_addrs);
+-		eth_dev->data->hash_mac_addrs = NULL;
+-		return ret;
+-	}
++	if (ret)
++		goto err_pf_host_init;
+ 
+ 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ 	/* let hardware know driver is loaded */
+@@ -1268,10 +1311,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 	TAILQ_INIT(&filter_info->fivetuple_list);
+ 
+ 	/* initialize flow director filter list & hash */
+-	ixgbe_fdir_filter_init(eth_dev);
++	ret = ixgbe_fdir_filter_init(eth_dev);
++	if (ret)
++		goto err_fdir_filter_init;
+ 
+ 	/* initialize l2 tunnel filter list & hash */
+-	ixgbe_l2_tn_filter_init(eth_dev);
++	ret = ixgbe_l2_tn_filter_init(eth_dev);
++	if (ret)
++		goto err_l2_tn_filter_init;
+ 
+ 	/* initialize flow filter lists */
+ 	ixgbe_filterlist_init();
+@@ -1283,6 +1330,21 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 	ixgbe_tm_conf_init(eth_dev);
+ 
+ 	return 0;
++
++err_l2_tn_filter_init:
++	ixgbe_fdir_filter_uninit(eth_dev);
++err_fdir_filter_init:
++	ixgbe_disable_intr(hw);
++	rte_intr_disable(intr_handle);
++	rte_intr_callback_unregister(intr_handle,
++		ixgbe_dev_interrupt_handler, eth_dev);
++	ixgbe_pf_host_uninit(eth_dev);
++err_pf_host_init:
++	rte_free(eth_dev->data->mac_addrs);
++	eth_dev->data->mac_addrs = NULL;
++	rte_free(eth_dev->data->hash_mac_addrs);
++	eth_dev->data->hash_mac_addrs = NULL;
++	return ret;
+ }
+ 
+ static int
+@@ -2375,7 +2437,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
+ 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ 
+-	/* multipe queue mode checking */
++	/* multiple queue mode checking */
+ 	ret  = ixgbe_check_mq_mode(dev);
+ 	if (ret != 0) {
+ 		PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
+@@ -2603,7 +2665,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
+ 		}
+ 	}
+ 
+-	/* confiugre msix for sleep until rx interrupt */
++	/* configure MSI-X for sleep until Rx interrupt */
+ 	ixgbe_configure_msix(dev);
+ 
+ 	/* initialize transmission unit */
+@@ -2907,7 +2969,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+ 	if (hw->mac.type == ixgbe_mac_82599EB) {
+ #ifdef RTE_LIBRTE_IXGBE_BYPASS
+ 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+-			/* Not suported in bypass mode */
++			/* Not supported in bypass mode */
+ 			PMD_INIT_LOG(ERR, "Set link up is not supported "
+ 				     "by device id 0x%x", hw->device_id);
+ 			return -ENOTSUP;
+@@ -2938,7 +3000,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+ 	if (hw->mac.type == ixgbe_mac_82599EB) {
+ #ifdef RTE_LIBRTE_IXGBE_BYPASS
+ 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+-			/* Not suported in bypass mode */
++			/* Not supported in bypass mode */
+ 			PMD_INIT_LOG(ERR, "Set link down is not supported "
+ 				     "by device id 0x%x", hw->device_id);
+ 			return -ENOTSUP;
+@@ -3028,6 +3090,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
+ 
+ #ifdef RTE_LIB_SECURITY
+ 	rte_free(dev->security_ctx);
++	dev->security_ctx = NULL;
+ #endif
+ 
+ 	return ret;
+@@ -4236,7 +4299,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ 		return rte_eth_linkstatus_set(dev, &link);
+ 	}
+ 
+-	if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
++	if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber &&
++	    !ad->sdp3_no_tx_disable) {
+ 		esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ 		if ((esdp_reg & IXGBE_ESDP_SDP3))
+ 			link_up = 0;
+@@ -4603,7 +4667,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -4659,7 +4723,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
+  * @param handle
+  *  Pointer to interrupt handle.
+  * @param param
+- *  The address of parameter (struct rte_eth_dev *) regsitered before.
++ *  The address of parameter (struct rte_eth_dev *) registered before.
+  *
+  * @return
+  *  void
+@@ -5921,7 +5985,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
+ 	/* Configure all RX queues of VF */
+ 	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+ 		/* Force all queue use vector 0,
+-		 * as IXGBE_VF_MAXMSIVECOTR = 1
++		 * as IXGBE_VF_MAXMSIVECTOR = 1
+ 		 */
+ 		ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ 		rte_intr_vec_list_index_set(intr_handle, q_idx,
+@@ -6256,7 +6320,7 @@ ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+  * @param
+  * dev: Pointer to struct rte_eth_dev.
+  * index: the index the filter allocates.
+- * filter: ponter to the filter that will be added.
++ * filter: pointer to the filter that will be added.
+  * rx_queue: the queue id the filter assigned to.
+  *
+  * @return
+@@ -6872,7 +6936,7 @@ ixgbe_timesync_disable(struct rte_eth_dev *dev)
+ 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
+ 
+-	/* Stop incrementating the System Time registers. */
++	/* Stop incrementing the System Time registers. */
+ 	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
+ 
+ 	return 0;
+@@ -8225,6 +8289,8 @@ ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
+ RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
+ RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
++RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe,
++			      IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE "=<0|1>");
+ RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
+ RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
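The new fiber_sdp3_no_tx_disable devarg follows the stock kvargs recipe: declare the valid key list, rte_kvargs_parse() the device's devargs string, and process the key with a small integer handler. The handler body itself (devarg_handle_int) is only declared in this excerpt, so the following stand-in is an assumption about its contract, not a copy of it:

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch of the callback shape rte_kvargs_process() expects:
     * parse the "value" half of "key=value" into an integer out-param,
     * returning 0 on success and negative on error. */
    static int handle_int(const char *key, const char *value, void *extra)
    {
        (void)key;
        char *end = NULL;
        long v = strtol(value, &end, 10);
        if (end == NULL || *end != '\0' || v < 0 || v > UINT16_MAX)
            return -1;
        *(uint16_t *)extra = (uint16_t)v;
        return 0;
    }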
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h
+index 83e8b5e56a..cc6049a66a 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h
++++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h
+@@ -68,7 +68,7 @@
+ #define IXGBE_LPBK_NONE   0x0 /* Default value. Loopback is disabled. */
+ #define IXGBE_LPBK_TX_RX  0x1 /* Tx->Rx loopback operation is enabled. */
+ /* X540-X550 specific loopback operations */
+-#define IXGBE_MII_AUTONEG_ENABLE        0x1000 /* Auto-negociation enable (default = 1) */
++#define IXGBE_MII_AUTONEG_ENABLE        0x1000 /* Auto-negotiation enable (default = 1) */
+ 
+ #define IXGBE_MAX_JUMBO_FRAME_SIZE      0x2600 /* Maximum Jumbo frame size. */
+ 
+@@ -501,6 +501,9 @@ struct ixgbe_adapter {
+ 	/* For RSS reta table update */
+ 	uint8_t rss_reta_updated;
+ 
++	/* Used for limiting SDP3 TX_DISABLE checks */
++	uint8_t sdp3_no_tx_disable;
++
+ 	/* Used for VF link sync with PF's physical and logical (by checking
+ 	 * mailbox status) link status.
+ 	 */
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_fdir.c b/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
+index 7894047829..834c1b3f51 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
+@@ -390,7 +390,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
+ 
+ 		switch (info->mask.tunnel_type_mask) {
+ 		case 0:
+-			/* Mask turnnel type */
++			/* Mask tunnel type */
+ 			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+ 			break;
+ 		case 1:
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/dpdk/drivers/net/ixgbe/ixgbe_flow.c
+index bdc9d4796c..368342872a 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_flow.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_flow.c
+@@ -135,7 +135,7 @@ const struct rte_flow_action *next_no_void_action(
+ }
+ 
+ /**
+- * Please aware there's an asumption for all the parsers.
++ * Please be aware there's an assumption for all the parsers.
+  * rte_flow_item is using big endian, rte_flow_attr and
+  * rte_flow_action are using CPU order.
+  * Because the pattern is used to describe the packets,
+@@ -3261,7 +3261,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
+ 
+ /**
+  * Check if the flow rule is supported by ixgbe.
+- * It only checkes the format. Don't guarantee the rule can be programmed into
++ * It only checks the format. Don't guarantee the rule can be programmed into
+  * the HW. Because there can be no enough room for the rule.
+  */
+ static int
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c b/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c
+index 944c9f2380..c353ae33b4 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c
+@@ -310,7 +310,7 @@ ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+ 			return -1;
+ 		}
+ 
+-		/* Disable and clear Rx SPI and key table table entryes*/
++		/* Disable and clear Rx SPI and key table table entries*/
+ 		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
+ 		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+ 		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_pf.c b/dpdk/drivers/net/ixgbe/ixgbe_pf.c
+index 9f1bd0a62b..c73833b7ae 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_pf.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_pf.c
+@@ -242,7 +242,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+ 	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
+ 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ 
+-	/* clear VMDq map to perment rar 0 */
++	/* clear VMDq map to permanent rar 0 */
+ 	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+ 
+ 	/* clear VMDq map to scan rar 127 */
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
+index d7c80d4242..99e928a2a9 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
+@@ -1954,7 +1954,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	 * register.
+ 	 * Update the RDT with the value of the last processed RX descriptor
+ 	 * minus 1, to guarantee that the RDT register is never equal to the
+-	 * RDH register, which creates a "full" ring situtation from the
++	 * RDH register, which creates a "full" ring situation from the
+ 	 * hardware point of view...
+ 	 */
+ 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+@@ -2303,7 +2303,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ 	 * register.
+ 	 * Update the RDT with the value of the last processed RX descriptor
+ 	 * minus 1, to guarantee that the RDT register is never equal to the
+-	 * RDH register, which creates a "full" ring situtation from the
++	 * RDH register, which creates a "full" ring situation from the
+ 	 * hardware point of view...
+ 	 */
+ 	if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+@@ -2666,7 +2666,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 	 */
+ 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ 			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+-	/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
++	/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
+ 	tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+ 			nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+ 	if (tx_conf->tx_rs_thresh > 0)
+@@ -4831,7 +4831,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
+ 				     dev->data->port_id);
+ 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+ 		} else {
+-			PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
++			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
+ 					    "single allocation) "
+ 					    "Scattered Rx callback "
+ 					    "(port=%d).",
+@@ -5170,7 +5170,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
+ 	/*
+ 	 * Setup the Checksum Register.
+ 	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+-	 * Enable IP/L4 checkum computation by hardware if requested to do so.
++	 * Enable IP/L4 checksum computation by hardware if requested to do so.
+ 	 */
+ 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ 	rxcsum |= IXGBE_RXCSUM_PCSD;
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+index 1eed949495..bb34b27168 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+@@ -364,6 +364,17 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 	uint8_t vlan_flags;
+ 	uint16_t udp_p_flag = 0; /* Rx Descriptor UDP header present */
+ 
++	/*
++	 * Under the circumstance that `rx_tail` wraps back to zero
++	 * and the advance speed of `rx_tail` is greater than `rxrearm_start`,
++	 * `rx_tail` will catch up with `rxrearm_start` and surpass it.
++	 * This may cause some mbufs to be reused by the application.
++	 *
++	 * So we need to make some restrictions to ensure that
++	 * `rx_tail` will not exceed `rxrearm_start`.
++	 */
++	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_RXQ_REARM_THRESH);
++
+ 	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
+ 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
+ 
+@@ -562,7 +573,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 
+ 		desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
+ 
+-		/* C.4 calc avaialbe number of desc */
++		/* C.4 calc available number of desc */
+ 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ 		nb_pkts_recd += var;
+ 		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
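The comment block added above documents a wrap hazard in the vector Rx ring: if a single burst may consume more descriptors than have been re-armed, the software tail can lap the rearm cursor and hand out mbufs still owned by in-flight packets. The fix is a one-line clamp ahead of the usual stride alignment; sketched below with constants that mirror the driver's macros (their exact values are not shown in this excerpt):

    #include <stdint.h>

    #define REARM_THRESH      32U  /* stands in for RTE_IXGBE_RXQ_REARM_THRESH */
    #define DESCS_PER_LOOP     4U  /* stands in for RTE_IXGBE_DESCS_PER_LOOP */
    #define MIN_U16(a, b)     ((a) < (b) ? (a) : (b))

    static uint16_t clamp_burst(uint16_t nb_pkts)
    {
        /* Never poll past the rearm window... */
        nb_pkts = MIN_U16(nb_pkts, REARM_THRESH);
        /* ...and keep the SSE loop's fixed 4-descriptor stride. */
        return (uint16_t)(nb_pkts - (nb_pkts % DESCS_PER_LOOP));
    }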
+diff --git a/dpdk/drivers/net/kni/rte_eth_kni.c b/dpdk/drivers/net/kni/rte_eth_kni.c
+index c428caf441..0532de5315 100644
+--- a/dpdk/drivers/net/kni/rte_eth_kni.c
++++ b/dpdk/drivers/net/kni/rte_eth_kni.c
+@@ -124,7 +124,7 @@ eth_kni_start(struct rte_eth_dev *dev)
+ 	struct pmd_internals *internals = dev->data->dev_private;
+ 	uint16_t port_id = dev->data->port_id;
+ 	struct rte_mempool *mb_pool;
+-	struct rte_kni_conf conf;
++	struct rte_kni_conf conf = {{0}};
+ 	const char *name = dev->device->name + 4; /* remove net_ */
+ 
+ 	mb_pool = internals->rx_queues[0].mb_pool;
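The KNI change is a classic uninitialized-stack-struct fix: rte_kni_conf has grown fields over time, and every member the driver does not set explicitly must read as zero, hence the {{0}} initializer (doubled braces because the first member is itself an aggregate). In C, either spelling below zeroes the whole object; the struct here is an illustrative stand-in, not the real rte_kni_conf layout:

    #include <string.h>

    struct kni_conf_like {
        char name[32];
        unsigned core_id;
        int force_bind;
    };

    void demo(void)
    {
        struct kni_conf_like a = {0};      /* value-initialize all members */
        struct kni_conf_like b;
        memset(&b, 0, sizeof(b));          /* equivalent explicit wipe */
        (void)a; (void)b;
    }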
+diff --git a/dpdk/drivers/net/memif/memif_socket.c b/dpdk/drivers/net/memif/memif_socket.c
+index 079cf01269..7886644412 100644
+--- a/dpdk/drivers/net/memif/memif_socket.c
++++ b/dpdk/drivers/net/memif/memif_socket.c
+@@ -402,11 +402,10 @@ memif_msg_enq_init(struct rte_eth_dev *dev)
+ {
+ 	struct pmd_internals *pmd = dev->data->dev_private;
+ 	struct memif_msg_queue_elt *e = memif_msg_enq(pmd->cc);
+-	memif_msg_init_t *i = &e->msg.init;
++	memif_msg_init_t *i;
+ 
+ 	if (e == NULL)
+ 		return -1;
+-
+ 	i = &e->msg.init;
+ 	e->msg.type = MEMIF_MSG_TYPE_INIT;
+ 	i->version = MEMIF_VERSION;
+@@ -726,7 +725,7 @@ memif_msg_receive(struct memif_control_channel *cc)
+ 		break;
+ 	case MEMIF_MSG_TYPE_INIT:
+ 		/*
+-		 * This cc does not have an interface asociated with it.
++		 * This cc does not have an interface associated with it.
+ 		 * If suitable interface is found it will be assigned here.
+ 		 */
+ 		ret = memif_msg_receive_init(cc, &msg);
+diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c
+index e3d523af57..205d08b028 100644
+--- a/dpdk/drivers/net/memif/rte_eth_memif.c
++++ b/dpdk/drivers/net/memif/rte_eth_memif.c
+@@ -351,13 +351,13 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 			goto no_free_bufs;
+ 		mbuf = mbuf_head;
+ 		mbuf->port = mq->in_port;
++		dst_off = 0;
+ 
+ next_slot:
+ 		s0 = cur_slot & mask;
+ 		d0 = &ring->desc[s0];
+ 
+ 		src_len = d0->length;
+-		dst_off = 0;
+ 		src_off = 0;
+ 
+ 		do {
+@@ -1026,7 +1026,7 @@ memif_regions_init(struct rte_eth_dev *dev)
+ 		if (ret < 0)
+ 			return ret;
+ 	} else {
+-		/* create one memory region contaning rings and buffers */
++		/* create one memory region containing rings and buffers */
+ 		ret = memif_region_init_shm(dev, /* has buffers */ 1);
+ 		if (ret < 0)
+ 			return ret;
+@@ -1500,23 +1500,6 @@ memif_stats_reset(struct rte_eth_dev *dev)
+ 	return 0;
+ }
+ 
+-static int
+-memif_rx_queue_intr_enable(struct rte_eth_dev *dev __rte_unused,
+-			   uint16_t qid __rte_unused)
+-{
+-	MIF_LOG(WARNING, "Interrupt mode not supported.");
+-
+-	return -1;
+-}
+-
+-static int
+-memif_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t qid __rte_unused)
+-{
+-	struct pmd_internals *pmd __rte_unused = dev->data->dev_private;
+-
+-	return 0;
+-}
+-
+ static const struct eth_dev_ops ops = {
+ 	.dev_start = memif_dev_start,
+ 	.dev_stop = memif_dev_stop,
+@@ -1527,8 +1510,6 @@ static const struct eth_dev_ops ops = {
+ 	.rx_queue_setup = memif_rx_queue_setup,
+ 	.rx_queue_release = memif_rx_queue_release,
+ 	.tx_queue_release = memif_tx_queue_release,
+-	.rx_queue_intr_enable = memif_rx_queue_intr_enable,
+-	.rx_queue_intr_disable = memif_rx_queue_intr_disable,
+ 	.link_update = memif_link_update,
+ 	.stats_get = memif_stats_get,
+ 	.stats_reset = memif_stats_reset,
+diff --git a/dpdk/drivers/net/mlx4/meson.build b/dpdk/drivers/net/mlx4/meson.build
+index 99a30eab8f..a038c1ec1b 100644
+--- a/dpdk/drivers/net/mlx4/meson.build
++++ b/dpdk/drivers/net/mlx4/meson.build
+@@ -42,7 +42,7 @@ foreach libname:libnames
+ endforeach
+ if static_ibverbs or dlopen_ibverbs
+     # Build without adding shared libs to Requires.private
+-    ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout()
++    ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs', check:true).stdout()
+     ext_deps += declare_dependency(compile_args: ibv_cflags.split())
+ endif
+ if static_ibverbs
+diff --git a/dpdk/drivers/net/mlx4/mlx4.h b/dpdk/drivers/net/mlx4/mlx4.h
+index 2d0c512f79..4023a47602 100644
+--- a/dpdk/drivers/net/mlx4/mlx4.h
++++ b/dpdk/drivers/net/mlx4/mlx4.h
+@@ -74,7 +74,7 @@ enum mlx4_mp_req_type {
+ 	MLX4_MP_REQ_STOP_RXTX,
+ };
+ 
+-/* Pameters for IPC. */
++/* Parameters for IPC. */
+ struct mlx4_mp_param {
+ 	enum mlx4_mp_req_type type;
+ 	int port_id;
+diff --git a/dpdk/drivers/net/mlx4/mlx4_ethdev.c b/dpdk/drivers/net/mlx4/mlx4_ethdev.c
+index d606ec8ca7..ce74c51ce2 100644
+--- a/dpdk/drivers/net/mlx4/mlx4_ethdev.c
++++ b/dpdk/drivers/net/mlx4/mlx4_ethdev.c
+@@ -752,7 +752,7 @@ mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+  *   Pointer to Ethernet device structure.
+  *
+  * @return
+- *   alwasy 0 on success
++ *   always 0 on success
+  */
+ int
+ mlx4_stats_reset(struct rte_eth_dev *dev)
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+index c19825ee52..fadcbd7ef7 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+@@ -38,6 +38,7 @@
+ #include <mlx5_devx_cmds.h>
+ #include <mlx5_common.h>
+ #include <mlx5_malloc.h>
++#include <mlx5_nl.h>
+ 
+ #include "mlx5.h"
+ #include "mlx5_rxtx.h"
+@@ -760,6 +761,56 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
+ 	}
+ }
+ 
++static void
++mlx5_dev_interrupt_nl_cb(struct nlmsghdr *hdr, void *cb_arg)
++{
++	struct mlx5_dev_ctx_shared *sh = cb_arg;
++	uint32_t i;
++	uint32_t if_index;
++
++	if (mlx5_nl_parse_link_status_update(hdr, &if_index) < 0)
++		return;
++	for (i = 0; i < sh->max_port; i++) {
++		struct mlx5_dev_shared_port *port = &sh->port[i];
++		struct rte_eth_dev *dev;
++		struct mlx5_priv *priv;
++
++		if (port->nl_ih_port_id >= RTE_MAX_ETHPORTS)
++			continue;
++		dev = &rte_eth_devices[port->nl_ih_port_id];
++		/* Probing may initiate an LSC before configuration is done. */
++		if (dev->data->dev_configured &&
++		    !dev->data->dev_conf.intr_conf.lsc)
++			break;
++		priv = dev->data->dev_private;
++		if (priv->if_index == if_index) {
++			/* Block logical LSC events. */
++			uint16_t prev_status = dev->data->dev_link.link_status;
++
++			if (mlx5_link_update(dev, 0) < 0)
++				DRV_LOG(ERR, "Failed to update link status: %s",
++					rte_strerror(rte_errno));
++			else if (prev_status != dev->data->dev_link.link_status)
++				rte_eth_dev_callback_process
++					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
++			break;
++		}
++	}
++}
++
++void
++mlx5_dev_interrupt_handler_nl(void *arg)
++{
++	struct mlx5_dev_ctx_shared *sh = arg;
++	int nlsk_fd = rte_intr_fd_get(sh->intr_handle_nl);
++
++	if (nlsk_fd < 0)
++		return;
++	if (mlx5_nl_read_events(nlsk_fd, mlx5_dev_interrupt_nl_cb, sh) < 0)
++		DRV_LOG(ERR, "Failed to process Netlink events: %s",
++			rte_strerror(rte_errno));
++}
++
+ /**
+  * Handle shared asynchronous events the NIC (removal event
+  * and link status change). Supports multiport IB device.
+@@ -823,18 +874,6 @@ mlx5_dev_interrupt_handler(void *cb_arg)
+ 		tmp = sh->port[tmp - 1].ih_port_id;
+ 		dev = &rte_eth_devices[tmp];
+ 		MLX5_ASSERT(dev);
+-		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+-		     event.event_type == IBV_EVENT_PORT_ERR) &&
+-			dev->data->dev_conf.intr_conf.lsc) {
+-			mlx5_glue->ack_async_event(&event);
+-			if (mlx5_link_update(dev, 0) == -EAGAIN) {
+-				usleep(0);
+-				continue;
+-			}
+-			rte_eth_dev_callback_process
+-				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+-			continue;
+-		}
+ 		DRV_LOG(DEBUG,
+ 			"port %u cannot handle an unknown event (type %d)",
+ 			dev->data->port_id, event.event_type);
+@@ -1079,7 +1118,6 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+ 	bool port_switch_id_set = false;
+ 	bool device_dir = false;
+ 	char c;
+-	int ret;
+ 
+ 	if (!if_indextoname(ifindex, ifname)) {
+ 		rte_errno = errno;
+@@ -1095,10 +1133,9 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+ 
+ 	file = fopen(phys_port_name, "rb");
+ 	if (file != NULL) {
+-		ret = fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", port_name);
+-		fclose(file);
+-		if (ret == 1)
++		if (fgets(port_name, IF_NAMESIZE, file) != NULL)
+ 			mlx5_translate_port_name(port_name, &data);
++		fclose(file);
+ 	}
+ 	file = fopen(phys_switch_id, "rb");
+ 	if (file == NULL) {
+@@ -1347,15 +1384,16 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
+ 		}
+ 	} else {
+ 		ret = _mlx5_os_read_dev_counters(dev, -1, stats);
++		if (ret)
++			return ret;
+ 	}
+ 	/* Read IB counters. */
+ 	for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) {
+ 		if (!xstats_ctrl->info[i].dev)
+ 			continue;
+-		ret = mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name,
+-					    &stats[i]);
+ 		/* return last xstats counter if fail to read. */
+-		if (ret != 0)
++		if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name,
++			    &stats[i]) == 0)
+ 			xstats_ctrl->xstats[i] = stats[i];
+ 		else
+ 			stats[i] = xstats_ctrl->xstats[i];
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c
+index 893f00b824..a5956c255a 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c
+@@ -14,7 +14,8 @@ mlx5_flow_os_init_workspace_once(void)
+ {
+ 	if (rte_thread_key_create(&key_workspace, flow_release_workspace)) {
+ 		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+-		return -ENOMEM;
++		rte_errno = ENOMEM;
++		return -rte_errno;
+ 	}
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c
+index c29fe3d92b..792dd2cb22 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c
+@@ -112,7 +112,7 @@ static struct mlx5_indexed_pool_config icfg[] = {
+  *   Pointer to RQ channel object, which includes the channel fd
+  *
+  * @param[out] fd
+- *   The file descriptor (representing the intetrrupt) used in this channel.
++ *   The file descriptor (representing the interrupt) used in this channel.
+  *
+  * @return
+  *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
+@@ -138,7 +138,7 @@ mlx5_os_set_nonblock_channel_fd(int fd)
+  *   Pointer to mlx5 device attributes.
+  *
+  * @return
+- *   0 on success, non zero error number otherwise
++ *   0 on success, a negative errno value otherwise and rte_errno is set.
+  */
+ int
+ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+@@ -150,8 +150,10 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ 
+ 	memset(device_attr, 0, sizeof(*device_attr));
+ 	err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
+-	if (err)
+-		return err;
++	if (err) {
++		rte_errno = errno;
++		return -rte_errno;
++	}
+ 	device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
+ 	device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
+ 	device_attr->max_sge = attr_ex.orig_attr.max_sge;
+@@ -170,8 +172,10 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ 
+ 	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
+ 	err = mlx5_glue->dv_query_device(ctx, &dv_attr);
+-	if (err)
+-		return err;
++	if (err) {
++		rte_errno = errno;
++		return -rte_errno;
++	}
+ 
+ 	device_attr->flags = dv_attr.flags;
+ 	device_attr->comp_mask = dv_attr.comp_mask;
+@@ -195,7 +199,7 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ 	strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
+ 		sizeof(device_attr->fw_ver));
+ 
+-	return err;
++	return 0;
+ }
+ 
+ /**
+@@ -881,10 +885,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	unsigned int mpls_en = 0;
+ 	unsigned int swp = 0;
+ 	unsigned int mprq = 0;
+-	unsigned int mprq_min_stride_size_n = 0;
+-	unsigned int mprq_max_stride_size_n = 0;
+-	unsigned int mprq_min_stride_num_n = 0;
+-	unsigned int mprq_max_stride_num_n = 0;
+ 	struct rte_ether_addr mac;
+ 	char name[RTE_ETH_NAME_MAX_LEN];
+ 	int own_domain_id = 0;
+@@ -981,10 +981,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 			strerror(rte_errno));
+ 		goto error;
+ 	}
+-	if (config->dv_miss_info) {
+-		if (switch_info->master || switch_info->representor)
+-			config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
+-	}
+ 	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+ 	if (!sh)
+ 		return NULL;
+@@ -1039,15 +1035,17 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 			mprq_caps.max_single_wqe_log_num_of_strides);
+ 		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
+ 			mprq_caps.supported_qpts);
++		DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
++			config->mprq.log_min_stride_wqe_size);
+ 		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
+ 		mprq = 1;
+-		mprq_min_stride_size_n =
++		config->mprq.log_min_stride_size =
+ 			mprq_caps.min_single_stride_log_num_of_bytes;
+-		mprq_max_stride_size_n =
++		config->mprq.log_max_stride_size =
+ 			mprq_caps.max_single_stride_log_num_of_bytes;
+-		mprq_min_stride_num_n =
++		config->mprq.log_min_stride_num =
+ 			mprq_caps.min_single_wqe_log_num_of_strides;
+-		mprq_max_stride_num_n =
++		config->mprq.log_max_stride_num =
+ 			mprq_caps.max_single_wqe_log_num_of_strides;
+ 	}
+ #endif
+@@ -1088,7 +1086,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 		" old OFED/rdma-core version or firmware configuration");
+ #endif
+ 	config->mpls_en = mpls_en;
+-	nl_rdma = mlx5_nl_init(NETLINK_RDMA);
++	nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
+ 	/* Check port status. */
+ 	if (spawn->phys_port <= UINT8_MAX) {
+ 		/* Legacy Verbs api only support u8 port number. */
+@@ -1135,7 +1133,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	priv->mtu = RTE_ETHER_MTU;
+ 	/* Some internal functions rely on Netlink sockets, open them now. */
+ 	priv->nl_socket_rdma = nl_rdma;
+-	priv->nl_socket_route =	mlx5_nl_init(NETLINK_ROUTE);
++	priv->nl_socket_route =	mlx5_nl_init(NETLINK_ROUTE, 0);
+ 	priv->representor = !!switch_info->representor;
+ 	priv->master = !!switch_info->master;
+ 	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+@@ -1243,6 +1241,32 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	}
+ 	/* Override some values set by hardware configuration. */
+ 	mlx5_args(config, dpdk_dev->devargs);
++	/* Update final values for devargs before checking sibling config. */
++	if (config->dv_miss_info) {
++		if (switch_info->master || switch_info->representor)
++			config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
++	}
++#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
++	if (config->dv_flow_en) {
++		DRV_LOG(WARNING, "DV flow is not supported.");
++		config->dv_flow_en = 0;
++	}
++#endif
++#ifdef HAVE_MLX5DV_DR_ESWITCH
++	if (!(sh->cdev->config.hca_attr.eswitch_manager && config->dv_flow_en &&
++	      (switch_info->representor || switch_info->master)))
++		config->dv_esw_en = 0;
++#else
++	config->dv_esw_en = 0;
++#endif
++	if (!priv->config.dv_esw_en &&
++	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
++		DRV_LOG(WARNING,
++			"Metadata mode %u is not supported (no E-Switch).",
++			priv->config.dv_xmeta_en);
++		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
++	}
++	/* Check sibling device configurations. */
+ 	err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev);
+ 	if (err)
+ 		goto error;
+@@ -1253,12 +1277,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
+ 	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ 	DRV_LOG(DEBUG, "counters are not supported");
+-#endif
+-#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
+-	if (config->dv_flow_en) {
+-		DRV_LOG(WARNING, "DV flow is not supported");
+-		config->dv_flow_en = 0;
+-	}
+ #endif
+ 	config->ind_table_max_size =
+ 		sh->device_attr.max_rwq_indirection_table_size;
+@@ -1548,36 +1566,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 		config->hw_fcs_strip = 0;
+ 	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+ 		(config->hw_fcs_strip ? "" : "not "));
+-	if (config->mprq.enabled && mprq) {
+-		if (config->mprq.stride_num_n &&
+-		    (config->mprq.stride_num_n > mprq_max_stride_num_n ||
+-		     config->mprq.stride_num_n < mprq_min_stride_num_n)) {
+-			config->mprq.stride_num_n =
+-				RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+-						mprq_min_stride_num_n),
+-					mprq_max_stride_num_n);
+-			DRV_LOG(WARNING,
+-				"the number of strides"
+-				" for Multi-Packet RQ is out of range,"
+-				" setting default value (%u)",
+-				1 << config->mprq.stride_num_n);
+-		}
+-		if (config->mprq.stride_size_n &&
+-		    (config->mprq.stride_size_n > mprq_max_stride_size_n ||
+-		     config->mprq.stride_size_n < mprq_min_stride_size_n)) {
+-			config->mprq.stride_size_n =
+-				RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
+-						mprq_min_stride_size_n),
+-					mprq_max_stride_size_n);
+-			DRV_LOG(WARNING,
+-				"the size of a stride"
+-				" for Multi-Packet RQ is out of range,"
+-				" setting default value (%u)",
+-				1 << config->mprq.stride_size_n);
+-		}
+-		config->mprq.min_stride_size_n = mprq_min_stride_size_n;
+-		config->mprq.max_stride_size_n = mprq_max_stride_size_n;
+-	} else if (config->mprq.enabled && !mprq) {
++	if (config->mprq.enabled && !mprq) {
+ 		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
+ 		config->mprq.enabled = 0;
+ 	}
+@@ -1676,20 +1665,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	/* Bring Ethernet device up. */
+ 	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
+ 		eth_dev->data->port_id);
+-	mlx5_set_link_up(eth_dev);
+-	/*
+-	 * Even though the interrupt handler is not installed yet,
+-	 * interrupts will still trigger on the async_fd from
+-	 * Verbs context returned by ibv_open_device().
+-	 */
++	/* Read link status in case it is up and there will be no event. */
+ 	mlx5_link_update(eth_dev, 0);
+-#ifdef HAVE_MLX5DV_DR_ESWITCH
+-	if (!(config->hca_attr.eswitch_manager && config->dv_flow_en &&
+-	      (switch_info->representor || switch_info->master)))
+-		config->dv_esw_en = 0;
+-#else
+-	config->dv_esw_en = 0;
+-#endif
++	/* Watch LSC interrupts between port probe and port start. */
++	priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
++							eth_dev->data->port_id;
++	mlx5_set_link_up(eth_dev);
+ 	/* Detect minimal data bytes to inline. */
+ 	mlx5_set_min_inline(spawn, config);
+ 	/* Store device configuration on private structure. */
+@@ -1743,7 +1724,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
+ 	if (!priv->drop_queue.hrxq)
+ 		goto error;
+-	/* Port representor shares the same max prioirity with pf port. */
++	/* Port representor shares the same max priority with pf port. */
+ 	if (!priv->sh->flow_priority_check_flag) {
+ 		/* Supported Verbs flow priority number detection. */
+ 		err = mlx5_flow_discover_priorities(eth_dev);
+@@ -1756,12 +1737,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 		err = -err;
+ 		goto error;
+ 	}
+-	if (!priv->config.dv_esw_en &&
+-	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+-		DRV_LOG(WARNING, "metadata mode %u is not supported "
+-				 "(no E-Switch)", priv->config.dv_xmeta_en);
+-		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
+-	}
+ 	mlx5_set_metadata_mask(eth_dev);
+ 	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ 	    !priv->sh->dv_regc0_mask) {
+@@ -2068,7 +2043,8 @@ mlx5_device_bond_pci_match(const char *ibdev_name,
+ }
+ 
+ static void
+-mlx5_os_config_default(struct mlx5_dev_config *config)
++mlx5_os_config_default(struct mlx5_dev_config *config,
++		       struct mlx5_common_dev_config *cconf)
+ {
+ 	memset(config, 0, sizeof(*config));
+ 	config->mps = MLX5_ARG_UNSET;
+@@ -2080,6 +2056,10 @@ mlx5_os_config_default(struct mlx5_dev_config *config)
+ 	config->vf_nl_en = 1;
+ 	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
+ 	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
++	config->mprq.log_min_stride_wqe_size = cconf->devx ?
++					cconf->hca_attr.log_min_stride_wqe_sz :
++					MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
++	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+ 	config->dv_esw_en = 1;
+ 	config->dv_flow_en = 1;
+ 	config->decap_en = 1;
+@@ -2156,8 +2136,8 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
+ 	 * matching ones, gathering into the list.
+ 	 */
+ 	struct ibv_device *ibv_match[ret + 1];
+-	int nl_route = mlx5_nl_init(NETLINK_ROUTE);
+-	int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
++	int nl_route = mlx5_nl_init(NETLINK_ROUTE, 0);
++	int nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
+ 	unsigned int i;
+ 
+ 	while (ret-- > 0) {
+@@ -2209,9 +2189,9 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
+ 	if (!nd) {
+ 		/* No device matches, just complain and bail out. */
+ 		DRV_LOG(WARNING,
+-			"No Verbs device matches PCI device " PCI_PRI_FMT ","
++			"PF %u doesn't have Verbs device matches PCI device " PCI_PRI_FMT ","
+ 			" are kernel drivers loaded?",
+-			owner_pci.domain, owner_pci.bus,
++			owner_id, owner_pci.domain, owner_pci.bus,
+ 			owner_pci.devid, owner_pci.function);
+ 		rte_errno = ENOENT;
+ 		ret = -rte_errno;
+@@ -2300,7 +2280,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
+ 						/*
+ 						 * Force standalone bonding
+ 						 * device for ROCE LAG
+-						 * confgiurations.
++						 * configurations.
+ 						 */
+ 						list[ns].info.master = 0;
+ 						list[ns].info.representor = 0;
+@@ -2496,7 +2476,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
+ 		uint32_t restore;
+ 
+ 		/* Default configuration. */
+-		mlx5_os_config_default(&dev_config);
++		mlx5_os_config_default(&dev_config, &cdev->config);
+ 		dev_config.vf = dev_config_vf;
+ 		list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
+ 						 &dev_config, &eth_da);
+@@ -2632,16 +2612,16 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev)
+ 		for (p = 0; p < eth_da.nb_ports; p++) {
+ 			ret = mlx5_os_pci_probe_pf(cdev, &eth_da,
+ 						   eth_da.ports[p]);
+-			if (ret)
+-				break;
+-		}
+-		if (ret) {
+-			DRV_LOG(ERR, "Probe of PCI device " PCI_PRI_FMT " "
+-				"aborted due to proding failure of PF %u",
+-				pci_dev->addr.domain, pci_dev->addr.bus,
+-				pci_dev->addr.devid, pci_dev->addr.function,
+-				eth_da.ports[p]);
+-			mlx5_net_remove(cdev);
++			if (ret) {
++				DRV_LOG(INFO, "Probe of PCI device " PCI_PRI_FMT " "
++					"aborted due to proding failure of PF %u",
++					pci_dev->addr.domain, pci_dev->addr.bus,
++					pci_dev->addr.devid, pci_dev->addr.function,
++					eth_da.ports[p]);
++				mlx5_net_remove(cdev);
++				if (p != 0)
++					break;
++			}
+ 		}
+ 	} else {
+ 		ret = mlx5_os_pci_probe_pf(cdev, &eth_da, 0);
+@@ -2666,7 +2646,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
+ 	if (ret != 0)
+ 		return ret;
+ 	/* Set default config data. */
+-	mlx5_os_config_default(&config);
++	mlx5_os_config_default(&config, &cdev->config);
+ 	config.sf = 1;
+ 	/* Init spawn data. */
+ 	spawn.max_port = 1;
+@@ -2733,6 +2713,40 @@ mlx5_os_net_cleanup(void)
+ 	mlx5_pmd_socket_uninit();
+ }
+ 
++static int
++mlx5_os_dev_shared_handler_install_lsc(struct mlx5_dev_ctx_shared *sh)
++{
++	int nlsk_fd, flags, ret;
++
++	nlsk_fd = mlx5_nl_init(NETLINK_ROUTE, RTMGRP_LINK);
++	if (nlsk_fd < 0) {
++		DRV_LOG(ERR, "Failed to create a socket for Netlink events: %s",
++			rte_strerror(rte_errno));
++		return -1;
++	}
++	flags = fcntl(nlsk_fd, F_GETFL);
++	ret = fcntl(nlsk_fd, F_SETFL, flags | O_NONBLOCK);
++	if (ret != 0) {
++		DRV_LOG(ERR, "Failed to make Netlink event socket non-blocking: %s",
++			strerror(errno));
++		rte_errno = errno;
++		goto error;
++	}
++	rte_intr_type_set(sh->intr_handle_nl, RTE_INTR_HANDLE_EXT);
++	rte_intr_fd_set(sh->intr_handle_nl, nlsk_fd);
++	if (rte_intr_callback_register(sh->intr_handle_nl,
++				       mlx5_dev_interrupt_handler_nl,
++				       sh) != 0) {
++		DRV_LOG(ERR, "Failed to register Netlink events interrupt");
++		rte_intr_fd_set(sh->intr_handle_nl, -1);
++		goto error;
++	}
++	return 0;
++error:
++	close(nlsk_fd);
++	return -1;
++}
++
+ /**
+  * Install shared asynchronous device events handler.
+  * This function is implemented to support event sharing
+@@ -2770,6 +2784,18 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
+ 			rte_intr_fd_set(sh->intr_handle, -1);
+ 		}
+ 	}
++	sh->intr_handle_nl = rte_intr_instance_alloc
++						(RTE_INTR_INSTANCE_F_SHARED);
++	if (sh->intr_handle_nl == NULL) {
++		DRV_LOG(ERR, "Fail to allocate intr_handle");
++		rte_errno = ENOMEM;
++		return;
++	}
++	rte_intr_fd_set(sh->intr_handle_nl, -1);
++	if (mlx5_os_dev_shared_handler_install_lsc(sh) < 0) {
++		DRV_LOG(INFO, "Fail to install the shared Netlink event handler.");
++		rte_intr_fd_set(sh->intr_handle_nl, -1);
++	}
+ 	if (sh->devx) {
+ #ifdef HAVE_IBV_DEVX_ASYNC
+ 		sh->intr_handle_devx =
+@@ -2817,10 +2843,19 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
+ void
+ mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
+ {
++	int nlsk_fd;
++
+ 	if (rte_intr_fd_get(sh->intr_handle) >= 0)
+ 		mlx5_intr_callback_unregister(sh->intr_handle,
+ 					      mlx5_dev_interrupt_handler, sh);
+ 	rte_intr_instance_free(sh->intr_handle);
++	nlsk_fd = rte_intr_fd_get(sh->intr_handle_nl);
++	if (nlsk_fd >= 0) {
++		mlx5_intr_callback_unregister
++			(sh->intr_handle_nl, mlx5_dev_interrupt_handler_nl, sh);
++		close(nlsk_fd);
++	}
++	rte_intr_instance_free(sh->intr_handle_nl);
+ #ifdef HAVE_IBV_DEVX_ASYNC
+ 	if (rte_intr_fd_get(sh->intr_handle_devx) >= 0)
+ 		rte_intr_callback_unregister(sh->intr_handle_devx,
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c b/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c
+index 58556d2bf0..b113731097 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_verbs.c
+@@ -94,7 +94,6 @@ mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
+ 		.qp_state = IBV_QPS_RESET,
+ 		.port_num = dev_port,
+ 	};
+-	int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
+ 	int ret;
+ 
+ 	if (type != MLX5_TXQ_MOD_RST2RDY) {
+@@ -108,10 +107,8 @@ mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
+ 		if (type == MLX5_TXQ_MOD_RDY2RST)
+ 			return 0;
+ 	}
+-	if (type == MLX5_TXQ_MOD_ERR2RDY)
+-		attr_mask = IBV_QP_STATE;
+ 	mod.qp_state = IBV_QPS_INIT;
+-	ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
++	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
+ 			strerror(errno));
+@@ -272,8 +269,8 @@ mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
+ 
+ 		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ 		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
+-			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
+-			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
++			.single_stride_log_num_of_bytes = rxq_data->log_strd_sz,
++			.single_wqe_log_num_of_strides = rxq_data->log_strd_num,
+ 			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
+ 		};
+ 	}
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_vlan_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_vlan_os.c
+index 005904bdfe..7ee2460a23 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_vlan_os.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_vlan_os.c
+@@ -136,7 +136,7 @@ mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
+ 		return NULL;
+ 	}
+ 	rte_spinlock_init(&vmwa->sl);
+-	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
++	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE, 0);
+ 	if (vmwa->nl_socket < 0) {
+ 		DRV_LOG(WARNING,
+ 			"Can not create Netlink socket"
+diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c
+index aa5f313c1a..2234dc7563 100644
+--- a/dpdk/drivers/net/mlx5/mlx5.c
++++ b/dpdk/drivers/net/mlx5/mlx5.c
+@@ -1172,12 +1172,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ 	MLX5_ASSERT(spawn->max_port);
+ 	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+ 			 sizeof(struct mlx5_dev_ctx_shared) +
+-			 spawn->max_port *
+-			 sizeof(struct mlx5_dev_shared_port),
++			 spawn->max_port * sizeof(struct mlx5_dev_shared_port),
+ 			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ 	if (!sh) {
+-		DRV_LOG(ERR, "shared context allocation failure");
+-		rte_errno  = ENOMEM;
++		DRV_LOG(ERR, "Shared context allocation failure.");
++		rte_errno = ENOMEM;
+ 		goto exit;
+ 	}
+ 	pthread_mutex_init(&sh->txpp.mutex, NULL);
+@@ -1199,24 +1198,24 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ 	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
+ 		sizeof(sh->ibdev_path) - 1);
+ 	/*
+-	 * Setting port_id to max unallowed value means
+-	 * there is no interrupt subhandler installed for
+-	 * the given port index i.
++	 * Setting port_id to max unallowed value means there is no interrupt
++	 * subhandler installed for the given port index i.
+ 	 */
+ 	for (i = 0; i < sh->max_port; i++) {
+ 		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
+ 		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
++		sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
+ 	}
+ 	if (sh->devx) {
+ 		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
+ 		if (!sh->td) {
+ 			DRV_LOG(ERR, "TD allocation failure");
+-			err = ENOMEM;
++			rte_errno = ENOMEM;
+ 			goto error;
+ 		}
+ 		if (mlx5_setup_tis(sh)) {
+ 			DRV_LOG(ERR, "TIS allocation failure");
+-			err = ENOMEM;
++			rte_errno = ENOMEM;
+ 			goto error;
+ 		}
+ 		err = mlx5_rxtx_uars_prepare(sh);
+@@ -1246,19 +1245,19 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ 	return sh;
+ error:
++	err = rte_errno;
+ 	pthread_mutex_destroy(&sh->txpp.mutex);
+ 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ 	MLX5_ASSERT(sh);
+-	if (sh->td)
+-		claim_zero(mlx5_devx_cmd_destroy(sh->td));
++	mlx5_rxtx_uars_release(sh);
+ 	i = 0;
+ 	do {
+ 		if (sh->tis[i])
+ 			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+ 	} while (++i < (uint32_t)sh->bond.n_port);
+-	mlx5_rxtx_uars_release(sh);
++	if (sh->td)
++		claim_zero(mlx5_devx_cmd_destroy(sh->td));
+ 	mlx5_free(sh);
+-	MLX5_ASSERT(err > 0);
+ 	rte_errno = err;
+ 	return NULL;
+ }
+@@ -1321,6 +1320,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
+ 	 *  Only primary process handles async device events.
+ 	 **/
+ 	mlx5_flow_counters_mng_close(sh);
++	if (sh->ct_mng)
++		mlx5_flow_aso_ct_mng_close(sh);
+ 	if (sh->aso_age_mng) {
+ 		mlx5_flow_aso_age_mng_close(sh);
+ 		sh->aso_age_mng = NULL;
+@@ -1594,8 +1595,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
+ 	if (priv->mreg_cp_tbl)
+ 		mlx5_hlist_destroy(priv->mreg_cp_tbl);
+ 	mlx5_mprq_free_mp(dev);
+-	if (priv->sh->ct_mng)
+-		mlx5_flow_aso_ct_mng_close(priv->sh);
+ 	mlx5_os_free_shared_dr(priv);
+ 	if (priv->rss_conf.rss_key != NULL)
+ 		mlx5_free(priv->rss_conf.rss_key);
+@@ -1642,7 +1641,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
+ 	/*
+ 	 * Free the shared context in last turn, because the cleanup
+ 	 * routines above may use some shared fields, like
+-	 * mlx5_os_mac_addr_flush() uses ibdev_path for retrieveing
++	 * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
+ 	 * ifindex if Netlink fails.
+ 	 */
+ 	mlx5_free_shared_dev_ctx(priv->sh);
+@@ -1884,9 +1883,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
+ 	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
+ 		config->mprq.enabled = !!tmp;
+ 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
+-		config->mprq.stride_num_n = tmp;
++		config->mprq.log_stride_num = tmp;
+ 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
+-		config->mprq.stride_size_n = tmp;
++		config->mprq.log_stride_size = tmp;
+ 	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
+ 		config->mprq.max_memcpy_len = tmp;
+ 	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
+@@ -1962,7 +1961,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
+ 		if (tmp != MLX5_RCM_NONE &&
+ 		    tmp != MLX5_RCM_LIGHT &&
+ 		    tmp != MLX5_RCM_AGGR) {
+-			DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
++			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
+ 			rte_errno = EINVAL;
+ 			return -rte_errno;
+ 		}
+@@ -2177,17 +2176,17 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev)
+ 		break;
+ 	}
+ 	if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
+-		DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X",
++		DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
+ 				 sh->dv_mark_mask, mark);
+ 	else
+ 		sh->dv_mark_mask = mark;
+ 	if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
+-		DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X",
++		DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
+ 				 sh->dv_meta_mask, meta);
+ 	else
+ 		sh->dv_meta_mask = meta;
+ 	if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
+-		DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X",
++		DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
+ 				 sh->dv_meta_mask, reg_c0);
+ 	else
+ 		sh->dv_regc0_mask = reg_c0;
+diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h
+index 8466531060..128ebd6937 100644
+--- a/dpdk/drivers/net/mlx5/mlx5.h
++++ b/dpdk/drivers/net/mlx5/mlx5.h
+@@ -275,10 +275,14 @@ struct mlx5_dev_config {
+ 	unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
+ 	struct {
+ 		unsigned int enabled:1; /* Whether MPRQ is enabled. */
+-		unsigned int stride_num_n; /* Number of strides. */
+-		unsigned int stride_size_n; /* Size of a stride. */
+-		unsigned int min_stride_size_n; /* Min size of a stride. */
+-		unsigned int max_stride_size_n; /* Max size of a stride. */
++		unsigned int log_stride_num; /* Log number of strides. */
++		unsigned int log_stride_size; /* Log size of a stride. */
++		unsigned int log_min_stride_size; /* Log min size of a stride.*/
++		unsigned int log_max_stride_size; /* Log max size of a stride.*/
++		unsigned int log_min_stride_num; /* Log min num of strides. */
++		unsigned int log_max_stride_num; /* Log max num of strides. */
++		unsigned int log_min_stride_wqe_size;
++		/* Log min WQE size, (size of single stride)*(num of strides).*/
+ 		unsigned int max_memcpy_len;
+ 		/* Maximum packet size to memcpy Rx packets. */
+ 		unsigned int min_rxqs_num;
+@@ -601,6 +605,7 @@ struct mlx5_age_info {
+ struct mlx5_dev_shared_port {
+ 	uint32_t ih_port_id;
+ 	uint32_t devx_ih_port_id;
++	uint32_t nl_ih_port_id;
+ 	/*
+ 	 * Interrupt handler port_id. Used by shared interrupt
+ 	 * handler to find the corresponding rte_eth device
+@@ -742,6 +747,8 @@ struct mlx5_flow_meter_policy {
+ 	/* If yellow color policy is skipped. */
+ 	uint32_t skip_g:1;
+ 	/* If green color policy is skipped. */
++	uint32_t mark:1;
++	/* If policy contains mark action. */
+ 	rte_spinlock_t sl;
+ 	uint32_t ref_cnt;
+ 	/* Use count. */
+@@ -956,7 +963,6 @@ union mlx5_flow_tbl_key {
+ /* Table structure. */
+ struct mlx5_flow_tbl_resource {
+ 	void *obj; /**< Pointer to DR table object. */
+-	uint32_t refcnt; /**< Reference counter. */
+ };
+ 
+ #define MLX5_MAX_TABLES UINT16_MAX
+@@ -977,7 +983,7 @@ struct mlx5_flow_id_pool {
+ 	uint32_t base_index;
+ 	/**< The next index that can be used without any free elements. */
+ 	uint32_t *curr; /**< Pointer to the index to pop. */
+-	uint32_t *last; /**< Pointer to the last element in the empty arrray. */
++	uint32_t *last; /**< Pointer to the last element in the empty array. */
+ 	uint32_t max_id; /**< Maximum id can be allocated from the pool. */
+ };
+ 
+@@ -1014,7 +1020,7 @@ struct mlx5_dev_txpp {
+ 	void *pp; /* Packet pacing context. */
+ 	uint16_t pp_id; /* Packet pacing context index. */
+ 	uint16_t ts_n; /* Number of captured timestamps. */
+-	uint16_t ts_p; /* Pointer to statisticks timestamp. */
++	uint16_t ts_p; /* Pointer to statistics timestamp. */
+ 	struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */
+ 	struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
+ 	uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
+@@ -1118,7 +1124,7 @@ struct mlx5_flex_parser_devx {
+ 	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+ };
+ 
+-/* Pattern field dscriptor - how to translate flex pattern into samples. */
++/* Pattern field descriptor - how to translate flex pattern into samples. */
+ __extension__
+ struct mlx5_flex_pattern_field {
+ 	uint16_t width:6;
+@@ -1169,7 +1175,7 @@ struct mlx5_dev_ctx_shared {
+ 	/* Shared DV/DR flow data section. */
+ 	uint32_t dv_meta_mask; /* flow META metadata supported mask. */
+ 	uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
+-	uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */
++	uint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */
+ 	void *fdb_domain; /* FDB Direct Rules name space handle. */
+ 	void *rx_domain; /* RX Direct Rules name space handle. */
+ 	void *tx_domain; /* TX Direct Rules name space handle. */
+@@ -1199,6 +1205,7 @@ struct mlx5_dev_ctx_shared {
+ 	/* Shared interrupt handler section. */
+ 	struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
+ 	struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
++	struct rte_intr_handle *intr_handle_nl; /* Netlink interrupt handler. */
+ 	void *devx_comp; /* DEVX async comp obj. */
+ 	struct mlx5_devx_obj *tis[16]; /* TIS object. */
+ 	struct mlx5_devx_obj *td; /* Transport domain. */
+@@ -1409,6 +1416,7 @@ struct mlx5_priv {
+ 	unsigned int mtr_en:1; /* Whether support meter. */
+ 	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
+ 	unsigned int lb_used:1; /* Loopback queue is referred to. */
++	uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */
+ 	uint16_t domain_id; /* Switch domain identifier. */
+ 	uint16_t vport_id; /* Associated VF vport index (if any). */
+ 	uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */
+@@ -1580,6 +1588,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
+ 			   struct rte_eth_fc_conf *fc_conf);
+ void mlx5_dev_interrupt_handler(void *arg);
+ void mlx5_dev_interrupt_handler_devx(void *arg);
++void mlx5_dev_interrupt_handler_nl(void *arg);
+ int mlx5_set_link_down(struct rte_eth_dev *dev);
+ int mlx5_set_link_up(struct rte_eth_dev *dev);
+ int mlx5_is_removed(struct rte_eth_dev *dev);
+diff --git a/dpdk/drivers/net/mlx5/mlx5_defs.h b/dpdk/drivers/net/mlx5/mlx5_defs.h
+index 258475ed2c..2d48fde010 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_defs.h
++++ b/dpdk/drivers/net/mlx5/mlx5_defs.h
+@@ -50,7 +50,7 @@
+ #define MLX5_MAX_XSTATS 32
+ 
+ /* Maximum Packet headers size (L2+L3+L4) for TSO. */
+-#define MLX5_MAX_TSO_HEADER (128u + 34u)
++#define MLX5_MAX_TSO_HEADER 192U
+ 
+ /* Inline data size required by NICs. */
+ #define MLX5_INLINE_HSIZE_NONE 0
+@@ -113,10 +113,10 @@
+ #define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1)
+ 
+ /* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
+-#define MLX5_MPRQ_STRIDE_NUM_N 6U
++#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM 6U
+ 
+ /* Log 2 of the default size of a stride per WQE for Multi-Packet RQ. */
+-#define MLX5_MPRQ_STRIDE_SIZE_N 11U
++#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE 11U
+ 
+ /* Two-byte shift is disabled for Multi-Packet RQ. */
+ #define MLX5_MPRQ_TWO_BYTE_SHIFT 0
+diff --git a/dpdk/drivers/net/mlx5/mlx5_devx.c b/dpdk/drivers/net/mlx5/mlx5_devx.c
+index 105c3d67f0..44c439bb55 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_devx.c
++++ b/dpdk/drivers/net/mlx5/mlx5_devx.c
+@@ -257,11 +257,11 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
+ 		 * 512*2^single_wqe_log_num_of_strides.
+ 		 */
+ 		rq_attr.wq_attr.single_wqe_log_num_of_strides =
+-				rxq_data->strd_num_n -
++				rxq_data->log_strd_num -
+ 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ 		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
+ 		rq_attr.wq_attr.single_stride_log_num_of_bytes =
+-				rxq_data->strd_sz_n -
++				rxq_data->log_strd_sz -
+ 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ 		wqe_size = sizeof(struct mlx5_wqe_mprq);
+ 	} else {
+@@ -706,7 +706,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	enum mlx5_rxq_type rxq_obj_type;
+-	bool lro = true;
++	bool lro = false;
+ 	uint32_t i;
+ 
+ 	/* NULL queues designate drop queue. */
+@@ -715,6 +715,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
+ 				mlx5_rxq_ctrl_get(dev, ind_tbl->queues[0]);
+ 		rxq_obj_type = rxq_ctrl != NULL ? rxq_ctrl->type :
+ 						  MLX5_RXQ_TYPE_STANDARD;
++		lro = true;
+ 
+ 		/* Enable TIR LRO only if all the queues were configured for. */
+ 		for (i = 0; i < ind_tbl->queues_n; ++i) {
+@@ -768,6 +769,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
+ 		tir_attr->self_lb_block =
+ 					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ 	if (lro) {
++		MLX5_ASSERT(priv->config.lro.supported);
+ 		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
+ 		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
+ 		tir_attr->lro_enable_mask =
+@@ -931,6 +933,8 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
+ 		rte_errno = ENOMEM;
+ 		goto error;
+ 	}
++	/* Set the CPU socket ID where the rxq_ctrl was allocated. */
++	rxq_ctrl->socket = socket_id;
+ 	rxq_obj->rxq_ctrl = rxq_ctrl;
+ 	rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
+ 	rxq_ctrl->sh = priv->sh;
+diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c
+index dc647d5580..9c44471c42 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c
++++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c
+@@ -109,7 +109,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
+ 				       MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ 				       sizeof(void *) * rxqs_n, 0,
+ 				       SOCKET_ID_ANY);
+-	if (priv->rxq_privs == NULL) {
++	if (rxqs_n && priv->rxq_privs == NULL) {
+ 		DRV_LOG(ERR, "port %u cannot allocate rxq private data",
+ 			dev->data->port_id);
+ 		rte_errno = ENOMEM;
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c
+index f34e4b88aa..42de516bfd 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow.c
+@@ -18,6 +18,7 @@
+ #include <rte_flow_driver.h>
+ #include <rte_malloc.h>
+ #include <rte_ip.h>
++#include <rte_bus_pci.h>
+ 
+ #include <mlx5_glue.h>
+ #include <mlx5_devx_cmds.h>
+@@ -148,6 +149,8 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
+ 	case RTE_FLOW_ITEM_TYPE_IPV6:
+ 	case RTE_FLOW_ITEM_TYPE_UDP:
+ 	case RTE_FLOW_ITEM_TYPE_TCP:
++	case RTE_FLOW_ITEM_TYPE_ICMP:
++	case RTE_FLOW_ITEM_TYPE_ICMP6:
+ 	case RTE_FLOW_ITEM_TYPE_VXLAN:
+ 	case RTE_FLOW_ITEM_TYPE_NVGRE:
+ 	case RTE_FLOW_ITEM_TYPE_GRE:
+@@ -164,128 +167,152 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
+ 	return false;
+ }
+ 
++/**
++ * Network Service Header (NSH) and its next protocol values
++ * are described in RFC-8393.
++ */
++static enum rte_flow_item_type
++mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
++{
++	enum rte_flow_item_type type;
++
++	switch (proto_mask & proto_spec) {
++	case 0:
++		type = RTE_FLOW_ITEM_TYPE_VOID;
++		break;
++	case RTE_VXLAN_GPE_TYPE_IPV4:
++		type = RTE_FLOW_ITEM_TYPE_IPV4;
++		break;
++	case RTE_VXLAN_GPE_TYPE_IPV6:
++		type = RTE_FLOW_ITEM_TYPE_IPV6;
++		break;
++	case RTE_VXLAN_GPE_TYPE_ETH:
++		type = RTE_FLOW_ITEM_TYPE_ETH;
++		break;
++	default:
++		type = RTE_FLOW_ITEM_TYPE_END;
++	}
++	return type;
++}
++
++static enum rte_flow_item_type
++mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
++{
++	enum rte_flow_item_type type;
++
++	switch (proto_mask & proto_spec) {
++	case 0:
++		type = RTE_FLOW_ITEM_TYPE_VOID;
++		break;
++	case IPPROTO_UDP:
++		type = RTE_FLOW_ITEM_TYPE_UDP;
++		break;
++	case IPPROTO_TCP:
++		type = RTE_FLOW_ITEM_TYPE_TCP;
++		break;
++	case IPPROTO_IPIP:
++		type = RTE_FLOW_ITEM_TYPE_IPV4;
++		break;
++	case IPPROTO_IPV6:
++		type = RTE_FLOW_ITEM_TYPE_IPV6;
++		break;
++	default:
++		type = RTE_FLOW_ITEM_TYPE_END;
++	}
++	return type;
++}
++
++static enum rte_flow_item_type
++mlx5_ethertype_to_item_type(rte_be16_t type_spec,
++			    rte_be16_t type_mask, bool is_tunnel)
++{
++	enum rte_flow_item_type type;
++
++	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
++	case 0:
++		type = RTE_FLOW_ITEM_TYPE_VOID;
++		break;
++	case RTE_ETHER_TYPE_TEB:
++		type = is_tunnel ?
++		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
++		break;
++	case RTE_ETHER_TYPE_VLAN:
++		type = !is_tunnel ?
++		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
++		break;
++	case RTE_ETHER_TYPE_IPV4:
++		type = RTE_FLOW_ITEM_TYPE_IPV4;
++		break;
++	case RTE_ETHER_TYPE_IPV6:
++		type = RTE_FLOW_ITEM_TYPE_IPV6;
++		break;
++	default:
++		type = RTE_FLOW_ITEM_TYPE_END;
++	}
++	return type;
++}
++
+ static enum rte_flow_item_type
+ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
+ {
+-	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
+-	uint16_t ether_type = 0;
+-	uint16_t ether_type_m;
+-	uint8_t ip_next_proto = 0;
+-	uint8_t ip_next_proto_m;
++#define MLX5_XSET_ITEM_MASK_SPEC(type, fld)                              \
++	do {                                                             \
++		const void *m = item->mask;                              \
++		const void *s = item->spec;                              \
++		mask = m ?                                               \
++			((const struct rte_flow_item_##type *)m)->fld :  \
++			rte_flow_item_##type##_mask.fld;                 \
++		spec = ((const struct rte_flow_item_##type *)s)->fld;    \
++	} while (0)
++
++	enum rte_flow_item_type ret;
++	uint16_t spec, mask;
+ 
+ 	if (item == NULL || item->spec == NULL)
+-		return ret;
++		return RTE_FLOW_ITEM_TYPE_VOID;
+ 	switch (item->type) {
+ 	case RTE_FLOW_ITEM_TYPE_ETH:
+-		if (item->mask)
+-			ether_type_m = ((const struct rte_flow_item_eth *)
+-						(item->mask))->type;
+-		else
+-			ether_type_m = rte_flow_item_eth_mask.type;
+-		if (ether_type_m != RTE_BE16(0xFFFF))
+-			break;
+-		ether_type = ((const struct rte_flow_item_eth *)
+-				(item->spec))->type;
+-		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+-		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+-		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
+-			ret = RTE_FLOW_ITEM_TYPE_VLAN;
+-		else
+-			ret = RTE_FLOW_ITEM_TYPE_END;
++		MLX5_XSET_ITEM_MASK_SPEC(eth, type);
++		if (!mask)
++			return RTE_FLOW_ITEM_TYPE_VOID;
++		ret = mlx5_ethertype_to_item_type(spec, mask, false);
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_VLAN:
+-		if (item->mask)
+-			ether_type_m = ((const struct rte_flow_item_vlan *)
+-						(item->mask))->inner_type;
+-		else
+-			ether_type_m = rte_flow_item_vlan_mask.inner_type;
+-		if (ether_type_m != RTE_BE16(0xFFFF))
+-			break;
+-		ether_type = ((const struct rte_flow_item_vlan *)
+-				(item->spec))->inner_type;
+-		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+-		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+-		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
+-			ret = RTE_FLOW_ITEM_TYPE_VLAN;
+-		else
+-			ret = RTE_FLOW_ITEM_TYPE_END;
++		MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
++		if (!mask)
++			return RTE_FLOW_ITEM_TYPE_VOID;
++		ret = mlx5_ethertype_to_item_type(spec, mask, false);
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_IPV4:
+-		if (item->mask)
+-			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
+-					(item->mask))->hdr.next_proto_id;
+-		else
+-			ip_next_proto_m =
+-				rte_flow_item_ipv4_mask.hdr.next_proto_id;
+-		if (ip_next_proto_m != 0xFF)
+-			break;
+-		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
+-				(item->spec))->hdr.next_proto_id;
+-		if (ip_next_proto == IPPROTO_UDP)
+-			ret = RTE_FLOW_ITEM_TYPE_UDP;
+-		else if (ip_next_proto == IPPROTO_TCP)
+-			ret = RTE_FLOW_ITEM_TYPE_TCP;
+-		else if (ip_next_proto == IPPROTO_IP)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+-		else if (ip_next_proto == IPPROTO_IPV6)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+-		else
+-			ret = RTE_FLOW_ITEM_TYPE_END;
++		MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
++		if (!mask)
++			return RTE_FLOW_ITEM_TYPE_VOID;
++		ret = mlx5_inet_proto_to_item_type(spec, mask);
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_IPV6:
+-		if (item->mask)
+-			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
+-						(item->mask))->hdr.proto;
+-		else
+-			ip_next_proto_m =
+-				rte_flow_item_ipv6_mask.hdr.proto;
+-		if (ip_next_proto_m != 0xFF)
+-			break;
+-		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
+-				(item->spec))->hdr.proto;
+-		if (ip_next_proto == IPPROTO_UDP)
+-			ret = RTE_FLOW_ITEM_TYPE_UDP;
+-		else if (ip_next_proto == IPPROTO_TCP)
+-			ret = RTE_FLOW_ITEM_TYPE_TCP;
+-		else if (ip_next_proto == IPPROTO_IP)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+-		else if (ip_next_proto == IPPROTO_IPV6)
+-			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+-		else
+-			ret = RTE_FLOW_ITEM_TYPE_END;
++		MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
++		if (!mask)
++			return RTE_FLOW_ITEM_TYPE_VOID;
++		ret = mlx5_inet_proto_to_item_type(spec, mask);
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_GENEVE:
+-		ether_type_m = item->mask ?
+-			       ((const struct rte_flow_item_geneve *)
+-			       (item->mask))->protocol :
+-			       rte_flow_item_geneve_mask.protocol;
+-		ether_type = ((const struct rte_flow_item_geneve *)
+-			     (item->spec))->protocol;
+-		ether_type_m = rte_be_to_cpu_16(ether_type_m);
+-		ether_type = rte_be_to_cpu_16(ether_type);
+-		switch (ether_type_m & ether_type) {
+-		case RTE_ETHER_TYPE_TEB:
+-			ret = RTE_FLOW_ITEM_TYPE_ETH;
+-			break;
+-		case RTE_ETHER_TYPE_IPV4:
+-			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+-			break;
+-		case RTE_ETHER_TYPE_IPV6:
+-			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+-			break;
+-		default:
+-			ret = RTE_FLOW_ITEM_TYPE_END;
+-		}
++		MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
++		ret = mlx5_ethertype_to_item_type(spec, mask, true);
++		break;
++	case RTE_FLOW_ITEM_TYPE_GRE:
++		MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
++		ret = mlx5_ethertype_to_item_type(spec, mask, true);
++		break;
++	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
++		MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
++		ret = mlx5_nsh_proto_to_item_type(spec, mask);
+ 		break;
+ 	default:
+ 		ret = RTE_FLOW_ITEM_TYPE_VOID;
+ 		break;
+ 	}
+ 	return ret;
++#undef MLX5_XSET_ITEM_MASK_SPEC
+ }
+ 
+ static const int *
+@@ -533,9 +560,11 @@ enum mlx5_expansion {
+ 	MLX5_EXPANSION_OUTER_IPV4,
+ 	MLX5_EXPANSION_OUTER_IPV4_UDP,
+ 	MLX5_EXPANSION_OUTER_IPV4_TCP,
++	MLX5_EXPANSION_OUTER_IPV4_ICMP,
+ 	MLX5_EXPANSION_OUTER_IPV6,
+ 	MLX5_EXPANSION_OUTER_IPV6_UDP,
+ 	MLX5_EXPANSION_OUTER_IPV6_TCP,
++	MLX5_EXPANSION_OUTER_IPV6_ICMP6,
+ 	MLX5_EXPANSION_VXLAN,
+ 	MLX5_EXPANSION_STD_VXLAN,
+ 	MLX5_EXPANSION_L3_VXLAN,
+@@ -549,9 +578,11 @@ enum mlx5_expansion {
+ 	MLX5_EXPANSION_IPV4,
+ 	MLX5_EXPANSION_IPV4_UDP,
+ 	MLX5_EXPANSION_IPV4_TCP,
++	MLX5_EXPANSION_IPV4_ICMP,
+ 	MLX5_EXPANSION_IPV6,
+ 	MLX5_EXPANSION_IPV6_UDP,
+ 	MLX5_EXPANSION_IPV6_TCP,
++	MLX5_EXPANSION_IPV6_ICMP6,
+ 	MLX5_EXPANSION_IPV6_FRAG_EXT,
+ 	MLX5_EXPANSION_GTP,
+ 	MLX5_EXPANSION_GENEVE,
+@@ -586,6 +617,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
+ 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
+ 			(MLX5_EXPANSION_OUTER_IPV4_UDP,
+ 			 MLX5_EXPANSION_OUTER_IPV4_TCP,
++			 MLX5_EXPANSION_OUTER_IPV4_ICMP,
+ 			 MLX5_EXPANSION_GRE,
+ 			 MLX5_EXPANSION_NVGRE,
+ 			 MLX5_EXPANSION_IPV4,
+@@ -607,10 +639,14 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
+ 		.type = RTE_FLOW_ITEM_TYPE_TCP,
+ 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ 	},
++	[MLX5_EXPANSION_OUTER_IPV4_ICMP] = {
++		.type = RTE_FLOW_ITEM_TYPE_ICMP,
++	},
+ 	[MLX5_EXPANSION_OUTER_IPV6] = {
+ 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
+ 			(MLX5_EXPANSION_OUTER_IPV6_UDP,
+ 			 MLX5_EXPANSION_OUTER_IPV6_TCP,
++			 MLX5_EXPANSION_OUTER_IPV6_ICMP6,
+ 			 MLX5_EXPANSION_IPV4,
+ 			 MLX5_EXPANSION_IPV6,
+ 			 MLX5_EXPANSION_GRE,
+@@ -632,6 +668,9 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
+ 		.type = RTE_FLOW_ITEM_TYPE_TCP,
+ 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+ 	},
++	[MLX5_EXPANSION_OUTER_IPV6_ICMP6] = {
++		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
++	},
+ 	[MLX5_EXPANSION_VXLAN] = {
+ 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ 						  MLX5_EXPANSION_IPV4,
+@@ -691,7 +730,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
+ 	},
+ 	[MLX5_EXPANSION_IPV4] = {
+ 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
+-						  MLX5_EXPANSION_IPV4_TCP),
++						  MLX5_EXPANSION_IPV4_TCP,
++						  MLX5_EXPANSION_IPV4_ICMP),
+ 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
+ 		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+@@ -704,9 +744,13 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
+ 		.type = RTE_FLOW_ITEM_TYPE_TCP,
+ 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ 	},
++	[MLX5_EXPANSION_IPV4_ICMP] = {
++		.type = RTE_FLOW_ITEM_TYPE_ICMP,
++	},
+ 	[MLX5_EXPANSION_IPV6] = {
+ 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
+ 						  MLX5_EXPANSION_IPV6_TCP,
++						  MLX5_EXPANSION_IPV6_ICMP6,
+ 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
+ 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
+ 		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+@@ -723,6 +767,9 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
+ 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
+ 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+ 	},
++	[MLX5_EXPANSION_IPV6_ICMP6] = {
++		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
++	},
+ 	[MLX5_EXPANSION_GTP] = {
+ 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ 						  MLX5_EXPANSION_IPV6),
+@@ -1206,7 +1253,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
+ }
+ 
+ /**
+- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
++ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
+  * flow.
+  *
+  * @param[in] dev
+@@ -1219,7 +1266,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ 		       struct mlx5_flow_handle *dev_handle)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+-	const int mark = dev_handle->mark;
+ 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ 	struct mlx5_ind_table_obj *ind_tbl = NULL;
+ 	unsigned int i;
+@@ -1254,15 +1300,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ 		 * this must be always enabled (metadata may arive
+ 		 * from other port - not from local flows only.
+ 		 */
+-		if (priv->config.dv_flow_en &&
+-		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+-		    mlx5_flow_ext_mreg_supported(dev)) {
+-			rxq_ctrl->rxq.mark = 1;
+-			rxq_ctrl->flow_mark_n = 1;
+-		} else if (mark) {
+-			rxq_ctrl->rxq.mark = 1;
+-			rxq_ctrl->flow_mark_n++;
+-		}
+ 		if (tunnel) {
+ 			unsigned int j;
+ 
+@@ -1280,6 +1317,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ 	}
+ }
+ 
++static void
++flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
++{
++	struct mlx5_priv *priv = dev->data->dev_private;
++	struct mlx5_rxq_ctrl *rxq_ctrl;
++
++	if (priv->mark_enabled)
++		return;
++	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
++		rxq_ctrl->rxq.mark = 1;
++	}
++	priv->mark_enabled = 1;
++}
++
+ /**
+  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
+  *
+@@ -1294,7 +1345,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	uint32_t handle_idx;
+ 	struct mlx5_flow_handle *dev_handle;
++	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ 
++	MLX5_ASSERT(wks);
++	if (wks->mark)
++		flow_rxq_mark_flag_set(dev);
+ 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ 		       handle_idx, dev_handle, next)
+ 		flow_drv_rxq_flags_set(dev, dev_handle);
+@@ -1314,7 +1369,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+ 			struct mlx5_flow_handle *dev_handle)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+-	const int mark = dev_handle->mark;
+ 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ 	struct mlx5_ind_table_obj *ind_tbl = NULL;
+ 	unsigned int i;
+@@ -1345,15 +1399,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+ 		MLX5_ASSERT(rxq_ctrl != NULL);
+ 		if (rxq_ctrl == NULL)
+ 			continue;
+-		if (priv->config.dv_flow_en &&
+-		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+-		    mlx5_flow_ext_mreg_supported(dev)) {
+-			rxq_ctrl->rxq.mark = 1;
+-			rxq_ctrl->flow_mark_n = 1;
+-		} else if (mark) {
+-			rxq_ctrl->flow_mark_n--;
+-			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+-		}
+ 		if (tunnel) {
+ 			unsigned int j;
+ 
+@@ -1410,12 +1455,12 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
+ 
+ 		if (rxq == NULL || rxq->ctrl == NULL)
+ 			continue;
+-		rxq->ctrl->flow_mark_n = 0;
+ 		rxq->ctrl->rxq.mark = 0;
+ 		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ 			rxq->ctrl->flow_tunnels_n[j] = 0;
+ 		rxq->ctrl->rxq.tunnel = 0;
+ 	}
++	priv->mark_enabled = 0;
+ }
+ 
+ /**
+@@ -3008,7 +3053,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
+ 	if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
+ 		return rte_flow_error_set
+ 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+-			"Geneve TLV opt length exceeeds the limit (31)");
++			"Geneve TLV opt length exceeds the limit (31)");
+ 	/* Check if class type and length masks are full. */
+ 	if (full_mask.option_class != mask->option_class ||
+ 	    full_mask.option_type != mask->option_type ||
+@@ -3957,7 +4002,7 @@ find_graph_root(uint32_t rss_level)
+  *  subflow.
+  *
+  * @param[in] dev_flow
+- *   Pointer the created preifx subflow.
++ *   Pointer to the created prefix subflow.
+  *
+  * @return
+  *   The layers get from prefix subflow.
+@@ -4284,7 +4329,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
+ 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
+ 	};
+ 
+-	/* Fill the register fileds in the flow. */
++	/* Fill the register fields in the flow. */
+ 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+ 	if (ret < 0)
+ 		return NULL;
+@@ -4353,7 +4398,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
+ 	/*
+ 	 * The copy Flows are not included in any list. These
+ 	 * ones are referenced from other Flows and cannot
+-	 * be applied, removed, deleted in ardbitrary order
++	 * be applied, removed, deleted in arbitrary order
+ 	 * by list traversing.
+ 	 */
+ 	mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
+@@ -4796,6 +4841,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,
+ 			struct rte_flow_error *error)
+ {
+ 	struct mlx5_flow *dev_flow;
++	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ 
+ 	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
+ 				    flow_split_info->flow_idx, error);
+@@ -4810,12 +4856,14 @@ flow_create_split_inner(struct rte_eth_dev *dev,
+ 	/*
+ 	 * If dev_flow is as one of the suffix flow, some actions in suffix
+ 	 * flow may need some user defined item layer flags, and pass the
+-	 * Metadate rxq mark flag to suffix flow as well.
++	 * Metadata rxq mark flag to suffix flow as well.
+ 	 */
+ 	if (flow_split_info->prefix_layers)
+ 		dev_flow->handle->layers = flow_split_info->prefix_layers;
+-	if (flow_split_info->prefix_mark)
+-		dev_flow->handle->mark = 1;
++	if (flow_split_info->prefix_mark) {
++		MLX5_ASSERT(wks);
++		wks->mark = 1;
++	}
+ 	if (sub_flow)
+ 		*sub_flow = dev_flow;
+ #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+@@ -5006,9 +5054,10 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 	uint32_t tag_id = 0;
+ 	struct rte_flow_item *vlan_item_dst = NULL;
+ 	const struct rte_flow_item *vlan_item_src = NULL;
++	const struct rte_flow_item *orig_items = items;
+ 	struct rte_flow_action *hw_mtr_action;
+ 	struct rte_flow_action *action_pre_head = NULL;
+-	int32_t flow_src_port = priv->representor_id;
++	uint16_t flow_src_port = priv->representor_id;
+ 	bool mtr_first;
+ 	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
+ 	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
+@@ -5016,27 +5065,18 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 	uint32_t flow_id = 0;
+ 	uint32_t flow_id_reversed = 0;
+ 	uint8_t flow_id_bits = 0;
++	bool after_meter = false;
+ 	int shift;
+ 
+ 	/* Prepare the suffix subflow items. */
+ 	tag_item = sfx_items++;
+ 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+-		struct mlx5_priv *port_priv;
+-		const struct rte_flow_item_port_id *pid_v;
+ 		int item_type = items->type;
+ 
+ 		switch (item_type) {
+ 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
+-			pid_v = items->spec;
+-			MLX5_ASSERT(pid_v);
+-			port_priv = mlx5_port_to_eswitch_info(pid_v->id, false);
+-			if (!port_priv)
+-				return rte_flow_error_set(error,
+-						rte_errno,
+-						RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+-						pid_v,
+-						"Failed to get port info.");
+-			flow_src_port = port_priv->representor_id;
++			if (mlx5_flow_get_item_vport_id(dev, items, &flow_src_port, error))
++				return -rte_errno;
+ 			if (!fm->def_policy && wks->policy->is_hierarchy &&
+ 			    flow_src_port != priv->representor_id) {
+ 				if (flow_drv_mtr_hierarchy_rule_create(dev,
+@@ -5082,6 +5122,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 				tag_action = actions_pre++;
+ 				action_cur = actions_pre++;
+ 			}
++			after_meter = true;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+@@ -5110,6 +5151,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 						MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+ 			}
+ 			break;
++		case RTE_FLOW_ACTION_TYPE_COUNT:
++			if (fm->def_policy)
++				action_cur = after_meter ?
++						actions_sfx++ : actions_pre++;
++			break;
+ 		default:
+ 			break;
+ 		}
+@@ -5130,7 +5176,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 
+ 		if (!fm->def_policy) {
+ 			sub_policy = get_meter_sub_policy(dev, flow, wks,
+-							  attr, items, error);
++							  attr, orig_items,
++							  error);
+ 			if (!sub_policy)
+ 				return -rte_errno;
+ 		} else {
+@@ -5359,7 +5406,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
+  * @param[out] error
+  *   Perform verbose error reporting if not NULL.
+  * @param[in] encap_idx
+- *   The encap action inndex.
++ *   The encap action index.
+  *
+  * @return
+  *   0 on success, negative value otherwise
+@@ -5527,7 +5574,7 @@ flow_check_match_action(const struct rte_flow_action actions[],
+ 	return flag ? actions_n + 1 : 0;
+ }
+ 
+-#define SAMPLE_SUFFIX_ITEM 2
++#define SAMPLE_SUFFIX_ITEM 3
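++/* Editor's note: the suffix pattern now reserves three item slots --
++ * an optional PORT_ID item copied from the original pattern, the unique
++ * tag match item, and the END item -- hence the bump from 2 to 3.
++ */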
+ 
+ /**
+  * Split the sample flow.
+@@ -5568,6 +5615,7 @@ flow_check_match_action(const struct rte_flow_action actions[],
+ static int
+ flow_sample_split_prep(struct rte_eth_dev *dev,
+ 		       int add_tag,
++		       const struct rte_flow_item items[],
+ 		       struct rte_flow_item sfx_items[],
+ 		       const struct rte_flow_action actions[],
+ 		       struct rte_flow_action actions_sfx[],
+@@ -5584,8 +5632,9 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
+ 	struct mlx5_rte_flow_item_tag *tag_mask;
+ 	struct rte_flow_action_jump *jump_action;
+ 	uint32_t tag_id = 0;
+-	int index;
+ 	int append_index = 0;
++	int set_tag_idx = -1;
++	int index;
+ 	int ret;
+ 
+ 	if (sample_action_pos < 0)
+@@ -5594,6 +5643,52 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
+ 					  NULL, "invalid position of sample "
+ 					  "action in list");
+ 	/* Prepare the actions for prefix and suffix flow. */
++	if (add_tag) {
++		/* Place the newly added tag action so that it precedes
++		 * any PUSH_VLAN or ENCAP action.
++		 */
++		const struct rte_flow_action_raw_encap *raw_encap;
++		const struct rte_flow_action *action = actions;
++		int encap_idx;
++		int action_idx = 0;
++		int raw_decap_idx = -1;
++		int push_vlan_idx = -1;
++		for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
++			switch (action->type) {
++			case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
++				raw_decap_idx = action_idx;
++				break;
++			case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
++				raw_encap = action->conf;
++				if (raw_encap->size >
++					MLX5_ENCAPSULATION_DECISION_SIZE) {
++					encap_idx = raw_decap_idx != -1 ?
++						    raw_decap_idx : action_idx;
++					if (encap_idx < sample_action_pos &&
++					    push_vlan_idx == -1)
++						set_tag_idx = encap_idx;
++				}
++				break;
++			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
++			case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
++				encap_idx = action_idx;
++				if (encap_idx < sample_action_pos &&
++				    push_vlan_idx == -1)
++					set_tag_idx = encap_idx;
++				break;
++			case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
++			case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
++				push_vlan_idx = action_idx;
++				if (push_vlan_idx < sample_action_pos)
++					set_tag_idx = action_idx;
++				break;
++			default:
++				break;
++			}
++			action_idx++;
++		}
++	}
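++	/* Worked example (editor's note): for actions
++	 * [RAW_DECAP, RAW_ENCAP(size > decision size), SAMPLE, END],
++	 * raw_decap_idx = 0, so set_tag_idx = 0 and the new tag action
++	 * is placed before the decap/encap pair in the prefix flow.
++	 */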
+ 	if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
+ 		index = qrss_action_pos;
+ 		/* Put the preceding the Queue/RSS action into prefix flow. */
+@@ -5610,6 +5705,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
+ 		memcpy(actions_sfx, actions + qrss_action_pos,
+ 		       sizeof(struct rte_flow_action));
+ 		actions_sfx++;
++	} else if (add_tag && set_tag_idx >= 0) {
++		if (set_tag_idx > 0)
++			memcpy(actions_pre, actions,
++			       sizeof(struct rte_flow_action) * set_tag_idx);
++		memcpy(actions_pre + set_tag_idx + 1, actions + set_tag_idx,
++		       sizeof(struct rte_flow_action) *
++		       (sample_action_pos - set_tag_idx));
++		index = sample_action_pos;
+ 	} else {
+ 		index = sample_action_pos;
+ 		if (index != 0)
+@@ -5625,6 +5728,12 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
+ 		append_index++;
+ 		set_tag = (void *)(actions_pre + actions_n + append_index);
+ 		ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
++		/* A trusted VF/SF on CX5 does not support meter, so the
++		 * reserved metadata regC is REG_NON; fall back to using
++		 * application tag index 0.
++		 */
++		if (unlikely(ret == REG_NON))
++			ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
+ 		if (ret < 0)
+ 			return ret;
+ 		mlx5_ipool_malloc(priv->sh->ipool
+@@ -5634,6 +5743,12 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
+ 			.data = tag_id,
+ 		};
+ 		/* Prepare the suffix subflow items. */
++		for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
++			if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
++				memcpy(sfx_items, items, sizeof(*sfx_items));
++				sfx_items++;
++			}
++		}
+ 		tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
+ 		tag_spec->data = tag_id;
+ 		tag_spec->id = set_tag->id;
+@@ -5651,13 +5766,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
+ 				RTE_FLOW_ITEM_TYPE_END,
+ 		};
+ 		/* Prepare the tag action in prefix subflow. */
+-		actions_pre[index++] =
++		set_tag_idx = (set_tag_idx == -1) ? index : set_tag_idx;
++		actions_pre[set_tag_idx] =
+ 			(struct rte_flow_action){
+ 			.type = (enum rte_flow_action_type)
+ 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
+ 			.conf = set_tag,
+ 		};
++		/* Update the next sample position to account for the
++		 * added tag action.
++		 */
++		index += 1;
+ 	}
++	/* Copy the sample action into prefix flow. */
+ 	memcpy(actions_pre + index, actions + sample_action_pos,
+ 	       sizeof(struct rte_flow_action));
+ 	index += 1;
+@@ -6042,6 +6161,8 @@ flow_create_split_meter(struct rte_eth_dev *dev,
+ 								  fm->policy_id,
+ 								  NULL);
+ 			MLX5_ASSERT(wks->policy);
++			if (wks->policy->mark)
++				wks->mark = 1;
+ 			if (wks->policy->is_hierarchy) {
+ 				wks->final_policy =
+ 				mlx5_flow_meter_hierarchy_get_final_policy(dev,
+@@ -6065,8 +6186,10 @@ flow_create_split_meter(struct rte_eth_dev *dev,
+ 		if (!fm->def_policy && !is_mtr_hierarchy &&
+ 		    (!has_modify || !fm->drop_cnt))
+ 			set_mtr_reg = false;
+-		/* Prefix actions: meter, decap, encap, tag, jump, end. */
+-		act_size = sizeof(struct rte_flow_action) * (actions_n + 6) +
++		/* Prefix actions: meter, decap, encap, tag, jump, end, cnt. */
++#define METER_PREFIX_ACTION 7
++		act_size = (sizeof(struct rte_flow_action) *
++			    (actions_n + METER_PREFIX_ACTION)) +
+ 			   sizeof(struct mlx5_rte_flow_action_set_tag);
+ 		/* Suffix items: tag, vlan, port id, end. */
+ #define METER_SUFFIX_ITEM 4
+@@ -6128,7 +6251,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
+ 				 MLX5_FLOW_TABLE_LEVEL_METER;
+ 		flow_split_info->prefix_layers =
+ 				flow_get_prefix_layer_flags(dev_flow);
+-		flow_split_info->prefix_mark |= dev_flow->handle->mark;
++		flow_split_info->prefix_mark |= wks->mark;
+ 		flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
+ 	}
+ 	/* Add the prefix subflow. */
+@@ -6194,6 +6317,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
+ 	struct mlx5_flow_dv_sample_resource *sample_res;
+ 	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
+ 	struct mlx5_flow_tbl_resource *sfx_tbl;
++	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ #endif
+ 	size_t act_size;
+ 	size_t item_size;
+@@ -6240,7 +6364,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
+ 			jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
+ 				     next_ft_step;
+ 		pre_actions = sfx_actions + actions_n;
+-		tag_id = flow_sample_split_prep(dev, add_tag, sfx_items,
++		tag_id = flow_sample_split_prep(dev, add_tag, items, sfx_items,
+ 						actions, sfx_actions,
+ 						pre_actions, actions_n,
+ 						sample_action_pos,
+@@ -6280,7 +6404,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
+ 		}
+ 		flow_split_info->prefix_layers =
+ 				flow_get_prefix_layer_flags(dev_flow);
+-		flow_split_info->prefix_mark |= dev_flow->handle->mark;
++		MLX5_ASSERT(wks);
++		flow_split_info->prefix_mark |= wks->mark;
+ 		/* Suffix group level already be scaled with factor, set
+ 		 * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale
+ 		 * again in translation.
+@@ -6884,7 +7009,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+  * @param type
+  *   Flow type to be flushed.
+  * @param active
+- *   If flushing is called avtively.
++ *   If flushing is called actively.
+  */
+ void
+ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+@@ -8531,7 +8656,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
+  *   Perform verbose error reporting if not NULL. PMDs initialize this
+  *   structure in case of error only.
+  * @return
+- *   0 on success, a nagative value otherwise.
++ *   0 on success, a negative value otherwise.
+  */
+ int
+ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
+@@ -9009,7 +9134,7 @@ mlx5_get_tof(const struct rte_flow_item *item,
+ }
+ 
+ /**
+- * tunnel offload functionalilty is defined for DV environment only
++ * tunnel offload functionality is defined for DV environment only
+  */
+ #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ __extension__
+@@ -9822,10 +9947,27 @@ mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+ 			   struct rte_flow_error *error)
+ {
+ 	static const char err_msg[] = "flex item creation unsupported";
++	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct rte_flow_attr attr = { .transfer = 0 };
+ 	const struct mlx5_flow_driver_ops *fops =
+ 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+ 
++	if (!priv->pci_dev) {
++		rte_flow_error_set(error, ENOTSUP,
++				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++				   "create flex item on PF only");
++		return NULL;
++	}
++	switch (priv->pci_dev->id.device_id) {
++	case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
++	case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
++		break;
++	default:
++		rte_flow_error_set(error, ENOTSUP,
++				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++				   "flex item available on BlueField ports only");
++		return NULL;
++	}
+ 	if (!fops->item_create) {
+ 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+@@ -10012,3 +10154,80 @@ mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+ 	}
+ 	return  res;
+ }
++
++/**
++ * Get the E-Switch Manager vport id.
++ *
++ * @param[in] dev
++ *   Pointer to the Ethernet device structure.
++ *
++ * @return
++ *   The vport id.
++ */
++int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev)
++{
++	struct mlx5_priv *priv = dev->data->dev_private;
++	struct mlx5_common_device *cdev = priv->sh->cdev;
++
++	/* New FW exposes the E-Switch Manager vport ID; use it directly. */
++	if (cdev->config.hca_attr.esw_mgr_vport_id_valid)
++		return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id;
++
++	if (priv->pci_dev == NULL)
++		return 0;
++	switch (priv->pci_dev->id.device_id) {
++	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
++	case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
++	case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
++	/*
++	 * In old FW that doesn't expose the E-Switch Manager vport ID in
++	 * the capability, only the BF embedded CPUs control the E-Switch
++	 * Manager port. Hence, the ECPF vport ID is selected and not the
++	 * host port (0) in any BF case.
++	 */
++		return (int16_t)MLX5_ECPF_VPORT_ID;
++	default:
++		return MLX5_PF_VPORT_ID;
++	}
++}
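++
++/* Worked example (editor's note): on a BlueField NIC (e.g. ConnectX-6 Dx
++ * BF) whose FW does not report esw_mgr_vport_id_valid, this returns the
++ * ECPF vport ID 0xFFFE; on a non-BF NIC it returns the PF vport ID 0.
++ */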
++
++/**
++ * Parse item to get the vport id.
++ *
++ * @param[in] dev
++ *   Pointer to the Ethernet device structure.
++ * @param[in] item
++ *   The src port id match item.
++ * @param[out] vport_id
++ *   Pointer to put the vport id.
++ * @param[out] error
++ *   Pointer to error structure.
++ *
++ * @return
++ *   0 on success, a negative errno value otherwise and rte_errno is set.
++ */
++int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
++				const struct rte_flow_item *item,
++				uint16_t *vport_id,
++				struct rte_flow_error *error)
++{
++	struct mlx5_priv *port_priv;
++	const struct rte_flow_item_port_id *pid_v;
++
++	if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID)
++		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
++					  NULL, "Incorrect item type.");
++	pid_v = item->spec;
++	if (!pid_v)
++		return 0;
++	if (pid_v->id == MLX5_PORT_ESW_MGR) {
++		*vport_id = mlx5_flow_get_esw_manager_vport_id(dev);
++	} else {
++		port_priv = mlx5_port_to_eswitch_info(pid_v->id, false);
++		if (!port_priv)
++			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
++						  NULL, "Failed to get port info.");
++		*vport_id = port_priv->representor_id;
++	}
++
++	return 0;
++}
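++
++/* Illustrative caller sketch (editor's addition, not part of the fix):
++ * resolving the source vport of a PORT_ID item the way the meter split
++ * and meter validation paths above now do:
++ *
++ *	uint16_t flow_src_port = priv->representor_id;
++ *
++ *	if (mlx5_flow_get_item_vport_id(dev, port_id_item,
++ *					&flow_src_port, error))
++ *		return -rte_errno;
++ */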
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h
+index 1f54649c69..29ccb98351 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow.h
++++ b/dpdk/drivers/net/mlx5/mlx5_flow.h
+@@ -426,7 +426,7 @@ enum mlx5_feature_name {
+ #define MLX5_ACT_NUM_MDF_IPV6		4
+ #define MLX5_ACT_NUM_MDF_MAC		2
+ #define MLX5_ACT_NUM_MDF_VID		1
+-#define MLX5_ACT_NUM_MDF_PORT		2
++#define MLX5_ACT_NUM_MDF_PORT		1
+ #define MLX5_ACT_NUM_MDF_TTL		1
+ #define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
+ #define MLX5_ACT_NUM_MDF_TCPSEQ		1
+@@ -598,7 +598,7 @@ struct mlx5_flow_tbl_data_entry {
+ 	const struct mlx5_flow_tunnel *tunnel;
+ 	uint32_t group_id;
+ 	uint32_t external:1;
+-	uint32_t tunnel_offload:1; /* Tunnel offlod table or not. */
++	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
+ 	uint32_t is_egress:1; /**< Egress table. */
+ 	uint32_t is_transfer:1; /**< Transfer table. */
+ 	uint32_t dummy:1; /**<  DR table. */
+@@ -696,10 +696,8 @@ struct mlx5_flow_handle {
+ 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+ 	void *drv_flow; /**< pointer to driver flow object. */
+ 	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
+-	uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
+-	uint32_t mark:1; /**< Metadate rxq mark flag. */
++	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
+ 	uint32_t fate_action:3; /**< Fate action type. */
+-	uint32_t flex_item; /**< referenced Flex Item bitmask. */
+ 	union {
+ 		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
+ 		uint32_t rix_jump; /**< Index to the jump action resource. */
+@@ -715,6 +713,7 @@ struct mlx5_flow_handle {
+ #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ 	struct mlx5_flow_handle_dv dvh;
+ #endif
++	uint8_t flex_item; /**< referenced Flex Item bitmask. */
+ } __rte_packed;
+ 
+ /*
+@@ -1108,6 +1107,7 @@ struct mlx5_flow_workspace {
+ 	/* The final policy when meter policy is hierarchy. */
+ 	uint32_t skip_matcher_reg:1;
+ 	/* Indicates if need to skip matcher register in translate. */
++	uint32_t mark:1; /* Indicates if flow contains mark action. */
+ };
+ 
+ struct mlx5_flow_split_info {
+@@ -1450,6 +1450,20 @@ flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
+ 	return ct;
+ }
+ 
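++/* Editor's descriptive note: map the innermost matched layer to the
++ * tunnel inner Ethertype -- inner L2 -> TEB (0x6558), inner IPv4 ->
++ * 0x0800, inner IPv6 -> 0x86DD, MPLS -> 0x8847.
++ */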
++static inline uint16_t
++mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
++{
++	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
++		return RTE_ETHER_TYPE_TEB;
++	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
++		return RTE_ETHER_TYPE_IPV4;
++	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
++		return RTE_ETHER_TYPE_IPV6;
++	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
++		return RTE_ETHER_TYPE_MPLS;
++	return 0;
++}
++
+ int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
+ 			     const struct mlx5_flow_tunnel *tunnel,
+ 			     uint32_t group, uint32_t *table,
+@@ -1752,4 +1766,14 @@ const struct mlx5_flow_tunnel *
+ mlx5_get_tof(const struct rte_flow_item *items,
+ 	     const struct rte_flow_action *actions,
+ 	     enum mlx5_tof_rule_type *rule_type);
++
++#define MLX5_PF_VPORT_ID 0
++#define MLX5_ECPF_VPORT_ID 0xFFFE
++
++int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
++int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
++				const struct rte_flow_item *item,
++				uint16_t *vport_id,
++				struct rte_flow_error *error);
++
+ #endif /* RTE_PMD_MLX5_FLOW_H_ */
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c
+index ddf4328dec..eb7fc43da3 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c
+@@ -13,50 +13,6 @@
+ #include "mlx5.h"
+ #include "mlx5_flow.h"
+ 
+-/**
+- * Destroy Completion Queue used for ASO access.
+- *
+- * @param[in] cq
+- *   ASO CQ to destroy.
+- */
+-static void
+-mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
+-{
+-	if (cq->cq_obj.cq)
+-		mlx5_devx_cq_destroy(&cq->cq_obj);
+-	memset(cq, 0, sizeof(*cq));
+-}
+-
+-/**
+- * Create Completion Queue used for ASO access.
+- *
+- * @param[in] ctx
+- *   Context returned from mlx5 open_device() glue function.
+- * @param[in/out] cq
+- *   Pointer to CQ to create.
+- * @param[in] log_desc_n
+- *   Log of number of descriptors in queue.
+- * @param[in] socket
+- *   Socket to use for allocation.
+- * @param[in] uar_page_id
+- *   UAR page ID to use.
+- *
+- * @return
+- *   0 on success, a negative errno value otherwise and rte_errno is set.
+- */
+-static int
+-mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
+-		   int socket, int uar_page_id)
+-{
+-	struct mlx5_devx_cq_attr attr = {
+-		.uar_page_id = uar_page_id,
+-	};
+-
+-	cq->log_desc_n = log_desc_n;
+-	cq->cq_ci = 0;
+-	return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
+-}
+-
+ /**
+  * Free MR resources.
+  *
+@@ -84,21 +40,18 @@ mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)
+  *   Size of MR buffer.
+  * @param[in/out] mr
+  *   Pointer to MR to create.
+- * @param[in] socket
+- *   Socket to use for allocation.
+  *
+  * @return
+  *   0 on success, a negative errno value otherwise and rte_errno is set.
+  */
+ static int
+ mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
+-		struct mlx5_pmd_mr *mr, int socket)
++		struct mlx5_pmd_mr *mr)
+ {
+-
+ 	int ret;
+ 
+ 	mr->addr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
+-			       socket);
++			       SOCKET_ID_ANY);
+ 	if (!mr->addr) {
+ 		DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
+ 		return -1;
+@@ -122,7 +75,7 @@ static void
+ mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
+ {
+ 	mlx5_devx_sq_destroy(&sq->sq_obj);
+-	mlx5_aso_cq_destroy(&sq->cq);
++	mlx5_devx_cq_destroy(&sq->cq.cq_obj);
+ 	memset(sq, 0, sizeof(*sq));
+ }
+ 
+@@ -226,35 +179,31 @@ mlx5_aso_ct_init_sq(struct mlx5_aso_sq *sq)
+ /**
+  * Create Send Queue used for ASO access.
+  *
+- * @param[in] ctx
+- *   Context returned from mlx5 open_device() glue function.
++ * @param[in] cdev
++ *   Pointer to the mlx5 common device.
+  * @param[in/out] sq
+  *   Pointer to SQ to create.
+- * @param[in] socket
+- *   Socket to use for allocation.
+  * @param[in] uar
+  *   User Access Region object.
+- * @param[in] pdn
+- *   Protection Domain number to use.
+- * @param[in] log_desc_n
+- *   Log of number of descriptors in queue.
+- * @param[in] ts_format
+- *   timestamp format supported by the queue.
+  *
+  * @return
+  *   0 on success, a negative errno value otherwise and rte_errno is set.
+  */
+ static int
+-mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket, void *uar,
+-		   uint32_t pdn, uint16_t log_desc_n, uint32_t ts_format)
++mlx5_aso_sq_create(struct mlx5_common_device *cdev, struct mlx5_aso_sq *sq,
++		   void *uar)
+ {
+-	struct mlx5_devx_create_sq_attr attr = {
++	struct mlx5_devx_cq_attr cq_attr = {
++		.uar_page_id = mlx5_os_get_devx_uar_page_id(uar),
++	};
++	struct mlx5_devx_create_sq_attr sq_attr = {
+ 		.user_index = 0xFFFF,
+ 		.wq_attr = (struct mlx5_devx_wq_attr){
+-			.pd = pdn,
++			.pd = cdev->pdn,
+ 			.uar_page = mlx5_os_get_devx_uar_page_id(uar),
+ 		},
+-		.ts_format = mlx5_ts_format_conv(ts_format),
++		.ts_format =
++			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
+ 	};
+ 	struct mlx5_devx_modify_sq_attr modify_attr = {
+ 		.state = MLX5_SQC_STATE_RDY,
+@@ -262,14 +211,18 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket, void *uar,
+ 	uint16_t log_wqbb_n;
+ 	int ret;
+ 
+-	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
+-			       mlx5_os_get_devx_uar_page_id(uar)))
++	if (mlx5_devx_cq_create(cdev->ctx, &sq->cq.cq_obj,
++				MLX5_ASO_QUEUE_LOG_DESC, &cq_attr,
++				SOCKET_ID_ANY))
+ 		goto error;
+-	sq->log_desc_n = log_desc_n;
+-	attr.cqn = sq->cq.cq_obj.cq->id;
++	sq->cq.cq_ci = 0;
++	sq->cq.log_desc_n = MLX5_ASO_QUEUE_LOG_DESC;
++	sq->log_desc_n = MLX5_ASO_QUEUE_LOG_DESC;
++	sq_attr.cqn = sq->cq.cq_obj.cq->id;
+ 	/* for mlx5_aso_wqe that is twice the size of mlx5_wqe */
+-	log_wqbb_n = log_desc_n + 1;
+-	ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
++	log_wqbb_n = sq->log_desc_n + 1;
++	ret = mlx5_devx_sq_create(cdev->ctx, &sq->sq_obj, log_wqbb_n, &sq_attr,
++				  SOCKET_ID_ANY);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Can't create SQ object.");
+ 		rte_errno = ENOMEM;
+@@ -313,34 +266,28 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
+ 	switch (aso_opc_mod) {
+ 	case ASO_OPC_MOD_FLOW_HIT:
+ 		if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+-				    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
++				    sq_desc_n, &sh->aso_age_mng->aso_sq.mr))
+ 			return -1;
+-		if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
+-				       sh->tx_uar.obj, cdev->pdn,
+-				       MLX5_ASO_QUEUE_LOG_DESC,
+-				       cdev->config.hca_attr.sq_ts_format)) {
++		if (mlx5_aso_sq_create(cdev, &sh->aso_age_mng->aso_sq,
++				       sh->tx_uar.obj)) {
+ 			mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);
+ 			return -1;
+ 		}
+ 		mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
+ 		break;
+ 	case ASO_OPC_MOD_POLICER:
+-		if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
+-				       sh->tx_uar.obj, cdev->pdn,
+-				       MLX5_ASO_QUEUE_LOG_DESC,
+-				       cdev->config.hca_attr.sq_ts_format))
++		if (mlx5_aso_sq_create(cdev, &sh->mtrmng->pools_mng.sq,
++				       sh->tx_uar.obj))
+ 			return -1;
+ 		mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
+ 		break;
+ 	case ASO_OPC_MOD_CONNECTION_TRACKING:
+ 		/* 64B per object for query. */
+ 		if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,
+-				    &sh->ct_mng->aso_sq.mr, 0))
++				    &sh->ct_mng->aso_sq.mr))
+ 			return -1;
+-		if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
+-				       sh->tx_uar.obj, cdev->pdn,
+-				       MLX5_ASO_QUEUE_LOG_DESC,
+-				       cdev->config.hca_attr.sq_ts_format)) {
++		if (mlx5_aso_sq_create(cdev, &sh->ct_mng->aso_sq,
++				       sh->tx_uar.obj)) {
+ 			mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);
+ 			return -1;
+ 		}
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c
+index 3da122cbb9..70031d3dc9 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c
+@@ -93,37 +93,6 @@ static int
+ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+ 				  uint32_t rix_jump);
+ 
+-static inline uint16_t
+-mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+-{
+-	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+-		return RTE_ETHER_TYPE_TEB;
+-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+-		return RTE_ETHER_TYPE_IPV4;
+-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+-		return RTE_ETHER_TYPE_IPV6;
+-	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+-		return RTE_ETHER_TYPE_MPLS;
+-	return 0;
+-}
+-
+-static int16_t
+-flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
+-{
+-	struct mlx5_priv *priv = dev->data->dev_private;
+-
+-	if (priv->pci_dev == NULL)
+-		return 0;
+-	switch (priv->pci_dev->id.device_id) {
+-	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
+-	case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
+-	case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
+-		return (int16_t)0xfffe;
+-	default:
+-		return 0;
+-	}
+-}
+-
+ /**
+  * Initialize flow attributes structure according to flow items' types.
+  *
+@@ -172,6 +141,7 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
+ 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ 		case RTE_FLOW_ITEM_TYPE_GENEVE:
+ 		case RTE_FLOW_ITEM_TYPE_MPLS:
++		case RTE_FLOW_ITEM_TYPE_GTP:
+ 			if (tunnel_decap)
+ 				attr->attr = 0;
+ 			break;
+@@ -326,7 +296,8 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+ 		     mlx5_list_match_cb cb_match,
+ 		     mlx5_list_remove_cb cb_remove,
+ 		     mlx5_list_clone_cb cb_clone,
+-		     mlx5_list_clone_free_cb cb_clone_free)
++		     mlx5_list_clone_free_cb cb_clone_free,
++		     struct rte_flow_error *error)
+ {
+ 	struct mlx5_hlist *hl;
+ 	struct mlx5_hlist *expected = NULL;
+@@ -341,7 +312,9 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+ 			cb_clone_free);
+ 	if (!hl) {
+ 		DRV_LOG(ERR, "%s hash creation failed", name);
+-		rte_errno = ENOMEM;
++		rte_flow_error_set(error, ENOMEM,
++				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++				   "cannot allocate resource memory");
+ 		return NULL;
+ 	}
+ 	if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
+@@ -1503,7 +1476,7 @@ mlx5_flow_field_id_to_modify_info
+ 			if (data->offset < 16)
+ 				info[idx++] = (struct field_modify_info){2, 0,
+ 						MLX5_MODI_OUT_DMAC_15_0};
+-			info[idx] = (struct field_modify_info){4, 0,
++			info[idx] = (struct field_modify_info){4, off,
+ 						MLX5_MODI_OUT_DMAC_47_16};
+ 		}
+ 		break;
+@@ -1533,7 +1506,7 @@ mlx5_flow_field_id_to_modify_info
+ 			if (data->offset < 16)
+ 				info[idx++] = (struct field_modify_info){2, 0,
+ 						MLX5_MODI_OUT_SMAC_15_0};
+-			info[idx] = (struct field_modify_info){4, 0,
++			info[idx] = (struct field_modify_info){4, off,
+ 						MLX5_MODI_OUT_SMAC_47_16};
+ 		}
+ 		break;
+@@ -1881,7 +1854,7 @@ flow_dv_convert_action_modify_field
+ 	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
+ 								{0, 0, 0} };
+ 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
+-	uint32_t type;
++	uint32_t type, meta = 0;
+ 	uint32_t shift = 0;
+ 
+ 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
+@@ -1894,6 +1867,11 @@ flow_dv_convert_action_modify_field
+ 		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
+ 					(void *)(uintptr_t)conf->src.pvalue :
+ 					(void *)(uintptr_t)&conf->src.value;
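++		/* Editor's note (assumption based on this hunk): immediate
++		 * META values arrive in host byte order and must be converted
++		 * to big-endian before the modify-header command is built.
++		 */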
++		if (conf->dst.field == RTE_FLOW_FIELD_META) {
++			meta = *(const unaligned_uint32_t *)item.spec;
++			meta = rte_cpu_to_be_32(meta);
++			item.spec = &meta;
++		}
+ 	} else {
+ 		type = MLX5_MODIFICATION_TYPE_COPY;
+ 		/** For COPY fill the destination field (dcopy) without mask. */
+@@ -2032,7 +2010,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
+ 		if (reg == REG_NON)
+ 			return rte_flow_error_set(error, ENOTSUP,
+ 					RTE_FLOW_ERROR_TYPE_ITEM, item,
+-					"unavalable extended metadata register");
++					"unavailable extended metadata register");
+ 		if (reg == REG_B)
+ 			return rte_flow_error_set(error, ENOTSUP,
+ 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+@@ -2879,8 +2857,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
+ {
+ 	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
+ 	const struct mlx5_priv *priv = dev->data->dev_private;
+-	struct mlx5_dev_ctx_shared *sh = priv->sh;
+-	bool direction_error = false;
+ 
+ 	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
+ 	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
+@@ -2892,22 +2868,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
+ 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+ 					  "wrong action order, port_id should "
+ 					  "be after push VLAN");
+-	/* Push VLAN is not supported in ingress except for CX6 FDB mode. */
+-	if (attr->transfer) {
+-		bool fdb_tx = priv->representor_id != UINT16_MAX;
+-		bool is_cx5 = sh->steering_format_version ==
+-		    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
+-
+-		if (!fdb_tx && is_cx5)
+-			direction_error = true;
+-	} else if (attr->ingress) {
+-		direction_error = true;
+-	}
+-	if (direction_error)
+-		return rte_flow_error_set(error, ENOTSUP,
+-					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+-					  NULL,
+-					  "push vlan action not supported for ingress");
+ 	if (!attr->transfer && priv->representor)
+ 		return rte_flow_error_set(error, ENOTSUP,
+ 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+@@ -3205,7 +3165,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
+ 	if (reg == REG_NON)
+ 		return rte_flow_error_set(error, ENOTSUP,
+ 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+-					  "unavalable extended metadata register");
++					  "unavailable extended metadata register");
+ 	if (reg != REG_A && reg != REG_B) {
+ 		struct mlx5_priv *priv = dev->data->dev_private;
+ 
+@@ -3283,6 +3243,25 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
+ 	return 0;
+ }
+ 
++/**
++ * Indicates whether ASO aging is supported.
++ *
++ * @param[in] sh
++ *   Pointer to shared device context structure.
++ * @param[in] attr
++ *   Attributes of flow that includes AGE action.
++ *
++ * @return
++ *   True when ASO aging is supported, false otherwise.
++ */
++static inline bool
++flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
++		const struct rte_flow_attr *attr)
++{
++	MLX5_ASSERT(sh && attr);
++	return (sh->flow_hit_aso_en && (attr->transfer || attr->group));
++}
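++
++/* Editor's note: ASO flow-hit aging needs a non-root table, so
++ * non-transfer group 0 rules return false here and their AGE action
++ * falls back to counter-based aging.
++ */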
++
+ /**
+  * Validate count action.
+  *
+@@ -3292,6 +3271,8 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
+  *   Indicator if action is shared.
+  * @param[in] action_flags
+  *   Holds the actions detected until now.
++ * @param[in] attr
++ *   Attributes of flow that includes this action.
+  * @param[out] error
+  *   Pointer to error structure.
+  *
+@@ -3301,6 +3282,7 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
+ static int
+ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
+ 			      uint64_t action_flags,
++			      const struct rte_flow_attr *attr,
+ 			      struct rte_flow_error *error)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+@@ -3312,10 +3294,10 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
+ 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ 					  "duplicate count actions set");
+ 	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
+-	    !priv->sh->flow_hit_aso_en)
++	    !flow_hit_aso_supported(priv->sh, attr))
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+-					  "old age and shared count combination is not supported");
++					  "old age and indirect count combination is not supported");
+ #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
+ 	return 0;
+ #endif
+@@ -3740,7 +3722,8 @@ flow_dv_encap_decap_resource_register
+ 				flow_dv_encap_decap_match_cb,
+ 				flow_dv_encap_decap_remove_cb,
+ 				flow_dv_encap_decap_clone_cb,
+-				flow_dv_encap_decap_clone_free_cb);
++				flow_dv_encap_decap_clone_free_cb,
++				error);
+ 	if (unlikely(!encaps_decaps))
+ 		return -rte_errno;
+ 	resource->flags = dev_flow->dv.group ? 0 : 1;
+@@ -4982,7 +4965,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
+ 			     const struct rte_flow_attr *attributes,
+ 			     bool external, struct rte_flow_error *error)
+ {
+-	uint32_t target_group, table;
++	uint32_t target_group, table = 0;
+ 	int ret = 0;
+ 	struct flow_grp_info grp_info = {
+ 		.external = !!external,
+@@ -5013,6 +4996,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
+ 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ 					  "target group must be other than"
+ 					  " the current flow group");
++	if (table == 0)
++		return rte_flow_error_set(error, EINVAL,
++					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
++					  NULL, "root table shouldn't be destination");
+ 	return 0;
+ }
+ 
+@@ -5145,7 +5132,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
+  *   Pointer to error structure.
+  *
+  * @return
+- *   0 on success, a negative errno value otherwise and rte_ernno is set.
++ *   0 on success, a negative errno value otherwise and rte_errno is set.
+  */
+ static int
+ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
+@@ -5230,21 +5217,12 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
+ 			 */
+ 			struct mlx5_priv *policy_port_priv =
+ 					mtr_policy->dev->data->dev_private;
+-			int32_t flow_src_port = priv->representor_id;
++			uint16_t flow_src_port = priv->representor_id;
+ 
+ 			if (port_id_item) {
+-				const struct rte_flow_item_port_id *spec =
+-							port_id_item->spec;
+-				struct mlx5_priv *port_priv =
+-					mlx5_port_to_eswitch_info(spec->id,
+-								  false);
+-				if (!port_priv)
+-					return rte_flow_error_set(error,
+-						rte_errno,
+-						RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+-						spec,
+-						"Failed to get port info.");
+-				flow_src_port = port_priv->representor_id;
++				if (mlx5_flow_get_item_vport_id(dev, port_id_item,
++								&flow_src_port, error))
++					return -rte_errno;
+ 			}
+ 			if (flow_src_port != policy_port_priv->representor_id)
+ 				return rte_flow_error_set(error,
+@@ -5678,7 +5656,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
+ 		case RTE_FLOW_ACTION_TYPE_COUNT:
+ 			ret = flow_dv_validate_action_count
+ 				(dev, false, *action_flags | sub_action_flags,
+-				 error);
++				 attr, error);
+ 			if (ret < 0)
+ 				return ret;
+ 			*count = act->conf;
+@@ -5832,7 +5810,8 @@ flow_dv_modify_hdr_resource_register
+ 				flow_dv_modify_match_cb,
+ 				flow_dv_modify_remove_cb,
+ 				flow_dv_modify_clone_cb,
+-				flow_dv_modify_clone_free_cb);
++				flow_dv_modify_clone_free_cb,
++				error);
+ 	if (unlikely(!modify_cmds))
+ 		return -rte_errno;
+ 	resource->root = !dev_flow->dv.group;
+@@ -6714,6 +6693,12 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
+ 					  RTE_FLOW_ERROR_TYPE_ITEM,
+ 					  integrity_item,
+ 					  "unsupported integrity filter");
++	if ((mask->l3_ok & !spec->l3_ok) || (mask->l4_ok & !spec->l4_ok) ||
++		(mask->ipv4_csum_ok & !spec->ipv4_csum_ok) ||
++		(mask->l4_csum_ok & !spec->l4_csum_ok))
++		return rte_flow_error_set(error, EINVAL,
++					  RTE_FLOW_ERROR_TYPE_ITEM,
++					  NULL, "negative integrity flow is not supported");
+ 	if (spec->level > 1) {
+ 		if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
+ 			return rte_flow_error_set
+@@ -6844,7 +6829,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 		 bool external, int hairpin, struct rte_flow_error *error)
+ {
+ 	int ret;
+-	uint64_t action_flags = 0;
++	uint64_t aso_mask, action_flags = 0;
+ 	uint64_t item_flags = 0;
+ 	uint64_t last_item = 0;
+ 	uint8_t next_protocol = 0xff;
+@@ -6911,7 +6896,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 	const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
+ 	const struct rte_flow_item *port_id_item = NULL;
+ 	bool def_policy = false;
++	bool shared_count = false;
+ 	uint16_t udp_dport = 0;
++	uint32_t tag_id = 0;
++	const struct rte_flow_action_age *non_shared_age = NULL;
++	const struct rte_flow_action_count *count = NULL;
+ 
+ 	if (items == NULL)
+ 		return -1;
+@@ -7209,8 +7198,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 				return ret;
+ 			last_item = MLX5_FLOW_ITEM_TAG;
+ 			break;
+-		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+ 		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
++			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
++			break;
++		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_GTP:
+ 			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
+@@ -7281,7 +7272,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 	}
+ 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ 		int type = actions->type;
+-		bool shared_count = false;
+ 
+ 		if (!mlx5_flow_os_action_supported(type))
+ 			return rte_flow_error_set(error, ENOTSUP,
+@@ -7380,6 +7370,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 				++actions_n;
+ 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ 				modify_after_mirror = 1;
++			tag_id = ((const struct rte_flow_action_set_tag *)
++				  actions->conf)->index;
+ 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ 			break;
+@@ -7438,9 +7430,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 		case RTE_FLOW_ACTION_TYPE_COUNT:
+ 			ret = flow_dv_validate_action_count(dev, shared_count,
+ 							    action_flags,
+-							    error);
++							    attr, error);
+ 			if (ret < 0)
+ 				return ret;
++			count = actions->conf;
+ 			action_flags |= MLX5_FLOW_ACTION_COUNT;
+ 			++actions_n;
+ 			break;
+@@ -7746,6 +7739,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 			++actions_n;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_AGE:
++			non_shared_age = actions->conf;
+ 			ret = flow_dv_validate_action_age(action_flags,
+ 							  actions, dev,
+ 							  error);
+@@ -7753,15 +7747,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 				return ret;
+ 			/*
+ 			 * Validate the regular AGE action (using counter)
+-			 * mutual exclusion with share counter actions.
++			 * mutual exclusion with indirect counter actions.
+ 			 */
+-			if (!priv->sh->flow_hit_aso_en) {
++			if (!flow_hit_aso_supported(priv->sh, attr)) {
+ 				if (shared_count)
+ 					return rte_flow_error_set
+ 						(error, EINVAL,
+ 						RTE_FLOW_ERROR_TYPE_ACTION,
+ 						NULL,
+-						"old age and shared count combination is not supported");
++						"old age and indirect count combination is not supported");
+ 				if (sample_count)
+ 					return rte_flow_error_set
+ 						(error, EINVAL,
+@@ -7814,6 +7808,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 							     error);
+ 			if (ret < 0)
+ 				return ret;
++			if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) &&
++			    tag_id == 0 && priv->mtr_color_reg == REG_NON)
++				return rte_flow_error_set(error, EINVAL,
++					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
++					"sample after tag action causes metadata tag index 0 corruption");
+ 			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ 			++actions_n;
+ 			break;
+@@ -7858,7 +7857,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 	 * - Explicit decap action is prohibited by the tunnel offload API.
+ 	 * - Drop action in tunnel steer rule is prohibited by the API.
+ 	 * - Application cannot use MARK action because its value can mask
+-	 *   tunnel default miss nitification.
++	 *   tunnel default miss notification.
+ 	 * - JUMP in tunnel match rule has no support in current PMD
+ 	 *   implementation.
+ 	 * - TAG & META are reserved for future uses.
+@@ -7970,6 +7969,28 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 						  RTE_FLOW_ERROR_TYPE_ACTION,
+ 						  NULL, "encap and decap "
+ 						  "combination aren't supported");
++		/* Push VLAN is not supported in ingress except for NICs newer than CX5. */
++		if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
++			struct mlx5_dev_ctx_shared *sh = priv->sh;
++			bool direction_error = false;
++
++			if (attr->transfer) {
++				bool fdb_tx = priv->representor_id != UINT16_MAX;
++				bool is_cx5 = sh->steering_format_version ==
++				    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
++
++				if (!fdb_tx && is_cx5)
++					direction_error = true;
++			} else if (attr->ingress) {
++				direction_error = true;
++			}
++			if (direction_error)
++				return rte_flow_error_set(error, ENOTSUP,
++							  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
++							  NULL,
++							  "push VLAN action not supported "
++							  "for ingress");
++		}
+ 		if (!attr->transfer && attr->ingress) {
+ 			if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ 				return rte_flow_error_set
+@@ -7977,12 +7998,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 						 RTE_FLOW_ERROR_TYPE_ACTION,
+ 						 NULL, "encap is not supported"
+ 						 " for ingress traffic");
+-			else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+-				return rte_flow_error_set
+-						(error, ENOTSUP,
+-						 RTE_FLOW_ERROR_TYPE_ACTION,
+-						 NULL, "push VLAN action not "
+-						 "supported for ingress");
+ 			else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
+ 					MLX5_FLOW_VLAN_ACTIONS)
+ 				return rte_flow_error_set
+@@ -8022,6 +8037,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 					"cannot be done before meter action");
+ 		}
+ 	}
++	/*
++	 * Only one ASO action is supported in a single flow rule.
++	 * Non-shared AGE + counter falls back to the HW counter, with no
++	 * ASO hit object. Group 0 uses the HW counter for AGE too, even
++	 * without a counter action.
++	 */
++	aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 |
++		   (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 |
++		   (action_flags & MLX5_FLOW_ACTION_AGE &&
++		    !(non_shared_age && count) &&
++		    (attr->group || (attr->transfer && priv->fdb_def_rule)) &&
++		    priv->sh->flow_hit_aso_en);
++	if (__builtin_popcountl(aso_mask) > 1)
++		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
++					  NULL, "unsupported combining AGE, METER, CT ASO actions in a single rule");
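++	/* Worked example (editor's note): METER + CT with both ASO engines
++	 * enabled yields aso_mask = 0b110 (popcount 2), so the rule is
++	 * rejected; a non-shared AGE combined with COUNT contributes no
++	 * ASO bit and keeps using the HW counter.
++	 */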
+ 	/*
+ 	 * Hairpin flow will add one more TAG action in TX implicit mode.
+ 	 * In TX explicit mode, there will be no hairpin flow ID.
+@@ -8045,6 +8074,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 		return rte_flow_error_set(error, EINVAL,
+ 				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ 				"sample before modify action is not supported");
++	/*
++	 * Validate the NIC egress flow on a representor: except for the
++	 * implicit hairpin default egress flow with the TX_QUEUE item,
++	 * other flows do not work due to metadata regC0 mismatch.
++	 */
++	if ((!attr->transfer && attr->egress) && priv->representor &&
++	    !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
++		return rte_flow_error_set(error, EINVAL,
++					  RTE_FLOW_ERROR_TYPE_ITEM,
++					  NULL,
++					  "NIC egress rules on representors"
++					  " are not supported");
+ 	return 0;
+ }
+ 
+@@ -9184,7 +9225,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
+ 			geneve_opt_v->option_type &&
+ 			geneve_opt_resource->length ==
+ 			geneve_opt_v->option_len) {
+-			/* We already have GENVE TLV option obj allocated. */
++			/* We already have GENEVE TLV option obj allocated. */
+ 			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
+ 					   __ATOMIC_RELAXED);
+ 		} else {
+@@ -9713,7 +9754,7 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
+ 
+ 	if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
+ 		flow_dv_translate_item_source_vport(matcher, key,
+-			flow_dv_get_esw_manager_vport_id(dev), 0xffff);
++			mlx5_flow_get_esw_manager_vport_id(dev), 0xffff);
+ 		return 0;
+ 	}
+ 	mask = pid_m ? pid_m->id : 0xffff;
+@@ -10170,7 +10211,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+ 		/* Don't count both inner and outer flex items in one rule. */
+ 		if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+ 			MLX5_ASSERT(false);
+-		dev_flow->handle->flex_item |= RTE_BIT32(index);
++		dev_flow->handle->flex_item |= (uint8_t)RTE_BIT32(index);
+ 	}
+ 	mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
+ }
+@@ -10226,7 +10267,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
+ 	 * Check flow matching criteria first, subtract misc5/4 length if flow
+ 	 * doesn't own misc5/4 parameters. In some old rdma-core releases,
+ 	 * misc5/4 are not supported, and matcher creation failure is expected
+-	 * w/o subtration. If misc5 is provided, misc4 must be counted in since
++	 * w/o subtraction. If misc5 is provided, misc4 must be counted in since
+ 	 * misc5 is right after misc4.
+ 	 */
+ 	if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
+@@ -10514,7 +10555,8 @@ flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+ 			tbl_data->tunnel->tunnel_id : 0,
+ 			tbl_data->group_id);
+ 	}
+-	mlx5_list_destroy(tbl_data->matchers);
++	if (tbl_data->matchers)
++		mlx5_list_destroy(tbl_data->matchers);
+ 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
+ }
+ 
+@@ -10769,7 +10811,8 @@ flow_dv_tag_resource_register
+ 				      flow_dv_tag_match_cb,
+ 				      flow_dv_tag_remove_cb,
+ 				      flow_dv_tag_clone_cb,
+-				      flow_dv_tag_clone_free_cb);
++				      flow_dv_tag_clone_free_cb,
++				      error);
+ 	if (unlikely(!tag_table))
+ 		return -rte_errno;
+ 	entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
+@@ -11074,6 +11117,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct mlx5_flow_handle *dh = dev_flow->handle;
++	uint32_t shared_rss = rss_desc->shared_rss;
+ 	struct mlx5_hrxq *hrxq;
+ 
+ 	MLX5_ASSERT(rss_desc->queue_num);
+@@ -11088,6 +11132,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
+ 		return NULL;
+ 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ 			      *hrxq_idx);
++	rss_desc->shared_rss = shared_rss;
+ 	return hrxq;
+ }
+ 
+@@ -11425,7 +11470,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
+ 			goto error;
+ 		}
+ 	}
+-	/* create a dest array actioin */
++	/* create a dest array action */
+ 	ret = mlx5_os_flow_dr_create_flow_action_dest_array
+ 						(domain,
+ 						 resource->num_of_dest,
+@@ -11660,7 +11705,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
+ 				(((const struct rte_flow_action_mark *)
+ 				(sub_actions->conf))->id);
+ 
+-			dev_flow->handle->mark = 1;
++			wks->mark = 1;
+ 			pre_rix = dev_flow->handle->dvh.rix_tag;
+ 			/* Save the mark resource before sample */
+ 			pre_r = dev_flow->dv.tag_resource;
+@@ -12820,7 +12865,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_FLAG:
+ 			action_flags |= MLX5_FLOW_ACTION_FLAG;
+-			dev_flow->handle->mark = 1;
++			wks->mark = 1;
+ 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ 				struct rte_flow_action_mark mark = {
+ 					.id = MLX5_FLOW_MARK_DEFAULT,
+@@ -12849,7 +12894,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_MARK:
+ 			action_flags |= MLX5_FLOW_ACTION_MARK;
+-			dev_flow->handle->mark = 1;
++			wks->mark = 1;
+ 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ 				const struct rte_flow_action_mark *mark =
+ 					(const struct rte_flow_action_mark *)
+@@ -13306,8 +13351,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
+ 			 */
+ 			if (action_flags & MLX5_FLOW_ACTION_AGE) {
+ 				if ((non_shared_age && count) ||
+-				    !(priv->sh->flow_hit_aso_en &&
+-				      (attr->group || attr->transfer))) {
++				    !flow_hit_aso_supported(priv->sh, attr)) {
+ 					/* Creates age by counters. */
+ 					cnt_act = flow_dv_prepare_counter
+ 								(dev, dev_flow,
+@@ -13538,11 +13582,13 @@ flow_dv_translate(struct rte_eth_dev *dev,
+ 		case RTE_FLOW_ITEM_TYPE_ICMP:
+ 			flow_dv_translate_item_icmp(match_mask, match_value,
+ 						    items, tunnel);
++			matcher.priority = MLX5_PRIORITY_MAP_L4;
+ 			last_item = MLX5_FLOW_LAYER_ICMP;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_ICMP6:
+ 			flow_dv_translate_item_icmp6(match_mask, match_value,
+ 						      items, tunnel);
++			matcher.priority = MLX5_PRIORITY_MAP_L4;
+ 			last_item = MLX5_FLOW_LAYER_ICMP6;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_TAG:
+@@ -13617,12 +13663,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
+ 	/*
+ 	 * When E-Switch mode is enabled, we have two cases where we need to
+ 	 * set the source port manually.
+-	 * The first one, is in case of Nic steering rule, and the second is
+-	 * E-Switch rule where no port_id item was found. In both cases
+-	 * the source port is set according the current port in use.
++	 * The first one, is in case of NIC ingress steering rule, and the
++	 * second is E-Switch rule where no port_id item was found.
++	 * In both cases the source port is set according to the current
++	 * port in use.
+ 	 */
+ 	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
+-	    (priv->representor || priv->master)) {
++	    (priv->representor || priv->master) &&
++	    !(attr->egress && !attr->transfer)) {
+ 		if (flow_dv_translate_item_port_id(dev, match_mask,
+ 						   match_value, NULL, attr))
+ 			return -rte_errno;
+@@ -14508,7 +14556,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+ 			int index = rte_bsf32(dev_handle->flex_item);
+ 
+ 			mlx5_flex_release_index(dev, index);
+-			dev_handle->flex_item &= ~RTE_BIT32(index);
++			dev_handle->flex_item &= ~(uint8_t)RTE_BIT32(index);
+ 		}
+ 		if (dev_handle->dvh.matcher)
+ 			flow_dv_matcher_release(dev, dev_handle);
+@@ -14607,8 +14655,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
+  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
+  * same slot in mlx5_rss_hash_fields.
+  *
+- * @param[in] rss
+- *   Pointer to the shared action RSS conf.
++ * @param[in] orig_rss_types
++ *   RSS type as provided in shared RSS action.
+  * @param[in, out] hash_field
+  *   hash_field variable needed to be adjusted.
+  *
+@@ -14616,10 +14664,10 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
+  *   void
+  */
+ static void
+-__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
++__flow_dv_action_rss_l34_hash_adjust(uint64_t orig_rss_types,
+ 				     uint64_t *hash_field)
+ {
+-	uint64_t rss_types = rss->origin.types;
++	uint64_t rss_types = rte_eth_rss_hf_refine(orig_rss_types);
+ 
+ 	switch (*hash_field & ~IBV_RX_HASH_INNER) {
+ 	case MLX5_RSS_HASH_IPV4:
+@@ -14721,7 +14769,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+ 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
+ 		int tunnel = 0;
+ 
+-		__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
++		__flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,
++						     &hash_fields);
+ 		if (shared_rss->origin.level > 1) {
+ 			hash_fields |= IBV_RX_HASH_INNER;
+ 			tunnel = 1;
+@@ -15455,7 +15504,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
+ 					  NULL,
+ 					  "cannot create policy "
+ 					  "mark action for this color");
+-				dev_flow.handle->mark = 1;
+ 				if (flow_dv_tag_resource_register(dev, tag_be,
+ 						  &dev_flow, &flow_err))
+ 					return -rte_mtr_error_set(error,
+@@ -15467,6 +15515,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
+ 				act_cnt->rix_mark =
+ 					dev_flow.handle->dvh.rix_tag;
+ 				action_flags |= MLX5_FLOW_ACTION_MARK;
++				mtr_policy->mark = 1;
+ 				break;
+ 			}
+ 			case RTE_FLOW_ACTION_TYPE_SET_TAG:
+@@ -15750,6 +15799,8 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
+ 				act_cnt->next_sub_policy = NULL;
+ 				mtr_policy->is_hierarchy = 1;
+ 				mtr_policy->dev = next_policy->dev;
++				if (next_policy->mark)
++					mtr_policy->mark = 1;
+ 				action_flags |=
+ 				MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
+ 				break;
+@@ -16880,7 +16931,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
+ 	struct mlx5_meter_policy_action_container *act_cnt;
+ 	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+ 	uint16_t sub_policy_num;
++	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ 
++	MLX5_ASSERT(wks);
+ 	rte_spinlock_lock(&mtr_policy->sl);
+ 	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ 		if (!rss_desc[i])
+@@ -16914,7 +16967,8 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
+ 		}
+ 	}
+ 	/* Create sub policy. */
+-	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
++	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_GREEN] &&
++	    !mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_YELLOW]) {
+ 		/* Reuse the first pre-allocated sub_policy. */
+ 		sub_policy = mtr_policy->sub_policys[domain][0];
+ 		sub_policy_idx = sub_policy->idx;
+@@ -16954,7 +17008,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
+ 			if (act_cnt->rix_mark || act_cnt->modify_hdr) {
+ 				memset(&dh, 0, sizeof(struct mlx5_flow_handle));
+ 				if (act_cnt->rix_mark)
+-					dh.mark = 1;
++					wks->mark = 1;
+ 				dh.fate_action = MLX5_FLOW_FATE_QUEUE;
+ 				dh.rix_hrxq = hrxq_idx[i];
+ 				flow_drv_rxq_flags_set(dev, &dh);
+@@ -17635,7 +17689,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
+ 						"Indirect age action not supported");
+ 		return flow_dv_validate_action_age(0, action, dev, err);
+ 	case RTE_FLOW_ACTION_TYPE_COUNT:
+-		return flow_dv_validate_action_count(dev, true, 0, err);
++		return flow_dv_validate_action_count(dev, true, 0, NULL, err);
+ 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ 		if (!priv->sh->ct_aso_en)
+ 			return rte_flow_error_set(err, ENOTSUP,
+@@ -18291,4 +18345,3 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
+ };
+ 
+ #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+-
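The refactor above passes the RSS type bitmask into the hash-adjust helper by value (refined via rte_eth_rss_hf_refine()) instead of handing it the whole shared-action object. A minimal sketch of that shape, assuming hypothetical bit definitions (none of the constants below are the driver's):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout, for illustration only. */
#define RSS_L3_SRC_ONLY (1ULL << 0)
#define RSS_L3_DST_ONLY (1ULL << 1)
#define HASH_IPV4_SRC   (1ULL << 2)
#define HASH_IPV4_DST   (1ULL << 3)

/* Taking the raw type mask by value keeps the helper independent of
 * the shared RSS action object layout. */
static void
hash_adjust(uint64_t rss_types, uint64_t *hash_field)
{
	if (rss_types & RSS_L3_SRC_ONLY)
		*hash_field &= ~HASH_IPV4_DST;
	else if (rss_types & RSS_L3_DST_ONLY)
		*hash_field &= ~HASH_IPV4_SRC;
}

int main(void)
{
	uint64_t hf = HASH_IPV4_SRC | HASH_IPV4_DST;

	hash_adjust(RSS_L3_SRC_ONLY, &hf);
	printf("hash fields: 0x%" PRIx64 "\n", hf);
	return 0;
}
```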
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_flex.c b/dpdk/drivers/net/mlx5/mlx5_flow_flex.c
+index 64867dc9e2..3ef46db1f6 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_flex.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_flex.c
+@@ -205,7 +205,7 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
+  * @param dev
+  *   Ethernet device to translate flex item on.
+  * @param[in, out] matcher
+- *   Flow matcher to confgiure
++ *   Flow matcher to configure
+  * @param[in, out] key
+  *   Flow matcher value.
+  * @param[in] item
+@@ -382,15 +382,11 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
+ 			return rte_flow_error_set
+ 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ 				 "unsupported header length field mode (FIXED)");
+-		if (attr->header_length_mask_width < field->field_size)
+-			return rte_flow_error_set
+-				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+-				 "header length field width exceeds limit");
+-		if (field->offset_shift < 0 ||
+-		    field->offset_shift > attr->header_length_mask_width)
++		if (field->field_size ||
++		    field->offset_mask || field->offset_shift)
+ 			return rte_flow_error_set
+ 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+-				 "invalid header length field shift (FIXED");
++				 "invalid fields for fixed mode");
+ 		if (field->field_base < 0)
+ 			return rte_flow_error_set
+ 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+@@ -457,7 +453,7 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
+ 		if (field->offset_shift > 15 || field->offset_shift < 0)
+ 			return rte_flow_error_set
+ 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+-				 "header length field shift exceeeds limit");
++				 "header length field shift exceeds limit");
+ 		node->header_length_field_shift	= field->offset_shift;
+ 		node->header_length_field_offset = field->offset_base;
+ 	}
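The FIXED header-length validation above is tightened: instead of range-checking attributes that are meaningless in fixed mode, any non-zero dynamic length attribute is now rejected as a group. A standalone sketch under illustrative types (the real checks operate on rte_flow flex item fields):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the flex item header-length field. */
struct len_field {
	uint32_t field_size;
	uint32_t offset_mask;
	int32_t offset_shift;
};

/* In fixed mode the header length is a constant, so none of the
 * dynamic attributes may be set. */
static int
validate_fixed(const struct len_field *f)
{
	if (f->field_size || f->offset_mask || f->offset_shift)
		return -1; /* "invalid fields for fixed mode" */
	return 0;
}

int main(void)
{
	struct len_field f = { .offset_shift = 2 };

	printf("%s\n", validate_fixed(&f) ? "rejected" : "ok");
	return 0;
}
```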
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c
+index f4a7b697e6..a58e30dc83 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c
+@@ -15,6 +15,9 @@
+ #include "mlx5.h"
+ #include "mlx5_flow.h"
+ 
++static int mlx5_flow_meter_disable(struct rte_eth_dev *dev,
++		uint32_t meter_id, struct rte_mtr_error *error);
++
+ /**
+  * Create the meter action.
+  *
+@@ -251,7 +254,7 @@ mlx5_flow_meter_xir_man_exp_calc(int64_t xir, uint8_t *man, uint8_t *exp)
+ 	uint8_t _exp = 0;
+ 	uint64_t m, e;
+ 
+-	/* Special case xir == 0 ? both exp and matissa are 0. */
++	/* Special case xir == 0 ? both exp and mantissa are 0. */
+ 	if (xir == 0) {
+ 		*man = 0;
+ 		*exp = 0;
+@@ -287,7 +290,7 @@ mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp)
+ 	int _exp;
+ 	double _man;
+ 
+-	/* Special case xbs == 0 ? both exp and matissa are 0. */
++	/* Special case xbs == 0 ? both exp and mantissa are 0. */
+ 	if (xbs == 0) {
+ 		*man = 0;
+ 		*exp = 0;
+@@ -295,8 +298,10 @@ mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp)
+ 	}
+ 	/* xbs = xbs_mantissa * 2^xbs_exponent */
+ 	_man = frexp(xbs, &_exp);
+-	_man = _man * pow(2, MLX5_MAN_WIDTH);
+-	_exp = _exp - MLX5_MAN_WIDTH;
++	if (_exp >= MLX5_MAN_WIDTH) {
++		_man = _man * pow(2, MLX5_MAN_WIDTH);
++		_exp = _exp - MLX5_MAN_WIDTH;
++	}
+ 	*man = (uint8_t)ceil(_man);
+ 	*exp = _exp;
+ }
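The guard added above fixes the burst-size encoding for small values: frexp() yields a mantissa in [0.5, 1), so unconditionally shifting MLX5_MAN_WIDTH bits out of the exponent could drive it negative. A runnable sketch of the fixed encoder, assuming MAN_WIDTH mirrors MLX5_MAN_WIDTH (link with -lm):

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define MAN_WIDTH 8 /* assumed stand-in for MLX5_MAN_WIDTH */

static void
xbs_man_exp(uint64_t xbs, uint8_t *man, uint8_t *exp)
{
	int _exp = 0;
	double _man = 0;

	if (xbs == 0) {
		*man = 0;
		*exp = 0;
		return;
	}
	/* xbs = _man * 2^_exp, with _man in [0.5, 1). */
	_man = frexp(xbs, &_exp);
	if (_exp >= MAN_WIDTH) { /* the fix: rescale only when safe */
		_man = _man * pow(2, MAN_WIDTH);
		_exp = _exp - MAN_WIDTH;
	}
	*man = (uint8_t)ceil(_man);
	*exp = _exp;
}

int main(void)
{
	uint8_t man, exp;

	xbs_man_exp(64, &man, &exp); /* exponent < MAN_WIDTH: no rescale */
	printf("man=%u exp=%u\n", man, exp);
	return 0;
}
```

Without the guard, xbs = 64 would produce _exp = 7 - 8 = -1, which wraps around when stored in the unsigned exponent field.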
+@@ -305,7 +310,7 @@ mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp)
+  * Fill the prm meter parameter.
+  *
+  * @param[in,out] fmp
+- *   Pointer to meter profie to be converted.
++ *   Pointer to meter profile to be converted.
+  * @param[out] error
+  *   Pointer to the error structure.
+  *
+@@ -437,11 +442,14 @@ mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev,
+ 		/* 2 meters per one ASO cache line. */
+ 		cap->n_max = 1 << (qattr->log_max_num_meter_aso + 1);
+ 		cap->srtcm_rfc2697_packet_mode_supported = 1;
++		cap->trtcm_rfc2698_packet_mode_supported = 1;
++		cap->trtcm_rfc4115_packet_mode_supported = 1;
+ 	} else {
+ 		cap->n_max = 1 << qattr->log_max_flow_meter;
+-		cap->srtcm_rfc2697_packet_mode_supported = 0;
+ 	}
+ 	cap->srtcm_rfc2697_byte_mode_supported = 1;
++	cap->trtcm_rfc2698_byte_mode_supported = 1;
++	cap->trtcm_rfc4115_byte_mode_supported = 1;
+ 	cap->n_shared_max = cap->n_max;
+ 	cap->identical = 1;
+ 	cap->shared_identical = 1;
+@@ -449,7 +457,10 @@ mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev,
+ 	/* 2M flows can share the same meter. */
+ 	cap->chaining_n_mtrs_per_flow_max = 1; /* Chaining is not supported. */
+ 	cap->meter_srtcm_rfc2697_n_max = qattr->flow_meter_old ? cap->n_max : 0;
++	cap->meter_trtcm_rfc2698_n_max = qattr->flow_meter_old ? cap->n_max : 0;
++	cap->meter_trtcm_rfc4115_n_max = qattr->flow_meter_old ? cap->n_max : 0;
+ 	cap->meter_rate_max = 1ULL << 40; /* 1 Tera tokens per sec. */
++	cap->meter_policy_n_max = cap->n_max;
+ 	cap->stats_mask = RTE_MTR_STATS_N_BYTES_DROPPED |
+ 			  RTE_MTR_STATS_N_PKTS_DROPPED;
+ 	return 0;
+@@ -1101,7 +1112,7 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,
+ 			if (ret)
+ 				return ret;
+ 		}
+-		/* Update succeedded modify meter parameters. */
++		/* Update succeeded modify meter parameters. */
+ 		if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE)
+ 			fm->active_state = !!active_state;
+ 	}
+@@ -1167,7 +1178,8 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ 	struct mlx5_legacy_flow_meters *fms = &priv->flow_meters;
+ 	struct mlx5_flow_meter_profile *fmp;
+ 	struct mlx5_flow_meter_info *fm;
+-	struct mlx5_legacy_flow_meter *legacy_fm;
++	/* GCC fails to infer legacy_fm is set when !priv->sh->meter_aso_en. */
++	struct mlx5_legacy_flow_meter *legacy_fm = NULL;
+ 	struct mlx5_flow_meter_policy *mtr_policy = NULL;
+ 	struct mlx5_indexed_pool_config flow_ipool_cfg = {
+ 		.size = 0,
+@@ -1273,11 +1285,13 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ 	if (mlx5_flow_create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap))
+ 		goto error;
+ 	/* Add to the flow meter list. */
+-	if (!priv->sh->meter_aso_en)
++	if (!priv->sh->meter_aso_en) {
++		MLX5_ASSERT(legacy_fm != NULL);
+ 		TAILQ_INSERT_TAIL(fms, legacy_fm, next);
++	}
+ 	/* Add to the flow meter list. */
+ 	fm->active_state = 1; /* Config meter starts as active. */
+-	fm->is_enable = 1;
++	fm->is_enable = params->meter_enable;
+ 	fm->shared = !!shared;
+ 	__atomic_add_fetch(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ 	if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
+@@ -1302,7 +1316,10 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ 		data.dword = mtr_idx;
+ 		if (mlx5_l3t_set_entry(priv->mtr_idx_tbl, meter_id, &data))
+ 			goto error;
++	} else if (!params->meter_enable && mlx5_flow_meter_disable(dev, meter_id, error)) {
++		goto error;
+ 	}
++	fm->active_state = params->meter_enable;
+ 	if (mtr_policy)
+ 		__atomic_add_fetch(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ 	return 0;
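The creation path above now honors params->meter_enable: the meter is still created first, then disabled on request, and active_state follows the requested value. A reduced sketch of that ordering with illustrative types:

```c
#include <stdbool.h>
#include <stdio.h>

struct meter {
	bool is_enable;
	bool active_state;
};

/* Stand-in for the driver's disable path. */
static int
meter_disable(struct meter *fm)
{
	fm->is_enable = false;
	return 0;
}

static int
meter_create(struct meter *fm, bool meter_enable)
{
	fm->is_enable = meter_enable; /* previously forced to 1 */
	if (!meter_enable && meter_disable(fm))
		return -1; /* creation fails if disabling fails */
	fm->active_state = meter_enable;
	return 0;
}

int main(void)
{
	struct meter fm = { false, false };

	meter_create(&fm, false);
	printf("active=%d\n", fm.active_state);
	return 0;
}
```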
+@@ -1615,7 +1632,7 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev,
+ 		return -rte_mtr_error_set(error, -ret,
+ 					  RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ 					  NULL, "Failed to update meter"
+-					  " parmeters in hardware.");
++					  " parameters in hardware.");
+ 	}
+ 	old_fmp->ref_cnt--;
+ 	fmp->ref_cnt++;
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c
+index 29cd694752..165786f864 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c
+@@ -882,13 +882,48 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ 	}
+ }
+ 
++/**
++ * Reserve space for GRE spec in spec buffer.
++ *
++ * @param[in,out] dev_flow
++ *   Pointer to dev_flow structure.
++ *
++ * @return
++ *   Pointer to reserved space in spec buffer.
++ */
++static uint8_t *
++flow_verbs_reserve_gre(struct mlx5_flow *dev_flow)
++{
++	uint8_t *buffer;
++	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
++#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
++	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
++	struct ibv_flow_spec_tunnel tunnel = {
++		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
++		.size = size,
++	};
++#else
++	unsigned int size = sizeof(struct ibv_flow_spec_gre);
++	struct ibv_flow_spec_gre tunnel = {
++		.type = IBV_FLOW_SPEC_GRE,
++		.size = size,
++	};
++#endif
++
++	buffer = verbs->specs + verbs->size;
++	flow_verbs_spec_add(verbs, &tunnel, size);
++	return buffer;
++}
++
+ /**
+  * Convert the @p item into a Verbs specification. This function assumes that
+- * the input is valid and that there is space to insert the requested item
+- * into the flow.
++ * the input is valid and that the Verbs specification will be placed
++ * in the pre-reserved space.
+  *
+  * @param[in, out] dev_flow
+  *   Pointer to dev_flow structure.
++ * @param[in, out] gre_spec
++ *   Pointer to space reserved for GRE spec.
+  * @param[in] item
+  *   Item specification.
+  * @param[in] item_flags
+@@ -896,6 +931,7 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+  */
+ static void
+ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
++			      uint8_t *gre_spec,
+ 			      const struct rte_flow_item *item __rte_unused,
+ 			      uint64_t item_flags)
+ {
+@@ -907,6 +943,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
+ 		.size = size,
+ 	};
+ #else
++	static const struct rte_flow_item_gre empty_gre = {0,};
+ 	const struct rte_flow_item_gre *spec = item->spec;
+ 	const struct rte_flow_item_gre *mask = item->mask;
+ 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
+@@ -915,17 +952,29 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
+ 		.size = size,
+ 	};
+ 
+-	if (!mask)
+-		mask = &rte_flow_item_gre_mask;
+-	if (spec) {
+-		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+-		tunnel.val.protocol = spec->protocol;
+-		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+-		tunnel.mask.protocol = mask->protocol;
+-		/* Remove unwanted bits from values. */
+-		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
++	if (!spec) {
++		spec = &empty_gre;
++		mask = &empty_gre;
++	} else {
++		if (!mask)
++			mask = &rte_flow_item_gre_mask;
++	}
++	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
++	tunnel.val.protocol = spec->protocol;
++	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
++	tunnel.mask.protocol = mask->protocol;
++	/* Remove unwanted bits from values. */
++	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
++	tunnel.val.key &= tunnel.mask.key;
++	if (tunnel.mask.protocol) {
+ 		tunnel.val.protocol &= tunnel.mask.protocol;
+-		tunnel.val.key &= tunnel.mask.key;
++	} else {
++		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
++		if (tunnel.val.protocol) {
++			tunnel.mask.protocol = 0xFFFF;
++			tunnel.val.protocol =
++				rte_cpu_to_be_16(tunnel.val.protocol);
++		}
+ 	}
+ #endif
+ 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+@@ -936,7 +985,8 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
+ 		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
+ 						       IBV_FLOW_SPEC_IPV6,
+ 						       IPPROTO_GRE);
+-	flow_verbs_spec_add(verbs, &tunnel, size);
++	MLX5_ASSERT(gre_spec);
++	memcpy(gre_spec, &tunnel, size);
+ }
+ 
+ /**
+@@ -1666,6 +1716,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ 	struct mlx5_flow_rss_desc *rss_desc;
++	const struct rte_flow_item *tunnel_item = NULL;
++	uint8_t *gre_spec = NULL;
+ 
+ 	MLX5_ASSERT(wks);
+ 	rss_desc = &wks->rss_desc;
+@@ -1680,12 +1732,12 @@ flow_verbs_translate(struct rte_eth_dev *dev,
+ 		case RTE_FLOW_ACTION_TYPE_FLAG:
+ 			flow_verbs_translate_action_flag(dev_flow, actions);
+ 			action_flags |= MLX5_FLOW_ACTION_FLAG;
+-			dev_flow->handle->mark = 1;
++			wks->mark = 1;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_MARK:
+ 			flow_verbs_translate_action_mark(dev_flow, actions);
+ 			action_flags |= MLX5_FLOW_ACTION_MARK;
+-			dev_flow->handle->mark = 1;
++			wks->mark = 1;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_DROP:
+ 			flow_verbs_translate_action_drop(dev_flow, actions);
+@@ -1803,10 +1855,10 @@ flow_verbs_translate(struct rte_eth_dev *dev,
+ 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_GRE:
+-			flow_verbs_translate_item_gre(dev_flow, items,
+-						      item_flags);
++			gre_spec = flow_verbs_reserve_gre(dev_flow);
+ 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ 			item_flags |= MLX5_FLOW_LAYER_GRE;
++			tunnel_item = items;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_MPLS:
+ 			flow_verbs_translate_item_mpls(dev_flow, items,
+@@ -1820,6 +1872,9 @@ flow_verbs_translate(struct rte_eth_dev *dev,
+ 						  NULL, "item not supported");
+ 		}
+ 	}
++	if (item_flags & MLX5_FLOW_LAYER_GRE)
++		flow_verbs_translate_item_gre(dev_flow, gre_spec,
++					      tunnel_item, item_flags);
+ 	dev_flow->handle->layers = item_flags;
+ 	/* Other members of attr will be ignored. */
+ 	dev_flow->verbs.attr.priority =
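The GRE translation above is split into two passes: flow_verbs_reserve_gre() appends a placeholder so the specs keep their order, and the real spec is written once all items were parsed and the protocol can be deduced from item_flags. A self-contained sketch of the reserve-then-fill pattern (names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct spec_buf {
	uint8_t data[64];
	size_t size;
};

/* Append len bytes and hand back a pointer to the reserved slot. */
static uint8_t *
spec_reserve(struct spec_buf *b, size_t len)
{
	uint8_t *slot = b->data + b->size;

	b->size += len;
	return slot;
}

int main(void)
{
	struct spec_buf buf = { {0}, 0 };
	uint8_t *gre_slot = spec_reserve(&buf, 4); /* pass 1: keep order */
	uint32_t proto;

	spec_reserve(&buf, 8);  /* inner items land after the slot */
	proto = 0x0800;         /* deduced from the parsed item flags */
	memcpy(gre_slot, &proto, sizeof(proto)); /* pass 2: fill it in */
	printf("spec size=%zu\n", buf.size);
	return 0;
}
```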
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c
+index e8215f7381..9fcd039c22 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rx.c
++++ b/dpdk/drivers/net/mlx5/mlx5_rx.c
+@@ -73,7 +73,7 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
+ 	const unsigned int cqe_n = (1 << rxq->cqe_n);
+ 	const unsigned int sges_n = (1 << rxq->sges_n);
+ 	const unsigned int elts_n = (1 << rxq->elts_n);
+-	const unsigned int strd_n = (1 << rxq->strd_num_n);
++	const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num);
+ 	const unsigned int cqe_cnt = cqe_n - 1;
+ 	unsigned int cq_ci, used;
+ 
+@@ -167,8 +167,8 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+ 	qinfo->scattered_rx = dev->data->scattered_rx;
+ 	qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
+-		(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
+-		(1 << rxq->elts_n);
++		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
++		RTE_BIT32(rxq->elts_n);
+ }
+ 
+ /**
+@@ -178,7 +178,7 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+  *   Pointer to the device structure.
+  *
+  * @param rx_queue_id
+- *   Rx queue identificatior.
++ *   Rx queue identification.
+  *
+  * @param mode
+  *   Pointer to the burts mode information.
+@@ -354,10 +354,10 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
+ 
+ 			scat = &((volatile struct mlx5_wqe_mprq *)
+ 				rxq->wqes)[i].dseg;
+-			addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
+-							 1 << rxq->strd_num_n);
+-			byte_count = (1 << rxq->strd_sz_n) *
+-					(1 << rxq->strd_num_n);
++			addr = (uintptr_t)mlx5_mprq_buf_addr
++					(buf, RTE_BIT32(rxq->log_strd_num));
++			byte_count = RTE_BIT32(rxq->log_strd_sz) *
++				     RTE_BIT32(rxq->log_strd_num);
+ 			lkey = mlx5_rx_addr2mr(rxq, addr);
+ 		} else {
+ 			struct rte_mbuf *buf = (*rxq->elts)[i];
+@@ -383,13 +383,18 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
+ 		.ai = 0,
+ 	};
+ 	rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
+-		(wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
++		(wqe_n >> rxq->sges_n) * RTE_BIT32(rxq->log_strd_num) : 0;
+ 	/* Update doorbell counter. */
+ 	rxq->rq_ci = wqe_n >> rxq->sges_n;
+ 	rte_io_wmb();
+ 	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ }
+ 
++/* Must be negative. */
++#define MLX5_ERROR_CQE_RET (-1)
++/* Must not be negative. */
++#define MLX5_RECOVERY_ERROR_RET 0
++
+ /**
+  * Handle a Rx error.
+  * The function inserts the RQ state to reset when the first error CQE is
+@@ -404,7 +409,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
+  *   0 when called from non-vectorized Rx burst.
+  *
+  * @return
+- *   -1 in case of recovery error, otherwise the CQE status.
++ *   MLX5_RECOVERY_ERROR_RET in case of recovery error, otherwise the CQE status.
+  */
+ int
+ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+@@ -412,7 +417,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+ 	const uint16_t cqe_n = 1 << rxq->cqe_n;
+ 	const uint16_t cqe_mask = cqe_n - 1;
+ 	const uint16_t wqe_n = 1 << rxq->elts_n;
+-	const uint16_t strd_n = 1 << rxq->strd_num_n;
++	const uint16_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ 	struct mlx5_rxq_ctrl *rxq_ctrl =
+ 			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ 	union {
+@@ -433,7 +438,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+ 		sm.queue_id = rxq->idx;
+ 		sm.state = IBV_WQS_RESET;
+ 		if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
+-			return -1;
++			return MLX5_RECOVERY_ERROR_RET;
+ 		if (rxq_ctrl->dump_file_n <
+ 		    RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
+ 			MKSTR(err_str, "Unexpected CQE error syndrome "
+@@ -473,7 +478,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+ 			sm.queue_id = rxq->idx;
+ 			sm.state = IBV_WQS_RDY;
+ 			if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
+-				return -1;
++				return MLX5_RECOVERY_ERROR_RET;
+ 			if (vec) {
+ 				const uint32_t elts_n =
+ 					mlx5_rxq_mprq_enabled(rxq) ?
+@@ -501,7 +506,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+ 							rte_pktmbuf_free_seg
+ 								(*elt);
+ 						}
+-						return -1;
++						return MLX5_RECOVERY_ERROR_RET;
+ 					}
+ 				}
+ 				for (i = 0; i < (int)elts_n; ++i) {
+@@ -520,7 +525,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+ 		}
+ 		return ret;
+ 	default:
+-		return -1;
++		return MLX5_RECOVERY_ERROR_RET;
+ 	}
+ }
+ 
+@@ -538,7 +543,9 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+  *   written.
+  *
+  * @return
+- *   0 in case of empty CQE, otherwise the packet size in bytes.
++ *   0 in case of an empty CQE, MLX5_ERROR_CQE_RET in case of an error
++ *   CQE, otherwise the packet size for a regular RxQ, or the striding
++ *   byte count format in the MPRQ case.
+  */
+ static inline int
+ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -605,8 +612,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+ 					     rxq->err_state)) {
+ 					ret = mlx5_rx_err_handle(rxq, 0);
+ 					if (ret == MLX5_CQE_STATUS_HW_OWN ||
+-					    ret == -1)
+-						return 0;
++					    ret == MLX5_RECOVERY_ERROR_RET)
++						return MLX5_ERROR_CQE_RET;
+ 				} else {
+ 					return 0;
+ 				}
+@@ -851,8 +858,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+ 		if (!pkt) {
+ 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+ 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
+-			if (!len) {
++			if (len <= 0) {
+ 				rte_mbuf_raw_free(rep);
++				if (unlikely(len == MLX5_ERROR_CQE_RET))
++					rq_ci = rxq->rq_ci << sges_n;
+ 				break;
+ 			}
+ 			pkt = seg;
+@@ -1045,8 +1054,8 @@ uint16_t
+ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+ {
+ 	struct mlx5_rxq_data *rxq = dpdk_rxq;
+-	const uint32_t strd_n = 1 << rxq->strd_num_n;
+-	const uint32_t strd_sz = 1 << rxq->strd_sz_n;
++	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
++	const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
+ 	const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
+ 	const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
+ 	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+@@ -1075,8 +1084,13 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+ 		}
+ 		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ 		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
+-		if (!ret)
++		if (ret == 0)
++			break;
++		if (unlikely(ret == MLX5_ERROR_CQE_RET)) {
++			rq_ci = rxq->rq_ci;
++			consumed_strd = rxq->consumed_strd;
+ 			break;
++		}
+ 		byte_cnt = ret;
+ 		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
+ 		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
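The Rx burst changes above rely on two sentinels: the poll path returns 0 for an empty ring but a negative MLX5_ERROR_CQE_RET for an error CQE, letting the caller rewind its local WQE index to the queue's committed one while recovery resets the ring. A mock of that control flow (values and names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define ERROR_CQE_RET (-1) /* must be negative */

/* Mock poll: two packets, then an error CQE. */
static int
poll_len(int i)
{
	return (i < 2) ? 64 : ERROR_CQE_RET;
}

int main(void)
{
	uint32_t committed_rq_ci = 0; /* queue-owned doorbell index */
	uint32_t rq_ci = committed_rq_ci;
	int i;

	for (i = 0; i < 4; i++) {
		int len = poll_len(i);

		if (len <= 0) {
			if (len == ERROR_CQE_RET)
				rq_ci = committed_rq_ci; /* rewind */
			break; /* empty ring or under recovery */
		}
		rq_ci++; /* consume one WQE */
	}
	printf("rq_ci=%u\n", rq_ci);
	return 0;
}
```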
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.h b/dpdk/drivers/net/mlx5/mlx5_rx.h
+index f808bf288f..423d80e4a7 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rx.h
++++ b/dpdk/drivers/net/mlx5/mlx5_rx.h
+@@ -88,8 +88,8 @@ struct mlx5_rxq_data {
+ 	unsigned int elts_n:4; /* Log 2 of Mbufs. */
+ 	unsigned int rss_hash:1; /* RSS hash result is enabled. */
+ 	unsigned int mark:1; /* Marked flow available on the queue. */
+-	unsigned int strd_num_n:5; /* Log 2 of the number of stride. */
+-	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
++	unsigned int log_strd_num:5; /* Log 2 of the number of stride. */
++	unsigned int log_strd_sz:4; /* Log 2 of stride size. */
+ 	unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
+ 	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
+ 	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
+@@ -125,6 +125,7 @@ struct mlx5_rxq_data {
+ 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
+ 	uint16_t idx; /* Queue index. */
+ 	struct mlx5_rxq_stats stats;
++	struct mlx5_rxq_stats stats_reset; /* stats on last reset. */
+ 	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
+ 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
+ 	struct mlx5_uar_data uar_data; /* CQ doorbell. */
+@@ -161,7 +162,6 @@ struct mlx5_rxq_ctrl {
+ 	uint16_t share_qid; /* Shared RxQ ID in group. */
+ 	unsigned int started:1; /* Whether (shared) RXQ has been started. */
+ 	unsigned int irq:1; /* Whether IRQ is enabled. */
+-	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
+ 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
+ 	uint32_t wqn; /* WQ number. */
+ 	uint32_t rxseg_n; /* Number of split segment descriptions. */
+@@ -401,7 +401,7 @@ mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
+ static __rte_always_inline void
+ mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
+ {
+-	const uint32_t strd_n = 1 << rxq->strd_num_n;
++	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ 	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
+ 	volatile struct mlx5_wqe_data_seg *wqe =
+ 		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
+@@ -459,8 +459,8 @@ static __rte_always_inline enum mlx5_rqx_code
+ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
+ 		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
+ {
+-	const uint32_t strd_n = 1 << rxq->strd_num_n;
+-	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
++	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
++	const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
+ 	const uint16_t strd_shift =
+ 		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
+ 	const int32_t hdrm_overlap =
+@@ -543,7 +543,7 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
+ 					  buf_len, shinfo);
+ 		/* Set mbuf head-room. */
+ 		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
+-		MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
++		MLX5_ASSERT(pkt->ol_flags & RTE_MBUF_F_EXTERNAL);
+ 		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
+ 			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
+ 		DATA_LEN(pkt) = len;
+@@ -605,7 +605,7 @@ mlx5_check_mprq_support(struct rte_eth_dev *dev)
+ static __rte_always_inline int
+ mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
+ {
+-	return rxq->strd_num_n > 0;
++	return rxq->log_strd_num > 0;
+ }
+ 
+ /**
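The field renames above (strd_num_n to log_strd_num, strd_sz_n to log_strd_sz) go hand in hand with RTE_BIT32() so every use site spells out that the fields are log2-encoded. A minimal equivalence check, with the macro written out as rte_bitops.h defines it:

```c
#include <assert.h>
#include <stdint.h>

/* As defined in DPDK's rte_bitops.h. */
#define RTE_BIT32(nr) (UINT32_C(1) << (nr))

int main(void)
{
	unsigned int log_strd_num = 6;

	/* Same value as the old open-coded shift; only intent changes. */
	assert(RTE_BIT32(log_strd_num) == (1u << log_strd_num));
	return 0;
}
```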
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c
+index f77d42dedf..807aaf2fc9 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rxq.c
++++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c
+@@ -67,7 +67,7 @@ mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
+ 	unsigned int wqe_n = 1 << rxq_data->elts_n;
+ 
+ 	if (mlx5_rxq_mprq_enabled(rxq_data))
+-		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
++		cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
+ 	else
+ 		cqe_n = wqe_n - 1;
+ 	return cqe_n;
+@@ -137,8 +137,10 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+ {
+ 	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
+ 	unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+-		(1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
+-		(1 << rxq_ctrl->rxq.elts_n);
++			      RTE_BIT32(rxq_ctrl->rxq.elts_n) *
++			      RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
++			      RTE_BIT32(rxq_ctrl->rxq.elts_n);
++	bool has_vec_support = mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0;
+ 	unsigned int i;
+ 	int err;
+ 
+@@ -160,8 +162,9 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+ 			rte_errno = ENOMEM;
+ 			goto error;
+ 		}
+-		/* Headroom is reserved by rte_pktmbuf_alloc(). */
+-		MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
++		/* Only vectored Rx routines rely on headroom size. */
++		MLX5_ASSERT(!has_vec_support ||
++			    DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
+ 		/* Buffer is supposed to be empty. */
+ 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
+ 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
+@@ -174,7 +177,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+ 		(*rxq_ctrl->rxq.elts)[i] = buf;
+ 	}
+ 	/* If Rx vector is activated. */
+-	if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
++	if (has_vec_support) {
+ 		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ 		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
+ 		struct rte_pktmbuf_pool_private *priv =
+@@ -293,8 +296,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+ {
+ 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ 	const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+-		(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
+-		(1 << rxq->elts_n);
++		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
++		RTE_BIT32(rxq->elts_n);
+ 	const uint16_t q_mask = q_n - 1;
+ 	uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ 		rxq->elts_ci : rxq->rq_ci;
+@@ -838,6 +841,14 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ 	uint64_t offloads = conf->offloads |
+ 			    dev->data->dev_conf.rxmode.offloads;
+ 
++	if ((offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
++	    !priv->config.lro.supported) {
++		DRV_LOG(ERR,
++			"Port %u queue %u LRO is configured but not supported.",
++			dev->data->port_id, idx);
++		rte_errno = EINVAL;
++		return -rte_errno;
++	}
+ 	if (mp) {
+ 		/*
+ 		 * The parameters should be checked on rte_eth_dev layer.
+@@ -1378,8 +1389,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+ 	unsigned int buf_len;
+ 	unsigned int obj_num;
+ 	unsigned int obj_size;
+-	unsigned int strd_num_n = 0;
+-	unsigned int strd_sz_n = 0;
++	unsigned int log_strd_num = 0;
++	unsigned int log_strd_sz = 0;
+ 	unsigned int i;
+ 	unsigned int n_ibv = 0;
+ 	int ret;
+@@ -1398,16 +1409,18 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+ 		n_ibv++;
+ 		desc += 1 << rxq->elts_n;
+ 		/* Get the max number of strides. */
+-		if (strd_num_n < rxq->strd_num_n)
+-			strd_num_n = rxq->strd_num_n;
++		if (log_strd_num < rxq->log_strd_num)
++			log_strd_num = rxq->log_strd_num;
+ 		/* Get the max size of a stride. */
+-		if (strd_sz_n < rxq->strd_sz_n)
+-			strd_sz_n = rxq->strd_sz_n;
+-	}
+-	MLX5_ASSERT(strd_num_n && strd_sz_n);
+-	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
+-	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
+-		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
++		if (log_strd_sz < rxq->log_strd_sz)
++			log_strd_sz = rxq->log_strd_sz;
++	}
++	MLX5_ASSERT(log_strd_num && log_strd_sz);
++	buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
++	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
++		   RTE_BIT32(log_strd_num) *
++		   sizeof(struct rte_mbuf_ext_shared_info) +
++		   RTE_PKTMBUF_HEADROOM;
+ 	/*
+ 	 * Received packets can be either memcpy'd or externally referenced. In
+ 	 * case that the packet is attached to an mbuf as an external buffer, as
+@@ -1453,7 +1466,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+ 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
+ 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
+ 				0, NULL, NULL, mlx5_mprq_buf_init,
+-				(void *)((uintptr_t)1 << strd_num_n),
++				(void *)((uintptr_t)1 << log_strd_num),
+ 				dev->device->numa_node, 0);
+ 	if (mp == NULL) {
+ 		DRV_LOG(ERR,
+@@ -1530,6 +1543,126 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
+ 		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
+ }
+ 
++/**
++ * Prepare both size and number of stride for Multi-Packet RQ.
++ *
++ * @param dev
++ *   Pointer to Ethernet device.
++ * @param idx
++ *   RX queue index.
++ * @param desc
++ *   Number of descriptors to configure in queue.
++ * @param rx_seg_en
++ *   Indicates whether Rx segments are enabled; if so, Multi-Packet RQ
++ *   is not enabled.
++ * @param min_mbuf_size
++ *   Non-scatter minimum mbuf size: max_rx_pktlen plus overhead.
++ * @param actual_log_stride_num
++ *   Log number of strides to configure for this queue.
++ * @param actual_log_stride_size
++ *   Log stride size to configure for this queue.
++ *
++ * @return
++ *   0 if Multi-Packet RQ is supported, otherwise -1.
++ */
++static int
++mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
++		  bool rx_seg_en, uint32_t min_mbuf_size,
++		  uint32_t *actual_log_stride_num,
++		  uint32_t *actual_log_stride_size)
++{
++	struct mlx5_priv *priv = dev->data->dev_private;
++	struct mlx5_dev_config *config = &priv->config;
++	uint32_t log_min_stride_num = config->mprq.log_min_stride_num;
++	uint32_t log_max_stride_num = config->mprq.log_max_stride_num;
++	uint32_t log_def_stride_num =
++			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
++					log_min_stride_num),
++				log_max_stride_num);
++	uint32_t log_min_stride_size = config->mprq.log_min_stride_size;
++	uint32_t log_max_stride_size = config->mprq.log_max_stride_size;
++	uint32_t log_def_stride_size =
++			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
++					log_min_stride_size),
++				log_max_stride_size);
++	uint32_t log_stride_wqe_size;
++
++	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
++		goto unsupport;
++	/* Check whether the chosen number of strides is in the supported range. */
++	if (config->mprq.log_stride_num > log_max_stride_num ||
++	    config->mprq.log_stride_num < log_min_stride_num) {
++		*actual_log_stride_num = log_def_stride_num;
++		DRV_LOG(WARNING,
++			"Port %u Rx queue %u number of strides for Multi-Packet RQ is out of range, setting default value (%u)",
++			dev->data->port_id, idx, RTE_BIT32(log_def_stride_num));
++	} else {
++		*actual_log_stride_num = config->mprq.log_stride_num;
++	}
++	if (config->mprq.log_stride_size) {
++		/* Check whether the chosen stride size is in the supported range. */
++		if (config->mprq.log_stride_size > log_max_stride_size ||
++		    config->mprq.log_stride_size < log_min_stride_size) {
++			*actual_log_stride_size = log_def_stride_size;
++			DRV_LOG(WARNING,
++				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
++				dev->data->port_id, idx,
++				RTE_BIT32(log_def_stride_size));
++		} else {
++			*actual_log_stride_size = config->mprq.log_stride_size;
++		}
++	} else {
++		if (min_mbuf_size <= RTE_BIT32(log_max_stride_size))
++			*actual_log_stride_size = log2above(min_mbuf_size);
++		else
++			goto unsupport;
++	}
++	log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
++	/* Check if WQE buffer size is supported by hardware. */
++	if (log_stride_wqe_size < config->mprq.log_min_stride_wqe_size) {
++		*actual_log_stride_num = log_def_stride_num;
++		*actual_log_stride_size = log_def_stride_size;
++		DRV_LOG(WARNING,
++			"Port %u Rx queue %u size of WQE buffer for Multi-Packet RQ is too small, setting default values (stride_num_n=%u, stride_size_n=%u)",
++			dev->data->port_id, idx, RTE_BIT32(log_def_stride_num),
++			RTE_BIT32(log_def_stride_size));
++		log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
++	}
++	MLX5_ASSERT(log_stride_wqe_size >= config->mprq.log_min_stride_wqe_size);
++	if (desc <= RTE_BIT32(*actual_log_stride_num))
++		goto unsupport;
++	if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
++		DRV_LOG(WARNING, "Port %u Rx queue %u "
++			"Multi-Packet RQ is unsupported, WQE buffer size (%u) "
++			"is smaller than min mbuf size (%u)",
++			dev->data->port_id, idx, RTE_BIT32(log_stride_wqe_size),
++			min_mbuf_size);
++		goto unsupport;
++	}
++	DRV_LOG(DEBUG, "Port %u Rx queue %u "
++		"Multi-Packet RQ is enabled, strd_num_n = %u, strd_sz_n = %u",
++		dev->data->port_id, idx, RTE_BIT32(*actual_log_stride_num),
++		RTE_BIT32(*actual_log_stride_size));
++	return 0;
++unsupport:
++	if (config->mprq.enabled)
++		DRV_LOG(WARNING,
++			"Port %u MPRQ is requested but cannot be enabled\n"
++			" (requested: pkt_sz = %u, desc_num = %u,"
++			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
++			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
++			" min_stride_sz = %u, max_stride_sz = %u).\n"
++			"Rx segment is %senabled.",
++			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
++			RTE_BIT32(config->mprq.log_stride_size),
++			RTE_BIT32(config->mprq.log_stride_num),
++			config->mprq.min_rxqs_num,
++			RTE_BIT32(config->mprq.log_min_stride_wqe_size),
++			RTE_BIT32(config->mprq.log_min_stride_size),
++			RTE_BIT32(config->mprq.log_max_stride_size),
++			rx_seg_en ? "" : "not ");
++	return -1;
++}
++
+ /**
+  * Create a DPDK Rx queue.
+  *
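mlx5_mprq_prepare() above centralizes the stride selection; its compiled-in defaults are first clamped into the device's reported log2 range. A standalone sketch of that clamping, with simplified MIN/MAX stand-ins (DPDK's RTE_MIN/RTE_MAX are typeof-based):

```c
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint32_t log_min = 3, log_max = 10; /* device capabilities */
	uint32_t compiled_default = 12;     /* out-of-range default */
	uint32_t log_def = MIN(MAX(compiled_default, log_min), log_max);

	printf("clamped default: %u\n", log_def); /* prints 10 */
	return 0;
}
```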
+@@ -1567,41 +1700,33 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ 							RTE_PKTMBUF_HEADROOM;
+ 	unsigned int max_lro_size = 0;
+ 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+-	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+-			    !rx_seg[0].offset && !rx_seg[0].length;
+-	unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
+-		config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
+-	unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
+-		(1U << config->mprq.max_stride_size_n) ?
+-		log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
+-	unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
+-		(1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
+-		(config->mprq.stride_size_n ?
+-		(1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
++	uint32_t mprq_log_actual_stride_num = 0;
++	uint32_t mprq_log_actual_stride_size = 0;
++	bool rx_seg_en = n_seg != 1 || rx_seg[0].offset || rx_seg[0].length;
++	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
++					       non_scatter_min_mbuf_size,
++					       &mprq_log_actual_stride_num,
++					       &mprq_log_actual_stride_size);
+ 	/*
+ 	 * Always allocate extra slots, even if eventually
+ 	 * the vector Rx will not be used.
+ 	 */
+ 	uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
++	size_t alloc_size = sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *);
+ 	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+ 	unsigned int tail_len;
+ 
+-	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+-		sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
+-		(!!mprq_en) *
+-		(desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
+-		0, socket);
++	if (mprq_en) {
++		/* Trim the number of descs needed. */
++		desc >>= mprq_log_actual_stride_num;
++		alloc_size += desc * sizeof(struct mlx5_mprq_buf *);
++	}
++	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, alloc_size, 0, socket);
+ 	if (!tmpl) {
+ 		rte_errno = ENOMEM;
+ 		return NULL;
+ 	}
+ 	LIST_INIT(&tmpl->owners);
+-	if (conf->share_group > 0) {
+-		tmpl->rxq.shared = 1;
+-		tmpl->share_group = conf->share_group;
+-		tmpl->share_qid = conf->share_qid;
+-		LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+-	}
+ 	rxq->ctrl = tmpl;
+ 	LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
+ 	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+@@ -1695,43 +1820,19 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ 	tmpl->socket = socket;
+ 	if (dev->data->dev_conf.intr_conf.rxq)
+ 		tmpl->irq = 1;
+-	/*
+-	 * This Rx queue can be configured as a Multi-Packet RQ if all of the
+-	 * following conditions are met:
+-	 *  - MPRQ is enabled.
+-	 *  - The number of descs is more than the number of strides.
+-	 *  - max_rx_pktlen plus overhead is less than the max size
+-	 *    of a stride or mprq_stride_size is specified by a user.
+-	 *    Need to make sure that there are enough strides to encap
+-	 *    the maximum packet size in case mprq_stride_size is set.
+-	 *  Otherwise, enable Rx scatter if necessary.
+-	 */
+-	if (mprq_en && desc > (1U << mprq_stride_nums) &&
+-	    (non_scatter_min_mbuf_size <=
+-	     (1U << config->mprq.max_stride_size_n) ||
+-	     (config->mprq.stride_size_n &&
+-	      non_scatter_min_mbuf_size <= mprq_stride_cap))) {
++	if (mprq_en) {
+ 		/* TODO: Rx scatter isn't supported yet. */
+ 		tmpl->rxq.sges_n = 0;
+-		/* Trim the number of descs needed. */
+-		desc >>= mprq_stride_nums;
+-		tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
+-			config->mprq.stride_num_n : mprq_stride_nums;
+-		tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
+-			config->mprq.stride_size_n : mprq_stride_size;
++		tmpl->rxq.log_strd_num = mprq_log_actual_stride_num;
++		tmpl->rxq.log_strd_sz = mprq_log_actual_stride_size;
+ 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
+ 		tmpl->rxq.strd_scatter_en =
+ 				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
+ 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+ 				config->mprq.max_memcpy_len);
+ 		max_lro_size = RTE_MIN(max_rx_pktlen,
+-				       (1u << tmpl->rxq.strd_num_n) *
+-				       (1u << tmpl->rxq.strd_sz_n));
+-		DRV_LOG(DEBUG,
+-			"port %u Rx queue %u: Multi-Packet RQ is enabled"
+-			" strd_num_n = %u, strd_sz_n = %u",
+-			dev->data->port_id, idx,
+-			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
++				       RTE_BIT32(tmpl->rxq.log_strd_num) *
++				       RTE_BIT32(tmpl->rxq.log_strd_sz));
+ 	} else if (tmpl->rxq.rxseg_n == 1) {
+ 		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
+ 		tmpl->rxq.sges_n = 0;
+@@ -1765,24 +1866,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ 		tmpl->rxq.sges_n = sges_n;
+ 		max_lro_size = max_rx_pktlen;
+ 	}
+-	if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
+-		DRV_LOG(WARNING,
+-			"port %u MPRQ is requested but cannot be enabled\n"
+-			" (requested: pkt_sz = %u, desc_num = %u,"
+-			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
+-			"  supported: min_rxqs_num = %u,"
+-			" min_stride_sz = %u, max_stride_sz = %u).",
+-			dev->data->port_id, non_scatter_min_mbuf_size,
+-			desc, priv->rxqs_n,
+-			config->mprq.stride_size_n ?
+-				(1U << config->mprq.stride_size_n) :
+-				(1U << mprq_stride_size),
+-			config->mprq.stride_num_n ?
+-				(1U << config->mprq.stride_num_n) :
+-				(1U << mprq_stride_nums),
+-			config->mprq.min_rxqs_num,
+-			(1U << config->mprq.min_stride_size_n),
+-			(1U << config->mprq.max_stride_size_n));
+ 	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
+ 		dev->data->port_id, 1 << tmpl->rxq.sges_n);
+ 	if (desc % (1 << tmpl->rxq.sges_n)) {
+@@ -1840,20 +1923,24 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ 		dev->data->port_id,
+ 		tmpl->rxq.crc_present ? "disabled" : "enabled",
+ 		tmpl->rxq.crc_present << 2);
+-	/* Save port ID. */
+ 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
+ 		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
++	/* Save port ID. */
+ 	tmpl->rxq.port_id = dev->data->port_id;
+ 	tmpl->sh = priv->sh;
+ 	tmpl->rxq.mp = rx_seg[0].mp;
+ 	tmpl->rxq.elts_n = log2above(desc);
+-	tmpl->rxq.rq_repl_thresh =
+-		MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
+-	tmpl->rxq.elts =
+-		(struct rte_mbuf *(*)[desc_n])(tmpl + 1);
++	tmpl->rxq.rq_repl_thresh = MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
++	tmpl->rxq.elts = (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
+ 	tmpl->rxq.mprq_bufs =
+ 		(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
+ 	tmpl->rxq.idx = idx;
++	if (conf->share_group > 0) {
++		tmpl->rxq.shared = 1;
++		tmpl->share_group = conf->share_group;
++		tmpl->share_qid = conf->share_qid;
++		LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
++	}
+ 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
+ 	return tmpl;
+ error:
+@@ -1969,6 +2056,8 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 
++	if (idx >= priv->rxqs_n)
++		return NULL;
+ 	MLX5_ASSERT(priv->rxq_privs != NULL);
+ 	return (*priv->rxq_privs)[idx];
+ }
+@@ -2152,7 +2241,7 @@ mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
+  *   Number of queues in the array.
+  *
+  * @return
+- *   1 if all queues in indirection table match 0 othrwise.
++ *   1 if all queues in the indirection table match, 0 otherwise.
+  */
+ static int
+ mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
+@@ -2586,7 +2675,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+ 		if (hrxq->standalone) {
+ 			/*
+ 			 * Replacement of indirection table unsupported for
+-			 * stanalone hrxq objects (used by shared RSS).
++			 * standalone hrxq objects (used by shared RSS).
+ 			 */
+ 			rte_errno = ENOTSUP;
+ 			return -rte_errno;
+@@ -2828,7 +2917,7 @@ mlx5_drop_action_create(struct rte_eth_dev *dev)
+ 
+ 	if (priv->drop_queue.hrxq)
+ 		return priv->drop_queue.hrxq;
+-	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
++	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN, 0, SOCKET_ID_ANY);
+ 	if (!hrxq) {
+ 		DRV_LOG(WARNING,
+ 			"Port %u cannot allocate memory for drop queue.",
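The drop-queue fix above sizes the allocation as sizeof(*hrxq) plus MLX5_RSS_HASH_KEY_LEN, because the hash Rx queue object carries its RSS key in trailing storage. A sketch of the pattern with an illustrative struct (not the driver's layout):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RSS_HASH_KEY_LEN 40 /* assumed stand-in for MLX5_RSS_HASH_KEY_LEN */

struct hrxq {
	int ref_cnt;
	uint8_t rss_key[]; /* flexible array member in trailing storage */
};

int main(void)
{
	/* sizeof(*h) alone would leave no room for the key bytes. */
	struct hrxq *h = calloc(1, sizeof(*h) + RSS_HASH_KEY_LEN);

	if (h == NULL)
		return 1;
	memset(h->rss_key, 0x2c, RSS_HASH_KEY_LEN); /* in bounds now */
	free(h);
	return 0;
}
```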
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c
+index 6212ce8247..0e2eab068a 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c
++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c
+@@ -148,7 +148,7 @@ static inline void
+ mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
+ {
+ 	const uint16_t wqe_n = 1 << rxq->elts_n;
+-	const uint32_t strd_n = 1 << rxq->strd_num_n;
++	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ 	const uint32_t elts_n = wqe_n * strd_n;
+ 	const uint32_t wqe_mask = elts_n - 1;
+ 	uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi);
+@@ -197,8 +197,8 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
+ {
+ 	const uint16_t wqe_n = 1 << rxq->elts_n;
+ 	const uint16_t wqe_mask = wqe_n - 1;
+-	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
+-	const uint32_t strd_n = 1 << rxq->strd_num_n;
++	const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
++	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ 	const uint32_t elts_n = wqe_n * strd_n;
+ 	const uint32_t elts_mask = elts_n - 1;
+ 	uint32_t elts_idx = rxq->rq_pi & elts_mask;
+@@ -428,7 +428,7 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
+ 	const uint16_t q_n = 1 << rxq->cqe_n;
+ 	const uint16_t q_mask = q_n - 1;
+ 	const uint16_t wqe_n = 1 << rxq->elts_n;
+-	const uint32_t strd_n = 1 << rxq->strd_num_n;
++	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ 	const uint32_t elts_n = wqe_n * strd_n;
+ 	const uint32_t elts_mask = elts_n - 1;
+ 	volatile struct mlx5_cqe *cq;
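The altivec header changes that follow replace the bare vector keyword with __vector. The bare form is only a context-sensitive keyword under AltiVec and can collide with ordinary identifiers (std::vector being the classic case), while the double-underscore form is always reserved. A minimal PowerPC-only illustration (requires an AltiVec-enabled compiler):

```c
#include <altivec.h>
#include <stdio.h>

int main(void)
{
	__vector unsigned int a = {1, 2, 3, 4};
	__vector unsigned int b = vec_add(a, a);

	/* Per-element access is a GCC extension. */
	printf("%u %u\n", b[0], b[3]);
	return 0;
}
```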
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+index 423e229508..683a8f9a6c 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+@@ -47,11 +47,11 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
+ 	uint16_t p = n & -2;
+ 
+ 	for (pos = 0; pos < p; pos += 2) {
+-		vector unsigned char mbp;
++		__vector unsigned char mbp;
+ 
+-		mbp = (vector unsigned char)vec_vsx_ld(0,
++		mbp = (__vector unsigned char)vec_vsx_ld(0,
+ 				(signed int const *)&elts[pos]);
+-		*(vector unsigned char *)&pkts[pos] = mbp;
++		*(__vector unsigned char *)&pkts[pos] = mbp;
+ 	}
+ 	if (n & 1)
+ 		pkts[pos] = elts[pos];
+@@ -78,15 +78,15 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ {
+ 	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
+ 	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
+-	const vector unsigned char zero = (vector unsigned char){0};
++	const __vector unsigned char zero = (__vector unsigned char){0};
+ 	/* Mask to shuffle from extracted mini CQE to mbuf. */
+-	const vector unsigned char shuf_mask1 = (vector unsigned char){
++	const __vector unsigned char shuf_mask1 = (__vector unsigned char){
+ 			-1, -1, -1, -1,   /* skip packet_type */
+ 			 7,  6, -1, -1,   /* bswap16, pkt_len */
+ 			 7,  6,           /* bswap16, data_len */
+ 			-1, -1,           /* skip vlan_tci */
+ 			 3,  2,  1,  0};  /* bswap32, rss */
+-	const vector unsigned char shuf_mask2 = (vector unsigned char){
++	const __vector unsigned char shuf_mask2 = (__vector unsigned char){
+ 			-1, -1, -1, -1,   /* skip packet_type */
+ 			15, 14, -1, -1,   /* bswap16, pkt_len */
+ 			15, 14,           /* data_len, bswap16 */
+@@ -95,30 +95,30 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 	/* Restore the compressed count. Must be 16 bits. */
+ 	const uint16_t mcqe_n = t_pkt->data_len +
+ 		(rxq->crc_present * RTE_ETHER_CRC_LEN);
+-	const vector unsigned char rearm =
+-		(vector unsigned char)vec_vsx_ld(0,
++	const __vector unsigned char rearm =
++		(__vector unsigned char)vec_vsx_ld(0,
+ 		(signed int const *)&t_pkt->rearm_data);
+-	const vector unsigned char rxdf =
+-		(vector unsigned char)vec_vsx_ld(0,
++	const __vector unsigned char rxdf =
++		(__vector unsigned char)vec_vsx_ld(0,
+ 		(signed int const *)&t_pkt->rx_descriptor_fields1);
+-	const vector unsigned char crc_adj =
+-		(vector unsigned char)(vector unsigned short){
++	const __vector unsigned char crc_adj =
++		(__vector unsigned char)(__vector unsigned short){
+ 			0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
+ 			rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0};
+-	const vector unsigned short rxdf_sel_mask =
+-		(vector unsigned short){
++	const __vector unsigned short rxdf_sel_mask =
++		(__vector unsigned short){
+ 			0xffff, 0xffff, 0, 0, 0, 0xffff, 0, 0};
+-	vector unsigned char ol_flags = (vector unsigned char){0};
+-	vector unsigned char ol_flags_mask = (vector unsigned char){0};
++	__vector unsigned char ol_flags = (__vector unsigned char){0};
++	__vector unsigned char ol_flags_mask = (__vector unsigned char){0};
+ 	unsigned int pos;
+ 	unsigned int i;
+ 	unsigned int inv = 0;
+ 
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-	const vector unsigned char ones = vec_splat_u8(-1);
++	const __vector unsigned char ones = vec_splat_u8(-1);
+ 	uint32_t rcvd_byte = 0;
+ 	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+-	const vector unsigned char len_shuf_mask = (vector unsigned char){
++	const __vector unsigned char len_shuf_mask = (__vector unsigned char){
+ 		 3,  2, 11, 10,
+ 		 7,  6, 15, 14,
+ 		-1, -1, -1, -1,
+@@ -133,125 +133,125 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 	 * E. store flow tag (rte_flow mark).
+ 	 */
+ 	for (pos = 0; pos < mcqe_n; ) {
+-		vector unsigned char mcqe1, mcqe2;
+-		vector unsigned char rxdf1, rxdf2;
++		__vector unsigned char mcqe1, mcqe2;
++		__vector unsigned char rxdf1, rxdf2;
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-		const vector unsigned short mcqe_sel_mask =
+-			(vector unsigned short){0, 0, 0xffff, 0xffff,
++		const __vector unsigned short mcqe_sel_mask =
++			(__vector unsigned short){0, 0, 0xffff, 0xffff,
+ 			0, 0, 0xfff, 0xffff};
+-		const vector unsigned char lower_half = {
++		const __vector unsigned char lower_half = {
+ 			0, 1, 4, 5, 8, 9, 12, 13, 16,
+ 			17, 20, 21, 24, 25, 28, 29};
+-		const vector unsigned char upper_half = {
++		const __vector unsigned char upper_half = {
+ 			2, 3, 6, 7, 10, 11, 14, 15,
+ 			18, 19, 22, 23, 26, 27, 30, 31};
+-		vector unsigned short left, right;
+-		vector unsigned char byte_cnt, invalid_mask;
+-		vector unsigned long lshift;
++		__vector unsigned short left, right;
++		__vector unsigned char byte_cnt, invalid_mask;
++		__vector unsigned long lshift;
+ 		__attribute__((altivec(vector__)))
+ 			__attribute__((altivec(bool__)))
+ 			unsigned long long shmask;
+-		const vector unsigned long shmax = {64, 64};
++		const __vector unsigned long shmax = {64, 64};
+ #endif
+ 
+ 		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+ 			if (likely(pos + i < mcqe_n))
+ 				rte_prefetch0((void *)(cq + pos + i));
+ 		/* A.1 load mCQEs into a 128bit register. */
+-		mcqe1 = (vector unsigned char)vec_vsx_ld(0,
++		mcqe1 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&mcq[pos % 8]);
+-		mcqe2 = (vector unsigned char)vec_vsx_ld(0,
++		mcqe2 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&mcq[pos % 8 + 2]);
+ 
+ 		/* B.1 store rearm data to mbuf. */
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos]->rearm_data = rearm;
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos + 1]->rearm_data = rearm;
+ 
+ 		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ 		rxdf1 = vec_perm(mcqe1, zero, shuf_mask1);
+ 		rxdf2 = vec_perm(mcqe1, zero, shuf_mask2);
+-		rxdf1 = (vector unsigned char)
+-			((vector unsigned short)rxdf1 -
+-			(vector unsigned short)crc_adj);
+-		rxdf2 = (vector unsigned char)
+-			((vector unsigned short)rxdf2 -
+-			(vector unsigned short)crc_adj);
+-		rxdf1 = (vector unsigned char)
+-			vec_sel((vector unsigned short)rxdf1,
+-			(vector unsigned short)rxdf, rxdf_sel_mask);
+-		rxdf2 = (vector unsigned char)
+-			vec_sel((vector unsigned short)rxdf2,
+-			(vector unsigned short)rxdf, rxdf_sel_mask);
++		rxdf1 = (__vector unsigned char)
++			((__vector unsigned short)rxdf1 -
++			(__vector unsigned short)crc_adj);
++		rxdf2 = (__vector unsigned char)
++			((__vector unsigned short)rxdf2 -
++			(__vector unsigned short)crc_adj);
++		rxdf1 = (__vector unsigned char)
++			vec_sel((__vector unsigned short)rxdf1,
++			(__vector unsigned short)rxdf, rxdf_sel_mask);
++		rxdf2 = (__vector unsigned char)
++			vec_sel((__vector unsigned short)rxdf2,
++			(__vector unsigned short)rxdf, rxdf_sel_mask);
+ 
+ 		/* D.1 store rx_descriptor_fields1. */
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos]->rx_descriptor_fields1 = rxdf1;
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos + 1]->rx_descriptor_fields1 = rxdf2;
+ 
+ 		/* B.1 store rearm data to mbuf. */
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos + 2]->rearm_data = rearm;
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos + 3]->rearm_data = rearm;
+ 
+ 		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ 		rxdf1 = vec_perm(mcqe2, zero, shuf_mask1);
+ 		rxdf2 = vec_perm(mcqe2, zero, shuf_mask2);
+-		rxdf1 = (vector unsigned char)
+-			((vector unsigned short)rxdf1 -
+-			(vector unsigned short)crc_adj);
+-		rxdf2 = (vector unsigned char)
+-			((vector unsigned short)rxdf2 -
+-			(vector unsigned short)crc_adj);
+-		rxdf1 = (vector unsigned char)
+-			vec_sel((vector unsigned short)rxdf1,
+-			(vector unsigned short)rxdf, rxdf_sel_mask);
+-		rxdf2 = (vector unsigned char)
+-			vec_sel((vector unsigned short)rxdf2,
+-			(vector unsigned short)rxdf, rxdf_sel_mask);
++		rxdf1 = (__vector unsigned char)
++			((__vector unsigned short)rxdf1 -
++			(__vector unsigned short)crc_adj);
++		rxdf2 = (__vector unsigned char)
++			((__vector unsigned short)rxdf2 -
++			(__vector unsigned short)crc_adj);
++		rxdf1 = (__vector unsigned char)
++			vec_sel((__vector unsigned short)rxdf1,
++			(__vector unsigned short)rxdf, rxdf_sel_mask);
++		rxdf2 = (__vector unsigned char)
++			vec_sel((__vector unsigned short)rxdf2,
++			(__vector unsigned short)rxdf, rxdf_sel_mask);
+ 
+ 		/* D.1 store rx_descriptor_fields1. */
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos + 2]->rx_descriptor_fields1 = rxdf1;
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&elts[pos + 3]->rx_descriptor_fields1 = rxdf2;
+ 
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-		invalid_mask = (vector unsigned char)(vector unsigned long){
++		invalid_mask = (__vector unsigned char)(__vector unsigned long){
+ 			(mcqe_n - pos) * sizeof(uint16_t) * 8, 0};
+ 
+ 		lshift =
+-			vec_splat((vector unsigned long)invalid_mask, 0);
++			vec_splat((__vector unsigned long)invalid_mask, 0);
+ 		shmask = vec_cmpgt(shmax, lshift);
+-		invalid_mask = (vector unsigned char)
+-			vec_sl((vector unsigned long)ones, lshift);
+-		invalid_mask = (vector unsigned char)
+-			vec_sel((vector unsigned long)shmask,
+-			(vector unsigned long)invalid_mask, shmask);
+-
+-		byte_cnt = (vector unsigned char)
+-			vec_sel((vector unsigned short)
+-			vec_sro((vector unsigned short)mcqe1,
+-			(vector unsigned char){32}),
+-			(vector unsigned short)mcqe2, mcqe_sel_mask);
++		invalid_mask = (__vector unsigned char)
++			vec_sl((__vector unsigned long)ones, lshift);
++		invalid_mask = (__vector unsigned char)
++			vec_sel((__vector unsigned long)shmask,
++			(__vector unsigned long)invalid_mask, shmask);
++
++		byte_cnt = (__vector unsigned char)
++			vec_sel((__vector unsigned short)
++			vec_sro((__vector unsigned short)mcqe1,
++			(__vector unsigned char){32}),
++			(__vector unsigned short)mcqe2, mcqe_sel_mask);
+ 		byte_cnt = vec_perm(byte_cnt, zero, len_shuf_mask);
+-		byte_cnt = (vector unsigned char)
+-			vec_andc((vector unsigned long)byte_cnt,
+-			(vector unsigned long)invalid_mask);
+-		left = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, lower_half);
+-		right = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, upper_half);
+-		byte_cnt = (vector unsigned char)vec_add(left, right);
+-		left = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, lower_half);
+-		right = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, upper_half);
+-		byte_cnt = (vector unsigned char)vec_add(left, right);
+-		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
++		byte_cnt = (__vector unsigned char)
++			vec_andc((__vector unsigned long)byte_cnt,
++			(__vector unsigned long)invalid_mask);
++		left = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, lower_half);
++		right = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, upper_half);
++		byte_cnt = (__vector unsigned char)vec_add(left, right);
++		left = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, lower_half);
++		right = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, upper_half);
++		byte_cnt = (__vector unsigned char)vec_add(left, right);
++		rcvd_byte += ((__vector unsigned long)byte_cnt)[0];
+ #endif
+ 
+ 		if (rxq->mark) {
+@@ -265,99 +265,99 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 				elts[pos + 2]->hash.fdir.hi = flow_tag;
+ 				elts[pos + 3]->hash.fdir.hi = flow_tag;
+ 			} else {
+-				const vector unsigned char flow_mark_adj =
+-					(vector unsigned char)
+-					(vector unsigned int){
++				const __vector unsigned char flow_mark_adj =
++					(__vector unsigned char)
++					(__vector unsigned int){
+ 					-1, -1, -1, -1};
+-				const vector unsigned char flow_mark_shuf =
+-					(vector unsigned char){
++				const __vector unsigned char flow_mark_shuf =
++					(__vector unsigned char){
+ 					-1, -1, -1, -1,
+ 					-1, -1, -1, -1,
+ 					12,  8,  9, -1,
+ 					 4,  0,  1,  -1};
+-				const vector unsigned char ft_mask =
+-					(vector unsigned char)
+-					(vector unsigned int){
++				const __vector unsigned char ft_mask =
++					(__vector unsigned char)
++					(__vector unsigned int){
+ 					0xffffff00, 0xffffff00,
+ 					0xffffff00, 0xffffff00};
+-				const vector unsigned char fdir_flags =
+-					(vector unsigned char)
+-					(vector unsigned int){
++				const __vector unsigned char fdir_flags =
++					(__vector unsigned char)
++					(__vector unsigned int){
+ 					RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR,
+ 					RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR};
+-				const vector unsigned char fdir_all_flags =
+-					(vector unsigned char)
+-					(vector unsigned int){
++				const __vector unsigned char fdir_all_flags =
++					(__vector unsigned char)
++					(__vector unsigned int){
+ 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
+ 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
+ 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
+ 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID};
+-				vector unsigned char fdir_id_flags =
+-					(vector unsigned char)
+-					(vector unsigned int){
++				__vector unsigned char fdir_id_flags =
++					(__vector unsigned char)
++					(__vector unsigned int){
+ 					RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID,
+ 					RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID};
+ 				/* Extract flow_tag field. */
+-				vector unsigned char ftag0 = vec_perm(mcqe1,
++				__vector unsigned char ftag0 = vec_perm(mcqe1,
+ 							zero, flow_mark_shuf);
+-				vector unsigned char ftag1 = vec_perm(mcqe2,
++				__vector unsigned char ftag1 = vec_perm(mcqe2,
+ 							zero, flow_mark_shuf);
+-				vector unsigned char ftag =
+-					(vector unsigned char)
+-					vec_mergel((vector unsigned int)ftag0,
+-					(vector unsigned int)ftag1);
+-				vector unsigned char invalid_mask =
+-					(vector unsigned char)
+-					vec_cmpeq((vector unsigned int)ftag,
+-					(vector unsigned int)zero);
+-
+-				ol_flags_mask = (vector unsigned char)
+-					vec_or((vector unsigned long)
++				__vector unsigned char ftag =
++					(__vector unsigned char)
++					vec_mergel((__vector unsigned int)ftag0,
++					(__vector unsigned int)ftag1);
++				__vector unsigned char invalid_mask =
++					(__vector unsigned char)
++					vec_cmpeq((__vector unsigned int)ftag,
++					(__vector unsigned int)zero);
++
++				ol_flags_mask = (__vector unsigned char)
++					vec_or((__vector unsigned long)
+ 					ol_flags_mask,
+-					(vector unsigned long)fdir_all_flags);
++					(__vector unsigned long)fdir_all_flags);
+ 
+ 				/* Set RTE_MBUF_F_RX_FDIR if flow tag is non-zero. */
+-				invalid_mask = (vector unsigned char)
+-					vec_cmpeq((vector unsigned int)ftag,
+-					(vector unsigned int)zero);
+-				ol_flags = (vector unsigned char)
+-					vec_or((vector unsigned long)ol_flags,
+-					(vector unsigned long)
+-					vec_andc((vector unsigned long)
++				invalid_mask = (__vector unsigned char)
++					vec_cmpeq((__vector unsigned int)ftag,
++					(__vector unsigned int)zero);
++				ol_flags = (__vector unsigned char)
++					vec_or((__vector unsigned long)ol_flags,
++					(__vector unsigned long)
++					vec_andc((__vector unsigned long)
+ 					fdir_flags,
+-					(vector unsigned long)invalid_mask));
+-				ol_flags_mask = (vector unsigned char)
+-					vec_or((vector unsigned long)
++					(__vector unsigned long)invalid_mask));
++				ol_flags_mask = (__vector unsigned char)
++					vec_or((__vector unsigned long)
+ 					ol_flags_mask,
+-					(vector unsigned long)fdir_flags);
++					(__vector unsigned long)fdir_flags);
+ 
+ 				/* Mask out invalid entries. */
+-				fdir_id_flags = (vector unsigned char)
+-					vec_andc((vector unsigned long)
++				fdir_id_flags = (__vector unsigned char)
++					vec_andc((__vector unsigned long)
+ 					fdir_id_flags,
+-					(vector unsigned long)invalid_mask);
++					(__vector unsigned long)invalid_mask);
+ 
+ 				/* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
+-				ol_flags = (vector unsigned char)
+-					vec_or((vector unsigned long)ol_flags,
+-					(vector unsigned long)
+-					vec_andc((vector unsigned long)
++				ol_flags = (__vector unsigned char)
++					vec_or((__vector unsigned long)ol_flags,
++					(__vector unsigned long)
++					vec_andc((__vector unsigned long)
+ 					fdir_id_flags,
+-					(vector unsigned long)
+-					vec_cmpeq((vector unsigned int)ftag,
+-					(vector unsigned int)ft_mask)));
++					(__vector unsigned long)
++					vec_cmpeq((__vector unsigned int)ftag,
++					(__vector unsigned int)ft_mask)));
+ 
+-				ftag = (vector unsigned char)
+-					((vector unsigned int)ftag +
+-					(vector unsigned int)flow_mark_adj);
++				ftag = (__vector unsigned char)
++					((__vector unsigned int)ftag +
++					(__vector unsigned int)flow_mark_adj);
+ 				elts[pos]->hash.fdir.hi =
+-					((vector unsigned int)ftag)[0];
++					((__vector unsigned int)ftag)[0];
+ 				elts[pos + 1]->hash.fdir.hi =
+-					((vector unsigned int)ftag)[1];
++					((__vector unsigned int)ftag)[1];
+ 				elts[pos + 2]->hash.fdir.hi =
+-					((vector unsigned int)ftag)[2];
++					((__vector unsigned int)ftag)[2];
+ 				elts[pos + 3]->hash.fdir.hi =
+-					((vector unsigned int)ftag)[3];
++					((__vector unsigned int)ftag)[3];
+ 			}
+ 		}
+ 		if (unlikely(rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)) {
+@@ -373,37 +373,37 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 					mcq[pos % 8 + 2].hdr_type;
+ 				const uint8_t pkt_hdr3 =
+ 					mcq[pos % 8 + 3].hdr_type;
+-				const vector unsigned char vlan_mask =
+-					(vector unsigned char)
+-					(vector unsigned int) {
++				const __vector unsigned char vlan_mask =
++					(__vector unsigned char)
++					(__vector unsigned int) {
+ 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED)};
+-				const vector unsigned char cv_mask =
+-					(vector unsigned char)
+-					(vector unsigned int) {
++				const __vector unsigned char cv_mask =
++					(__vector unsigned char)
++					(__vector unsigned int) {
+ 					MLX5_CQE_VLAN_STRIPPED,
+ 					MLX5_CQE_VLAN_STRIPPED,
+ 					MLX5_CQE_VLAN_STRIPPED,
+ 					MLX5_CQE_VLAN_STRIPPED};
+-				vector unsigned char pkt_cv =
+-					(vector unsigned char)
+-					(vector unsigned int) {
++				__vector unsigned char pkt_cv =
++					(__vector unsigned char)
++					(__vector unsigned int) {
+ 					pkt_hdr0 & 0x1, pkt_hdr1 & 0x1,
+ 					pkt_hdr2 & 0x1, pkt_hdr3 & 0x1};
+ 
+-				ol_flags_mask = (vector unsigned char)
+-					vec_or((vector unsigned long)
++				ol_flags_mask = (__vector unsigned char)
++					vec_or((__vector unsigned long)
+ 					ol_flags_mask,
+-					(vector unsigned long)vlan_mask);
+-				ol_flags = (vector unsigned char)
+-					vec_or((vector unsigned long)ol_flags,
+-					(vector unsigned long)
+-					vec_and((vector unsigned long)vlan_mask,
+-					(vector unsigned long)
+-					vec_cmpeq((vector unsigned int)pkt_cv,
+-					(vector unsigned int)cv_mask)));
++					(__vector unsigned long)vlan_mask);
++				ol_flags = (__vector unsigned char)
++					vec_or((__vector unsigned long)ol_flags,
++					(__vector unsigned long)
++					vec_and((__vector unsigned long)vlan_mask,
++					(__vector unsigned long)
++					vec_cmpeq((__vector unsigned int)pkt_cv,
++					(__vector unsigned int)cv_mask)));
+ 				elts[pos]->packet_type =
+ 					mlx5_ptype_table[(pkt_hdr0 >> 2) |
+ 							 pkt_info];
+@@ -431,36 +431,36 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 						pkt_info) & (1 << 6));
+ 				}
+ 			}
+-			const vector unsigned char hash_mask =
+-				(vector unsigned char)(vector unsigned int) {
++			const __vector unsigned char hash_mask =
++				(__vector unsigned char)(__vector unsigned int) {
+ 					RTE_MBUF_F_RX_RSS_HASH,
+ 					RTE_MBUF_F_RX_RSS_HASH,
+ 					RTE_MBUF_F_RX_RSS_HASH,
+ 					RTE_MBUF_F_RX_RSS_HASH};
+-			const vector unsigned char rearm_flags =
+-				(vector unsigned char)(vector unsigned int) {
++			const __vector unsigned char rearm_flags =
++				(__vector unsigned char)(__vector unsigned int) {
+ 				(uint32_t)t_pkt->ol_flags,
+ 				(uint32_t)t_pkt->ol_flags,
+ 				(uint32_t)t_pkt->ol_flags,
+ 				(uint32_t)t_pkt->ol_flags};
+ 
+-			ol_flags_mask = (vector unsigned char)
+-				vec_or((vector unsigned long)ol_flags_mask,
+-				(vector unsigned long)hash_mask);
+-			ol_flags = (vector unsigned char)
+-				vec_or((vector unsigned long)ol_flags,
+-				(vector unsigned long)
+-				vec_andc((vector unsigned long)rearm_flags,
+-				(vector unsigned long)ol_flags_mask));
++			ol_flags_mask = (__vector unsigned char)
++				vec_or((__vector unsigned long)ol_flags_mask,
++				(__vector unsigned long)hash_mask);
++			ol_flags = (__vector unsigned char)
++				vec_or((__vector unsigned long)ol_flags,
++				(__vector unsigned long)
++				vec_andc((__vector unsigned long)rearm_flags,
++				(__vector unsigned long)ol_flags_mask));
+ 
+ 			elts[pos]->ol_flags =
+-				((vector unsigned int)ol_flags)[0];
++				((__vector unsigned int)ol_flags)[0];
+ 			elts[pos + 1]->ol_flags =
+-				((vector unsigned int)ol_flags)[1];
++				((__vector unsigned int)ol_flags)[1];
+ 			elts[pos + 2]->ol_flags =
+-				((vector unsigned int)ol_flags)[2];
++				((__vector unsigned int)ol_flags)[2];
+ 			elts[pos + 3]->ol_flags =
+-				((vector unsigned int)ol_flags)[3];
++				((__vector unsigned int)ol_flags)[3];
+ 			elts[pos]->hash.rss = 0;
+ 			elts[pos + 1]->hash.rss = 0;
+ 			elts[pos + 2]->hash.rss = 0;
+@@ -524,13 +524,13 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+  */
+ static inline void
+ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
+-		vector unsigned char cqes[4], vector unsigned char op_err,
++		__vector unsigned char cqes[4], __vector unsigned char op_err,
+ 		struct rte_mbuf **pkts)
+ {
+-	vector unsigned char pinfo0, pinfo1;
+-	vector unsigned char pinfo, ptype;
+-	vector unsigned char ol_flags = (vector unsigned char)
+-		(vector unsigned int){
++	__vector unsigned char pinfo0, pinfo1;
++	__vector unsigned char pinfo, ptype;
++	__vector unsigned char ol_flags = (__vector unsigned char)
++		(__vector unsigned int){
+ 			rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
+ 				rxq->hw_timestamp * rxq->timestamp_rx_flag,
+ 			rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
+@@ -539,25 +539,25 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
+ 				rxq->hw_timestamp * rxq->timestamp_rx_flag,
+ 			rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
+ 				rxq->hw_timestamp * rxq->timestamp_rx_flag};
+-	vector unsigned char cv_flags;
+-	const vector unsigned char zero = (vector unsigned char){0};
+-	const vector unsigned char ptype_mask =
+-		(vector unsigned char)(vector unsigned int){
++	__vector unsigned char cv_flags;
++	const __vector unsigned char zero = (__vector unsigned char){0};
++	const __vector unsigned char ptype_mask =
++		(__vector unsigned char)(__vector unsigned int){
+ 		0x0000fd06, 0x0000fd06, 0x0000fd06, 0x0000fd06};
+-	const vector unsigned char ptype_ol_mask =
+-		(vector unsigned char)(vector unsigned int){
++	const __vector unsigned char ptype_ol_mask =
++		(__vector unsigned char)(__vector unsigned int){
+ 		0x00000106, 0x00000106, 0x00000106, 0x00000106};
+-	const vector unsigned char pinfo_mask =
+-		(vector unsigned char)(vector unsigned int){
++	const __vector unsigned char pinfo_mask =
++		(__vector unsigned char)(__vector unsigned int){
+ 		0x00000003, 0x00000003, 0x00000003, 0x00000003};
+-	const vector unsigned char cv_flag_sel = (vector unsigned char){
++	const __vector unsigned char cv_flag_sel = (__vector unsigned char){
+ 		0, (uint8_t)(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ 		(uint8_t)(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1), 0,
+ 		(uint8_t)(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1), 0,
+ 		(uint8_t)((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
+ 		0, 0, 0, 0, 0, 0, 0, 0, 0};
+-	const vector unsigned char cv_mask =
+-		(vector unsigned char)(vector unsigned int){
++	const __vector unsigned char cv_mask =
++		(__vector unsigned char)(__vector unsigned int){
+ 		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ 		RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ 		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+@@ -566,77 +566,77 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
+ 		RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ 		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ 		RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED};
+-	const vector unsigned char mbuf_init =
+-		(vector unsigned char)vec_vsx_ld
+-			(0, (vector unsigned char *)&rxq->mbuf_initializer);
+-	const vector unsigned short rearm_sel_mask =
+-		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
+-	vector unsigned char rearm0, rearm1, rearm2, rearm3;
++	const __vector unsigned char mbuf_init =
++		(__vector unsigned char)vec_vsx_ld
++			(0, (__vector unsigned char *)&rxq->mbuf_initializer);
++	const __vector unsigned short rearm_sel_mask =
++		(__vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
++	__vector unsigned char rearm0, rearm1, rearm2, rearm3;
+ 	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
+ 
+ 	/* Extract pkt_info field. */
+-	pinfo0 = (vector unsigned char)
+-		vec_mergeh((vector unsigned int)cqes[0],
+-		(vector unsigned int)cqes[1]);
+-	pinfo1 = (vector unsigned char)
+-		vec_mergeh((vector unsigned int)cqes[2],
+-		(vector unsigned int)cqes[3]);
+-	pinfo = (vector unsigned char)
+-		vec_mergeh((vector unsigned long)pinfo0,
+-		(vector unsigned long)pinfo1);
++	pinfo0 = (__vector unsigned char)
++		vec_mergeh((__vector unsigned int)cqes[0],
++		(__vector unsigned int)cqes[1]);
++	pinfo1 = (__vector unsigned char)
++		vec_mergeh((__vector unsigned int)cqes[2],
++		(__vector unsigned int)cqes[3]);
++	pinfo = (__vector unsigned char)
++		vec_mergeh((__vector unsigned long)pinfo0,
++		(__vector unsigned long)pinfo1);
+ 
+ 	/* Extract hdr_type_etc field. */
+-	pinfo0 = (vector unsigned char)
+-		vec_mergel((vector unsigned int)cqes[0],
+-		(vector unsigned int)cqes[1]);
+-	pinfo1 = (vector unsigned char)
+-		vec_mergel((vector unsigned int)cqes[2],
+-		(vector unsigned int)cqes[3]);
+-	ptype = (vector unsigned char)
+-		vec_mergeh((vector unsigned long)pinfo0,
+-		(vector unsigned long)pinfo1);
++	pinfo0 = (__vector unsigned char)
++		vec_mergel((__vector unsigned int)cqes[0],
++		(__vector unsigned int)cqes[1]);
++	pinfo1 = (__vector unsigned char)
++		vec_mergel((__vector unsigned int)cqes[2],
++		(__vector unsigned int)cqes[3]);
++	ptype = (__vector unsigned char)
++		vec_mergeh((__vector unsigned long)pinfo0,
++		(__vector unsigned long)pinfo1);
+ 
+ 	if (rxq->mark) {
+-		const vector unsigned char pinfo_ft_mask =
+-			(vector unsigned char)(vector unsigned int){
++		const __vector unsigned char pinfo_ft_mask =
++			(__vector unsigned char)(__vector unsigned int){
+ 			0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00};
+-		const vector unsigned char fdir_flags =
+-			(vector unsigned char)(vector unsigned int){
++		const __vector unsigned char fdir_flags =
++			(__vector unsigned char)(__vector unsigned int){
+ 			RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR,
+ 			RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR};
+-		vector unsigned char fdir_id_flags =
+-			(vector unsigned char)(vector unsigned int){
++		__vector unsigned char fdir_id_flags =
++			(__vector unsigned char)(__vector unsigned int){
+ 			RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID,
+ 			RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID};
+-		vector unsigned char flow_tag, invalid_mask;
++		__vector unsigned char flow_tag, invalid_mask;
+ 
+-		flow_tag = (vector unsigned char)
+-			vec_and((vector unsigned long)pinfo,
+-			(vector unsigned long)pinfo_ft_mask);
++		flow_tag = (__vector unsigned char)
++			vec_and((__vector unsigned long)pinfo,
++			(__vector unsigned long)pinfo_ft_mask);
+ 
+ 		/* Check if flow tag is non-zero then set RTE_MBUF_F_RX_FDIR. */
+-		invalid_mask = (vector unsigned char)
+-			vec_cmpeq((vector unsigned int)flow_tag,
+-			(vector unsigned int)zero);
+-		ol_flags = (vector unsigned char)
+-			vec_or((vector unsigned long)ol_flags,
+-			(vector unsigned long)
+-			vec_andc((vector unsigned long)fdir_flags,
+-			(vector unsigned long)invalid_mask));
++		invalid_mask = (__vector unsigned char)
++			vec_cmpeq((__vector unsigned int)flow_tag,
++			(__vector unsigned int)zero);
++		ol_flags = (__vector unsigned char)
++			vec_or((__vector unsigned long)ol_flags,
++			(__vector unsigned long)
++			vec_andc((__vector unsigned long)fdir_flags,
++			(__vector unsigned long)invalid_mask));
+ 
+ 		/* Mask out invalid entries. */
+-		fdir_id_flags = (vector unsigned char)
+-			vec_andc((vector unsigned long)fdir_id_flags,
+-			(vector unsigned long)invalid_mask);
++		fdir_id_flags = (__vector unsigned char)
++			vec_andc((__vector unsigned long)fdir_id_flags,
++			(__vector unsigned long)invalid_mask);
+ 
+ 		/* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
+-		ol_flags = (vector unsigned char)
+-			vec_or((vector unsigned long)ol_flags,
+-			(vector unsigned long)
+-			vec_andc((vector unsigned long)fdir_id_flags,
+-			(vector unsigned long)
+-			vec_cmpeq((vector unsigned int)flow_tag,
+-			(vector unsigned int)pinfo_ft_mask)));
++		ol_flags = (__vector unsigned char)
++			vec_or((__vector unsigned long)ol_flags,
++			(__vector unsigned long)
++			vec_andc((__vector unsigned long)fdir_id_flags,
++			(__vector unsigned long)
++			vec_cmpeq((__vector unsigned int)flow_tag,
++			(__vector unsigned int)pinfo_ft_mask)));
+ 	}
+ 	/*
+ 	 * Merge the two fields to generate the following:
+@@ -649,39 +649,39 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
+ 	 * bit[16]    = tunneled
+ 	 * bit[17]    = outer_l3_type
+ 	 */
+-	ptype = (vector unsigned char)
+-		vec_and((vector unsigned long)ptype,
+-		(vector unsigned long)ptype_mask);
+-	pinfo = (vector unsigned char)
+-		vec_and((vector unsigned long)pinfo,
+-		(vector unsigned long)pinfo_mask);
+-	pinfo = (vector unsigned char)
+-		vec_sl((vector unsigned int)pinfo,
+-		(vector unsigned int){16, 16, 16, 16});
++	ptype = (__vector unsigned char)
++		vec_and((__vector unsigned long)ptype,
++		(__vector unsigned long)ptype_mask);
++	pinfo = (__vector unsigned char)
++		vec_and((__vector unsigned long)pinfo,
++		(__vector unsigned long)pinfo_mask);
++	pinfo = (__vector unsigned char)
++		vec_sl((__vector unsigned int)pinfo,
++		(__vector unsigned int){16, 16, 16, 16});
+ 
+ 	/* Make pinfo hold the merged fields for ol_flags calculation. */
+-	pinfo = (vector unsigned char)
+-		vec_or((vector unsigned long)ptype,
+-		(vector unsigned long)pinfo);
+-	ptype = (vector unsigned char)
+-		vec_sr((vector unsigned int)pinfo,
+-		(vector unsigned int){10, 10, 10, 10});
+-	ptype = (vector unsigned char)
+-		vec_packs((vector unsigned int)ptype,
+-		(vector unsigned int)zero);
++	pinfo = (__vector unsigned char)
++		vec_or((__vector unsigned long)ptype,
++		(__vector unsigned long)pinfo);
++	ptype = (__vector unsigned char)
++		vec_sr((__vector unsigned int)pinfo,
++		(__vector unsigned int){10, 10, 10, 10});
++	ptype = (__vector unsigned char)
++		vec_packs((__vector unsigned int)ptype,
++		(__vector unsigned int)zero);
+ 
+ 	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
+-	op_err = (vector unsigned char)
+-		vec_sr((vector unsigned short)op_err,
+-		(vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
+-	ptype = (vector unsigned char)
+-		vec_or((vector unsigned long)ptype,
+-		(vector unsigned long)op_err);
+-
+-	pt_idx0 = (uint8_t)((vector unsigned char)ptype)[0];
+-	pt_idx1 = (uint8_t)((vector unsigned char)ptype)[2];
+-	pt_idx2 = (uint8_t)((vector unsigned char)ptype)[4];
+-	pt_idx3 = (uint8_t)((vector unsigned char)ptype)[6];
++	op_err = (__vector unsigned char)
++		vec_sr((__vector unsigned short)op_err,
++		(__vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
++	ptype = (__vector unsigned char)
++		vec_or((__vector unsigned long)ptype,
++		(__vector unsigned long)op_err);
++
++	pt_idx0 = (uint8_t)((__vector unsigned char)ptype)[0];
++	pt_idx1 = (uint8_t)((__vector unsigned char)ptype)[2];
++	pt_idx2 = (uint8_t)((__vector unsigned char)ptype)[4];
++	pt_idx3 = (uint8_t)((__vector unsigned char)ptype)[6];
+ 
+ 	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ 		!!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+@@ -693,63 +693,63 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
+ 		!!(pt_idx3 & (1 << 6)) * rxq->tunnel;
+ 
+ 	/* Fill flags for checksum and VLAN. */
+-	pinfo = (vector unsigned char)
+-		vec_and((vector unsigned long)pinfo,
+-		(vector unsigned long)ptype_ol_mask);
++	pinfo = (__vector unsigned char)
++		vec_and((__vector unsigned long)pinfo,
++		(__vector unsigned long)ptype_ol_mask);
+ 	pinfo = vec_perm(cv_flag_sel, zero, pinfo);
+ 
+ 	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
+-	cv_flags = (vector unsigned char)
+-		vec_sl((vector unsigned int)pinfo,
+-		(vector unsigned int){9, 9, 9, 9});
+-	cv_flags = (vector unsigned char)
+-		vec_or((vector unsigned long)pinfo,
+-		(vector unsigned long)cv_flags);
++	cv_flags = (__vector unsigned char)
++		vec_sl((__vector unsigned int)pinfo,
++		(__vector unsigned int){9, 9, 9, 9});
++	cv_flags = (__vector unsigned char)
++		vec_or((__vector unsigned long)pinfo,
++		(__vector unsigned long)cv_flags);
+ 
+ 	/* Move back flags to start from byte[0]. */
+-	cv_flags = (vector unsigned char)
+-		vec_sr((vector unsigned int)cv_flags,
+-		(vector unsigned int){8, 8, 8, 8});
++	cv_flags = (__vector unsigned char)
++		vec_sr((__vector unsigned int)cv_flags,
++		(__vector unsigned int){8, 8, 8, 8});
+ 
+ 	/* Mask out garbage bits. */
+-	cv_flags = (vector unsigned char)
+-		vec_and((vector unsigned long)cv_flags,
+-		(vector unsigned long)cv_mask);
++	cv_flags = (__vector unsigned char)
++		vec_and((__vector unsigned long)cv_flags,
++		(__vector unsigned long)cv_mask);
+ 
+ 	/* Merge to ol_flags. */
+-	ol_flags = (vector unsigned char)
+-		vec_or((vector unsigned long)ol_flags,
+-		(vector unsigned long)cv_flags);
++	ol_flags = (__vector unsigned char)
++		vec_or((__vector unsigned long)ol_flags,
++		(__vector unsigned long)cv_flags);
+ 
+ 	/* Merge mbuf_init and ol_flags. */
+-	rearm0 = (vector unsigned char)
+-		vec_sel((vector unsigned short)mbuf_init,
+-		(vector unsigned short)
+-		vec_slo((vector unsigned short)ol_flags,
+-		(vector unsigned char){64}), rearm_sel_mask);
+-	rearm1 = (vector unsigned char)
+-		vec_sel((vector unsigned short)mbuf_init,
+-		(vector unsigned short)
+-		vec_slo((vector unsigned short)ol_flags,
+-		(vector unsigned char){32}), rearm_sel_mask);
+-	rearm2 = (vector unsigned char)
+-		vec_sel((vector unsigned short)mbuf_init,
+-		(vector unsigned short)ol_flags, rearm_sel_mask);
+-	rearm3 = (vector unsigned char)
+-		vec_sel((vector unsigned short)mbuf_init,
+-		(vector unsigned short)
+-		vec_sro((vector unsigned short)ol_flags,
+-		(vector unsigned char){32}), rearm_sel_mask);
++	rearm0 = (__vector unsigned char)
++		vec_sel((__vector unsigned short)mbuf_init,
++		(__vector unsigned short)
++		vec_slo((__vector unsigned short)ol_flags,
++		(__vector unsigned char){64}), rearm_sel_mask);
++	rearm1 = (__vector unsigned char)
++		vec_sel((__vector unsigned short)mbuf_init,
++		(__vector unsigned short)
++		vec_slo((__vector unsigned short)ol_flags,
++		(__vector unsigned char){32}), rearm_sel_mask);
++	rearm2 = (__vector unsigned char)
++		vec_sel((__vector unsigned short)mbuf_init,
++		(__vector unsigned short)ol_flags, rearm_sel_mask);
++	rearm3 = (__vector unsigned char)
++		vec_sel((__vector unsigned short)mbuf_init,
++		(__vector unsigned short)
++		vec_sro((__vector unsigned short)ol_flags,
++		(__vector unsigned char){32}), rearm_sel_mask);
+ 
+ 	/* Write 8B rearm_data and 8B ol_flags. */
+ 	vec_vsx_st(rearm0, 0,
+-		(vector unsigned char *)&pkts[0]->rearm_data);
++		(__vector unsigned char *)&pkts[0]->rearm_data);
+ 	vec_vsx_st(rearm1, 0,
+-		(vector unsigned char *)&pkts[1]->rearm_data);
++		(__vector unsigned char *)&pkts[1]->rearm_data);
+ 	vec_vsx_st(rearm2, 0,
+-		(vector unsigned char *)&pkts[2]->rearm_data);
++		(__vector unsigned char *)&pkts[2]->rearm_data);
+ 	vec_vsx_st(rearm3, 0,
+-		(vector unsigned char *)&pkts[3]->rearm_data);
++		(__vector unsigned char *)&pkts[3]->rearm_data);
+ }
+ 
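
The hunk above, like the rest of this file's changes, is a mechanical rename of the bare `vector` keyword to `__vector` throughout the Power/AltiVec Rx path; `vector` is a contextual keyword provided by <altivec.h> that can collide with other uses of the identifier, while the double-underscore spelling is always reserved for the implementation. A minimal sketch of the cast-per-intrinsic style the file uses, assuming a POWER target built with VSX enabled (illustration only, not driver code):

    #include <altivec.h>

    /* Operate on raw bytes, casting to the element width each typed
     * AltiVec intrinsic expects and back, as the patched code does. */
    static __vector unsigned char
    mask_or(__vector unsigned char a, __vector unsigned char b)
    {
            return (__vector unsigned char)
                    vec_or((__vector unsigned long)a,
                           (__vector unsigned long)b);
    }
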
+ /**
+@@ -788,31 +788,31 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
+ 	uint16_t nocmp_n = 0;
+ 	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
+-	const vector unsigned char zero = (vector unsigned char){0};
+-	const vector unsigned char ones = vec_splat_u8(-1);
+-	const vector unsigned char owner_check =
+-		(vector unsigned char)(vector unsigned long){
++	const __vector unsigned char zero = (__vector unsigned char){0};
++	const __vector unsigned char ones = vec_splat_u8(-1);
++	const __vector unsigned char owner_check =
++		(__vector unsigned char)(__vector unsigned long){
+ 		0x0100000001000000LL, 0x0100000001000000LL};
+-	const vector unsigned char opcode_check =
+-		(vector unsigned char)(vector unsigned long){
++	const __vector unsigned char opcode_check =
++		(__vector unsigned char)(__vector unsigned long){
+ 		0xf0000000f0000000LL, 0xf0000000f0000000LL};
+-	const vector unsigned char format_check =
+-		(vector unsigned char)(vector unsigned long){
++	const __vector unsigned char format_check =
++		(__vector unsigned char)(__vector unsigned long){
+ 		0x0c0000000c000000LL, 0x0c0000000c000000LL};
+-	const vector unsigned char resp_err_check =
+-		(vector unsigned char)(vector unsigned long){
++	const __vector unsigned char resp_err_check =
++		(__vector unsigned char)(__vector unsigned long){
+ 		0xe0000000e0000000LL, 0xe0000000e0000000LL};
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+ 	uint32_t rcvd_byte = 0;
+ 	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+-	const vector unsigned char len_shuf_mask = (vector unsigned char){
++	const __vector unsigned char len_shuf_mask = (__vector unsigned char){
+ 		 1,  0,  5,  4,
+ 		 9,  8, 13, 12,
+ 		-1, -1, -1, -1,
+ 		-1, -1, -1, -1};
+ #endif
+ 	/* Mask to shuffle from extracted CQE to mbuf. */
+-	const vector unsigned char shuf_mask = (vector unsigned char){
++	const __vector unsigned char shuf_mask = (__vector unsigned char){
+ 		 5,  4,           /* bswap16, pkt_len */
+ 		-1, -1,           /* zero out 2nd half of pkt_len */
+ 		 5,  4,           /* bswap16, data_len */
+@@ -821,22 +821,22 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 		 1,  2,  3, -1};  /* fdir.hi */
+ 	/* Mask to blend from the last Qword to the first DQword. */
+-	const vector unsigned char blend_mask = (vector unsigned char){
++	const __vector unsigned char blend_mask = (__vector unsigned char){
+ 		-1,  0,  0,  0,
+ 		 0,  0,  0,  0,
+ 		-1, -1, -1, -1,
+ 		-1, -1, -1, -1};
+-	const vector unsigned char crc_adj =
+-		(vector unsigned char)(vector unsigned short){
++	const __vector unsigned char crc_adj =
++		(__vector unsigned char)(__vector unsigned short){
+ 		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
+ 		rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0};
+-	const vector unsigned char flow_mark_adj =
+-		(vector unsigned char)(vector unsigned int){
++	const __vector unsigned char flow_mark_adj =
++		(__vector unsigned char)(__vector unsigned int){
+ 		0, 0, 0, rxq->mark * (-1)};
+-	const vector unsigned short cqe_sel_mask1 =
+-		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
+-	const vector unsigned short cqe_sel_mask2 =
+-		(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};
++	const __vector unsigned short cqe_sel_mask1 =
++		(__vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
++	const __vector unsigned short cqe_sel_mask2 =
++		(__vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};
+ 
+ 	/*
+ 	 * A. load first Qword (8 bytes) in one loop.
+@@ -861,30 +861,30 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 	for (pos = 0;
+ 	     pos < pkts_n;
+ 	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
+-		vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
+-		vector unsigned char cqe_tmp1, cqe_tmp2;
+-		vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
+-		vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
+-		vector unsigned char opcode, owner_mask, invalid_mask;
+-		vector unsigned char comp_mask;
+-		vector unsigned char mask;
++		__vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
++		__vector unsigned char cqe_tmp1, cqe_tmp2;
++		__vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
++		__vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
++		__vector unsigned char opcode, owner_mask, invalid_mask;
++		__vector unsigned char comp_mask;
++		__vector unsigned char mask;
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-		const vector unsigned char lower_half = {
++		const __vector unsigned char lower_half = {
+ 			0, 1, 4, 5, 8, 9, 12, 13,
+ 			16, 17, 20, 21, 24, 25, 28, 29};
+-		const vector unsigned char upper_half = {
++		const __vector unsigned char upper_half = {
+ 			2, 3, 6, 7, 10, 11, 14, 15,
+ 			18, 19, 22, 23, 26, 27, 30, 31};
+-		const vector unsigned long shmax = {64, 64};
+-		vector unsigned char byte_cnt;
+-		vector unsigned short left, right;
+-		vector unsigned long lshift;
+-		vector __attribute__((altivec(bool__)))
++		const __vector unsigned long shmax = {64, 64};
++		__vector unsigned char byte_cnt;
++		__vector unsigned short left, right;
++		__vector unsigned long lshift;
++		__vector __attribute__((altivec(bool__)))
+ 			unsigned long shmask;
+ #endif
+-		vector unsigned char mbp1, mbp2;
+-		vector unsigned char p =
+-			(vector unsigned char)(vector unsigned short){
++		__vector unsigned char mbp1, mbp2;
++		__vector unsigned char p =
++			(__vector unsigned char)(__vector unsigned short){
+ 				0, 1, 2, 3, 0, 0, 0, 0};
+ 		unsigned int p1, p2, p3;
+ 
+@@ -897,295 +897,295 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 		}
+ 
+ 		/* A.0 do not cross the end of CQ. */
+-		mask = (vector unsigned char)(vector unsigned long){
++		mask = (__vector unsigned char)(__vector unsigned long){
+ 			(pkts_n - pos) * sizeof(uint16_t) * 8, 0};
+ 
+ 		{
+-			vector unsigned long lshift;
+-			vector __attribute__((altivec(bool__)))
++			__vector unsigned long lshift;
++			__vector __attribute__((altivec(bool__)))
+ 				unsigned long shmask;
+-			const vector unsigned long shmax = {64, 64};
++			const __vector unsigned long shmax = {64, 64};
+ 
+-			lshift = vec_splat((vector unsigned long)mask, 0);
++			lshift = vec_splat((__vector unsigned long)mask, 0);
+ 			shmask = vec_cmpgt(shmax, lshift);
+-			mask = (vector unsigned char)
+-				vec_sl((vector unsigned long)ones, lshift);
+-			mask = (vector unsigned char)
+-				vec_sel((vector unsigned long)shmask,
+-				(vector unsigned long)mask, shmask);
++			mask = (__vector unsigned char)
++				vec_sl((__vector unsigned long)ones, lshift);
++			mask = (__vector unsigned char)
++				vec_sel((__vector unsigned long)shmask,
++				(__vector unsigned long)mask, shmask);
+ 		}
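
The braced block above guards a variable left shift: shifting a 64-bit lane by 64 or more is undefined, so the code compares the shift count with 64 (shmax) and selects zero for the lanes that would overflow. A scalar sketch of the per-lane result (illustration only):

    #include <stdint.h>

    static inline uint64_t
    ones_shl_guarded(uint64_t n)
    {
            /* ~0 << n for n < 64, pinned to 0 where the shift would be
             * undefined -- the same effect as the vec_cmpgt/vec_sel pair. */
            return (n < 64) ? (~0ULL << n) : 0;
    }
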
+ 
+-		p = (vector unsigned char)
+-			vec_andc((vector unsigned long)p,
+-			(vector unsigned long)mask);
++		p = (__vector unsigned char)
++			vec_andc((__vector unsigned long)p,
++			(__vector unsigned long)mask);
+ 
+ 		/* A.1 load cqes. */
+-		p3 = (unsigned int)((vector unsigned short)p)[3];
+-		cqes[3] = (vector unsigned char)(vector unsigned long){
++		p3 = (unsigned int)((__vector unsigned short)p)[3];
++		cqes[3] = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos + p3].sop_drop_qpn, 0LL};
+ 		rte_compiler_barrier();
+ 
+-		p2 = (unsigned int)((vector unsigned short)p)[2];
+-		cqes[2] = (vector unsigned char)(vector unsigned long){
++		p2 = (unsigned int)((__vector unsigned short)p)[2];
++		cqes[2] = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos + p2].sop_drop_qpn, 0LL};
+ 		rte_compiler_barrier();
+ 
+ 		/* B.1 load mbuf pointers. */
+-		mbp1 = (vector unsigned char)vec_vsx_ld(0,
++		mbp1 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&elts[pos]);
+-		mbp2 = (vector unsigned char)vec_vsx_ld(0,
++		mbp2 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&elts[pos + 2]);
+ 
+ 		/* A.1 load a block having op_own. */
+-		p1 = (unsigned int)((vector unsigned short)p)[1];
+-		cqes[1] = (vector unsigned char)(vector unsigned long){
++		p1 = (unsigned int)((__vector unsigned short)p)[1];
++		cqes[1] = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos + p1].sop_drop_qpn, 0LL};
+ 		rte_compiler_barrier();
+ 
+-		cqes[0] = (vector unsigned char)(vector unsigned long){
++		cqes[0] = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos].sop_drop_qpn, 0LL};
+ 		rte_compiler_barrier();
+ 
+ 		/* B.2 copy mbuf pointers. */
+-		*(vector unsigned char *)&pkts[pos] = mbp1;
+-		*(vector unsigned char *)&pkts[pos + 2] = mbp2;
++		*(__vector unsigned char *)&pkts[pos] = mbp1;
++		*(__vector unsigned char *)&pkts[pos + 2] = mbp2;
+ 		rte_io_rmb();
+ 
+ 		/* C.1 load remaining CQE data and extract necessary fields. */
+-		cqe_tmp2 = *(vector unsigned char *)
++		cqe_tmp2 = *(__vector unsigned char *)
+ 			&cq[pos + p3].pkt_info;
+-		cqe_tmp1 = *(vector unsigned char *)
++		cqe_tmp1 = *(__vector unsigned char *)
+ 			&cq[pos + p2].pkt_info;
+ 		cqes[3] = vec_sel(cqes[3], cqe_tmp2, blend_mask);
+ 		cqes[2] = vec_sel(cqes[2], cqe_tmp1, blend_mask);
+-		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
++		cqe_tmp2 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&cq[pos + p3].csum);
+-		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
++		cqe_tmp1 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&cq[pos + p2].csum);
+-		cqes[3] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[3],
+-			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
+-		cqes[2] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[2],
+-			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
+-		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
++		cqes[3] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[3],
++			(__vector unsigned short)cqe_tmp2, cqe_sel_mask1);
++		cqes[2] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[2],
++			(__vector unsigned short)cqe_tmp1, cqe_sel_mask1);
++		cqe_tmp2 = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos + p3].rsvd4[2], 0LL};
+-		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
++		cqe_tmp1 = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos + p2].rsvd4[2], 0LL};
+-		cqes[3] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[3],
+-			(vector unsigned short)cqe_tmp2,
+-			(vector unsigned short)cqe_sel_mask2);
+-		cqes[2] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[2],
+-			(vector unsigned short)cqe_tmp1,
+-			(vector unsigned short)cqe_sel_mask2);
++		cqes[3] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[3],
++			(__vector unsigned short)cqe_tmp2,
++			(__vector unsigned short)cqe_sel_mask2);
++		cqes[2] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[2],
++			(__vector unsigned short)cqe_tmp1,
++			(__vector unsigned short)cqe_sel_mask2);
+ 
+ 		/* C.2 generate final structure for mbuf with swapping bytes. */
+ 		pkt_mb3 = vec_perm(cqes[3], zero, shuf_mask);
+ 		pkt_mb2 = vec_perm(cqes[2], zero, shuf_mask);
+ 
+ 		/* C.3 adjust CRC length. */
+-		pkt_mb3 = (vector unsigned char)
+-			((vector unsigned short)pkt_mb3 -
+-			(vector unsigned short)crc_adj);
+-		pkt_mb2 = (vector unsigned char)
+-			((vector unsigned short)pkt_mb2 -
+-			(vector unsigned short)crc_adj);
++		pkt_mb3 = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb3 -
++			(__vector unsigned short)crc_adj);
++		pkt_mb2 = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb2 -
++			(__vector unsigned short)crc_adj);
+ 
+ 		/* C.4 adjust flow mark. */
+-		pkt_mb3 = (vector unsigned char)
+-			((vector unsigned int)pkt_mb3 +
+-			(vector unsigned int)flow_mark_adj);
+-		pkt_mb2 = (vector unsigned char)
+-			((vector unsigned int)pkt_mb2 +
+-			(vector unsigned int)flow_mark_adj);
++		pkt_mb3 = (__vector unsigned char)
++			((__vector unsigned int)pkt_mb3 +
++			(__vector unsigned int)flow_mark_adj);
++		pkt_mb2 = (__vector unsigned char)
++			((__vector unsigned int)pkt_mb2 +
++			(__vector unsigned int)flow_mark_adj);
+ 
+ 		/* D.1 fill in mbuf - rx_descriptor_fields1. */
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&pkts[pos + 3]->pkt_len = pkt_mb3;
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&pkts[pos + 2]->pkt_len = pkt_mb2;
+ 
+ 		/* E.1 extract op_own field. */
+-		op_own_tmp2 = (vector unsigned char)
+-			vec_mergeh((vector unsigned int)cqes[2],
+-			(vector unsigned int)cqes[3]);
++		op_own_tmp2 = (__vector unsigned char)
++			vec_mergeh((__vector unsigned int)cqes[2],
++			(__vector unsigned int)cqes[3]);
+ 
+ 		/* C.1 load remaining CQE data and extract necessary fields. */
+-		cqe_tmp2 = *(vector unsigned char *)
++		cqe_tmp2 = *(__vector unsigned char *)
+ 			&cq[pos + p1].pkt_info;
+-		cqe_tmp1 = *(vector unsigned char *)
++		cqe_tmp1 = *(__vector unsigned char *)
+ 			&cq[pos].pkt_info;
+ 		cqes[1] = vec_sel(cqes[1], cqe_tmp2, blend_mask);
+ 		cqes[0] = vec_sel(cqes[0], cqe_tmp1, blend_mask);
+-		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
++		cqe_tmp2 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&cq[pos + p1].csum);
+-		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
++		cqe_tmp1 = (__vector unsigned char)vec_vsx_ld(0,
+ 			(signed int const *)&cq[pos].csum);
+-		cqes[1] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[1],
+-			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
+-		cqes[0] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[0],
+-			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
+-		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
++		cqes[1] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[1],
++			(__vector unsigned short)cqe_tmp2, cqe_sel_mask1);
++		cqes[0] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[0],
++			(__vector unsigned short)cqe_tmp1, cqe_sel_mask1);
++		cqe_tmp2 = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos + p1].rsvd4[2], 0LL};
+-		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
++		cqe_tmp1 = (__vector unsigned char)(__vector unsigned long){
+ 			*(__rte_aligned(8) unsigned long *)
+ 			&cq[pos].rsvd4[2], 0LL};
+-		cqes[1] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[1],
+-			(vector unsigned short)cqe_tmp2, cqe_sel_mask2);
+-		cqes[0] = (vector unsigned char)
+-			vec_sel((vector unsigned short)cqes[0],
+-			(vector unsigned short)cqe_tmp1, cqe_sel_mask2);
++		cqes[1] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[1],
++			(__vector unsigned short)cqe_tmp2, cqe_sel_mask2);
++		cqes[0] = (__vector unsigned char)
++			vec_sel((__vector unsigned short)cqes[0],
++			(__vector unsigned short)cqe_tmp1, cqe_sel_mask2);
+ 
+ 		/* C.2 generate final structure for mbuf with swapping bytes. */
+ 		pkt_mb1 = vec_perm(cqes[1], zero, shuf_mask);
+ 		pkt_mb0 = vec_perm(cqes[0], zero, shuf_mask);
+ 
+ 		/* C.3 adjust CRC length. */
+-		pkt_mb1 = (vector unsigned char)
+-			((vector unsigned short)pkt_mb1 -
+-			(vector unsigned short)crc_adj);
+-		pkt_mb0 = (vector unsigned char)
+-			((vector unsigned short)pkt_mb0 -
+-			(vector unsigned short)crc_adj);
++		pkt_mb1 = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb1 -
++			(__vector unsigned short)crc_adj);
++		pkt_mb0 = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb0 -
++			(__vector unsigned short)crc_adj);
+ 
+ 		/* C.4 adjust flow mark. */
+-		pkt_mb1 = (vector unsigned char)
+-			((vector unsigned int)pkt_mb1 +
+-			(vector unsigned int)flow_mark_adj);
+-		pkt_mb0 = (vector unsigned char)
+-			((vector unsigned int)pkt_mb0 +
+-			(vector unsigned int)flow_mark_adj);
++		pkt_mb1 = (__vector unsigned char)
++			((__vector unsigned int)pkt_mb1 +
++			(__vector unsigned int)flow_mark_adj);
++		pkt_mb0 = (__vector unsigned char)
++			((__vector unsigned int)pkt_mb0 +
++			(__vector unsigned int)flow_mark_adj);
+ 
+ 		/* E.1 extract op_own byte. */
+-		op_own_tmp1 = (vector unsigned char)
+-			vec_mergeh((vector unsigned int)cqes[0],
+-			(vector unsigned int)cqes[1]);
+-		op_own = (vector unsigned char)
+-			vec_mergel((vector unsigned long)op_own_tmp1,
+-			(vector unsigned long)op_own_tmp2);
++		op_own_tmp1 = (__vector unsigned char)
++			vec_mergeh((__vector unsigned int)cqes[0],
++			(__vector unsigned int)cqes[1]);
++		op_own = (__vector unsigned char)
++			vec_mergel((__vector unsigned long)op_own_tmp1,
++			(__vector unsigned long)op_own_tmp2);
+ 
+ 		/* D.1 fill in mbuf - rx_descriptor_fields1. */
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&pkts[pos + 1]->pkt_len = pkt_mb1;
+-		*(vector unsigned char *)
++		*(__vector unsigned char *)
+ 			&pkts[pos]->pkt_len = pkt_mb0;
+ 
+ 		/* E.2 flip owner bit to mark CQEs from last round. */
+-		owner_mask = (vector unsigned char)
+-			vec_and((vector unsigned long)op_own,
+-			(vector unsigned long)owner_check);
++		owner_mask = (__vector unsigned char)
++			vec_and((__vector unsigned long)op_own,
++			(__vector unsigned long)owner_check);
+ 		if (ownership)
+-			owner_mask = (vector unsigned char)
+-				vec_xor((vector unsigned long)owner_mask,
+-				(vector unsigned long)owner_check);
+-		owner_mask = (vector unsigned char)
+-			vec_cmpeq((vector unsigned int)owner_mask,
+-			(vector unsigned int)owner_check);
+-		owner_mask = (vector unsigned char)
+-			vec_packs((vector unsigned int)owner_mask,
+-			(vector unsigned int)zero);
++			owner_mask = (__vector unsigned char)
++				vec_xor((__vector unsigned long)owner_mask,
++				(__vector unsigned long)owner_check);
++		owner_mask = (__vector unsigned char)
++			vec_cmpeq((__vector unsigned int)owner_mask,
++			(__vector unsigned int)owner_check);
++		owner_mask = (__vector unsigned char)
++			vec_packs((__vector unsigned int)owner_mask,
++			(__vector unsigned int)zero);
+ 
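
Step E.2 vectorizes the usual completion-queue ownership test: hardware flips the owner bit on every wrap of the CQ, so a CQE belongs to the current sweep when its owner bit matches the parity of the consumer index's wrap count. A scalar sketch, assuming the owner bit is the low bit of op_own (names illustrative, not driver code):

    #include <stdint.h>

    static inline int
    cqe_is_current(uint8_t op_own, uint32_t cq_ci, uint32_t q_mask)
    {
            unsigned int sw_owner = !!(cq_ci & (q_mask + 1)); /* wrap parity */
            return (op_own & 1) == sw_owner;
    }
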
+ 		/* E.3 get mask for invalidated CQEs. */
+-		opcode = (vector unsigned char)
+-			vec_and((vector unsigned long)op_own,
+-			(vector unsigned long)opcode_check);
+-		invalid_mask = (vector unsigned char)
+-			vec_cmpeq((vector unsigned int)opcode_check,
+-			(vector unsigned int)opcode);
+-		invalid_mask = (vector unsigned char)
+-			vec_packs((vector unsigned int)invalid_mask,
+-			(vector unsigned int)zero);
++		opcode = (__vector unsigned char)
++			vec_and((__vector unsigned long)op_own,
++			(__vector unsigned long)opcode_check);
++		invalid_mask = (__vector unsigned char)
++			vec_cmpeq((__vector unsigned int)opcode_check,
++			(__vector unsigned int)opcode);
++		invalid_mask = (__vector unsigned char)
++			vec_packs((__vector unsigned int)invalid_mask,
++			(__vector unsigned int)zero);
+ 
+ 		/* E.4 mask out beyond boundary. */
+-		invalid_mask = (vector unsigned char)
+-			vec_or((vector unsigned long)invalid_mask,
+-			(vector unsigned long)mask);
++		invalid_mask = (__vector unsigned char)
++			vec_or((__vector unsigned long)invalid_mask,
++			(__vector unsigned long)mask);
+ 
+ 		/* E.5 merge invalid_mask with invalid owner. */
+-		invalid_mask = (vector unsigned char)
+-			vec_or((vector unsigned long)invalid_mask,
+-			(vector unsigned long)owner_mask);
++		invalid_mask = (__vector unsigned char)
++			vec_or((__vector unsigned long)invalid_mask,
++			(__vector unsigned long)owner_mask);
+ 
+ 		/* F.1 find compressed CQE format. */
+-		comp_mask = (vector unsigned char)
+-			vec_and((vector unsigned long)op_own,
+-			(vector unsigned long)format_check);
+-		comp_mask = (vector unsigned char)
+-			vec_cmpeq((vector unsigned int)comp_mask,
+-			(vector unsigned int)format_check);
+-		comp_mask = (vector unsigned char)
+-			vec_packs((vector unsigned int)comp_mask,
+-			(vector unsigned int)zero);
++		comp_mask = (__vector unsigned char)
++			vec_and((__vector unsigned long)op_own,
++			(__vector unsigned long)format_check);
++		comp_mask = (__vector unsigned char)
++			vec_cmpeq((__vector unsigned int)comp_mask,
++			(__vector unsigned int)format_check);
++		comp_mask = (__vector unsigned char)
++			vec_packs((__vector unsigned int)comp_mask,
++			(__vector unsigned int)zero);
+ 
+ 		/* F.2 mask out invalid entries. */
+-		comp_mask = (vector unsigned char)
+-			vec_andc((vector unsigned long)comp_mask,
+-			(vector unsigned long)invalid_mask);
+-		comp_idx = ((vector unsigned long)comp_mask)[0];
++		comp_mask = (__vector unsigned char)
++			vec_andc((__vector unsigned long)comp_mask,
++			(__vector unsigned long)invalid_mask);
++		comp_idx = ((__vector unsigned long)comp_mask)[0];
+ 
+ 		/* F.3 get the first compressed CQE. */
+ 		comp_idx = comp_idx ? __builtin_ctzll(comp_idx) /
+ 			(sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP;
+ 
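
The division by sizeof(uint16_t) * 8 works because vec_packs left one 16-bit all-ones/all-zeros lane per CQE in the low 64 bits of comp_mask, so the trailing-zero count locates the first compressed entry. A scalar sketch, assuming MLX5_VPMD_DESCS_PER_LOOP is 4:

    #include <stdint.h>

    static inline unsigned int
    first_set_lane16(uint64_t mask) /* four 16-bit lanes */
    {
            return mask ? __builtin_ctzll(mask) / 16 : 4; /* 4 = none set */
    }
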
+ 		/* E.6 mask out entries after the compressed CQE. */
+-		mask = (vector unsigned char)(vector unsigned long){
++		mask = (__vector unsigned char)(__vector unsigned long){
+ 			(comp_idx * sizeof(uint16_t) * 8), 0};
+-		lshift = vec_splat((vector unsigned long)mask, 0);
++		lshift = vec_splat((__vector unsigned long)mask, 0);
+ 		shmask = vec_cmpgt(shmax, lshift);
+-		mask = (vector unsigned char)
+-			vec_sl((vector unsigned long)ones, lshift);
+-		mask = (vector unsigned char)
+-			vec_sel((vector unsigned long)shmask,
+-			(vector unsigned long)mask, shmask);
+-		invalid_mask = (vector unsigned char)
+-			vec_or((vector unsigned long)invalid_mask,
+-			(vector unsigned long)mask);
++		mask = (__vector unsigned char)
++			vec_sl((__vector unsigned long)ones, lshift);
++		mask = (__vector unsigned char)
++			vec_sel((__vector unsigned long)shmask,
++			(__vector unsigned long)mask, shmask);
++		invalid_mask = (__vector unsigned char)
++			vec_or((__vector unsigned long)invalid_mask,
++			(__vector unsigned long)mask);
+ 
+ 		/* E.7 count non-compressed valid CQEs. */
+-		n = ((vector unsigned long)invalid_mask)[0];
++		n = ((__vector unsigned long)invalid_mask)[0];
+ 		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
+ 			MLX5_VPMD_DESCS_PER_LOOP;
+ 		nocmp_n += n;
+ 
+ 		/* D.2 get the final invalid mask. */
+-		mask = (vector unsigned char)(vector unsigned long){
++		mask = (__vector unsigned char)(__vector unsigned long){
+ 			(n * sizeof(uint16_t) * 8), 0};
+-		lshift = vec_splat((vector unsigned long)mask, 0);
++		lshift = vec_splat((__vector unsigned long)mask, 0);
+ 		shmask = vec_cmpgt(shmax, lshift);
+-		mask = (vector unsigned char)
+-			vec_sl((vector unsigned long)ones, lshift);
+-		mask = (vector unsigned char)
+-			vec_sel((vector unsigned long)shmask,
+-			(vector unsigned long)mask, shmask);
+-		invalid_mask = (vector unsigned char)
+-			vec_or((vector unsigned long)invalid_mask,
+-			(vector unsigned long)mask);
++		mask = (__vector unsigned char)
++			vec_sl((__vector unsigned long)ones, lshift);
++		mask = (__vector unsigned char)
++			vec_sel((__vector unsigned long)shmask,
++			(__vector unsigned long)mask, shmask);
++		invalid_mask = (__vector unsigned char)
++			vec_or((__vector unsigned long)invalid_mask,
++			(__vector unsigned long)mask);
+ 
+ 		/* D.3 check error in opcode. */
+-		opcode = (vector unsigned char)
+-			vec_cmpeq((vector unsigned int)resp_err_check,
+-			(vector unsigned int)opcode);
+-		opcode = (vector unsigned char)
+-			vec_packs((vector unsigned int)opcode,
+-			(vector unsigned int)zero);
+-		opcode = (vector unsigned char)
+-			vec_andc((vector unsigned long)opcode,
+-			(vector unsigned long)invalid_mask);
++		opcode = (__vector unsigned char)
++			vec_cmpeq((__vector unsigned int)resp_err_check,
++			(__vector unsigned int)opcode);
++		opcode = (__vector unsigned char)
++			vec_packs((__vector unsigned int)opcode,
++			(__vector unsigned int)zero);
++		opcode = (__vector unsigned char)
++			vec_andc((__vector unsigned long)opcode,
++			(__vector unsigned long)invalid_mask);
+ 
+ 		/* D.4 mark if any error is set */
+-		*err |= ((vector unsigned long)opcode)[0];
++		*err |= ((__vector unsigned long)opcode)[0];
+ 
+ 		/* D.5 fill in mbuf - rearm_data and packet_type. */
+ 		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
+@@ -1230,7 +1230,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 			uint32_t mask = rxq->flow_meta_port_mask;
+ 			uint32_t metadata;
+ 
+-			/* This code is subject for futher optimization. */
++			/* This code is subject for further optimization. */
+ 			metadata = rte_be_to_cpu_32
+ 				(cq[pos].flow_table_metadata) & mask;
+ 			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
+@@ -1255,20 +1255,20 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+ 		/* Add up received bytes count. */
+ 		byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
+-		byte_cnt = (vector unsigned char)
+-			vec_andc((vector unsigned long)byte_cnt,
+-			(vector unsigned long)invalid_mask);
+-		left = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, lower_half);
+-		right = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, upper_half);
+-		byte_cnt = (vector unsigned char)vec_add(left, right);
+-		left = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, lower_half);
+-		right = vec_perm((vector unsigned short)byte_cnt,
+-			(vector unsigned short)zero, upper_half);
+-		byte_cnt = (vector unsigned char)vec_add(left, right);
+-		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
++		byte_cnt = (__vector unsigned char)
++			vec_andc((__vector unsigned long)byte_cnt,
++			(__vector unsigned long)invalid_mask);
++		left = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, lower_half);
++		right = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, upper_half);
++		byte_cnt = (__vector unsigned char)vec_add(left, right);
++		left = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, lower_half);
++		right = vec_perm((__vector unsigned short)byte_cnt,
++			(__vector unsigned short)zero, upper_half);
++		byte_cnt = (__vector unsigned char)vec_add(left, right);
++		rcvd_byte += ((__vector unsigned long)byte_cnt)[0];
+ #endif
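
The soft-counters block above is a horizontal sum: len_shuf_mask gathers the byte-swapped 16-bit packet lengths, invalid lanes are cleared, and two perm/add rounds fold the lanes pairwise until lane 0 holds the batch total added to rcvd_byte. The scalar equivalent of that reduction (illustration only):

    #include <stdint.h>

    static inline uint32_t
    sum_pkt_lengths(const uint16_t len[4])
    {
            return (len[0] + len[1]) + (len[2] + len[3]); /* two fold rounds */
    }
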
+ 
+ 		/*
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+index b1d16baa61..f7bbde4e0e 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+@@ -839,7 +839,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 			}
+ 		}
+ 		if (rxq->dynf_meta) {
+-			/* This code is subject for futher optimization. */
++			/* This code is subject for further optimization. */
+ 			int32_t offs = rxq->flow_meta_offset;
+ 			uint32_t mask = rxq->flow_meta_port_mask;
+ 
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+index f3d838389e..185d2695db 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+@@ -772,7 +772,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ 			}
+ 		}
+ 		if (rxq->dynf_meta) {
+-			/* This code is subject for futher optimization. */
++			/* This code is subject for further optimization. */
+ 			int32_t offs = rxq->flow_meta_offset;
+ 			uint32_t mask = rxq->flow_meta_port_mask;
+ 
+diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c
+index 732775954a..f64fa3587b 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_stats.c
++++ b/dpdk/drivers/net/mlx5/mlx5_stats.c
+@@ -114,18 +114,23 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ 		idx = rxq->idx;
+ 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-			tmp.q_ipackets[idx] += rxq->stats.ipackets;
+-			tmp.q_ibytes[idx] += rxq->stats.ibytes;
++			tmp.q_ipackets[idx] += rxq->stats.ipackets -
++				rxq->stats_reset.ipackets;
++			tmp.q_ibytes[idx] += rxq->stats.ibytes -
++				rxq->stats_reset.ibytes;
+ #endif
+ 			tmp.q_errors[idx] += (rxq->stats.idropped +
+-					      rxq->stats.rx_nombuf);
++					      rxq->stats.rx_nombuf) -
++					      (rxq->stats_reset.idropped +
++					      rxq->stats_reset.rx_nombuf);
+ 		}
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-		tmp.ipackets += rxq->stats.ipackets;
+-		tmp.ibytes += rxq->stats.ibytes;
++		tmp.ipackets += rxq->stats.ipackets - rxq->stats_reset.ipackets;
++		tmp.ibytes += rxq->stats.ibytes - rxq->stats_reset.ibytes;
+ #endif
+-		tmp.ierrors += rxq->stats.idropped;
+-		tmp.rx_nombuf += rxq->stats.rx_nombuf;
++		tmp.ierrors += rxq->stats.idropped - rxq->stats_reset.idropped;
++		tmp.rx_nombuf += rxq->stats.rx_nombuf -
++					rxq->stats_reset.rx_nombuf;
+ 	}
+ 	for (i = 0; (i != priv->txqs_n); ++i) {
+ 		struct mlx5_txq_data *txq = (*priv->txqs)[i];
+@@ -135,15 +140,17 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ 		idx = txq->idx;
+ 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-			tmp.q_opackets[idx] += txq->stats.opackets;
+-			tmp.q_obytes[idx] += txq->stats.obytes;
++			tmp.q_opackets[idx] += txq->stats.opackets -
++						txq->stats_reset.opackets;
++			tmp.q_obytes[idx] += txq->stats.obytes -
++						txq->stats_reset.obytes;
+ #endif
+ 		}
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+-		tmp.opackets += txq->stats.opackets;
+-		tmp.obytes += txq->stats.obytes;
++		tmp.opackets += txq->stats.opackets - txq->stats_reset.opackets;
++		tmp.obytes += txq->stats.obytes - txq->stats_reset.obytes;
+ #endif
+-		tmp.oerrors += txq->stats.oerrors;
++		tmp.oerrors += txq->stats.oerrors - txq->stats_reset.oerrors;
+ 	}
+ 	ret = mlx5_os_read_dev_stat(priv, "out_of_buffer", &tmp.imissed);
+ 	if (ret == 0) {
+@@ -185,13 +192,14 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
+ 
+ 		if (rxq_data == NULL)
+ 			continue;
+-		memset(&rxq_data->stats, 0, sizeof(struct mlx5_rxq_stats));
++		rxq_data->stats_reset = rxq_data->stats;
+ 	}
+ 	for (i = 0; (i != priv->txqs_n); ++i) {
+-		if ((*priv->txqs)[i] == NULL)
++		struct mlx5_txq_data *txq_data = (*priv->txqs)[i];
++
++		if (txq_data == NULL)
+ 			continue;
+-		memset(&(*priv->txqs)[i]->stats, 0,
+-		       sizeof(struct mlx5_txq_stats));
++		txq_data->stats_reset = txq_data->stats;
+ 	}
+ 	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
+ 	stats_ctrl->imissed = 0;
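
The mlx5_stats.c change replaces zeroing of the live counters with a baseline snapshot: the datapath increments its soft counters without synchronization, so a memset from the control path can race with an in-flight burst, whereas recording the current values in stats_reset and subtracting them on every read loses nothing. A reduced sketch of the idiom (types and names simplified for illustration):

    #include <stdint.h>

    struct q_stats { uint64_t packets, bytes; };

    struct queue {
            struct q_stats stats;        /* bumped by the datapath */
            struct q_stats stats_reset;  /* baseline taken at last reset */
    };

    static inline void
    q_reset(struct queue *q)
    {
            q->stats_reset = q->stats;   /* snapshot instead of memset */
    }

    static inline uint64_t
    q_packets(const struct queue *q)
    {
            return q->stats.packets - q->stats_reset.packets;
    }
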
+diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c
+index 74c9c0a4ff..2ba456ad7a 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_trigger.c
++++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c
+@@ -1205,11 +1205,18 @@ mlx5_dev_start(struct rte_eth_dev *dev)
+ 		priv->sh->port[priv->dev_port - 1].ih_port_id =
+ 					(uint32_t)dev->data->port_id;
+ 	} else {
+-		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
++		DRV_LOG(INFO, "port %u starts without RMV interrupts.",
+ 			dev->data->port_id);
+-		dev->data->dev_conf.intr_conf.lsc = 0;
+ 		dev->data->dev_conf.intr_conf.rmv = 0;
+ 	}
++	if (rte_intr_fd_get(priv->sh->intr_handle_nl) >= 0) {
++		priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
++					(uint32_t)dev->data->port_id;
++	} else {
++		DRV_LOG(INFO, "port %u starts without LSC interrupts.",
++			dev->data->port_id);
++		dev->data->dev_conf.intr_conf.lsc = 0;
++	}
+ 	if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0)
+ 		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
+ 					(uint32_t)dev->data->port_id;
+@@ -1261,6 +1268,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
+ 	mlx5_rx_intr_vec_disable(dev);
+ 	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+ 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
++	priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
+ 	mlx5_txq_stop(dev);
+ 	mlx5_rxq_stop(dev);
+ 	if (priv->obj_ops.lb_dummy_queue_release)
+diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.c b/dpdk/drivers/net/mlx5/mlx5_tx.c
+index 5492d64cae..fd2cf20967 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_tx.c
++++ b/dpdk/drivers/net/mlx5/mlx5_tx.c
+@@ -728,7 +728,7 @@ mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+  *   Pointer to the device structure.
+  *
+  * @param tx_queue_id
+- *   Tx queue identificatior.
++ *   Tx queue identification.
+  *
+  * @param mode
+  *   Pointer to the burst mode information.
+diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.h b/dpdk/drivers/net/mlx5/mlx5_tx.h
+index 099e72935a..6ed00f722e 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_tx.h
++++ b/dpdk/drivers/net/mlx5/mlx5_tx.h
+@@ -161,6 +161,7 @@ struct mlx5_txq_data {
+ 	int32_t ts_offset; /* Timestamp field dynamic offset. */
+ 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
+ 	struct mlx5_txq_stats stats; /* TX queue counters. */
++	struct mlx5_txq_stats stats_reset; /* stats on last reset. */
+ 	struct mlx5_uar_data uar_data;
+ 	struct rte_mbuf *elts[0];
+ 	/* Storage for queued packets, must be the last field. */
+@@ -1710,7 +1711,6 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
+ 		     inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
+ 		     inlen > (dlen + vlan)))
+ 		return MLX5_TXCMP_CODE_ERROR;
+-	MLX5_ASSERT(inlen >= txq->inlen_mode);
+ 	/*
+ 	 * Check whether there are enough free WQEBBs:
+ 	 * - Control Segment
+@@ -2019,7 +2019,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
+ 	if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
+ 		return MLX5_TXCMP_CODE_EXIT;
+ 	/* Check for maximal WQE size. */
+-	if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
++	if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds))
+ 		return MLX5_TXCMP_CODE_ERROR;
+ #ifdef MLX5_PMD_SOFT_COUNTERS
+ 	/* Update sent data bytes/packets counters. */
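+
+For the mlx5_tx.c check above: ds counts 16-byte data segments, while the
+old test compared the per-WQE segment budget (MLX5_WQE_SIZE_MAX /
+MLX5_WSEG_SIZE) against the WQEBB count (ds + 3) / 4, so descriptors up to
+roughly four times the budget could slip through. A sketch of the
+corrected comparison, assuming only that ds is a segment count:
+
+    const unsigned int max_segs = MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE;
+
+    if (unlikely(ds > max_segs))    /* segment count vs. segment budget */
+            return MLX5_TXCMP_CODE_ERROR;
+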
+diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c
+index e4e66ae4c5..4115a2ad77 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_utils.c
++++ b/dpdk/drivers/net/mlx5/mlx5_utils.c
+@@ -340,6 +340,8 @@ mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
+ 	/* Enqueue half of the index to global. */
+ 	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
+ 	fetch_size = trunk->free >> 1;
++	if (fetch_size > pool->cfg.per_core_cache)
++		fetch_size = trunk->free - pool->cfg.per_core_cache;
+ 	for (i = 0; i < fetch_size; i++)
+ 		lc->idx[i] = ts_idx + i;
+ 	lc->len = fetch_size;
+@@ -1184,44 +1186,3 @@ mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
+ 	rte_spinlock_unlock(&tbl->sl);
+ 	return ret;
+ }
+-
+-int32_t
+-mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
+-		       union mlx5_l3t_data *data,
+-		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
+-{
+-	int32_t ret;
+-
+-	rte_spinlock_lock(&tbl->sl);
+-	/* Check if entry data is ready. */
+-	ret = __l3t_get_entry(tbl, idx, data);
+-	if (!ret) {
+-		switch (tbl->type) {
+-		case MLX5_L3T_TYPE_WORD:
+-			if (data->word)
+-				goto out;
+-			break;
+-		case MLX5_L3T_TYPE_DWORD:
+-			if (data->dword)
+-				goto out;
+-			break;
+-		case MLX5_L3T_TYPE_QWORD:
+-			if (data->qword)
+-				goto out;
+-			break;
+-		default:
+-			if (data->ptr)
+-				goto out;
+-			break;
+-		}
+-	}
+-	/* Entry data is not ready, use user callback to create it. */
+-	ret = cb(ctx, data);
+-	if (ret)
+-		goto out;
+-	/* Save the new allocated data to entry. */
+-	ret = __l3t_set_entry(tbl, idx, data);
+-out:
+-	rte_spinlock_unlock(&tbl->sl);
+-	return ret;
+-}
+diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.h b/dpdk/drivers/net/mlx5/mlx5_utils.h
+index cf3db89403..254c879d1a 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_utils.h
++++ b/dpdk/drivers/net/mlx5/mlx5_utils.h
+@@ -55,7 +55,7 @@ extern int mlx5_logtype;
+ 
+ /*
+  * For the case which data is linked with sequence increased index, the
+- * array table will be more efficiect than hash table once need to serarch
++ * array table will be more efficient than hash table once need to search
+  * one data entry in large numbers of entries. Since the traditional hash
+ * tables have a fixed table size, when huge numbers of data are saved to
+ * the hash table, lots of hash conflicts also arise.
+@@ -459,34 +459,6 @@ void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl);
+ int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
+ 			    union mlx5_l3t_data *data);
+ 
+-/**
+- * This function gets the index entry from Three-level table.
+- *
+- * If the index entry is not available, allocate new one by callback
+- * function and fill in the entry.
+- *
+- * @param tbl
+- *   Pointer to the l3t.
+- * @param idx
+- *   Index to the entry.
+- * @param data
+- *   Pointer to the memory which saves the entry data.
+- *   When function call returns 0, data contains the entry data get from
+- *   l3t.
+- *   When function call returns -1, data is not modified.
+- * @param cb
+- *   Callback function to allocate new data.
+- * @param ctx
+- *   Context for callback function.
+- *
+- * @return
+- *   0 if success, -1 on error.
+- */
+-
+-int32_t mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
+-			       union mlx5_l3t_data *data,
+-			       mlx5_l3t_alloc_callback_fn cb, void *ctx);
+-
+ /**
+  * This function decreases and clear index entry if reference
+  * counter is 0 from Three-level table.
+diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c
+index c4d5790726..f5e3893ed4 100644
+--- a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c
++++ b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c
+@@ -372,7 +372,7 @@ mlx5_flow_os_init_workspace_once(void)
+ 
+ 	if (err) {
+ 		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+-		return err;
++		return -rte_errno;
+ 	}
+ 	pthread_mutex_init(&lock_thread_list, NULL);
+ 	return 0;
+@@ -400,7 +400,7 @@ mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
+ 		/*
+ 		 * set_specific_workspace when current value is NULL
+ 		 * can happen only once per thread, mark this thread in
+-		 * linked list to be able to release reasorces later on.
++		 * linked list to be able to release resources later on.
+ 		 */
+ 		err = mlx5_add_workspace_to_list(data);
+ 		if (err) {
+diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_os.c
+index dec4b923d0..ba99901c5c 100644
+--- a/dpdk/drivers/net/mlx5/windows/mlx5_os.c
++++ b/dpdk/drivers/net/mlx5/windows/mlx5_os.c
+@@ -136,7 +136,7 @@ mlx5_init_once(void)
+  *   Pointer to mlx5 device attributes.
+  *
+  * @return
+- *   0 on success, non zero error number otherwise.
++ *   0 on success, a negative errno value otherwise and rte_errno is set.
+  */
+ int
+ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+@@ -145,10 +145,11 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ 	struct mlx5_context *mlx5_ctx;
+ 	void *pv_iseg = NULL;
+ 	u32 cb_iseg = 0;
+-	int err = 0;
+ 
+-	if (!cdev || !cdev->ctx)
+-		return -EINVAL;
++	if (!cdev || !cdev->ctx) {
++		rte_errno = EINVAL;
++		return -rte_errno;
++	}
+ 	mlx5_ctx = (struct mlx5_context *)cdev->ctx;
+ 	memset(device_attr, 0, sizeof(*device_attr));
+ 	device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
+@@ -171,15 +172,14 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ 	pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
+ 	if (pv_iseg == NULL) {
+ 		DRV_LOG(ERR, "Failed to get device hca_iseg");
+-		return errno;
+-	}
+-	if (!err) {
+-		snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
+-			MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
+-			MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
+-			MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
++		rte_errno = errno;
++		return -rte_errno;
+ 	}
+-	return err;
++	snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
++		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
++		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
++		 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
++	return 0;
+ }
+ 
+ /**
+@@ -226,7 +226,7 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
+  *   Pointer to RQ channel object, which includes the channel fd
+  *
+  * @param[out] fd
+- *   The file descriptor (representing the intetrrupt) used in this channel.
++ *   The file descriptor (representing the interrupt) used in this channel.
+  *
+  * @return
+  *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
+@@ -423,6 +423,21 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	}
+ 	/* Override some values set by hardware configuration. */
+ 	mlx5_args(config, dpdk_dev->devargs);
++	/* Update final values for devargs before check sibling config. */
++	config->dv_esw_en = 0;
++	if (!config->dv_flow_en) {
++		DRV_LOG(ERR, "Windows flow mode must be DV flow enable.");
++		err = ENOTSUP;
++		goto error;
++	}
++	if (!priv->config.dv_esw_en &&
++	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
++		DRV_LOG(WARNING,
++			"Metadata mode %u is not supported (no E-Switch).",
++			priv->config.dv_xmeta_en);
++		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
++	}
++	/* Check sibling device configurations. */
+ 	err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev);
+ 	if (err)
+ 		goto error;
+@@ -584,7 +599,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	 * Verbs context returned by ibv_open_device().
+ 	 */
+ 	mlx5_link_update(eth_dev, 0);
+-	config->dv_esw_en = 0;
+ 	/* Detect minimal data bytes to inline. */
+ 	mlx5_set_min_inline(spawn, config);
+ 	/* Store device configuration on private structure. */
+@@ -606,12 +620,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 	}
+ 	/* No supported flow priority number detection. */
+ 	priv->sh->flow_max_priority = -1;
+-	if (!priv->config.dv_esw_en &&
+-	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+-		DRV_LOG(WARNING, "metadata mode %u is not supported "
+-				 "(no E-Switch)", priv->config.dv_xmeta_en);
+-		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
+-	}
+ 	mlx5_set_metadata_mask(eth_dev);
+ 	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ 	    !priv->sh->dv_regc0_mask) {
+@@ -645,12 +653,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ 			goto error;
+ 		}
+ 	}
+-	if (sh->devx && config->dv_flow_en) {
++	if (sh->devx) {
+ 		priv->obj_ops = devx_obj_ops;
+ 	} else {
+-		DRV_LOG(ERR, "Flow mode %u is not supported "
+-				"(Windows flow must be DevX with DV flow enabled).",
+-				priv->config.dv_flow_en);
++		DRV_LOG(ERR, "Windows flow must be DevX.");
+ 		err = ENOTSUP;
+ 		goto error;
+ 	}
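+
+The Windows mlx5_os.c hunks above converge on DPDK's usual error
+convention: record the cause in rte_errno and return its negative. A
+hedged sketch of that convention (example_op is a made-up function, not
+the driver's API):
+
+    #include <errno.h>
+    #include <rte_errno.h>
+
+    static int example_op(void *ctx)
+    {
+            if (ctx == NULL) {
+                    rte_errno = EINVAL;  /* record the cause */
+                    return -rte_errno;   /* negative errno return value */
+            }
+            return 0;
+    }
+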
+diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c
+index 10fe6d828c..eef016aa0b 100644
+--- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c
++++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c
+@@ -247,7 +247,7 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ 	    (mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) {
+ 		mru = mbuf_data_size - MRVL_NETA_PKT_OFFS;
+ 		mtu = MRVL_NETA_MRU_TO_MTU(mru);
+-		MVNETA_LOG(WARNING, "MTU too big, max MTU possible limitted by"
++		MVNETA_LOG(WARNING, "MTU too big, max MTU possible limited by"
+ 			" current mbuf size: %u. Set MTU to %u, MRU to %u",
+ 			mbuf_data_size, mtu, mru);
+ 	}
+diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
+index 9c7fe13f7f..735efb6cfc 100644
+--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
+@@ -579,7 +579,7 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ 	if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
+ 		mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
+ 		mtu = MRVL_PP2_MRU_TO_MTU(mru);
+-		MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted "
++		MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
+ 			"by current mbuf size: %u. Set MTU to %u, MRU to %u",
+ 			mbuf_data_size, mtu, mru);
+ 	}
+@@ -1626,13 +1626,14 @@ mrvl_xstats_get(struct rte_eth_dev *dev,
+ {
+ 	struct mrvl_priv *priv = dev->data->dev_private;
+ 	struct pp2_ppio_statistics ppio_stats;
+-	unsigned int i;
++	unsigned int i, count;
+ 
+-	if (!stats)
+-		return 0;
++	count = RTE_DIM(mrvl_xstats_tbl);
++	if (n < count)
++		return count;
+ 
+ 	pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+-	for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
++	for (i = 0; i < count; i++) {
+ 		uint64_t val;
+ 
+ 		if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
+@@ -1648,7 +1649,7 @@ mrvl_xstats_get(struct rte_eth_dev *dev,
+ 		stats[i].value = val;
+ 	}
+ 
+-	return n;
++	return count;
+ }
+ 
+ /**
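+
+The mrvl_xstats_get() change above adopts the ethdev size-query
+convention: when the caller's array is too small, return the required
+count without filling anything, so the caller can retry with a larger
+array. A minimal sketch under that convention (entry, NUM_XSTATS and
+read_xstat() are illustrative, not the PMD's real names):
+
+    static int xstats_get(struct entry *stats, unsigned int n)
+    {
+            const unsigned int count = NUM_XSTATS;
+            unsigned int i;
+
+            if (n < count)
+                    return count;  /* tell the caller how much to allocate */
+            for (i = 0; i < count; i++)
+                    stats[i] = read_xstat(i);
+            return count;
+    }
+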
+diff --git a/dpdk/drivers/net/mvpp2/mrvl_qos.c b/dpdk/drivers/net/mvpp2/mrvl_qos.c
+index dbfc3b5d20..99f0ee56d1 100644
+--- a/dpdk/drivers/net/mvpp2/mrvl_qos.c
++++ b/dpdk/drivers/net/mvpp2/mrvl_qos.c
+@@ -301,7 +301,7 @@ get_entry_values(const char *entry, uint8_t *tab,
+ }
+ 
+ /**
+- * Parse Traffic Class'es mapping configuration.
++ * Parse Traffic Classes mapping configuration.
+  *
+  * @param file Config file handle.
+  * @param port Which port to look for.
+@@ -736,7 +736,7 @@ mrvl_get_cfg(const char *key __rte_unused, const char *path, void *extra_args)
+ 
+ 		/* MRVL_TOK_START_HDR replaces MRVL_TOK_DSA_MODE parameter.
+ 		 * MRVL_TOK_DSA_MODE will be supported for backward
+-		 * compatibillity.
++		 * compatibility.
+ 		 */
+ 		entry = rte_cfgfile_get_entry(file, sec_name,
+ 				MRVL_TOK_START_HDR);
+diff --git a/dpdk/drivers/net/netvsc/hn_ethdev.c b/dpdk/drivers/net/netvsc/hn_ethdev.c
+index 8a950403ac..787139c0b2 100644
+--- a/dpdk/drivers/net/netvsc/hn_ethdev.c
++++ b/dpdk/drivers/net/netvsc/hn_ethdev.c
+@@ -554,9 +554,10 @@ static int hn_subchan_configure(struct hn_data *hv,
+ static void netvsc_hotplug_retry(void *args)
+ {
+ 	int ret;
+-	struct hn_data *hv = args;
++	struct hv_hotadd_context *hot_ctx = args;
++	struct hn_data *hv = hot_ctx->hv;
+ 	struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
+-	struct rte_devargs *d = &hv->devargs;
++	struct rte_devargs *d = &hot_ctx->da;
+ 	char buf[256];
+ 
+ 	DIR *di;
+@@ -566,10 +567,13 @@ static void netvsc_hotplug_retry(void *args)
+ 	int s;
+ 
+ 	PMD_DRV_LOG(DEBUG, "%s: retry count %d",
+-		    __func__, hv->eal_hot_plug_retry);
++		    __func__, hot_ctx->eal_hot_plug_retry);
+ 
+-	if (hv->eal_hot_plug_retry++ > NETVSC_MAX_HOTADD_RETRY)
+-		return;
++	if (hot_ctx->eal_hot_plug_retry++ > NETVSC_MAX_HOTADD_RETRY) {
++		PMD_DRV_LOG(NOTICE, "Failed to parse PCI device retry=%d",
++			    hot_ctx->eal_hot_plug_retry);
++		goto free_hotadd_ctx;
++	}
+ 
+ 	snprintf(buf, sizeof(buf), "/sys/bus/pci/devices/%s/net", d->name);
+ 	di = opendir(buf);
+@@ -602,7 +606,7 @@ static void netvsc_hotplug_retry(void *args)
+ 		}
+ 		if (req.ifr_hwaddr.sa_family != ARPHRD_ETHER) {
+ 			closedir(di);
+-			return;
++			goto free_hotadd_ctx;
+ 		}
+ 		memcpy(eth_addr.addr_bytes, req.ifr_hwaddr.sa_data,
+ 		       RTE_DIM(eth_addr.addr_bytes));
+@@ -611,8 +615,13 @@ static void netvsc_hotplug_retry(void *args)
+ 			PMD_DRV_LOG(NOTICE,
+ 				    "Found matching MAC address, adding device %s network name %s",
+ 				    d->name, dir->d_name);
++
++			/* If this device has been hot removed from this
++			 * parent device, restore its args.
++			 */
+ 			ret = rte_eal_hotplug_add(d->bus->name, d->name,
+-						  d->args);
++						  hv->vf_devargs ?
++						  hv->vf_devargs : "");
+ 			if (ret) {
+ 				PMD_DRV_LOG(ERR,
+ 					    "Failed to add PCI device %s",
+@@ -624,12 +633,20 @@ static void netvsc_hotplug_retry(void *args)
+ 		 * the device, or its MAC address did not match.
+ 		 */
+ 		closedir(di);
+-		return;
++		goto free_hotadd_ctx;
+ 	}
+ 	closedir(di);
+ retry:
+ 	/* The device is still being initialized, retry after 1 second */
+-	rte_eal_alarm_set(1000000, netvsc_hotplug_retry, hv);
++	rte_eal_alarm_set(1000000, netvsc_hotplug_retry, hot_ctx);
++	return;
++
++free_hotadd_ctx:
++	rte_spinlock_lock(&hv->hotadd_lock);
++	LIST_REMOVE(hot_ctx, list);
++	rte_spinlock_unlock(&hv->hotadd_lock);
++
++	rte_free(hot_ctx);
+ }
+ 
+ static void
+@@ -637,7 +654,8 @@ netvsc_hotadd_callback(const char *device_name, enum rte_dev_event_type type,
+ 		       void *arg)
+ {
+ 	struct hn_data *hv = arg;
+-	struct rte_devargs *d = &hv->devargs;
++	struct hv_hotadd_context *hot_ctx;
++	struct rte_devargs *d;
+ 	int ret;
+ 
+ 	PMD_DRV_LOG(INFO, "Device notification type=%d device_name=%s",
+@@ -649,26 +667,42 @@ netvsc_hotadd_callback(const char *device_name, enum rte_dev_event_type type,
+ 		if (hv->vf_ctx.vf_state > vf_removed)
+ 			break;
+ 
++		hot_ctx = rte_zmalloc("NETVSC-HOTADD", sizeof(*hot_ctx),
++				      rte_mem_page_size());
++
++		if (!hot_ctx) {
++			PMD_DRV_LOG(ERR, "Failed to allocate hotadd context");
++			return;
++		}
++
++		hot_ctx->hv = hv;
++		d = &hot_ctx->da;
++
+ 		ret = rte_devargs_parse(d, device_name);
+ 		if (ret) {
+ 			PMD_DRV_LOG(ERR,
+ 				    "devargs parsing failed ret=%d", ret);
+-			return;
++			goto free_ctx;
+ 		}
+ 
+ 		if (!strcmp(d->bus->name, "pci")) {
+ 			/* Start the process of figuring out if this
+ 			 * PCI device is a VF device
+ 			 */
+-			hv->eal_hot_plug_retry = 0;
+-			rte_eal_alarm_set(1000000, netvsc_hotplug_retry, hv);
++			rte_spinlock_lock(&hv->hotadd_lock);
++			LIST_INSERT_HEAD(&hv->hotadd_list, hot_ctx, list);
++			rte_spinlock_unlock(&hv->hotadd_lock);
++			rte_eal_alarm_set(1000000, netvsc_hotplug_retry, hot_ctx);
++			return;
+ 		}
+ 
+ 		/* We will switch to VF on RDNIS configure message
+ 		 * sent from VSP
+ 		 */
+-
++free_ctx:
++		rte_free(hot_ctx);
+ 		break;
++
+ 	default:
+ 		break;
+ 	}
+@@ -1003,12 +1037,20 @@ hn_dev_close(struct rte_eth_dev *dev)
+ {
+ 	int ret;
+ 	struct hn_data *hv = dev->data->dev_private;
++	struct hv_hotadd_context *hot_ctx;
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return 0;
+ 
+-	rte_eal_alarm_cancel(netvsc_hotplug_retry, &hv->devargs);
++	rte_spinlock_lock(&hv->hotadd_lock);
++	while (!LIST_EMPTY(&hv->hotadd_list)) {
++		hot_ctx = LIST_FIRST(&hv->hotadd_list);
++		rte_eal_alarm_cancel(netvsc_hotplug_retry, hot_ctx);
++		LIST_REMOVE(hot_ctx, list);
++		rte_free(hot_ctx);
++	}
++	rte_spinlock_unlock(&hv->hotadd_lock);
+ 
+ 	ret = hn_vf_close(dev);
+ 	hn_dev_free_queues(dev);
+@@ -1097,6 +1139,9 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	rte_spinlock_init(&hv->hotadd_lock);
++	LIST_INIT(&hv->hotadd_list);
++
+ 	vmbus = container_of(device, struct rte_vmbus_device, device);
+ 	eth_dev->dev_ops = &hn_eth_dev_ops;
+ 	eth_dev->rx_queue_count = hn_dev_rx_queue_count;
+@@ -1124,8 +1169,8 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
+ 	}
+ 
+ 	hv->vmbus = vmbus;
+-	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
+-	hv->chim_res  = &vmbus->resource[HV_SEND_BUF_MAP];
++	hv->rxbuf_res = vmbus->resource[HV_RECV_BUF_MAP];
++	hv->chim_res  = vmbus->resource[HV_SEND_BUF_MAP];
+ 	hv->port_id = eth_dev->data->port_id;
+ 	hv->latency = HN_CHAN_LATENCY_NS;
+ 	hv->rx_copybreak = HN_RXCOPY_THRESHOLD;
+@@ -1221,6 +1266,9 @@ eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
+ 	ret_stop = hn_dev_stop(eth_dev);
+ 	hn_dev_close(eth_dev);
+ 
++	free(hv->vf_devargs);
++	hv->vf_devargs = NULL;
++
+ 	hn_detach(hv);
+ 	hn_chim_uninit(eth_dev);
+ 	rte_vmbus_chan_close(hv->primary->chan);
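+
+The netvsc hot-plug rework above gives every hot-add event its own
+context, kept on a spinlock-protected list, so pending retry alarms can
+be cancelled and their memory freed on close instead of sharing a single
+retry counter in struct hn_data. A rough sketch of that lifecycle,
+assuming DPDK's alarm and spinlock APIs and an illustrative ctx type:
+
+    /* on the hot-add event: allocate, link, then arm the retry alarm */
+    rte_spinlock_lock(&hv->hotadd_lock);
+    LIST_INSERT_HEAD(&hv->hotadd_list, ctx, list);
+    rte_spinlock_unlock(&hv->hotadd_lock);
+    rte_eal_alarm_set(1000000, retry_cb, ctx);  /* pass ctx, not hv */
+
+    /* on device close: cancel and free every pending context */
+    rte_spinlock_lock(&hv->hotadd_lock);
+    while (!LIST_EMPTY(&hv->hotadd_list)) {
+            ctx = LIST_FIRST(&hv->hotadd_list);
+            rte_eal_alarm_cancel(retry_cb, ctx);
+            LIST_REMOVE(ctx, list);
+            rte_free(ctx);
+    }
+    rte_spinlock_unlock(&hv->hotadd_lock);
+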
+diff --git a/dpdk/drivers/net/netvsc/hn_nvs.c b/dpdk/drivers/net/netvsc/hn_nvs.c
+index 89dbba6cd9..b90280c9ff 100644
+--- a/dpdk/drivers/net/netvsc/hn_nvs.c
++++ b/dpdk/drivers/net/netvsc/hn_nvs.c
+@@ -193,11 +193,11 @@ hn_nvs_conn_rxbuf(struct hn_data *hv)
+ 	 * Connect RXBUF to NVS.
+ 	 */
+ 	conn.type = NVS_TYPE_RXBUF_CONN;
+-	conn.gpadl = hv->rxbuf_res->phys_addr;
++	conn.gpadl = hv->rxbuf_res.phys_addr;
+ 	conn.sig = NVS_RXBUF_SIG;
+ 	PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64,
+-		    hv->rxbuf_res->addr,
+-		    hv->rxbuf_res->phys_addr);
++		    hv->rxbuf_res.addr,
++		    hv->rxbuf_res.phys_addr);
+ 
+ 	error = hn_nvs_execute(hv, &conn, sizeof(conn),
+ 			       &resp, sizeof(resp),
+@@ -229,7 +229,7 @@ hn_nvs_conn_rxbuf(struct hn_data *hv)
+ 	hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
+ 
+ 	/*
+-	 * Pimary queue's rxbuf_info is not allocated at creation time.
++	 * Primary queue's rxbuf_info is not allocated at creation time.
+ 	 * Now we can allocate it after we figure out the slotcnt.
+ 	 */
+ 	hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+@@ -308,17 +308,17 @@ hn_nvs_conn_chim(struct hn_data *hv)
+ 	struct hn_nvs_chim_conn chim;
+ 	struct hn_nvs_chim_connresp resp;
+ 	uint32_t sectsz;
+-	unsigned long len = hv->chim_res->len;
++	unsigned long len = hv->chim_res.len;
+ 	int error;
+ 
+ 	/* Connect chimney sending buffer to NVS */
+ 	memset(&chim, 0, sizeof(chim));
+ 	chim.type = NVS_TYPE_CHIM_CONN;
+-	chim.gpadl = hv->chim_res->phys_addr;
++	chim.gpadl = hv->chim_res.phys_addr;
+ 	chim.sig = NVS_CHIM_SIG;
+ 	PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
+-		    hv->chim_res->addr,
+-		    hv->chim_res->phys_addr);
++		    hv->chim_res.addr,
++		    hv->chim_res.phys_addr);
+ 
+ 	error = hn_nvs_execute(hv, &chim, sizeof(chim),
+ 			       &resp, sizeof(resp),
+diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c
+index 028f176c7e..7a3bd523a5 100644
+--- a/dpdk/drivers/net/netvsc/hn_rxtx.c
++++ b/dpdk/drivers/net/netvsc/hn_rxtx.c
+@@ -578,11 +578,11 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
+ 		rte_iova_t iova;
+ 
+ 		/*
+-		 * Build an external mbuf that points to recveive area.
++		 * Build an external mbuf that points to receive area.
+ 		 * Use refcount to handle multiple packets in same
+ 		 * receive buffer section.
+ 		 */
+-		rxbuf = hv->rxbuf_res->addr;
++		rxbuf = hv->rxbuf_res.addr;
+ 		iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
+ 		shinfo = &rxb->shinfo;
+ 
+@@ -765,8 +765,8 @@ hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
+ {
+ 	const struct vmbus_chanpkt_rxbuf *pkt;
+ 	const struct hn_nvs_hdr *nvs_hdr = buf;
+-	uint32_t rxbuf_sz = hv->rxbuf_res->len;
+-	char *rxbuf = hv->rxbuf_res->addr;
++	uint32_t rxbuf_sz = hv->rxbuf_res.len;
++	char *rxbuf = hv->rxbuf_res.addr;
+ 	unsigned int i, hlen, count;
+ 	struct hn_rx_bufinfo *rxb;
+ 
+@@ -1031,7 +1031,7 @@ hn_dev_rx_queue_count(void *rx_queue)
+  * returns:
+  *  - -EINVAL               - offset outside of ring
+  *  - RTE_ETH_RX_DESC_AVAIL - no data available yet
+- *  - RTE_ETH_RX_DESC_DONE  - data is waiting in stagin ring
++ *  - RTE_ETH_RX_DESC_DONE  - data is waiting in staging ring
+  */
+ int hn_dev_rx_queue_status(void *arg, uint16_t offset)
+ {
+@@ -1266,7 +1266,7 @@ hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq,
+ 	if (txd->chim_index == NVS_CHIM_IDX_INVALID)
+ 		return NULL;
+ 
+-	chim = (uint8_t *)hv->chim_res->addr
++	chim = (uint8_t *)hv->chim_res.addr
+ 			+ txd->chim_index * hv->chim_szmax;
+ 
+ 	txq->agg_txd = txd;
+@@ -1348,8 +1348,11 @@ static void hn_encap(struct rndis_packet_msg *pkt,
+ 			*pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
+ 							   m->tso_segsz);
+ 		}
+-	} else if (m->ol_flags &
+-		   (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)) {
++	} else if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
++			RTE_MBUF_F_TX_TCP_CKSUM ||
++		   (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
++			RTE_MBUF_F_TX_UDP_CKSUM ||
++		   (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)) {
+ 		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
+ 						  NDIS_PKTINFO_TYPE_CSUM);
+ 		*pi_data = 0;
+@@ -1363,9 +1366,11 @@ static void hn_encap(struct rndis_packet_msg *pkt,
+ 				*pi_data |= NDIS_TXCSUM_INFO_IPCS;
+ 		}
+ 
+-		if (m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM)
++		if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
++				RTE_MBUF_F_TX_TCP_CKSUM)
+ 			*pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
+-		else if (m->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM)
++		else if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
++				RTE_MBUF_F_TX_UDP_CKSUM)
+ 			*pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
+ 	}
+ 
+diff --git a/dpdk/drivers/net/netvsc/hn_var.h b/dpdk/drivers/net/netvsc/hn_var.h
+index fbb3995507..98a3b83033 100644
+--- a/dpdk/drivers/net/netvsc/hn_var.h
++++ b/dpdk/drivers/net/netvsc/hn_var.h
+@@ -126,6 +126,13 @@ struct hn_vf_ctx {
+ 	enum vf_device_state	vf_state;
+ };
+ 
++struct hv_hotadd_context {
++	LIST_ENTRY(hv_hotadd_context) list;
++	struct hn_data *hv;
++	struct rte_devargs da;
++	int eal_hot_plug_retry;
++};
++
+ struct hn_data {
+ 	struct rte_vmbus_device *vmbus;
+ 	struct hn_rx_queue *primary;
+@@ -140,7 +147,7 @@ struct hn_data {
+ 	uint32_t	link_status;
+ 	uint32_t	link_speed;
+ 
+-	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
++	struct rte_mem_resource rxbuf_res;	/* UIO resource for Rx */
+ 	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
+ 	uint32_t	rx_copybreak;
+ 	uint32_t	rx_extmbuf_enable;
+@@ -149,7 +156,7 @@ struct hn_data {
+ 	uint64_t	rss_offloads;
+ 
+ 	rte_spinlock_t	chim_lock;
+-	struct rte_mem_resource *chim_res;	/* UIO resource for Tx */
++	struct rte_mem_resource chim_res;	/* UIO resource for Tx */
+ 	struct rte_bitmap *chim_bmap;		/* Send buffer map */
+ 	void		*chim_bmem;
+ 	uint32_t	tx_copybreak;
+@@ -175,8 +182,9 @@ struct hn_data {
+ 
+ 	struct vmbus_channel *channels[HN_MAX_CHANNELS];
+ 
+-	struct rte_devargs devargs;
+-	int		eal_hot_plug_retry;
++	rte_spinlock_t	hotadd_lock;
++	LIST_HEAD(hotadd_list, hv_hotadd_context) hotadd_list;
++	char		*vf_devargs;
+ };
+ 
+ static inline struct vmbus_channel *
+diff --git a/dpdk/drivers/net/netvsc/hn_vf.c b/dpdk/drivers/net/netvsc/hn_vf.c
+index fead8eba5d..62948bf889 100644
+--- a/dpdk/drivers/net/netvsc/hn_vf.c
++++ b/dpdk/drivers/net/netvsc/hn_vf.c
+@@ -103,7 +103,7 @@ static void hn_remove_delayed(void *args)
+ 	struct rte_device *dev = rte_eth_devices[port_id].device;
+ 	int ret;
+ 
+-	/* Tell VSP to switch data path to synthentic */
++	/* Tell VSP to switch data path to synthetic */
+ 	hn_vf_remove(hv);
+ 
+ 	PMD_DRV_LOG(NOTICE, "Start to remove port %d", port_id);
+@@ -129,6 +129,10 @@ static void hn_remove_delayed(void *args)
+ 		PMD_DRV_LOG(ERR, "rte_eth_dev_stop failed port_id=%u ret=%d",
+ 			    port_id, ret);
+ 
++	/* Record the device parameters for possible hotplug events */
++	if (dev->devargs && dev->devargs->args)
++		hv->vf_devargs = strdup(dev->devargs->args);
++
+ 	ret = rte_eth_dev_close(port_id);
+ 	if (ret)
+ 		PMD_DRV_LOG(ERR, "rte_eth_dev_close failed port_id=%u ret=%d",
+diff --git a/dpdk/drivers/net/nfb/nfb.h b/dpdk/drivers/net/nfb/nfb.h
+index 59d3ab4986..96c44c3a45 100644
+--- a/dpdk/drivers/net/nfb/nfb.h
++++ b/dpdk/drivers/net/nfb/nfb.h
+@@ -48,10 +48,6 @@ struct pmd_internals {
+ 
+ 	char             nfb_dev[PATH_MAX];
+ 	struct nfb_device *nfb;
+-	/* Place to remember if filter was promiscuous or filtering by table,
+-	 * when disabling allmulticast
+-	 */
+-	enum nc_rxmac_mac_filter rx_filter_original;
+ };
+ 
+ #endif /* _NFB_H_ */
+diff --git a/dpdk/drivers/net/nfb/nfb_ethdev.c b/dpdk/drivers/net/nfb/nfb_ethdev.c
+index 3c39937816..d9e43bc027 100644
+--- a/dpdk/drivers/net/nfb/nfb_ethdev.c
++++ b/dpdk/drivers/net/nfb/nfb_ethdev.c
+@@ -77,9 +77,10 @@ static void
+ nfb_nc_rxmac_deinit(struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
+ 	uint16_t max_rxmac)
+ {
+-	for (; max_rxmac > 0; --max_rxmac) {
+-		nc_rxmac_close(rxmac[max_rxmac]);
+-		rxmac[max_rxmac] = NULL;
++	uint16_t i;
++	for (i = 0; i < max_rxmac; i++) {
++		nc_rxmac_close(rxmac[i]);
++		rxmac[i] = NULL;
+ 	}
+ }
+ 
+@@ -95,9 +96,10 @@ static void
+ nfb_nc_txmac_deinit(struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
+ 	uint16_t max_txmac)
+ {
+-	for (; max_txmac > 0; --max_txmac) {
+-		nc_txmac_close(txmac[max_txmac]);
+-		txmac[max_txmac] = NULL;
++	uint16_t i;
++	for (i = 0; i < max_txmac; i++) {
++		nc_txmac_close(txmac[i]);
++		txmac[i] = NULL;
+ 	}
+ }
+ 
+@@ -514,7 +516,6 @@ nfb_eth_dev_init(struct rte_eth_dev *dev)
+ 
+ 	data->promiscuous = nfb_eth_promiscuous_get(dev);
+ 	data->all_multicast = nfb_eth_allmulticast_get(dev);
+-	internals->rx_filter_original = data->promiscuous;
+ 
+ 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+ 
+diff --git a/dpdk/drivers/net/nfb/nfb_rxmode.c b/dpdk/drivers/net/nfb/nfb_rxmode.c
+index 2d0b613d21..ca6e4d5578 100644
+--- a/dpdk/drivers/net/nfb/nfb_rxmode.c
++++ b/dpdk/drivers/net/nfb/nfb_rxmode.c
+@@ -14,8 +14,6 @@ nfb_eth_promiscuous_enable(struct rte_eth_dev *dev)
+ 		dev->data->dev_private;
+ 	uint16_t i;
+ 
+-	internals->rx_filter_original = RXMAC_MAC_FILTER_PROMISCUOUS;
+-
+ 	for (i = 0; i < internals->max_rxmac; ++i) {
+ 		nc_rxmac_mac_filter_enable(internals->rxmac[i],
+ 			RXMAC_MAC_FILTER_PROMISCUOUS);
+@@ -30,16 +28,13 @@ nfb_eth_promiscuous_disable(struct rte_eth_dev *dev)
+ 	struct pmd_internals *internals = (struct pmd_internals *)
+ 		dev->data->dev_private;
+ 	uint16_t i;
++	enum nc_rxmac_mac_filter filter = RXMAC_MAC_FILTER_TABLE_BCAST;
+ 
+-	internals->rx_filter_original = RXMAC_MAC_FILTER_TABLE;
+-
+-	/* if promisc is not enabled, do nothing */
+-	if (!nfb_eth_promiscuous_get(dev))
+-		return 0;
++	if (dev->data->all_multicast)
++		filter = RXMAC_MAC_FILTER_TABLE_BCAST_MCAST;
+ 
+ 	for (i = 0; i < internals->max_rxmac; ++i) {
+-		nc_rxmac_mac_filter_enable(internals->rxmac[i],
+-			RXMAC_MAC_FILTER_TABLE);
++		nc_rxmac_mac_filter_enable(internals->rxmac[i], filter);
+ 	}
+ 
+ 	return 0;
+@@ -67,6 +62,8 @@ nfb_eth_allmulticast_enable(struct rte_eth_dev *dev)
+ 		dev->data->dev_private;
+ 
+ 	uint16_t i;
++	if (dev->data->promiscuous)
++		return 0;
+ 	for (i = 0; i < internals->max_rxmac; ++i) {
+ 		nc_rxmac_mac_filter_enable(internals->rxmac[i],
+ 			RXMAC_MAC_FILTER_TABLE_BCAST_MCAST);
+@@ -83,13 +80,12 @@ nfb_eth_allmulticast_disable(struct rte_eth_dev *dev)
+ 
+ 	uint16_t i;
+ 
+-	/* if multicast is not enabled do nothing */
+-	if (!nfb_eth_allmulticast_get(dev))
++	if (dev->data->promiscuous)
+ 		return 0;
+ 
+ 	for (i = 0; i < internals->max_rxmac; ++i) {
+ 		nc_rxmac_mac_filter_enable(internals->rxmac[i],
+-			internals->rx_filter_original);
++			RXMAC_MAC_FILTER_TABLE_BCAST);
+ 	}
+ 
+ 	return 0;
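+
+The nfb rx-mode changes above stop caching the previous filter in the PMD
+and instead derive the wanted filter from ethdev's own promiscuous and
+all_multicast flags, so the two toggles cannot drift out of sync. A small
+sketch of that derivation (wanted_filter() is illustrative, not the
+driver's function):
+
+    static enum nc_rxmac_mac_filter
+    wanted_filter(const struct rte_eth_dev *dev)
+    {
+            if (dev->data->promiscuous)
+                    return RXMAC_MAC_FILTER_PROMISCUOUS;
+            if (dev->data->all_multicast)
+                    return RXMAC_MAC_FILTER_TABLE_BCAST_MCAST;
+            return RXMAC_MAC_FILTER_TABLE_BCAST;
+    }
+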
+diff --git a/dpdk/drivers/net/nfp/nfp_common.c b/dpdk/drivers/net/nfp/nfp_common.c
+index f8978e803a..34e3a03edd 100644
+--- a/dpdk/drivers/net/nfp/nfp_common.c
++++ b/dpdk/drivers/net/nfp/nfp_common.c
+@@ -176,6 +176,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
+ 		return -EINVAL;
+ 	}
+ 
++	/* Checking MTU set */
++	if (rxmode->mtu > hw->flbufsz) {
++		PMD_INIT_LOG(INFO, "MTU (%u) larger then current mbufsize (%u) not supported",
++				    rxmode->mtu, hw->flbufsz);
++		return -ERANGE;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -280,10 +287,6 @@ nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
+ 		return -EBUSY;
+ 	}
+ 
+-	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+-	    !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+-		return -EBUSY;
+-
+ 	/* Writing new MAC to the specific port BAR address */
+ 	nfp_net_write_mac(hw, (uint8_t *)mac_addr);
+ 
+@@ -696,7 +699,17 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
+ 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+ 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
+-	dev_info->max_rx_pktlen = hw->max_mtu;
++	/*
++	 * The maximum rx packet length (max_rx_pktlen) is set to the
++	 * maximum supported frame size that the NFP can handle. This
++	 * includes layer 2 headers, CRC and other metadata that can
++	 * optionally be used.
++	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
++	 * which was set by the firmware loaded onto the card.
++	 */
++	dev_info->max_rx_pktlen = NFP_FRAME_SIZE_MAX;
++	dev_info->max_mtu = hw->max_mtu;
++	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ 	/* Next should change when PF support is implemented */
+ 	dev_info->max_mac_addrs = 1;
+ 
+@@ -956,6 +969,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ 		return -EBUSY;
+ 	}
+ 
++	/* MTU larger than current mbufsize not supported */
++	if (mtu > hw->flbufsz) {
++		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
++			    mtu, hw->flbufsz);
++		return -ERANGE;
++	}
++
+ 	/* writing to configuration space */
+ 	nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
+ 
+@@ -969,22 +989,25 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ {
+ 	uint32_t new_ctrl, update;
+ 	struct nfp_net_hw *hw;
++	struct rte_eth_conf *dev_conf;
+ 	int ret;
+ 
+ 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+-	new_ctrl = 0;
+-
+-	/* Enable vlan strip if it is not configured yet */
+-	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
+-	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
+-		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
++	dev_conf = &dev->data->dev_conf;
++	new_ctrl = hw->ctrl;
+ 
+-	/* Disable vlan strip just if it is configured */
+-	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
+-	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
+-		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
++	/*
++	 * Vlan stripping setting
++	 * Enable or disable VLAN stripping
++	 */
++	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
++		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
++			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
++		else
++			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
++	}
+ 
+-	if (new_ctrl == 0)
++	if (new_ctrl == hw->ctrl)
+ 		return 0;
+ 
+ 	update = NFP_NET_CFG_UPDATE_GEN;
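+
+The nfp_net_vlan_offload_set() rework above reads the desired state from
+the configured Rx offloads, edits a copy of the control word, and skips
+the device update entirely when nothing changed. A condensed sketch of
+that read-modify-compare pattern, reusing the names from the hunk:
+
+    uint32_t new_ctrl = hw->ctrl;
+
+    if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+            if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                    new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+            else
+                    new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
+    }
+    if (new_ctrl == hw->ctrl)
+            return 0;       /* no-op: skip the reconfig write */
+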
+diff --git a/dpdk/drivers/net/nfp/nfp_common.h b/dpdk/drivers/net/nfp/nfp_common.h
+index 8b35fa119c..8db5ec23f8 100644
+--- a/dpdk/drivers/net/nfp/nfp_common.h
++++ b/dpdk/drivers/net/nfp/nfp_common.h
+@@ -98,6 +98,9 @@ struct nfp_net_adapter;
+ /* Number of supported physical ports */
+ #define NFP_MAX_PHYPORTS	12
+ 
++/* Maximum supported NFP frame size (MTU + layer 2 headers) */
++#define NFP_FRAME_SIZE_MAX	10048
++
+ #include <linux/types.h>
+ #include <rte_io.h>
+ 
+diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c
+index 8e81cc498f..1a9f7581a7 100644
+--- a/dpdk/drivers/net/nfp/nfp_ethdev.c
++++ b/dpdk/drivers/net/nfp/nfp_ethdev.c
+@@ -302,11 +302,13 @@ nfp_net_close(struct rte_eth_dev *dev)
+ 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ 		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
+ 		nfp_net_reset_tx_queue(this_tx_q);
++		nfp_net_tx_queue_release(dev, i);
+ 	}
+ 
+ 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ 		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
+ 		nfp_net_reset_rx_queue(this_rx_q);
++		nfp_net_rx_queue_release(dev, i);
+ 	}
+ 
+ 	/* Cancel possible impending LSC work here before releasing the port*/
+@@ -500,6 +502,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
+ 	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
+ 	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
+ 	hw->mtu = RTE_ETHER_MTU;
++	hw->flbufsz = RTE_ETHER_MTU;
+ 
+ 	/* VLAN insertion is incompatible with LSOv2 */
+ 	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c
+index 303ef72b1b..0781f34764 100644
+--- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c
++++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c
+@@ -219,11 +219,13 @@ nfp_netvf_close(struct rte_eth_dev *dev)
+ 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ 		this_tx_q =  (struct nfp_net_txq *)dev->data->tx_queues[i];
+ 		nfp_net_reset_tx_queue(this_tx_q);
++		nfp_net_tx_queue_release(dev, i);
+ 	}
+ 
+ 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ 		this_rx_q =  (struct nfp_net_rxq *)dev->data->rx_queues[i];
+ 		nfp_net_reset_rx_queue(this_rx_q);
++		nfp_net_rx_queue_release(dev, i);
+ 	}
+ 
+ 	rte_intr_disable(pci_dev->intr_handle);
+@@ -367,6 +369,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
+ 	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
+ 	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
+ 	hw->mtu = RTE_ETHER_MTU;
++	hw->flbufsz = RTE_ETHER_MTU;
+ 
+ 	/* VLAN insertion is incompatible with LSOv2 */
+ 	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+diff --git a/dpdk/drivers/net/nfp/nfp_rxtx.c b/dpdk/drivers/net/nfp/nfp_rxtx.c
+index 0fe1415596..335a90b2c9 100644
+--- a/dpdk/drivers/net/nfp/nfp_rxtx.c
++++ b/dpdk/drivers/net/nfp/nfp_rxtx.c
+@@ -470,6 +470,7 @@ nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
+ 
+ 	if (rxq) {
+ 		nfp_net_rx_queue_release_mbufs(rxq);
++		rte_eth_dma_zone_free(dev, "rx_ring", queue_idx);
+ 		rte_free(rxq->rxbufs);
+ 		rte_free(rxq);
+ 	}
+@@ -660,6 +661,7 @@ nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
+ 
+ 	if (txq) {
+ 		nfp_net_tx_queue_release_mbufs(txq);
++		rte_eth_dma_zone_free(dev, "tx_ring", queue_idx);
+ 		rte_free(txq->txbufs);
+ 		rte_free(txq);
+ 	}
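+
+The nfp_rxtx.c release hooks above also free the named DMA zone that
+holds each descriptor ring; without that, every queue reconfiguration
+leaked a memzone. A hedged sketch of the release path (struct rxq and its
+fields are illustrative):
+
+    static void rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+    {
+            struct rxq *q = dev->data->rx_queues[qid];
+
+            if (q == NULL)
+                    return;
+            release_mbufs(q);                           /* drop queued mbufs */
+            rte_eth_dma_zone_free(dev, "rx_ring", qid); /* free the ring zone */
+            rte_free(q->bufs);
+            rte_free(q);
+    }
+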
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
+index 0e03948ec7..394a7628e0 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
+@@ -63,7 +63,7 @@
+  * Wildcard indicating a CPP read or write action
+  *
+  * The action used will be either read or write depending on whether a read or
+- * write instruction/call is performed on the NFP_CPP_ID.  It is recomended that
++ * write instruction/call is performed on the NFP_CPP_ID.  It is recommended that
+  * the RW action is used even if all actions to be performed on a NFP_CPP_ID are
+  * known to be only reads or writes. Doing so will in many cases save NFP CPP
+  * internal software resources.
+@@ -405,7 +405,7 @@ int nfp_idstr2meid(int chip_family, const char *s, const char **endptr);
+  * @param chip_family Chip family ID
+  * @param s           A string of format "iX.anything" or "iX"
+  * @param endptr      If non-NULL, *endptr will point to the trailing
+- *                    striong after the ME ID part of the string, which
++ *                    string after the ME ID part of the string, which
+  *                    is either an empty string or the first character
+  *                    after the separating period.
+  * @return            The island ID on success, -1 on error.
+@@ -425,7 +425,7 @@ int nfp_idstr2island(int chip_family, const char *s, const char **endptr);
+  * @param chip_family Chip family ID
+  * @param s           A string of format "meX.anything" or "meX"
+  * @param endptr      If non-NULL, *endptr will point to the trailing
+- *                    striong after the ME ID part of the string, which
++ *                    string after the ME ID part of the string, which
+  *                    is either an empty string or the first character
+  *                    after the separating period.
+  * @return            The ME number on success, -1 on error.
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
+index bad80a5a1c..08bc4e8ef2 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
+@@ -16,9 +16,6 @@
+ 
+ #include <assert.h>
+ #include <stdio.h>
+-#if defined(RTE_BACKTRACE)
+-#include <execinfo.h>
+-#endif
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <stdint.h>
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c
+index f91049383e..37799af558 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c
+@@ -202,7 +202,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,
+  * @address:    start address on CPP target
+  * @size:   size of area
+  *
+- * Allocate and initilizae a CPP area structure, and lock it down so
++ * Allocate and initialize a CPP area structure, and lock it down so
+  * that it can be accessed directly.
+  *
+  * NOTE: @address and @size must be 32-bit aligned values.
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h b/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h
+index c9c7b0d0fb..e74cdeb191 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h
+@@ -272,7 +272,7 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+  * @br_primary:   branch id of primary bootloader
+  * @br_secondary: branch id of secondary bootloader
+  * @br_nsp:       branch id of NSP
+- * @primary:      version of primarary bootloader
++ * @primary:      version of primary bootloader
+  * @secondary:    version id of secondary bootloader
+  * @nsp:          version id of NSP
+  * @sensor_mask:  mask of present sensors available on NIC
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c b/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c
+index dd41fa4de4..7b5630fd86 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c
+@@ -207,7 +207,7 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
+  * nfp_resource_release() - Release a NFP Resource handle
+  * @res:	NFP Resource handle
+  *
+- * NOTE: This function implictly unlocks the resource handle
++ * NOTE: This function implicitly unlocks the resource handle
+  */
+ void
+ nfp_resource_release(struct nfp_resource *res)
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c b/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c
+index cb7d83db51..2feca2ed81 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c
+@@ -236,7 +236,7 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
+  * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
+  * @rtbl:	NFP RTsym table
+  * @name:	Symbol name
+- * @error:	Poniter to error code (optional)
++ * @error:	Pointer to error code (optional)
+  *
+  * Lookup a symbol, map, read it and return it's value. Value of the symbol
+  * will be interpreted as a simple little-endian unsigned value. Symbol can
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_target.h b/dpdk/drivers/net/nfp/nfpcore/nfp_target.h
+index 2884a0034f..e8dcc9ad1e 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp_target.h
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_target.h
+@@ -37,7 +37,7 @@ pushpull_width(int pp)
+ static inline int
+ target_rw(uint32_t cpp_id, int pp, int start, int len)
+ {
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 
+ 	if (island && (island < start || island > (start + len)))
+ 		return NFP_ERRNO(EINVAL);
+@@ -117,7 +117,7 @@ nfp6000_nbi_ppc(uint32_t cpp_id)
+ static inline int
+ nfp6000_nbi(uint32_t cpp_id, uint64_t address)
+ {
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 	uint64_t rel_addr = address & 0x3fFFFF;
+ 
+ 	if (island && (island < 8 || island > 9))
+@@ -281,7 +281,7 @@ static inline int
+ nfp6000_mu(uint32_t cpp_id, uint64_t address)
+ {
+ 	int pp;
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 
+ 	if (island == 0) {
+ 		if (address < 0x2000000000ULL)
+@@ -316,7 +316,7 @@ nfp6000_mu(uint32_t cpp_id, uint64_t address)
+ static inline int
+ nfp6000_ila(uint32_t cpp_id)
+ {
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 
+ 	if (island && (island < 48 || island > 51))
+ 		return NFP_ERRNO(EINVAL);
+@@ -336,7 +336,7 @@ nfp6000_ila(uint32_t cpp_id)
+ static inline int
+ nfp6000_pci(uint32_t cpp_id)
+ {
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 
+ 	if (island && (island < 4 || island > 7))
+ 		return NFP_ERRNO(EINVAL);
+@@ -354,7 +354,7 @@ nfp6000_pci(uint32_t cpp_id)
+ static inline int
+ nfp6000_crypto(uint32_t cpp_id)
+ {
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 
+ 	if (island && (island < 12 || island > 15))
+ 		return NFP_ERRNO(EINVAL);
+@@ -370,9 +370,9 @@ nfp6000_crypto(uint32_t cpp_id)
+ static inline int
+ nfp6000_cap_xpb(uint32_t cpp_id)
+ {
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 
+-	if (island && (island < 1 || island > 63))
++	if (island > 63)
+ 		return NFP_ERRNO(EINVAL);
+ 
+ 	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+@@ -410,9 +410,9 @@ nfp6000_cap_xpb(uint32_t cpp_id)
+ static inline int
+ nfp6000_cls(uint32_t cpp_id)
+ {
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ 
+-	if (island && (island < 1 || island > 63))
++	if (island > 63)
+ 		return NFP_ERRNO(EINVAL);
+ 
+ 	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+@@ -540,11 +540,11 @@ nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address,
+ 	       const uint32_t *imb_table)
+ {
+ 	int err;
+-	int island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
+-	int target = NFP_CPP_ID_TARGET_of(cpp_island_id);
++	uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
++	uint8_t target = NFP_CPP_ID_TARGET_of(cpp_island_id);
+ 	uint32_t imb;
+ 
+-	if (target < 0 || target >= 16)
++	if (target >= 16)
+ 		return NFP_ERRNO(EINVAL);
+ 
+ 	if (island == 0) {
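+
+On the nfp_target.h changes above: NFP_CPP_ID_ISLAND_of() yields an 8-bit
+field, so with island held in a uint8_t the old guard simplifies. Since
+island < 1 can only hold for island == 0, which the leading island &&
+test already excludes, the range check collapses to one comparison:
+
+    uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+    /* was: if (island && (island < 1 || island > 63)) */
+    if (island > 63)
+            return NFP_ERRNO(EINVAL);
+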
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_dummy.h b/dpdk/drivers/net/ngbe/base/ngbe_dummy.h
+index 61b0d82bfb..d74c9f7b54 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_dummy.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_dummy.h
+@@ -114,6 +114,9 @@ static inline s32 ngbe_mac_get_link_capabilities_dummy(struct ngbe_hw *TUP0,
+ {
+ 	return NGBE_ERR_OPS_DUMMY;
+ }
++static inline void ngbe_setup_pba_dummy(struct ngbe_hw *TUP0)
++{
++}
+ static inline s32 ngbe_mac_led_on_dummy(struct ngbe_hw *TUP0, u32 TUP1)
+ {
+ 	return NGBE_ERR_OPS_DUMMY;
+@@ -298,6 +301,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
+ 	hw->mac.setup_link = ngbe_mac_setup_link_dummy;
+ 	hw->mac.check_link = ngbe_mac_check_link_dummy;
+ 	hw->mac.get_link_capabilities = ngbe_mac_get_link_capabilities_dummy;
++	hw->mac.setup_pba = ngbe_setup_pba_dummy;
+ 	hw->mac.led_on = ngbe_mac_led_on_dummy;
+ 	hw->mac.led_off = ngbe_mac_led_off_dummy;
+ 	hw->mac.set_rar = ngbe_mac_set_rar_dummy;
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c b/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c
+index f9a876e9bd..6375ee9b29 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c
+@@ -20,8 +20,6 @@ s32 ngbe_init_eeprom_params(struct ngbe_hw *hw)
+ 	u32 eec;
+ 	u16 eeprom_size;
+ 
+-	DEBUGFUNC("ngbe_init_eeprom_params");
+-
+ 	if (eeprom->type != ngbe_eeprom_unknown)
+ 		return 0;
+ 
+@@ -52,8 +50,8 @@ s32 ngbe_init_eeprom_params(struct ngbe_hw *hw)
+ 	eeprom->address_bits = 16;
+ 	eeprom->sw_addr = 0x80;
+ 
+-	DEBUGOUT("eeprom params: type = %d, size = %d, address bits: "
+-		  "%d %d\n", eeprom->type, eeprom->word_size,
++	DEBUGOUT("eeprom params: type = %d, size = %d, address bits: %d %d",
++		  eeprom->type, eeprom->word_size,
+ 		  eeprom->address_bits, eeprom->sw_addr);
+ 
+ 	return 0;
+@@ -72,9 +70,6 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw)
+ 	u32 i;
+ 	u32 swsm;
+ 
+-	DEBUGFUNC("ngbe_get_eeprom_semaphore");
+-
+-
+ 	/* Get SMBI software semaphore between device drivers first */
+ 	for (i = 0; i < timeout; i++) {
+ 		/*
+@@ -90,8 +85,7 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw)
+ 	}
+ 
+ 	if (i == timeout) {
+-		DEBUGOUT("Driver can't access the eeprom - SMBI Semaphore "
+-			 "not granted.\n");
++		DEBUGOUT("Driver can't access the eeprom - SMBI Semaphore not granted.");
+ 		/*
+ 		 * this release is particularly important because our attempts
+ 		 * above to get the semaphore may have succeeded, and if there
+@@ -134,13 +128,12 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw)
+ 		 * was not granted because we don't have access to the EEPROM
+ 		 */
+ 		if (i >= timeout) {
+-			DEBUGOUT("SWESMBI Software EEPROM semaphore not granted.\n");
++			DEBUGOUT("SWESMBI Software EEPROM semaphore not granted.");
+ 			ngbe_release_eeprom_semaphore(hw);
+ 			status = NGBE_ERR_EEPROM;
+ 		}
+ 	} else {
+-		DEBUGOUT("Software semaphore SMBI between device drivers "
+-			 "not granted.\n");
++		DEBUGOUT("Software semaphore SMBI between device drivers not granted.");
+ 	}
+ 
+ 	return status;
+@@ -154,8 +147,6 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw)
+  **/
+ void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
+ {
+-	DEBUGFUNC("ngbe_release_eeprom_semaphore");
+-
+ 	wr32m(hw, NGBE_MNGSWSYNC, NGBE_MNGSWSYNC_REQ, 0);
+ 	wr32m(hw, NGBE_SWSEM, NGBE_SWSEM_PF, 0);
+ 	ngbe_flush(hw);
+@@ -276,7 +267,6 @@ s32 ngbe_validate_eeprom_checksum_em(struct ngbe_hw *hw,
+ 	u32 eeprom_cksum_devcap = 0;
+ 	int err = 0;
+ 
+-	DEBUGFUNC("ngbe_validate_eeprom_checksum_em");
+ 	UNREFERENCED_PARAMETER(checksum_val);
+ 
+ 	/* Check EEPROM only once */
+@@ -315,8 +305,6 @@ s32 ngbe_save_eeprom_version(struct ngbe_hw *hw)
+ 	u32 etrack_id = 0;
+ 	u32 offset = (hw->rom.sw_addr + NGBE_EEPROM_VERSION_L) << 1;
+ 
+-	DEBUGFUNC("ngbe_save_eeprom_version");
+-
+ 	if (hw->bus.lan_id == 0) {
+ 		hw->rom.read32(hw, offset, &eeprom_verl);
+ 		etrack_id = eeprom_verl;
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.c b/dpdk/drivers/net/ngbe/base/ngbe_hw.c
+index 0716357725..08a7e02943 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_hw.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.c
+@@ -20,8 +20,6 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
+ {
+ 	s32 err;
+ 
+-	DEBUGFUNC("ngbe_start_hw");
+-
+ 	/* Clear the VLAN filter table */
+ 	hw->mac.clear_vfta(hw);
+ 
+@@ -31,7 +29,7 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
+ 	/* Setup flow control */
+ 	err = hw->mac.setup_fc(hw);
+ 	if (err != 0 && err != NGBE_NOT_IMPLEMENTED) {
+-		DEBUGOUT("Flow control setup failed, returning %d\n", err);
++		DEBUGOUT("Flow control setup failed, returning %d", err);
+ 		return err;
+ 	}
+ 
+@@ -55,8 +53,6 @@ s32 ngbe_init_hw(struct ngbe_hw *hw)
+ {
+ 	s32 status;
+ 
+-	DEBUGFUNC("ngbe_init_hw");
+-
+ 	ngbe_save_eeprom_version(hw);
+ 
+ 	/* Reset the hardware */
+@@ -67,7 +63,7 @@ s32 ngbe_init_hw(struct ngbe_hw *hw)
+ 	}
+ 
+ 	if (status != 0)
+-		DEBUGOUT("Failed to initialize HW, STATUS = %d\n", status);
++		DEBUGOUT("Failed to initialize HW, STATUS = %d", status);
+ 
+ 	return status;
+ }
+@@ -156,8 +152,6 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
+ {
+ 	s32 status;
+ 
+-	DEBUGFUNC("ngbe_reset_hw_em");
+-
+ 	/* Call adapter stop to disable tx/rx and clear interrupts */
+ 	status = hw->mac.stop_hw(hw);
+ 	if (status != 0)
+@@ -205,8 +199,6 @@ s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
+ {
+ 	u16 i = 0;
+ 
+-	DEBUGFUNC("ngbe_clear_hw_cntrs");
+-
+ 	/* QP Stats */
+ 	/* don't write clear queue stats */
+ 	for (i = 0; i < NGBE_MAX_QP; i++) {
+@@ -305,8 +297,6 @@ s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr)
+ 	u32 rar_low;
+ 	u16 i;
+ 
+-	DEBUGFUNC("ngbe_get_mac_addr");
+-
+ 	wr32(hw, NGBE_ETHADDRIDX, 0);
+ 	rar_high = rd32(hw, NGBE_ETHADDRH);
+ 	rar_low = rd32(hw, NGBE_ETHADDRL);
+@@ -332,8 +322,6 @@ void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw)
+ 	struct ngbe_bus_info *bus = &hw->bus;
+ 	u32 reg = 0;
+ 
+-	DEBUGFUNC("ngbe_set_lan_id_multi_port");
+-
+ 	reg = rd32(hw, NGBE_PORTSTAT);
+ 	bus->lan_id = NGBE_PORTSTAT_ID(reg);
+ 	bus->func = bus->lan_id;
+@@ -350,10 +338,8 @@ void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw)
+  **/
+ s32 ngbe_stop_hw(struct ngbe_hw *hw)
+ {
+-	u32 reg_val;
+ 	u16 i;
+-
+-	DEBUGFUNC("ngbe_stop_hw");
++	s32 status = 0;
+ 
+ 	/*
+ 	 * Set the adapter_stopped flag so other driver functions stop touching
+@@ -372,16 +358,27 @@ s32 ngbe_stop_hw(struct ngbe_hw *hw)
+ 	wr32(hw, NGBE_ICRMISC, NGBE_ICRMISC_MASK);
+ 	wr32(hw, NGBE_ICR(0), NGBE_ICR_MASK);
+ 
+-	/* Disable the transmit unit.  Each queue must be disabled. */
+-	for (i = 0; i < hw->mac.max_tx_queues; i++)
+-		wr32(hw, NGBE_TXCFG(i), NGBE_TXCFG_FLUSH);
++	wr32(hw, NGBE_BMECTL, 0x3);
+ 
+ 	/* Disable the receive unit by stopping each queue */
+-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+-		reg_val = rd32(hw, NGBE_RXCFG(i));
+-		reg_val &= ~NGBE_RXCFG_ENA;
+-		wr32(hw, NGBE_RXCFG(i), reg_val);
+-	}
++	for (i = 0; i < hw->mac.max_rx_queues; i++)
++		wr32(hw, NGBE_RXCFG(i), 0);
++
++	/* flush all queues disables */
++	ngbe_flush(hw);
++	msec_delay(2);
++
++	/*
++	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
++	 * access and verify no pending requests
++	 */
++	status = ngbe_set_pcie_master(hw, false);
++	if (status)
++		return status;
++
++	/* Disable the transmit unit.  Each queue must be disabled. */
++	for (i = 0; i < hw->mac.max_tx_queues; i++)
++		wr32(hw, NGBE_TXCFG(i), 0);
+ 
+ 	/* flush all queues disables */
+ 	ngbe_flush(hw);
+@@ -399,8 +396,6 @@ s32 ngbe_led_on(struct ngbe_hw *hw, u32 index)
+ {
+ 	u32 led_reg = rd32(hw, NGBE_LEDCTL);
+ 
+-	DEBUGFUNC("ngbe_led_on");
+-
+ 	if (index > 3)
+ 		return NGBE_ERR_PARAM;
+ 
+@@ -421,8 +416,6 @@ s32 ngbe_led_off(struct ngbe_hw *hw, u32 index)
+ {
+ 	u32 led_reg = rd32(hw, NGBE_LEDCTL);
+ 
+-	DEBUGFUNC("ngbe_led_off");
+-
+ 	if (index > 3)
+ 		return NGBE_ERR_PARAM;
+ 
+@@ -444,8 +437,6 @@ s32 ngbe_validate_mac_addr(u8 *mac_addr)
+ {
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_validate_mac_addr");
+-
+ 	/* Make sure it is not a multicast address */
+ 	if (NGBE_IS_MULTICAST((struct rte_ether_addr *)mac_addr)) {
+ 		status = NGBE_ERR_INVALID_MAC_ADDR;
+@@ -476,11 +467,9 @@ s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ 	u32 rar_low, rar_high;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("ngbe_set_rar");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (index >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", index);
++		DEBUGOUT("RAR index %d is out of range.", index);
+ 		return NGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -528,11 +517,9 @@ s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index)
+ 	u32 rar_high;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("ngbe_clear_rar");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (index >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", index);
++		DEBUGOUT("RAR index %d is out of range.", index);
+ 		return NGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -568,8 +555,6 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw)
+ 	u32 psrctl;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("ngbe_init_rx_addrs");
+-
+ 	/*
+ 	 * If the current mac address is valid, assume it is a software override
+ 	 * to the permanent address.
+@@ -580,18 +565,18 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw)
+ 		/* Get the MAC address from the RAR0 for later reference */
+ 		hw->mac.get_mac_addr(hw, hw->mac.addr);
+ 
+-		DEBUGOUT(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
++		DEBUGOUT(" Keeping Current RAR0 Addr = "
++			  RTE_ETHER_ADDR_PRT_FMT,
+ 			  hw->mac.addr[0], hw->mac.addr[1],
+-			  hw->mac.addr[2]);
+-		DEBUGOUT("%.2X %.2X %.2X\n", hw->mac.addr[3],
++			  hw->mac.addr[2], hw->mac.addr[3],
+ 			  hw->mac.addr[4], hw->mac.addr[5]);
+ 	} else {
+ 		/* Setup the receive address. */
+-		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+-		DEBUGOUT(" New MAC Addr =%.2X %.2X %.2X ",
++		DEBUGOUT("Overriding MAC Address in RAR[0]");
++		DEBUGOUT(" New MAC Addr = "
++			  RTE_ETHER_ADDR_PRT_FMT,
+ 			  hw->mac.addr[0], hw->mac.addr[1],
+-			  hw->mac.addr[2]);
+-		DEBUGOUT("%.2X %.2X %.2X\n", hw->mac.addr[3],
++			  hw->mac.addr[2], hw->mac.addr[3],
+ 			  hw->mac.addr[4], hw->mac.addr[5]);
+ 
+ 		hw->mac.set_rar(hw, 0, hw->mac.addr, 0, true);
+@@ -601,7 +586,7 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw)
+ 	hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
+ 
+ 	/* Zero out the other receive addresses. */
+-	DEBUGOUT("Clearing RAR[1-%d]\n", rar_entries - 1);
++	DEBUGOUT("Clearing RAR[1-%d]", rar_entries - 1);
+ 	for (i = 1; i < rar_entries; i++) {
+ 		wr32(hw, NGBE_ETHADDRIDX, i);
+ 		wr32(hw, NGBE_ETHADDRL, 0);
+@@ -615,7 +600,7 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw)
+ 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ 	wr32(hw, NGBE_PSRCTL, psrctl);
+ 
+-	DEBUGOUT(" Clearing MTA\n");
++	DEBUGOUT(" Clearing MTA");
+ 	for (i = 0; i < hw->mac.mcft_size; i++)
+ 		wr32(hw, NGBE_MCADDRTBL(i), 0);
+ 
+@@ -640,8 +625,6 @@ static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr)
+ {
+ 	u32 vector = 0;
+ 
+-	DEBUGFUNC("ngbe_mta_vector");
+-
+ 	switch (hw->mac.mc_filter_type) {
+ 	case 0:   /* use bits [47:36] of the address */
+ 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+@@ -656,7 +639,7 @@ static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr)
+ 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ 		break;
+ 	default:  /* Invalid mc_filter_type */
+-		DEBUGOUT("MC filter type param set incorrectly\n");
++		DEBUGOUT("MC filter type param set incorrectly");
+ 		ASSERT(0);
+ 		break;
+ 	}
+@@ -679,12 +662,10 @@ void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr)
+ 	u32 vector_bit;
+ 	u32 vector_reg;
+ 
+-	DEBUGFUNC("ngbe_set_mta");
+-
+ 	hw->addr_ctrl.mta_in_use++;
+ 
+ 	vector = ngbe_mta_vector(hw, mc_addr);
+-	DEBUGOUT(" bit-vector = 0x%03X\n", vector);
++	DEBUGOUT(" bit-vector = 0x%03X", vector);
+ 
+ 	/*
+ 	 * The MTA is a register array of 128 32-bit registers. It is treated
+@@ -718,8 +699,6 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list,
+ 	u32 i;
+ 	u32 vmdq;
+ 
+-	DEBUGFUNC("ngbe_update_mc_addr_list");
+-
+ 	/*
+ 	 * Set the new number of MC addresses that we are being requested to
+ 	 * use.
+@@ -729,13 +708,13 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list,
+ 
+ 	/* Clear mta_shadow */
+ 	if (clear) {
+-		DEBUGOUT(" Clearing MTA\n");
++		DEBUGOUT(" Clearing MTA");
+ 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ 	}
+ 
+ 	/* Update mta_shadow */
+ 	for (i = 0; i < mc_addr_count; i++) {
+-		DEBUGOUT(" Adding the multicast addresses:\n");
++		DEBUGOUT(" Adding the multicast addresses:");
+ 		ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ 	}
+ 
+@@ -752,7 +731,7 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list,
+ 		wr32(hw, NGBE_PSRCTL, psrctl);
+ 	}
+ 
+-	DEBUGOUT("ngbe update mc addr list complete\n");
++	DEBUGOUT("ngbe update mc addr list complete");
+ 	return 0;
+ }
+ 
+@@ -767,11 +746,9 @@ s32 ngbe_setup_fc_em(struct ngbe_hw *hw)
+ 	s32 err = 0;
+ 	u16 reg_cu = 0;
+ 
+-	DEBUGFUNC("ngbe_setup_fc");
+-
+ 	/* Validate the requested mode */
+ 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) {
+-		DEBUGOUT("ngbe_fc_rx_pause not valid in strict IEEE mode\n");
++		DEBUGOUT("ngbe_fc_rx_pause not valid in strict IEEE mode");
+ 		err = NGBE_ERR_INVALID_LINK_SETTINGS;
+ 		goto out;
+ 	}
+@@ -827,7 +804,7 @@ s32 ngbe_setup_fc_em(struct ngbe_hw *hw)
+ 			reg_cu |= 0xC00; /*need to merge rtl and mvl on page 0*/
+ 		break;
+ 	default:
+-		DEBUGOUT("Flow control param set incorrectly\n");
++		DEBUGOUT("Flow control param set incorrectly");
+ 		err = NGBE_ERR_CONFIG;
+ 		goto out;
+ 	}
+@@ -851,8 +828,6 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw)
+ 	u32 pause_time;
+ 	u32 fcrtl, fcrth;
+ 
+-	DEBUGFUNC("ngbe_fc_enable");
+-
+ 	/* Validate the water mark configuration */
+ 	if (!hw->fc.pause_time) {
+ 		err = NGBE_ERR_INVALID_LINK_SETTINGS;
+@@ -863,7 +838,7 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw)
+ 	if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
+ 		if (!hw->fc.low_water ||
+ 			hw->fc.low_water >= hw->fc.high_water) {
+-			DEBUGOUT("Invalid water mark configuration\n");
++			DEBUGOUT("Invalid water mark configuration");
+ 			err = NGBE_ERR_INVALID_LINK_SETTINGS;
+ 			goto out;
+ 		}
+@@ -919,7 +894,7 @@ s32 ngbe_fc_enable(struct ngbe_hw *hw)
+ 		fccfg_reg |= NGBE_TXFCCFG_FC;
+ 		break;
+ 	default:
+-		DEBUGOUT("Flow control param set incorrectly\n");
++		DEBUGOUT("Flow control param set incorrectly");
+ 		err = NGBE_ERR_CONFIG;
+ 		goto out;
+ 	}
+@@ -977,8 +952,7 @@ s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+ {
+ 	if ((!(adv_reg)) ||  (!(lp_reg))) {
+-		DEBUGOUT("Local or link partner's advertised flow control "
+-			 "settings are NULL. Local: %x, link partner: %x\n",
++		DEBUGOUT("Local or link partner's advertised flow control settings are NULL. Local: %x, link partner: %x",
+ 			      adv_reg, lp_reg);
+ 		return NGBE_ERR_FC_NOT_NEGOTIATED;
+ 	}
+@@ -993,22 +967,22 @@ s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ 		 */
+ 		if (hw->fc.requested_mode == ngbe_fc_full) {
+ 			hw->fc.current_mode = ngbe_fc_full;
+-			DEBUGOUT("Flow Control = FULL.\n");
++			DEBUGOUT("Flow Control = FULL.");
+ 		} else {
+ 			hw->fc.current_mode = ngbe_fc_rx_pause;
+-			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
++			DEBUGOUT("Flow Control=RX PAUSE frames only");
+ 		}
+ 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ 		hw->fc.current_mode = ngbe_fc_tx_pause;
+-		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
++		DEBUGOUT("Flow Control = TX PAUSE frames only.");
+ 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ 		hw->fc.current_mode = ngbe_fc_rx_pause;
+-		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
++		DEBUGOUT("Flow Control = RX PAUSE frames only.");
+ 	} else {
+ 		hw->fc.current_mode = ngbe_fc_none;
+-		DEBUGOUT("Flow Control = NONE.\n");
++		DEBUGOUT("Flow Control = NONE.");
+ 	}
+ 	return 0;
+ }
+@@ -1046,8 +1020,6 @@ void ngbe_fc_autoneg(struct ngbe_hw *hw)
+ 	u32 speed;
+ 	bool link_up;
+ 
+-	DEBUGFUNC("ngbe_fc_autoneg");
+-
+ 	/*
+ 	 * AN should have completed when the cable was plugged in.
+ 	 * Look for reasons to bail out.  Bail out if:
+@@ -1076,6 +1048,64 @@ void ngbe_fc_autoneg(struct ngbe_hw *hw)
+ 	}
+ }
+ 
++/**
++ *  ngbe_set_pcie_master - Disable or Enable PCI-express master access
++ *  @hw: pointer to hardware structure
++ *
++ *  Enables or disables PCI-Express master access. When disabling, it also
++ *  verifies there are no pending requests: NGBE_ERR_MASTER_REQUESTS_PENDING
++ *  is returned if clearing the master-enable bit did not stop outstanding
++ *  master requests; otherwise 0 is returned.
++ **/
++s32 ngbe_set_pcie_master(struct ngbe_hw *hw, bool enable)
++{
++	struct rte_pci_device *pci_dev = (struct rte_pci_device *)hw->back;
++	s32 status = 0;
++	s32 ret = 0;
++	u32 i;
++	u16 reg;
++
++	ret = rte_pci_read_config(pci_dev, &reg,
++			sizeof(reg), PCI_COMMAND);
++	if (ret != sizeof(reg)) {
++		DEBUGOUT("Cannot read command from PCI config space!\n");
++		return -1;
++	}
++
++	if (enable)
++		reg |= PCI_COMMAND_MASTER;
++	else
++		reg &= ~PCI_COMMAND_MASTER;
++
++	ret = rte_pci_write_config(pci_dev, &reg,
++			sizeof(reg), PCI_COMMAND);
++	if (ret != sizeof(reg)) {
++		DEBUGOUT("Cannot write command to PCI config space!\n");
++		return -1;
++	}
++
++	if (enable)
++		goto out;
++
++	/* Exit if master requests are blocked */
++	if (!(rd32(hw, NGBE_BMEPEND)) ||
++	    NGBE_REMOVED(hw->hw_addr))
++		goto out;
++
++	/* Poll for master request bit to clear */
++	for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
++		usec_delay(100);
++		if (!(rd32(hw, NGBE_BMEPEND)))
++			goto out;
++	}
++
++	DEBUGOUT("PCIe transaction pending bit also did not clear.");
++	status = NGBE_ERR_MASTER_REQUESTS_PENDING;
++
++out:
++	return status;
++}
++
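The helper just added is, at its core, a read-modify-write of the bus-master enable bit in the PCI COMMAND register, followed by a drain poll on NGBE_BMEPEND when disabling. A minimal standalone sketch of that read-modify-write, using only the rte_pci_*_config() helpers the patch itself relies on (toggle_bus_master and dev are illustrative names, not part of the driver):

#include <stdbool.h>
#include <stdint.h>
#include <rte_bus_pci.h>

/* Sketch only: mirrors the config-space access in ngbe_set_pcie_master() */
static int toggle_bus_master(struct rte_pci_device *dev, bool enable)
{
	uint16_t cmd;

	/* the PCI COMMAND register lives at config-space offset 0x04 */
	if (rte_pci_read_config(dev, &cmd, sizeof(cmd), 0x04) != sizeof(cmd))
		return -1;

	if (enable)
		cmd |= 0x4;		/* bus-master enable is bit 2 */
	else
		cmd &= ~0x4;

	if (rte_pci_write_config(dev, &cmd, sizeof(cmd), 0x04) != sizeof(cmd))
		return -1;

	return 0;
}

The drain poll in the real function then gives outstanding DMA up to NGBE_PCI_MASTER_DISABLE_TIMEOUT * 100 microseconds (800 iterations of usec_delay(100), i.e. 80 ms) before reporting NGBE_ERR_MASTER_REQUESTS_PENDING.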
+ /**
+  *  ngbe_acquire_swfw_sync - Acquire SWFW semaphore
+  *  @hw: pointer to hardware structure
+@@ -1092,8 +1122,6 @@ s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask)
+ 	u32 timeout = 200;
+ 	u32 i;
+ 
+-	DEBUGFUNC("ngbe_acquire_swfw_sync");
+-
+ 	for (i = 0; i < timeout; i++) {
+ 		/*
+ 		 * SW NVM semaphore bit is used for access to all
+@@ -1136,8 +1164,6 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask)
+ 	u32 mngsem;
+ 	u32 swmask = mask;
+ 
+-	DEBUGFUNC("ngbe_release_swfw_sync");
+-
+ 	ngbe_get_eeprom_semaphore(hw);
+ 
+ 	mngsem = rd32(hw, NGBE_MNGSEM);
+@@ -1161,9 +1187,6 @@ s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
+ 	int i;
+ 	u32 secrxreg;
+ 
+-	DEBUGFUNC("ngbe_disable_sec_rx_path");
+-
+-
+ 	secrxreg = rd32(hw, NGBE_SECRXCTL);
+ 	secrxreg |= NGBE_SECRXCTL_XDSA;
+ 	wr32(hw, NGBE_SECRXCTL, secrxreg);
+@@ -1178,8 +1201,7 @@ s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
+ 
+ 	/* For informational purposes only */
+ 	if (i >= NGBE_MAX_SECRX_POLL)
+-		DEBUGOUT("Rx unit being enabled before security "
+-			 "path fully disabled.  Continuing with init.\n");
++		DEBUGOUT("Rx unit being enabled before security path fully disabled.  Continuing with init.");
+ 
+ 	return 0;
+ }
+@@ -1194,8 +1216,6 @@ s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
+ {
+ 	u32 secrxreg;
+ 
+-	DEBUGFUNC("ngbe_enable_sec_rx_path");
+-
+ 	secrxreg = rd32(hw, NGBE_SECRXCTL);
+ 	secrxreg &= ~NGBE_SECRXCTL_XDSA;
+ 	wr32(hw, NGBE_SECRXCTL, secrxreg);
+@@ -1215,11 +1235,9 @@ s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq)
+ 	u32 mpsar;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("ngbe_clear_vmdq");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", rar);
++		DEBUGOUT("RAR index %d is out of range.", rar);
+ 		return NGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -1253,11 +1271,9 @@ s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq)
+ 	u32 mpsar;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("ngbe_set_vmdq");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", rar);
++		DEBUGOUT("RAR index %d is out of range.", rar);
+ 		return NGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -1278,8 +1294,7 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw)
+ {
+ 	int i;
+ 
+-	DEBUGFUNC("ngbe_init_uta_tables");
+-	DEBUGOUT(" Clearing UTA\n");
++	DEBUGOUT(" Clearing UTA");
+ 
+ 	for (i = 0; i < 128; i++)
+ 		wr32(hw, NGBE_UCADDRTBL(i), 0);
+@@ -1334,7 +1349,7 @@ s32 ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan, bool vlvf_bypass)
+ 	 * slot we found during our search, else error.
+ 	 */
+ 	if (!first_empty_slot)
+-		DEBUGOUT("No space in VLVF.\n");
++		DEBUGOUT("No space in VLVF.");
+ 
+ 	return first_empty_slot ? first_empty_slot : NGBE_ERR_NO_SPACE;
+ }
+@@ -1355,8 +1370,6 @@ s32 ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, u32 vind,
+ 	u32 regidx, vfta_delta, vfta;
+ 	s32 err;
+ 
+-	DEBUGFUNC("ngbe_set_vfta");
+-
+ 	if (vlan > 4095 || vind > 63)
+ 		return NGBE_ERR_PARAM;
+ 
+@@ -1424,8 +1437,6 @@ s32 ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind,
+ 	u32 portctl;
+ 	s32 vlvf_index;
+ 
+-	DEBUGFUNC("ngbe_set_vlvf");
+-
+ 	if (vlan > 4095 || vind > 63)
+ 		return NGBE_ERR_PARAM;
+ 
+@@ -1505,8 +1516,6 @@ s32 ngbe_clear_vfta(struct ngbe_hw *hw)
+ {
+ 	u32 offset;
+ 
+-	DEBUGFUNC("ngbe_clear_vfta");
+-
+ 	for (offset = 0; offset < hw->mac.vft_size; offset++)
+ 		wr32(hw, NGBE_VLANTBL(offset), 0);
+ 
+@@ -1534,8 +1543,6 @@ s32 ngbe_check_mac_link_em(struct ngbe_hw *hw, u32 *speed,
+ 	u32 i, reg;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_check_mac_link_em");
+-
+ 	reg = rd32(hw, NGBE_GPIOINTSTAT);
+ 	wr32(hw, NGBE_GPIOEOI, reg);
+ 
+@@ -1559,7 +1566,6 @@ s32 ngbe_get_link_capabilities_em(struct ngbe_hw *hw,
+ {
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("\n");
+ 
+ 	hw->mac.autoneg = *autoneg;
+ 
+@@ -1582,8 +1588,6 @@ s32 ngbe_setup_mac_link_em(struct ngbe_hw *hw,
+ {
+ 	s32 status;
+ 
+-	DEBUGFUNC("\n");
+-
+ 	/* Setup the PHY according to input speed */
+ 	status = hw->phy.setup_link(hw, speed, autoneg_wait_to_complete);
+ 
+@@ -1609,6 +1613,30 @@ void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf)
+ 	wr32(hw, NGBE_POOLTXASMAC, pfvfspoof);
+ }
+ 
++/**
++ * ngbe_set_pba - Initialize Rx packet buffer
++ * @hw: pointer to hardware structure
++ * Reserves fixed headroom and programs the Rx/Tx packet buffer sizes.
++ **/
++void ngbe_set_pba(struct ngbe_hw *hw)
++{
++	u32 rxpktsize = hw->mac.rx_pb_size;
++	u32 txpktsize, txpbthresh;
++
++	/* Reserve 256 KB of headroom */
++	rxpktsize -= 256;
++
++	rxpktsize <<= 10;
++	wr32(hw, NGBE_PBRXSIZE, rxpktsize);
++
++	/* Only support an equally distributed Tx packet buffer strategy. */
++	txpktsize = NGBE_PBTXSIZE_MAX;
++	txpbthresh = (txpktsize / 1024) - NGBE_TXPKT_SIZE_MAX;
++
++	wr32(hw, NGBE_PBTXSIZE, txpktsize);
++	wr32(hw, NGBE_PBTXDMATH, txpbthresh);
++}
++
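For review purposes, the Tx half of ngbe_set_pba() is fully determined by the defines added later in ngbe_type.h: txpktsize = NGBE_PBTXSIZE_MAX = 0x5000 bytes (20 KB), so txpbthresh = 20480 / 1024 - NGBE_TXPKT_SIZE_MAX = 20 - 0xA = 10; the Tx DMA threshold is the buffer size in KB minus the maximum Tx packet size. On the Rx side, hw->mac.rx_pb_size is held in KB and converted to bytes by the << 10 shift after the headroom is subtracted.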
+ /**
+  *  ngbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+  *  @hw: pointer to hardware structure
+@@ -1639,8 +1667,6 @@ s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw)
+ {
+ 	struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+ 
+-	DEBUGFUNC("ngbe_init_thermal_sensor_thresh");
+-
+ 	memset(data, 0, sizeof(struct ngbe_thermal_sensor_data));
+ 
+ 	if (hw->bus.lan_id != 0)
+@@ -1664,8 +1690,6 @@ s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw)
+ 	s32 status = 0;
+ 	u32 ts_state;
+ 
+-	DEBUGFUNC("ngbe_mac_check_overtemp");
+-
+ 	/* Check that the LASI temp alarm status was triggered */
+ 	ts_state = rd32(hw, NGBE_TSALM);
+ 
+@@ -1720,8 +1744,6 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw)
+ {
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("ngbe_set_mac_type");
+-
+ 	if (hw->vendor_id != PCI_VENDOR_ID_WANGXUN) {
+ 		DEBUGOUT("Unsupported vendor id: %x", hw->vendor_id);
+ 		return NGBE_ERR_DEVICE_NOT_SUPPORTED;
+@@ -1750,7 +1772,7 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw)
+ 		break;
+ 	}
+ 
+-	DEBUGOUT("found mac: %d media: %d, returns: %d\n",
++	DEBUGOUT("found mac: %d media: %d, returns: %d",
+ 		  hw->mac.type, hw->phy.media_type, err);
+ 	return err;
+ }
+@@ -1764,15 +1786,12 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw)
+  **/
+ s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval)
+ {
+-	DEBUGFUNC("ngbe_enable_rx_dma");
+-
+ 	/*
+ 	 * Workaround silicon errata when enabling the Rx datapath.
+ 	 * If traffic is incoming before we enable the Rx unit, it could hang
+ 	 * the Rx DMA unit.  Therefore, make sure the security engine is
+ 	 * completely disabled prior to enabling the Rx unit.
+ 	 */
+-
+ 	hw->mac.disable_sec_rx_path(hw);
+ 
+ 	if (regval & NGBE_PBRXCTL_ENA)
+@@ -1853,8 +1872,6 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
+ 	struct ngbe_rom_info *rom = &hw->rom;
+ 	struct ngbe_mbx_info *mbx = &hw->mbx;
+ 
+-	DEBUGFUNC("ngbe_init_ops_pf");
+-
+ 	/* BUS */
+ 	bus->set_lan_id = ngbe_set_lan_id_multi_port;
+ 
+@@ -1907,6 +1924,8 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
+ 	mac->check_link = ngbe_check_mac_link_em;
+ 	mac->setup_link = ngbe_setup_mac_link_em;
+ 
++	mac->setup_pba = ngbe_set_pba;
++
+ 	/* Manageability interface */
+ 	mac->init_thermal_sensor_thresh = ngbe_init_thermal_sensor_thresh;
+ 	mac->check_overtemp = ngbe_mac_check_overtemp;
+@@ -1928,6 +1947,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
+ 	mac->mcft_size		= NGBE_EM_MC_TBL_SIZE;
+ 	mac->vft_size		= NGBE_EM_VFT_TBL_SIZE;
+ 	mac->num_rar_entries	= NGBE_EM_RAR_ENTRIES;
++	mac->rx_pb_size		= NGBE_EM_RX_PB_SIZE;
+ 	mac->max_rx_queues	= NGBE_EM_MAX_RX_QUEUES;
+ 	mac->max_tx_queues	= NGBE_EM_MAX_TX_QUEUES;
+ 
+@@ -1953,8 +1973,6 @@ s32 ngbe_init_shared_code(struct ngbe_hw *hw)
+ {
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_init_shared_code");
+-
+ 	/*
+ 	 * Set the mac type
+ 	 */
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.h b/dpdk/drivers/net/ngbe/base/ngbe_hw.h
+index ad7e8fc2d9..7e0e23b195 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_hw.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.h
+@@ -13,6 +13,7 @@
+ #define NGBE_EM_RAR_ENTRIES   32
+ #define NGBE_EM_MC_TBL_SIZE   32
+ #define NGBE_EM_VFT_TBL_SIZE  128
++#define NGBE_EM_RX_PB_SIZE    42 /*KB*/
+ 
+ s32 ngbe_init_hw(struct ngbe_hw *hw);
+ s32 ngbe_start_hw(struct ngbe_hw *hw);
+@@ -44,6 +45,7 @@ s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list,
+ 				      ngbe_mc_addr_itr func, bool clear);
+ s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw);
+ s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw);
++void ngbe_set_pba(struct ngbe_hw *hw);
+ 
+ s32 ngbe_setup_fc_em(struct ngbe_hw *hw);
+ s32 ngbe_fc_enable(struct ngbe_hw *hw);
+@@ -52,6 +54,7 @@ void ngbe_fc_autoneg(struct ngbe_hw *hw);
+ s32 ngbe_validate_mac_addr(u8 *mac_addr);
+ s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask);
+ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask);
++s32 ngbe_set_pcie_master(struct ngbe_hw *hw, bool enable);
+ 
+ s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq);
+ s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq);
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_mbx.c b/dpdk/drivers/net/ngbe/base/ngbe_mbx.c
+index 764ae81319..bc0adbb3ec 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_mbx.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_mbx.c
+@@ -21,8 +21,6 @@ s32 ngbe_read_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ 	struct ngbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("ngbe_read_mbx");
+-
+ 	/* limit read to size of mailbox */
+ 	if (size > mbx->size)
+ 		size = mbx->size;
+@@ -47,8 +45,6 @@ s32 ngbe_write_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ 	struct ngbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = 0;
+ 
+-	DEBUGFUNC("ngbe_write_mbx");
+-
+ 	if (size > mbx->size) {
+ 		ret_val = NGBE_ERR_MBX;
+ 		DEBUGOUT("Invalid mailbox message size %d", size);
+@@ -71,8 +67,6 @@ s32 ngbe_check_for_msg(struct ngbe_hw *hw, u16 mbx_id)
+ 	struct ngbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("ngbe_check_for_msg");
+-
+ 	if (mbx->check_for_msg)
+ 		ret_val = mbx->check_for_msg(hw, mbx_id);
+ 
+@@ -91,8 +85,6 @@ s32 ngbe_check_for_ack(struct ngbe_hw *hw, u16 mbx_id)
+ 	struct ngbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("ngbe_check_for_ack");
+-
+ 	if (mbx->check_for_ack)
+ 		ret_val = mbx->check_for_ack(hw, mbx_id);
+ 
+@@ -111,8 +103,6 @@ s32 ngbe_check_for_rst(struct ngbe_hw *hw, u16 mbx_id)
+ 	struct ngbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("ngbe_check_for_rst");
+-
+ 	if (mbx->check_for_rst)
+ 		ret_val = mbx->check_for_rst(hw, mbx_id);
+ 
+@@ -144,8 +134,6 @@ s32 ngbe_check_for_msg_pf(struct ngbe_hw *hw, u16 vf_number)
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 	u32 vf_bit = vf_number;
+ 
+-	DEBUGFUNC("ngbe_check_for_msg_pf");
+-
+ 	if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFREQ_VF1 << vf_bit)) {
+ 		ret_val = 0;
+ 		hw->mbx.stats.reqs++;
+@@ -166,8 +154,6 @@ s32 ngbe_check_for_ack_pf(struct ngbe_hw *hw, u16 vf_number)
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 	u32 vf_bit = vf_number;
+ 
+-	DEBUGFUNC("ngbe_check_for_ack_pf");
+-
+ 	if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFACK_VF1 << vf_bit)) {
+ 		ret_val = 0;
+ 		hw->mbx.stats.acks++;
+@@ -188,8 +174,6 @@ s32 ngbe_check_for_rst_pf(struct ngbe_hw *hw, u16 vf_number)
+ 	u32 vflre = 0;
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("ngbe_check_for_rst_pf");
+-
+ 	vflre = rd32(hw, NGBE_FLRVFE);
+ 	if (vflre & (1 << vf_number)) {
+ 		ret_val = 0;
+@@ -212,8 +196,6 @@ STATIC s32 ngbe_obtain_mbx_lock_pf(struct ngbe_hw *hw, u16 vf_number)
+ 	s32 ret_val = NGBE_ERR_MBX;
+ 	u32 p2v_mailbox;
+ 
+-	DEBUGFUNC("ngbe_obtain_mbx_lock_pf");
+-
+ 	/* Take ownership of the buffer */
+ 	wr32(hw, NGBE_MBCTL(vf_number), NGBE_MBCTL_PFU);
+ 
+@@ -242,8 +224,6 @@ s32 ngbe_write_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+ 	s32 ret_val;
+ 	u16 i;
+ 
+-	DEBUGFUNC("ngbe_write_mbx_pf");
+-
+ 	/* lock the mailbox to prevent pf/vf race condition */
+ 	ret_val = ngbe_obtain_mbx_lock_pf(hw, vf_number);
+ 	if (ret_val)
+@@ -283,8 +263,6 @@ s32 ngbe_read_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+ 	s32 ret_val;
+ 	u16 i;
+ 
+-	DEBUGFUNC("ngbe_read_mbx_pf");
+-
+ 	/* lock the mailbox to prevent pf/vf race condition */
+ 	ret_val = ngbe_obtain_mbx_lock_pf(hw, vf_number);
+ 	if (ret_val)
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_mng.c b/dpdk/drivers/net/ngbe/base/ngbe_mng.c
+index a3dd8093ce..dd32644bfe 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_mng.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_mng.c
+@@ -26,10 +26,8 @@ ngbe_hic_unlocked(struct ngbe_hw *hw, u32 *buffer, u32 length, u32 timeout)
+ 	u32 value, loop;
+ 	u16 i, dword_len;
+ 
+-	DEBUGFUNC("ngbe_hic_unlocked");
+-
+ 	if (!length || length > NGBE_PMMBX_BSIZE) {
+-		DEBUGOUT("Buffer length failure buffersize=%d.\n", length);
++		DEBUGOUT("Buffer length failure buffersize=%d.", length);
+ 		return NGBE_ERR_HOST_INTERFACE_COMMAND;
+ 	}
+ 
+@@ -59,7 +57,7 @@ ngbe_hic_unlocked(struct ngbe_hw *hw, u32 *buffer, u32 length, u32 timeout)
+ 		NGBE_MNGMBXCTL_FWRDY, NGBE_MNGMBXCTL_FWRDY,
+ 		&value, timeout, 1000);
+ 	if (!loop || !(value & NGBE_MNGMBXCTL_FWACK)) {
+-		DEBUGOUT("Command has failed with no status valid.\n");
++		DEBUGOUT("Command has failed with no status valid.");
+ 		return NGBE_ERR_HOST_INTERFACE_COMMAND;
+ 	}
+ 
+@@ -95,10 +93,8 @@ ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer,
+ 	u32 bi;
+ 	u32 dword_len;
+ 
+-	DEBUGFUNC("ngbe_host_interface_command");
+-
+ 	if (length == 0 || length > NGBE_PMMBX_BSIZE) {
+-		DEBUGOUT("Buffer length failure buffersize=%d.\n", length);
++		DEBUGOUT("Buffer length failure buffersize=%d.", length);
+ 		return NGBE_ERR_HOST_INTERFACE_COMMAND;
+ 	}
+ 
+@@ -140,7 +136,7 @@ ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer,
+ 		goto rel_out;
+ 
+ 	if (length < buf_len + hdr_size) {
+-		DEBUGOUT("Buffer not large enough for reply message.\n");
++		DEBUGOUT("Buffer not large enough for reply message.");
+ 		err = NGBE_ERR_HOST_INTERFACE_COMMAND;
+ 		goto rel_out;
+ 	}
+@@ -243,14 +239,69 @@ s32 ngbe_hic_sr_write(struct ngbe_hw *hw, u32 addr, u8 *buf, int len)
+ 	return err;
+ }
+ 
++s32 ngbe_hic_pcie_read(struct ngbe_hw *hw, u16 addr, u32 *buf, int len)
++{
++	struct ngbe_hic_read_pcie command;
++	u32 value = 0;
++	int err, i = 0;
++
++	if (len > NGBE_PMMBX_DATA_SIZE)
++		return NGBE_ERR_HOST_INTERFACE_COMMAND;
++
++	memset(&command, 0, sizeof(command));
++	command.hdr.cmd = FW_PCIE_READ_CMD;
++	command.hdr.buf_len = sizeof(command) - sizeof(command.hdr);
++	command.hdr.checksum = FW_DEFAULT_CHECKSUM;
++	command.lan_id = hw->bus.lan_id;
++	command.addr = addr;
++
++	err = ngbe_host_interface_command(hw, (u32 *)&command,
++			sizeof(command), NGBE_HI_COMMAND_TIMEOUT, false);
++	if (err)
++		return err;
++
++	while (i < (len >> 2)) {
++		value = rd32a(hw, NGBE_MNGMBX, FW_PCIE_BUSMASTER_OFFSET + i);
++		((u32 *)buf)[i] = value;
++		i++;
++	}
++
++	return 0;
++}
++
++s32 ngbe_hic_pcie_write(struct ngbe_hw *hw, u16 addr, u32 *buf, int len)
++{
++	struct ngbe_hic_write_pcie command;
++	u32 value = 0;
++	int err, i = 0;
++
++	while (i < (len >> 2)) {
++		value = ((u32 *)buf)[i];
++		i++;
++	}
++
++	memset(&command, 0, sizeof(command));
++	command.hdr.cmd = FW_PCIE_WRITE_CMD;
++	command.hdr.buf_len = sizeof(command) - sizeof(command.hdr);
++	command.hdr.checksum = FW_DEFAULT_CHECKSUM;
++	command.lan_id = hw->bus.lan_id;
++	command.addr = addr;
++	command.data = value;
++
++	err = ngbe_host_interface_command(hw, (u32 *)&command,
++			sizeof(command), NGBE_HI_COMMAND_TIMEOUT, false);
++	if (err)
++		return err;
++
++	return 0;
++}
++
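Two observations on the new mailbox helpers, plus a hypothetical usage sketch (the call below is illustrative, not part of the patch). First, both commands carry exactly one 32-bit data word (each struct has a single data field), so the while loop in ngbe_hic_pcie_write() effectively forwards only the last dword of buf. Second, reading the standard command/status word back through the firmware would look like:

	u32 cmd_status = 0;

	/* PCI_COMMAND (0x04) is defined later in this patch, in ngbe_osdep.h */
	if (ngbe_hic_pcie_read(hw, PCI_COMMAND, &cmd_status, 4) == 0)
		DEBUGOUT("PCI command/status via FW mailbox: 0x%08x", cmd_status);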
+ s32 ngbe_hic_check_cap(struct ngbe_hw *hw)
+ {
+ 	struct ngbe_hic_read_shadow_ram command;
+ 	s32 err;
+ 	int i;
+ 
+-	DEBUGFUNC("\n");
+-
+ 	command.hdr.req.cmd = FW_EEPROM_CHECK_STATUS;
+ 	command.hdr.req.buf_lenh = 0;
+ 	command.hdr.req.buf_lenl = 0;
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_mng.h b/dpdk/drivers/net/ngbe/base/ngbe_mng.h
+index e3d0309cbc..321338a051 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_mng.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_mng.h
+@@ -20,6 +20,9 @@
+ #define FW_READ_SHADOW_RAM_LEN          0x6
+ #define FW_WRITE_SHADOW_RAM_CMD         0x33
+ #define FW_WRITE_SHADOW_RAM_LEN         0xA /* 8 plus 1 WORD to write */
++#define FW_PCIE_READ_CMD		0xEC
++#define FW_PCIE_WRITE_CMD		0xED
++#define FW_PCIE_BUSMASTER_OFFSET        2
+ #define FW_DEFAULT_CHECKSUM             0xFF /* checksum always 0xFF */
+ #define FW_NVM_DATA_OFFSET              3
+ #define FW_EEPROM_CHECK_STATUS		0xE9
+@@ -76,8 +79,26 @@ struct ngbe_hic_write_shadow_ram {
+ 	u16 pad3;
+ };
+ 
++struct ngbe_hic_read_pcie {
++	struct ngbe_hic_hdr hdr;
++	u8 lan_id;
++	u8 rsvd;
++	u16 addr;
++	u32 data;
++};
++
++struct ngbe_hic_write_pcie {
++	struct ngbe_hic_hdr hdr;
++	u8 lan_id;
++	u8 rsvd;
++	u16 addr;
++	u32 data;
++};
++
+ s32 ngbe_hic_sr_read(struct ngbe_hw *hw, u32 addr, u8 *buf, int len);
+ s32 ngbe_hic_sr_write(struct ngbe_hw *hw, u32 addr, u8 *buf, int len);
++s32 ngbe_hic_pcie_read(struct ngbe_hw *hw, u16 addr, u32 *buf, int len);
++s32 ngbe_hic_pcie_write(struct ngbe_hw *hw, u16 addr, u32 *buf, int len);
+ 
+ s32 ngbe_hic_check_cap(struct ngbe_hw *hw);
+ #endif /* _NGBE_MNG_H_ */
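Assuming the usual 4-byte ngbe_hic_hdr layout (cmd, buf_len, a reserved/status byte, checksum), both new structs are 12 bytes with an 8-byte payload of lan_id, rsvd, addr and data; that is the value the hdr.buf_len = sizeof(command) - sizeof(command.hdr) computation in ngbe_mng.c sends for both the 0xEC read and 0xED write commands.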
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_osdep.h b/dpdk/drivers/net/ngbe/base/ngbe_osdep.h
+index b62d793191..bf1fa30312 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_osdep.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_osdep.h
+@@ -19,6 +19,7 @@
+ #include <rte_config.h>
+ #include <rte_io.h>
+ #include <rte_ether.h>
++#include <rte_bus_pci.h>
+ 
+ #include "../ngbe_logs.h"
+ 
+@@ -180,4 +181,7 @@ static inline u64 REVERT_BIT_MASK64(u64 mask)
+ #define ETH_P_8021Q      0x8100
+ #define ETH_P_8021AD     0x88A8
+ 
++#define PCI_COMMAND		0x04
++#define  PCI_COMMAND_MASTER	0x4
++
+ #endif /* _NGBE_OS_H_ */
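The two new constants mirror the standard PCI configuration-space layout: the COMMAND register sits at offset 0x04 and bus-master enable is bit 2 (mask 0x4), the same values as PCI_COMMAND and PCI_COMMAND_MASTER in Linux's pci_regs.h. Defining them locally in the osdep shim keeps the shared base code free of OS header dependencies.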
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy.c b/dpdk/drivers/net/ngbe/base/ngbe_phy.c
+index 51b0a2ec60..e2b6b05ddf 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_phy.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy.c
+@@ -46,7 +46,7 @@ s32 ngbe_mdi_map_register(mdi_reg_t *reg, mdi_reg_22_t *reg22)
+ static bool ngbe_probe_phy(struct ngbe_hw *hw, u16 phy_addr)
+ {
+ 	if (!ngbe_validate_phy_addr(hw, phy_addr)) {
+-		DEBUGOUT("Unable to validate PHY address 0x%04X\n",
++		DEBUGOUT("Unable to validate PHY address 0x%04X",
+ 			phy_addr);
+ 		return false;
+ 	}
+@@ -72,8 +72,6 @@ s32 ngbe_identify_phy(struct ngbe_hw *hw)
+ 	s32 err = NGBE_ERR_PHY_ADDR_INVALID;
+ 	u16 phy_addr;
+ 
+-	DEBUGFUNC("ngbe_identify_phy");
+-
+ 	if (hw->phy.type != ngbe_phy_unknown)
+ 		return 0;
+ 
+@@ -103,11 +101,9 @@ s32 ngbe_check_reset_blocked(struct ngbe_hw *hw)
+ {
+ 	u32 mmngc;
+ 
+-	DEBUGFUNC("ngbe_check_reset_blocked");
+-
+ 	mmngc = rd32(hw, NGBE_STAT);
+ 	if (mmngc & NGBE_STAT_MNGVETO) {
+-		DEBUGOUT("MNG_VETO bit detected.\n");
++		DEBUGOUT("MNG_VETO bit detected.");
+ 		return true;
+ 	}
+ 
+@@ -125,19 +121,14 @@ bool ngbe_validate_phy_addr(struct ngbe_hw *hw, u32 phy_addr)
+ 	u16 phy_id = 0;
+ 	bool valid = false;
+ 
+-	DEBUGFUNC("ngbe_validate_phy_addr");
+-
+-	if (hw->sub_device_id == NGBE_SUB_DEV_ID_EM_YT8521S_SFP)
+-		return true;
+-
+ 	hw->phy.addr = phy_addr;
+-	hw->phy.read_reg(hw, NGBE_MD_PHY_ID_HIGH,
++	hw->phy.read_reg(hw, NGBE_MD_PHY_ID_LOW,
+ 			     NGBE_MD_DEV_PMA_PMD, &phy_id);
+ 
+ 	if (phy_id != 0xFFFF && phy_id != 0x0)
+ 		valid = true;
+ 
+-	DEBUGOUT("PHY ID HIGH is 0x%04X\n", phy_id);
++	DEBUGOUT("PHY ID LOW is 0x%04X", phy_id);
+ 
+ 	return valid;
+ }
+@@ -153,8 +144,6 @@ s32 ngbe_get_phy_id(struct ngbe_hw *hw)
+ 	u16 phy_id_high = 0;
+ 	u16 phy_id_low = 0;
+ 
+-	DEBUGFUNC("ngbe_get_phy_id");
+-
+ 	err = hw->phy.read_reg(hw, NGBE_MD_PHY_ID_HIGH,
+ 				      NGBE_MD_DEV_PMA_PMD,
+ 				      &phy_id_high);
+@@ -166,7 +155,7 @@ s32 ngbe_get_phy_id(struct ngbe_hw *hw)
+ 	hw->phy.id |= (u32)(phy_id_low & NGBE_PHY_REVISION_MASK);
+ 	hw->phy.revision = (u32)(phy_id_low & ~NGBE_PHY_REVISION_MASK);
+ 
+-	DEBUGOUT("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
++	DEBUGOUT("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X",
+ 		  phy_id_high, phy_id_low);
+ 
+ 	return err;
+@@ -181,8 +170,6 @@ enum ngbe_phy_type ngbe_get_phy_type_from_id(struct ngbe_hw *hw)
+ {
+ 	enum ngbe_phy_type phy_type;
+ 
+-	DEBUGFUNC("ngbe_get_phy_type_from_id");
+-
+ 	switch (hw->phy.id) {
+ 	case NGBE_PHYID_RTL:
+ 		phy_type = ngbe_phy_rtl;
+@@ -215,8 +202,6 @@ s32 ngbe_reset_phy(struct ngbe_hw *hw)
+ {
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("ngbe_reset_phy");
+-
+ 	if (hw->phy.type == ngbe_phy_unknown)
+ 		err = ngbe_identify_phy(hw);
+ 
+@@ -281,7 +266,7 @@ s32 ngbe_read_phy_reg_mdi(struct ngbe_hw *hw, u32 reg_addr, u32 device_type,
+ 	 */
+ 	if (!po32m(hw, NGBE_MDIOSCD, NGBE_MDIOSCD_BUSY,
+ 		0, NULL, 100, 100)) {
+-		DEBUGOUT("PHY address command did not complete\n");
++		DEBUGOUT("PHY address command did not complete");
+ 		return NGBE_ERR_PHY;
+ 	}
+ 
+@@ -305,8 +290,6 @@ s32 ngbe_read_phy_reg(struct ngbe_hw *hw, u32 reg_addr,
+ 	s32 err;
+ 	u32 gssr = hw->phy.phy_semaphore_mask;
+ 
+-	DEBUGFUNC("ngbe_read_phy_reg");
+-
+ 	if (hw->mac.acquire_swfw_sync(hw, gssr))
+ 		return NGBE_ERR_SWFW_SYNC;
+ 
+@@ -346,7 +329,7 @@ s32 ngbe_write_phy_reg_mdi(struct ngbe_hw *hw, u32 reg_addr,
+ 	/* wait for completion */
+ 	if (!po32m(hw, NGBE_MDIOSCD, NGBE_MDIOSCD_BUSY,
+ 		0, NULL, 100, 100)) {
+-		TLOG_DEBUG("PHY write cmd didn't complete\n");
++		DEBUGOUT("PHY write cmd didn't complete");
+ 		return NGBE_ERR_PHY;
+ 	}
+ 
+@@ -367,8 +350,6 @@ s32 ngbe_write_phy_reg(struct ngbe_hw *hw, u32 reg_addr,
+ 	s32 err;
+ 	u32 gssr = hw->phy.phy_semaphore_mask;
+ 
+-	DEBUGFUNC("ngbe_write_phy_reg");
+-
+ 	if (hw->mac.acquire_swfw_sync(hw, gssr))
+ 		err = NGBE_ERR_SWFW_SYNC;
+ 
+@@ -394,8 +375,6 @@ s32 ngbe_init_phy(struct ngbe_hw *hw)
+ 	struct ngbe_phy_info *phy = &hw->phy;
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("ngbe_init_phy");
+-
+ 	hw->phy.addr = 0;
+ 
+ 	switch (hw->sub_device_id) {
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_mvl.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_mvl.c
+index 2eb351d258..b1da0e3167 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_mvl.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_mvl.c
+@@ -54,8 +54,6 @@ s32 ngbe_init_phy_mvl(struct ngbe_hw *hw)
+ 	u16 value = 0;
+ 	int i;
+ 
+-	DEBUGFUNC("ngbe_init_phy_mvl");
+-
+ 	/* enable interrupts, only link status change and an done is allowed */
+ 	ngbe_write_phy_reg_mdi(hw, MVL_PAGE_SEL, 0, 2);
+ 	ngbe_read_phy_reg_mdi(hw, MVL_RGM_CTL2, 0, &value);
+@@ -73,7 +71,7 @@ s32 ngbe_init_phy_mvl(struct ngbe_hw *hw)
+ 	}
+ 
+ 	if (i == 15) {
+-		DEBUGOUT("phy reset exceeds maximum waiting period.\n");
++		DEBUGOUT("phy reset exceeds maximum waiting period.");
+ 		return NGBE_ERR_TIMEOUT;
+ 	}
+ 
+@@ -119,7 +117,6 @@ s32 ngbe_setup_phy_link_mvl(struct ngbe_hw *hw, u32 speed,
+ 	u16 value_r9 = 0;
+ 	u16 value;
+ 
+-	DEBUGFUNC("ngbe_setup_phy_link_mvl");
+ 	UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ 
+ 	hw->phy.autoneg_advertised = 0;
+@@ -176,8 +173,6 @@ s32 ngbe_reset_phy_mvl(struct ngbe_hw *hw)
+ 	u16 ctrl = 0;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_reset_phy_mvl");
+-
+ 	if (hw->phy.type != ngbe_phy_mvl && hw->phy.type != ngbe_phy_mvl_sfi)
+ 		return NGBE_ERR_PHY_TYPE;
+ 
+@@ -202,7 +197,7 @@ s32 ngbe_reset_phy_mvl(struct ngbe_hw *hw)
+ 	}
+ 
+ 	if (i == MVL_PHY_RST_WAIT_PERIOD) {
+-		DEBUGOUT("PHY reset polling failed to complete.\n");
++		DEBUGOUT("PHY reset polling failed to complete.");
+ 		return NGBE_ERR_RESET_FAILED;
+ 	}
+ 
+@@ -250,8 +245,6 @@ s32 ngbe_set_phy_pause_adv_mvl(struct ngbe_hw *hw, u16 pause_bit)
+ 	u16 value;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_set_phy_pause_adv_mvl");
+-
+ 	if (hw->phy.type == ngbe_phy_mvl) {
+ 		status = hw->phy.read_reg(hw, MVL_ANA, 0, &value);
+ 		value &= ~(MVL_CANA_ASM_PAUSE | MVL_CANA_PAUSE);
+@@ -275,8 +268,6 @@ s32 ngbe_check_phy_link_mvl(struct ngbe_hw *hw,
+ 	u16 phy_data = 0;
+ 	u16 insr = 0;
+ 
+-	DEBUGFUNC("ngbe_check_phy_link_mvl");
+-
+ 	/* Initialize speed and link to default case */
+ 	*link_up = false;
+ 	*speed = NGBE_LINK_SPEED_UNKNOWN;
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c
+index 7b08b7a46c..a95efcbab6 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c
+@@ -57,7 +57,7 @@ s32 ngbe_init_phy_rtl(struct ngbe_hw *hw)
+ 		msec_delay(10);
+ 	}
+ 	if (i == 15) {
+-		DEBUGOUT("GPhy reset exceeds maximum times.\n");
++		DEBUGOUT("GPhy reset exceeds maximum times.");
+ 		return NGBE_ERR_PHY_TIMEOUT;
+ 	}
+ 
+@@ -108,8 +108,6 @@ s32 ngbe_setup_phy_link_rtl(struct ngbe_hw *hw,
+ 	u16 autoneg_reg = NGBE_MII_AUTONEG_REG;
+ 	u16 value = 0;
+ 
+-	DEBUGFUNC("ngbe_setup_phy_link_rtl");
+-
+ 	UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ 
+ 	hw->phy.read_reg(hw, RTL_INSR, 0xa43, &autoneg_reg);
+@@ -129,7 +127,7 @@ s32 ngbe_setup_phy_link_rtl(struct ngbe_hw *hw,
+ 			break;
+ 		default:
+ 			value = RTL_BMCR_SPEED_SELECT1 | RTL_BMCR_SPEED_SELECT0;
+-			DEBUGOUT("unknown speed = 0x%x.\n", speed);
++			DEBUGOUT("unknown speed = 0x%x.", speed);
+ 			break;
+ 		}
+ 		/* duplex full */
+@@ -229,8 +227,6 @@ s32 ngbe_reset_phy_rtl(struct ngbe_hw *hw)
+ 	u16 value = 0, i;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_reset_phy_rtl");
+-
+ 	value |= RTL_BMCR_RESET;
+ 	status = hw->phy.write_reg(hw, RTL_BMCR, RTL_DEV_ZERO, value);
+ 
+@@ -299,8 +295,6 @@ s32 ngbe_check_phy_link_rtl(struct ngbe_hw *hw, u32 *speed, bool *link_up)
+ 	u16 phy_data = 0;
+ 	u16 insr = 0;
+ 
+-	DEBUGFUNC("ngbe_check_phy_link_rtl");
+-
+ 	hw->phy.read_reg(hw, RTL_INSR, 0xa43, &insr);
+ 
+ 	/* Initialize speed and link to default case */
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c
+index 8db0f9ce48..40f1725f61 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c
+@@ -102,8 +102,6 @@ s32 ngbe_init_phy_yt(struct ngbe_hw *hw)
+ {
+ 	u16 value = 0;
+ 
+-	DEBUGFUNC("ngbe_init_phy_yt");
+-
+ 	if (hw->phy.type != ngbe_phy_yt8521s_sfi)
+ 		return 0;
+ 
+@@ -131,7 +129,6 @@ s32 ngbe_setup_phy_link_yt(struct ngbe_hw *hw, u32 speed,
+ 	u16 value_r9 = 0;
+ 	u16 value;
+ 
+-	DEBUGFUNC("ngbe_setup_phy_link_yt");
+ 	UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ 
+ 	hw->phy.autoneg_advertised = 0;
+@@ -208,8 +205,6 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw)
+ 	u16 ctrl = 0;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_reset_phy_yt");
+-
+ 	if (hw->phy.type != ngbe_phy_yt8521s &&
+ 		hw->phy.type != ngbe_phy_yt8521s_sfi)
+ 		return NGBE_ERR_PHY_TYPE;
+@@ -227,7 +222,7 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw)
+ 	}
+ 
+ 	if (i == YT_PHY_RST_WAIT_PERIOD) {
+-		DEBUGOUT("PHY reset polling failed to complete.\n");
++		DEBUGOUT("PHY reset polling failed to complete.");
+ 		return NGBE_ERR_RESET_FAILED;
+ 	}
+ 
+@@ -239,8 +234,6 @@ s32 ngbe_get_phy_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit)
+ 	u16 value;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_get_phy_advertised_pause_yt");
+-
+ 	status = hw->phy.read_reg(hw, YT_ANA, 0, &value);
+ 	value &= YT_FANA_PAUSE_MASK;
+ 	*pause_bit = (u8)(value >> 7);
+@@ -253,8 +246,6 @@ s32 ngbe_get_phy_lp_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit)
+ 	u16 value;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_get_phy_lp_advertised_pause_yt");
+-
+ 	status = hw->phy.read_reg(hw, YT_LPAR, 0, &value);
+ 	value &= YT_FLPAR_PAUSE_MASK;
+ 	*pause_bit = (u8)(value >> 7);
+@@ -267,9 +258,6 @@ s32 ngbe_set_phy_pause_adv_yt(struct ngbe_hw *hw, u16 pause_bit)
+ 	u16 value;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("ngbe_set_phy_pause_adv_yt");
+-
+-
+ 	status = hw->phy.read_reg(hw, YT_ANA, 0, &value);
+ 	value &= ~YT_FANA_PAUSE_MASK;
+ 	value |= pause_bit;
+@@ -287,8 +275,6 @@ s32 ngbe_check_phy_link_yt(struct ngbe_hw *hw,
+ 	u16 phy_data = 0;
+ 	u16 insr = 0;
+ 
+-	DEBUGFUNC("ngbe_check_phy_link_yt");
+-
+ 	/* Initialize speed and link to default case */
+ 	*link_up = false;
+ 	*speed = NGBE_LINK_SPEED_UNKNOWN;
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_regs.h b/dpdk/drivers/net/ngbe/base/ngbe_regs.h
+index 872b008c46..640e385990 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_regs.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_regs.h
+@@ -785,30 +785,30 @@ enum ngbe_5tuple_protocol {
+ #define NGBE_MACRXERRCRCH           0x01192C
+ #define NGBE_MACRXERRLENL           0x011978
+ #define NGBE_MACRXERRLENH           0x01197C
+-#define NGBE_MACRX1TO64L            0x001940
+-#define NGBE_MACRX1TO64H            0x001944
+-#define NGBE_MACRX65TO127L          0x001948
+-#define NGBE_MACRX65TO127H          0x00194C
+-#define NGBE_MACRX128TO255L         0x001950
+-#define NGBE_MACRX128TO255H         0x001954
+-#define NGBE_MACRX256TO511L         0x001958
+-#define NGBE_MACRX256TO511H         0x00195C
+-#define NGBE_MACRX512TO1023L        0x001960
+-#define NGBE_MACRX512TO1023H        0x001964
+-#define NGBE_MACRX1024TOMAXL        0x001968
+-#define NGBE_MACRX1024TOMAXH        0x00196C
+-#define NGBE_MACTX1TO64L            0x001834
+-#define NGBE_MACTX1TO64H            0x001838
+-#define NGBE_MACTX65TO127L          0x00183C
+-#define NGBE_MACTX65TO127H          0x001840
+-#define NGBE_MACTX128TO255L         0x001844
+-#define NGBE_MACTX128TO255H         0x001848
+-#define NGBE_MACTX256TO511L         0x00184C
+-#define NGBE_MACTX256TO511H         0x001850
+-#define NGBE_MACTX512TO1023L        0x001854
+-#define NGBE_MACTX512TO1023H        0x001858
+-#define NGBE_MACTX1024TOMAXL        0x00185C
+-#define NGBE_MACTX1024TOMAXH        0x001860
++#define NGBE_MACRX1TO64L            0x011940
++#define NGBE_MACRX1TO64H            0x011944
++#define NGBE_MACRX65TO127L          0x011948
++#define NGBE_MACRX65TO127H          0x01194C
++#define NGBE_MACRX128TO255L         0x011950
++#define NGBE_MACRX128TO255H         0x011954
++#define NGBE_MACRX256TO511L         0x011958
++#define NGBE_MACRX256TO511H         0x01195C
++#define NGBE_MACRX512TO1023L        0x011960
++#define NGBE_MACRX512TO1023H        0x011964
++#define NGBE_MACRX1024TOMAXL        0x011968
++#define NGBE_MACRX1024TOMAXH        0x01196C
++#define NGBE_MACTX1TO64L            0x011834
++#define NGBE_MACTX1TO64H            0x011838
++#define NGBE_MACTX65TO127L          0x01183C
++#define NGBE_MACTX65TO127H          0x011840
++#define NGBE_MACTX128TO255L         0x011844
++#define NGBE_MACTX128TO255H         0x011848
++#define NGBE_MACTX256TO511L         0x01184C
++#define NGBE_MACTX256TO511H         0x011850
++#define NGBE_MACTX512TO1023L        0x011854
++#define NGBE_MACTX512TO1023H        0x011858
++#define NGBE_MACTX1024TOMAXL        0x01185C
++#define NGBE_MACTX1024TOMAXH        0x011860
+ 
+ #define NGBE_MACRXUNDERSIZE         0x011938
+ #define NGBE_MACRXOVERSIZE          0x01193C
+@@ -866,6 +866,9 @@ enum ngbe_5tuple_protocol {
+  * PF(Physical Function) Registers
+  ******************************************************************************/
+ /* Interrupt */
++#define NGBE_BMECTL		0x012020
++#define   NGBE_BMECTL_VFDRP	MS(1, 0x1)
++#define   NGBE_BMECTL_PFDRP	MS(0, 0x1)
+ #define NGBE_ICRMISC		0x000100
+ #define   NGBE_ICRMISC_MASK	MS(8, 0xFFFFFF)
+ #define   NGBE_ICRMISC_RST	MS(10, 0x1) /* device reset event */
+@@ -1419,8 +1422,13 @@ po32m(struct ngbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual,
+ 	}
+ 
+ 	do {
+-		all |= rd32(hw, reg);
+-		value |= mask & all;
++		if (expect != 0) {
++			all |= rd32(hw, reg);
++			value |= mask & all;
++		} else {
++			all = rd32(hw, reg);
++			value = mask & all;
++		}
+ 		if (value == expect)
+ 			break;
+ 
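The po32m() change fixes polling for a bit to clear: the old code OR-accumulated every read into all, so once the masked bit had been observed set, value could never return to zero and an expect == 0 poll always timed out. Reading the register fresh on each iteration when expect is zero is exactly what the MDIO busy-waits earlier in this patch rely on, e.g. po32m(hw, NGBE_MDIOSCD, NGBE_MDIOSCD_BUSY, 0, NULL, 100, 100) in ngbe_phy.c.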
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_type.h b/dpdk/drivers/net/ngbe/base/ngbe_type.h
+index 12847b7272..4c995e7397 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_type.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_type.h
+@@ -11,9 +11,15 @@
+ #define NGBE_FRAME_SIZE_MAX       (9728) /* Maximum frame size, +FCS */
+ #define NGBE_FRAME_SIZE_DFT       (1522) /* Default frame size, +FCS */
+ #define NGBE_NUM_POOL             (32)
++#define NGBE_PBRXSIZE_MAX         0x00080000 /* 512KB Packet Buffer */
++#define NGBE_PBTXSIZE_MAX         0x00005000 /* 20KB Packet Buffer */
++#define NGBE_TXPKT_SIZE_MAX       0xA /* Max Tx Packet size */
+ #define NGBE_MAX_QP               (8)
+ #define NGBE_MAX_UTA              128
+ 
++#define NGBE_PCI_MASTER_DISABLE_TIMEOUT	800
++
++
+ #define NGBE_ALIGN		128 /* as intel did */
+ #define NGBE_ISB_SIZE		16
+ 
+@@ -269,6 +275,9 @@ struct ngbe_mac_info {
+ 	s32 (*get_link_capabilities)(struct ngbe_hw *hw,
+ 				      u32 *speed, bool *autoneg);
+ 
++	/* Packet Buffer manipulation */
++	void (*setup_pba)(struct ngbe_hw *hw);
++
+ 	/* LED */
+ 	s32 (*led_on)(struct ngbe_hw *hw, u32 index);
+ 	s32 (*led_off)(struct ngbe_hw *hw, u32 index);
+@@ -311,6 +320,7 @@ struct ngbe_mac_info {
+ 	u32 mcft_size;
+ 	u32 vft_size;
+ 	u32 num_rar_entries;
++	u32 rx_pb_size;
+ 	u32 max_tx_queues;
+ 	u32 max_rx_queues;
+ 	bool get_link_status;
+diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c
+index 981592f7f4..b930326379 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c
++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c
+@@ -89,7 +89,6 @@ static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
+ static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
+ static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+ static void ngbe_dev_interrupt_handler(void *param);
+-static void ngbe_dev_interrupt_delayed_handler(void *param);
+ static void ngbe_configure_msix(struct rte_eth_dev *dev);
+ 
+ #define NGBE_SET_HWSTRIP(h, q) do {\
+@@ -165,6 +164,8 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
+ 	HW_XSTAT(rx_management_packets),
+ 	HW_XSTAT(tx_management_packets),
+ 	HW_XSTAT(rx_management_dropped),
++	HW_XSTAT(rx_dma_drop),
++	HW_XSTAT(tx_secdrp_packets),
+ 
+ 	/* Basic Error */
+ 	HW_XSTAT(rx_crc_errors),
+@@ -180,6 +181,12 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
+ 	HW_XSTAT(mac_local_errors),
+ 	HW_XSTAT(mac_remote_errors),
+ 
++	/* PB Stats */
++	HW_XSTAT(rx_up_dropped),
++	HW_XSTAT(rdb_pkt_cnt),
++	HW_XSTAT(rdb_repli_cnt),
++	HW_XSTAT(rdb_drp_cnt),
++
+ 	/* MACSEC */
+ 	HW_XSTAT(tx_macsec_pkts_untagged),
+ 	HW_XSTAT(tx_macsec_pkts_encrypted),
+@@ -356,6 +363,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+ 
+ 	/* Vendor and Device ID need to be set before init of shared code */
++	hw->back = pci_dev;
+ 	hw->device_id = pci_dev->id.device_id;
+ 	hw->vendor_id = pci_dev->id.vendor_id;
+ 	hw->sub_system_id = pci_dev->id.subsystem_device_id;
+@@ -943,12 +951,14 @@ ngbe_dev_start(struct rte_eth_dev *dev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	/* Stop the link setup handler before resetting the HW. */
++	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);
++
+ 	/* disable uio/vfio intr/eventfd mapping */
+ 	rte_intr_disable(intr_handle);
+ 
+ 	/* stop adapter */
+ 	hw->adapter_stopped = 0;
+-	ngbe_stop_hw(hw);
+ 
+ 	/* reinitialize adapter, this calls reset and start */
+ 	hw->nb_rx_queues = dev->data->nb_rx_queues;
+@@ -959,6 +969,8 @@ ngbe_dev_start(struct rte_eth_dev *dev)
+ 	hw->mac.start_hw(hw);
+ 	hw->mac.get_link_status = true;
+ 
++	ngbe_set_pcie_master(hw, true);
++
+ 	/* configure PF module if SRIOV enabled */
+ 	ngbe_pf_host_configure(dev);
+ 
+@@ -983,7 +995,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
+ 		}
+ 	}
+ 
+-	/* confiugre MSI-X for sleep until Rx interrupt */
++	/* configure MSI-X for sleep until Rx interrupt */
+ 	ngbe_configure_msix(dev);
+ 
+ 	/* initialize transmission unit */
+@@ -1004,6 +1016,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
+ 		goto error;
+ 	}
+ 
++	hw->mac.setup_pba(hw);
+ 	ngbe_configure_port(dev);
+ 
+ 	err = ngbe_dev_rxtx_start(dev);
+@@ -1037,7 +1050,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
+ 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
+ 		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
+ 
+-	if (*link_speeds & ~allowed_speeds) {
++	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
+ 		PMD_INIT_LOG(ERR, "Invalid link setting");
+ 		goto error;
+ 	}
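The reworked link_speeds check shifts both masks right by one bit before comparing because bit 0 of dev_conf.link_speeds is RTE_ETH_LINK_SPEED_FIXED, a flag rather than a speed (RTE_ETH_LINK_SPEED_AUTONEG is the value 0). Discarding it means a fixed-speed configuration is no longer rejected merely for carrying the flag, while genuinely unsupported speed bits still fail validation.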
+@@ -1131,6 +1144,8 @@ ngbe_dev_stop(struct rte_eth_dev *dev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);
++
+ 	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
+ 		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
+ 		/* gpio0 is used to power on/off control*/
+@@ -1169,6 +1184,8 @@ ngbe_dev_stop(struct rte_eth_dev *dev)
+ 	rte_intr_efd_disable(intr_handle);
+ 	rte_intr_vec_list_free(intr_handle);
+ 
++	ngbe_set_pcie_master(hw, true);
++
+ 	adapter->rss_reta_updated = 0;
+ 
+ 	hw->adapter_stopped = true;
+@@ -1197,6 +1214,8 @@ ngbe_dev_close(struct rte_eth_dev *dev)
+ 
+ 	ngbe_dev_free_queues(dev);
+ 
++	ngbe_set_pcie_master(hw, false);
++
+ 	/* reprogram the RAR[0] in case user changed it. */
+ 	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
+ 
+@@ -1800,6 +1819,24 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+ 	return NULL;
+ }
+ 
++void
++ngbe_dev_setup_link_alarm_handler(void *param)
++{
++	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
++	struct ngbe_hw *hw = ngbe_dev_hw(dev);
++	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
++	u32 speed;
++	bool autoneg = false;
++
++	speed = hw->phy.autoneg_advertised;
++	if (!speed)
++		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
++
++	hw->mac.setup_link(hw, speed, true);
++
++	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
++}
++
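The handler above plugs into the EAL alarm API: rte_eal_alarm_set() takes its delay in microseconds, so the rte_eal_alarm_set(10, ngbe_dev_setup_link_alarm_handler, dev) call below rearms link setup 10 us after a fiber link-down is observed, and the matching rte_eal_alarm_cancel() calls added to ngbe_dev_start() and ngbe_dev_stop() make sure no stale alarm fires while the port is being reconfigured.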
+ /* return 0 means link status changed, -1 means not changed */
+ int
+ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
+@@ -1837,8 +1874,16 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
+ 		return rte_eth_linkstatus_set(dev, &link);
+ 	}
+ 
+-	if (!link_up)
++	if (!link_up) {
++		if (hw->phy.media_type == ngbe_media_type_fiber &&
++			hw->phy.type != ngbe_phy_mvl_sfi) {
++			intr->flags |= NGBE_FLAG_NEED_LINK_CONFIG;
++			rte_eal_alarm_set(10,
++				ngbe_dev_setup_link_alarm_handler, dev);
++		}
++
+ 		return rte_eth_linkstatus_set(dev, &link);
++	}
+ 
+ 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
+ 	link.link_status = RTE_ETH_LINK_UP;
+@@ -2061,9 +2106,6 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+ 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
+ 
+-	/* clear all cause mask */
+-	ngbe_disable_intr(hw);
+-
+ 	/* read-on-clear nic registers here */
+ 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
+ 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
+@@ -2083,6 +2125,8 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+ 	if (eicr & NGBE_ICRMISC_GPIO)
+ 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
+ 
++	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
++
+ 	return 0;
+ }
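Note the interplay in this hunk: the hardware cause register is read-on-clear, but the copy the driver reads lives in the in-memory interrupt status block (isb_mem), which nothing clears automatically. The new store of 0 to the NGBE_ISB_MISC slot prevents stale cause bits from being reprocessed on the next interrupt, which in turn appears to be what allows the ngbe_disable_intr() call above to be dropped.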
+ 
+@@ -2135,7 +2179,6 @@ static int
+ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ {
+ 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
+-	int64_t timeout;
+ 
+ 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
+ 
+@@ -2151,31 +2194,11 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ 		rte_eth_linkstatus_get(dev, &link);
+ 
+ 		ngbe_dev_link_update(dev, 0);
+-
+-		/* likely to up */
+-		if (link.link_status != RTE_ETH_LINK_UP)
+-			/* handle it 1 sec later, wait it being stable */
+-			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
+-		/* likely to down */
+-		else
+-			/* handle it 4 sec later, wait it being stable */
+-			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
+-
++		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
+ 		ngbe_dev_link_status_print(dev);
+-		if (rte_eal_alarm_set(timeout * 1000,
+-				      ngbe_dev_interrupt_delayed_handler,
+-				      (void *)dev) < 0) {
+-			PMD_DRV_LOG(ERR, "Error setting alarm");
+-		} else {
+-			/* remember original mask */
+-			intr->mask_misc_orig = intr->mask_misc;
+-			/* only disable lsc interrupt */
+-			intr->mask_misc &= ~NGBE_ICRMISC_PHY;
+-
+-			intr->mask_orig = intr->mask;
+-			/* only disable all misc interrupts */
+-			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
+-		}
++		if (dev->data->dev_link.link_speed != link.link_speed)
++			rte_eth_dev_callback_process(dev,
++				RTE_ETH_EVENT_INTR_LSC, NULL);
+ 	}
+ 
+ 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
+@@ -2184,53 +2207,6 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ 	return 0;
+ }
+ 
+-/**
+- * Interrupt handler which shall be registered for alarm callback for delayed
+- * handling specific interrupt to wait for the stable nic state. As the
+- * NIC interrupt state is not stable for ngbe after link is just down,
+- * it needs to wait 4 seconds to get the stable status.
+- *
+- * @param param
+- *  The address of parameter (struct rte_eth_dev *) registered before.
+- */
+-static void
+-ngbe_dev_interrupt_delayed_handler(void *param)
+-{
+-	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+-	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
+-	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+-	uint32_t eicr;
+-
+-	ngbe_disable_intr(hw);
+-
+-	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
+-	if (eicr & NGBE_ICRMISC_VFMBX)
+-		ngbe_pf_mbx_process(dev);
+-
+-	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
+-		ngbe_dev_link_update(dev, 0);
+-		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
+-		ngbe_dev_link_status_print(dev);
+-		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+-					      NULL);
+-	}
+-
+-	if (intr->flags & NGBE_FLAG_MACSEC) {
+-		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+-					      NULL);
+-		intr->flags &= ~NGBE_FLAG_MACSEC;
+-	}
+-
+-	/* restore original mask */
+-	intr->mask_misc = intr->mask_misc_orig;
+-	intr->mask_misc_orig = 0;
+-	intr->mask = intr->mask_orig;
+-	intr->mask_orig = 0;
+-
+-	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
+-	ngbe_enable_intr(dev);
+-}
+-
+ /**
+  * Interrupt handler triggered by NIC  for handling
+  * specific interrupt.
+@@ -2641,7 +2617,7 @@ ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
+ 		wr32(hw, NGBE_IVARMISC, tmp);
+ 	} else {
+ 		/* rx or tx causes */
+-		/* Workround for ICR lost */
++		/* Workaround for ICR lost */
+ 		idx = ((16 * (queue & 1)) + (8 * direction));
+ 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
+ 		tmp &= ~(0xFF << idx);
+@@ -2893,7 +2869,7 @@ ngbe_timesync_disable(struct rte_eth_dev *dev)
+ 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
+ 
+-	/* Stop incrementating the System Time registers. */
++	/* Stop incrementing the System Time registers. */
+ 	wr32(hw, NGBE_TSTIMEINC, 0);
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h
+index bb96f6a5e7..8d500fd38c 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h
++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.h
+@@ -341,6 +341,7 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
+ 		uint16_t queue, bool on);
+ void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+ 						  int mask);
++void ngbe_dev_setup_link_alarm_handler(void *param);
+ void ngbe_read_stats_registers(struct ngbe_hw *hw,
+ 			   struct ngbe_hw_stats *hw_stats);
+ 
+diff --git a/dpdk/drivers/net/ngbe/ngbe_logs.h b/dpdk/drivers/net/ngbe/ngbe_logs.h
+index fd306419e6..e5165ffd60 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_logs.h
++++ b/dpdk/drivers/net/ngbe/ngbe_logs.h
+@@ -37,10 +37,7 @@ extern int ngbe_logtype_tx;
+ #define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+ #endif
+ 
+-#define TLOG_DEBUG(fmt, args...)  PMD_DRV_LOG(DEBUG, fmt, ##args)
+-
+-#define DEBUGOUT(fmt, args...)    TLOG_DEBUG(fmt, ##args)
+-#define PMD_INIT_FUNC_TRACE()     TLOG_DEBUG(" >>")
+-#define DEBUGFUNC(fmt)            TLOG_DEBUG(fmt)
++#define DEBUGOUT(fmt, args...)    PMD_DRV_LOG(DEBUG, fmt, ##args)
++#define PMD_INIT_FUNC_TRACE()     PMD_DRV_LOG(DEBUG, ">>")
+ 
+ #endif /* _NGBE_LOGS_H_ */
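This cleanup is what drives the mechanical changes throughout the driver hunks above: DEBUGOUT is now a plain alias for PMD_DRV_LOG, which (as is usual for rte_log-based PMD macros) supplies its own trailing newline, so every leftover \n in a DEBUGOUT format string would have printed a blank line. That is why the patch strips \n throughout, removes the DEBUGFUNC() entry tracing, and retires the TLOG_DEBUG alias.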
+diff --git a/dpdk/drivers/net/ngbe/ngbe_pf.c b/dpdk/drivers/net/ngbe/ngbe_pf.c
+index 7f9c04fb0e..12a18de31d 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_pf.c
++++ b/dpdk/drivers/net/ngbe/ngbe_pf.c
+@@ -163,7 +163,7 @@ int ngbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+ 
+ 	wr32(hw, NGBE_PSRCTL, NGBE_PSRCTL_LBENA);
+ 
+-	/* clear VMDq map to perment rar 0 */
++	/* clear VMDq map to permanent rar 0 */
+ 	hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
+ 
+ 	/* clear VMDq map to scan rar 31 */
+diff --git a/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/dpdk/drivers/net/octeontx/octeontx_ethdev.c
+index 4f1e368c61..f879a0c9fc 100644
+--- a/dpdk/drivers/net/octeontx/octeontx_ethdev.c
++++ b/dpdk/drivers/net/octeontx/octeontx_ethdev.c
+@@ -26,6 +26,11 @@
+ #include "octeontx_rxtx.h"
+ #include "octeontx_logs.h"
+ 
++/* Used to stop/close the event device once no
++ * eth ports are using it.
++ */
++uint16_t evdev_refcnt;
++
+ struct evdev_priv_data {
+ 	OFFLOAD_FLAGS; /*Sequence should not be changed */
+ } __rte_cache_aligned;
+@@ -491,7 +496,11 @@ octeontx_dev_close(struct rte_eth_dev *dev)
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return 0;
+ 
+-	rte_event_dev_close(nic->evdev);
++	/* Stopping/closing event device once all eth ports are closed. */
++	if (__atomic_sub_fetch(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) == 0) {
++		rte_event_dev_stop(nic->evdev);
++		rte_event_dev_close(nic->evdev);
++	}
+ 
+ 	octeontx_dev_flow_ctrl_fini(dev);
+ 
+@@ -671,8 +680,6 @@ octeontx_dev_stop(struct rte_eth_dev *dev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
+-	rte_event_dev_stop(nic->evdev);
+-
+ 	ret = octeontx_port_stop(nic);
+ 	if (ret < 0) {
+ 		octeontx_log_err("failed to req stop port %d res=%d",
+@@ -1090,7 +1097,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ 
+ 	/* Verify queue index */
+ 	if (qidx >= dev->data->nb_rx_queues) {
+-		octeontx_log_err("QID %d not supporteded (0 - %d available)\n",
++		octeontx_log_err("QID %d not supported (0 - %d available)\n",
+ 				qidx, (dev->data->nb_rx_queues - 1));
+ 		return -ENOTSUP;
+ 	}
+@@ -1334,6 +1341,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
+ 	nic->pko_vfid = pko_vfid;
+ 	nic->port_id = port;
+ 	nic->evdev = evdev;
++	__atomic_add_fetch(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);
+ 
+ 	res = octeontx_port_open(nic);
+ 	if (res < 0)
+@@ -1583,6 +1591,7 @@ octeontx_probe(struct rte_vdev_device *dev)
+ 		}
+ 	}
+ 
++	__atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);
+ 	/*
+ 	 * Do 1:1 links for ports & queues. All queues would be mapped to
+ 	 * one port. If there are more ports than queues, then some ports
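The reference-counting protocol the octeontx change introduces can be read off its three call sites; the lines below are a paraphrase of the code added above, using the same evdev_refcnt variable and GCC __atomic builtins, not new driver code:

	/* probe: start from a clean slate before any port exists */
	__atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);

	/* per-port create: one reference per eth port sharing the evdev */
	__atomic_add_fetch(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);

	/* per-port close: the last port out stops and closes the evdev */
	if (__atomic_sub_fetch(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) == 0) {
		rte_event_dev_stop(nic->evdev);
		rte_event_dev_close(nic->evdev);
	}

Moving rte_event_dev_stop() out of octeontx_dev_stop() and into the last close is what makes stopping one port safe while other ports still depend on the shared event device.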
+diff --git a/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c b/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c
+index cc573bb2e8..f56d5b2a38 100644
+--- a/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c
++++ b/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c
+@@ -369,7 +369,7 @@ oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev)
+ 				 "rc=%d", rc);
+ 			return rc;
+ 		}
+-		/* VFIO vector zero is resereved for misc interrupt so
++		/* VFIO vector zero is reserved for misc interrupt so
+ 		 * doing required adjustment. (b13bfab4cd)
+ 		 */
+ 		if (rte_intr_vec_list_index_set(handle, q,
+diff --git a/dpdk/drivers/net/octeontx2/otx2_ptp.c b/dpdk/drivers/net/octeontx2/otx2_ptp.c
+index abb2130587..974018f97e 100644
+--- a/dpdk/drivers/net/octeontx2/otx2_ptp.c
++++ b/dpdk/drivers/net/octeontx2/otx2_ptp.c
+@@ -440,7 +440,7 @@ otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)
+ 	/* This API returns the raw PTP HI clock value. Since LFs doesn't
+ 	 * have direct access to PTP registers and it requires mbox msg
+ 	 * to AF for this value. In fastpath reading this value for every
+-	 * packet (which involes mbox call) becomes very expensive, hence
++	 * packet (which involves mbox call) becomes very expensive, hence
+ 	 * we should be able to derive PTP HI clock value from tsc by
+ 	 * using freq_mult and clk_delta calculated during configure stage.
+ 	 */
+diff --git a/dpdk/drivers/net/octeontx2/otx2_tx.h b/dpdk/drivers/net/octeontx2/otx2_tx.h
+index 4bbd5a390f..a2fb7ce3cb 100644
+--- a/dpdk/drivers/net/octeontx2/otx2_tx.h
++++ b/dpdk/drivers/net/octeontx2/otx2_tx.h
+@@ -61,7 +61,7 @@ otx2_nix_xmit_prepare_tstamp(uint64_t *cmd,  const uint64_t *send_mem_desc,
+ 			/* Retrieving the default desc values */
+ 			cmd[off] = send_mem_desc[6];
+ 
+-			/* Using compiler barier to avoid voilation of C
++			/* Using compiler barrier to avoid violation of C
+ 			 * aliasing rules.
+ 			 */
+ 			rte_compiler_barrier();
+@@ -70,7 +70,7 @@ otx2_nix_xmit_prepare_tstamp(uint64_t *cmd,  const uint64_t *send_mem_desc,
+ 		/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
+ 		 * should not be recorded, hence changing the alg type to
+ 		 * NIX_SENDMEMALG_SET and also changing send mem addr field to
+-		 * next 8 bytes as it corrpt the actual tx tstamp registered
++		 * next 8 bytes as it corrupts the actual tx tstamp registered
+ 		 * address.
+ 		 */
+ 		send_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
+diff --git a/dpdk/drivers/net/octeontx2/otx2_vlan.c b/dpdk/drivers/net/octeontx2/otx2_vlan.c
+index cce643b7b5..359680de5c 100644
+--- a/dpdk/drivers/net/octeontx2/otx2_vlan.c
++++ b/dpdk/drivers/net/octeontx2/otx2_vlan.c
+@@ -953,7 +953,7 @@ static void nix_vlan_reinstall_vlan_filters(struct rte_eth_dev *eth_dev)
+ 	struct vlan_entry *entry;
+ 	int rc;
+ 
+-	/* VLAN filters can't be set without setting filtern on */
++	/* VLAN filters can't be set without setting filters on */
+ 	rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, true);
+ 	if (rc) {
+ 		otx2_err("Failed to reinstall vlan filters");
+diff --git a/dpdk/drivers/net/octeontx_ep/otx2_ep_vf.c b/dpdk/drivers/net/octeontx_ep/otx2_ep_vf.c
+index 0716beb9b1..85e14a998f 100644
+--- a/dpdk/drivers/net/octeontx_ep/otx2_ep_vf.c
++++ b/dpdk/drivers/net/octeontx_ep/otx2_ep_vf.c
+@@ -104,7 +104,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
+ 	iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
+ 			   SDP_VF_R_IN_CNTS(iq_no);
+ 
+-	otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p",
++	otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p",
+ 		   iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+ 
+ 	do {
+diff --git a/dpdk/drivers/net/octeontx_ep/otx_ep_vf.c b/dpdk/drivers/net/octeontx_ep/otx_ep_vf.c
+index c9b91fef9e..96366b2a7f 100644
+--- a/dpdk/drivers/net/octeontx_ep/otx_ep_vf.c
++++ b/dpdk/drivers/net/octeontx_ep/otx_ep_vf.c
+@@ -117,7 +117,7 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
+ 	iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
+ 			   OTX_EP_R_IN_CNTS(iq_no);
+ 
+-	otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
++	otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\n",
+ 		     iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+ 
+ 	do {
+diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c
+index 047010e15e..ebb5d1ae0e 100644
+--- a/dpdk/drivers/net/pfe/pfe_ethdev.c
++++ b/dpdk/drivers/net/pfe/pfe_ethdev.c
+@@ -769,7 +769,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
+ 	if (eth_dev == NULL)
+ 		return -ENOMEM;
+ 
+-	/* Extract pltform data */
++	/* Extract platform data */
+ 	pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
+ 	if (!pfe_info) {
+ 		PFE_PMD_ERR("pfe missing additional platform data");
+diff --git a/dpdk/drivers/net/pfe/pfe_hal.c b/dpdk/drivers/net/pfe/pfe_hal.c
+index 41d783dbff..6431dec47e 100644
+--- a/dpdk/drivers/net/pfe/pfe_hal.c
++++ b/dpdk/drivers/net/pfe/pfe_hal.c
+@@ -187,7 +187,7 @@ gemac_set_mode(void *base, __rte_unused int mode)
+ {
+ 	u32 val = readl(base + EMAC_RCNTRL_REG);
+ 
+-	/*Remove loopbank*/
++	/* Remove loopback */
+ 	val &= ~EMAC_RCNTRL_LOOP;
+ 
+ 	/*Enable flow control and MII mode*/
+diff --git a/dpdk/drivers/net/pfe/pfe_hif.c b/dpdk/drivers/net/pfe/pfe_hif.c
+index c4a7154ba7..69b1d0edde 100644
+--- a/dpdk/drivers/net/pfe/pfe_hif.c
++++ b/dpdk/drivers/net/pfe/pfe_hif.c
+@@ -114,9 +114,9 @@ pfe_hif_init_buffers(struct pfe_hif *hif)
+ 		 * results, eth id, queue id from PFE block along with data.
+ 		 * so we have to provide additional memory for each packet to
+ 		 * HIF rx rings so that PFE block can write its headers.
+-		 * so, we are giving the data pointor to HIF rings whose
++		 * so, we are giving the data pointer to HIF rings whose
+ 		 * calculation is as below:
+-		 * mbuf->data_pointor - Required_header_size
++		 * mbuf->data_pointer - Required_header_size
+ 		 *
+ 		 * We are utilizing the HEADROOM area to receive the PFE
+ 		 * block headers. On packet reception, HIF driver will use
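
The comment fixed above describes the headroom trick: the PFE block prepends its own headers, so the driver hands the hardware an address that sits PFE_PKT_HEADER_SZ bytes before the payload start. A minimal standalone sketch of that pointer arithmetic (the constant value and function name here are illustrative, not the driver's actual definitions):

```c
#include <stdint.h>

#define PFE_PKT_HEADER_SZ 16	/* assumed size of the PFE-written header */

/* Hand the RX ring an address inside the mbuf headroom so the PFE
 * block can write its header without clobbering packet data. */
static uint8_t *pfe_rx_ring_addr(uint8_t *payload_start)
{
	return payload_start - PFE_PKT_HEADER_SZ;
}
```
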
+diff --git a/dpdk/drivers/net/pfe/pfe_hif.h b/dpdk/drivers/net/pfe/pfe_hif.h
+index 6aaf904bb1..e8d5ba10e1 100644
+--- a/dpdk/drivers/net/pfe/pfe_hif.h
++++ b/dpdk/drivers/net/pfe/pfe_hif.h
+@@ -8,7 +8,7 @@
+ #define HIF_CLIENT_QUEUES_MAX	16
+ #define HIF_RX_PKT_MIN_SIZE RTE_CACHE_LINE_SIZE
+ /*
+- * HIF_TX_DESC_NT value should be always greter than 4,
++ * HIF_TX_DESC_NT value should be always greater than 4,
+  * Otherwise HIF_TX_POLL_MARK will become zero.
+  */
+ #define HIF_RX_DESC_NT		64
+diff --git a/dpdk/drivers/net/pfe/pfe_hif_lib.c b/dpdk/drivers/net/pfe/pfe_hif_lib.c
+index 799050dce3..6fe6d33d23 100644
+--- a/dpdk/drivers/net/pfe/pfe_hif_lib.c
++++ b/dpdk/drivers/net/pfe/pfe_hif_lib.c
+@@ -38,7 +38,7 @@ pfe_hif_shm_clean(struct hif_shm *hif_shm)
+  * This function should be called before initializing HIF driver.
+  *
+  * @param[in] hif_shm		Shared memory address location in DDR
+- * @rerurn			0 - on succes, <0 on fail to initialize
++ * @return			0 - on success, <0 on failure to initialize
+  */
+ int
+ pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
+@@ -109,9 +109,9 @@ hif_lib_client_release_rx_buffers(struct hif_client_s *client)
+ 		for (ii = 0; ii < client->rx_q[qno].size; ii++) {
+ 			buf = (void *)desc->data;
+ 			if (buf) {
+-			/* Data pointor to mbuf pointor calculation:
++			/* Data pointer to mbuf pointer calculation:
+ 			 * "Data - User private data - headroom - mbufsize"
+-			 * Actual data pointor given to HIF BDs was
++			 * Actual data pointer given to HIF BDs was
+ 			 * "mbuf->data_offset - PFE_PKT_HEADER_SZ"
+ 			 */
+ 				buf = buf + PFE_PKT_HEADER_SZ
+@@ -477,7 +477,7 @@ hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
+ 	      client_id, unsigned int qno,
+ 	      u32 client_ctrl)
+ {
+-	/* Optimize the write since the destinaton may be non-cacheable */
++	/* Optimize the write since the destination may be non-cacheable */
+ 	if (!((unsigned long)pkt_hdr & 0x3)) {
+ 		((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
+ 					client_id;
+diff --git a/dpdk/drivers/net/qede/base/bcm_osal.h b/dpdk/drivers/net/qede/base/bcm_osal.h
+index c5b5399282..9ea579bfc8 100644
+--- a/dpdk/drivers/net/qede/base/bcm_osal.h
++++ b/dpdk/drivers/net/qede/base/bcm_osal.h
+@@ -14,7 +14,6 @@
+ #include <rte_spinlock.h>
+ #include <rte_malloc.h>
+ #include <rte_atomic.h>
+-#include <rte_memcpy.h>
+ #include <rte_log.h>
+ #include <rte_cycles.h>
+ #include <rte_debug.h>
+@@ -99,7 +98,7 @@ typedef intptr_t osal_int_ptr_t;
+ 	} while (0)
+ #define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
+ #define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
+-#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
++#define OSAL_MEMCPY(dst, src, size) memcpy(dst, src, size)
+ #define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size)
+ #define OSAL_MEMSET(dst, val, length) \
+ 	memset(dst, val, length)
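
Several hunks in this patch, including the OSAL_MEMCPY change above, replace rte_memcpy() with plain memcpy() (or with direct struct assignment, as in qede_alloc_etherdev() below). The likely motivation, not spelled out in the patch itself, is that for small fixed-size copies the compiler inlines memcpy, whereas rte_memcpy's vector loads can read past the source object and upset sanitizers. A sketch of the equivalent forms:

```c
#include <string.h>

struct dev_info { int mtu; int type; };	/* illustrative payload */

static void copy_info(struct dev_info *dst, const struct dev_info *src)
{
	/* Fixed-size copy: the compiler inlines this... */
	memcpy(dst, src, sizeof(*dst));
	/* ...and plain assignment is equivalent and clearer still. */
	*dst = *src;
}
```
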
+diff --git a/dpdk/drivers/net/qede/qede_debug.c b/dpdk/drivers/net/qede/qede_debug.c
+index 2297d245c4..18f2d988fb 100644
+--- a/dpdk/drivers/net/qede/qede_debug.c
++++ b/dpdk/drivers/net/qede/qede_debug.c
+@@ -1809,7 +1809,8 @@ static u32 qed_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
+ 				   u8 split_id)
+ {
+ 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+-	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
++	u8 port_id = 0, pf_id = 0;
++	u16 vf_id = 0, fid = 0;
+ 	bool read_using_dmae = false;
+ 	u32 thresh;
+ 
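
The widening of vf_id and fid above is a truncation fix: a u8 silently wraps once more than 255 VFs (or function IDs) exist. A standalone demonstration with illustrative values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vf_id = 300;               /* an index above 255 */
	uint8_t truncated = (uint8_t)vf_id; /* the old field width */

	/* Prints "300 -> 44": the u8 wraps modulo 256. */
	printf("%u -> %u\n", vf_id, truncated);
	return 0;
}
```
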
+@@ -3522,7 +3523,7 @@ static enum dbg_status qed_grc_dump(struct ecore_hwfn *p_hwfn,
+ 
+ 	/* Dump MCP HW Dump */
+ 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
+-	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1)
++	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
+ 		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
+ 						   p_ptt,
+ 						   dump_buf + offset, dump);
+@@ -5983,7 +5984,7 @@ static char *qed_get_buf_ptr(void *buf, u32 offset)
+ /* Reads a param from the specified buffer. Returns the number of dwords read.
+  * If the returned str_param is NULL, the param is numeric and its value is
+  * returned in num_param.
+- * Otheriwise, the param is a string and its pointer is returned in str_param.
++ * Otherwise, the param is a string and its pointer is returned in str_param.
+  */
+ static u32 qed_read_param(u32 *dump_buf,
+ 			  const char **param_name,
+@@ -7558,7 +7559,7 @@ static enum dbg_status format_feature(struct ecore_hwfn *p_hwfn,
+ 		text_buf[i] = '\n';
+ 
+ 
+-	/* Free the old dump_buf and point the dump_buf to the newly allocagted
++	/* Free the old dump_buf and point the dump_buf to the newly allocated
+ 	 * and formatted text buffer.
+ 	 */
+ 	OSAL_VFREE(p_hwfn, feature->dump_buf);
+diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c
+index 3e9aaeecd3..2a3123f0c8 100644
+--- a/dpdk/drivers/net/qede/qede_ethdev.c
++++ b/dpdk/drivers/net/qede/qede_ethdev.c
+@@ -358,7 +358,7 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
+ static void
+ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
+ {
+-	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
++	qdev->dev_info = *info;
+ 	qdev->ops = qed_ops;
+ }
+ 
+@@ -2338,7 +2338,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+ 		if (fp->rxq != NULL) {
+ 			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ 				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+-			/* cache align the mbuf size to simplfy rx_buf_size
++			/* cache align the mbuf size to simplify rx_buf_size
+ 			 * calculation
+ 			 */
+ 			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+diff --git a/dpdk/drivers/net/qede/qede_filter.c b/dpdk/drivers/net/qede/qede_filter.c
+index 440440423a..ca3165d972 100644
+--- a/dpdk/drivers/net/qede/qede_filter.c
++++ b/dpdk/drivers/net/qede/qede_filter.c
+@@ -388,10 +388,8 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+ 		ip6->vtc_flow =
+ 			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
+ 
+-		rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
+-			   IPV6_ADDR_LEN);
+-		rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
+-			   IPV6_ADDR_LEN);
++		memcpy(&ip6->src_addr, arfs->tuple.src_ipv6, IPV6_ADDR_LEN);
++		memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6, IPV6_ADDR_LEN);
+ 		len += sizeof(struct rte_ipv6_hdr);
+ 		params->ipv6 = true;
+ 
+@@ -821,12 +819,10 @@ qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
+ 				const struct rte_flow_item_ipv6 *spec;
+ 
+ 				spec = pattern->spec;
+-				rte_memcpy(flow->entry.tuple.src_ipv6,
+-					   spec->hdr.src_addr,
+-					   IPV6_ADDR_LEN);
+-				rte_memcpy(flow->entry.tuple.dst_ipv6,
+-					   spec->hdr.dst_addr,
+-					   IPV6_ADDR_LEN);
++				memcpy(flow->entry.tuple.src_ipv6,
++				       spec->hdr.src_addr, IPV6_ADDR_LEN);
++				memcpy(flow->entry.tuple.dst_ipv6,
++				       spec->hdr.dst_addr, IPV6_ADDR_LEN);
+ 				flow->entry.tuple.eth_proto =
+ 					RTE_ETHER_TYPE_IPV6;
+ 			}
+diff --git a/dpdk/drivers/net/qede/qede_main.c b/dpdk/drivers/net/qede/qede_main.c
+index 2d1f70693a..c5afdb00d5 100644
+--- a/dpdk/drivers/net/qede/qede_main.c
++++ b/dpdk/drivers/net/qede/qede_main.c
+@@ -373,7 +373,7 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
+ 	dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+ 	dev_info->dev_type = edev->type;
+ 
+-	rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
++	memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ 	       RTE_ETHER_ADDR_LEN);
+ 
+ 	dev_info->fw_major = FW_MAJOR_VERSION;
+@@ -441,7 +441,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
+ 		info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
+ 					 max_vf_vlan_filters;
+ 
+-		rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
++		memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ 			   RTE_ETHER_ADDR_LEN);
+ 	} else {
+ 		ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
+@@ -472,7 +472,7 @@ static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
+ {
+ 	int i;
+ 
+-	rte_memcpy(edev->name, name, NAME_SIZE);
++	memcpy(edev->name, name, NAME_SIZE);
+ 	for_each_hwfn(edev, i) {
+ 		snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+ 	}
+@@ -514,10 +514,9 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
+ 
+ 	/* Prepare source inputs */
+ 	if (IS_PF(hwfn->p_dev)) {
+-		rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
+-		       sizeof(params));
+-		rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
+-		rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
++		memcpy(&params, ecore_mcp_get_link_params(hwfn), sizeof(params));
++		memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
++		memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
+ 		       sizeof(link_caps));
+ 	} else {
+ 		ecore_vf_read_bulletin(hwfn, &change);
+diff --git a/dpdk/drivers/net/qede/qede_rxtx.c b/dpdk/drivers/net/qede/qede_rxtx.c
+index c0eeea896e..936d5c2dc1 100644
+--- a/dpdk/drivers/net/qede/qede_rxtx.c
++++ b/dpdk/drivers/net/qede/qede_rxtx.c
+@@ -38,48 +38,40 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+ 
+ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
+ {
++	void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
+ 	struct rte_mbuf *mbuf = NULL;
+ 	struct eth_rx_bd *rx_bd;
+ 	dma_addr_t mapping;
+ 	int i, ret = 0;
+ 	uint16_t idx;
+-	uint16_t mask = NUM_RX_BDS(rxq);
+-
+-	if (count > QEDE_MAX_BULK_ALLOC_COUNT)
+-		count = QEDE_MAX_BULK_ALLOC_COUNT;
+ 
+ 	idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+ 
+-	if (count > mask - idx + 1)
+-		count = mask - idx + 1;
+-
+-	ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
+-				   count);
+-
++	ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+ 	if (unlikely(ret)) {
+ 		PMD_RX_LOG(ERR, rxq,
+ 			   "Failed to allocate %d rx buffers "
+ 			    "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+-			    count,
+-			    rxq->sw_rx_prod & NUM_RX_BDS(rxq),
+-			    rxq->sw_rx_cons & NUM_RX_BDS(rxq),
++			    count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+ 			    rte_mempool_avail_count(rxq->mb_pool),
+ 			    rte_mempool_in_use_count(rxq->mb_pool));
+ 		return -ENOMEM;
+ 	}
+ 
+ 	for (i = 0; i < count; i++) {
+-		rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & NUM_RX_BDS(rxq)]);
+-		mbuf = rxq->sw_rx_ring[idx & NUM_RX_BDS(rxq)];
++		mbuf = obj_p[i];
++		if (likely(i < count - 1))
++			rte_prefetch0(obj_p[i + 1]);
+ 
++		idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
++		rxq->sw_rx_ring[idx] = mbuf;
+ 		mapping = rte_mbuf_data_iova_default(mbuf);
+ 		rx_bd = (struct eth_rx_bd *)
+ 			ecore_chain_produce(&rxq->rx_bd_ring);
+ 		rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ 		rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+-		idx++;
++		rxq->sw_rx_prod++;
+ 	}
+-	rxq->sw_rx_prod = idx;
+ 
+ 	return 0;
+ }
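
The rework above drops the in-place bulk get, which had to stop at the end of the ring and so silently allocated fewer buffers than requested near the wrap point. Instead it allocates into a cache-aligned scratch array and then writes one ring slot at a time through a masked index. A reduced sketch of the pattern (a toy mempool stands in for rte_mempool_get_bulk(); the ring size is assumed to be a power of two, as the NUM_RX_BDS masking implies):

```c
#include <stdint.h>
#include <stdlib.h>

#define BULK_MAX 64

/* Toy stand-in for rte_mempool_get_bulk(): all-or-nothing. */
static int pool_get_bulk(void **objs, int count)
{
	for (int i = 0; i < count; i++) {
		objs[i] = malloc(64);
		if (objs[i] == NULL) {
			while (i--)
				free(objs[i]);
			return -1;
		}
	}
	return 0;
}

static int alloc_rx_bulk(void **sw_ring, uint16_t *prod,
			 uint16_t ring_mask, int count)
{
	void *objs[BULK_MAX];

	if (count > BULK_MAX || pool_get_bulk(objs, count) != 0)
		return -1;			/* nothing was posted */

	for (int i = 0; i < count; i++) {
		uint16_t idx = *prod & ring_mask;	/* wrap-safe slot */
		sw_ring[idx] = objs[i];
		(*prod)++;			/* advance per buffer */
	}
	return 0;
}
```
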
+@@ -90,7 +82,7 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
+  *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+  * 3) In regular mode - minimum rx_buf_size should be
+  *    (MTU + Maximum L2 Header Size + 2)
+- *    In above cases +2 corrosponds to 2 bytes padding in front of L2
++ *    In above cases +2 corresponds to 2 bytes padding in front of L2
+  *    header.
+  * 4) rx_buf_size should be cacheline-size aligned. So considering
+  *    criteria 1, we need to adjust the size to floor instead of ceil,
+@@ -106,7 +98,7 @@ qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ 
+ 	if (dev->data->scattered_rx) {
+ 		/* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT number of
+-		 * bufferes can be used for single packet. So need to make sure
++		 * buffers can be used for single packet. So need to make sure
+ 		 * mbuf size is sufficient enough for this.
+ 		 */
+ 		if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
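
A worked example of the sizing rules in the comment above, with assumed numbers (9000-byte MTU, 18-byte L2 header, 5 buffers per packet; the real ETH_RX_MAX_BUFF_PER_PKT and header bound live in the driver headers):

```c
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64
#define FLOOR_TO_CACHE_LINE(x) ((x) & ~(uint32_t)(CACHE_LINE - 1))

int main(void)
{
	uint32_t need = 9000 + 18 + 2;	/* MTU + L2 header + 2B padding */
	uint32_t per_buf = need / 5;	/* scattered mode, 5 bufs/packet */

	/* Prints "regular 9020, scattered 1804 (floored to 1792)". */
	printf("regular %u, scattered %u (floored to %u)\n",
	       need, per_buf, FLOOR_TO_CACHE_LINE(per_buf));
	return 0;
}
```
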
+@@ -243,11 +235,11 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
+ 		dev->data->rx_queues[qid] = NULL;
+ 	}
+ 
+-	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
++	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ 
+ 	/* Fix up RX buffer size */
+ 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+-	/* cache align the mbuf size to simplfy rx_buf_size calculation */
++	/* cache align the mbuf size to simplify rx_buf_size calculation */
+ 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+ 	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	||
+ 	    (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
+@@ -887,68 +879,55 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+ }
+ 
+ static inline void
+-qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+-		      struct qede_tx_queue *txq)
++qede_free_tx_pkt(struct qede_tx_queue *txq)
+ {
+-	uint16_t hw_bd_cons;
+-	uint16_t sw_tx_cons;
+-	uint16_t remaining;
+-	uint16_t mask;
+ 	struct rte_mbuf *mbuf;
+ 	uint16_t nb_segs;
+ 	uint16_t idx;
+-	uint16_t first_idx;
+-
+-	rte_compiler_barrier();
+-	rte_prefetch0(txq->hw_cons_ptr);
+-	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+-	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+-	PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+-		   abs(hw_bd_cons - sw_tx_cons));
+-#endif
+ 
+-	mask = NUM_TX_BDS(txq);
+-	idx = txq->sw_tx_cons & mask;
+-
+-	remaining = hw_bd_cons - sw_tx_cons;
+-	txq->nb_tx_avail += remaining;
+-	first_idx = idx;
+-
+-	while (remaining) {
+-		mbuf = txq->sw_tx_ring[idx];
+-		RTE_ASSERT(mbuf);
++	idx = TX_CONS(txq);
++	mbuf = txq->sw_tx_ring[idx];
++	if (mbuf) {
+ 		nb_segs = mbuf->nb_segs;
+-		remaining -= nb_segs;
+-
+-		/* Prefetch the next mbuf. Note that at least the last 4 mbufs
+-		 * that are prefetched will not be used in the current call.
+-		 */
+-		rte_mbuf_prefetch_part1(txq->sw_tx_ring[(idx + 4) & mask]);
+-		rte_mbuf_prefetch_part2(txq->sw_tx_ring[(idx + 4) & mask]);
+-
+ 		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+-
+ 		while (nb_segs) {
++			/* It's like consuming rxbuf in recv() */
+ 			ecore_chain_consume(&txq->tx_pbl);
++			txq->nb_tx_avail++;
+ 			nb_segs--;
+ 		}
+-
+-		idx = (idx + 1) & mask;
++		rte_pktmbuf_free(mbuf);
++		txq->sw_tx_ring[idx] = NULL;
++		txq->sw_tx_cons++;
+ 		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+-	}
+-	txq->sw_tx_cons = idx;
+-
+-	if (first_idx > idx) {
+-		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
+-							  mask - first_idx + 1);
+-		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[0], idx);
+ 	} else {
+-		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
+-							  idx - first_idx);
++		ecore_chain_consume(&txq->tx_pbl);
++		txq->nb_tx_avail++;
+ 	}
+ }
+ 
++static inline void
++qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
++		      struct qede_tx_queue *txq)
++{
++	uint16_t hw_bd_cons;
++#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
++	uint16_t sw_tx_cons;
++#endif
++
++	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
++	/* read barrier prevents speculative execution on stale data */
++	rte_rmb();
++
++#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
++	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
++	PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
++		   abs(hw_bd_cons - sw_tx_cons));
++#endif
++	while (hw_bd_cons !=  ecore_chain_get_cons_idx(&txq->tx_pbl))
++		qede_free_tx_pkt(txq);
++}
++
+ static int qede_drain_txq(struct qede_dev *qdev,
+ 			  struct qede_tx_queue *txq, bool allow_drain)
+ {
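
The completion rework above reads the hardware consumer index once, orders that load ahead of the descriptor reads with rte_rmb(), and then frees packets until the software index catches up. A portable sketch of the same ordering, using a C11 acquire fence as the analogue of rte_rmb() (function shapes here are illustrative):

```c
#include <stdatomic.h>
#include <stdint.h>

static void process_tx_compl(const _Atomic uint16_t *hw_cons_ptr,
			     uint16_t *sw_cons,
			     void (*free_pkt)(uint16_t idx))
{
	uint16_t hw = atomic_load_explicit(hw_cons_ptr,
					   memory_order_relaxed);

	/* Acquire fence (rte_rmb() in the patch): descriptor reads in
	 * free_pkt() must not be speculated ahead of the index load. */
	atomic_thread_fence(memory_order_acquire);

	while (*sw_cons != hw)
		free_pkt((*sw_cons)++);
}
```
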
+@@ -1559,25 +1538,26 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 	uint8_t bitfield_val;
+ #endif
+ 	uint8_t offset, flags, bd_num;
+-
++	uint16_t count = 0;
+ 
+ 	/* Allocate buffers that we used in previous loop */
+ 	if (rxq->rx_alloc_count) {
+-		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
+-			     rxq->rx_alloc_count))) {
++		count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
++			QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
++
++		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
+ 			struct rte_eth_dev *dev;
+ 
+ 			PMD_RX_LOG(ERR, rxq,
+-				   "New buffer allocation failed,"
+-				   "dropping incoming packetn");
++				   "New buffer allocation failed, "
++				   "dropping incoming packets\n");
+ 			dev = &rte_eth_devices[rxq->port_id];
+-			dev->data->rx_mbuf_alloc_failed +=
+-							rxq->rx_alloc_count;
+-			rxq->rx_alloc_errors += rxq->rx_alloc_count;
++			dev->data->rx_mbuf_alloc_failed += count;
++			rxq->rx_alloc_errors += count;
+ 			return 0;
+ 		}
+ 		qede_update_rx_prod(qdev, rxq);
+-		rxq->rx_alloc_count = 0;
++		rxq->rx_alloc_count -= count;
+ 	}
+ 
+ 	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+@@ -1745,8 +1725,8 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 		}
+ 	}
+ 
+-	/* Request number of bufferes to be allocated in next loop */
+-	rxq->rx_alloc_count = rx_alloc_count;
++	/* Request number of buffers to be allocated in next loop */
++	rxq->rx_alloc_count += rx_alloc_count;
+ 
+ 	rxq->rcv_pkts += rx_pkt;
+ 	rxq->rx_segs += rx_pkt;
+@@ -1786,25 +1766,26 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 	struct qede_agg_info *tpa_info = NULL;
+ 	uint32_t rss_hash;
+ 	int rx_alloc_count = 0;
+-
++	uint16_t count = 0;
+ 
+ 	/* Allocate buffers that we used in previous loop */
+ 	if (rxq->rx_alloc_count) {
+-		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
+-			     rxq->rx_alloc_count))) {
++		count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
++			QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
++
++		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
+ 			struct rte_eth_dev *dev;
+ 
+ 			PMD_RX_LOG(ERR, rxq,
+-				   "New buffer allocation failed,"
+-				   "dropping incoming packetn");
++				   "New buffer allocation failed, "
++				   "dropping incoming packets\n");
+ 			dev = &rte_eth_devices[rxq->port_id];
+-			dev->data->rx_mbuf_alloc_failed +=
+-							rxq->rx_alloc_count;
+-			rxq->rx_alloc_errors += rxq->rx_alloc_count;
++			dev->data->rx_mbuf_alloc_failed += count;
++			rxq->rx_alloc_errors += count;
+ 			return 0;
+ 		}
+ 		qede_update_rx_prod(qdev, rxq);
+-		rxq->rx_alloc_count = 0;
++		rxq->rx_alloc_count -= count;
+ 	}
+ 
+ 	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+@@ -2042,8 +2023,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 		}
+ 	}
+ 
+-	/* Request number of bufferes to be allocated in next loop */
+-	rxq->rx_alloc_count = rx_alloc_count;
++	/* Request number of buffers to be allocated in next loop */
++	rxq->rx_alloc_count += rx_alloc_count;
+ 
+ 	rxq->rcv_pkts += rx_pkt;
+ 
+@@ -2506,7 +2487,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 				/* Inner L2 header size in two byte words */
+ 				inner_l2_hdr_size = (mbuf->l2_len -
+ 						MPLSINUDP_HDR_SIZE) / 2;
+-				/* Inner L4 header offset from the beggining
++				/* Inner L4 header offset from the beginning
+ 				 * of inner packet in two byte words
+ 				 */
+ 				inner_l4_hdr_offset = (mbuf->l2_len -
+diff --git a/dpdk/drivers/net/qede/qede_rxtx.h b/dpdk/drivers/net/qede/qede_rxtx.h
+index 754efe793f..11ed1d9b9c 100644
+--- a/dpdk/drivers/net/qede/qede_rxtx.h
++++ b/dpdk/drivers/net/qede/qede_rxtx.h
+@@ -225,7 +225,7 @@ struct qede_fastpath {
+ 	struct qede_tx_queue *txq;
+ };
+ 
+-/* This structure holds the inforation of fast path queues
++/* This structure holds the information of fast path queues
+  * belonging to individual engines in CMT mode.
+  */
+ struct qede_fastpath_cmt {
+diff --git a/dpdk/drivers/net/qede/qede_sriov.c b/dpdk/drivers/net/qede/qede_sriov.c
+index 0b99a8d6fe..937d339fb8 100644
+--- a/dpdk/drivers/net/qede/qede_sriov.c
++++ b/dpdk/drivers/net/qede/qede_sriov.c
+@@ -203,10 +203,10 @@ void qed_inform_vf_link_state(struct ecore_hwfn *hwfn)
+ 	if (!hwfn->pf_iov_info)
+ 		return;
+ 
+-	rte_memcpy(&params, ecore_mcp_get_link_params(lead_hwfn),
++	memcpy(&params, ecore_mcp_get_link_params(lead_hwfn),
+ 		   sizeof(params));
+-	rte_memcpy(&link, ecore_mcp_get_link_state(lead_hwfn), sizeof(link));
+-	rte_memcpy(&caps, ecore_mcp_get_link_capabilities(lead_hwfn),
++	memcpy(&link, ecore_mcp_get_link_state(lead_hwfn), sizeof(link));
++	memcpy(&caps, ecore_mcp_get_link_capabilities(lead_hwfn),
+ 		   sizeof(caps));
+ 
+ 	/* Update bulletin of all future possible VFs with link configuration */
+diff --git a/dpdk/drivers/net/sfc/sfc.c b/dpdk/drivers/net/sfc/sfc.c
+index ed714fe02f..2cead4e045 100644
+--- a/dpdk/drivers/net/sfc/sfc.c
++++ b/dpdk/drivers/net/sfc/sfc.c
+@@ -371,7 +371,7 @@ sfc_set_drv_limits(struct sfc_adapter *sa)
+ 
+ 	/*
+ 	 * Limits are strict since take into account initial estimation.
+-	 * Resource allocation stategy is described in
++	 * Resource allocation strategy is described in
+ 	 * sfc_estimate_resource_limits().
+ 	 */
+ 	lim.edl_min_evq_count = lim.edl_max_evq_count =
+diff --git a/dpdk/drivers/net/sfc/sfc_dp.c b/dpdk/drivers/net/sfc/sfc_dp.c
+index d4cd162541..da2d1603cf 100644
+--- a/dpdk/drivers/net/sfc/sfc_dp.c
++++ b/dpdk/drivers/net/sfc/sfc_dp.c
+@@ -68,7 +68,7 @@ sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
+ {
+ 	if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
+ 		SFC_GENERIC_LOG(ERR,
+-			"sfc %s dapapath '%s' already registered",
++			"sfc %s datapath '%s' already registered",
+ 			entry->type == SFC_DP_RX ? "Rx" :
+ 			entry->type == SFC_DP_TX ? "Tx" :
+ 			"unknown",
+diff --git a/dpdk/drivers/net/sfc/sfc_dp_rx.h b/dpdk/drivers/net/sfc/sfc_dp_rx.h
+index 760540ba22..246adbd87c 100644
+--- a/dpdk/drivers/net/sfc/sfc_dp_rx.h
++++ b/dpdk/drivers/net/sfc/sfc_dp_rx.h
+@@ -158,7 +158,7 @@ typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ 				  struct sfc_dp_rxq **dp_rxqp);
+ 
+ /**
+- * Free resources allocated for datapath recevie queue.
++ * Free resources allocated for datapath receive queue.
+  */
+ typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
+ 
+@@ -191,7 +191,7 @@ typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
+ /**
+  * Receive queue purge function called after queue flush.
+  *
+- * Should be used to free unused recevie buffers.
++ * Should be used to free unused receive buffers.
+  */
+ typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
+ 
+diff --git a/dpdk/drivers/net/sfc/sfc_ef100.h b/dpdk/drivers/net/sfc/sfc_ef100.h
+index 5e2052d142..e81847e75a 100644
+--- a/dpdk/drivers/net/sfc/sfc_ef100.h
++++ b/dpdk/drivers/net/sfc/sfc_ef100.h
+@@ -19,7 +19,7 @@ extern "C" {
+  *
+  * @param evq_prime	Global address of the prime register
+  * @param evq_hw_index	Event queue index
+- * @param evq_read_ptr	Masked event qeueu read pointer
++ * @param evq_read_ptr	Masked event queue read pointer
+  */
+ static inline void
+ sfc_ef100_evq_prime(volatile void *evq_prime, unsigned int evq_hw_index,
+diff --git a/dpdk/drivers/net/sfc/sfc_ef100_rx.c b/dpdk/drivers/net/sfc/sfc_ef100_rx.c
+index 5d16bf281d..45253ed7dc 100644
+--- a/dpdk/drivers/net/sfc/sfc_ef100_rx.c
++++ b/dpdk/drivers/net/sfc/sfc_ef100_rx.c
+@@ -851,7 +851,7 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
+ 	unsup_rx_prefix_fields =
+ 		efx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout);
+ 
+-	/* LENGTH and CLASS filds must always be present */
++	/* LENGTH and CLASS fields must always be present */
+ 	if ((unsup_rx_prefix_fields &
+ 	     ((1U << EFX_RX_PREFIX_FIELD_LENGTH) |
+ 	      (1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0)
+diff --git a/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c b/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c
+index 712c207617..78bd430363 100644
+--- a/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c
++++ b/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c
+@@ -630,7 +630,7 @@ sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ 			      rxq->block_size, rxq->buf_stride);
+ 	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+ 			      "max fill level is %u descs (%u bufs), "
+-			      "refill threashold %u descs (%u bufs)",
++			      "refill threshold %u descs (%u bufs)",
+ 			      rxq->max_fill_level,
+ 			      rxq->max_fill_level * rxq->block_size,
+ 			      rxq->refill_threshold,
+diff --git a/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h b/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h
+index 821e2227bb..412254e3d7 100644
+--- a/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h
++++ b/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h
+@@ -40,7 +40,7 @@ sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
+ 		rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ 				 (1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
+ 				 (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) {
+-		/* Zero packet type is used as a marker to dicard bad packets */
++		/* Zero packet type is used as a marker to discard bad packets */
+ 		goto done;
+ 	}
+ 
+diff --git a/dpdk/drivers/net/sfc/sfc_ethdev.c b/dpdk/drivers/net/sfc/sfc_ethdev.c
+index d4210b63dd..184f6e7c67 100644
+--- a/dpdk/drivers/net/sfc/sfc_ethdev.c
++++ b/dpdk/drivers/net/sfc/sfc_ethdev.c
+@@ -94,7 +94,6 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ 	struct sfc_rss *rss = &sas->rss;
+ 	struct sfc_mae *mae = &sa->mae;
+-	uint64_t txq_offloads_def = 0;
+ 
+ 	sfc_log_init(sa, "entry");
+ 
+@@ -146,11 +145,6 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
+ 				    dev_info->tx_queue_offload_capa;
+ 
+-	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+-		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+-
+-	dev_info->default_txconf.offloads |= txq_offloads_def;
+-
+ 	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
+ 		uint64_t rte_hf = 0;
+ 		unsigned int i;
+diff --git a/dpdk/drivers/net/sfc/sfc_flow.c b/dpdk/drivers/net/sfc/sfc_flow.c
+index fc74c8035e..509fde4a86 100644
+--- a/dpdk/drivers/net/sfc/sfc_flow.c
++++ b/dpdk/drivers/net/sfc/sfc_flow.c
+@@ -1477,6 +1477,9 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
+ 			rxq_hw_index_max = rxq->hw_index;
+ 	}
+ 
++	if (rxq_hw_index_max - rxq_hw_index_min + 1 > EFX_MAXRSS)
++		return -EINVAL;
++
+ 	switch (action_rss->func) {
+ 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
+ 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+@@ -1612,9 +1615,8 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
+ 		uint8_t *rss_key;
+ 
+ 		if (spec_filter->rss) {
+-			rss_spread = MIN(flow_rss->rxq_hw_index_max -
+-					flow_rss->rxq_hw_index_min + 1,
+-					EFX_MAXRSS);
++			rss_spread = flow_rss->rxq_hw_index_max -
++				     flow_rss->rxq_hw_index_min + 1;
+ 			rss_hash_types = flow_rss->rss_hash_types;
+ 			rss_key = flow_rss->rss_key;
+ 		} else {
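
The sfc_flow change above converts a silent MIN(..., EFX_MAXRSS) clamp into an explicit parse-time rejection: an RSS action spanning more queues than the hardware indirection can cover now fails with -EINVAL instead of quietly using a narrower spread. The shape of the check (limit value assumed):

```c
#include <errno.h>
#include <stdint.h>

#define MAXRSS 16	/* assumed hardware limit, standing in for EFX_MAXRSS */

static int check_rss_span(uint32_t hw_index_min, uint32_t hw_index_max)
{
	if (hw_index_max - hw_index_min + 1 > MAXRSS)
		return -EINVAL;	/* reject at parse time, don't clamp */
	return 0;
}
```
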
+diff --git a/dpdk/drivers/net/sfc/sfc_flow_tunnel.c b/dpdk/drivers/net/sfc/sfc_flow_tunnel.c
+index 463b01c596..af5941c1ba 100644
+--- a/dpdk/drivers/net/sfc/sfc_flow_tunnel.c
++++ b/dpdk/drivers/net/sfc/sfc_flow_tunnel.c
+@@ -21,7 +21,7 @@ sfc_flow_tunnel_is_supported(struct sfc_adapter *sa)
+ 	SFC_ASSERT(sfc_adapter_is_locked(sa));
+ 
+ 	return ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_MARK) != 0 &&
+-		sa->mae.status == SFC_MAE_STATUS_SUPPORTED);
++		sa->mae.status == SFC_MAE_STATUS_ADMIN);
+ }
+ 
+ bool
+@@ -433,7 +433,7 @@ sfc_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+ 	ft = &sa->flow_tunnels[ft_id];
+ 
+ 	if (ft->refcnt == 0) {
+-		sfc_err(sa, "tunnel offload: get_restore_info: tunnel=%u does not exist",
++		sfc_dbg(sa, "tunnel offload: get_restore_info: tunnel=%u does not exist",
+ 			ft_id);
+ 		rc = ENOENT;
+ 		goto fail;
+diff --git a/dpdk/drivers/net/sfc/sfc_intr.c b/dpdk/drivers/net/sfc/sfc_intr.c
+index ab67aa9237..ddddefad7b 100644
+--- a/dpdk/drivers/net/sfc/sfc_intr.c
++++ b/dpdk/drivers/net/sfc/sfc_intr.c
+@@ -8,7 +8,7 @@
+  */
+ 
+ /*
+- * At the momemt of writing DPDK v16.07 has notion of two types of
++ * At the moment of writing DPDK v16.07 has notion of two types of
+  * interrupts: LSC (link status change) and RXQ (receive indication).
+  * It allows to register interrupt callback for entire device which is
+  * not intended to be used for receive indication (i.e. link status
+diff --git a/dpdk/drivers/net/sfc/sfc_repr_proxy.c b/dpdk/drivers/net/sfc/sfc_repr_proxy.c
+index 535b07ea52..8660d419a3 100644
+--- a/dpdk/drivers/net/sfc/sfc_repr_proxy.c
++++ b/dpdk/drivers/net/sfc/sfc_repr_proxy.c
+@@ -1413,6 +1413,7 @@ sfc_repr_proxy_add_rxq(uint16_t pf_port_id, uint16_t repr_id,
+ 	port = sfc_repr_proxy_find_port(rp, repr_id);
+ 	if (port == NULL) {
+ 		sfc_err(sa, "%s() failed: no such port", __func__);
++		sfc_put_adapter(sa);
+ 		return ENOENT;
+ 	}
+ 
+@@ -1451,6 +1452,7 @@ sfc_repr_proxy_del_rxq(uint16_t pf_port_id, uint16_t repr_id,
+ 	port = sfc_repr_proxy_find_port(rp, repr_id);
+ 	if (port == NULL) {
+ 		sfc_err(sa, "%s() failed: no such port", __func__);
++		sfc_put_adapter(sa);
+ 		return;
+ 	}
+ 
+@@ -1484,6 +1486,7 @@ sfc_repr_proxy_add_txq(uint16_t pf_port_id, uint16_t repr_id,
+ 	port = sfc_repr_proxy_find_port(rp, repr_id);
+ 	if (port == NULL) {
+ 		sfc_err(sa, "%s() failed: no such port", __func__);
++		sfc_put_adapter(sa);
+ 		return ENOENT;
+ 	}
+ 
+@@ -1516,6 +1519,7 @@ sfc_repr_proxy_del_txq(uint16_t pf_port_id, uint16_t repr_id,
+ 	port = sfc_repr_proxy_find_port(rp, repr_id);
+ 	if (port == NULL) {
+ 		sfc_err(sa, "%s() failed: no such port", __func__);
++		sfc_put_adapter(sa);
+ 		return;
+ 	}
+ 
+@@ -1614,6 +1618,7 @@ sfc_repr_proxy_stop_repr(uint16_t pf_port_id, uint16_t repr_id)
+ 	port = sfc_repr_proxy_find_port(rp, repr_id);
+ 	if (port == NULL) {
+ 		sfc_err(sa, "%s() failed: no such port", __func__);
++		sfc_put_adapter(sa);
+ 		return ENOENT;
+ 	}
+ 
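
Each early return patched above pairs the adapter reference taken earlier in those functions (via the proxy's get-adapter helper) with a matching sfc_put_adapter() on the error path too. A self-contained sketch of the acquire/release pairing (all names hypothetical):

```c
#include <stdint.h>

struct adapter { int refcnt; };

static struct adapter g_adapter;

static struct adapter *get_adapter(void) { g_adapter.refcnt++; return &g_adapter; }
static void put_adapter(struct adapter *a) { a->refcnt--; }
static int find_port(struct adapter *a, uint16_t id) { (void)a; return id < 4 ? 0 : -1; }

/* Every exit path after get_adapter() releases it — the invariant the
 * patch restores on the early-error returns. */
static int do_op(uint16_t repr_id)
{
	struct adapter *a = get_adapter();
	int rc = 0;

	if (find_port(a, repr_id) != 0) {
		rc = -1;
		goto out;	/* error path still unwinds */
	}
	/* ... normal work ... */
out:
	put_adapter(a);
	return rc;
}
```
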
+diff --git a/dpdk/drivers/net/sfc/sfc_rx.c b/dpdk/drivers/net/sfc/sfc_rx.c
+index 7104284106..cd58d60a36 100644
+--- a/dpdk/drivers/net/sfc/sfc_rx.c
++++ b/dpdk/drivers/net/sfc/sfc_rx.c
+@@ -1057,7 +1057,7 @@ sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
+ 	/* Make sure that end padding does not write beyond the buffer */
+ 	if (buf_aligned < nic_align_end) {
+ 		/*
+-		 * Estimate space which can be lost. If guarnteed buffer
++		 * Estimate space which can be lost. If guaranteed buffer
+ 		 * size is odd, lost space is (nic_align_end - 1). More
+ 		 * accurate formula is below.
+ 		 */
+@@ -1702,7 +1702,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
+ 
+ 	/*
+ 	 * Finalize only ethdev queues since other ones are finalized only
+-	 * on device close and they may require additional deinitializaton.
++	 * on device close and they may require additional deinitialization.
+ 	 */
+ 	ethdev_qid = sas->ethdev_rxq_count;
+ 	while (--ethdev_qid >= (int)nb_rx_queues) {
+@@ -1775,7 +1775,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
+ 
+ 		reconfigure = true;
+ 
+-		/* Do not ununitialize reserved queues */
++		/* Do not uninitialize reserved queues */
+ 		if (nb_rx_queues < sas->ethdev_rxq_count)
+ 			sfc_rx_fini_queues(sa, nb_rx_queues);
+ 
+diff --git a/dpdk/drivers/net/sfc/sfc_sw_stats.c b/dpdk/drivers/net/sfc/sfc_sw_stats.c
+index 70259660c0..81f5aa3cc4 100644
+--- a/dpdk/drivers/net/sfc/sfc_sw_stats.c
++++ b/dpdk/drivers/net/sfc/sfc_sw_stats.c
+@@ -777,7 +777,7 @@ sfc_sw_xstats_configure(struct sfc_adapter *sa)
+ 
+ 	memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));
+ 
+-	*cache = rte_realloc(*cache, cache_count * sizeof(*cache), 0);
++	*cache = rte_realloc(*cache, cache_count * sizeof(**cache), 0);
+ 	if (*cache == NULL) {
+ 		rc = ENOMEM;
+ 		goto fail_cache;
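
The sizeof(*cache) to sizeof(**cache) fix above is the classic double-pointer sizing bug: the buffer holds values, so each element is sizeof(**cache); sizing by sizeof(*cache) counts pointers and under-allocates whenever the element type is wider than a pointer. A demonstration with an illustrative element type:

```c
#include <stdint.h>
#include <stdio.h>

struct stat_val { uint64_t v[4]; };	/* 32-byte element, illustrative */

int main(void)
{
	struct stat_val **cache = NULL;	/* sizeof operands: unevaluated */

	/* On LP64 this prints "element 32, pointer 8" — sizing the
	 * buffer with sizeof(*cache) would allocate 4x too little. */
	printf("element %zu, pointer %zu\n",
	       sizeof(**cache), sizeof(*cache));
	return 0;
}
```
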
+diff --git a/dpdk/drivers/net/sfc/sfc_tx.c b/dpdk/drivers/net/sfc/sfc_tx.c
+index 0dccf21f7c..f376f24f7b 100644
+--- a/dpdk/drivers/net/sfc/sfc_tx.c
++++ b/dpdk/drivers/net/sfc/sfc_tx.c
+@@ -308,6 +308,7 @@ sfc_tx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
+ static int
+ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
+ {
++	uint64_t dev_tx_offload_cap = sfc_tx_get_dev_offload_caps(sa);
+ 	int rc = 0;
+ 
+ 	switch (txmode->mq_mode) {
+@@ -319,6 +320,13 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
+ 		rc = EINVAL;
+ 	}
+ 
++	if ((dev_tx_offload_cap & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 &&
++	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) == 0) {
++		sfc_err(sa, "There is no FAST_FREE flag in the attempted Tx mode configuration");
++		sfc_err(sa, "FAST_FREE is always active as per the current Tx datapath variant");
++		rc = EINVAL;
++	}
++
+ 	/*
+ 	 * These features are claimed to be i40e-specific,
+ 	 * but it does make sense to double-check their absence
+@@ -356,7 +364,7 @@ sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
+ 
+ 	/*
+ 	 * Finalize only ethdev queues since other ones are finalized only
+-	 * on device close and they may require additional deinitializaton.
++	 * on device close and they may require additional deinitialization.
+ 	 */
+ 	ethdev_qid = sas->ethdev_txq_count;
+ 	while (--ethdev_qid >= (int)nb_tx_queues) {
+diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c b/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c
+index ca70eab678..ad96288e7e 100644
+--- a/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c
++++ b/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c
+@@ -930,7 +930,7 @@ flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
+  * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
+  * respectively.
+  * They are located within a larger buffer at offsets *toffset* and *foffset*
+- * respectivelly. Both *tmask* and *fmask* represent bitmasks for the larger
++ * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
+  * buffer.
+  * Question: are the two masks equivalent?
+  *
+diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c
+index f1b48cae82..e020a2417b 100644
+--- a/dpdk/drivers/net/tap/rte_eth_tap.c
++++ b/dpdk/drivers/net/tap/rte_eth_tap.c
+@@ -67,6 +67,7 @@
+ 
+ /* IPC key for queue fds sync */
+ #define TAP_MP_KEY "tap_mp_sync_queues"
++#define TAP_MP_REQ_START_RXTX "tap_mp_req_start_rxtx"
+ 
+ #define TAP_IOV_DEFAULT_MAX 1024
+ 
+@@ -525,7 +526,7 @@ tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
+ 	}
+ }
+ 
+-/* Accumaulate L4 raw checksums */
++/* Accumulate L4 raw checksums */
+ static void
+ tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
+ 			uint32_t *l4_raw_cksum)
+@@ -880,11 +881,49 @@ tap_link_set_up(struct rte_eth_dev *dev)
+ 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ }
+ 
++static int
++tap_mp_req_on_rxtx(struct rte_eth_dev *dev)
++{
++	struct rte_mp_msg msg;
++	struct ipc_queues *request_param = (struct ipc_queues *)msg.param;
++	int err;
++	int fd_iterator = 0;
++	struct pmd_process_private *process_private = dev->process_private;
++	int i;
++
++	memset(&msg, 0, sizeof(msg));
++	strlcpy(msg.name, TAP_MP_REQ_START_RXTX, sizeof(msg.name));
++	strlcpy(request_param->port_name, dev->data->name, sizeof(request_param->port_name));
++	msg.len_param = sizeof(*request_param);
++	for (i = 0; i < dev->data->nb_tx_queues; i++) {
++		msg.fds[fd_iterator++] = process_private->txq_fds[i];
++		msg.num_fds++;
++		request_param->txq_count++;
++	}
++	for (i = 0; i < dev->data->nb_rx_queues; i++) {
++		msg.fds[fd_iterator++] = process_private->rxq_fds[i];
++		msg.num_fds++;
++		request_param->rxq_count++;
++	}
++
++	err = rte_mp_sendmsg(&msg);
++	if (err < 0) {
++		TAP_LOG(ERR, "Failed to send start req to secondary %d",
++			rte_errno);
++		return -1;
++	}
++
++	return 0;
++}
++
+ static int
+ tap_dev_start(struct rte_eth_dev *dev)
+ {
+ 	int err, i;
+ 
++	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
++		tap_mp_req_on_rxtx(dev);
++
+ 	err = tap_intr_handle_set(dev, 1);
+ 	if (err)
+ 		return err;
+@@ -901,6 +940,34 @@ tap_dev_start(struct rte_eth_dev *dev)
+ 	return err;
+ }
+ 
++static int
++tap_mp_req_start_rxtx(const struct rte_mp_msg *request, __rte_unused const void *peer)
++{
++	struct rte_eth_dev *dev;
++	const struct ipc_queues *request_param =
++		(const struct ipc_queues *)request->param;
++	int fd_iterator;
++	int queue;
++	struct pmd_process_private *process_private;
++
++	dev = rte_eth_dev_get_by_name(request_param->port_name);
++	if (!dev) {
++		TAP_LOG(ERR, "Failed to get dev for %s",
++			request_param->port_name);
++		return -1;
++	}
++	process_private = dev->process_private;
++	fd_iterator = 0;
++	TAP_LOG(DEBUG, "tap_attach rx_q:%d tx_q:%d\n", request_param->rxq_count,
++		request_param->txq_count);
++	for (queue = 0; queue < request_param->txq_count; queue++)
++		process_private->txq_fds[queue] = request->fds[fd_iterator++];
++	for (queue = 0; queue < request_param->rxq_count; queue++)
++		process_private->rxq_fds[queue] = request->fds[fd_iterator++];
++
++	return 0;
++}
++
+ /* This function gets called when the current port gets stopped.
+  */
+ static int
+@@ -1084,6 +1151,9 @@ tap_dev_close(struct rte_eth_dev *dev)
+ 
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ 		rte_free(dev->process_private);
++		if (tap_devices_count == 1)
++			rte_mp_action_unregister(TAP_MP_REQ_START_RXTX);
++		tap_devices_count--;
+ 		return 0;
+ 	}
+ 
+@@ -1135,6 +1205,8 @@ tap_dev_close(struct rte_eth_dev *dev)
+ 	TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
+ 		tuntap_types[internals->type], rte_socket_id());
+ 
++	rte_intr_instance_free(internals->intr_handle);
++
+ 	if (internals->ioctl_sock != -1) {
+ 		close(internals->ioctl_sock);
+ 		internals->ioctl_sock = -1;
+@@ -2099,8 +2171,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
+ 		close(pmd->ioctl_sock);
+ 	/* mac_addrs must not be freed alone because part of dev_private */
+ 	dev->data->mac_addrs = NULL;
+-	rte_eth_dev_release_port(dev);
+ 	rte_intr_instance_free(pmd->intr_handle);
++	rte_eth_dev_release_port(dev);
+ 
+ error_exit_nodev:
+ 	TAP_LOG(ERR, "%s Unable to initialize %s",
+@@ -2445,6 +2517,16 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
+ 		ret = tap_mp_attach_queues(name, eth_dev);
+ 		if (ret != 0)
+ 			return -1;
++
++		if (!tap_devices_count) {
++			ret = rte_mp_action_register(TAP_MP_REQ_START_RXTX, tap_mp_req_start_rxtx);
++			if (ret < 0 && rte_errno != ENOTSUP) {
++				TAP_LOG(ERR, "tap: Failed to register IPC callback: %s",
++					strerror(rte_errno));
++				return -1;
++			}
++		}
++		tap_devices_count++;
+ 		rte_eth_dev_probing_finish(eth_dev);
+ 		return 0;
+ 	}
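
The tap changes above wire the driver into DPDK's multi-process channel: at probe time a secondary process registers an action for TAP_MP_REQ_START_RXTX with rte_mp_action_register(), and on start the primary broadcasts its queue fds with rte_mp_sendmsg() (the fds travel over the unix socket via SCM_RIGHTS). A trimmed sketch against the real rte_mp_* API; payload handling is elided:

```c
#include <string.h>
#include <rte_eal.h>
#include <rte_string_fns.h>

#define REQ_NAME "tap_mp_req_start_rxtx"

static int on_start_rxtx(const struct rte_mp_msg *msg, const void *peer)
{
	/* Secondary: adopt the queue fds carried in msg->fds[]. */
	(void)msg;
	(void)peer;
	return 0;
}

static int wire_up(int queue_fd)
{
	struct rte_mp_msg msg;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		/* Secondary registers the callback once, at probe. */
		return rte_mp_action_register(REQ_NAME, on_start_rxtx);

	/* Primary: one fd per queue rides along with the message. */
	memset(&msg, 0, sizeof(msg));
	rte_strlcpy(msg.name, REQ_NAME, sizeof(msg.name));
	msg.fds[0] = queue_fd;
	msg.num_fds = 1;
	return rte_mp_sendmsg(&msg);
}
```
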
+diff --git a/dpdk/drivers/net/tap/tap_bpf_api.c b/dpdk/drivers/net/tap/tap_bpf_api.c
+index 98f6a76011..15283f8917 100644
+--- a/dpdk/drivers/net/tap/tap_bpf_api.c
++++ b/dpdk/drivers/net/tap/tap_bpf_api.c
+@@ -96,7 +96,7 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+  * Load BPF instructions to kernel
+  *
+  * @param[in] type
+- *   BPF program type: classifieir or action
++ *   BPF program type: classifier or action
+  *
+  * @param[in] insns
+  *   Array of BPF instructions (equivalent to BPF instructions)
+@@ -104,7 +104,7 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+  * @param[in] insns_cnt
+  *   Number of BPF instructions (size of array)
+  *
+- * @param[in] lincense
++ * @param[in] license
+  *   License string that must be acknowledged by the kernel
+  *
+  * @return
+diff --git a/dpdk/drivers/net/tap/tap_flow.c b/dpdk/drivers/net/tap/tap_flow.c
+index c4f60ce98e..7673823945 100644
+--- a/dpdk/drivers/net/tap/tap_flow.c
++++ b/dpdk/drivers/net/tap/tap_flow.c
+@@ -961,7 +961,7 @@ add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
+ }
+ 
+ /**
+- * Helper function to send a serie of TC actions to the kernel
++ * Helper function to send a series of TC actions to the kernel
+  *
+  * @param[in] flow
+  *   Pointer to rte flow containing the netlink message
+@@ -2017,7 +2017,7 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
+ 			break;
+ 
+ 		/*
+-		 * Subtract offest to restore real key index
++		 * Subtract offset to restore real key index
+ 		 * If a non RSS flow is falsely trying to release map
+ 		 * entry 0 - the offset subtraction will calculate the real
+ 		 * map index as an out-of-range value and the release operation
+diff --git a/dpdk/drivers/net/tap/tap_intr.c b/dpdk/drivers/net/tap/tap_intr.c
+index 56c343acea..a9097def1a 100644
+--- a/dpdk/drivers/net/tap/tap_intr.c
++++ b/dpdk/drivers/net/tap/tap_intr.c
+@@ -34,8 +34,6 @@ tap_rx_intr_vec_uninstall(struct rte_eth_dev *dev)
+ 	rte_intr_free_epoll_fd(intr_handle);
+ 	rte_intr_vec_list_free(intr_handle);
+ 	rte_intr_nb_efd_set(intr_handle, 0);
+-
+-	rte_intr_instance_free(intr_handle);
+ }
+ 
+ /**
+diff --git a/dpdk/drivers/net/thunderx/nicvf_svf.c b/dpdk/drivers/net/thunderx/nicvf_svf.c
+index bccf290599..1bcf73d9fc 100644
+--- a/dpdk/drivers/net/thunderx/nicvf_svf.c
++++ b/dpdk/drivers/net/thunderx/nicvf_svf.c
+@@ -21,7 +21,7 @@ nicvf_svf_push(struct nicvf *vf)
+ 
+ 	entry = rte_zmalloc("nicvf", sizeof(*entry), RTE_CACHE_LINE_SIZE);
+ 	if (entry == NULL)
+-		rte_panic("Cannoc allocate memory for svf_entry\n");
++		rte_panic("Cannot allocate memory for svf_entry\n");
+ 
+ 	entry->vf = vf;
+ 
+diff --git a/dpdk/drivers/net/txgbe/base/meson.build b/dpdk/drivers/net/txgbe/base/meson.build
+index 7a30191472..a81d6890fe 100644
+--- a/dpdk/drivers/net/txgbe/base/meson.build
++++ b/dpdk/drivers/net/txgbe/base/meson.build
+@@ -22,6 +22,6 @@ foreach flag: error_cflags
+ endforeach
+ 
+ base_lib = static_library('txgbe_base', sources,
+-    dependencies: static_rte_eal,
++    dependencies: [static_rte_eal, static_rte_net],
+     c_args: c_args)
+ base_objs = base_lib.extract_all_objects(recursive: true)
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c b/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c
+index 72901cd0b0..4ed6bd6728 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c
++++ b/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c
+@@ -21,8 +21,6 @@ s32 txgbe_init_eeprom_params(struct txgbe_hw *hw)
+ 	u16 eeprom_size;
+ 	int err = 0;
+ 
+-	DEBUGFUNC("txgbe_init_eeprom_params");
+-
+ 	if (eeprom->type != txgbe_eeprom_unknown)
+ 		return 0;
+ 
+@@ -54,12 +52,12 @@ s32 txgbe_init_eeprom_params(struct txgbe_hw *hw)
+ 
+ 	err = eeprom->read32(hw, TXGBE_SW_REGION_PTR << 1, &eeprom->sw_addr);
+ 	if (err) {
+-		DEBUGOUT("EEPROM read failed.\n");
++		DEBUGOUT("EEPROM read failed.");
+ 		return err;
+ 	}
+ 
+-	DEBUGOUT("eeprom params: type = %d, size = %d, address bits: "
+-		  "%d %d\n", eeprom->type, eeprom->word_size,
++	DEBUGOUT("eeprom params: type = %d, size = %d, address bits: %d %d",
++		  eeprom->type, eeprom->word_size,
+ 		  eeprom->address_bits, eeprom->sw_addr);
+ 
+ 	return 0;
+@@ -78,9 +76,6 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw)
+ 	u32 i;
+ 	u32 swsm;
+ 
+-	DEBUGFUNC("txgbe_get_eeprom_semaphore");
+-
+-
+ 	/* Get SMBI software semaphore between device drivers first */
+ 	for (i = 0; i < timeout; i++) {
+ 		/*
+@@ -96,8 +91,7 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw)
+ 	}
+ 
+ 	if (i == timeout) {
+-		DEBUGOUT("Driver can't access the eeprom - SMBI Semaphore "
+-			 "not granted.\n");
++		DEBUGOUT("Driver can't access the eeprom - SMBI Semaphore not granted.");
+ 		/*
+ 		 * this release is particularly important because our attempts
+ 		 * above to get the semaphore may have succeeded, and if there
+@@ -140,13 +134,12 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw)
+ 		 * was not granted because we don't have access to the EEPROM
+ 		 */
+ 		if (i >= timeout) {
+-			DEBUGOUT("SWESMBI Software EEPROM semaphore not granted.\n");
++			DEBUGOUT("SWESMBI Software EEPROM semaphore not granted.");
+ 			txgbe_release_eeprom_semaphore(hw);
+ 			status = TXGBE_ERR_EEPROM;
+ 		}
+ 	} else {
+-		DEBUGOUT("Software semaphore SMBI between device drivers "
+-			 "not granted.\n");
++		DEBUGOUT("Software semaphore SMBI between device drivers not granted.");
+ 	}
+ 
+ 	return status;
+@@ -160,8 +153,6 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw)
+  **/
+ void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw)
+ {
+-	DEBUGFUNC("txgbe_release_eeprom_semaphore");
+-
+ 	wr32m(hw, TXGBE_MNGSWSYNC, TXGBE_MNGSWSYNC_REQ, 0);
+ 	wr32m(hw, TXGBE_SWSEM, TXGBE_SWSEM_PF, 0);
+ 	txgbe_flush(hw);
+@@ -290,8 +281,6 @@ s32 txgbe_ee_write16(struct txgbe_hw *hw, u32 offset,
+ 	u32 addr = (offset << 1);
+ 	int err;
+ 
+-	DEBUGFUNC("\n");
+-
+ 	err = hw->mac.acquire_swfw_sync(hw, mask);
+ 	if (err)
+ 		return err;
+@@ -348,8 +337,6 @@ s32 txgbe_ee_writew_sw(struct txgbe_hw *hw, u32 offset,
+ 	u32 addr = hw->rom.sw_addr + (offset << 1);
+ 	int err;
+ 
+-	DEBUGFUNC("\n");
+-
+ 	err = hw->mac.acquire_swfw_sync(hw, mask);
+ 	if (err)
+ 		return err;
+@@ -399,11 +386,9 @@ s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw)
+ 	int err;
+ 	u16 buffer[BUFF_SIZE];
+ 
+-	DEBUGFUNC("txgbe_calc_eeprom_checksum");
+-
+ 	err = hw->rom.readw_sw(hw, TXGBE_EEPROM_CHECKSUM, &read_checksum);
+ 	if (err) {
+-		DEBUGOUT("EEPROM read failed\n");
++		DEBUGOUT("EEPROM read failed");
+ 		return err;
+ 	}
+ 
+@@ -437,15 +422,13 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
+ 	u16 read_checksum = 0;
+ 	int err;
+ 
+-	DEBUGFUNC("txgbe_validate_eeprom_checksum");
+-
+ 	/* Read the first word from the EEPROM. If this times out or fails, do
+ 	 * not continue or we could be in for a very long wait while every
+ 	 * EEPROM read fails
+ 	 */
+ 	err = hw->rom.read16(hw, 0, &checksum);
+ 	if (err) {
+-		DEBUGOUT("EEPROM read failed\n");
++		DEBUGOUT("EEPROM read failed");
+ 		return err;
+ 	}
+ 
+@@ -457,7 +440,7 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
+ 
+ 	err = hw->rom.readw_sw(hw, TXGBE_EEPROM_CHECKSUM, &read_checksum);
+ 	if (err) {
+-		DEBUGOUT("EEPROM read failed\n");
++		DEBUGOUT("EEPROM read failed");
+ 		return err;
+ 	}
+ 
+@@ -466,7 +449,7 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
+ 	 */
+ 	if (read_checksum != checksum) {
+ 		err = TXGBE_ERR_EEPROM_CHECKSUM;
+-		DEBUGOUT("EEPROM checksum error\n");
++		DEBUGOUT("EEPROM checksum error");
+ 	}
+ 
+ 	/* If the user cares, return the calculated checksum */
+@@ -485,15 +468,13 @@ s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw)
+ 	s32 status;
+ 	u16 checksum;
+ 
+-	DEBUGFUNC("txgbe_update_eeprom_checksum");
+-
+ 	/* Read the first word from the EEPROM. If this times out or fails, do
+ 	 * not continue or we could be in for a very long wait while every
+ 	 * EEPROM read fails
+ 	 */
+ 	status = hw->rom.read16(hw, 0, &checksum);
+ 	if (status) {
+-		DEBUGOUT("EEPROM read failed\n");
++		DEBUGOUT("EEPROM read failed");
+ 		return status;
+ 	}
+ 
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.c b/dpdk/drivers/net/txgbe/base/txgbe_hw.c
+index 00a8db78bf..776891ee7e 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_hw.c
++++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.c
+@@ -42,8 +42,6 @@ bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw)
+ 	u32 speed;
+ 	bool link_up;
+ 
+-	DEBUGFUNC("txgbe_device_supports_autoneg_fc");
+-
+ 	switch (hw->phy.media_type) {
+ 	case txgbe_media_type_fiber_qsfp:
+ 	case txgbe_media_type_fiber:
+@@ -93,11 +91,9 @@ s32 txgbe_setup_fc(struct txgbe_hw *hw)
+ 	u32 value = 0;
+ 	u64 reg_bp = 0;
+ 
+-	DEBUGFUNC("txgbe_setup_fc");
+-
+ 	/* Validate the requested mode */
+ 	if (hw->fc.strict_ieee && hw->fc.requested_mode == txgbe_fc_rx_pause) {
+-		DEBUGOUT("txgbe_fc_rx_pause not valid in strict IEEE mode\n");
++		DEBUGOUT("txgbe_fc_rx_pause not valid in strict IEEE mode");
+ 		err = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ 		goto out;
+ 	}
+@@ -149,7 +145,7 @@ s32 txgbe_setup_fc(struct txgbe_hw *hw)
+ 			SR_AN_MMD_ADV_REG1_PAUSE_ASM;
+ 		break;
+ 	default:
+-		DEBUGOUT("Flow control param set incorrectly\n");
++		DEBUGOUT("Flow control param set incorrectly");
+ 		err = TXGBE_ERR_CONFIG;
+ 		goto out;
+ 	}
+@@ -180,7 +176,7 @@ s32 txgbe_setup_fc(struct txgbe_hw *hw)
+ 				      TXGBE_MD_DEV_AUTO_NEG, reg_cu);
+ 	}
+ 
+-	DEBUGOUT("Set up FC; reg = 0x%08X\n", reg);
++	DEBUGOUT("Set up FC; reg = 0x%08X", reg);
+ out:
+ 	return err;
+ }
+@@ -199,8 +195,6 @@ s32 txgbe_start_hw(struct txgbe_hw *hw)
+ 	s32 err;
+ 	u16 device_caps;
+ 
+-	DEBUGFUNC("txgbe_start_hw");
+-
+ 	/* Set the media type */
+ 	hw->phy.media_type = hw->phy.get_media_type(hw);
+ 
+@@ -213,7 +207,7 @@ s32 txgbe_start_hw(struct txgbe_hw *hw)
+ 	/* Setup flow control */
+ 	err = txgbe_setup_fc(hw);
+ 	if (err != 0 && err != TXGBE_NOT_IMPLEMENTED) {
+-		DEBUGOUT("Flow control setup failed, returning %d\n", err);
++		DEBUGOUT("Flow control setup failed, returning %d", err);
+ 		return err;
+ 	}
+ 
+@@ -275,8 +269,6 @@ s32 txgbe_init_hw(struct txgbe_hw *hw)
+ {
+ 	s32 status;
+ 
+-	DEBUGFUNC("txgbe_init_hw");
+-
+ 	/* Get firmware version */
+ 	hw->phy.get_fw_version(hw, &hw->fw_version);
+ 
+@@ -288,7 +280,7 @@ s32 txgbe_init_hw(struct txgbe_hw *hw)
+ 	}
+ 
+ 	if (status != 0)
+-		DEBUGOUT("Failed to initialize HW, STATUS = %d\n", status);
++		DEBUGOUT("Failed to initialize HW, STATUS = %d", status);
+ 
+ 	return status;
+ }
+@@ -304,8 +296,6 @@ s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw)
+ {
+ 	u16 i = 0;
+ 
+-	DEBUGFUNC("txgbe_clear_hw_cntrs");
+-
+ 	/* QP Stats */
+ 	/* don't write clear queue stats */
+ 	for (i = 0; i < TXGBE_MAX_QP; i++) {
+@@ -425,8 +415,6 @@ s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr)
+ 	u32 rar_low;
+ 	u16 i;
+ 
+-	DEBUGFUNC("txgbe_get_mac_addr");
+-
+ 	wr32(hw, TXGBE_ETHADDRIDX, 0);
+ 	rar_high = rd32(hw, TXGBE_ETHADDRH);
+ 	rar_low = rd32(hw, TXGBE_ETHADDRL);
+@@ -452,8 +440,6 @@ void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw)
+ 	struct txgbe_bus_info *bus = &hw->bus;
+ 	u32 reg;
+ 
+-	DEBUGFUNC("txgbe_set_lan_id_multi_port_pcie");
+-
+ 	reg = rd32(hw, TXGBE_PORTSTAT);
+ 	bus->lan_id = TXGBE_PORTSTAT_ID(reg);
+ 
+@@ -479,8 +465,6 @@ s32 txgbe_stop_hw(struct txgbe_hw *hw)
+ 	u32 reg_val;
+ 	u16 i;
+ 
+-	DEBUGFUNC("txgbe_stop_hw");
+-
+ 	/*
+ 	 * Set the adapter_stopped flag so other driver functions stop touching
+ 	 * the hardware
+@@ -527,8 +511,6 @@ s32 txgbe_led_on(struct txgbe_hw *hw, u32 index)
+ {
+ 	u32 led_reg = rd32(hw, TXGBE_LEDCTL);
+ 
+-	DEBUGFUNC("txgbe_led_on");
+-
+ 	if (index > 4)
+ 		return TXGBE_ERR_PARAM;
+ 
+@@ -550,8 +532,6 @@ s32 txgbe_led_off(struct txgbe_hw *hw, u32 index)
+ {
+ 	u32 led_reg = rd32(hw, TXGBE_LEDCTL);
+ 
+-	DEBUGFUNC("txgbe_led_off");
+-
+ 	if (index > 4)
+ 		return TXGBE_ERR_PARAM;
+ 
+@@ -574,8 +554,6 @@ s32 txgbe_validate_mac_addr(u8 *mac_addr)
+ {
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("txgbe_validate_mac_addr");
+-
+ 	/* Make sure it is not a multicast address */
+ 	if (TXGBE_IS_MULTICAST(mac_addr)) {
+ 		status = TXGBE_ERR_INVALID_MAC_ADDR;
+@@ -606,11 +584,9 @@ s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ 	u32 rar_low, rar_high;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("txgbe_set_rar");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (index >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", index);
++		DEBUGOUT("RAR index %d is out of range.", index);
+ 		return TXGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -658,11 +634,9 @@ s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index)
+ 	u32 rar_high;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("txgbe_clear_rar");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (index >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", index);
++		DEBUGOUT("RAR index %d is out of range.", index);
+ 		return TXGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -698,8 +672,6 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw)
+ 	u32 psrctl;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("txgbe_init_rx_addrs");
+-
+ 	/*
+ 	 * If the current mac address is valid, assume it is a software override
+ 	 * to the permanent address.
+@@ -710,18 +682,18 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw)
+ 		/* Get the MAC address from the RAR0 for later reference */
+ 		hw->mac.get_mac_addr(hw, hw->mac.addr);
+ 
+-		DEBUGOUT(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
++		DEBUGOUT(" Keeping Current RAR0 Addr = "
++			  RTE_ETHER_ADDR_PRT_FMT,
+ 			  hw->mac.addr[0], hw->mac.addr[1],
+-			  hw->mac.addr[2]);
+-		DEBUGOUT("%.2X %.2X %.2X\n", hw->mac.addr[3],
++			  hw->mac.addr[2], hw->mac.addr[3],
+ 			  hw->mac.addr[4], hw->mac.addr[5]);
+ 	} else {
+ 		/* Setup the receive address. */
+-		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+-		DEBUGOUT(" New MAC Addr =%.2X %.2X %.2X ",
++		DEBUGOUT("Overriding MAC Address in RAR[0]");
++		DEBUGOUT(" New MAC Addr = "
++			  RTE_ETHER_ADDR_PRT_FMT,
+ 			  hw->mac.addr[0], hw->mac.addr[1],
+-			  hw->mac.addr[2]);
+-		DEBUGOUT("%.2X %.2X %.2X\n", hw->mac.addr[3],
++			  hw->mac.addr[2], hw->mac.addr[3],
+ 			  hw->mac.addr[4], hw->mac.addr[5]);
+ 
+ 		hw->mac.set_rar(hw, 0, hw->mac.addr, 0, true);
+@@ -735,7 +707,7 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw)
+ 	hw->addr_ctrl.rar_used_count = 1;
+ 
+ 	/* Zero out the other receive addresses. */
+-	DEBUGOUT("Clearing RAR[1-%d]\n", rar_entries - 1);
++	DEBUGOUT("Clearing RAR[1-%d]", rar_entries - 1);
+ 	for (i = 1; i < rar_entries; i++) {
+ 		wr32(hw, TXGBE_ETHADDRIDX, i);
+ 		wr32(hw, TXGBE_ETHADDRL, 0);
+@@ -749,7 +721,7 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw)
+ 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ 	wr32(hw, TXGBE_PSRCTL, psrctl);
+ 
+-	DEBUGOUT(" Clearing MTA\n");
++	DEBUGOUT(" Clearing MTA");
+ 	for (i = 0; i < hw->mac.mcft_size; i++)
+ 		wr32(hw, TXGBE_MCADDRTBL(i), 0);
+ 
+@@ -774,8 +746,6 @@ static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+ {
+ 	u32 vector = 0;
+ 
+-	DEBUGFUNC("txgbe_mta_vector");
+-
+ 	switch (hw->mac.mc_filter_type) {
+ 	case 0:   /* use bits [47:36] of the address */
+ 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+@@ -790,7 +760,7 @@ static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+ 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ 		break;
+ 	default:  /* Invalid mc_filter_type */
+-		DEBUGOUT("MC filter type param set incorrectly\n");
++		DEBUGOUT("MC filter type param set incorrectly");
+ 		ASSERT(0);
+ 		break;
+ 	}
+@@ -813,12 +783,10 @@ void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr)
+ 	u32 vector_bit;
+ 	u32 vector_reg;
+ 
+-	DEBUGFUNC("txgbe_set_mta");
+-
+ 	hw->addr_ctrl.mta_in_use++;
+ 
+ 	vector = txgbe_mta_vector(hw, mc_addr);
+-	DEBUGOUT(" bit-vector = 0x%03X\n", vector);
++	DEBUGOUT(" bit-vector = 0x%03X", vector);
+ 
+ 	/*
+ 	 * The MTA is a register array of 128 32-bit registers. It is treated
+@@ -852,8 +820,6 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+ 	u32 i;
+ 	u32 vmdq;
+ 
+-	DEBUGFUNC("txgbe_update_mc_addr_list");
+-
+ 	/*
+ 	 * Set the new number of MC addresses that we are being requested to
+ 	 * use.
+@@ -863,13 +829,13 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+ 
+ 	/* Clear mta_shadow */
+ 	if (clear) {
+-		DEBUGOUT(" Clearing MTA\n");
++		DEBUGOUT(" Clearing MTA");
+ 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ 	}
+ 
+ 	/* Update mta_shadow */
+ 	for (i = 0; i < mc_addr_count; i++) {
+-		DEBUGOUT(" Adding the multicast addresses:\n");
++		DEBUGOUT(" Adding the multicast addresses:");
+ 		txgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ 	}
+ 
+@@ -886,7 +852,7 @@ s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+ 		wr32(hw, TXGBE_PSRCTL, psrctl);
+ 	}
+ 
+-	DEBUGOUT("txgbe update mc addr list complete\n");
++	DEBUGOUT("txgbe update mc addr list complete");
+ 	return 0;
+ }
+ 
+@@ -904,8 +870,6 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw)
+ 	u32 fcrtl, fcrth;
+ 	int i;
+ 
+-	DEBUGFUNC("txgbe_fc_enable");
+-
+ 	/* Validate the water mark configuration */
+ 	if (!hw->fc.pause_time) {
+ 		err = TXGBE_ERR_INVALID_LINK_SETTINGS;
+@@ -918,7 +882,7 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw)
+ 		    hw->fc.high_water[i]) {
+ 			if (!hw->fc.low_water[i] ||
+ 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+-				DEBUGOUT("Invalid water mark configuration\n");
++				DEBUGOUT("Invalid water mark configuration");
+ 				err = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ 				goto out;
+ 			}
+@@ -976,7 +940,7 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw)
+ 		fccfg_reg |= TXGBE_TXFCCFG_FC;
+ 		break;
+ 	default:
+-		DEBUGOUT("Flow control param set incorrectly\n");
++		DEBUGOUT("Flow control param set incorrectly");
+ 		err = TXGBE_ERR_CONFIG;
+ 		goto out;
+ 	}
+@@ -1037,8 +1001,7 @@ s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+ {
+ 	if ((!(adv_reg)) ||  (!(lp_reg))) {
+-		DEBUGOUT("Local or link partner's advertised flow control "
+-			      "settings are NULL. Local: %x, link partner: %x\n",
++		DEBUGOUT("Local or link partner's advertised flow control settings are NULL. Local: %x, link partner: %x",
+ 			      adv_reg, lp_reg);
+ 		return TXGBE_ERR_FC_NOT_NEGOTIATED;
+ 	}
+@@ -1053,22 +1016,22 @@ s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ 		 */
+ 		if (hw->fc.requested_mode == txgbe_fc_full) {
+ 			hw->fc.current_mode = txgbe_fc_full;
+-			DEBUGOUT("Flow Control = FULL.\n");
++			DEBUGOUT("Flow Control = FULL.");
+ 		} else {
+ 			hw->fc.current_mode = txgbe_fc_rx_pause;
+-			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
++			DEBUGOUT("Flow Control=RX PAUSE frames only");
+ 		}
+ 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ 		hw->fc.current_mode = txgbe_fc_tx_pause;
+-		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
++		DEBUGOUT("Flow Control = TX PAUSE frames only.");
+ 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ 		hw->fc.current_mode = txgbe_fc_rx_pause;
+-		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
++		DEBUGOUT("Flow Control = RX PAUSE frames only.");
+ 	} else {
+ 		hw->fc.current_mode = txgbe_fc_none;
+-		DEBUGOUT("Flow Control = NONE.\n");
++		DEBUGOUT("Flow Control = NONE.");
+ 	}
+ 	return 0;
+ }
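The if/else chain above is the standard IEEE 802.3 pause resolution: the symmetric and asymmetric pause bits from the local advertisement and the link partner's page decide between full, RX-only, TX-only, and no flow control. A condensed sketch of the same decision table (names illustrative):

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(int adv_sym, int adv_asm, int lp_sym,
			       int lp_asm, int requested_full)
{
	if (adv_sym && lp_sym)			/* both ends symmetric */
		return requested_full ? FC_FULL : FC_RX_PAUSE;
	if (!adv_sym && adv_asm && lp_sym && lp_asm)
		return FC_TX_PAUSE;		/* we may only send PAUSE */
	if (adv_sym && adv_asm && !lp_sym && lp_asm)
		return FC_RX_PAUSE;		/* we may only honor PAUSE */
	return FC_NONE;
}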
+@@ -1168,8 +1131,6 @@ void txgbe_fc_autoneg(struct txgbe_hw *hw)
+ 	u32 speed;
+ 	bool link_up;
+ 
+-	DEBUGFUNC("txgbe_fc_autoneg");
+-
+ 	/*
+ 	 * AN should have completed when the cable was plugged in.
+ 	 * Look for reasons to bail out.  Bail out if:
+@@ -1235,8 +1196,6 @@ s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask)
+ 	u32 timeout = 200;
+ 	u32 i;
+ 
+-	DEBUGFUNC("txgbe_acquire_swfw_sync");
+-
+ 	for (i = 0; i < timeout; i++) {
+ 		/*
+ 		 * SW NVM semaphore bit is used for access to all
+@@ -1279,8 +1238,6 @@ void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask)
+ 	u32 mngsem;
+ 	u32 swmask = mask;
+ 
+-	DEBUGFUNC("txgbe_release_swfw_sync");
+-
+ 	txgbe_get_eeprom_semaphore(hw);
+ 
+ 	mngsem = rd32(hw, TXGBE_MNGSEM);
+@@ -1304,8 +1261,6 @@ s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw)
+ 	int i;
+ 	u32 secrxreg;
+ 
+-	DEBUGFUNC("txgbe_disable_sec_rx_path");
+-
+ 	secrxreg = rd32(hw, TXGBE_SECRXCTL);
+ 	secrxreg |= TXGBE_SECRXCTL_XDSA;
+ 	wr32(hw, TXGBE_SECRXCTL, secrxreg);
+@@ -1320,8 +1275,7 @@ s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw)
+ 
+ 	/* For informational purposes only */
+ 	if (i >= TXGBE_MAX_SECRX_POLL)
+-		DEBUGOUT("Rx unit being enabled before security "
+-			 "path fully disabled.  Continuing with init.\n");
++		DEBUGOUT("Rx unit being enabled before security path fully disabled.  Continuing with init.");
+ 
+ 	return 0;
+ }
+@@ -1336,8 +1290,6 @@ s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw)
+ {
+ 	u32 secrxreg;
+ 
+-	DEBUGFUNC("txgbe_enable_sec_rx_path");
+-
+ 	secrxreg = rd32(hw, TXGBE_SECRXCTL);
+ 	secrxreg &= ~TXGBE_SECRXCTL_XDSA;
+ 	wr32(hw, TXGBE_SECRXCTL, secrxreg);
+@@ -1373,8 +1325,7 @@ int txgbe_disable_sec_tx_path(struct txgbe_hw *hw)
+ 
+ 	/* For informational purposes only */
+ 	if (i >= TXGBE_MAX_SECTX_POLL)
+-		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+-			 "path fully disabled.  Continuing with init.");
++		DEBUGOUT("Tx unit being enabled before security path fully disabled.  Continuing with init.");
+ 
+ 	return 0;
+ }
+@@ -1411,8 +1362,6 @@ static s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw,
+ {
+ 	s32 err;
+ 
+-	DEBUGFUNC("txgbe_get_san_mac_addr_offset");
+-
+ 	/*
+ 	 * First read the EEPROM pointer to see if the MAC addresses are
+ 	 * available.
+@@ -1443,8 +1392,6 @@ s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
+ 	u8 i;
+ 	s32 err;
+ 
+-	DEBUGFUNC("txgbe_get_san_mac_addr");
+-
+ 	/*
+ 	 * First read the EEPROM pointer to see if the MAC addresses are
+ 	 * available. If they're not, no point in calling set_lan_id() here.
+@@ -1493,8 +1440,6 @@ s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
+ 	u16 san_mac_data, san_mac_offset;
+ 	u8 i;
+ 
+-	DEBUGFUNC("txgbe_set_san_mac_addr");
+-
+ 	/* Look for SAN mac address pointer.  If not defined, return */
+ 	err = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ 	if (err || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+@@ -1525,11 +1470,9 @@ s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq)
+ 	u32 mpsar_lo, mpsar_hi;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("txgbe_clear_vmdq");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", rar);
++		DEBUGOUT("RAR index %d is out of range.", rar);
+ 		return TXGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -1579,11 +1522,9 @@ s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq)
+ 	u32 mpsar;
+ 	u32 rar_entries = hw->mac.num_rar_entries;
+ 
+-	DEBUGFUNC("txgbe_set_vmdq");
+-
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+-		DEBUGOUT("RAR index %d is out of range.\n", rar);
++		DEBUGOUT("RAR index %d is out of range.", rar);
+ 		return TXGBE_ERR_INVALID_ARGUMENT;
+ 	}
+ 
+@@ -1608,8 +1549,7 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
+ {
+ 	int i;
+ 
+-	DEBUGFUNC("txgbe_init_uta_tables");
+-	DEBUGOUT(" Clearing UTA\n");
++	DEBUGOUT(" Clearing UTA");
+ 
+ 	for (i = 0; i < 128; i++)
+ 		wr32(hw, TXGBE_UCADDRTBL(i), 0);
+@@ -1664,7 +1604,7 @@ s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+ 	 * slot we found during our search, else error.
+ 	 */
+ 	if (!first_empty_slot)
+-		DEBUGOUT("No space in VLVF.\n");
++		DEBUGOUT("No space in VLVF.");
+ 
+ 	return first_empty_slot ? first_empty_slot : TXGBE_ERR_NO_SPACE;
+ }
+@@ -1685,8 +1625,6 @@ s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ 	u32 regidx, vfta_delta, vfta;
+ 	s32 err;
+ 
+-	DEBUGFUNC("txgbe_set_vfta");
+-
+ 	if (vlan > 4095 || vind > 63)
+ 		return TXGBE_ERR_PARAM;
+ 
+@@ -1754,8 +1692,6 @@ s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ 	u32 portctl;
+ 	s32 vlvf_index;
+ 
+-	DEBUGFUNC("txgbe_set_vlvf");
+-
+ 	if (vlan > 4095 || vind > 63)
+ 		return TXGBE_ERR_PARAM;
+ 
+@@ -1835,8 +1771,6 @@ s32 txgbe_clear_vfta(struct txgbe_hw *hw)
+ {
+ 	u32 offset;
+ 
+-	DEBUGFUNC("txgbe_clear_vfta");
+-
+ 	for (offset = 0; offset < hw->mac.vft_size; offset++)
+ 		wr32(hw, TXGBE_VLANTBL(offset), 0);
+ 
+@@ -1890,8 +1824,6 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed,
+ 	u32 links_reg, links_orig;
+ 	u32 i;
+ 
+-	DEBUGFUNC("txgbe_check_mac_link");
+-
+ 	/* If Crosstalk fix enabled do the sanity check of making sure
+ 	 * the SFP+ cage is full.
+ 	 */
+@@ -1922,7 +1854,7 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed,
+ 	links_reg = rd32(hw, TXGBE_PORTSTAT);
+ 
+ 	if (links_orig != links_reg) {
+-		DEBUGOUT("LINKS changed from %08X to %08X\n",
++		DEBUGOUT("LINKS changed from %08X to %08X",
+ 			  links_orig, links_reg);
+ 	}
+ 
+@@ -1977,8 +1909,6 @@ s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
+ 	u16 offset, caps;
+ 	u16 alt_san_mac_blk_offset;
+ 
+-	DEBUGFUNC("txgbe_get_wwn_prefix");
+-
+ 	/* clear output first */
+ 	*wwnn_prefix = 0xFFFF;
+ 	*wwpn_prefix = 0xFFFF;
+@@ -2068,8 +1998,6 @@ void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw,
+  **/
+ s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps)
+ {
+-	DEBUGFUNC("txgbe_get_device_caps");
+-
+ 	hw->rom.readw_sw(hw, TXGBE_DEVICE_CAPS, device_caps);
+ 
+ 	return 0;
+@@ -2191,8 +2119,6 @@ s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw)
+ 	s64 tsv;
+ 	u32 ts_stat;
+ 
+-	DEBUGFUNC("txgbe_get_thermal_sensor_data");
+-
+ 	/* Only support thermal sensors attached to physical port 0 */
+ 	if (hw->bus.lan_id != 0)
+ 		return TXGBE_NOT_IMPLEMENTED;
+@@ -2223,8 +2149,6 @@ s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw)
+ {
+ 	struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+ 
+-	DEBUGFUNC("txgbe_init_thermal_sensor_thresh");
+-
+ 	memset(data, 0, sizeof(struct txgbe_thermal_sensor_data));
+ 
+ 	if (hw->bus.lan_id != 0)
+@@ -2295,8 +2219,6 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
+ 	u32 i = 0;
+ 	bool autoneg, link_up = false;
+ 
+-	DEBUGFUNC("txgbe_setup_mac_link_multispeed_fiber");
+-
+ 	/* Mask off requested but non-supported speeds */
+ 	status = hw->mac.get_link_capabilities(hw, &link_speed, &autoneg);
+ 	if (status != 0)
+@@ -2321,7 +2243,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
+ 			/* QSFP module automatically detects MAC link speed */
+ 			break;
+ 		default:
+-			DEBUGOUT("Unexpected media type.\n");
++			DEBUGOUT("Unexpected media type.");
+ 			break;
+ 		}
+ 
+@@ -2371,7 +2293,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
+ 			/* QSFP module automatically detects link speed */
+ 			break;
+ 		default:
+-			DEBUGOUT("Unexpected media type.\n");
++			DEBUGOUT("Unexpected media type.");
+ 			break;
+ 		}
+ 
+@@ -2437,8 +2359,6 @@ s32 txgbe_init_shared_code(struct txgbe_hw *hw)
+ {
+ 	s32 status;
+ 
+-	DEBUGFUNC("txgbe_init_shared_code");
+-
+ 	/*
+ 	 * Set the mac type
+ 	 */
+@@ -2474,8 +2394,6 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw)
+ {
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("txgbe_set_mac_type");
+-
+ 	if (hw->vendor_id != PCI_VENDOR_ID_WANGXUN) {
+ 		DEBUGOUT("Unsupported vendor id: %x", hw->vendor_id);
+ 		return TXGBE_ERR_DEVICE_NOT_SUPPORTED;
+@@ -2497,7 +2415,7 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw)
+ 		break;
+ 	}
+ 
+-	DEBUGOUT("found mac: %d, returns: %d\n",
++	DEBUGOUT("found mac: %d, returns: %d",
+ 		  hw->mac.type, err);
+ 	return err;
+ }
+@@ -2506,8 +2424,6 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw)
+ {
+ 	struct txgbe_mac_info *mac = &hw->mac;
+ 
+-	DEBUGFUNC("txgbe_init_mac_link_ops");
+-
+ 	/*
+ 	 * enable the laser control functions for SFP+ fiber
+ 	 * and MNG not enabled
+@@ -2550,8 +2466,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw)
+ 	struct txgbe_phy_info *phy = &hw->phy;
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("txgbe_init_phy_raptor");
+-
+ 	if ((hw->device_id & 0xFF) == TXGBE_DEV_ID_QSFP) {
+ 		/* Store flag indicating I2C bus access control unit. */
+ 		hw->phy.qsfp_shared_i2c_bus = TRUE;
+@@ -2598,8 +2512,6 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw)
+ {
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("txgbe_setup_sfp_modules");
+-
+ 	if (hw->phy.sfp_type == txgbe_sfp_type_unknown)
+ 		return 0;
+ 
+@@ -2619,7 +2531,7 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw)
+ 	msec_delay(hw->rom.semaphore_delay);
+ 
+ 	if (err) {
+-		DEBUGOUT("sfp module setup not complete\n");
++		DEBUGOUT("sfp module setup not complete");
+ 		return TXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ 	}
+ 
+@@ -2717,8 +2629,6 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
+ 	struct txgbe_rom_info *rom = &hw->rom;
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 
+-	DEBUGFUNC("txgbe_init_ops_pf");
+-
+ 	/* BUS */
+ 	bus->set_lan_id = txgbe_set_lan_id_multi_port;
+ 
+@@ -2845,8 +2755,6 @@ s32 txgbe_get_link_capabilities_raptor(struct txgbe_hw *hw,
+ 	s32 status = 0;
+ 	u32 autoc = 0;
+ 
+-	DEBUGFUNC("txgbe_get_link_capabilities_raptor");
+-
+ 	/* Check if 1G SFP module. */
+ 	if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+ 	    hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+@@ -2950,8 +2858,6 @@ u32 txgbe_get_media_type_raptor(struct txgbe_hw *hw)
+ {
+ 	u32 media_type;
+ 
+-	DEBUGFUNC("txgbe_get_media_type_raptor");
+-
+ 	if (hw->phy.ffe_set)
+ 		txgbe_bp_mode_set(hw);
+ 
+@@ -3010,8 +2916,6 @@ s32 txgbe_start_mac_link_raptor(struct txgbe_hw *hw,
+ 	s32 status = 0;
+ 	bool got_lock = false;
+ 
+-	DEBUGFUNC("txgbe_start_mac_link_raptor");
+-
+ 	UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ 
+ 	/*  reset_pipeline requires us to hold this lock as it writes to
+@@ -3094,8 +2998,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+  **/
+ void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+ {
+-	DEBUGFUNC("txgbe_flap_tx_laser_multispeed_fiber");
+-
+ 	/* Blocked by MNG FW so bail */
+ 	if (txgbe_check_reset_blocked(hw))
+ 		return;
+@@ -3127,7 +3029,7 @@ void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw,
+ 		esdp_reg &= ~(TXGBE_GPIOBIT_4 | TXGBE_GPIOBIT_5);
+ 		break;
+ 	default:
+-		DEBUGOUT("Invalid fixed module speed\n");
++		DEBUGOUT("Invalid fixed module speed");
+ 		return;
+ 	}
+ 
+@@ -3153,8 +3055,6 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw,
+ 	bool link_up = false;
+ 	u32 autoc_reg = rd32_epcs(hw, SR_AN_MMD_ADV_REG1);
+ 
+-	DEBUGFUNC("txgbe_setup_mac_link_smartspeed");
+-
+ 	 /* Set autoneg_advertised value based on input link speed */
+ 	hw->phy.autoneg_advertised = 0;
+ 
+@@ -3243,8 +3143,7 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw,
+ 
+ out:
+ 	if (link_up && link_speed == TXGBE_LINK_SPEED_1GB_FULL)
+-		DEBUGOUT("Smartspeed has downgraded the link speed "
+-		"from the maximum advertised\n");
++		DEBUGOUT("Smartspeed has downgraded the link speed from the maximum advertised");
+ 	return status;
+ }
+ 
+@@ -3270,7 +3169,6 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw,
+ 	u64 orig_autoc = 0;
+ 	u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN;
+ 
+-	DEBUGFUNC("txgbe_setup_mac_link");
+ 	UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ 
+ 	/* Check to see if speed passed in is supported. */
+@@ -3357,8 +3255,6 @@ static s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw,
+ {
+ 	s32 status;
+ 
+-	DEBUGFUNC("txgbe_setup_copper_link_raptor");
+-
+ 	/* Setup the PHY according to input speed */
+ 	status = hw->phy.setup_link_speed(hw, speed,
+ 					      autoneg_wait_to_complete);
+@@ -3467,8 +3363,6 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
+ 	s32 status;
+ 	u32 autoc;
+ 
+-	DEBUGFUNC("txgbe_reset_hw");
+-
+ 	/* Call adapter stop to disable tx/rx and clear interrupts */
+ 	status = hw->mac.stop_hw(hw);
+ 	if (status != 0)
+@@ -3624,15 +3518,13 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+ 	u32 fdircmd;
+ 	fdirctrl &= ~TXGBE_FDIRCTL_INITDONE;
+ 
+-	DEBUGFUNC("txgbe_reinit_fdir_tables");
+-
+ 	/*
+ 	 * Before starting reinitialization process,
+ 	 * FDIRPICMD.OP must be zero.
+ 	 */
+ 	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ 	if (err) {
+-		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
++		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.");
+ 		return err;
+ 	}
+ 
+@@ -3666,7 +3558,7 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+ 		msec_delay(1);
+ 	}
+ 	if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+-		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
++		DEBUGOUT("Flow Director Signature poll time exceeded!");
+ 		return TXGBE_ERR_FDIR_REINIT_FAILED;
+ 	}
+ 
+@@ -3692,8 +3584,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw)
+ {
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("txgbe_start_hw_raptor");
+-
+ 	err = txgbe_start_hw(hw);
+ 	if (err != 0)
+ 		goto out;
+@@ -3718,8 +3608,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw)
+  **/
+ s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval)
+ {
+-	DEBUGFUNC("txgbe_enable_rx_dma_raptor");
+-
+ 	/*
+ 	 * Workaround silicon errata when enabling the Rx datapath.
+ 	 * If traffic is incoming before we enable the Rx unit, it could hang
+@@ -3752,8 +3640,6 @@ bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw)
+ 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ 	s32 status;
+ 
+-	DEBUGFUNC("txgbe_verify_lesm_fw_enabled_raptor");
+-
+ 	/* get the offset to the Firmware Module block */
+ 	status = hw->rom.read16(hw, TXGBE_FW_PTR, &fw_offset);
+ 
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mbx.c b/dpdk/drivers/net/txgbe/base/txgbe_mbx.c
+index 4d64c6c3e9..7f2489a13f 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_mbx.c
++++ b/dpdk/drivers/net/txgbe/base/txgbe_mbx.c
+@@ -21,8 +21,6 @@ s32 txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_read_mbx");
+-
+ 	/* limit read to size of mailbox */
+ 	if (size > mbx->size)
+ 		size = mbx->size;
+@@ -47,8 +45,6 @@ s32 txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = 0;
+ 
+-	DEBUGFUNC("txgbe_write_mbx");
+-
+ 	if (size > mbx->size) {
+ 		ret_val = TXGBE_ERR_MBX;
+ 		DEBUGOUT("Invalid mailbox message size %d", size);
+@@ -71,8 +67,6 @@ s32 txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_check_for_msg");
+-
+ 	if (mbx->check_for_msg)
+ 		ret_val = mbx->check_for_msg(hw, mbx_id);
+ 
+@@ -91,8 +85,6 @@ s32 txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_check_for_ack");
+-
+ 	if (mbx->check_for_ack)
+ 		ret_val = mbx->check_for_ack(hw, mbx_id);
+ 
+@@ -111,8 +103,6 @@ s32 txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_check_for_rst");
+-
+ 	if (mbx->check_for_rst)
+ 		ret_val = mbx->check_for_rst(hw, mbx_id);
+ 
+@@ -131,8 +121,6 @@ STATIC s32 txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	int countdown = mbx->timeout;
+ 
+-	DEBUGFUNC("txgbe_poll_for_msg");
+-
+ 	if (!countdown || !mbx->check_for_msg)
+ 		goto out;
+ 
+@@ -162,8 +150,6 @@ STATIC s32 txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	int countdown = mbx->timeout;
+ 
+-	DEBUGFUNC("txgbe_poll_for_ack");
+-
+ 	if (!countdown || !mbx->check_for_ack)
+ 		goto out;
+ 
+@@ -196,8 +182,6 @@ s32 txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_read_posted_mbx");
+-
+ 	if (!mbx->read)
+ 		goto out;
+ 
+@@ -226,8 +210,6 @@ s32 txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size,
+ 	struct txgbe_mbx_info *mbx = &hw->mbx;
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_write_posted_mbx");
+-
+ 	/* exit if either we can't write or there isn't a defined timeout */
+ 	if (!mbx->write || !mbx->timeout)
+ 		goto out;
+@@ -292,7 +274,6 @@ s32 txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 mbx_id)
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+ 	UNREFERENCED_PARAMETER(mbx_id);
+-	DEBUGFUNC("txgbe_check_for_msg_vf");
+ 
+ 	if (!txgbe_check_for_bit_vf(hw, TXGBE_VFMBCTL_PFSTS)) {
+ 		ret_val = 0;
+@@ -314,7 +295,6 @@ s32 txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 mbx_id)
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+ 	UNREFERENCED_PARAMETER(mbx_id);
+-	DEBUGFUNC("txgbe_check_for_ack_vf");
+ 
+ 	if (!txgbe_check_for_bit_vf(hw, TXGBE_VFMBCTL_PFACK)) {
+ 		ret_val = 0;
+@@ -336,7 +316,6 @@ s32 txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 mbx_id)
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+ 	UNREFERENCED_PARAMETER(mbx_id);
+-	DEBUGFUNC("txgbe_check_for_rst_vf");
+ 
+ 	if (!txgbe_check_for_bit_vf(hw, (TXGBE_VFMBCTL_RSTD |
+ 	    TXGBE_VFMBCTL_RSTI))) {
+@@ -357,8 +336,6 @@ STATIC s32 txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw)
+ {
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_obtain_mbx_lock_vf");
+-
+ 	/* Take ownership of the buffer */
+ 	wr32(hw, TXGBE_VFMBCTL, TXGBE_VFMBCTL_VFU);
+ 
+@@ -386,8 +363,6 @@ s32 txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size,
+ 
+ 	UNREFERENCED_PARAMETER(mbx_id);
+ 
+-	DEBUGFUNC("txgbe_write_mbx_vf");
+-
+ 	/* lock the mailbox to prevent pf/vf race condition */
+ 	ret_val = txgbe_obtain_mbx_lock_vf(hw);
+ 	if (ret_val)
+@@ -426,7 +401,6 @@ s32 txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size,
+ 	s32 ret_val = 0;
+ 	u16 i;
+ 
+-	DEBUGFUNC("txgbe_read_mbx_vf");
+ 	UNREFERENCED_PARAMETER(mbx_id);
+ 
+ 	/* lock the mailbox to prevent pf/vf race condition */
+@@ -499,8 +473,6 @@ s32 txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf_number)
+ 	s32 index = TXGBE_MBVFICR_INDEX(vf_number);
+ 	u32 vf_bit = vf_number % 16;
+ 
+-	DEBUGFUNC("txgbe_check_for_msg_pf");
+-
+ 	if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ 				    index)) {
+ 		ret_val = 0;
+@@ -523,8 +495,6 @@ s32 txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf_number)
+ 	s32 index = TXGBE_MBVFICR_INDEX(vf_number);
+ 	u32 vf_bit = vf_number % 16;
+ 
+-	DEBUGFUNC("txgbe_check_for_ack_pf");
+-
+ 	if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ 				    index)) {
+ 		ret_val = 0;
+@@ -548,8 +518,6 @@ s32 txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf_number)
+ 	u32 vflre = 0;
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 
+-	DEBUGFUNC("txgbe_check_for_rst_pf");
+-
+ 	vflre = rd32(hw, TXGBE_FLRVFE(reg_offset));
+ 	if (vflre & (1 << vf_shift)) {
+ 		ret_val = 0;
+@@ -572,8 +540,6 @@ STATIC s32 txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf_number)
+ 	s32 ret_val = TXGBE_ERR_MBX;
+ 	u32 p2v_mailbox;
+ 
+-	DEBUGFUNC("txgbe_obtain_mbx_lock_pf");
+-
+ 	/* Take ownership of the buffer */
+ 	wr32(hw, TXGBE_MBCTL(vf_number), TXGBE_MBCTL_PFU);
+ 
+@@ -602,8 +568,6 @@ s32 txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+ 	s32 ret_val;
+ 	u16 i;
+ 
+-	DEBUGFUNC("txgbe_write_mbx_pf");
+-
+ 	/* lock the mailbox to prevent pf/vf race condition */
+ 	ret_val = txgbe_obtain_mbx_lock_pf(hw, vf_number);
+ 	if (ret_val)
+@@ -643,8 +607,6 @@ s32 txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+ 	s32 ret_val;
+ 	u16 i;
+ 
+-	DEBUGFUNC("txgbe_read_mbx_pf");
+-
+ 	/* lock the mailbox to prevent pf/vf race condition */
+ 	ret_val = txgbe_obtain_mbx_lock_pf(hw, vf_number);
+ 	if (ret_val)
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mng.c b/dpdk/drivers/net/txgbe/base/txgbe_mng.c
+index dbe512122c..045a2f5de0 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_mng.c
++++ b/dpdk/drivers/net/txgbe/base/txgbe_mng.c
+@@ -45,10 +45,8 @@ txgbe_hic_unlocked(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout)
+ 	u32 value, loop;
+ 	u16 i, dword_len;
+ 
+-	DEBUGFUNC("txgbe_hic_unlocked");
+-
+ 	if (!length || length > TXGBE_PMMBX_BSIZE) {
+-		DEBUGOUT("Buffer length failure buffersize=%d.\n", length);
++		DEBUGOUT("Buffer length failure buffersize=%d.", length);
+ 		return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ 	}
+ 
+@@ -78,7 +76,7 @@ txgbe_hic_unlocked(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout)
+ 		TXGBE_MNGMBXCTL_FWRDY, TXGBE_MNGMBXCTL_FWRDY,
+ 		&value, timeout, 1000);
+ 	if (!loop || !(value & TXGBE_MNGMBXCTL_FWACK)) {
+-		DEBUGOUT("Command has failed with no status valid.\n");
++		DEBUGOUT("Command has failed with no status valid.");
+ 		return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ 	}
+ 
+@@ -114,10 +112,8 @@ txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer,
+ 	u32 bi;
+ 	u32 dword_len;
+ 
+-	DEBUGFUNC("txgbe_host_interface_command");
+-
+ 	if (length == 0 || length > TXGBE_PMMBX_BSIZE) {
+-		DEBUGOUT("Buffer length failure buffersize=%d.\n", length);
++		DEBUGOUT("Buffer length failure buffersize=%d.", length);
+ 		return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ 	}
+ 
+@@ -159,7 +155,7 @@ txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer,
+ 		goto rel_out;
+ 
+ 	if (length < buf_len + hdr_size) {
+-		DEBUGOUT("Buffer not large enough for reply message.\n");
++		DEBUGOUT("Buffer not large enough for reply message.");
+ 		err = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ 		goto rel_out;
+ 	}
+@@ -285,7 +281,6 @@ s32 txgbe_hic_set_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min,
+ 	int i;
+ 	s32 ret_val = 0;
+ 
+-	DEBUGFUNC("txgbe_hic_set_drv_ver");
+ 	UNREFERENCED_PARAMETER(len, driver_ver);
+ 
+ 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+@@ -338,8 +333,6 @@ txgbe_hic_reset(struct txgbe_hw *hw)
+ 	int i;
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("\n");
+-
+ 	reset_cmd.hdr.cmd = FW_RESET_CMD;
+ 	reset_cmd.hdr.buf_len = FW_RESET_LEN;
+ 	reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h
+index 11fcf7e8fe..b62c0b0824 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h
++++ b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h
+@@ -18,6 +18,7 @@
+ #include <rte_byteorder.h>
+ #include <rte_config.h>
+ #include <rte_io.h>
++#include <rte_ether.h>
+ 
+ #include "../txgbe_logs.h"
+ 
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_phy.c b/dpdk/drivers/net/txgbe/base/txgbe_phy.c
+index 3f5229ecc2..9f46d5bdb0 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_phy.c
++++ b/dpdk/drivers/net/txgbe/base/txgbe_phy.c
+@@ -35,7 +35,7 @@ static bool txgbe_identify_extphy(struct txgbe_hw *hw)
+ 	u16 phy_addr = 0;
+ 
+ 	if (!txgbe_validate_phy_addr(hw, phy_addr)) {
+-		DEBUGOUT("Unable to validate PHY address 0x%04X\n",
++		DEBUGOUT("Unable to validate PHY address 0x%04X",
+ 			phy_addr);
+ 		return false;
+ 	}
+@@ -100,8 +100,6 @@ s32 txgbe_identify_phy(struct txgbe_hw *hw)
+ {
+ 	s32 err = TXGBE_ERR_PHY_ADDR_INVALID;
+ 
+-	DEBUGFUNC("txgbe_identify_phy");
+-
+ 	txgbe_read_phy_if(hw);
+ 
+ 	if (hw->phy.type != txgbe_phy_unknown)
+@@ -137,11 +135,9 @@ s32 txgbe_check_reset_blocked(struct txgbe_hw *hw)
+ {
+ 	u32 mmngc;
+ 
+-	DEBUGFUNC("txgbe_check_reset_blocked");
+-
+ 	mmngc = rd32(hw, TXGBE_STAT);
+ 	if (mmngc & TXGBE_STAT_MNGVETO) {
+-		DEBUGOUT("MNG_VETO bit detected.\n");
++		DEBUGOUT("MNG_VETO bit detected.");
+ 		return true;
+ 	}
+ 
+@@ -159,8 +155,6 @@ bool txgbe_validate_phy_addr(struct txgbe_hw *hw, u32 phy_addr)
+ 	u16 phy_id = 0;
+ 	bool valid = false;
+ 
+-	DEBUGFUNC("txgbe_validate_phy_addr");
+-
+ 	hw->phy.addr = phy_addr;
+ 	hw->phy.read_reg(hw, TXGBE_MD_PHY_ID_HIGH,
+ 			     TXGBE_MD_DEV_PMA_PMD, &phy_id);
+@@ -168,7 +162,7 @@ bool txgbe_validate_phy_addr(struct txgbe_hw *hw, u32 phy_addr)
+ 	if (phy_id != 0xFFFF && phy_id != 0x0)
+ 		valid = true;
+ 
+-	DEBUGOUT("PHY ID HIGH is 0x%04X\n", phy_id);
++	DEBUGOUT("PHY ID HIGH is 0x%04X", phy_id);
+ 
+ 	return valid;
+ }
+@@ -184,8 +178,6 @@ s32 txgbe_get_phy_id(struct txgbe_hw *hw)
+ 	u16 phy_id_high = 0;
+ 	u16 phy_id_low = 0;
+ 
+-	DEBUGFUNC("txgbe_get_phy_id");
+-
+ 	err = hw->phy.read_reg(hw, TXGBE_MD_PHY_ID_HIGH,
+ 				      TXGBE_MD_DEV_PMA_PMD,
+ 				      &phy_id_high);
+@@ -198,7 +190,7 @@ s32 txgbe_get_phy_id(struct txgbe_hw *hw)
+ 		hw->phy.id |= (u32)(phy_id_low & TXGBE_PHY_REVISION_MASK);
+ 		hw->phy.revision = (u32)(phy_id_low & ~TXGBE_PHY_REVISION_MASK);
+ 	}
+-	DEBUGOUT("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
++	DEBUGOUT("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X",
+ 		  phy_id_high, phy_id_low);
+ 
+ 	return err;
+@@ -213,8 +205,6 @@ enum txgbe_phy_type txgbe_get_phy_type_from_id(u32 phy_id)
+ {
+ 	enum txgbe_phy_type phy_type;
+ 
+-	DEBUGFUNC("txgbe_get_phy_type_from_id");
+-
+ 	switch (phy_id) {
+ 	case TXGBE_PHYID_TN1010:
+ 		phy_type = txgbe_phy_tn;
+@@ -272,7 +262,7 @@ txgbe_reset_extphy(struct txgbe_hw *hw)
+ 
+ 	if (ctrl & TXGBE_MD_PORT_CTRL_RESET) {
+ 		err = TXGBE_ERR_RESET_FAILED;
+-		DEBUGOUT("PHY reset polling failed to complete.\n");
++		DEBUGOUT("PHY reset polling failed to complete.");
+ 	}
+ 
+ 	return err;
+@@ -286,8 +276,6 @@ s32 txgbe_reset_phy(struct txgbe_hw *hw)
+ {
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("txgbe_reset_phy");
+-
+ 	if (hw->phy.type == txgbe_phy_unknown)
+ 		err = txgbe_identify_phy(hw);
+ 
+@@ -343,7 +331,7 @@ s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type,
+ 	 */
+ 	if (!po32m(hw, TXGBE_MDIOSCD, TXGBE_MDIOSCD_BUSY,
+ 		0, NULL, 100, 100)) {
+-		DEBUGOUT("PHY address command did not complete\n");
++		DEBUGOUT("PHY address command did not complete");
+ 		return TXGBE_ERR_PHY;
+ 	}
+ 
+@@ -367,8 +355,6 @@ s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
+ 	s32 err;
+ 	u32 gssr = hw->phy.phy_semaphore_mask;
+ 
+-	DEBUGFUNC("txgbe_read_phy_reg");
+-
+ 	if (hw->mac.acquire_swfw_sync(hw, gssr))
+ 		return TXGBE_ERR_SWFW_SYNC;
+ 
+@@ -406,7 +392,7 @@ s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr,
+ 	/* wait for completion */
+ 	if (!po32m(hw, TXGBE_MDIOSCD, TXGBE_MDIOSCD_BUSY,
+ 		0, NULL, 100, 100)) {
+-		TLOG_DEBUG("PHY write cmd didn't complete\n");
++		DEBUGOUT("PHY write cmd didn't complete");
+ 		return -TERR_PHY;
+ 	}
+ 
+@@ -427,8 +413,6 @@ s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
+ 	s32 err;
+ 	u32 gssr = hw->phy.phy_semaphore_mask;
+ 
+-	DEBUGFUNC("txgbe_write_phy_reg");
+-
+ 	if (hw->mac.acquire_swfw_sync(hw, gssr))
+ 		err = TXGBE_ERR_SWFW_SYNC;
+ 
+@@ -452,8 +436,6 @@ s32 txgbe_setup_phy_link(struct txgbe_hw *hw)
+ 	bool autoneg = false;
+ 	u32 speed;
+ 
+-	DEBUGFUNC("txgbe_setup_phy_link");
+-
+ 	txgbe_get_copper_link_capabilities(hw, &speed, &autoneg);
+ 
+ 	/* Set or unset auto-negotiation 10G advertisement */
+@@ -539,8 +521,6 @@ s32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw,
+ {
+ 	UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ 
+-	DEBUGFUNC("txgbe_setup_phy_link_speed");
+-
+ 	/*
+ 	 * Clear autoneg_advertised and set new values based on input link
+ 	 * speed.
+@@ -623,8 +603,6 @@ s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw,
+ {
+ 	s32 err = 0;
+ 
+-	DEBUGFUNC("txgbe_get_copper_link_capabilities");
+-
+ 	*autoneg = true;
+ 	if (!hw->phy.speeds_supported)
+ 		err = txgbe_get_copper_speeds_supported(hw);
+@@ -652,8 +630,6 @@ s32 txgbe_check_phy_link_tnx(struct txgbe_hw *hw, u32 *speed,
+ 	u16 phy_speed = 0;
+ 	u16 phy_data = 0;
+ 
+-	DEBUGFUNC("txgbe_check_phy_link_tnx");
+-
+ 	/* Initialize speed and link to default case */
+ 	*link_up = false;
+ 	*speed = TXGBE_LINK_SPEED_10GB_FULL;
+@@ -697,8 +673,6 @@ s32 txgbe_setup_phy_link_tnx(struct txgbe_hw *hw)
+ 	bool autoneg = false;
+ 	u32 speed;
+ 
+-	DEBUGFUNC("txgbe_setup_phy_link_tnx");
+-
+ 	txgbe_get_copper_link_capabilities(hw, &speed, &autoneg);
+ 
+ 	if (speed & TXGBE_LINK_SPEED_10GB_FULL) {
+@@ -772,8 +746,6 @@ s32 txgbe_identify_module(struct txgbe_hw *hw)
+ {
+ 	s32 err = TXGBE_ERR_SFP_NOT_PRESENT;
+ 
+-	DEBUGFUNC("txgbe_identify_module");
+-
+ 	switch (hw->phy.media_type) {
+ 	case txgbe_media_type_fiber:
+ 		err = txgbe_identify_sfp_module(hw);
+@@ -811,8 +783,6 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw)
+ 	u8 cable_spec = 0;
+ 	u16 enforce_sfp = 0;
+ 
+-	DEBUGFUNC("txgbe_identify_sfp_module");
+-
+ 	if (hw->phy.media_type != txgbe_media_type_fiber) {
+ 		hw->phy.sfp_type = txgbe_sfp_type_not_present;
+ 		return TXGBE_ERR_SFP_NOT_PRESENT;
+@@ -992,7 +962,7 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw)
+ 	      hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 ||
+ 	      hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 ||
+ 	      hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1)) {
+-		DEBUGOUT("SFP+ module not supported\n");
++		DEBUGOUT("SFP+ module not supported");
+ 		hw->phy.type = txgbe_phy_sfp_unsupported;
+ 		return TXGBE_ERR_SFP_NOT_SUPPORTED;
+ 	}
+@@ -1021,8 +991,6 @@ s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw)
+ 	u8 device_tech = 0;
+ 	bool active_cable = false;
+ 
+-	DEBUGFUNC("txgbe_identify_qsfp_module");
+-
+ 	if (hw->phy.media_type != txgbe_media_type_fiber_qsfp) {
+ 		hw->phy.sfp_type = txgbe_sfp_type_not_present;
+ 		err = TXGBE_ERR_SFP_NOT_PRESENT;
+@@ -1165,10 +1133,10 @@ s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw)
+ 				if (hw->allow_unsupported_sfp) {
+ 					DEBUGOUT("WARNING: Wangxun (R) Network Connections are quality tested using Wangxun (R) Ethernet Optics. "
+ 						"Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+-						"Wangxun Corporation is not responsible for any harm caused by using untested modules.\n");
++						"Wangxun Corporation is not responsible for any harm caused by using untested modules.");
+ 					err = 0;
+ 				} else {
+-					DEBUGOUT("QSFP module not supported\n");
++					DEBUGOUT("QSFP module not supported");
+ 					hw->phy.type =
+ 						txgbe_phy_sfp_unsupported;
+ 					err = TXGBE_ERR_SFP_NOT_SUPPORTED;
+@@ -1194,8 +1162,6 @@ s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw)
+ s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+ 				  u8 *eeprom_data)
+ {
+-	DEBUGFUNC("txgbe_read_i2c_eeprom");
+-
+ 	return hw->phy.read_i2c_byte(hw, byte_offset,
+ 					 TXGBE_I2C_EEPROM_DEV_ADDR,
+ 					 eeprom_data);
+@@ -1228,8 +1194,6 @@ s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset,
+ s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+ 				   u8 eeprom_data)
+ {
+-	DEBUGFUNC("txgbe_write_i2c_eeprom");
+-
+ 	return hw->phy.write_i2c_byte(hw, byte_offset,
+ 					  TXGBE_I2C_EEPROM_DEV_ADDR,
+ 					  eeprom_data);
+@@ -1248,8 +1212,6 @@ s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+ s32 txgbe_read_i2c_byte_unlocked(struct txgbe_hw *hw, u8 byte_offset,
+ 					   u8 dev_addr, u8 *data)
+ {
+-	DEBUGFUNC("txgbe_read_i2c_byte");
+-
+ 	txgbe_i2c_start(hw, dev_addr);
+ 
+ 	/* wait tx empty */
+@@ -1312,8 +1274,6 @@ s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+ s32 txgbe_write_i2c_byte_unlocked(struct txgbe_hw *hw, u8 byte_offset,
+ 					    u8 dev_addr, u8 data)
+ {
+-	DEBUGFUNC("txgbe_write_i2c_byte");
+-
+ 	txgbe_i2c_start(hw, dev_addr);
+ 
+ 	/* wait tx empty */
+@@ -1367,8 +1327,6 @@ s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+  **/
+ static void txgbe_i2c_start(struct txgbe_hw *hw, u8 dev_addr)
+ {
+-	DEBUGFUNC("txgbe_i2c_start");
+-
+ 	wr32(hw, TXGBE_I2CENA, 0);
+ 
+ 	wr32(hw, TXGBE_I2CCON,
+@@ -1396,12 +1354,10 @@ static void txgbe_i2c_start(struct txgbe_hw *hw, u8 dev_addr)
+  **/
+ static void txgbe_i2c_stop(struct txgbe_hw *hw)
+ {
+-	DEBUGFUNC("txgbe_i2c_stop");
+-
+ 	/* wait for completion */
+ 	if (!po32m(hw, TXGBE_I2CSTAT, TXGBE_I2CSTAT_MST,
+ 		0, NULL, 100, 100)) {
+-		DEBUGFUNC("i2c stop timeout.");
++		DEBUGOUT("i2c stop timeout.");
+ 	}
+ 
+ 	wr32(hw, TXGBE_I2CENA, 0);
+@@ -1411,9 +1367,17 @@ static void
+ txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw)
+ {
+ 	u32 value;
++	u8 device_type = hw->subsystem_device_id & 0xF0;
+ 
+ 	wr32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1, 0x3002);
+-	wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x0105);
++	/* for sgmii + external phy, set to 0x0105 (phy sgmii mode) */
++	/* for sgmii direct link, set to 0x010c (mac sgmii mode) */
++	if (device_type == TXGBE_DEV_ID_MAC_SGMII ||
++			hw->phy.media_type == txgbe_media_type_fiber)
++		wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x010C);
++	else if (device_type == TXGBE_DEV_ID_SGMII ||
++			device_type == TXGBE_DEV_ID_XAUI)
++		wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x0105);
+ 	wr32_epcs(hw, SR_MII_MMD_DIGI_CTL, 0x0200);
+ 	value = rd32_epcs(hw, SR_MII_MMD_CTL);
+ 	value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9);
+@@ -1455,6 +1419,10 @@ txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg)
+ 		if (!(hw->devarg.auto_neg == 1)) {
+ 			wr32_epcs(hw, SR_AN_CTRL, 0);
+ 			wr32_epcs(hw, VR_AN_KR_MODE_CL, 0);
++		} else {
++			value = rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
++			value &= ~(1 << 6);
++			wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+ 		}
+ 		if (hw->devarg.present == 1) {
+ 			value = rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+@@ -2320,6 +2288,8 @@ void txgbe_autoc_write(struct txgbe_hw *hw, u64 autoc)
+ 		}
+ 	} else if (hw->phy.media_type == txgbe_media_type_fiber) {
+ 		txgbe_set_link_to_sfi(hw, speed);
++		if (speed == TXGBE_LINK_SPEED_1GB_FULL)
++			txgbe_set_sgmii_an37_ability(hw);
+ 	}
+ 
+ 	if (speed == TXGBE_LINK_SPEED_10GB_FULL)
+@@ -2416,8 +2386,6 @@ s32 txgbe_kr_handle(struct txgbe_hw *hw)
+ 	u32 value;
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("txgbe_kr_handle");
+-
+ 	value = rd32_epcs(hw, VR_AN_INTR);
+ 	BP_LOG("AN INTERRUPT!! value: 0x%x\n", value);
+ 	if (!(value & VR_AN_INTR_PG_RCV)) {
+@@ -2441,8 +2409,6 @@ static s32 txgbe_handle_bp_flow(u32 link_mode, struct txgbe_hw *hw)
+ 	s32 status = 0;
+ 	struct txgbe_backplane_ability local_ability, lp_ability;
+ 
+-	DEBUGFUNC("txgbe_handle_bp_flow");
+-
+ 	local_ability.current_link_mode = link_mode;
+ 
+ 	/* 1. Get the local AN73 Base Page Ability */
+@@ -2544,8 +2510,6 @@ static void txgbe_get_bp_ability(struct txgbe_backplane_ability *ability,
+ {
+ 	u32 value = 0;
+ 
+-	DEBUGFUNC("txgbe_get_bp_ability");
+-
+ 	/* Link Partner Base Page */
+ 	if (link_partner == 1) {
+ 		/* Read the link partner AN73 Base Page Ability Registers */
+@@ -2617,8 +2581,6 @@ static s32 txgbe_check_bp_ability(struct txgbe_backplane_ability *local_ability,
+ 	u32 com_link_abi;
+ 	s32 ret = 0;
+ 
+-	DEBUGFUNC("txgbe_check_bp_ability");
+-
+ 	com_link_abi = local_ability->link_ability & lp_ability->link_ability;
+ 	BP_LOG("com_link_abi = 0x%x, local_ability = 0x%x, lp_ability = 0x%x\n",
+ 		com_link_abi, local_ability->link_ability,
+@@ -2674,8 +2636,6 @@ static void txgbe_clear_bp_intr(u32 bit, u32 bit_high, struct txgbe_hw *hw)
+ {
+ 	u32 rdata = 0, wdata, i;
+ 
+-	DEBUGFUNC("txgbe_clear_bp_intr");
+-
+ 	rdata = rd32_epcs(hw, VR_AN_INTR);
+ 	BP_LOG("[Before clear]Read VR AN MMD Interrupt Register: 0x%x\n",
+ 			rdata);
+@@ -2700,8 +2660,6 @@ static s32 txgbe_enable_kr_training(struct txgbe_hw *hw)
+ 	s32 status = 0;
+ 	u32 value = 0;
+ 
+-	DEBUGFUNC("txgbe_enable_kr_training");
+-
+ 	BP_LOG("Enable Clause 72 KR Training ...\n");
+ 
+ 	if (CL72_KRTR_PRBS_MODE_EN != 0xFFFF) {
+@@ -2745,8 +2703,6 @@ static s32 txgbe_disable_kr_training(struct txgbe_hw *hw, s32 post, s32 mode)
+ {
+ 	s32 status = 0;
+ 
+-	DEBUGFUNC("txgbe_disable_kr_training");
+-
+ 	BP_LOG("Disable Clause 72 KR Training ...\n");
+ 	/* Read PHY Lane0 TX EQ before Clause 72 KR Training. */
+ 	txgbe_read_phy_lane_tx_eq(0, hw, post, mode);
+@@ -2763,8 +2719,6 @@ static s32 txgbe_check_kr_training(struct txgbe_hw *hw)
+ 	int i;
+ 	int times = hw->devarg.poll ? 35 : 20;
+ 
+-	DEBUGFUNC("txgbe_check_kr_training");
+-
+ 	for (i = 0; i < times; i++) {
+ 		value = rd32_epcs(hw, SR_PMA_KR_LP_CEU);
+ 		BP_LOG("SR PMA MMD 10GBASE-KR LP Coefficient Update Register: 0x%x\n",
+@@ -2822,8 +2776,6 @@ static void txgbe_read_phy_lane_tx_eq(u16 lane, struct txgbe_hw *hw,
+ 	u32 addr;
+ 	u32 tx_main_cursor, tx_pre_cursor, tx_post_cursor, lmain;
+ 
+-	DEBUGFUNC("txgbe_read_phy_lane_tx_eq");
+-
+ 	addr = TXGBE_PHY_LANE0_TX_EQ_CTL1 | (lane << 8);
+ 	value = rd32_ephy(hw, addr);
+ 	BP_LOG("PHY LANE TX EQ Read Value: %x\n", lane);
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_regs.h b/dpdk/drivers/net/txgbe/base/txgbe_regs.h
+index 144047ba62..dc22ef53e3 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_regs.h
++++ b/dpdk/drivers/net/txgbe/base/txgbe_regs.h
+@@ -1862,8 +1862,13 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual,
+ 	}
+ 
+ 	do {
+-		all |= rd32(hw, reg);
+-		value |= mask & all;
++		if (expect != 0) {
++			all |= rd32(hw, reg);
++			value |= mask & all;
++		} else {
++			all = rd32(hw, reg);
++			value = mask & all;
++		}
+ 		if (value == expect)
+ 			break;
+ 
+@@ -1896,7 +1901,7 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual,
+ 
+ #define wr32w(hw, reg, val, mask, slice) do { \
+ 	wr32((hw), reg, val); \
+-	po32m((hw), reg, mask, mask, NULL, 5, slice); \
++	po32m((hw), reg, mask, 0, NULL, 5, slice); \
+ } while (0)
+ 
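The po32m/wr32w hunks above fix a polling bug: OR-accumulating reads (all |= rd32(...)) can never satisfy an expect value of 0, since any transient 1 in the masked field latches forever, so waiting for bits to clear needs a fresh read per iteration; wr32w correspondingly now polls with expect 0 (wait-for-clear) instead of expect == mask (wait-for-set). A minimal sketch with a hypothetical read_reg() callback:

#include <stdint.h>

static int poll_mask(uint32_t (*read_reg)(void), uint32_t mask,
		     uint32_t expect, int loops)
{
	uint32_t all = 0, value = 0;

	while (loops-- > 0) {
		if (expect != 0) {
			all |= read_reg();	/* bits may set one at a time */
			value = mask & all;
		} else {
			/* must observe the whole field clear at once */
			value = mask & read_reg();
		}
		if (value == expect)
			return 0;
	}
	return -1;	/* timed out */
}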
+ #define TXGBE_XPCS_IDAADDR    0x13000
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_vf.c b/dpdk/drivers/net/txgbe/base/txgbe_vf.c
+index fb6d6d90ea..a73502351e 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_vf.c
++++ b/dpdk/drivers/net/txgbe/base/txgbe_vf.c
+@@ -107,8 +107,6 @@ s32 txgbe_reset_hw_vf(struct txgbe_hw *hw)
+ 	u32 msgbuf[TXGBE_VF_PERMADDR_MSG_LEN];
+ 	u8 *addr = (u8 *)(&msgbuf[1]);
+ 
+-	DEBUGFUNC("txgbevf_reset_hw_vf");
+-
+ 	/* Call adapter stop to disable tx/rx and clear interrupts */
+ 	hw->mac.stop_hw(hw);
+ 
+@@ -121,7 +119,7 @@ s32 txgbe_reset_hw_vf(struct txgbe_hw *hw)
+ 	mbx->write_posted(hw, msgbuf, 1, 0);
+ 	msec_delay(10);
+ 
+-	DEBUGOUT("Issuing a function level reset to MAC\n");
++	DEBUGOUT("Issuing a function level reset to MAC");
+ 	wr32(hw, TXGBE_VFRST, TXGBE_VFRST_SET);
+ 	txgbe_flush(hw);
+ 	msec_delay(50);
+@@ -238,7 +236,7 @@ STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+ 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ 		break;
+ 	default:  /* Invalid mc_filter_type */
+-		DEBUGOUT("MC filter type param set incorrectly\n");
++		DEBUGOUT("MC filter type param set incorrectly");
+ 		ASSERT(0);
+ 		break;
+ 	}
+@@ -316,8 +314,6 @@ s32 txgbe_update_mc_addr_list_vf(struct txgbe_hw *hw, u8 *mc_addr_list,
+ 
+ 	UNREFERENCED_PARAMETER(clear);
+ 
+-	DEBUGFUNC("txgbe_update_mc_addr_list_vf");
+-
+ 	/* Each entry in the list uses 1 16 bit word.  We have 30
+ 	 * 16 bit words available in our HW msg buffer (minus 1 for the
+ 	 * msg type).  That's 30 hash values if we pack 'em right.  If
+@@ -327,7 +323,7 @@ s32 txgbe_update_mc_addr_list_vf(struct txgbe_hw *hw, u8 *mc_addr_list,
+ 	 * addresses except for in large enterprise network environments.
+ 	 */
+ 
+-	DEBUGOUT("MC Addr Count = %d\n", mc_addr_count);
++	DEBUGOUT("MC Addr Count = %d", mc_addr_count);
+ 
+ 	cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ 	msgbuf[0] = TXGBE_VF_SET_MULTICAST;
+@@ -335,7 +331,7 @@ s32 txgbe_update_mc_addr_list_vf(struct txgbe_hw *hw, u8 *mc_addr_list,
+ 
+ 	for (i = 0; i < cnt; i++) {
+ 		vector = txgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+-		DEBUGOUT("Hash value = 0x%03X\n", vector);
++		DEBUGOUT("Hash value = 0x%03X", vector);
+ 		vector_list[i] = (u16)vector;
+ 	}
+ 
+diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c
+index 47d0e6ea40..49948e62bc 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c
++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c
+@@ -376,7 +376,7 @@ txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ 	if (hw->mac.type != txgbe_mac_raptor)
+ 		return -ENOSYS;
+ 
+-	if (stat_idx & !QMAP_FIELD_RESERVED_BITS_MASK)
++	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
+ 		return -EIO;
+ 
+ 	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
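The one-character fix above swaps logical NOT for bitwise NOT: !MASK on any non-zero mask evaluates to 0, so stat_idx & !MASK was always 0 and out-of-range indices were never rejected, while stat_idx & ~MASK is non-zero exactly when a bit outside the allowed field is set. A sketch with an illustrative mask value:

#include <stdint.h>

#define QMAP_MASK 0x0F	/* hypothetical: only the low 4 bits are valid */

static int stat_idx_in_range(uint32_t stat_idx)
{
	/* stat_idx & !QMAP_MASK == stat_idx & 0 -> never rejects (bug) */
	/* stat_idx & ~QMAP_MASK != 0 iff a reserved bit is set    (fix) */
	return (stat_idx & ~QMAP_MASK) == 0;
}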
+@@ -1678,7 +1678,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
+ 			return -ENOMEM;
+ 		}
+ 	}
+-	/* confiugre msix for sleep until rx interrupt */
++	/* configure msix for sleep until rx interrupt */
+ 	txgbe_configure_msix(dev);
+ 
+ 	/* initialize transmission unit */
+@@ -1937,6 +1937,7 @@ txgbe_dev_set_link_up(struct rte_eth_dev *dev)
+ 	} else {
+ 		/* Turn on the laser */
+ 		hw->mac.enable_tx_laser(hw);
++		hw->dev_start = true;
+ 		txgbe_dev_link_update(dev, 0);
+ 	}
+ 
+@@ -1957,6 +1958,7 @@ txgbe_dev_set_link_down(struct rte_eth_dev *dev)
+ 	} else {
+ 		/* Turn off the laser */
+ 		hw->mac.disable_tx_laser(hw);
++		hw->dev_start = false;
+ 		txgbe_dev_link_update(dev, 0);
+ 	}
+ 
+@@ -2034,6 +2036,7 @@ txgbe_dev_close(struct rte_eth_dev *dev)
+ 
+ #ifdef RTE_LIB_SECURITY
+ 	rte_free(dev->security_ctx);
++	dev->security_ctx = NULL;
+ #endif
+ 
+ 	return ret;
+@@ -3682,7 +3685,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ 		wr32(hw, TXGBE_IVARMISC, tmp);
+ 	} else {
+ 		/* rx or tx causes */
+-		/* Workround for ICR lost */
++		/* Workaround for ICR lost */
+ 		idx = ((16 * (queue & 1)) + (8 * direction));
+ 		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
+ 		tmp &= ~(0xFF << idx);
+@@ -4387,7 +4390,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev)
+ 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
+ 
+-	/* Stop incrementating the System Time registers. */
++	/* Stop incrementing the System Time registers. */
+ 	wr32(hw, TXGBE_TSTIMEINC, 0);
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c
+index 84b960b8f9..f52cd8bc19 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c
++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c
+@@ -961,7 +961,7 @@ txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ 		wr32(hw, TXGBE_VFIVARMISC, tmp);
+ 	} else {
+ 		/* rx or tx cause */
+-		/* Workround for ICR lost */
++		/* Workaround for ICR lost */
+ 		idx = ((16 * (queue & 1)) + (8 * direction));
+ 		tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
+ 		tmp &= ~(0xFF << idx);
+@@ -997,7 +997,7 @@ txgbevf_configure_msix(struct rte_eth_dev *dev)
+ 	/* Configure all RX queues of VF */
+ 	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+ 		/* Force all queue use vector 0,
+-		 * as TXGBE_VF_MAXMSIVECOTR = 1
++		 * as TXGBE_VF_MAXMSIVECTOR = 1
+ 		 */
+ 		txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ 		rte_intr_vec_list_index_set(intr_handle, q_idx,
+@@ -1288,7 +1288,7 @@ txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+ 
+ 	/* only one misc vector supported - mailbox */
+ 	eicr &= TXGBE_VFICR_MASK;
+-	/* Workround for ICR lost */
++	/* Workaround for ICR lost */
+ 	intr->flags |= TXGBE_FLAG_MAILBOX;
+ 
+ 	/* To avoid compiler warnings set eicr to used. */
+diff --git a/dpdk/drivers/net/txgbe/txgbe_ipsec.c b/dpdk/drivers/net/txgbe/txgbe_ipsec.c
+index 445733f3ba..3ca3d85ed5 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_ipsec.c
++++ b/dpdk/drivers/net/txgbe/txgbe_ipsec.c
+@@ -288,7 +288,7 @@ txgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+ 			return -1;
+ 		}
+ 
+-		/* Disable and clear Rx SPI and key table entryes*/
++		/* Disable and clear Rx SPI and key table entries */
+ 		reg_val = TXGBE_IPSRXIDX_WRITE |
+ 			TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
+ 		wr32(hw, TXGBE_IPSRXSPI, 0);
+diff --git a/dpdk/drivers/net/txgbe/txgbe_logs.h b/dpdk/drivers/net/txgbe/txgbe_logs.h
+index 67e9bfb3af..74f49ab9ef 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_logs.h
++++ b/dpdk/drivers/net/txgbe/txgbe_logs.h
+@@ -48,11 +48,8 @@ extern int txgbe_logtype_tx_free;
+ #define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+ #endif
+ 
+-#define TLOG_DEBUG(fmt, args...)  PMD_DRV_LOG(DEBUG, fmt, ##args)
+-
+-#define DEBUGOUT(fmt, args...)    TLOG_DEBUG(fmt, ##args)
+-#define PMD_INIT_FUNC_TRACE()     TLOG_DEBUG(" >>")
+-#define DEBUGFUNC(fmt)            TLOG_DEBUG(fmt)
++#define DEBUGOUT(fmt, args...)    PMD_DRV_LOG(DEBUG, fmt, ##args)
++#define PMD_INIT_FUNC_TRACE()     PMD_DRV_LOG(DEBUG, ">>")
+ 
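This macro cleanup explains the bulk of the patch: every trailing \n is dropped from DEBUGOUT strings and every DEBUGFUNC() call is deleted because the underlying PMD_DRV_LOG already appends a newline and prefixes the calling function's name. A hedged sketch of the usual DPDK shape of that macro (the driver's exact definition may differ):

#define PMD_DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, txgbe_logtype_driver, \
		"%s(): " fmt "\n", __func__, ##args)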
+ extern int txgbe_logtype_bp;
+ #define BP_LOG(fmt, args...) \
+diff --git a/dpdk/drivers/net/txgbe/txgbe_pf.c b/dpdk/drivers/net/txgbe/txgbe_pf.c
+index 30be287330..0b82fb1a88 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_pf.c
++++ b/dpdk/drivers/net/txgbe/txgbe_pf.c
+@@ -108,7 +108,7 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+ 		nb_queue = 4;
+ 		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
+ 	} else {
+-		nb_queue = 8;
++		nb_queue = 4;
+ 		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
+ 	}
+ 
+@@ -236,7 +236,7 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+ 
+ 	wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+ 
+-	/* clear VMDq map to perment rar 0 */
++	/* clear VMDq map to permanent rar 0 */
+ 	hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
+ 
+ 	/* clear VMDq map to scan rar 127 */
+diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c
+index 070f0e6dfd..a280e788fb 100644
+--- a/dpdk/drivers/net/vhost/rte_eth_vhost.c
++++ b/dpdk/drivers/net/vhost/rte_eth_vhost.c
+@@ -716,10 +716,11 @@ eth_vhost_install_intr(struct rte_eth_dev *dev)
+ }
+ 
+ static void
+-update_queuing_status(struct rte_eth_dev *dev)
++update_queuing_status(struct rte_eth_dev *dev, bool wait_queuing)
+ {
+ 	struct pmd_internal *internal = dev->data->dev_private;
+ 	struct vhost_queue *vq;
++	struct rte_vhost_vring_state *state;
+ 	unsigned int i;
+ 	int allow_queuing = 1;
+ 
+@@ -730,13 +731,18 @@ update_queuing_status(struct rte_eth_dev *dev)
+ 	    rte_atomic32_read(&internal->dev_attached) == 0)
+ 		allow_queuing = 0;
+ 
++	state = vring_states[dev->data->port_id];
++
+ 	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
+ 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ 		vq = dev->data->rx_queues[i];
+ 		if (vq == NULL)
+ 			continue;
+-		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+-		while (rte_atomic32_read(&vq->while_queuing))
++		if (allow_queuing && state->cur[vq->virtqueue_id])
++			rte_atomic32_set(&vq->allow_queuing, 1);
++		else
++			rte_atomic32_set(&vq->allow_queuing, 0);
++		while (wait_queuing && rte_atomic32_read(&vq->while_queuing))
+ 			rte_pause();
+ 	}
+ 
+@@ -744,8 +750,11 @@ update_queuing_status(struct rte_eth_dev *dev)
+ 		vq = dev->data->tx_queues[i];
+ 		if (vq == NULL)
+ 			continue;
+-		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+-		while (rte_atomic32_read(&vq->while_queuing))
++		if (allow_queuing && state->cur[vq->virtqueue_id])
++			rte_atomic32_set(&vq->allow_queuing, 1);
++		else
++			rte_atomic32_set(&vq->allow_queuing, 0);
++		while (wait_queuing && rte_atomic32_read(&vq->while_queuing))
+ 			rte_pause();
+ 	}
+ }
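The reworked helper above lets a queue run only when the device is started and attached and its individual vring is enabled in vring_states[], and it busy-waits for in-flight bursts only when the caller passes wait_queuing (the stop/destroy paths); the enable paths pass false, so vring_state_changed below can call it without stalling. Per queue, the update reduces to roughly this sketch, using the PMD's own atomics:

#include <stdbool.h>
#include <rte_atomic.h>
#include <rte_pause.h>

static void set_queue_state(rte_atomic32_t *allow_queuing,
			    rte_atomic32_t *while_queuing,
			    bool dev_allows, bool vring_enabled,
			    bool wait_queuing)
{
	rte_atomic32_set(allow_queuing, dev_allows && vring_enabled);
	/* teardown paths wait for rx/tx burst functions to drain */
	while (wait_queuing && rte_atomic32_read(while_queuing))
		rte_pause();
}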
+@@ -827,7 +836,7 @@ new_device(int vid)
+ 	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
+ 
+ 	rte_atomic32_set(&internal->dev_attached, 1);
+-	update_queuing_status(eth_dev);
++	update_queuing_status(eth_dev, false);
+ 
+ 	VHOST_LOG(INFO, "Vhost device %d created\n", vid);
+ 
+@@ -857,7 +866,7 @@ destroy_device(int vid)
+ 	internal = eth_dev->data->dev_private;
+ 
+ 	rte_atomic32_set(&internal->dev_attached, 0);
+-	update_queuing_status(eth_dev);
++	update_queuing_status(eth_dev, true);
+ 
+ 	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+ 
+@@ -967,6 +976,8 @@ vring_state_changed(int vid, uint16_t vring, int enable)
+ 	state->max_vring = RTE_MAX(vring, state->max_vring);
+ 	rte_spinlock_unlock(&state->lock);
+ 
++	update_queuing_status(eth_dev, false);
++
+ 	VHOST_LOG(INFO, "vring%u is %s\n",
+ 			vring, enable ? "enabled" : "disabled");
+ 
+@@ -1152,7 +1163,7 @@ eth_dev_start(struct rte_eth_dev *eth_dev)
+ 	}
+ 
+ 	rte_atomic32_set(&internal->started, 1);
+-	update_queuing_status(eth_dev);
++	update_queuing_status(eth_dev, false);
+ 
+ 	return 0;
+ }
+@@ -1164,7 +1175,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
+ 
+ 	dev->data->dev_started = 0;
+ 	rte_atomic32_set(&internal->started, 0);
+-	update_queuing_status(dev);
++	update_queuing_status(dev, true);
+ 
+ 	return 0;
+ }
+@@ -1643,11 +1654,11 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
+ 				&open_int, &tso);
+ 		if (ret < 0)
+ 			goto out_free;
++	}
+ 
+-		if (tso == 0) {
+-			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
+-			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
+-		}
++	if (tso == 0) {
++		disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
++		disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
+ 	}
+ 
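Hoisting the tso check out of the rte_kvargs_count() block matters because tso starts at 0: previously the host TSO feature bits were masked only when the user passed the devarg explicitly, so the default path left TSO enabled. A sketch of the intended behavior (feature bits from the virtio headers):

#include <stdint.h>
#include <linux/virtio_net.h>	/* VIRTIO_NET_F_HOST_TSO4/6 */

static uint64_t tso_disable_mask(int tso)
{
	uint64_t disable_flags = 0;

	/* applies both when the "tso" devarg is absent (default 0)
	 * and when the user passes tso=0 explicitly */
	if (tso == 0)
		disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4) |
				 (1ULL << VIRTIO_NET_F_HOST_TSO6);
	return disable_flags;
}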
+ 	if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
+diff --git a/dpdk/drivers/net/virtio/meson.build b/dpdk/drivers/net/virtio/meson.build
+index 01a333ada2..d78b8278c6 100644
+--- a/dpdk/drivers/net/virtio/meson.build
++++ b/dpdk/drivers/net/virtio/meson.build
+@@ -30,11 +30,11 @@ if arch_subdir == 'x86'
+                           c_args: [cflags, '-mavx512f', '-mavx512bw', '-mavx512vl'])
+             objs += virtio_avx512_lib.extract_objects('virtio_rxtx_packed.c')
+             if (toolchain == 'gcc' and cc.version().version_compare('>=8.3.0'))
+-                cflags += '-DVHOST_GCC_UNROLL_PRAGMA'
++                cflags += '-DVIRTIO_GCC_UNROLL_PRAGMA'
+             elif (toolchain == 'clang' and cc.version().version_compare('>=3.7.0'))
+-                cflags += '-DVHOST_CLANG_UNROLL_PRAGMA'
++                cflags += '-DVIRTIO_CLANG_UNROLL_PRAGMA'
+             elif (toolchain == 'icc' and cc.version().version_compare('>=16.0.0'))
+-                cflags += '-DVHOST_ICC_UNROLL_PRAGMA'
++                cflags += '-DVIRTIO_ICC_UNROLL_PRAGMA'
+             endif
+         endif
+     endif
+diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c
+index c2588369b2..d180162abd 100644
+--- a/dpdk/drivers/net/virtio/virtio_ethdev.c
++++ b/dpdk/drivers/net/virtio/virtio_ethdev.c
+@@ -2028,7 +2028,8 @@ virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
+ 
+ 	return 0;
+ restore_key:
+-	memcpy(hw->rss_key, old_rss_key, VIRTIO_NET_RSS_KEY_SIZE);
++	if (rss_conf->rss_key && rss_conf->rss_key_len)
++		memcpy(hw->rss_key, old_rss_key, VIRTIO_NET_RSS_KEY_SIZE);
+ restore_types:
+ 	hw->rss_hash_types = old_hash_types;
+ 
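The guard added on the restore_key path mirrors the condition under which old_rss_key was populated earlier in the function; without it, a failure while updating only the hash types would copy an uninitialized buffer over the live key. A sketch of the save/restore symmetry, with illustrative types and a hypothetical commit_to_device() standing in for the control-queue update:

#include <stdint.h>
#include <string.h>

#define RSS_KEY_SIZE 40	/* illustrative; matches the virtio RSS key size */

static int commit_to_device(void) { return -1; }	/* hypothetical stub */

static int apply_rss_key(uint8_t *live_key, const uint8_t *new_key,
			 size_t new_key_len)
{
	uint8_t old_key[RSS_KEY_SIZE];
	int have_key = new_key != NULL && new_key_len != 0;

	if (have_key) {
		memcpy(old_key, live_key, RSS_KEY_SIZE);	/* save */
		memcpy(live_key, new_key, new_key_len);		/* apply */
	}

	if (commit_to_device() != 0) {
		if (have_key)	/* restore only what was actually saved */
			memcpy(live_key, old_key, RSS_KEY_SIZE);
		return -1;
	}
	return 0;
}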
+@@ -2657,7 +2658,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
+ 	hw->has_rx_offload = rx_offload_enabled(hw);
+ 
+ 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+-		/* Enable vector (0) for Link State Intrerrupt */
++		/* Enable vector (0) for Link State Interrupt */
+ 		if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
+ 				VIRTIO_MSI_NO_VECTOR) {
+ 			PMD_DRV_LOG(ERR, "failed to set config vector");
+@@ -2775,7 +2776,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
+ 		}
+ 	}
+ 
+-	/* Enable uio/vfio intr/eventfd mapping: althrough we already did that
++	/* Enable uio/vfio intr/eventfd mapping: although we already did that
+ 	 * in device configure, but it could be unmapped  when device is
+ 	 * stopped.
+ 	 */
+diff --git a/dpdk/drivers/net/virtio/virtio_pci.c b/dpdk/drivers/net/virtio/virtio_pci.c
+index 182cfc9eae..632451dcbe 100644
+--- a/dpdk/drivers/net/virtio/virtio_pci.c
++++ b/dpdk/drivers/net/virtio/virtio_pci.c
+@@ -235,7 +235,7 @@ legacy_get_isr(struct virtio_hw *hw)
+ 	return dst;
+ }
+ 
+-/* Enable one vector (0) for Link State Intrerrupt */
++/* Enable one vector (0) for Link State Interrupt */
+ static uint16_t
+ legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
+ {
+diff --git a/dpdk/drivers/net/virtio/virtio_pci_ethdev.c b/dpdk/drivers/net/virtio/virtio_pci_ethdev.c
+index 54645dc62e..1f6bdeddda 100644
+--- a/dpdk/drivers/net/virtio/virtio_pci_ethdev.c
++++ b/dpdk/drivers/net/virtio/virtio_pci_ethdev.c
+@@ -122,10 +122,20 @@ static int
+ eth_virtio_pci_uninit(struct rte_eth_dev *eth_dev)
+ {
+ 	int ret;
++	struct virtio_pci_dev *dev;
++	struct virtio_hw *hw;
+ 	PMD_INIT_FUNC_TRACE();
+ 
+-	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
++	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
++		dev = eth_dev->data->dev_private;
++		hw = &dev->hw;
++
++		if (dev->modern)
++			rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
++		else
++			vtpci_legacy_ioport_unmap(hw);
+ 		return 0;
++	}
+ 
+ 	ret = virtio_dev_stop(eth_dev);
+ 	virtio_dev_close(eth_dev);
+diff --git a/dpdk/drivers/net/virtio/virtio_rxtx.c b/dpdk/drivers/net/virtio/virtio_rxtx.c
+index 2e115ded02..4795893ec7 100644
+--- a/dpdk/drivers/net/virtio/virtio_rxtx.c
++++ b/dpdk/drivers/net/virtio/virtio_rxtx.c
+@@ -814,7 +814,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 			unsigned int socket_id __rte_unused,
+ 			const struct rte_eth_txconf *tx_conf)
+ {
+-	uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
++	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ 	struct virtio_hw *hw = dev->data->dev_private;
+ 	struct virtqueue *vq = hw->vqs[vq_idx];
+ 	struct virtnet_tx *txvq;
+@@ -858,7 +858,7 @@ int
+ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+ 				uint16_t queue_idx)
+ {
+-	uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
++	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ 	struct virtio_hw *hw = dev->data->dev_private;
+ 	struct virtqueue *vq = hw->vqs[vq_idx];
+ 
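Widening vq_idx to uint16_t fixes a truncation: TX virtqueue indices are 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX (1), so a uint8_t wraps once queue_idx reaches 128 and setup would silently address the wrong virtqueue. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t queue_idx = 128;		/* first queue past the wrap */
	uint8_t  bad_idx  = 2 * queue_idx + 1;	/* 257 truncates to 1 */
	uint16_t good_idx = 2 * queue_idx + 1;	/* 257, the intended vq */

	printf("bad=%u good=%u\n", bad_idx, good_idx);
	return 0;
}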
+@@ -962,7 +962,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
+ 			return -EINVAL;
+ 		}
+ 
+-		/* Update mss lengthes in mbuf */
++		/* Update mss lengths in mbuf */
+ 		m->tso_segsz = hdr->gso_size;
+ 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ 			case VIRTIO_NET_HDR_GSO_TCPV4:
+diff --git a/dpdk/drivers/net/virtio/virtio_rxtx_packed.h b/dpdk/drivers/net/virtio/virtio_rxtx_packed.h
+index d5c259a1f6..536112983c 100644
+--- a/dpdk/drivers/net/virtio/virtio_rxtx_packed.h
++++ b/dpdk/drivers/net/virtio/virtio_rxtx_packed.h
+@@ -125,13 +125,12 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
+ 	 * any_layout => number of segments
+ 	 * default    => number of segments + 1
+ 	 */
+-	slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
+ 	can_push = rte_mbuf_refcnt_read(txm) == 1 &&
+ 		   RTE_MBUF_DIRECT(txm) &&
+ 		   txm->nb_segs == 1 &&
+ 		   rte_pktmbuf_headroom(txm) >= hdr_size;
+ 
+-	slots = txm->nb_segs + !can_push;
++	slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
+ 	need = slots - vq->vq_free_cnt;
+ 
+ 	/* Positive value indicates it need free vring descriptors */
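
Editor's note: this hunk reorders the slot computation so that can_push is evaluated before slots consumes it, and restores the single-slot indirect-descriptor case that the later unconditional assignment was clobbering. A compressed sketch of the corrected ordering:

    /* Sketch: in C the dependent expression must follow the statement that
     * produces its inputs; here the predicate is computed first, then the
     * slot count is derived from it exactly once. */
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        int  nb_segs = 1;
        bool use_indirect = false;

        /* compute the predicate first (stands in for the refcnt/headroom checks) */
        bool can_push = true;

        /* ... then derive the slot count, keeping the indirect single-slot case */
        int slots = use_indirect ? 1 : (nb_segs + !can_push);
        printf("slots = %d\n", slots);
        return 0;
    }
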
+diff --git a/dpdk/drivers/net/virtio/virtio_rxtx_packed_avx.h b/dpdk/drivers/net/virtio/virtio_rxtx_packed_avx.h
+index 8cb71f3fe6..584ac72f95 100644
+--- a/dpdk/drivers/net/virtio/virtio_rxtx_packed_avx.h
++++ b/dpdk/drivers/net/virtio/virtio_rxtx_packed_avx.h
+@@ -192,7 +192,7 @@ virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
+ 
+ 	/*
+ 	 * load len from desc, store into mbuf pkt_len and data_len
+-	 * len limiated by l6bit buf_len, pkt_len[16:31] can be ignored
++	 * len limited by 16-bit buf_len, pkt_len[16:31] can be ignored
+ 	 */
+ 	const __mmask16 mask = 0x6 | 0x6 << 4 | 0x6 << 8 | 0x6 << 12;
+ 	__m512i values = _mm512_maskz_shuffle_epi32(mask, v_desc, 0xAA);
+diff --git a/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c b/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c
+index 7534974ef4..e7f0ed6068 100644
+--- a/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c
++++ b/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c
+@@ -50,9 +50,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	struct rte_mbuf **sw_ring_end;
+ 	struct rte_mbuf **ref_rx_pkts;
+ 	uint16_t nb_pkts_received = 0;
+-	const vector unsigned char zero = {0};
++	const __vector unsigned char zero = {0};
+ 
+-	const vector unsigned char shuf_msk1 = {
++	const __vector unsigned char shuf_msk1 = {
+ 		0xFF, 0xFF, 0xFF, 0xFF,	/* packet type */
+ 		4, 5, 0xFF, 0xFF, /* vlan tci */
+ 		4, 5,			/* dat len */
+@@ -60,7 +60,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 		0xFF, 0xFF, 0xFF, 0xFF
+ 	};
+ 
+-	const vector unsigned char shuf_msk2 = {
++	const __vector unsigned char shuf_msk2 = {
+ 		0xFF, 0xFF, 0xFF, 0xFF,	/* packet type */
+ 		12, 13, 0xFF, 0xFF,	/* pkt len */
+ 		12, 13,			/* dat len */
+@@ -72,7 +72,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	 * Subtract the header length.
+ 	 *  In which case do we need the header length in used->len ?
+ 	 */
+-	const vector unsigned short len_adjust = {
++	const __vector unsigned short len_adjust = {
+ 		0, 0,
+ 		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
+ 		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
+@@ -112,68 +112,68 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	ref_rx_pkts = rx_pkts;
+ 	for (nb_pkts_received = 0;
+ 		nb_pkts_received < nb_total;) {
+-		vector unsigned char desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
+-		vector unsigned char mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
+-		vector unsigned char pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
++		__vector unsigned char desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
++		__vector unsigned char mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
++		__vector unsigned char pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
+ 
+ 		mbp[0] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 0));
+ 		desc[0] = vec_vsx_ld(0, (unsigned char const *)(rused + 0));
+-		*(vector unsigned char *)&rx_pkts[0] = mbp[0];
++		*(__vector unsigned char *)&rx_pkts[0] = mbp[0];
+ 
+ 		mbp[1] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 2));
+ 		desc[1] = vec_vsx_ld(0, (unsigned char const *)(rused + 2));
+-		*(vector unsigned char *)&rx_pkts[2] = mbp[1];
++		*(__vector unsigned char *)&rx_pkts[2] = mbp[1];
+ 
+ 		mbp[2] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 4));
+ 		desc[2] = vec_vsx_ld(0, (unsigned char const *)(rused + 4));
+-		*(vector unsigned char *)&rx_pkts[4] = mbp[2];
++		*(__vector unsigned char *)&rx_pkts[4] = mbp[2];
+ 
+ 		mbp[3] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 6));
+ 		desc[3] = vec_vsx_ld(0, (unsigned char const *)(rused + 6));
+-		*(vector unsigned char *)&rx_pkts[6] = mbp[3];
++		*(__vector unsigned char *)&rx_pkts[6] = mbp[3];
+ 
+ 		pkt_mb[0] = vec_perm(desc[0], zero, shuf_msk1);
+ 		pkt_mb[1] = vec_perm(desc[0], zero, shuf_msk2);
+-		pkt_mb[0] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[0] + len_adjust);
+-		pkt_mb[1] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[1] + len_adjust);
+-		*(vector unsigned char *)&rx_pkts[0]->rx_descriptor_fields1 =
++		pkt_mb[0] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[0] + len_adjust);
++		pkt_mb[1] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[1] + len_adjust);
++		*(__vector unsigned char *)&rx_pkts[0]->rx_descriptor_fields1 =
+ 			pkt_mb[0];
+-		*(vector unsigned char *)&rx_pkts[1]->rx_descriptor_fields1 =
++		*(__vector unsigned char *)&rx_pkts[1]->rx_descriptor_fields1 =
+ 			pkt_mb[1];
+ 
+ 		pkt_mb[2] = vec_perm(desc[1], zero, shuf_msk1);
+ 		pkt_mb[3] = vec_perm(desc[1], zero, shuf_msk2);
+-		pkt_mb[2] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[2] + len_adjust);
+-		pkt_mb[3] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[3] + len_adjust);
+-		*(vector unsigned char *)&rx_pkts[2]->rx_descriptor_fields1 =
++		pkt_mb[2] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[2] + len_adjust);
++		pkt_mb[3] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[3] + len_adjust);
++		*(__vector unsigned char *)&rx_pkts[2]->rx_descriptor_fields1 =
+ 			pkt_mb[2];
+-		*(vector unsigned char *)&rx_pkts[3]->rx_descriptor_fields1 =
++		*(__vector unsigned char *)&rx_pkts[3]->rx_descriptor_fields1 =
+ 			pkt_mb[3];
+ 
+ 		pkt_mb[4] = vec_perm(desc[2], zero, shuf_msk1);
+ 		pkt_mb[5] = vec_perm(desc[2], zero, shuf_msk2);
+-		pkt_mb[4] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[4] + len_adjust);
+-		pkt_mb[5] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[5] + len_adjust);
+-		*(vector unsigned char *)&rx_pkts[4]->rx_descriptor_fields1 =
++		pkt_mb[4] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[4] + len_adjust);
++		pkt_mb[5] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[5] + len_adjust);
++		*(__vector unsigned char *)&rx_pkts[4]->rx_descriptor_fields1 =
+ 			pkt_mb[4];
+-		*(vector unsigned char *)&rx_pkts[5]->rx_descriptor_fields1 =
++		*(__vector unsigned char *)&rx_pkts[5]->rx_descriptor_fields1 =
+ 			pkt_mb[5];
+ 
+ 		pkt_mb[6] = vec_perm(desc[3], zero, shuf_msk1);
+ 		pkt_mb[7] = vec_perm(desc[3], zero, shuf_msk2);
+-		pkt_mb[6] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[6] + len_adjust);
+-		pkt_mb[7] = (vector unsigned char)
+-			((vector unsigned short)pkt_mb[7] + len_adjust);
+-		*(vector unsigned char *)&rx_pkts[6]->rx_descriptor_fields1 =
++		pkt_mb[6] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[6] + len_adjust);
++		pkt_mb[7] = (__vector unsigned char)
++			((__vector unsigned short)pkt_mb[7] + len_adjust);
++		*(__vector unsigned char *)&rx_pkts[6]->rx_descriptor_fields1 =
+ 			pkt_mb[6];
+-		*(vector unsigned char *)&rx_pkts[7]->rx_descriptor_fields1 =
++		*(__vector unsigned char *)&rx_pkts[7]->rx_descriptor_fields1 =
+ 			pkt_mb[7];
+ 
+ 		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
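
Editor's note: the AltiVec hunks above replace the bare `vector` keyword with the context-safe `__vector` spelling, which avoids collisions with other uses of the identifier (for example std::vector in C++ consumers of the header). A minimal POWER-only sketch, compiled with -maltivec:

    #include <altivec.h>
    #include <stdio.h>

    int main(void)
    {
        __vector unsigned char zero = {0};
        __vector unsigned char ones = vec_splat_u8(1);   /* all lanes = 1 */
        __vector unsigned char sum  = vec_add(zero, ones);

        printf("lane 0 = %u\n", (unsigned)sum[0]);       /* GCC vector subscript */
        return 0;
    }
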
+diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
+index cc830a660f..77820bf967 100644
+--- a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
++++ b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
+@@ -840,8 +840,10 @@ vhost_user_setup(struct virtio_user_dev *dev)
+ 	}
+ 
+ 	flag = fcntl(fd, F_GETFD);
+-	if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) < 0)
+-		PMD_DRV_LOG(WARNING, "fcntl failed, %s", strerror(errno));
++	if (flag == -1)
++		PMD_DRV_LOG(WARNING, "fcntl get fd failed, %s", strerror(errno));
++	else if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) < 0)
++		PMD_DRV_LOG(WARNING, "fcntl set fd failed, %s", strerror(errno));
+ 
+ 	memset(&un, 0, sizeof(un));
+ 	un.sun_family = AF_UNIX;
+@@ -940,15 +942,8 @@ vhost_user_update_link_state(struct virtio_user_dev *dev)
+ 
+ 	if (data->vhostfd >= 0) {
+ 		int r;
+-		int flags;
+ 
+-		flags = fcntl(data->vhostfd, F_GETFL);
+-		if (fcntl(data->vhostfd, F_SETFL, flags | O_NONBLOCK) == -1) {
+-			PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
+-			return -1;
+-		}
+-
+-		r = recv(data->vhostfd, buf, 128, MSG_PEEK);
++		r = recv(data->vhostfd, buf, 128, MSG_PEEK | MSG_DONTWAIT);
+ 		if (r == 0 || (r < 0 && errno != EAGAIN)) {
+ 			dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
+ 			PMD_DRV_LOG(ERR, "virtio-user port %u is down", dev->hw.port_id);
+@@ -963,12 +958,6 @@ vhost_user_update_link_state(struct virtio_user_dev *dev)
+ 		} else {
+ 			dev->net_status |= VIRTIO_NET_S_LINK_UP;
+ 		}
+-
+-		if (fcntl(data->vhostfd, F_SETFL,
+-					flags & ~O_NONBLOCK) == -1) {
+-			PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
+-			return -1;
+-		}
+ 	} else if (dev->is_server) {
+ 		dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
+ 		if (virtio_user_dev_server_reconnect(dev) >= 0)
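
Editor's note: both vhost_user.c hunks harden fd handling. The F_GETFD result is now checked before being reused, and the link-state probe uses a single non-blocking MSG_PEEK | MSG_DONTWAIT receive instead of toggling O_NONBLOCK around a blocking recv(). A sketch of the peek pattern for a connected stream socket:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    static int peer_is_up(int fd)
    {
        char buf[128];
        ssize_t r = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);

        if (r == 0)
            return 0;   /* orderly shutdown: peer is gone */
        if (r < 0 && errno != EAGAIN && errno != EWOULDBLOCK)
            return 0;   /* hard error: treat the link as down */
        return 1;       /* data pending, or simply nothing to read yet */
    }

    int main(void)
    {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
            return 1;
        printf("link up: %d\n", peer_is_up(sv[0])); /* 1: empty but connected */
        return 0;
    }
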
+diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
+index 35aa76b1ff..f9cada05e4 100644
+--- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
++++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
+@@ -417,7 +417,7 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
+ 
+ 	for (i = 0; i < dev->max_queue_pairs; ++i) {
+ 		if (rte_intr_efds_index_set(eth_dev->intr_handle, i,
+-				dev->callfds[i]))
++				dev->callfds[2 * i + VTNET_SQ_RQ_QUEUE_IDX]))
+ 			return -rte_errno;
+ 	}
+ 
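
Editor's note: the callfds fix above accounts for the interleaved RX/TX virtqueue layout, where the per-pair RX call fd sits at index 2*i, not i. A toy illustration (RQ_IDX mirrors VTNET_SQ_RQ_QUEUE_IDX, which is 0 in DPDK):

    #include <stdio.h>

    #define RQ_IDX 0   /* stands in for VTNET_SQ_RQ_QUEUE_IDX */

    int main(void)
    {
        int callfds[8] = {10, 11, 12, 13, 14, 15, 16, 17}; /* RX0,TX0,RX1,TX1,... */
        int max_queue_pairs = 4;

        for (int i = 0; i < max_queue_pairs; i++)
            printf("pair %d: rx callfd %d\n", i, callfds[2 * i + RQ_IDX]);
        return 0;
    }
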
+diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c
+index 0271098f0d..16eca2f940 100644
+--- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c
++++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c
+@@ -666,6 +666,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
+ 	/* previously called by pci probing for physical dev */
+ 	if (eth_virtio_dev_init(eth_dev) < 0) {
+ 		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
++		virtio_user_dev_uninit(dev);
+ 		virtio_user_eth_dev_free(eth_dev);
+ 		goto end;
+ 	}
+diff --git a/dpdk/drivers/net/virtio/virtqueue.c b/dpdk/drivers/net/virtio/virtqueue.c
+index 65bf792eb0..c98d696e62 100644
+--- a/dpdk/drivers/net/virtio/virtqueue.c
++++ b/dpdk/drivers/net/virtio/virtqueue.c
+@@ -13,7 +13,7 @@
+ /*
+  * Two types of mbuf to be cleaned:
+  * 1) mbuf that has been consumed by backend but not used by virtio.
+- * 2) mbuf that hasn't been consued by backend.
++ * 2) mbuf that hasn't been consumed by backend.
+  */
+ struct rte_mbuf *
+ virtqueue_detach_unused(struct virtqueue *vq)
+diff --git a/dpdk/drivers/net/virtio/virtqueue.h b/dpdk/drivers/net/virtio/virtqueue.h
+index 855f57a956..99c68cf622 100644
+--- a/dpdk/drivers/net/virtio/virtqueue.h
++++ b/dpdk/drivers/net/virtio/virtqueue.h
+@@ -227,7 +227,7 @@ struct virtio_net_ctrl_rss {
+  * Control link announce acknowledgement
+  *
+  * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+- * driver has recevied the notification; device would clear the
++ * driver has received the notification; device would clear the
+  * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+  * this command.
+  */
+@@ -312,7 +312,7 @@ struct virtqueue {
+ 	struct vq_desc_extra vq_descx[0];
+ };
+ 
+-/* If multiqueue is provided by host, then we suppport it. */
++/* If multiqueue is provided by host, then we support it. */
+ #define VIRTIO_NET_CTRL_MQ   4
+ 
+ #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0
+diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
+index d1ef1cad08..d4aea74026 100644
+--- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
+@@ -822,6 +822,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
+ 		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
+ 		rqd->conf.compRingSize    = rxq->comp_ring.size;
+ 
++		if (VMXNET3_VERSION_GE_3(hw)) {
++			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
++			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
++		}
++
+ 		if (hw->intr.lsc_only)
+ 			rqd->conf.intrIdx = 1;
+ 		else
+diff --git a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+index de26d2aef3..ebc2cd5d0d 100644
+--- a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
++++ b/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+@@ -653,7 +653,7 @@ dpdmai_dev_dequeue_multijob_prefetch(
+ 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
+ 
+ 	/* Prepare next pull descriptor. This will give space for the
+-	 * prefething done on DQRR entries
++	 * prefetching done on DQRR entries
+ 	 */
+ 	q_storage->toggle ^= 1;
+ 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+diff --git a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
+index d6f6bb5522..1973d5d2b2 100644
+--- a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
++++ b/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
+@@ -82,7 +82,7 @@ struct qdma_device {
+ 	/** total number of hw queues. */
+ 	uint16_t num_hw_queues;
+ 	/**
+-	 * Maximum number of hw queues to be alocated per core.
++	 * Maximum number of hw queues to be allocated per core.
+ 	 * This is limited by MAX_HW_QUEUE_PER_CORE
+ 	 */
+ 	uint16_t max_hw_queues_per_core;
+@@ -268,7 +268,7 @@ struct dpaa2_dpdmai_dev {
+ 	struct fsl_mc_io dpdmai;
+ 	/** HW ID for DPDMAI object */
+ 	uint32_t dpdmai_id;
+-	/** Tocken of this device */
++	/** Token of this device */
+ 	uint16_t token;
+ 	/** Number of queue in this DPDMAI device */
+ 	uint8_t num_queues;
+diff --git a/dpdk/drivers/raw/ifpga/base/ifpga_defines.h b/dpdk/drivers/raw/ifpga/base/ifpga_defines.h
+index dca1518a83..8f6203392b 100644
+--- a/dpdk/drivers/raw/ifpga/base/ifpga_defines.h
++++ b/dpdk/drivers/raw/ifpga/base/ifpga_defines.h
+@@ -93,9 +93,9 @@ enum fpga_id_type {
+ 
+ #define PORT_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+ #define PORT_FEATURE_ID_ERROR 0x10
+-#define PORT_FEATURE_ID_UMSG 0x12
+-#define PORT_FEATURE_ID_UINT 0x13
+-#define PORT_FEATURE_ID_STP 0x14
++#define PORT_FEATURE_ID_UMSG 0x11
++#define PORT_FEATURE_ID_UINT 0x12
++#define PORT_FEATURE_ID_STP 0x13
+ #define PORT_FEATURE_ID_UAFU FEATURE_ID_AFU
+ 
+ /*
+diff --git a/dpdk/drivers/raw/ifpga/base/opae_spi.c b/dpdk/drivers/raw/ifpga/base/opae_spi.c
+index 9efeecb791..ca3d41fb92 100644
+--- a/dpdk/drivers/raw/ifpga/base/opae_spi.c
++++ b/dpdk/drivers/raw/ifpga/base/opae_spi.c
+@@ -239,6 +239,18 @@ int spi_command(struct altera_spi_device *dev, unsigned int chip_select,
+ 	return 0;
+ }
+ 
++int spi_write(struct altera_spi_device *dev, unsigned int chip_select,
++		unsigned int wlen, void *wdata)
++{
++	return spi_command(dev, chip_select, wlen, wdata, 0, NULL);
++}
++
++int spi_read(struct altera_spi_device *dev, unsigned int chip_select,
++		unsigned int rlen, void *rdata)
++{
++	return spi_command(dev, chip_select, 0, NULL, rlen, rdata);
++}
++
+ struct altera_spi_device *altera_spi_alloc(void *base, int type)
+ {
+ 	struct altera_spi_device *spi_dev =
+diff --git a/dpdk/drivers/raw/ifpga/base/opae_spi.h b/dpdk/drivers/raw/ifpga/base/opae_spi.h
+index af11656e4d..bcff67dd66 100644
+--- a/dpdk/drivers/raw/ifpga/base/opae_spi.h
++++ b/dpdk/drivers/raw/ifpga/base/opae_spi.h
+@@ -117,6 +117,10 @@ struct spi_tran_header {
+ 	u32 addr;
+ };
+ 
++int spi_read(struct altera_spi_device *dev, unsigned int chip_select,
++		unsigned int rlen, void *rdata);
++int spi_write(struct altera_spi_device *dev, unsigned int chip_select,
++		unsigned int wlen, void *wdata);
+ int spi_command(struct altera_spi_device *dev, unsigned int chip_select,
+ 		unsigned int wlen, void *wdata, unsigned int rlen, void *rdata);
+ void spi_cs_deactivate(struct altera_spi_device *dev);
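
Editor's note: the new spi_read()/spi_write() helpers are thin wrappers that drive the existing full-duplex spi_command() in one direction only, passing a zero-length buffer for the unused side. A sketch of the same wrapper pattern over a stub transport:

    #include <stdio.h>

    /* stub full-duplex primitive, standing in for spi_command() */
    static int xfer(unsigned wlen, const void *wdata, unsigned rlen, void *rdata)
    {
        printf("xfer: write %u bytes, read %u bytes\n", wlen, rlen);
        (void)wdata; (void)rdata;
        return 0;
    }

    static int xfer_write(unsigned wlen, const void *wdata)
    {
        return xfer(wlen, wdata, 0, NULL);   /* write-only leg */
    }

    static int xfer_read(unsigned rlen, void *rdata)
    {
        return xfer(0, NULL, rlen, rdata);   /* read-only leg */
    }

    int main(void)
    {
        unsigned char cmd[4] = {0}, resp[8];
        xfer_write(sizeof(cmd), cmd);
        xfer_read(sizeof(resp), resp);
        return 0;
    }
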
+diff --git a/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c b/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c
+index 006cdb4c1a..cd50d40629 100644
+--- a/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c
++++ b/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c
+@@ -40,7 +40,7 @@ static void print_buffer(const char *string, void *buffer, int len)
+ 	printf("%s print buffer, len=%d\n", string, len);
+ 
+ 	for (i = 0; i < len; i++)
+-		printf("%x ", *(p+i));
++		printf("%02x ", *(p+i));
+ 	printf("\n");
+ }
+ #else
+@@ -72,43 +72,6 @@ static void reorder_phy_data(u8 bits_per_word,
+ 	}
+ }
+ 
+-enum {
+-	SPI_FOUND_SOP,
+-	SPI_FOUND_EOP,
+-	SPI_NOT_FOUND,
+-};
+-
+-static int resp_find_sop_eop(unsigned char *resp, unsigned int len,
+-		int flags)
+-{
+-	int ret = SPI_NOT_FOUND;
+-
+-	unsigned char *b = resp;
+-
+-	/* find SOP */
+-	if (flags != SPI_FOUND_SOP) {
+-		while (b < resp + len && *b != SPI_PACKET_SOP)
+-			b++;
+-
+-		if (*b != SPI_PACKET_SOP)
+-			goto done;
+-
+-		ret = SPI_FOUND_SOP;
+-	}
+-
+-	/* find EOP */
+-	while (b < resp + len && *b != SPI_PACKET_EOP)
+-		b++;
+-
+-	if (*b != SPI_PACKET_EOP)
+-		goto done;
+-
+-	ret = SPI_FOUND_EOP;
+-
+-done:
+-	return ret;
+-}
+-
+ static void phy_tx_pad(unsigned char *phy_buf, unsigned int phy_buf_len,
+ 		unsigned int *aligned_len)
+ {
+@@ -137,6 +100,104 @@ static void phy_tx_pad(unsigned char *phy_buf, unsigned int phy_buf_len,
+ 		*p++ = SPI_BYTE_IDLE;
+ }
+ 
++#define RX_ALL_IDLE_DATA (SPI_BYTE_IDLE << 24 | SPI_BYTE_IDLE << 16 |	\
++			 SPI_BYTE_IDLE << 8 | SPI_BYTE_IDLE)
++
++static bool all_idle_data(u8 *rxbuf)
++{
++	return *(u32 *)rxbuf == RX_ALL_IDLE_DATA;
++}
++
++static unsigned char *find_eop(u8 *rxbuf, u32 BPW)
++{
++	return memchr(rxbuf, SPI_PACKET_EOP, BPW);
++}
++
++static int do_spi_txrx(struct spi_transaction_dev *dev,
++		unsigned char *tx_buffer,
++		unsigned int tx_len, unsigned char *rx_buffer,
++		unsigned int rx_len,
++		unsigned int *actual_rx)
++{
++	unsigned int rx_cnt = 0;
++	int ret = 0;
++	unsigned int BPW = 4;
++	bool eop_found = false;
++	unsigned char *eop;
++	unsigned char *ptr;
++	unsigned char *rxbuf = rx_buffer;
++	int add_byte = 0;
++	unsigned long ticks;
++	unsigned long timeout;
++
++	/* send command */
++	ret = spi_write(dev->dev, dev->chipselect, tx_len, tx_buffer);
++	if (ret)
++		return -EBUSY;
++
++	timeout = rte_get_timer_cycles() +
++				msecs_to_timer_cycles(2000);
++
++	/* read out data */
++	while (rx_cnt < rx_len) {
++		ret = spi_read(dev->dev, dev->chipselect, BPW, rxbuf);
++		if (ret)
++			return -EBUSY;
++
++		/* skip all of invalid data */
++		if (!eop_found && all_idle_data(rxbuf)) {
++			ticks = rte_get_timer_cycles();
++			if (!time_after(ticks, timeout)) {
++				continue;
++			} else {
++				dev_err(dev, "read spi data timeout\n");
++				return -ETIMEDOUT;
++			}
++		}
++
++		rx_cnt += BPW;
++		if (!eop_found) {
++			/* If EOP is found, read 2 more bytes and exit. */
++			eop = find_eop(rxbuf, BPW);
++			if (eop) {
++				if ((BPW + rxbuf - eop) > 2) {
++					/*
++					 * check if the last 2 bytes are already
++					 * received in current word.
++					 */
++					break;
++				} else if ((BPW + rxbuf - eop) == 2) {
++					/*
++					 * skip if last byte is not SPI_BYTE_ESC
++					 * or SPI_PACKET_ESC. this is the valid
++					 * end of a response too.
++					 */
++					ptr = eop + 1;
++
++					if (*ptr != SPI_BYTE_ESC &&
++							*ptr != SPI_PACKET_ESC)
++						break;
++
++					add_byte = 1;
++				} else {
++					add_byte = 2;
++				}
++
++				rx_len = min(rx_len,
++						IFPGA_ALIGN(rx_cnt +
++							add_byte, BPW));
++				eop_found = true;
++			}
++		}
++		rxbuf += BPW;
++	}
++
++	*actual_rx = rx_cnt;
++	print_buffer("found valid data:", rx_buffer, rx_cnt);
++
++	return ret;
++}
++
+ static int byte_to_core_convert(struct spi_transaction_dev *dev,
+ 		unsigned int send_len, unsigned char *send_data,
+ 		unsigned int resp_len, unsigned char *resp_data,
+@@ -148,15 +209,9 @@ static int byte_to_core_convert(struct spi_transaction_dev *dev,
+ 	unsigned char *resp_packet = dev->buffer->bytes_resp;
+ 	unsigned char *p;
+ 	unsigned char current_byte;
+-	unsigned char *tx_buffer;
+ 	unsigned int tx_len = 0;
+-	unsigned char *rx_buffer;
+-	unsigned int rx_len = 0;
+-	int retry = 0;
+-	int spi_flags;
+-	unsigned long timeout = msecs_to_timer_cycles(1000);
+-	unsigned long ticks;
+ 	unsigned int resp_max_len = 2 * resp_len;
++	unsigned int actual_rx;
+ 
+ 	print_buffer("before bytes:", send_data, send_len);
+ 
+@@ -190,48 +245,15 @@ static int byte_to_core_convert(struct spi_transaction_dev *dev,
+ 
+ 	print_buffer("after order to spi:", send_packet, tx_len);
+ 
+-	/* call spi */
+-	tx_buffer = send_packet;
+-	rx_buffer = resp_packet;
+-	rx_len = resp_max_len;
+-	spi_flags = SPI_NOT_FOUND;
+-
+-read_again:
+-	ret = spi_command(dev->dev, dev->chipselect, tx_len, tx_buffer,
+-			rx_len, rx_buffer);
++	ret = do_spi_txrx(dev, send_packet, tx_len, resp_packet,
++			resp_max_len, &actual_rx);
+ 	if (ret)
+-		return -EBUSY;
+-
+-	print_buffer("read from spi:", rx_buffer, rx_len);
+-
+-	/* look for SOP firstly*/
+-	ret = resp_find_sop_eop(rx_buffer, rx_len - 1, spi_flags);
+-	if (ret != SPI_FOUND_EOP) {
+-		tx_buffer = NULL;
+-		tx_len = 0;
+-		ticks = rte_get_timer_cycles();
+-		if (time_after(ticks, timeout) &&
+-				retry++ > SPI_MAX_RETRY) {
+-			dev_err(NULL, "Have retry %d, found invalid packet data\n",
+-				retry);
+-			return -EBUSY;
+-		}
+-
+-		if (ret == SPI_FOUND_SOP) {
+-			rx_buffer += rx_len;
+-			resp_max_len += rx_len;
+-		}
+-
+-		spi_flags = ret;
+-		goto read_again;
+-	}
+-
+-	print_buffer("found valid data:", resp_packet, resp_max_len);
++		return ret;
+ 
+ 	/* analyze response packet */
+ 	i = 0;
+ 	p = resp_data;
+-	while (i < resp_max_len) {
++	while (i < actual_rx) {
+ 		current_byte = resp_packet[i];
+ 		switch (current_byte) {
+ 		case SPI_BYTE_IDLE:
+@@ -337,9 +359,13 @@ static int packet_to_byte_conver(struct spi_transaction_dev *dev,
+ 		current_byte = resp_packet[i];
+ 
+ 		switch (current_byte) {
+-		case SPI_PACKET_ESC:
+-		case SPI_PACKET_CHANNEL:
+ 		case SPI_PACKET_SOP:
++			dev_err(dev, "error: got SOP after SOP\n");
++			return -EINVAL;
++		case SPI_PACKET_CHANNEL:
++			i += 2;
++			break;
++		case SPI_PACKET_ESC:
+ 			i++;
+ 			current_byte = resp_packet[i];
+ 			*p++ = xor_20(current_byte);
+@@ -348,23 +374,30 @@ static int packet_to_byte_conver(struct spi_transaction_dev *dev,
+ 		case SPI_PACKET_EOP:
+ 			i++;
+ 			current_byte = resp_packet[i];
+-			if (current_byte == SPI_PACKET_ESC ||
+-					current_byte == SPI_PACKET_CHANNEL ||
+-					current_byte == SPI_PACKET_SOP) {
++			switch (current_byte) {
++			case SPI_PACKET_ESC:
+ 				i++;
+ 				current_byte = resp_packet[i];
+ 				*p++ = xor_20(current_byte);
+-			} else
++				break;
++			case SPI_PACKET_CHANNEL:
++			case SPI_PACKET_SOP:
++			case SPI_PACKET_EOP:
++				dev_err(dev, "error: got SOP/EOP after EOP\n");
++				return -EINVAL;
++			default:
+ 				*p++ = current_byte;
+-			i = valid_resp_len;
+-			break;
++				break;
++			}
++			goto done;
++
+ 		default:
+ 			*p++ = current_byte;
+ 			i++;
+ 		}
+-
+ 	}
+ 
++done:
+ 	*valid = p - resp_buf;
+ 
+ 	print_buffer("after packet:", resp_buf, *valid);
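
Editor's note: the rewritten receive path (do_spi_txrx above) polls the bus word by word, skips all-idle words, and bounds the wait with an absolute deadline instead of a retry counter. A sketch of that deadline loop, with standard C timing standing in for rte_get_timer_cycles()/msecs_to_timer_cycles() and a made-up idle pattern:

    #include <stdio.h>
    #include <time.h>

    static int read_word(unsigned *w)   /* stub: pretend the bus stays idle */
    {
        *w = 0x4a4a4a4a;                /* illustrative all-idle pattern */
        return 0;
    }

    int main(void)
    {
        const unsigned idle = 0x4a4a4a4a;
        clock_t deadline = clock() + 2 * CLOCKS_PER_SEC;  /* ~2 s budget */
        unsigned w;

        for (;;) {
            if (read_word(&w))
                return -1;              /* bus error */
            if (w != idle)
                break;                  /* first valid word: start consuming */
            if (clock() > deadline) {   /* busy-waits here, as a sketch */
                fprintf(stderr, "read spi data timeout\n");
                return 1;
            }
        }
        return 0;
    }
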
+diff --git a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c
+index 8d9db585a4..cb0427157a 100644
+--- a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c
++++ b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c
+@@ -68,13 +68,9 @@ static const struct rte_pci_id pci_ifpga_map[] = {
+ 
+ static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
+ 
+-static int ifpga_monitor_start;
++static int ifpga_monitor_refcnt;
+ static pthread_t ifpga_monitor_start_thread;
+ 
+-#define IFPGA_MAX_IRQ 12
+-/* 0 for FME interrupt, others are reserved for AFU irq */
+-static struct rte_intr_handle *ifpga_irq_handle[IFPGA_MAX_IRQ];
+-
+ static struct ifpga_rawdev *
+ ifpga_rawdev_allocate(struct rte_rawdev *rawdev);
+ static int set_surprise_link_check_aer(
+@@ -82,6 +78,7 @@ static int set_surprise_link_check_aer(
+ static int ifpga_pci_find_next_ext_capability(unsigned int fd,
+ 					      int start, uint32_t cap);
+ static int ifpga_pci_find_ext_capability(unsigned int fd, uint32_t cap);
++static void fme_interrupt_handler(void *param);
+ 
+ struct ifpga_rawdev *
+ ifpga_rawdev_get(const struct rte_rawdev *rawdev)
+@@ -118,6 +115,7 @@ ifpga_rawdev_allocate(struct rte_rawdev *rawdev)
+ {
+ 	struct ifpga_rawdev *dev;
+ 	uint16_t dev_id;
++	int i = 0;
+ 
+ 	dev = ifpga_rawdev_get(rawdev);
+ 	if (dev != NULL) {
+@@ -134,6 +132,11 @@ ifpga_rawdev_allocate(struct rte_rawdev *rawdev)
+ 	dev = &ifpga_rawdevices[dev_id];
+ 	dev->rawdev = rawdev;
+ 	dev->dev_id = dev_id;
++	for (i = 0; i < IFPGA_MAX_IRQ; i++)
++		dev->intr_handle[i] = NULL;
++	dev->poll_enabled = 0;
++	for (i = 0; i < IFPGA_MAX_VDEV; i++)
++		dev->vdev_name[i] = NULL;
+ 
+ 	return dev;
+ }
+@@ -208,15 +211,16 @@ static int ifpga_get_dev_vendor_id(const char *bdf,
+ 
+ 	return 0;
+ }
+-static int ifpga_rawdev_fill_info(struct ifpga_rawdev *ifpga_dev,
+-	const char *bdf)
++
++static int ifpga_rawdev_fill_info(struct ifpga_rawdev *ifpga_dev)
+ {
+-	char path[1024] = "/sys/bus/pci/devices/0000:";
++	struct opae_adapter *adapter = NULL;
++	char path[1024] = "/sys/bus/pci/devices/";
+ 	char link[1024], link1[1024];
+ 	char dir[1024] = "/sys/devices/";
+ 	char *c;
+ 	int ret;
+-	char sub_brg_bdf[4][16];
++	char sub_brg_bdf[4][16] = {{0}};
+ 	int point;
+ 	DIR *dp = NULL;
+ 	struct dirent *entry;
+@@ -224,9 +228,14 @@ static int ifpga_rawdev_fill_info(struct ifpga_rawdev *ifpga_dev,
+ 
+ 	unsigned int dom, bus, dev;
+ 	int func;
+-	uint32_t dev_id, vendor_id;
++	uint32_t dev_id = 0;
++	uint32_t vendor_id = 0;
+ 
+-	strlcat(path, bdf, sizeof(path));
++	adapter = ifpga_dev ? ifpga_rawdev_get_priv(ifpga_dev->rawdev) : NULL;
++	if (!adapter)
++		return -ENODEV;
++
++	strlcat(path, adapter->name, sizeof(path));
+ 	memset(link, 0, sizeof(link));
+ 	memset(link1, 0, sizeof(link1));
+ 	ret = readlink(path, link, (sizeof(link)-1));
+@@ -376,13 +385,13 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev,
+ 		/* monitor temperature sensors */
+ 		if (!strcmp(sensor->name, "Board Temperature") ||
+ 				!strcmp(sensor->name, "FPGA Die Temperature")) {
+-			IFPGA_RAWDEV_PMD_INFO("read sensor %s %d %d %d\n",
++			IFPGA_RAWDEV_PMD_DEBUG("read sensor %s %d %d %d\n",
+ 					sensor->name, value, sensor->high_warn,
+ 					sensor->high_fatal);
+ 
+ 			if (HIGH_WARN(sensor, value) ||
+ 				LOW_WARN(sensor, value)) {
+-				IFPGA_RAWDEV_PMD_INFO("%s reach theshold %d\n",
++				IFPGA_RAWDEV_PMD_INFO("%s reached threshold %d\n",
+ 					sensor->name, value);
+ 				*gsd_start = true;
+ 				break;
+@@ -393,7 +402,7 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev,
+ 		if (!strcmp(sensor->name, "12V AUX Voltage")) {
+ 			if (value < AUX_VOLTAGE_WARN) {
+ 				IFPGA_RAWDEV_PMD_INFO(
+-					"%s reach theshold %d mV\n",
++					"%s reached threshold %d mV\n",
+ 					sensor->name, value);
+ 				*gsd_start = true;
+ 				break;
+@@ -418,7 +427,7 @@ static int set_surprise_link_check_aer(
+ 	bool enable = 0;
+ 	uint32_t aer_new0, aer_new1;
+ 
+-	if (!ifpga_rdev) {
++	if (!ifpga_rdev || !ifpga_rdev->rawdev) {
+ 		printf("\n device does not exist\n");
+ 		return -EFAULT;
+ 	}
+@@ -441,12 +450,12 @@ static int set_surprise_link_check_aer(
+ 		pos = ifpga_pci_find_ext_capability(fd, RTE_PCI_EXT_CAP_ID_ERR);
+ 		if (!pos)
+ 			goto end;
+-		/* save previout ECAP_AER+0x08 */
++		/* save previous ECAP_AER+0x08 */
+ 		ret = pread(fd, &data, sizeof(data), pos+0x08);
+ 		if (ret == -1)
+ 			goto end;
+ 		ifpga_rdev->aer_old[0] = data;
+-		/* save previout ECAP_AER+0x14 */
++		/* save previous ECAP_AER+0x14 */
+ 		ret = pread(fd, &data, sizeof(data), pos+0x14);
+ 		if (ret == -1)
+ 			goto end;
+@@ -497,11 +506,11 @@ ifpga_rawdev_gsd_handle(__rte_unused void *param)
+ 	int gsd_enable, ret;
+ #define MS 1000
+ 
+-	while (1) {
++	while (__atomic_load_n(&ifpga_monitor_refcnt, __ATOMIC_RELAXED)) {
+ 		gsd_enable = 0;
+ 		for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
+ 			ifpga_rdev = &ifpga_rawdevices[i];
+-			if (ifpga_rdev->rawdev) {
++			if (ifpga_rdev->poll_enabled) {
+ 				ret = set_surprise_link_check_aer(ifpga_rdev,
+ 					gsd_enable);
+ 				if (ret == 1 && !gsd_enable) {
+@@ -521,30 +530,46 @@ ifpga_rawdev_gsd_handle(__rte_unused void *param)
+ }
+ 
+ static int
+-ifpga_monitor_start_func(void)
++ifpga_monitor_start_func(struct ifpga_rawdev *dev)
+ {
+ 	int ret;
+ 
+-	if (ifpga_monitor_start == 0) {
++	if (!dev)
++		return -ENODEV;
++
++	ret = ifpga_rawdev_fill_info(dev);
++	if (ret)
++		return ret;
++
++	dev->poll_enabled = 1;
++
++	if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) {
+ 		ret = rte_ctrl_thread_create(&ifpga_monitor_start_thread,
+ 					     "ifpga-monitor", NULL,
+ 					     ifpga_rawdev_gsd_handle, NULL);
+ 		if (ret != 0) {
++			ifpga_monitor_start_thread = 0;
+ 			IFPGA_RAWDEV_PMD_ERR(
+-				"Fail to create ifpga nonitor thread");
++				"Failed to create ifpga monitor thread");
+ 			return -1;
+ 		}
+-		ifpga_monitor_start = 1;
+ 	}
+ 
+ 	return 0;
+ }
++
+ static int
+-ifpga_monitor_stop_func(void)
++ifpga_monitor_stop_func(struct ifpga_rawdev *dev)
+ {
+ 	int ret;
+ 
+-	if (ifpga_monitor_start == 1) {
++	if (!dev || !dev->poll_enabled)
++		return 0;
++
++	dev->poll_enabled = 0;
++
++	if (!__atomic_sub_fetch(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) &&
++		ifpga_monitor_start_thread) {
+ 		ret = pthread_cancel(ifpga_monitor_start_thread);
+ 		if (ret)
+ 			IFPGA_RAWDEV_PMD_ERR("Can't cancel the thread");
+@@ -553,8 +578,6 @@ ifpga_monitor_stop_func(void)
+ 		if (ret)
+ 			IFPGA_RAWDEV_PMD_ERR("Can't join the thread");
+ 
+-		ifpga_monitor_start = 0;
+-
+ 		return ret;
+ 	}
+ 
+@@ -716,17 +739,38 @@ ifpga_rawdev_stop(struct rte_rawdev *dev)
+ static int
+ ifpga_rawdev_close(struct rte_rawdev *dev)
+ {
++	struct ifpga_rawdev *ifpga_rdev = NULL;
+ 	struct opae_adapter *adapter;
++	struct opae_manager *mgr;
++	char *vdev_name = NULL;
++	int i, ret = 0;
+ 
+ 	if (dev) {
++		ifpga_rdev = ifpga_rawdev_get(dev);
++		if (ifpga_rdev) {
++			for (i = 0; i < IFPGA_MAX_VDEV; i++) {
++				vdev_name = ifpga_rdev->vdev_name[i];
++				if (vdev_name)
++					rte_vdev_uninit(vdev_name);
++			}
++			ifpga_monitor_stop_func(ifpga_rdev);
++			ifpga_rdev->rawdev = NULL;
++		}
+ 		adapter = ifpga_rawdev_get_priv(dev);
+ 		if (adapter) {
++			mgr = opae_adapter_get_mgr(adapter);
++			if (ifpga_rdev && mgr) {
++				if (ifpga_unregister_msix_irq(ifpga_rdev,
++					IFPGA_FME_IRQ, 0,
++					fme_interrupt_handler, mgr) < 0)
++					ret = -EINVAL;
++			}
+ 			opae_adapter_destroy(adapter);
+ 			opae_adapter_data_free(adapter->data);
+ 		}
+ 	}
+ 
+-	return dev ? 0:1;
++	return ret;
+ }
+ 
+ static int
+@@ -1341,49 +1385,62 @@ fme_interrupt_handler(void *param)
+ }
+ 
+ int
+-ifpga_unregister_msix_irq(enum ifpga_irq_type type,
++ifpga_unregister_msix_irq(struct ifpga_rawdev *dev, enum ifpga_irq_type type,
+ 		int vec_start, rte_intr_callback_fn handler, void *arg)
+ {
+-	struct rte_intr_handle *intr_handle;
+-	int rc, i;
++	struct rte_intr_handle **intr_handle;
++	int rc = 0;
++	int i = vec_start + 1;
++
++	if (!dev)
++		return -ENODEV;
+ 
+ 	if (type == IFPGA_FME_IRQ)
+-		intr_handle = ifpga_irq_handle[0];
++		intr_handle = (struct rte_intr_handle **)&dev->intr_handle[0];
+ 	else if (type == IFPGA_AFU_IRQ)
+-		intr_handle = ifpga_irq_handle[vec_start + 1];
++		intr_handle = (struct rte_intr_handle **)&dev->intr_handle[i];
+ 	else
+-		return 0;
++		return -EINVAL;
+ 
+-	rte_intr_efd_disable(intr_handle);
++	if ((*intr_handle) == NULL) {
++		IFPGA_RAWDEV_PMD_ERR("%s interrupt %d not registered\n",
++			type == IFPGA_FME_IRQ ? "FME" : "AFU",
++			type == IFPGA_FME_IRQ ? 0 : vec_start);
++		return -ENOENT;
++	}
+ 
+-	rc = rte_intr_callback_unregister(intr_handle, handler, arg);
++	rte_intr_efd_disable(*intr_handle);
++
++	rc = rte_intr_callback_unregister(*intr_handle, handler, arg);
++	if (rc < 0) {
++		IFPGA_RAWDEV_PMD_ERR("Failed to unregister %s interrupt %d\n",
++			type == IFPGA_FME_IRQ ? "FME" : "AFU",
++			type == IFPGA_FME_IRQ ? 0 : vec_start);
++	} else {
++		rte_intr_instance_free(*intr_handle);
++		*intr_handle = NULL;
++	}
+ 
+-	for (i = 0; i < IFPGA_MAX_IRQ; i++)
+-		rte_intr_instance_free(ifpga_irq_handle[i]);
+ 	return rc;
+ }
+ 
+ int
+-ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id,
++ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id,
+ 		enum ifpga_irq_type type, int vec_start, int count,
+ 		rte_intr_callback_fn handler, const char *name,
+ 		void *arg)
+ {
+ 	int ret;
+-	struct rte_intr_handle *intr_handle;
++	struct rte_intr_handle **intr_handle;
+ 	struct opae_adapter *adapter;
+ 	struct opae_manager *mgr;
+ 	struct opae_accelerator *acc;
+ 	int *intr_efds = NULL, nb_intr, i;
+ 
+-	for (i = 0; i < IFPGA_MAX_IRQ; i++) {
+-		ifpga_irq_handle[i] =
+-			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
+-		if (ifpga_irq_handle[i] == NULL)
+-			return -ENOMEM;
+-	}
++	if (!dev || !dev->rawdev)
++		return -ENODEV;
+ 
+-	adapter = ifpga_rawdev_get_priv(dev);
++	adapter = ifpga_rawdev_get_priv(dev->rawdev);
+ 	if (!adapter)
+ 		return -ENODEV;
+ 
+@@ -1392,32 +1449,40 @@ ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id,
+ 		return -ENODEV;
+ 
+ 	if (type == IFPGA_FME_IRQ) {
+-		intr_handle = ifpga_irq_handle[0];
++		intr_handle = (struct rte_intr_handle **)&dev->intr_handle[0];
+ 		count = 1;
+ 	} else if (type == IFPGA_AFU_IRQ) {
+-		intr_handle = ifpga_irq_handle[vec_start + 1];
++		i = vec_start + 1;
++		intr_handle = (struct rte_intr_handle **)&dev->intr_handle[i];
+ 	} else {
+ 		return -EINVAL;
+ 	}
+ 
+-	if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_VFIO_MSIX))
++	if (*intr_handle)
++		return -EBUSY;
++
++	*intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
++	if (!(*intr_handle))
++		return -ENOMEM;
++
++	if (rte_intr_type_set(*intr_handle, RTE_INTR_HANDLE_VFIO_MSIX))
+ 		return -rte_errno;
+ 
+-	ret = rte_intr_efd_enable(intr_handle, count);
++	ret = rte_intr_efd_enable(*intr_handle, count);
+ 	if (ret)
+ 		return -ENODEV;
+ 
+-	if (rte_intr_fd_set(intr_handle,
+-			rte_intr_efds_index_get(intr_handle, 0)))
++	if (rte_intr_fd_set(*intr_handle,
++			rte_intr_efds_index_get(*intr_handle, 0)))
+ 		return -rte_errno;
+ 
+ 	IFPGA_RAWDEV_PMD_DEBUG("register %s irq, vfio_fd=%d, fd=%d\n",
+-			name, rte_intr_dev_fd_get(intr_handle),
+-			rte_intr_fd_get(intr_handle));
++			name, rte_intr_dev_fd_get(*intr_handle),
++			rte_intr_fd_get(*intr_handle));
+ 
+ 	if (type == IFPGA_FME_IRQ) {
+ 		struct fpga_fme_err_irq_set err_irq_set;
+-		err_irq_set.evtfd = rte_intr_efds_index_get(intr_handle,
++		err_irq_set.evtfd = rte_intr_efds_index_get(*intr_handle,
+ 								   0);
+ 
+ 		ret = opae_manager_ifpga_set_err_irq(mgr, &err_irq_set);
+@@ -1428,14 +1493,14 @@ ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id,
+ 		if (!acc)
+ 			return -EINVAL;
+ 
+-		nb_intr = rte_intr_nb_intr_get(intr_handle);
++		nb_intr = rte_intr_nb_intr_get(*intr_handle);
+ 
+ 		intr_efds = calloc(nb_intr, sizeof(int));
+ 		if (!intr_efds)
+ 			return -ENOMEM;
+ 
+ 		for (i = 0; i < nb_intr; i++)
+-			intr_efds[i] = rte_intr_efds_index_get(intr_handle, i);
++			intr_efds[i] = rte_intr_efds_index_get(*intr_handle, i);
+ 
+ 		ret = opae_acc_set_irq(acc, vec_start, count, intr_efds);
+ 		if (ret) {
+@@ -1445,7 +1510,7 @@ ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id,
+ 	}
+ 
+ 	/* register interrupt handler using DPDK API */
+-	ret = rte_intr_callback_register(intr_handle,
++	ret = rte_intr_callback_register(*intr_handle,
+ 			handler, (void *)arg);
+ 	if (ret) {
+ 		free(intr_efds);
+@@ -1547,11 +1612,15 @@ ifpga_rawdev_create(struct rte_pci_device *pci_dev,
+ 		IFPGA_RAWDEV_PMD_INFO("this is a PF function");
+ 	}
+ 
+-	ret = ifpga_register_msix_irq(rawdev, 0, IFPGA_FME_IRQ, 0, 0,
++	ret = ifpga_register_msix_irq(dev, 0, IFPGA_FME_IRQ, 0, 0,
+ 			fme_interrupt_handler, "fme_irq", mgr);
+ 	if (ret)
+ 		goto free_adapter_data;
+ 
++	ret = ifpga_monitor_start_func(dev);
++	if (ret)
++		goto free_adapter_data;
++
+ 	return ret;
+ 
+ free_adapter_data:
+@@ -1570,9 +1639,6 @@ ifpga_rawdev_destroy(struct rte_pci_device *pci_dev)
+ 	int ret;
+ 	struct rte_rawdev *rawdev;
+ 	char name[RTE_RAWDEV_NAME_MAX_LEN];
+-	struct opae_adapter *adapter;
+-	struct opae_manager *mgr;
+-	struct ifpga_rawdev *dev;
+ 
+ 	if (!pci_dev) {
+ 		IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+@@ -1592,21 +1658,6 @@ ifpga_rawdev_destroy(struct rte_pci_device *pci_dev)
+ 		IFPGA_RAWDEV_PMD_ERR("Invalid device name (%s)", name);
+ 		return -EINVAL;
+ 	}
+-	dev = ifpga_rawdev_get(rawdev);
+-	if (dev)
+-		dev->rawdev = NULL;
+-
+-	adapter = ifpga_rawdev_get_priv(rawdev);
+-	if (!adapter)
+-		return -ENODEV;
+-
+-	mgr = opae_adapter_get_mgr(adapter);
+-	if (!mgr)
+-		return -ENODEV;
+-
+-	if (ifpga_unregister_msix_irq(IFPGA_FME_IRQ, 0,
+-				fme_interrupt_handler, mgr) < 0)
+-		return -EINVAL;
+ 
+ 	/* rte_rawdev_close is called by pmd_release */
+ 	ret = rte_rawdev_pmd_release(rawdev);
+@@ -1627,7 +1678,7 @@ ifpga_rawdev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ static int
+ ifpga_rawdev_pci_remove(struct rte_pci_device *pci_dev)
+ {
+-	ifpga_monitor_stop_func();
++	IFPGA_RAWDEV_PMD_INFO("remove pci_dev %s", pci_dev->device.name);
+ 	return ifpga_rawdev_destroy(pci_dev);
+ }
+ 
+@@ -1669,80 +1720,118 @@ static int ifpga_rawdev_get_string_arg(const char *key __rte_unused,
+ 
+ 	return 0;
+ }
++
+ static int
+-ifpga_cfg_probe(struct rte_vdev_device *dev)
++ifpga_vdev_parse_devargs(struct rte_devargs *devargs,
++	struct ifpga_vdev_args *args)
+ {
+-	struct rte_devargs *devargs;
+-	struct rte_kvargs *kvlist = NULL;
+-	struct rte_rawdev *rawdev = NULL;
+-	struct ifpga_rawdev *ifpga_dev;
+-	int port;
++	struct rte_kvargs *kvlist;
+ 	char *name = NULL;
+-	const char *bdf;
+-	char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
+-	int ret = -1;
++	int port = 0;
++	int ret = -EINVAL;
+ 
+-	devargs = dev->device.devargs;
++	if (!devargs || !args)
++		return ret;
+ 
+ 	kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ 	if (!kvlist) {
+-		IFPGA_RAWDEV_PMD_LOG(ERR, "error when parsing param");
+-		goto end;
++		IFPGA_RAWDEV_PMD_ERR("error when parsing devargs");
++		return ret;
+ 	}
+ 
+ 	if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ 		if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+-				       &ifpga_rawdev_get_string_arg,
+-				       &name) < 0) {
++			&ifpga_rawdev_get_string_arg, &name) < 0) {
+ 			IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+-				     IFPGA_ARG_NAME);
++				IFPGA_ARG_NAME);
+ 			goto end;
++		} else {
++			strlcpy(args->bdf, name, sizeof(args->bdf));
++			rte_free(name);
+ 		}
+ 	} else {
+ 		IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+-			  IFPGA_ARG_NAME);
++			IFPGA_ARG_NAME);
+ 		goto end;
+ 	}
+ 
+ 	if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+-		if (rte_kvargs_process(kvlist,
+-			IFPGA_ARG_PORT,
+-			&rte_ifpga_get_integer32_arg,
+-			&port) < 0) {
++		if (rte_kvargs_process(kvlist, IFPGA_ARG_PORT,
++			&rte_ifpga_get_integer32_arg, &port) < 0) {
+ 			IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ 				IFPGA_ARG_PORT);
+ 			goto end;
++		} else {
++			args->port = port;
+ 		}
+ 	} else {
+ 		IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+-			  IFPGA_ARG_PORT);
++			IFPGA_ARG_PORT);
+ 		goto end;
+ 	}
+ 
++	ret = 0;
++
++end:
++	if (kvlist)
++		rte_kvargs_free(kvlist);
++
++	return ret;
++}
++
++static int
++ifpga_cfg_probe(struct rte_vdev_device *vdev)
++{
++	struct rte_rawdev *rawdev = NULL;
++	struct ifpga_rawdev *ifpga_dev;
++	struct ifpga_vdev_args args;
++	char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
++	const char *vdev_name = NULL;
++	int i, n, ret = 0;
++
++	vdev_name = rte_vdev_device_name(vdev);
++	if (!vdev_name)
++		return -EINVAL;
++
++	IFPGA_RAWDEV_PMD_INFO("probe ifpga virtual device %s", vdev_name);
++
++	ret = ifpga_vdev_parse_devargs(vdev->device.devargs, &args);
++	if (ret)
++		return ret;
++
+ 	memset(dev_name, 0, sizeof(dev_name));
+-	snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", name);
++	snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", args.bdf);
+ 	rawdev = rte_rawdev_pmd_get_named_dev(dev_name);
+ 	if (!rawdev)
+-		goto end;
++		return -ENODEV;
+ 	ifpga_dev = ifpga_rawdev_get(rawdev);
+ 	if (!ifpga_dev)
+-		goto end;
+-	bdf = name;
+-	ifpga_rawdev_fill_info(ifpga_dev, bdf);
++		return -ENODEV;
+ 
+-	ifpga_monitor_start_func();
++	for (i = 0; i < IFPGA_MAX_VDEV; i++) {
++		if (ifpga_dev->vdev_name[i] == NULL) {
++			n = strlen(vdev_name) + 1;
++			ifpga_dev->vdev_name[i] = rte_malloc(NULL, n, 0);
++			if (ifpga_dev->vdev_name[i] == NULL)
++				return -ENOMEM;
++			strlcpy(ifpga_dev->vdev_name[i], vdev_name, n);
++			break;
++		}
++	}
+ 
+-	memset(dev_name, 0, sizeof(dev_name));
+-	snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
+-	port, name);
++	if (i >= IFPGA_MAX_VDEV) {
++		IFPGA_RAWDEV_PMD_ERR("Can't create more virtual devices!");
++		return -ENOENT;
++	}
+ 
++	snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
++		args.port, args.bdf);
+ 	ret = rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
+-			dev_name, devargs->args);
+-end:
+-	if (kvlist)
+-		rte_kvargs_free(kvlist);
+-	if (name)
+-		free(name);
++			dev_name, vdev->device.devargs->args);
++	if (ret) {
++		rte_free(ifpga_dev->vdev_name[i]);
++		ifpga_dev->vdev_name[i] = NULL;
++	}
+ 
+ 	return ret;
+ }
+@@ -1750,10 +1839,47 @@ ifpga_cfg_probe(struct rte_vdev_device *dev)
+ static int
+ ifpga_cfg_remove(struct rte_vdev_device *vdev)
+ {
+-	IFPGA_RAWDEV_PMD_INFO("Remove ifpga_cfg %p",
+-		vdev);
++	struct rte_rawdev *rawdev = NULL;
++	struct ifpga_rawdev *ifpga_dev;
++	struct ifpga_vdev_args args;
++	char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
++	const char *vdev_name = NULL;
++	char *tmp_vdev = NULL;
++	int i, ret = 0;
+ 
+-	return 0;
++	vdev_name = rte_vdev_device_name(vdev);
++	if (!vdev_name)
++		return -EINVAL;
++
++	IFPGA_RAWDEV_PMD_INFO("remove ifpga virtual device %s", vdev_name);
++
++	ret = ifpga_vdev_parse_devargs(vdev->device.devargs, &args);
++	if (ret)
++		return ret;
++
++	memset(dev_name, 0, sizeof(dev_name));
++	snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", args.bdf);
++	rawdev = rte_rawdev_pmd_get_named_dev(dev_name);
++	if (!rawdev)
++		return -ENODEV;
++	ifpga_dev = ifpga_rawdev_get(rawdev);
++	if (!ifpga_dev)
++		return -ENODEV;
++
++	snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
++		args.port, args.bdf);
++	ret = rte_eal_hotplug_remove(RTE_STR(IFPGA_BUS_NAME), dev_name);
++
++	for (i = 0; i < IFPGA_MAX_VDEV; i++) {
++		tmp_vdev = ifpga_dev->vdev_name[i];
++		if (tmp_vdev && !strcmp(tmp_vdev, vdev_name)) {
++			free(tmp_vdev);
++			ifpga_dev->vdev_name[i] = NULL;
++			break;
++		}
++	}
++
++	return ret;
+ }
+ 
+ static struct rte_vdev_driver ifpga_cfg_driver = {
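
Editor's note: the monitor rework above replaces a global start flag with a per-device poll_enabled flag plus an atomic reference count, so the thread is created by the first device and cancelled by the last. A sketch of the refcount transitions using the same GCC __atomic builtins:

    #include <stdio.h>

    static int refcnt;

    static void monitor_start(void)
    {
        /* 0 -> 1 transition: this caller spawns the shared thread */
        if (!__atomic_fetch_add(&refcnt, 1, __ATOMIC_RELAXED))
            printf("first user: spawn monitor thread\n");
    }

    static void monitor_stop(void)
    {
        /* 1 -> 0 transition: this caller tears the thread down */
        if (!__atomic_sub_fetch(&refcnt, 1, __ATOMIC_RELAXED))
            printf("last user: cancel and join monitor thread\n");
    }

    int main(void)
    {
        monitor_start();   /* device A */
        monitor_start();   /* device B: thread already running */
        monitor_stop();    /* device B */
        monitor_stop();    /* device A: thread torn down */
        return 0;
    }
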
+diff --git a/dpdk/drivers/raw/ifpga/ifpga_rawdev.h b/dpdk/drivers/raw/ifpga/ifpga_rawdev.h
+index 61c8366707..4c191190ca 100644
+--- a/dpdk/drivers/raw/ifpga/ifpga_rawdev.h
++++ b/dpdk/drivers/raw/ifpga/ifpga_rawdev.h
+@@ -50,6 +50,8 @@ ifpga_rawdev_get_priv(const struct rte_rawdev *rawdev)
+ 
+ #define IFPGA_RAWDEV_MSIX_IRQ_NUM 7
+ #define IFPGA_RAWDEV_NUM 32
++#define IFPGA_MAX_VDEV 4
++#define IFPGA_MAX_IRQ 12
+ 
+ struct ifpga_rawdev {
+ 	int dev_id;
+@@ -59,6 +61,17 @@ struct ifpga_rawdev {
+ 	uint32_t aer_old[2];
+ 	char fvl_bdf[8][16];
+ 	char parent_bdf[16];
++	/* 0 for FME interrupt, others are reserved for AFU irq */
++	void *intr_handle[IFPGA_MAX_IRQ];
++	/* whether the monitor thread polls this device's sensors */
++	int poll_enabled;
++	/* name of virtual devices created on raw device */
++	char *vdev_name[IFPGA_MAX_VDEV];
++};
++
++struct ifpga_vdev_args {
++	char bdf[PCI_PRI_STR_SIZE];
++	int port;
+ };
+ 
+ struct ifpga_rawdev *
+@@ -70,12 +83,12 @@ enum ifpga_irq_type {
+ };
+ 
+ int
+-ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id,
++ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id,
+ 		enum ifpga_irq_type type, int vec_start, int count,
+ 		rte_intr_callback_fn handler, const char *name,
+ 		void *arg);
+ int
+-ifpga_unregister_msix_irq(enum ifpga_irq_type type,
++ifpga_unregister_msix_irq(struct ifpga_rawdev *dev, enum ifpga_irq_type type,
+ 		int vec_start, rte_intr_callback_fn handler, void *arg);
+ 
+ struct rte_pci_bus *ifpga_get_pci_bus(void);
+diff --git a/dpdk/drivers/raw/ioat/ioat_common.c b/dpdk/drivers/raw/ioat/ioat_common.c
+index 60de41152a..e44e181e58 100644
+--- a/dpdk/drivers/raw/ioat/ioat_common.c
++++ b/dpdk/drivers/raw/ioat/ioat_common.c
+@@ -9,6 +9,8 @@
+ 
+ #include "ioat_private.h"
+ 
++RTE_LOG_REGISTER_DEFAULT(ioat_rawdev_logtype, INFO);
++
+ static const char * const xstat_names[] = {
+ 		"failed_enqueues", "successful_enqueues",
+ 		"copies_started", "copies_completed"
+diff --git a/dpdk/drivers/raw/ioat/ioat_private.h b/dpdk/drivers/raw/ioat/ioat_private.h
+index f032d5fe3d..7fb685a3d7 100644
+--- a/dpdk/drivers/raw/ioat/ioat_private.h
++++ b/dpdk/drivers/raw/ioat/ioat_private.h
+@@ -18,10 +18,10 @@
+ #include <rte_rawdev_pmd.h>
+ #include "rte_ioat_rawdev.h"
+ 
+-extern int ioat_pmd_logtype;
++extern int ioat_rawdev_logtype;
+ 
+ #define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
+-		ioat_pmd_logtype, "IOAT: %s(): " fmt "\n", __func__, ##args)
++		ioat_rawdev_logtype, "IOAT: %s(): " fmt "\n", __func__, ##args)
+ 
+ #define IOAT_PMD_DEBUG(fmt, args...)  IOAT_PMD_LOG(DEBUG, fmt, ## args)
+ #define IOAT_PMD_INFO(fmt, args...)   IOAT_PMD_LOG(INFO, fmt, ## args)
+diff --git a/dpdk/drivers/raw/ioat/ioat_rawdev.c b/dpdk/drivers/raw/ioat/ioat_rawdev.c
+index 5396671d4f..11341fcf5d 100644
+--- a/dpdk/drivers/raw/ioat/ioat_rawdev.c
++++ b/dpdk/drivers/raw/ioat/ioat_rawdev.c
+@@ -28,8 +28,6 @@ static struct rte_pci_driver ioat_pmd_drv;
+ #define IOAT_DEVICE_ID_BDXF	0x6f2F
+ #define IOAT_DEVICE_ID_ICX	0x0b00
+ 
+-RTE_LOG_REGISTER_DEFAULT(ioat_pmd_logtype, INFO);
+-
+ #define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
+ #define COMPLETION_SZ sizeof(__m128i)
+ 
+diff --git a/dpdk/drivers/raw/ioat/rte_idxd_rawdev_fns.h b/dpdk/drivers/raw/ioat/rte_idxd_rawdev_fns.h
+index 3ea25f6ca9..a07892faa0 100644
+--- a/dpdk/drivers/raw/ioat/rte_idxd_rawdev_fns.h
++++ b/dpdk/drivers/raw/ioat/rte_idxd_rawdev_fns.h
+@@ -16,6 +16,7 @@
+  */
+ 
+ #include <stdint.h>
++#include <rte_errno.h>
+ 
+ /*
+  * Defines used in the data path for interacting with IDXD hardware.
+diff --git a/dpdk/drivers/raw/ntb/ntb.c b/dpdk/drivers/raw/ntb/ntb.c
+index 46ac02e5ab..f5e773c53b 100644
+--- a/dpdk/drivers/raw/ntb/ntb.c
++++ b/dpdk/drivers/raw/ntb/ntb.c
+@@ -1398,6 +1398,10 @@ ntb_init_hw(struct rte_rawdev *dev, struct rte_pci_device *pci_dev)
+ 
+ 	/* Init doorbell. */
+ 	hw->db_valid_mask = RTE_LEN2MASK(hw->db_cnt, uint64_t);
++	/* Clear all valid doorbell bits before registering intr handler */
++	if (hw->ntb_ops->db_clear == NULL)
++		return -ENOTSUP;
++	(*hw->ntb_ops->db_clear)(dev, hw->db_valid_mask);
+ 
+ 	intr_handle = pci_dev->intr_handle;
+ 	/* Register callback func to eal lib */
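
Editor's note: the ntb_init_hw() hunk clears stale doorbell bits before the interrupt handler is registered, and guards the optional db_clear op first. A sketch of the guarded-ops-hook pattern:

    #include <stdio.h>

    struct ops { int (*db_clear)(unsigned long mask); };

    static int real_db_clear(unsigned long mask)
    {
        printf("clearing doorbell bits 0x%lx\n", mask);
        return 0;
    }

    static int init_hw(const struct ops *o, unsigned long db_valid_mask)
    {
        if (o->db_clear == NULL)
            return -1;                     /* -ENOTSUP in the patch */
        /* clear stale bits before interrupts can fire */
        return o->db_clear(db_valid_mask);
    }

    int main(void)
    {
        struct ops o = { .db_clear = real_db_clear };
        return init_hw(&o, 0xffUL);
    }
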
+diff --git a/dpdk/drivers/raw/ntb/ntb.h b/dpdk/drivers/raw/ntb/ntb.h
+index cdf7667d5d..c9ff33aa59 100644
+--- a/dpdk/drivers/raw/ntb/ntb.h
++++ b/dpdk/drivers/raw/ntb/ntb.h
+@@ -95,7 +95,7 @@ enum ntb_spad_idx {
+  * @spad_write: Write val to local/peer spad register.
+  * @db_read: Read doorbells status.
+  * @db_clear: Clear local doorbells.
+- * @db_set_mask: Set bits in db mask, preventing db interrpts generated
++ * @db_set_mask: Set bits in db mask, preventing db interrupts generated
+  * for those db bits.
+  * @peer_db_set: Set doorbell bit to generate peer interrupt for that bit.
+  * @vector_bind: Bind vector source [intr] to msix vector [msix].
+diff --git a/dpdk/drivers/regex/mlx5/mlx5_rxp.c b/dpdk/drivers/regex/mlx5/mlx5_rxp.c
+index 5ead3ca341..51b6e71376 100644
+--- a/dpdk/drivers/regex/mlx5/mlx5_rxp.c
++++ b/dpdk/drivers/regex/mlx5/mlx5_rxp.c
+@@ -148,7 +148,7 @@ mlx5_regex_configure(struct rte_regexdev *dev,
+ 	dev->data->dev_conf.nb_queue_pairs = priv->nb_queues;
+ 	priv->qps = rte_zmalloc(NULL, sizeof(struct mlx5_regex_qp) *
+ 				priv->nb_queues, 0);
+-	if (!priv->nb_queues) {
++	if (!priv->qps) {
+ 		DRV_LOG(ERR, "can't allocate qps memory");
+ 		rte_errno = ENOMEM;
+ 		return -rte_errno;
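
Editor's note: the one-line regex fix above checks the pointer returned by rte_zmalloc() instead of the queue count, so an allocation failure is actually caught. A standalone illustration with calloc() standing in:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t nb_queues = 4;
        void *qps = calloc(nb_queues, 64);

        if (!qps) {          /* correct: test the returned pointer ... */
            fprintf(stderr, "can't allocate qps memory\n");
            return 1;
        }
        /* ... not `if (!nb_queues)`, which is true only for a zero request */
        free(qps);
        return 0;
    }
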
+diff --git a/dpdk/drivers/vdpa/ifc/base/ifcvf.c b/dpdk/drivers/vdpa/ifc/base/ifcvf.c
+index 721cb1da8a..f3c29f94b3 100644
+--- a/dpdk/drivers/vdpa/ifc/base/ifcvf.c
++++ b/dpdk/drivers/vdpa/ifc/base/ifcvf.c
+@@ -87,6 +87,8 @@ ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
+ 	}
+ 
+ 	hw->lm_cfg = hw->mem_resource[4].addr;
++	if (!hw->lm_cfg)
++		WARNINGOUT("HW does not support live migration!\n");
+ 
+ 	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ 			hw->isr == NULL || hw->dev_cfg == NULL) {
+@@ -94,12 +96,14 @@ ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
+ 		return -1;
+ 	}
+ 
+-	DEBUGOUT("capability mapping:\ncommon cfg: %p\n"
+-			"notify base: %p\nisr cfg: %p\ndevice cfg: %p\n"
+-			"multiplier: %u\n",
+-			hw->common_cfg, hw->dev_cfg,
+-			hw->isr, hw->notify_base,
+-			hw->notify_off_multiplier);
++	DEBUGOUT("capability mapping:\n"
++		 "common cfg: %p\n"
++		 "notify base: %p\n"
++		 "isr cfg: %p\n"
++		 "device cfg: %p\n"
++		 "multiplier: %u\n",
++		 hw->common_cfg, hw->notify_base, hw->isr, hw->dev_cfg,
++		 hw->notify_off_multiplier);
+ 
+ 	return 0;
+ }
+@@ -216,10 +220,12 @@ ifcvf_hw_enable(struct ifcvf_hw *hw)
+ 				&cfg->queue_used_hi);
+ 		IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+ 
+-		*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+-				(i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) =
+-			(u32)hw->vring[i].last_avail_idx |
+-			((u32)hw->vring[i].last_used_idx << 16);
++		if (lm_cfg) {
++			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
++					(i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) =
++				(u32)hw->vring[i].last_avail_idx |
++				((u32)hw->vring[i].last_used_idx << 16);
++		}
+ 
+ 		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+ 		if (IFCVF_READ_REG16(&cfg->queue_msix_vector) ==
+@@ -289,6 +295,8 @@ ifcvf_enable_logging(struct ifcvf_hw *hw, u64 log_base, u64 log_size)
+ 	u8 *lm_cfg;
+ 
+ 	lm_cfg = hw->lm_cfg;
++	if (!lm_cfg)
++		return;
+ 
+ 	*(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_LOW) =
+ 		log_base & IFCVF_32_BIT_MASK;
+@@ -311,6 +319,9 @@ ifcvf_disable_logging(struct ifcvf_hw *hw)
+ 	u8 *lm_cfg;
+ 
+ 	lm_cfg = hw->lm_cfg;
++	if (!lm_cfg)
++		return;
++
+ 	*(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_DISABLE;
+ }
+ 
+diff --git a/dpdk/drivers/vdpa/ifc/base/ifcvf_osdep.h b/dpdk/drivers/vdpa/ifc/base/ifcvf_osdep.h
+index 6aef25ea45..3d567695cc 100644
+--- a/dpdk/drivers/vdpa/ifc/base/ifcvf_osdep.h
++++ b/dpdk/drivers/vdpa/ifc/base/ifcvf_osdep.h
+@@ -14,6 +14,7 @@
+ #include <rte_log.h>
+ #include <rte_io.h>
+ 
++#define WARNINGOUT(S, args...)  RTE_LOG(WARNING, PMD, S, ##args)
+ #define DEBUGOUT(S, args...)    RTE_LOG(DEBUG, PMD, S, ##args)
+ #define STATIC                  static
+ 
+diff --git a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c
+index 3853c4cf7e..6a915b0d5e 100644
+--- a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c
++++ b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c
+@@ -356,6 +356,8 @@ vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
+ 	vring.callfd = -1;
+ 
+ 	nr_vring = rte_vhost_get_vring_num(internal->vid);
++	if (nr_vring > IFCVF_MAX_QUEUES * 2)
++		return -1;
+ 
+ 	irq_set = (struct vfio_irq_set *)irq_set_buf;
+ 	irq_set->argsz = sizeof(irq_set_buf);
+diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c
+index b9e84dd9bf..9c1c70037c 100644
+--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c
++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c
+@@ -81,7 +81,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
+ 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
+ 		return -1;
+ 	}
+-	*queue_num = priv->caps.max_num_virtio_queues;
++	*queue_num = priv->caps.max_num_virtio_queues / 2;
+ 	return 0;
+ }
+ 
+@@ -138,7 +138,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
+ 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
+ 		return -EINVAL;
+ 	}
+-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
++	if (vring >= (int)priv->caps.max_num_virtio_queues) {
+ 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
+ 		return -E2BIG;
+ 	}
+@@ -250,10 +250,10 @@ mlx5_vdpa_dev_close(int vid)
+ 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
+ 		return -1;
+ 	}
+-	if (priv->configured)
+-		ret |= mlx5_vdpa_lm_log(priv);
+ 	mlx5_vdpa_err_event_unset(priv);
+ 	mlx5_vdpa_cqe_event_unset(priv);
++	if (priv->configured)
++		ret |= mlx5_vdpa_lm_log(priv);
+ 	mlx5_vdpa_steer_unset(priv);
+ 	mlx5_vdpa_virtqs_release(priv);
+ 	mlx5_vdpa_event_qp_global_release(priv);
+@@ -518,7 +518,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev)
+ 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
+ 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
+ 			   sizeof(struct mlx5_vdpa_virtq) *
+-			   attr->vdpa.max_num_virtio_queues * 2,
++			   attr->vdpa.max_num_virtio_queues,
+ 			   RTE_CACHE_LINE_SIZE);
+ 	if (!priv) {
+ 		DRV_LOG(ERR, "Failed to allocate private memory.");
+diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+index f8d910b33f..ecad139768 100644
+--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c
++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+@@ -460,6 +460,7 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
+ {
+ 	int ret;
+ 	rte_cpuset_t cpuset;
++	pthread_attr_t *attrp = NULL;
+ 	pthread_attr_t attr;
+ 	char name[16];
+ 	const struct sched_param sp = {
+@@ -469,22 +470,27 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
+ 	if (!priv->eventc)
+ 		/* All virtqs are in poll mode. */
+ 		return 0;
+-	pthread_attr_init(&attr);
+-	ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
++	ret = pthread_attr_init(&attr);
++	if (ret != 0) {
++		DRV_LOG(ERR, "Failed to initialize thread attributes");
++		goto out;
++	}
++	attrp = &attr;
++	ret = pthread_attr_setschedpolicy(attrp, SCHED_RR);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
+-		return -1;
++		goto out;
+ 	}
+-	ret = pthread_attr_setschedparam(&attr, &sp);
++	ret = pthread_attr_setschedparam(attrp, &sp);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Failed to set thread priority.");
+-		return -1;
++		goto out;
+ 	}
+-	ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
++	ret = pthread_create(&priv->timer_tid, attrp, mlx5_vdpa_event_handle,
+ 			     (void *)priv);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Failed to create timer thread.");
+-		return -1;
++		goto out;
+ 	}
+ 	CPU_ZERO(&cpuset);
+ 	if (priv->event_core != -1)
+@@ -494,12 +500,16 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
+ 	ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Failed to set thread affinity.");
+-		return -1;
++		goto out;
+ 	}
+ 	snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
+-	ret = rte_thread_setname(priv->timer_tid, name);
+-	if (ret)
++	if (rte_thread_setname(priv->timer_tid, name) != 0)
+ 		DRV_LOG(DEBUG, "Cannot set timer thread name.");
++out:
++	if (attrp != NULL)
++		pthread_attr_destroy(attrp);
++	if (ret != 0)
++		return -1;
+ 	return 0;
+ }
+ 
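
Editor's note: the event-thread hunk funnels every failure through a single out: label and destroys the pthread attribute object only if pthread_attr_init() succeeded, tracked via the attrp pointer. The same cleanup pattern in isolation:

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static void *worker(void *arg) { (void)arg; return NULL; }

    int main(void)
    {
        pthread_attr_t attr, *attrp = NULL;
        pthread_t tid;
        int ret;

        ret = pthread_attr_init(&attr);
        if (ret != 0)
            goto out;               /* nothing to destroy yet */
        attrp = &attr;              /* from here on, cleanup is required */

        ret = pthread_attr_setschedpolicy(attrp, SCHED_RR);
        if (ret != 0)
            goto out;

        ret = pthread_create(&tid, attrp, worker, NULL);
        if (ret == 0)
            pthread_join(tid, NULL);
    out:
        if (attrp != NULL)
            pthread_attr_destroy(attrp);
        return ret != 0;
    }
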
+diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+index b1b9053bff..130d201a85 100644
+--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+@@ -160,7 +160,7 @@ mlx5_vdpa_vhost_mem_regions_prepare(int vid, uint8_t *mode, uint64_t *mem_size,
+  * The target here is to group all the physical memory regions of the
+  * virtio device in one indirect mkey.
+  * For KLM Fixed Buffer Size mode (HW find the translation entry in one
+- * read according to the guest phisical address):
++ * read according to the guest physical address):
+  * All the sub-direct mkeys of it must be in the same size, hence, each
+  * one of them should be in the GCD size of all the virtio memory
+  * regions and the holes between them.
+diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+index db971bad48..98d788d734 100644
+--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+@@ -9,6 +9,7 @@
+ #include <rte_malloc.h>
+ #include <rte_errno.h>
+ #include <rte_io.h>
++#include <rte_eal_paging.h>
+ 
+ #include <mlx5_common.h>
+ 
+@@ -17,17 +18,17 @@
+ 
+ 
+ static void
+-mlx5_vdpa_virtq_handler(void *cb_arg)
++mlx5_vdpa_virtq_kick_handler(void *cb_arg)
+ {
+ 	struct mlx5_vdpa_virtq *virtq = cb_arg;
+ 	struct mlx5_vdpa_priv *priv = virtq->priv;
+ 	uint64_t buf;
+ 	int nbytes;
++	int retry;
+ 
+ 	if (rte_intr_fd_get(virtq->intr_handle) < 0)
+ 		return;
+-
+-	do {
++	for (retry = 0; retry < 3; ++retry) {
+ 		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
+ 			      8);
+ 		if (nbytes < 0) {
+@@ -39,7 +40,9 @@ mlx5_vdpa_virtq_handler(void *cb_arg)
+ 				virtq->index, strerror(errno));
+ 		}
+ 		break;
+-	} while (1);
++	}
++	if (nbytes < 0)
++		return;
+ 	rte_write32(virtq->index, priv->virtq_db_addr);
+ 	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
+ 		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
+@@ -59,20 +62,16 @@ static int
+ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
+ {
+ 	unsigned int i;
+-	int retries = MLX5_VDPA_INTR_RETRIES;
+ 	int ret = -EAGAIN;
+ 
+-	if (rte_intr_fd_get(virtq->intr_handle) != -1) {
+-		while (retries-- && ret == -EAGAIN) {
++	if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
++		while (ret == -EAGAIN) {
+ 			ret = rte_intr_callback_unregister(virtq->intr_handle,
+-							mlx5_vdpa_virtq_handler,
+-							virtq);
++					mlx5_vdpa_virtq_kick_handler, virtq);
+ 			if (ret == -EAGAIN) {
+-				DRV_LOG(DEBUG, "Try again to unregister fd %d "
+-				"of virtq %d interrupt, retries = %d.",
+-				rte_intr_fd_get(virtq->intr_handle),
+-				(int)virtq->index, retries);
+-
++				DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
++					rte_intr_fd_get(virtq->intr_handle),
++					virtq->index);
+ 				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
+ 			}
+ 		}
+@@ -124,7 +123,9 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
+ 		priv->td = NULL;
+ 	}
+ 	if (priv->virtq_db_addr) {
+-		claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
++		/* Mask out the within-page offset for munmap. */
++		claim_zero(munmap((void *)((uintptr_t)priv->virtq_db_addr &
++			~(rte_mem_page_size() - 1)), priv->var->length));
+ 		priv->virtq_db_addr = NULL;
+ 	}
+ 	priv->features = 0;
+@@ -360,7 +361,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
+ 			goto error;
+ 
+ 		if (rte_intr_callback_register(virtq->intr_handle,
+-					       mlx5_vdpa_virtq_handler,
++					       mlx5_vdpa_virtq_kick_handler,
+ 					       virtq)) {
+ 			rte_intr_fd_set(virtq->intr_handle, -1);
+ 			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
+@@ -403,7 +404,7 @@ mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
+ 	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+ 		if (!(priv->caps.virtio_queue_type & (1 <<
+ 						     MLX5_VIRTQ_TYPE_PACKED))) {
+-			DRV_LOG(ERR, "Failed to configur PACKED mode for vdev "
++			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
+ 				"%d - it was not reported by HW/driver"
+ 				" capability.", priv->vid);
+ 			return -ENOTSUP;
+@@ -472,9 +473,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
+ 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
+ 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
+ 	}
+-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
++	if (nr_vring > priv->caps.max_num_virtio_queues) {
+ 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
+-			(int)priv->caps.max_num_virtio_queues * 2,
++			(int)priv->caps.max_num_virtio_queues,
+ 			(int)nr_vring);
+ 		return -1;
+ 	}
+@@ -487,6 +488,10 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
+ 		priv->virtq_db_addr = NULL;
+ 		goto error;
+ 	} else {
++		/* Add the within-page offset for 64K page systems. */
++		priv->virtq_db_addr = (char *)priv->virtq_db_addr +
++			((rte_mem_page_size() - 1) &
++			priv->caps.doorbell_bar_offset);
+ 		DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
+ 			priv->virtq_db_addr);
+ 	}
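The two virtq.c doorbell hunks above are halves of one fix for 64K-page systems: the mapped doorbell pointer keeps the within-page part of the BAR offset for writes, so the release path must mask that offset back out before munmap(), which requires a page-aligned address. A hedged sketch of the arithmetic, using plain sysconf() in place of rte_mem_page_size():

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Sketch: pointer bookkeeping when the BAR offset is not page aligned. */
static void *
db_addr_from_mapping(void *map_base, uint64_t bar_offset)
{
	uint64_t page_mask = (uint64_t)sysconf(_SC_PAGESIZE) - 1;

	/* Keep the within-page part of the offset for doorbell writes. */
	return (char *)map_base + (bar_offset & page_mask);
}

static int
db_unmap(void *db_addr, size_t len)
{
	uintptr_t page_mask = (uintptr_t)sysconf(_SC_PAGESIZE) - 1;

	/* munmap() needs the page-aligned base, not the write address. */
	return munmap((void *)((uintptr_t)db_addr & ~page_mask), len);
}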
+diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa.c
+index fccdd8c687..53f598facc 100644
+--- a/dpdk/drivers/vdpa/sfc/sfc_vdpa.c
++++ b/dpdk/drivers/vdpa/sfc/sfc_vdpa.c
+@@ -328,7 +328,8 @@ sfc_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+ 
+ 	sva = sfc_vdpa_get_adapter_by_dev(pci_dev);
+ 	if (sva == NULL) {
+-		sfc_vdpa_info(sva, "invalid device: %s", pci_dev->name);
++		SFC_VDPA_GENERIC_LOG(INFO,
++			"Invalid device: %s.", pci_dev->name);
+ 		return -1;
+ 	}
+ 
+diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.c
+index c4ce4474ef..b84699d234 100644
+--- a/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.c
++++ b/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.c
+@@ -222,6 +222,7 @@ static int
+ sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)
+ {
+ 	int rc;
++	uint32_t doorbell;
+ 	efx_virtio_vq_t *vq;
+ 	struct sfc_vdpa_vring_info vring;
+ 	efx_virtio_vq_cfg_t vq_cfg;
+@@ -270,22 +271,35 @@ sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)
+ 	/* Start virtqueue */
+ 	rc = efx_virtio_qstart(vq, &vq_cfg, &vq_dyncfg);
+ 	if (rc != 0) {
+-		/* destroy virtqueue */
+ 		sfc_vdpa_err(ops_data->dev_handle,
+ 			     "virtqueue start failed: %s",
+ 			     rte_strerror(rc));
+-		efx_virtio_qdestroy(vq);
+ 		goto fail_virtio_qstart;
+ 	}
+ 
+ 	sfc_vdpa_info(ops_data->dev_handle,
+ 		      "virtqueue started successfully for vq_num %d", vq_num);
+ 
++	rc = efx_virtio_get_doorbell_offset(vq, &doorbell);
++	if (rc != 0) {
++		sfc_vdpa_err(ops_data->dev_handle,
++			     "failed to get doorbell offset: %s",
++			     rte_strerror(rc));
++		goto fail_doorbell;
++	}
++
++	/*
++	 * Cache the bar_offset for each VQ here; it will come
++	 * in handy when sfc_vdpa_get_notify_area() is invoked.
++	 */
++	ops_data->vq_cxt[vq_num].doorbell = (void *)(uintptr_t)doorbell;
+ 	ops_data->vq_cxt[vq_num].enable = B_TRUE;
+ 
+ 	return rc;
+ 
++fail_doorbell:
+ fail_virtio_qstart:
++	efx_virtio_qdestroy(vq);
+ fail_vring_info:
+ 	return rc;
+ }
+@@ -611,7 +625,7 @@ sfc_vdpa_dev_config(int vid)
+ 
+ 	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+ 	if (ops_data == NULL) {
+-		sfc_vdpa_err(ops_data->dev_handle,
++		SFC_VDPA_GENERIC_LOG(ERR,
+ 			     "invalid vDPA device : %p, vid : %d",
+ 			     vdpa_dev, vid);
+ 		return -1;
+@@ -666,7 +680,7 @@ sfc_vdpa_dev_close(int vid)
+ 
+ 	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+ 	if (ops_data == NULL) {
+-		sfc_vdpa_err(ops_data->dev_handle,
++		SFC_VDPA_GENERIC_LOG(ERR,
+ 			     "invalid vDPA device : %p, vid : %d",
+ 			     vdpa_dev, vid);
+ 		return -1;
+@@ -792,8 +806,8 @@ sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+ 	int ret;
+ 	efx_nic_t *nic;
+ 	int vfio_dev_fd;
+-	efx_rc_t rc;
+-	unsigned int bar_offset;
++	volatile void *doorbell;
++	struct rte_pci_device *pci_dev;
+ 	struct rte_vdpa_device *vdpa_dev;
+ 	struct sfc_vdpa_ops_data *ops_data;
+ 	struct vfio_region_info reg = { .argsz = sizeof(reg) };
+@@ -822,19 +836,6 @@ sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+ 		return -1;
+ 	}
+ 
+-	if (ops_data->vq_cxt[qid].enable != B_TRUE) {
+-		sfc_vdpa_err(dev, "vq is not enabled");
+-		return -1;
+-	}
+-
+-	rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
+-					    &bar_offset);
+-	if (rc != 0) {
+-		sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
+-			     rte_strerror(rc));
+-		return rc;
+-	}
+-
+ 	reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
+ 	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ 	if (ret != 0) {
+@@ -843,7 +844,8 @@ sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+ 		return ret;
+ 	}
+ 
+-	*offset = reg.offset + bar_offset;
++	/* Use the bar_offset that was cached during sfc_vdpa_virtq_start(). */
++	*offset = reg.offset + (uint64_t)ops_data->vq_cxt[qid].doorbell;
+ 
+ 	len = (1U << encp->enc_vi_window_shift) / 2;
+ 	if (len >= sysconf(_SC_PAGESIZE)) {
+@@ -856,6 +858,18 @@ sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+ 	sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
+ 		      *offset);
+ 
++	pci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;
++	doorbell = (uint8_t *)pci_dev->mem_resource[reg.index].addr + *offset;
++
++	/*
++	 * virtio-net driver in VM sends queue notifications before
++	 * vDPA has a chance to setup the queues and notification area,
++	 * and hence the HW misses these doorbell notifications.
++	 * Since it is safe to send a duplicate doorbell, send another
++	 * doorbell from the vDPA driver as a workaround for this timing issue.
++	 */
++	rte_write16(qid, doorbell);
++
+ 	return 0;
+ }
+ 
+diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.h b/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.h
+index 6d790fd8be..9dbd5b84dd 100644
+--- a/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.h
++++ b/dpdk/drivers/vdpa/sfc/sfc_vdpa_ops.h
+@@ -35,6 +35,7 @@ struct sfc_vdpa_vring_info {
+ };
+ 
+ typedef struct sfc_vdpa_vq_context_s {
++	volatile void			*doorbell;
+ 	uint8_t				enable;
+ 	uint32_t			pidx;
+ 	uint32_t			cidx;
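Taken together, the sfc hunks above move the efx_virtio_get_doorbell_offset() query from sfc_vdpa_get_notify_area() into sfc_vdpa_virtq_start(), cache the result in the new per-VQ doorbell field, and ring the doorbell once from the host side, since the guest may have kicked the queue before the notify area was wired up and a duplicate kick is harmless. A rough sketch of that flow, with invented names:

#include <stdint.h>
#include <rte_io.h>

struct vq_ctx {
	volatile void *doorbell;	/* cached at queue start */
};

/* Sketch: cache the doorbell address once, then reuse it and re-kick. */
static void
notify_area_ready(struct vq_ctx *ctx, uint8_t *bar_base, uint16_t qid,
		  uint64_t bar_offset)
{
	ctx->doorbell = bar_base + bar_offset;
	/*
	 * A duplicate doorbell is harmless, so ring once here in case the
	 * guest kicked the queue before the notify area was set up.
	 */
	rte_write16(qid, ctx->doorbell);
}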
+diff --git a/dpdk/examples/bbdev_app/main.c b/dpdk/examples/bbdev_app/main.c
+index ecafc5e4f1..fc7e8b8174 100644
+--- a/dpdk/examples/bbdev_app/main.c
++++ b/dpdk/examples/bbdev_app/main.c
+@@ -372,7 +372,7 @@ add_awgn(struct rte_mbuf **mbufs, uint16_t num_pkts)
+ /* Encoder output to Decoder input adapter. The Decoder accepts only soft input
+  * so each bit of the encoder output must be translated into one byte of LLR. If
+  * Sub-block Deinterleaver is bypassed, which is the case, the padding bytes
+- * must additionally be insterted at the end of each sub-block.
++ * must additionally be inserted at the end of each sub-block.
+  */
+ static inline void
+ transform_enc_out_dec_in(struct rte_mbuf **mbufs, uint8_t *temp_buf,
+diff --git a/dpdk/examples/bond/main.c b/dpdk/examples/bond/main.c
+index 1087b0dad1..4efebb3902 100644
+--- a/dpdk/examples/bond/main.c
++++ b/dpdk/examples/bond/main.c
+@@ -230,7 +230,7 @@ bond_port_init(struct rte_mempool *mbuf_pool)
+ 			0 /*SOCKET_ID_ANY*/);
+ 	if (retval < 0)
+ 		rte_exit(EXIT_FAILURE,
+-				"Faled to create bond port\n");
++				"Failed to create bond port\n");
+ 
+ 	BOND_PORT = retval;
+ 
+@@ -373,7 +373,7 @@ static int lcore_main(__rte_unused void *arg1)
+ 	bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
+ 				(BOND_IP_3 << 16) | (BOND_IP_4 << 24);
+ 
+-	rte_spinlock_trylock(&global_flag_stru_p->lock);
++	rte_spinlock_lock(&global_flag_stru_p->lock);
+ 
+ 	while (global_flag_stru_p->LcoreMainIsRunning) {
+ 		rte_spinlock_unlock(&global_flag_stru_p->lock);
+@@ -405,7 +405,7 @@ static int lcore_main(__rte_unused void *arg1)
+ 						struct rte_ether_hdr *);
+ 			ether_type = eth_hdr->ether_type;
+ 			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
+-				printf("VLAN taged frame, offset:");
++				printf("VLAN tagged frame, offset:");
+ 			offset = get_vlan_offset(eth_hdr, &ether_type);
+ 			if (offset > 0)
+ 				printf("%d\n", offset);
+@@ -456,7 +456,7 @@ static int lcore_main(__rte_unused void *arg1)
+ 			if (is_free == 0)
+ 				rte_pktmbuf_free(pkts[i]);
+ 		}
+-		rte_spinlock_trylock(&global_flag_stru_p->lock);
++		rte_spinlock_lock(&global_flag_stru_p->lock);
+ 	}
+ 	rte_spinlock_unlock(&global_flag_stru_p->lock);
+ 	printf("BYE lcore_main\n");
+@@ -571,7 +571,7 @@ static void cmd_start_parsed(__rte_unused void *parsed_result,
+ {
+ 	int worker_core_id = rte_lcore_id();
+ 
+-	rte_spinlock_trylock(&global_flag_stru_p->lock);
++	rte_spinlock_lock(&global_flag_stru_p->lock);
+ 	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
+ 		if (rte_eal_get_lcore_state(global_flag_stru_p->LcoreMainCore)
+ 		    != WAIT) {
+@@ -591,7 +591,7 @@ static void cmd_start_parsed(__rte_unused void *parsed_result,
+ 	if ((worker_core_id >= RTE_MAX_LCORE) || (worker_core_id == 0))
+ 		return;
+ 
+-	rte_spinlock_trylock(&global_flag_stru_p->lock);
++	rte_spinlock_lock(&global_flag_stru_p->lock);
+ 	global_flag_stru_p->LcoreMainIsRunning = 1;
+ 	rte_spinlock_unlock(&global_flag_stru_p->lock);
+ 	cmdline_printf(cl,
+@@ -659,7 +659,7 @@ static void cmd_stop_parsed(__rte_unused void *parsed_result,
+ 			    struct cmdline *cl,
+ 			    __rte_unused void *data)
+ {
+-	rte_spinlock_trylock(&global_flag_stru_p->lock);
++	rte_spinlock_lock(&global_flag_stru_p->lock);
+ 	if (global_flag_stru_p->LcoreMainIsRunning == 0)	{
+ 		cmdline_printf(cl,
+ 					"lcore_main not running on core:%d\n",
+@@ -700,7 +700,7 @@ static void cmd_quit_parsed(__rte_unused void *parsed_result,
+ 			    struct cmdline *cl,
+ 			    __rte_unused void *data)
+ {
+-	rte_spinlock_trylock(&global_flag_stru_p->lock);
++	rte_spinlock_lock(&global_flag_stru_p->lock);
+ 	if (global_flag_stru_p->LcoreMainIsRunning == 0)	{
+ 		cmdline_printf(cl,
+ 					"lcore_main not running on core:%d\n",
+@@ -762,7 +762,7 @@ static void cmd_show_parsed(__rte_unused void *parsed_result,
+ 		printf("\n");
+ 	}
+ 
+-	rte_spinlock_trylock(&global_flag_stru_p->lock);
++	rte_spinlock_lock(&global_flag_stru_p->lock);
+ 	cmdline_printf(cl,
+ 			"Active_slaves:%d "
+ 			"packets received:Tot:%d Arp:%d IPv4:%d\n",
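Every bond/main.c hunk above swaps rte_spinlock_trylock() for rte_spinlock_lock(): each call site goes on to read shared state and then unconditionally unlock, so a failed trylock would both race on the data and release a lock it never acquired. A minimal sketch of the corrected pairing:

#include <rte_spinlock.h>

struct shared_state {
	rte_spinlock_t lock;
	int running;
};

/* Sketch: the critical section must actually hold the lock it releases. */
static int
read_running(struct shared_state *s)
{
	int running;

	rte_spinlock_lock(&s->lock);	/* blocks; trylock could return 0 */
	running = s->running;
	rte_spinlock_unlock(&s->lock);	/* now guaranteed to be ours */
	return running;
}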
+diff --git a/dpdk/examples/distributor/main.c b/dpdk/examples/distributor/main.c
+index c681e237ea..8995806b4e 100644
+--- a/dpdk/examples/distributor/main.c
++++ b/dpdk/examples/distributor/main.c
+@@ -108,7 +108,7 @@ static inline int
+ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
+ {
+ 	struct rte_eth_conf port_conf = port_conf_default;
+-	const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
++	const uint16_t rxRings = 1, txRings = 1;
+ 	int retval;
+ 	uint16_t q;
+ 	uint16_t nb_rxd = RX_RING_SIZE;
+@@ -261,8 +261,8 @@ lcore_rx(struct lcore_params *p)
+  * packets are then send straight to the tx core.
+  */
+ #if 0
+-	rte_distributor_process(d, bufs, nb_rx);
+-	const uint16_t nb_ret = rte_distributor_returned_pktsd,
++		rte_distributor_process(p->d, bufs, nb_rx);
++		const uint16_t nb_ret = rte_distributor_returned_pkts(p->d,
+ 			bufs, BURST_SIZE*2);
+ 
+ 		app_stats.rx.returned_pkts += nb_ret;
+diff --git a/dpdk/examples/dma/dmafwd.c b/dpdk/examples/dma/dmafwd.c
+index d074acc905..dd576bcf77 100644
+--- a/dpdk/examples/dma/dmafwd.c
++++ b/dpdk/examples/dma/dmafwd.c
+@@ -87,7 +87,7 @@ static uint16_t nb_queues = 1;
+ /* MAC updating enabled by default. */
+ static int mac_updating = 1;
+ 
+-/* hardare copy mode enabled by default. */
++/* hardware copy mode enabled by default. */
+ static copy_mode_t copy_mode = COPY_MODE_DMA_NUM;
+ 
+ /* size of descriptor ring for hardware copy mode or
+@@ -117,12 +117,11 @@ static uint16_t nb_txd = TX_DEFAULT_RINGSIZE;
+ static volatile bool force_quit;
+ 
+ static uint32_t dma_batch_sz = MAX_PKT_BURST;
+-static uint32_t max_frame_size = RTE_ETHER_MAX_LEN;
++static uint32_t max_frame_size;
+ 
+ /* ethernet addresses of ports */
+ static struct rte_ether_addr dma_ports_eth_addr[RTE_MAX_ETHPORTS];
+ 
+-static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+ struct rte_mempool *dma_pktmbuf_pool;
+ 
+ /* Print out statistics for one port. */
+@@ -484,10 +483,13 @@ dma_tx_port(struct rxtx_port_config *tx_config)
+ 
+ 		port_statistics.tx[tx_config->rxtx_port] += nb_tx;
+ 
+-		/* Free any unsent packets. */
+-		if (unlikely(nb_tx < nb_dq))
++		if (unlikely(nb_tx < nb_dq)) {
++			port_statistics.tx_dropped[tx_config->rxtx_port] +=
++				(nb_dq - nb_tx);
++			/* Free any unsent packets. */
+ 			rte_mempool_put_bulk(dma_pktmbuf_pool,
+ 			(void *)&mbufs[nb_tx], nb_dq - nb_tx);
++		}
+ 	}
+ }
+ /* >8 End of transmitting packets from dmadev. */
+@@ -851,6 +853,38 @@ assign_rings(void)
+ }
+ /* >8 End of assigning ring structures for packet exchanging. */
+ 
++static uint32_t
++eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
++{
++	uint32_t overhead_len;
++
++	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
++		overhead_len = max_rx_pktlen - max_mtu;
++	else
++		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
++
++	return overhead_len;
++}
++
++static int
++config_port_max_pkt_len(struct rte_eth_conf *conf,
++		struct rte_eth_dev_info *dev_info)
++{
++	uint32_t overhead_len;
++
++	if (max_frame_size == 0)
++		return 0;
++
++	if (max_frame_size < RTE_ETHER_MIN_LEN)
++		return -1;
++
++	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
++			dev_info->max_mtu);
++	conf->rxmode.mtu = max_frame_size - overhead_len;
++
++	return 0;
++}
++
+ /*
+  * Initializes a given port using global settings and with the RX buffers
+  * coming from the mbuf_pool passed as a parameter.
+@@ -878,9 +912,6 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
+ 	struct rte_eth_dev_info dev_info;
+ 	int ret, i;
+ 
+-	if (max_frame_size > local_port_conf.rxmode.mtu)
+-		local_port_conf.rxmode.mtu = max_frame_size;
+-
+ 	/* Skip ports that are not enabled */
+ 	if ((dma_enabled_port_mask & (1 << portid)) == 0) {
+ 		printf("Skipping disabled port %u\n", portid);
+@@ -895,6 +926,12 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
+ 		rte_exit(EXIT_FAILURE, "Cannot get device info: %s, port=%u\n",
+ 			rte_strerror(-ret), portid);
+ 
++	ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
++	if (ret != 0)
++		rte_exit(EXIT_FAILURE,
++			"Invalid max frame size: %u (port %u)\n",
++			max_frame_size, portid);
++
+ 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ 		dev_info.flow_type_rss_offloads;
+ 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
+@@ -935,25 +972,6 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
+ 			"rte_eth_tx_queue_setup:err=%d,port=%u\n",
+ 			ret, portid);
+ 
+-	/* Initialize TX buffers */
+-	tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+-			RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+-			rte_eth_dev_socket_id(portid));
+-	if (tx_buffer[portid] == NULL)
+-		rte_exit(EXIT_FAILURE,
+-			"Cannot allocate buffer for tx on port %u\n",
+-			portid);
+-
+-	rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
+-
+-	ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
+-		rte_eth_tx_buffer_count_callback,
+-		&port_statistics.tx_dropped[portid]);
+-	if (ret < 0)
+-		rte_exit(EXIT_FAILURE,
+-			"Cannot set error callback for tx buffer on port %u\n",
+-			portid);
+-
+ 	/* Start device. 8< */
+ 	ret = rte_eth_dev_start(portid);
+ 	if (ret < 0)
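The dmafwd.c changes stop treating --max-frame-size as if it were an MTU: the new helpers subtract the port's L2 overhead (reported max_rx_pktlen minus max_mtu when the device provides both, otherwise the standard 14-byte header plus 4-byte CRC) before programming rxmode.mtu. A small worked example of the same rule outside DPDK:

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN 14
#define ETHER_CRC_LEN 4

/* Sketch of the overhead rule used by the helper above. */
static uint32_t
overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		return max_rx_pktlen - max_mtu;	/* device reports both */
	return ETHER_HDR_LEN + ETHER_CRC_LEN;	/* fall back to 14 + 4 */
}

int main(void)
{
	/* 1518-byte frames on a device reporting MTU 1500: overhead 18. */
	printf("mtu=%u\n", 1518 - overhead_len(1518, 1500));
	return 0;
}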
+diff --git a/dpdk/examples/ethtool/lib/rte_ethtool.c b/dpdk/examples/ethtool/lib/rte_ethtool.c
+index 86286d38a6..ffaad96498 100644
+--- a/dpdk/examples/ethtool/lib/rte_ethtool.c
++++ b/dpdk/examples/ethtool/lib/rte_ethtool.c
+@@ -402,7 +402,7 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
+ #endif
+ 	}
+ 
+-	/* Enable Rx vlan filter, VF unspport status is discard */
++	/* Enable Rx vlan filter, VF unsupported status is discard */
+ 	ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
+ 	if (ret != 0)
+ 		return ret;
+diff --git a/dpdk/examples/ethtool/lib/rte_ethtool.h b/dpdk/examples/ethtool/lib/rte_ethtool.h
+index f177096636..d27e0102b1 100644
+--- a/dpdk/examples/ethtool/lib/rte_ethtool.h
++++ b/dpdk/examples/ethtool/lib/rte_ethtool.h
+@@ -189,7 +189,7 @@ int rte_ethtool_get_module_eeprom(uint16_t port_id,
+ 
+ /**
+  * Retrieve the Ethernet device pause frame configuration according to
+- * parameter attributes desribed by ethtool data structure,
++ * parameter attributes described by ethtool data structure,
+  * ethtool_pauseparam.
+  *
+  * @param port_id
+@@ -209,7 +209,7 @@ int rte_ethtool_get_pauseparam(uint16_t port_id,
+ 
+ /**
+  * Setting the Ethernet device pause frame configuration according to
+- * parameter attributes desribed by ethtool data structure, ethtool_pauseparam.
++ * parameter attributes described by ethtool data structure, ethtool_pauseparam.
+  *
+  * @param port_id
+  *   The port identifier of the Ethernet device.
+diff --git a/dpdk/examples/fips_validation/fips_validation.c b/dpdk/examples/fips_validation/fips_validation.c
+index 52a7bf952d..511c176a2a 100644
+--- a/dpdk/examples/fips_validation/fips_validation.c
++++ b/dpdk/examples/fips_validation/fips_validation.c
+@@ -522,7 +522,7 @@ parse_uint8_hex_str(const char *key, char *src, struct fips_val *val)
+ 		val->val = NULL;
+ 	}
+ 
+-	val->val = rte_zmalloc(NULL, len, 0);
++	val->val = rte_zmalloc(NULL, len + 1, 0);
+ 	if (!val->val)
+ 		return -ENOMEM;
+ 
+diff --git a/dpdk/examples/flow_classify/flow_classify.c b/dpdk/examples/flow_classify/flow_classify.c
+index 6185b34060..97708b7084 100644
+--- a/dpdk/examples/flow_classify/flow_classify.c
++++ b/dpdk/examples/flow_classify/flow_classify.c
+@@ -430,7 +430,7 @@ parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
+ 			&ntuple_filter->dst_ip,
+ 			&ntuple_filter->dst_ip_mask);
+ 	if (ret != 0) {
+-		flow_classify_log("failed to read source address/mask: %s\n",
++		flow_classify_log("failed to read destination address/mask: %s\n",
+ 			in[CB_FLD_DST_ADDR]);
+ 		return ret;
+ 	}
+diff --git a/dpdk/examples/ip_reassembly/main.c b/dpdk/examples/ip_reassembly/main.c
+index fb3cac3bd0..6e4c11c3c7 100644
+--- a/dpdk/examples/ip_reassembly/main.c
++++ b/dpdk/examples/ip_reassembly/main.c
+@@ -244,7 +244,7 @@ static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
+ #endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
+ 
+ /*
+- * If number of queued packets reached given threahold, then
++ * If number of queued packets reached given threshold, then
+  * send burst of packets on an output interface.
+  */
+ static inline uint32_t
+@@ -873,11 +873,11 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
+ 
+ 	/*
+ 	 * At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
+-	 * mbufs could be stored int the fragment table.
++	 * mbufs could be stored in the fragment table.
+ 	 * Plus, each TX queue can hold up to <max_flow_num> packets.
+ 	 */
+ 
+-	/* mbufs stored int the gragment table. 8< */
++	/* mbufs stored in the fragment table. 8< */
+ 	nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
+ 	nb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ 			+ BUF_SIZE - 1) / BUF_SIZE;
+@@ -895,7 +895,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
+ 			"rte_pktmbuf_pool_create(%s) failed", buf);
+ 		return -1;
+ 	}
+-	/* >8 End of mbufs stored int the fragmentation table. */
++	/* >8 End of mbufs stored in the fragmentation table. */
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/examples/ipsec-secgw/event_helper.c b/dpdk/examples/ipsec-secgw/event_helper.c
+index e8600f5e90..8947e41803 100644
+--- a/dpdk/examples/ipsec-secgw/event_helper.c
++++ b/dpdk/examples/ipsec-secgw/event_helper.c
+@@ -716,6 +716,16 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
+ 		}
+ 	}
+ 
++	return 0;
++}
++
++static int
++eh_start_eventdev(struct eventmode_conf *em_conf)
++{
++	struct eventdev_params *eventdev_config;
++	int nb_eventdev = em_conf->nb_eventdev;
++	int i, ret;
++
+ 	/* Start event devices */
+ 	for (i = 0; i < nb_eventdev; i++) {
+ 
+@@ -1353,7 +1363,7 @@ eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
+ 	for (i = 0; i < nb_rx_adapter; i++) {
+ 		adapter = &(em_conf->rx_adapter[i]);
+ 		sprintf(print_buf,
+-			"\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
++			"\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
+ 			adapter->adapter_id,
+ 			adapter->nb_connections,
+ 			adapter->eventdev_id);
+@@ -1688,6 +1698,13 @@ eh_devs_init(struct eh_conf *conf)
+ 		return ret;
+ 	}
+ 
++	/* Start eventdev */
++	ret = eh_start_eventdev(em_conf);
++	if (ret < 0) {
++		EH_LOG_ERR("Failed to start event dev %d", ret);
++		return ret;
++	}
++
+ 	/* Start eth devices after setting up adapter */
+ 	RTE_ETH_FOREACH_DEV(port_id) {
+ 
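The event_helper.c split defers rte_event_dev_start() into its own eh_start_eventdev() step so the device is started only after the Rx/Tx adapters have been set up, presumably because adapter setup belongs to the configuration phase and must precede device start. A sketch of the resulting bring-up order:

#include <stdint.h>
#include <rte_eventdev.h>

/*
 * Sketch of the bring-up order after this change:
 *   1. rte_event_dev_configure() plus queue/port setup
 *   2. Rx/Tx adapter creation and queue binding
 *   3. rte_event_dev_start()   (the new eh_start_eventdev() step)
 *   4. rte_eth_dev_start() for each ethdev
 */
static int
start_eventdevs(const uint8_t *dev_ids, int nb_eventdev)
{
	int i, ret;

	for (i = 0; i < nb_eventdev; i++) {
		ret = rte_event_dev_start(dev_ids[i]);
		if (ret < 0)
			return ret;	/* caller logs and bails out */
	}
	return 0;
}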
+diff --git a/dpdk/examples/ipsec-secgw/flow.c b/dpdk/examples/ipsec-secgw/flow.c
+index 1a1ec7861c..c217b9e475 100644
+--- a/dpdk/examples/ipsec-secgw/flow.c
++++ b/dpdk/examples/ipsec-secgw/flow.c
+@@ -214,7 +214,7 @@ flow_init_single(struct flow_rule_entry *rule)
+ 	struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN] = {};
+ 	struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS] = {};
+ 	struct rte_flow_attr attr = {};
+-	struct rte_flow_error err;
++	struct rte_flow_error err = {};
+ 	int ret;
+ 
+ 	attr.egress = 0;
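Zero-initializing struct rte_flow_error here (and in ipsec.c below) matters because drivers are only obliged to fill the structure in on failure; without the initializer, a caller that inspects err after a successful call would read indeterminate stack memory. The safe calling pattern in isolation:

#include <stdint.h>
#include <stdio.h>
#include <rte_flow.h>

/* Sketch: only trust rte_flow_error contents after a failure. */
static struct rte_flow *
create_rule(uint16_t port_id, const struct rte_flow_attr *attr,
	    const struct rte_flow_item pattern[],
	    const struct rte_flow_action actions[])
{
	struct rte_flow_error err = {};	/* defined state even if unset */
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (flow == NULL)
		fprintf(stderr, "flow create failed: %s\n",
			err.message ? err.message : "(no message)");
	return flow;
}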
+diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c
+index bf3dbf6b5c..a1faff6a59 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c
++++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.c
+@@ -169,7 +169,7 @@ uint32_t single_sa_idx;
+ /* mask of enabled ports */
+ static uint32_t enabled_port_mask;
+ static uint64_t enabled_cryptodev_mask = UINT64_MAX;
+-static int32_t promiscuous_on = 1;
++static int32_t promiscuous_on;
+ static int32_t numa_on = 1; /**< NUMA is enabled by default. */
+ static uint32_t nb_lcores;
+ static uint32_t single_sa;
+@@ -265,7 +265,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];
+ /*
+  * Determine is multi-segment support required:
+  *  - either frame buffer size is smaller then mtu
+- *  - or reassmeble support is requested
++ *  - or reassemble support is requested
+  */
+ static int
+ multi_seg_required(void)
+@@ -2050,7 +2050,7 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
+ 
+ 	ret = rte_hash_add_key_data(map, &key, (void *)i);
+ 	if (ret < 0) {
+-		printf("Faled to insert cdev mapping for (lcore %u, "
++		printf("Failed to insert cdev mapping for (lcore %u, "
+ 				"cdev %u, qp %u), errno %d\n",
+ 				key.lcore_id, ipsec_ctx->tbl[i].id,
+ 				ipsec_ctx->tbl[i].qp, ret);
+@@ -2083,7 +2083,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
+ 		str = "Inbound";
+ 	}
+ 
+-	/* Required cryptodevs with operation chainning */
++	/* Required cryptodevs with operation chaining */
+ 	if (!(dev_info->feature_flags &
+ 				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
+ 		return ret;
+@@ -2251,7 +2251,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
+ 			"Error during getting device (port %u) info: %s\n",
+ 			portid, strerror(-ret));
+ 
+-	/* limit allowed HW offloafs, as user requested */
++	/* limit allowed HW offloads, as user requested */
+ 	dev_info.rx_offload_capa &= dev_rx_offload;
+ 	dev_info.tx_offload_capa &= dev_tx_offload;
+ 
+@@ -2298,7 +2298,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
+ 			local_port_conf.rxmode.offloads)
+ 		rte_exit(EXIT_FAILURE,
+ 			"Error: port %u required RX offloads: 0x%" PRIx64
+-			", avaialbe RX offloads: 0x%" PRIx64 "\n",
++			", available RX offloads: 0x%" PRIx64 "\n",
+ 			portid, local_port_conf.rxmode.offloads,
+ 			dev_info.rx_offload_capa);
+ 
+@@ -2306,7 +2306,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
+ 			local_port_conf.txmode.offloads)
+ 		rte_exit(EXIT_FAILURE,
+ 			"Error: port %u required TX offloads: 0x%" PRIx64
+-			", avaialbe TX offloads: 0x%" PRIx64 "\n",
++			", available TX offloads: 0x%" PRIx64 "\n",
+ 			portid, local_port_conf.txmode.offloads,
+ 			dev_info.tx_offload_capa);
+ 
+@@ -2317,7 +2317,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
+ 	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ 		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ 
+-	printf("port %u configurng rx_offloads=0x%" PRIx64
++	printf("port %u configuring rx_offloads=0x%" PRIx64
+ 		", tx_offloads=0x%" PRIx64 "\n",
+ 		portid, local_port_conf.rxmode.offloads,
+ 		local_port_conf.txmode.offloads);
+@@ -3379,13 +3379,14 @@ main(int32_t argc, char **argv)
+ 		if ((enabled_port_mask & (1 << portid)) == 0)
+ 			continue;
+ 
+-		/* Create flow before starting the device */
+-		create_default_ipsec_flow(portid, req_rx_offloads[portid]);
+-
+ 		ret = rte_eth_dev_start(portid);
+ 		if (ret < 0)
+ 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
+ 					"err=%d, port=%d\n", ret, portid);
++
++		/* Create flow after starting the device */
++		create_default_ipsec_flow(portid, req_rx_offloads[portid]);
++
+ 		/*
+ 		 * If enabled, put device in promiscuous mode.
+ 		 * This allows IO forwarding mode to forward packets
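The main-loop reordering above installs the default inline-IPsec flow only after rte_eth_dev_start(), matching PMDs that reject or drop rules created on a stopped port. A compact sketch of the new order (create_default_ipsec_flow() is the application's own helper, referenced in the hunk):

#include <stdint.h>
#include <rte_ethdev.h>

void create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads);

/* Sketch: start the port first, then install the default flow rule. */
static int
secgw_port_bring_up(uint16_t port_id, uint64_t rx_offloads)
{
	int ret = rte_eth_dev_start(port_id);

	if (ret < 0)
		return ret;
	create_default_ipsec_flow(port_id, rx_offloads);
	return 0;
}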
+diff --git a/dpdk/examples/ipsec-secgw/ipsec.c b/dpdk/examples/ipsec-secgw/ipsec.c
+index 2d4a26c962..b66ff2b650 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec.c
++++ b/dpdk/examples/ipsec-secgw/ipsec.c
+@@ -496,7 +496,7 @@ int
+ create_ipsec_esp_flow(struct ipsec_sa *sa)
+ {
+ 	int ret = 0;
+-	struct rte_flow_error err;
++	struct rte_flow_error err = {};
+ 	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ 		RTE_LOG(ERR, IPSEC,
+ 			"No Flow director rule for Egress traffic\n");
+diff --git a/dpdk/examples/ipsec-secgw/ipsec_process.c b/dpdk/examples/ipsec-secgw/ipsec_process.c
+index 3fc4b3a84f..285e9c7f4c 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec_process.c
++++ b/dpdk/examples/ipsec-secgw/ipsec_process.c
+@@ -232,8 +232,10 @@ prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+ 					mb[j]->outer_l3_len = mb[j]->l3_len;
+ 					mb[j]->outer_l2_len = mb[j]->l2_len;
+ 					mb[j]->ol_flags |=
+-					(RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+-						RTE_MBUF_F_TX_TUNNEL_ESP);
++						RTE_MBUF_F_TX_TUNNEL_ESP;
++					if (RTE_ETH_IS_IPV4_HDR(ptype))
++						mb[j]->ol_flags |=
++						RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ 				}
+ 				mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
+ 				mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
+diff --git a/dpdk/examples/ipsec-secgw/ipsec_worker.c b/dpdk/examples/ipsec-secgw/ipsec_worker.c
+index 7419e85db2..5fe91b62e4 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec_worker.c
++++ b/dpdk/examples/ipsec-secgw/ipsec_worker.c
+@@ -205,12 +205,16 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
+ 			ip->pkts[j++] = m;
+ 		else {
+ 			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
+-			if (sa == NULL)
++			if (sa == NULL) {
+ 				free_pkts(&m, 1);
++				continue;
++			}
+ 
+ 			/* SPI on the packet should match with the one in SA */
+-			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi))
++			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
+ 				free_pkts(&m, 1);
++				continue;
++			}
+ 
+ 			ip->pkts[j++] = m;
+ 		}
+@@ -535,6 +539,7 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
+ 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
+ 				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+ 				free_pkts(&pkt, 1);
++				continue;
+ 			}
+ 			rte_security_set_pkt_metadata(sess->security.ctx,
+ 						sess->security.ses, pkt, NULL);
+@@ -693,11 +698,13 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+ 		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
+ 						       &lconf->rt, vec);
+ 
+-	if (ret > 0) {
++	if (likely(ret > 0)) {
+ 		vec->nb_elem = ret;
+ 		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ 						 links[0].event_port_id,
+ 						 ev, 1, 0);
++	} else {
++		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+ 	}
+ }
+ 
+@@ -718,6 +725,8 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
+ 		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ 						 links[0].event_port_id,
+ 						 ev, 1, 0);
++	else
++		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+ }
+ 
+ /*
+diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c
+index 30bc693e06..89131f71e5 100644
+--- a/dpdk/examples/ipsec-secgw/sa.c
++++ b/dpdk/examples/ipsec-secgw/sa.c
+@@ -897,7 +897,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
+ 			continue;
+ 		}
+ 
+-		/* unrecognizeable input */
++		/* unrecognizable input */
+ 		APP_CHECK(0, status, "unrecognized input \"%s\"",
+ 			tokens[ti]);
+ 		return;
+@@ -1145,7 +1145,7 @@ get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
+ 	if (rc4 >= 0) {
+ 		if (rc6 >= 0) {
+ 			RTE_LOG(ERR, IPSEC,
+-				"%s: SPI %u used simultaeously by "
++				"%s: SPI %u used simultaneously by "
+ 				"IPv4(%d) and IPv6 (%d) SP rules\n",
+ 				__func__, spi, rc4, rc6);
+ 			return -EINVAL;
+@@ -1458,9 +1458,16 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
+ 		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
+ 		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
+ 	prm->ipsec_xform.options.udp_encap = ss->udp_encap;
++	prm->ipsec_xform.udp.dport = ss->udp.dport;
++	prm->ipsec_xform.udp.sport = ss->udp.sport;
+ 	prm->ipsec_xform.options.ecn = 1;
+ 	prm->ipsec_xform.options.copy_dscp = 1;
+ 
++	if (ss->esn > 0) {
++		prm->ipsec_xform.options.esn = 1;
++		prm->ipsec_xform.esn.value = ss->esn;
++	}
++
+ 	if (IS_IP4_TUNNEL(ss->flags)) {
+ 		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
+ 		prm->tun.hdr_len = sizeof(*v4);
+@@ -1513,13 +1520,13 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
+ 		.version_ihl = IPVERSION << 4 |
+ 			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
+ 		.time_to_live = IPDEFTTL,
+-		.next_proto_id = IPPROTO_ESP,
++		.next_proto_id = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
+ 		.src_addr = lsa->src.ip.ip4,
+ 		.dst_addr = lsa->dst.ip.ip4,
+ 	};
+ 	struct rte_ipv6_hdr v6 = {
+ 		.vtc_flow = htonl(IP6_VERSION << 28),
+-		.proto = IPPROTO_ESP,
++		.proto = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
+ 	};
+ 
+ 	if (IS_IP6_TUNNEL(lsa->flags)) {
+@@ -1550,7 +1557,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
+ }
+ 
+ /*
+- * Allocate space and init rte_ipsec_sa strcutures,
++ * Allocate space and init rte_ipsec_sa structures,
+  * one per session.
+  */
+ static int
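The sa.c hunks above propagate the UDP encapsulation ports and the configured ESN value into rte_ipsec_sa_prm, and make the prebuilt tunnel headers advertise IPPROTO_UDP instead of IPPROTO_ESP when UDP encapsulation is enabled. A hedged sketch of the selection logic, with a hypothetical config struct standing in for the application's SA:

#include <netinet/in.h>
#include <stdint.h>

struct sa_cfg {		/* hypothetical, mirrors the fields used above */
	int udp_encap;
	uint16_t udp_sport, udp_dport;
	uint64_t esn;
};

/* Sketch: what the SA parameters gain from the hunks above. */
static void
fill_tunnel_params(const struct sa_cfg *sa, uint8_t *next_proto_id,
		   uint16_t *sport, uint16_t *dport,
		   int *esn_opt, uint64_t *esn_val)
{
	/* ESP-in-UDP: the outer header carries UDP, ports come from the SA. */
	*next_proto_id = sa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP;
	*sport = sa->udp_sport;
	*dport = sa->udp_dport;
	/* A nonzero configured ESN seeds the sequence number. */
	*esn_opt = sa->esn > 0;
	*esn_val = sa->esn;
}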
+diff --git a/dpdk/examples/ipsec-secgw/sp4.c b/dpdk/examples/ipsec-secgw/sp4.c
+index beddd7bc1d..fc4101a4a2 100644
+--- a/dpdk/examples/ipsec-secgw/sp4.c
++++ b/dpdk/examples/ipsec-secgw/sp4.c
+@@ -410,7 +410,7 @@ parse_sp4_tokens(char **tokens, uint32_t n_tokens,
+ 			continue;
+ 		}
+ 
+-		/* unrecognizeable input */
++		/* unrecognizable input */
+ 		APP_CHECK(0, status, "unrecognized input \"%s\"",
+ 			tokens[ti]);
+ 		return;
+diff --git a/dpdk/examples/ipsec-secgw/sp6.c b/dpdk/examples/ipsec-secgw/sp6.c
+index 328e085288..cce4da7862 100644
+--- a/dpdk/examples/ipsec-secgw/sp6.c
++++ b/dpdk/examples/ipsec-secgw/sp6.c
+@@ -515,7 +515,7 @@ parse_sp6_tokens(char **tokens, uint32_t n_tokens,
+ 			continue;
+ 		}
+ 
+-		/* unrecognizeable input */
++		/* unrecognizable input */
+ 		APP_CHECK(0, status, "unrecognized input \"%s\"",
+ 			tokens[ti]);
+ 		return;
+diff --git a/dpdk/examples/ipsec-secgw/test/common_defs.sh b/dpdk/examples/ipsec-secgw/test/common_defs.sh
+index f22eb3ab12..3ef06bc761 100644
+--- a/dpdk/examples/ipsec-secgw/test/common_defs.sh
++++ b/dpdk/examples/ipsec-secgw/test/common_defs.sh
+@@ -20,7 +20,7 @@ REMOTE_MAC=`ssh ${REMOTE_HOST} ip addr show dev ${REMOTE_IFACE}`
+ st=$?
+ REMOTE_MAC=`echo ${REMOTE_MAC} | sed -e 's/^.*ether //' -e 's/ brd.*$//'`
+ if [[ $st -ne 0 || -z "${REMOTE_MAC}" ]]; then
+-	echo "coouldn't retrieve ether addr from ${REMOTE_IFACE}"
++	echo "couldn't retrieve ether addr from ${REMOTE_IFACE}"
+ 	exit 127
+ fi
+ 
+@@ -40,7 +40,7 @@ DPDK_VARS=""
+ 
+ # by default ipsec-secgw can't deal with multi-segment packets
+ # make sure our local/remote host wouldn't generate fragmented packets
+-# if reassmebly option is not enabled
++# if reassembly option is not enabled
+ DEF_MTU_LEN=1400
+ DEF_PING_LEN=1200
+ 
+diff --git a/dpdk/examples/kni/main.c b/dpdk/examples/kni/main.c
+index d324ee2241..e99ef5c38a 100644
+--- a/dpdk/examples/kni/main.c
++++ b/dpdk/examples/kni/main.c
+@@ -719,7 +719,7 @@ log_link_state(struct rte_kni *kni, int prev, struct rte_eth_link *link)
+ 
+ 	rte_eth_link_to_str(link_status_text, sizeof(link_status_text), link);
+ 	if (prev != link->link_status)
+-		RTE_LOG(INFO, APP, "%s NIC %s",
++		RTE_LOG(INFO, APP, "%s NIC %s\n",
+ 			rte_kni_get_name(kni),
+ 			link_status_text);
+ }
+@@ -1039,7 +1039,7 @@ main(int argc, char** argv)
+ 	pthread_t kni_link_tid;
+ 	int pid;
+ 
+-	/* Associate signal_hanlder function with USR signals */
++	/* Associate signal_handler function with USR signals */
+ 	signal(SIGUSR1, signal_handler);
+ 	signal(SIGUSR2, signal_handler);
+ 	signal(SIGRTMIN, signal_handler);
+diff --git a/dpdk/examples/l2fwd-cat/l2fwd-cat.c b/dpdk/examples/l2fwd-cat/l2fwd-cat.c
+index d9cf00c9df..6e16705e99 100644
+--- a/dpdk/examples/l2fwd-cat/l2fwd-cat.c
++++ b/dpdk/examples/l2fwd-cat/l2fwd-cat.c
+@@ -157,7 +157,7 @@ main(int argc, char *argv[])
+ 	int ret = rte_eal_init(argc, argv);
+ 	if (ret < 0)
+ 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+-	/* >8 End of initializion the Environment Abstraction Layer (EAL). */
++	/* >8 End of initialization the Environment Abstraction Layer (EAL). */
+ 
+ 	argc -= ret;
+ 	argv += ret;
+diff --git a/dpdk/examples/l2fwd-crypto/main.c b/dpdk/examples/l2fwd-crypto/main.c
+index 4d9f8861af..b1e2613ccf 100644
+--- a/dpdk/examples/l2fwd-crypto/main.c
++++ b/dpdk/examples/l2fwd-crypto/main.c
+@@ -252,11 +252,9 @@ struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+ struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
+ 
+ /* A tsc-based timer responsible for triggering statistics printout */
+-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
++#define TIMER_MILLISECOND (rte_get_tsc_hz() / 1000)
+ #define MAX_TIMER_PERIOD 86400UL /* 1 day max */
+-
+-/* default period is 10 seconds */
+-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
++#define DEFAULT_TIMER_PERIOD 10UL
+ 
+ /* Print out statistics on packets dropped */
+ static void
+@@ -894,18 +892,17 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
+ 			}
+ 
+ 			/* if timer is enabled */
+-			if (timer_period > 0) {
++			if (options->refresh_period > 0) {
+ 
+ 				/* advance the timer */
+ 				timer_tsc += diff_tsc;
+ 
+ 				/* if timer has reached its timeout */
+ 				if (unlikely(timer_tsc >=
+-						(uint64_t)timer_period)) {
++						options->refresh_period)) {
+ 
+ 					/* do this only on main core */
+-					if (lcore_id == rte_get_main_lcore()
+-						&& options->refresh_period) {
++					if (lcore_id == rte_get_main_lcore()) {
+ 						print_stats();
+ 						timer_tsc = 0;
+ 					}
+@@ -1481,7 +1478,8 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
+ {
+ 	options->portmask = 0xffffffff;
+ 	options->nb_ports_per_lcore = 1;
+-	options->refresh_period = 10000;
++	options->refresh_period = DEFAULT_TIMER_PERIOD *
++					TIMER_MILLISECOND * 1000;
+ 	options->single_lcore = 0;
+ 	options->sessionless = 0;
+ 
+@@ -2719,7 +2717,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
+ 			last_portid = portid;
+ 		}
+ 
+-		l2fwd_enabled_port_mask |= (1 << portid);
++		l2fwd_enabled_port_mask |= (1ULL << portid);
+ 		enabled_portcount++;
+ 	}
+ 
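The l2fwd-crypto hunk replaces a hardcoded "2000000 cycles is about 1 ms at 2 GHz" constant with rte_get_tsc_hz() / 1000, so the statistics period is correct at any clock rate. Note the value must be computed at runtime, after rte_eal_init(), since the TSC frequency is not known at static-initialization time. A sketch of the conversion:

#include <stdint.h>
#include <rte_cycles.h>

/* Sketch: convert a period in seconds to TSC cycles at runtime. */
static uint64_t
seconds_to_tsc(uint64_t seconds)
{
	uint64_t cycles_per_ms = rte_get_tsc_hz() / 1000;

	return seconds * cycles_per_ms * 1000;
}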
+diff --git a/dpdk/examples/l2fwd-event/l2fwd_event_generic.c b/dpdk/examples/l2fwd-event/l2fwd_event_generic.c
+index f31569a744..1977e23261 100644
+--- a/dpdk/examples/l2fwd-event/l2fwd_event_generic.c
++++ b/dpdk/examples/l2fwd-event/l2fwd_event_generic.c
+@@ -42,7 +42,7 @@ l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
+ 		ethdev_count++;
+ 	}
+ 
+-	/* Event device configurtion */
++	/* Event device configuration */
+ 	rte_event_dev_info_get(event_d_id, &dev_info);
+ 
+ 	/* Enable implicit release */
+diff --git a/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c b/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c
+index 86d772d817..717a7bceb8 100644
+--- a/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c
++++ b/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c
+@@ -40,7 +40,7 @@ l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
+ 		ethdev_count++;
+ 	}
+ 
+-	/* Event device configurtion */
++	/* Event device configuration */
+ 	rte_event_dev_info_get(event_d_id, &dev_info);
+ 
+ 	/* Enable implicit release */
+diff --git a/dpdk/examples/l2fwd-jobstats/main.c b/dpdk/examples/l2fwd-jobstats/main.c
+index d8eabe4c86..9e71ba2d4e 100644
+--- a/dpdk/examples/l2fwd-jobstats/main.c
++++ b/dpdk/examples/l2fwd-jobstats/main.c
+@@ -468,7 +468,7 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
+ 		qconf->next_flush_time[portid] = rte_get_timer_cycles() + drain_tsc;
+ 	}
+ 
+-	/* Pass target to indicate that this job is happy of time interwal
++	/* Pass target to indicate that this job is happy with the time interval
+ 	 * in which it was called. */
+ 	rte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);
+ }
+diff --git a/dpdk/examples/l3fwd-acl/main.c b/dpdk/examples/l3fwd-acl/main.c
+index 1fb1807235..2d2ecc7635 100644
+--- a/dpdk/examples/l3fwd-acl/main.c
++++ b/dpdk/examples/l3fwd-acl/main.c
+@@ -801,8 +801,8 @@ send_packets(struct rte_mbuf **m, uint32_t *res, int num)
+ }
+ 
+ /*
+- * Parses IPV6 address, exepcts the following format:
+- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
++ * Parse IPv6 address, expects the following format:
++ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X is a hexadecimal digit).
+  */
+ static int
+ parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
+@@ -1959,7 +1959,7 @@ check_all_ports_link_status(uint32_t port_mask)
+ }
+ 
+ /*
+- * build-up default vaues for dest MACs.
++ * Build up default values for dest MACs.
+  */
+ static void
+ set_default_dest_mac(void)
+diff --git a/dpdk/examples/l3fwd-power/main.c b/dpdk/examples/l3fwd-power/main.c
+index b8b3be2b8a..20e5b59af9 100644
+--- a/dpdk/examples/l3fwd-power/main.c
++++ b/dpdk/examples/l3fwd-power/main.c
+@@ -433,7 +433,7 @@ signal_exit_now(int sigtype)
+ 
+ }
+ 
+-/*  Freqency scale down timer callback */
++/*  Frequency scale down timer callback */
+ static void
+ power_timer_cb(__rte_unused struct rte_timer *tim,
+ 			  __rte_unused void *arg)
+@@ -2358,7 +2358,7 @@ update_telemetry(__rte_unused struct rte_timer *tim,
+ 	ret = rte_metrics_update_values(RTE_METRICS_GLOBAL, telstats_index,
+ 					values, RTE_DIM(values));
+ 	if (ret < 0)
+-		RTE_LOG(WARNING, POWER, "failed to update metrcis\n");
++		RTE_LOG(WARNING, POWER, "failed to update metrics\n");
+ }
+ 
+ static int
+diff --git a/dpdk/examples/l3fwd/l3fwd.h b/dpdk/examples/l3fwd/l3fwd.h
+index 38ca19133c..4ccdb28b4a 100644
+--- a/dpdk/examples/l3fwd/l3fwd.h
++++ b/dpdk/examples/l3fwd/l3fwd.h
+@@ -81,6 +81,10 @@ struct lcore_conf {
+ 
+ extern volatile bool force_quit;
+ 
++/* RX and TX queue depths */
++extern uint16_t nb_rxd;
++extern uint16_t nb_txd;
++
+ /* ethernet addresses of ports */
+ extern uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
+ extern struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
+diff --git a/dpdk/examples/l3fwd/l3fwd_altivec.h b/dpdk/examples/l3fwd/l3fwd_altivec.h
+index ed9b0b8f25..88fb41843b 100644
+--- a/dpdk/examples/l3fwd/l3fwd_altivec.h
++++ b/dpdk/examples/l3fwd/l3fwd_altivec.h
+@@ -17,50 +17,50 @@
+ static inline void
+ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+ {
+-	vector unsigned int te[FWDSTEP];
+-	vector unsigned int ve[FWDSTEP];
+-	vector unsigned int *p[FWDSTEP];
++	__vector unsigned int te[FWDSTEP];
++	__vector unsigned int ve[FWDSTEP];
++	__vector unsigned int *p[FWDSTEP];
+ 
+-	p[0] = rte_pktmbuf_mtod(pkt[0], vector unsigned int *);
+-	p[1] = rte_pktmbuf_mtod(pkt[1], vector unsigned int *);
+-	p[2] = rte_pktmbuf_mtod(pkt[2], vector unsigned int *);
+-	p[3] = rte_pktmbuf_mtod(pkt[3], vector unsigned int *);
++	p[0] = rte_pktmbuf_mtod(pkt[0], __vector unsigned int *);
++	p[1] = rte_pktmbuf_mtod(pkt[1], __vector unsigned int *);
++	p[2] = rte_pktmbuf_mtod(pkt[2], __vector unsigned int *);
++	p[3] = rte_pktmbuf_mtod(pkt[3], __vector unsigned int *);
+ 
+-	ve[0] = (vector unsigned int)val_eth[dst_port[0]];
++	ve[0] = (__vector unsigned int)val_eth[dst_port[0]];
+ 	te[0] = *p[0];
+ 
+-	ve[1] = (vector unsigned int)val_eth[dst_port[1]];
++	ve[1] = (__vector unsigned int)val_eth[dst_port[1]];
+ 	te[1] = *p[1];
+ 
+-	ve[2] = (vector unsigned int)val_eth[dst_port[2]];
++	ve[2] = (__vector unsigned int)val_eth[dst_port[2]];
+ 	te[2] = *p[2];
+ 
+-	ve[3] = (vector unsigned int)val_eth[dst_port[3]];
++	ve[3] = (__vector unsigned int)val_eth[dst_port[3]];
+ 	te[3] = *p[3];
+ 
+ 	/* Update first 12 bytes, keep rest bytes intact. */
+-	te[0] = (vector unsigned int)vec_sel(
+-			(vector unsigned short)ve[0],
+-			(vector unsigned short)te[0],
+-			(vector unsigned short) {0, 0, 0, 0,
++	te[0] = (__vector unsigned int)vec_sel(
++			(__vector unsigned short)ve[0],
++			(__vector unsigned short)te[0],
++			(__vector unsigned short) {0, 0, 0, 0,
+ 						0, 0, 0xffff, 0xffff});
+ 
+-	te[1] = (vector unsigned int)vec_sel(
+-			(vector unsigned short)ve[1],
+-			(vector unsigned short)te[1],
+-			(vector unsigned short) {0, 0, 0, 0,
++	te[1] = (__vector unsigned int)vec_sel(
++			(__vector unsigned short)ve[1],
++			(__vector unsigned short)te[1],
++			(__vector unsigned short) {0, 0, 0, 0,
+ 						0, 0, 0xffff, 0xffff});
+ 
+-	te[2] = (vector unsigned int)vec_sel(
+-			(vector unsigned short)ve[2],
+-			(vector unsigned short)te[2],
+-			(vector unsigned short) {0, 0, 0, 0, 0,
++	te[2] = (__vector unsigned int)vec_sel(
++			(__vector unsigned short)ve[2],
++			(__vector unsigned short)te[2],
++			(__vector unsigned short) {0, 0, 0, 0, 0,
+ 						0, 0xffff, 0xffff});
+ 
+-	te[3] = (vector unsigned int)vec_sel(
+-			(vector unsigned short)ve[3],
+-			(vector unsigned short)te[3],
+-			(vector unsigned short) {0, 0, 0, 0,
++	te[3] = (__vector unsigned int)vec_sel(
++			(__vector unsigned short)ve[3],
++			(__vector unsigned short)te[3],
++			(__vector unsigned short) {0, 0, 0, 0,
+ 						0, 0, 0xffff, 0xffff});
+ 
+ 	*p[0] = te[0];
+@@ -91,8 +91,9 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+  * This mask is used as an index into prebuild array of pnum values.
+  */
+ static inline uint16_t *
+-port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, vector unsigned short dp1,
+-	vector unsigned short dp2)
++port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp,
++		__vector unsigned short dp1,
++		__vector unsigned short dp2)
+ {
+ 	union {
+ 		uint16_t u16[FWDSTEP + 1];
+@@ -126,24 +127,24 @@ static inline void
+ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
+ {
+ 	struct rte_ether_hdr *eth_hdr;
+-	vector unsigned int te, ve;
++	__vector unsigned int te, ve;
+ 
+ 	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+ 
+-	te = *(vector unsigned int *)eth_hdr;
+-	ve = (vector unsigned int)val_eth[dst_port[0]];
++	te = *(__vector unsigned int *)eth_hdr;
++	ve = (__vector unsigned int)val_eth[dst_port[0]];
+ 
+ 	rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
+ 			pkt->packet_type);
+ 
+ 	/* dynamically vec_sel te and ve for MASK_ETH (0x3f) */
+-	te = (vector unsigned int)vec_sel(
+-		(vector unsigned short)ve,
+-		(vector unsigned short)te,
+-		(vector unsigned short){0, 0, 0, 0,
++	te = (__vector unsigned int)vec_sel(
++		(__vector unsigned short)ve,
++		(__vector unsigned short)te,
++		(__vector unsigned short){0, 0, 0, 0,
+ 					0, 0, 0xffff, 0xffff});
+ 
+-	*(vector unsigned int *)eth_hdr = te;
++	*(__vector unsigned int *)eth_hdr = te;
+ }
+ 
+ /**
+@@ -165,7 +166,7 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ 	 */
+ 	k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+ 	if (k != 0) {
+-		vector unsigned short dp1, dp2;
++		__vector unsigned short dp1, dp2;
+ 
+ 		lp = pnum;
+ 		lp[0] = 1;
+@@ -173,7 +174,7 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ 		processx4_step3(pkts_burst, dst_port);
+ 
+ 		/* dp1: <d[0], d[1], d[2], d[3], ... > */
+-		dp1 = *(vector unsigned short *)dst_port;
++		dp1 = *(__vector unsigned short *)dst_port;
+ 
+ 		for (j = FWDSTEP; j != k; j += FWDSTEP) {
+ 			processx4_step3(&pkts_burst[j], &dst_port[j]);
+@@ -182,7 +183,7 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ 			 * dp2:
+ 			 * <d[j-3], d[j-2], d[j-1], d[j], ... >
+ 			 */
+-			dp2 = *((vector unsigned short *)
++			dp2 = *((__vector unsigned short *)
+ 					&dst_port[j - FWDSTEP + 1]);
+ 			lp  = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+ 
+@@ -190,7 +191,7 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ 			 * dp1:
+ 			 * <d[j], d[j+1], d[j+2], d[j+3], ... >
+ 			 */
+-			dp1 = vec_sro(dp2, (vector unsigned char) {
++			dp1 = vec_sro(dp2, (__vector unsigned char) {
+ 				0, 0, 0, 0, 0, 0, 0, 0,
+ 				0, 0, 0, (FWDSTEP - 1) * sizeof(dst_port[0])});
+ 		}
+@@ -198,8 +199,8 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ 		/*
+ 		 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
+ 		 */
+-		dp2 = vec_perm(dp1, (vector unsigned short){},
+-				(vector unsigned char){0xf9});
++		dp2 = vec_perm(dp1, (__vector unsigned short){},
++				(__vector unsigned char){0xf9});
+ 		lp  = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+ 
+ 		/*
+diff --git a/dpdk/examples/l3fwd/l3fwd_common.h b/dpdk/examples/l3fwd/l3fwd_common.h
+index 7d83ff641a..8e4c27218f 100644
+--- a/dpdk/examples/l3fwd/l3fwd_common.h
++++ b/dpdk/examples/l3fwd/l3fwd_common.h
+@@ -51,7 +51,7 @@ rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+ #endif /* DO_RFC_1812_CHECKS */
+ 
+ /*
+- * We group consecutive packets with the same destionation port into one burst.
++ * We group consecutive packets with the same destination port into one burst.
+  * To avoid extra latency this is done together with some other packet
+  * processing, but after we made a final decision about packet's destination.
+  * To do this we maintain:
+@@ -76,7 +76,7 @@ rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+ 
+ static const struct {
+ 	uint64_t pnum; /* prebuild 4 values for pnum[]. */
+-	int32_t  idx;  /* index for new last updated elemnet. */
++	int32_t  idx;  /* index for new last updated element. */
+ 	uint16_t lpv;  /* add value to the last updated element. */
+ } gptbl[GRPSZ] = {
+ 	{
+@@ -236,6 +236,9 @@ send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
+ 
+ 		/* copy rest of the packets into the TX buffer. */
+ 		len = num - n;
++		if (len == 0)
++			goto exit;
++
+ 		j = 0;
+ 		switch (len % FWDSTEP) {
+ 		while (j < len) {
+@@ -258,6 +261,7 @@ send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
+ 		}
+ 	}
+ 
++exit:
+ 	qconf->tx_mbufs[port].len = len;
+ }
+ 
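The guard added to send_packetsx4() protects a Duff's-device style copy: the switch on len % FWDSTEP jumps into the middle of the while body, so a zero length would still execute one partial iteration before the loop condition is ever tested. A standalone illustration of the hazard and the guard, not the example's actual code:

#include <stddef.h>

/*
 * Sketch: an interleaved switch/while copy (Duff's device) needs a
 * zero-length guard, because the first jump lands past the loop test.
 */
static void
copy_u32(unsigned int *dst, const unsigned int *src, size_t len)
{
	size_t j = 0;

	if (len == 0)
		return;		/* without this, case 0 copies once anyway */

	switch (len % 4) {
	while (j < len) {
	case 0:
		dst[j] = src[j]; j++;
		/* fallthrough */
	case 3:
		dst[j] = src[j]; j++;
		/* fallthrough */
	case 2:
		dst[j] = src[j]; j++;
		/* fallthrough */
	case 1:
		dst[j] = src[j]; j++;
	}
	}
}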
+diff --git a/dpdk/examples/l3fwd/l3fwd_event.c b/dpdk/examples/l3fwd/l3fwd_event.c
+index dd9eafa9b9..7a401290f8 100644
+--- a/dpdk/examples/l3fwd/l3fwd_event.c
++++ b/dpdk/examples/l3fwd/l3fwd_event.c
+@@ -43,8 +43,6 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
+ {
+ 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ 	uint16_t nb_ports = rte_eth_dev_count_avail();
+-	uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+-	uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+ 	unsigned int nb_lcores = rte_lcore_count();
+ 	struct rte_eth_conf local_port_conf;
+ 	struct rte_eth_dev_info dev_info;
+diff --git a/dpdk/examples/l3fwd/l3fwd_event_internal_port.c b/dpdk/examples/l3fwd/l3fwd_event_internal_port.c
+index 1e8f46bc11..32cf657148 100644
+--- a/dpdk/examples/l3fwd/l3fwd_event_internal_port.c
++++ b/dpdk/examples/l3fwd/l3fwd_event_internal_port.c
+@@ -118,6 +118,8 @@ l3fwd_event_port_setup_internal_port(void)
+ 		event_p_conf.event_port_cfg |=
+ 			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+ 
++	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
++
+ 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
+ 								event_p_id++) {
+ 		ret = rte_event_port_setup(event_d_id, event_p_id,
+diff --git a/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h b/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h
+index 7c6814252a..0c6852a7bb 100644
+--- a/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h
++++ b/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h
+@@ -14,7 +14,7 @@
+  */
+ static inline void
+ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+-		vector unsigned int *dip,
++		__vector unsigned int *dip,
+ 		uint32_t *ipv4_flag)
+ {
+ 	struct rte_ipv4_hdr *ipv4_hdr;
+@@ -45,7 +45,7 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+ 	ipv4_flag[0] &= pkt[3]->packet_type;
+ 
+ 	rte_compiler_barrier();
+-	dip[0] = (vector unsigned int){x0, x1, x2, x3};
++	dip[0] = (__vector unsigned int){x0, x1, x2, x3};
+ }
+ 
+ /*
+@@ -54,22 +54,22 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+  */
+ static inline void
+ processx4_step2(const struct lcore_conf *qconf,
+-		vector unsigned int dip,
++		__vector unsigned int dip,
+ 		uint32_t ipv4_flag,
+ 		uint8_t portid,
+ 		struct rte_mbuf *pkt[FWDSTEP],
+ 		uint16_t dprt[FWDSTEP])
+ {
+ 	rte_xmm_t dst;
+-	const vector unsigned char bswap_mask = (vector unsigned char){
++	const __vector unsigned char bswap_mask = (__vector unsigned char){
+ 							3, 2, 1, 0,
+ 							7, 6, 5, 4,
+ 							11, 10, 9, 8,
+ 							15, 14, 13, 12};
+ 
+ 	/* Byte swap 4 IPV4 addresses. */
+-	dip = (vector unsigned int)vec_perm(*(vector unsigned char *)&dip,
+-					(vector unsigned char){}, bswap_mask);
++	dip = (__vector unsigned int)vec_perm(*(__vector unsigned char *)&dip,
++					(__vector unsigned char){}, bswap_mask);
+ 
+ 	/* if all 4 packets are IPV4. */
+ 	if (likely(ipv4_flag)) {
+@@ -101,7 +101,7 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+ {
+ 	int32_t j;
+ 	uint16_t dst_port[MAX_PKT_BURST];
+-	vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
++	__vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
+ 	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
+ 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+ 
+diff --git a/dpdk/examples/l3fwd/l3fwd_neon.h b/dpdk/examples/l3fwd/l3fwd_neon.h
+index 86ac5971d7..e3d33a5229 100644
+--- a/dpdk/examples/l3fwd/l3fwd_neon.h
++++ b/dpdk/examples/l3fwd/l3fwd_neon.h
+@@ -64,7 +64,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+ 
+ /*
+  * Group consecutive packets with the same destination port in bursts of 4.
+- * Suppose we have array of destionation ports:
++ * Suppose we have array of destination ports:
+  * dst_port[] = {a, b, c, d,, e, ... }
+  * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
+  * We doing 4 comparisons at once and the result is 4 bit mask.
+diff --git a/dpdk/examples/l3fwd/l3fwd_sse.h b/dpdk/examples/l3fwd/l3fwd_sse.h
+index bb565ed546..d5a717e18c 100644
+--- a/dpdk/examples/l3fwd/l3fwd_sse.h
++++ b/dpdk/examples/l3fwd/l3fwd_sse.h
+@@ -64,7 +64,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+ 
+ /*
+  * Group consecutive packets with the same destination port in bursts of 4.
+- * Suppose we have array of destionation ports:
++ * Suppose we have array of destination ports:
+  * dst_port[] = {a, b, c, d,, e, ... }
+  * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
+  * We doing 4 comparisons at once and the result is 4 bit mask.
+diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c
+index eb68ffc5aa..3a0e15109b 100644
+--- a/dpdk/examples/l3fwd/main.c
++++ b/dpdk/examples/l3fwd/main.c
+@@ -53,9 +53,8 @@
+ 
+ #define MAX_LCORE_PARAMS 1024
+ 
+-/* Static global variables used within this file. */
+-static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+-static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
++uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
++uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+ 
+ /**< Ports set in promiscuous mode off by default. */
+ static int promiscuous_on;
+@@ -342,6 +341,8 @@ print_usage(const char *prgname)
+ 		" [-P]"
+ 		" [--lookup]"
+ 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
++		" [--rx-queue-size NPKTS]"
++		" [--tx-queue-size NPKTS]"
+ 		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
+ 		" [--max-pkt-len PKTLEN]"
+ 		" [--no-numa]"
+@@ -361,6 +362,10 @@ print_usage(const char *prgname)
+ 		"            Default: lpm\n"
+ 		"            Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base)\n"
+ 		"  --config (port,queue,lcore): Rx queue configuration\n"
++		"  --rx-queue-size NPKTS: Rx queue size in decimal\n"
++		"            Default: %d\n"
++		"  --tx-queue-size NPKTS: Tx queue size in decimal\n"
++		"            Default: %d\n"
+ 		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
+ 		"  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
+ 		"  --no-numa: Disable numa awareness\n"
+@@ -382,7 +387,7 @@ print_usage(const char *prgname)
+ 		"  --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
+ 		"  -E : Enable exact match, legacy flag please use --lookup=em instead\n"
+ 		"  -L : Enable longest prefix match, legacy flag please use --lookup=lpm instead\n\n",
+-		prgname);
++		prgname, RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT);
+ }
+ 
+ static int
+@@ -525,6 +530,38 @@ parse_mode(const char *optarg)
+ 		evt_rsrc->enabled = true;
+ }
+ 
++static void
++parse_queue_size(const char *queue_size_arg, uint16_t *queue_size, int rx)
++{
++	char *end = NULL;
++	unsigned long value;
++
++	/* parse decimal string */
++	value = strtoul(queue_size_arg, &end, 10);
++	if ((queue_size_arg[0] == '\0') || (end == NULL) ||
++		(*end != '\0') || (value == 0)) {
++		if (rx == 1)
++			rte_exit(EXIT_FAILURE, "Invalid rx-queue-size\n");
++		else
++			rte_exit(EXIT_FAILURE, "Invalid tx-queue-size\n");
++
++		return;
++	}
++
++	if (value > UINT16_MAX) {
++		if (rx == 1)
++			rte_exit(EXIT_FAILURE, "rx-queue-size %lu > %d\n",
++				value, UINT16_MAX);
++		else
++			rte_exit(EXIT_FAILURE, "tx-queue-size %lu > %d\n",
++				value, UINT16_MAX);
++
++		return;
++	}
++
++	*queue_size = value;
++}
++
+ static void
+ parse_eventq_sched(const char *optarg)
+ {
+@@ -582,6 +619,8 @@ static const char short_options[] =
+ 	;
+ 
+ #define CMD_LINE_OPT_CONFIG "config"
++#define CMD_LINE_OPT_RX_QUEUE_SIZE "rx-queue-size"
++#define CMD_LINE_OPT_TX_QUEUE_SIZE "tx-queue-size"
+ #define CMD_LINE_OPT_ETH_DEST "eth-dest"
+ #define CMD_LINE_OPT_NO_NUMA "no-numa"
+ #define CMD_LINE_OPT_IPV6 "ipv6"
+@@ -604,6 +643,8 @@ enum {
+ 	 * conflict with short options */
+ 	CMD_LINE_OPT_MIN_NUM = 256,
+ 	CMD_LINE_OPT_CONFIG_NUM,
++	CMD_LINE_OPT_RX_QUEUE_SIZE_NUM,
++	CMD_LINE_OPT_TX_QUEUE_SIZE_NUM,
+ 	CMD_LINE_OPT_ETH_DEST_NUM,
+ 	CMD_LINE_OPT_NO_NUMA_NUM,
+ 	CMD_LINE_OPT_IPV6_NUM,
+@@ -622,6 +663,8 @@ enum {
+ 
+ static const struct option lgopts[] = {
+ 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
++	{CMD_LINE_OPT_RX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_RX_QUEUE_SIZE_NUM},
++	{CMD_LINE_OPT_TX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_TX_QUEUE_SIZE_NUM},
+ 	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
+ 	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
+ 	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
+@@ -715,6 +758,14 @@ parse_args(int argc, char **argv)
+ 			lcore_params = 1;
+ 			break;
+ 
++		case CMD_LINE_OPT_RX_QUEUE_SIZE_NUM:
++			parse_queue_size(optarg, &nb_rxd, 1);
++			break;
++
++		case CMD_LINE_OPT_TX_QUEUE_SIZE_NUM:
++			parse_queue_size(optarg, &nb_txd, 0);
++			break;
++
+ 		case CMD_LINE_OPT_ETH_DEST_NUM:
+ 			parse_eth_dest(optarg);
+ 			break;
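
Editor's note: with the two new long options wired into parse_args(), the descriptor ring sizes can be overridden at start-up. An illustrative invocation (the binary name and EAL arguments are placeholders, not taken from this patch):

./dpdk-l3fwd -l 1,2 -n 4 -- -p 0x1 --config="(0,0,1)" \
        --rx-queue-size 2048 --tx-queue-size 2048
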
+diff --git a/dpdk/examples/link_status_interrupt/main.c b/dpdk/examples/link_status_interrupt/main.c
+index 551f0524da..9699e14ce6 100644
+--- a/dpdk/examples/link_status_interrupt/main.c
++++ b/dpdk/examples/link_status_interrupt/main.c
+@@ -101,9 +101,10 @@ struct lsi_port_statistics {
+ struct lsi_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+ 
+ /* A tsc-based timer responsible for triggering statistics printout */
+-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
++#define TIMER_MILLISECOND (rte_get_timer_hz() / 1000)
+ #define MAX_TIMER_PERIOD 86400 /* 1 day max */
+-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
++#define DEFAULT_TIMER_PERIOD 10UL /* default period is 10 seconds */
++static int64_t timer_period;
+ 
+ /* Print out statistics on packets dropped */
+ static void
+@@ -370,6 +371,8 @@ lsi_parse_args(int argc, char **argv)
+ 		{NULL, 0, 0, 0}
+ 	};
+ 
++	timer_period = DEFAULT_TIMER_PERIOD * TIMER_MILLISECOND * 1000;
++
+ 	argvopt = argv;
+ 
+ 	while ((opt = getopt_long(argc, argvopt, "p:q:T:",
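
Editor's note: the point of this hunk is that the cycles-per-millisecond figure is no longer a compile-time guess tied to a 2 GHz part but is derived from rte_get_timer_hz() at run time, which also forces timer_period to be initialised inside lsi_parse_args() rather than statically. A minimal sketch of the resulting computation, with a hypothetical helper name:

#include <stdint.h>
#include <rte_cycles.h>

/* Cycles making up one statistics period of `seconds` seconds.
 * rte_get_timer_hz() only returns a meaningful value after
 * rte_eal_init(), hence the move away from static initialisation. */
static uint64_t
stats_period_cycles(uint64_t seconds)
{
    return seconds * (rte_get_timer_hz() / 1000) * 1000;
}
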
+diff --git a/dpdk/examples/multi_process/hotplug_mp/commands.c b/dpdk/examples/multi_process/hotplug_mp/commands.c
+index 48fd329583..41ea265e45 100644
+--- a/dpdk/examples/multi_process/hotplug_mp/commands.c
++++ b/dpdk/examples/multi_process/hotplug_mp/commands.c
+@@ -175,7 +175,7 @@ static void cmd_dev_detach_parsed(void *parsed_result,
+ 		cmdline_printf(cl, "detached device %s\n",
+ 			da.name);
+ 	else
+-		cmdline_printf(cl, "failed to dettach device %s\n",
++		cmdline_printf(cl, "failed to detach device %s\n",
+ 			da.name);
+ 	rte_devargs_reset(&da);
+ }
+diff --git a/dpdk/examples/multi_process/simple_mp/main.c b/dpdk/examples/multi_process/simple_mp/main.c
+index 5df2a39000..9d5f1088b0 100644
+--- a/dpdk/examples/multi_process/simple_mp/main.c
++++ b/dpdk/examples/multi_process/simple_mp/main.c
+@@ -4,7 +4,7 @@
+ 
+ /*
+  * This sample application is a simple multi-process application which
+- * demostrates sharing of queues and memory pools between processes, and
++ * demonstrates sharing of queues and memory pools between processes, and
+  * using those queues/pools for communication between the processes.
+  *
+  * Application is designed to run with two processes, a primary and a
+diff --git a/dpdk/examples/multi_process/symmetric_mp/main.c b/dpdk/examples/multi_process/symmetric_mp/main.c
+index b35886a77b..050337765f 100644
+--- a/dpdk/examples/multi_process/symmetric_mp/main.c
++++ b/dpdk/examples/multi_process/symmetric_mp/main.c
+@@ -3,7 +3,7 @@
+  */
+ 
+ /*
+- * Sample application demostrating how to do packet I/O in a multi-process
++ * Sample application demonstrating how to do packet I/O in a multi-process
+  * environment. The same code can be run as a primary process and as a
+  * secondary process, just with a different proc-id parameter in each case
+  * (apart from the EAL flag to indicate a secondary process).
+diff --git a/dpdk/examples/ntb/ntb_fwd.c b/dpdk/examples/ntb/ntb_fwd.c
+index f110fc129f..81964d0308 100644
+--- a/dpdk/examples/ntb/ntb_fwd.c
++++ b/dpdk/examples/ntb/ntb_fwd.c
+@@ -696,7 +696,7 @@ assign_stream_to_lcores(void)
+ 			break;
+ 	}
+ 
+-	/* Print packet forwading config. */
++	/* Print packet forwarding config. */
+ 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+ 		conf = &fwd_lcore_conf[lcore_id];
+ 
+diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c
+index b01ac60fd1..99e67ef67b 100644
+--- a/dpdk/examples/packet_ordering/main.c
++++ b/dpdk/examples/packet_ordering/main.c
+@@ -686,7 +686,7 @@ main(int argc, char **argv)
+ 	if (ret < 0)
+ 		rte_exit(EXIT_FAILURE, "Invalid packet_ordering arguments\n");
+ 
+-	/* Check if we have enought cores */
++	/* Check if we have enough cores */
+ 	if (rte_lcore_count() < 3)
+ 		rte_exit(EXIT_FAILURE, "Error, This application needs at "
+ 				"least 3 logical cores to run:\n"
+diff --git a/dpdk/examples/performance-thread/common/lthread.c b/dpdk/examples/performance-thread/common/lthread.c
+index 009374a8c3..b02e0fc13a 100644
+--- a/dpdk/examples/performance-thread/common/lthread.c
++++ b/dpdk/examples/performance-thread/common/lthread.c
+@@ -178,7 +178,7 @@ lthread_create(struct lthread **new_lt, int lcore_id,
+ 	bzero(lt, sizeof(struct lthread));
+ 	lt->root_sched = THIS_SCHED;
+ 
+-	/* set the function args and exit handlder */
++	/* set the function args and exit handler */
+ 	_lthread_init(lt, fun, arg, _lthread_exit_handler);
+ 
+ 	/* put it in the ready queue */
+@@ -384,7 +384,7 @@ void lthread_exit(void *ptr)
+ 	}
+ 
+ 
+-	/* wait until the joinging thread has collected the exit value */
++	/* wait until the joining thread has collected the exit value */
+ 	while (lt->join != LT_JOIN_EXIT_VAL_READ)
+ 		_reschedule();
+ 
+@@ -410,7 +410,7 @@ int lthread_join(struct lthread *lt, void **ptr)
+ 	/* invalid to join a detached thread, or a thread that is joined */
+ 	if ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))
+ 		return POSIX_ERRNO(EINVAL);
+-	/* pointer to the joining thread and a poingter to return a value */
++	/* pointer to the joining thread and a pointer to return a value */
+ 	lt->lt_join = current;
+ 	current->lt_exit_ptr = ptr;
+ 	/* There is a race between lthread_join() and lthread_exit()
+diff --git a/dpdk/examples/performance-thread/common/lthread_diag.c b/dpdk/examples/performance-thread/common/lthread_diag.c
+index 57760a1e23..b1bdf7a30c 100644
+--- a/dpdk/examples/performance-thread/common/lthread_diag.c
++++ b/dpdk/examples/performance-thread/common/lthread_diag.c
+@@ -232,7 +232,7 @@ lthread_sched_stats_display(void)
+ }
+ 
+ /*
+- * Defafult diagnostic callback
++ * Default diagnostic callback
+  */
+ static uint64_t
+ _lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
+diff --git a/dpdk/examples/performance-thread/common/lthread_int.h b/dpdk/examples/performance-thread/common/lthread_int.h
+index d010126f16..ec018e34a1 100644
+--- a/dpdk/examples/performance-thread/common/lthread_int.h
++++ b/dpdk/examples/performance-thread/common/lthread_int.h
+@@ -107,7 +107,7 @@ enum join_st {
+ 	LT_JOIN_EXIT_VAL_READ,	/* joining thread has collected ret val */
+ };
+ 
+-/* defnition of an lthread stack object */
++/* definition of an lthread stack object */
+ struct lthread_stack {
+ 	uint8_t stack[LTHREAD_MAX_STACK_SIZE];
+ 	size_t stack_size;
+diff --git a/dpdk/examples/performance-thread/common/lthread_tls.c b/dpdk/examples/performance-thread/common/lthread_tls.c
+index 4ab2e3558b..bae45f2aa9 100644
+--- a/dpdk/examples/performance-thread/common/lthread_tls.c
++++ b/dpdk/examples/performance-thread/common/lthread_tls.c
+@@ -215,7 +215,7 @@ void _lthread_tls_alloc(struct lthread *lt)
+ 	tls->root_sched = (THIS_SCHED);
+ 	lt->tls = tls;
+ 
+-	/* allocate data for TLS varaiables using RTE_PER_LTHREAD macros */
++	/* allocate data for TLS variables using RTE_PER_LTHREAD macros */
+ 	if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {
+ 		lt->per_lthread_data =
+ 		    _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);
+diff --git a/dpdk/examples/performance-thread/l3fwd-thread/main.c b/dpdk/examples/performance-thread/l3fwd-thread/main.c
+index 8a35040597..1ddb2a9138 100644
+--- a/dpdk/examples/performance-thread/l3fwd-thread/main.c
++++ b/dpdk/examples/performance-thread/l3fwd-thread/main.c
+@@ -125,7 +125,7 @@ cb_parse_ptype(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ }
+ 
+ /*
+- *  When set to zero, simple forwaring path is eanbled.
++ *  When set to zero, simple forwarding path is enabled.
+  *  When set to one, optimized forwarding path is enabled.
+  *  Note that LPM optimisation path uses SSE4.1 instructions.
+  */
+@@ -1529,7 +1529,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+ }
+ 
+ /*
+- * We group consecutive packets with the same destionation port into one burst.
++ * We group consecutive packets with the same destination port into one burst.
+  * To avoid extra latency this is done together with some other packet
+  * processing, but after we made a final decision about packet's destination.
+  * To do this we maintain:
+@@ -1554,7 +1554,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+ 
+ /*
+  * Group consecutive packets with the same destination port in bursts of 4.
+- * Suppose we have array of destionation ports:
++ * Suppose we have an array of destination ports:
+  * dst_port[] = {a, b, c, d,, e, ... }
+  * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
+  * We doing 4 comparisons at once and the result is 4 bit mask.
+@@ -1565,7 +1565,7 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
+ {
+ 	static const struct {
+ 		uint64_t pnum; /* prebuild 4 values for pnum[]. */
+-		int32_t  idx;  /* index for new last updated elemnet. */
++		int32_t  idx;  /* index for new last updated element. */
+ 		uint16_t lpv;  /* add value to the last updated element. */
+ 	} gptbl[GRPSZ] = {
+ 	{
+@@ -1834,7 +1834,7 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
+ 
+ 	/*
+ 	 * Send packets out, through destination port.
+-	 * Consecuteve pacekts with the same destination port
++	 * Consecutive packets with the same destination port
+ 	 * are already grouped together.
+ 	 * If destination port for the packet equals BAD_PORT,
+ 	 * then free the packet without sending it out.
+@@ -3514,7 +3514,7 @@ main(int argc, char **argv)
+ 
+ 	ret = rte_timer_subsystem_init();
+ 	if (ret < 0)
+-		rte_exit(EXIT_FAILURE, "Failed to initialize timer subystem\n");
++		rte_exit(EXIT_FAILURE, "Failed to initialize timer subsystem\n");
+ 
+ 	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
+ 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+diff --git a/dpdk/examples/performance-thread/pthread_shim/pthread_shim.c b/dpdk/examples/performance-thread/pthread_shim/pthread_shim.c
+index bbc076584b..a44cb8244d 100644
+--- a/dpdk/examples/performance-thread/pthread_shim/pthread_shim.c
++++ b/dpdk/examples/performance-thread/pthread_shim/pthread_shim.c
+@@ -586,6 +586,11 @@ pthread_t pthread_self(void)
+ 	return _sys_pthread_funcs.f_pthread_self();
+ }
+ 
++#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
++#pragma GCC diagnostic push
++#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
++#endif
++
+ int pthread_setspecific(pthread_key_t key, const void *data)
+ {
+ 	if (override) {
+@@ -595,6 +600,10 @@ int pthread_setspecific(pthread_key_t key, const void *data)
+ 	return _sys_pthread_funcs.f_pthread_setspecific(key, data);
+ }
+ 
++#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
++#pragma GCC diagnostic pop
++#endif
++
+ int pthread_spin_init(pthread_spinlock_t *a, int b)
+ {
+ 	NOT_IMPLEMENTED;
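
Editor's note: the push/ignore/pop sequence above is the standard way to silence a single false positive without disabling the warning file-wide. Shown standalone below; the version guard here is spelled with plain GCC macros as an assumption, the patch itself uses DPDK's RTE_TOOLCHAIN_GCC/GCC_VERSION wrappers.

#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 12)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif

/* ... code that trips the -Wmaybe-uninitialized false positive ... */

#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 12)
#pragma GCC diagnostic pop
#endif
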
+diff --git a/dpdk/examples/performance-thread/pthread_shim/pthread_shim.h b/dpdk/examples/performance-thread/pthread_shim/pthread_shim.h
+index e90fb15fc1..ce51627a5b 100644
+--- a/dpdk/examples/performance-thread/pthread_shim/pthread_shim.h
++++ b/dpdk/examples/performance-thread/pthread_shim/pthread_shim.h
+@@ -41,7 +41,7 @@
+  *
+  * The decision whether to invoke the real library function or the lthread
+  * function is controlled by a per pthread flag that can be switched
+- * on of off by the pthread_override_set() API described below. Typcially
++ * on or off by the pthread_override_set() API described below. Typically
+  * this should be done as the first action of the initial lthread.
+  *
+  * N.B In general it would be poor practice to revert to invoke a real
+diff --git a/dpdk/examples/pipeline/examples/registers.spec b/dpdk/examples/pipeline/examples/registers.spec
+index 74a014ad06..59998fef03 100644
+--- a/dpdk/examples/pipeline/examples/registers.spec
++++ b/dpdk/examples/pipeline/examples/registers.spec
+@@ -4,7 +4,7 @@
+ ; This program is setting up two register arrays called "pkt_counters" and "byte_counters".
+ ; On every input packet (Ethernet/IPv4), the "pkt_counters" register at location indexed by
+ ; the IPv4 header "Source Address" field is incremented, while the same location in the
+-; "byte_counters" array accummulates the value of the IPv4 header "Total Length" field.
++; "byte_counters" array accumulates the value of the IPv4 header "Total Length" field.
+ ;
+ ; The "regrd" and "regwr" CLI commands can be used to read and write the current value of
+ ; any register array location.
+diff --git a/dpdk/examples/qos_sched/args.c b/dpdk/examples/qos_sched/args.c
+index 10ca7bea61..ff51d0215a 100644
+--- a/dpdk/examples/qos_sched/args.c
++++ b/dpdk/examples/qos_sched/args.c
+@@ -11,6 +11,7 @@
+ #include <limits.h>
+ #include <getopt.h>
+ 
++#include <rte_bitops.h>
+ #include <rte_log.h>
+ #include <rte_eal.h>
+ #include <rte_lcore.h>
+@@ -427,13 +428,13 @@ app_parse_args(int argc, char **argv)
+ 
+ 	/* check main core index validity */
+ 	for (i = 0; i <= app_main_core; i++) {
+-		if (app_used_core_mask & (1u << app_main_core)) {
++		if (app_used_core_mask & RTE_BIT64(app_main_core)) {
+ 			RTE_LOG(ERR, APP, "Main core index is not configured properly\n");
+ 			app_usage(prgname);
+ 			return -1;
+ 		}
+ 	}
+-	app_used_core_mask |= 1u << app_main_core;
++	app_used_core_mask |= RTE_BIT64(app_main_core);
+ 
+ 	if ((app_used_core_mask != app_eal_core_mask()) ||
+ 			(app_main_core != rte_get_main_lcore())) {
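
Editor's note: the RTE_BIT64() substitution is not cosmetic. With a plain 1u the shift is performed on a 32-bit operand, so core indexes of 32 and above invoke undefined behaviour and the 64-bit mask silently loses bits. A minimal sketch, with RTE_BIT64 shown as defined in rte_bitops.h:

#include <stdint.h>

#define RTE_BIT64(nr) (UINT64_C(1) << (nr)) /* as in rte_bitops.h */

static uint64_t
core_bit(unsigned int core)
{
    /* return 1u << core;  -- undefined behaviour for core >= 32 */
    return RTE_BIT64(core); /* well-defined for core 0..63 */
}
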
+diff --git a/dpdk/examples/qos_sched/cmdline.c b/dpdk/examples/qos_sched/cmdline.c
+index 257b87a7cf..6691b02d89 100644
+--- a/dpdk/examples/qos_sched/cmdline.c
++++ b/dpdk/examples/qos_sched/cmdline.c
+@@ -41,7 +41,7 @@ static void cmd_help_parsed(__rte_unused void *parsed_result,
+ 		"    qavg port X subport Y pipe Z              : Show average queue size per pipe.\n"
+ 		"    qavg port X subport Y pipe Z tc A         : Show average queue size per pipe and TC.\n"
+ 		"    qavg port X subport Y pipe Z tc A q B     : Show average queue size of a specific queue.\n"
+-		"    qavg [n|period] X                     : Set number of times and peiod (us).\n\n"
++		"    qavg [n|period] X                     : Set number of times and period (us).\n\n"
+ 	);
+ 
+ }
+diff --git a/dpdk/examples/server_node_efd/node/node.c b/dpdk/examples/server_node_efd/node/node.c
+index ba1c7e5153..fc2aa5ffef 100644
+--- a/dpdk/examples/server_node_efd/node/node.c
++++ b/dpdk/examples/server_node_efd/node/node.c
+@@ -296,7 +296,7 @@ handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
+ 		}
+ 	}
+ }
+-/* >8 End of packets dequeueing. */
++/* >8 End of packets dequeuing. */
+ 
+ /*
+  * Application main function - loops through
+diff --git a/dpdk/examples/skeleton/basicfwd.c b/dpdk/examples/skeleton/basicfwd.c
+index 16435ee3cc..518cd72179 100644
+--- a/dpdk/examples/skeleton/basicfwd.c
++++ b/dpdk/examples/skeleton/basicfwd.c
+@@ -179,7 +179,7 @@ main(int argc, char *argv[])
+ 	int ret = rte_eal_init(argc, argv);
+ 	if (ret < 0)
+ 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+-	/* >8 End of initializion the Environment Abstraction Layer (EAL). */
++	/* >8 End of initialization of the Environment Abstraction Layer (EAL). */
+ 
+ 	argc -= ret;
+ 	argv += ret;
+diff --git a/dpdk/examples/vhost/main.c b/dpdk/examples/vhost/main.c
+index 33d023aa39..f9e932061f 100644
+--- a/dpdk/examples/vhost/main.c
++++ b/dpdk/examples/vhost/main.c
+@@ -32,6 +32,8 @@
+ #define MAX_QUEUES 128
+ #endif
+ 
++#define NUM_MBUFS_DEFAULT 0x24000
++
+ /* the maximum number of external ports supported */
+ #define MAX_SUP_PORTS 1
+ 
+@@ -57,6 +59,9 @@
+ 
+ #define INVALID_PORT_ID 0xFF
+ 
++/* number of mbufs in all pools - if specified on command-line. */
++static int total_num_mbufs = NUM_MBUFS_DEFAULT;
++
+ /* mask of enabled ports */
+ static uint32_t enabled_port_mask = 0;
+ 
+@@ -107,7 +112,7 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
+ static char *socket_files;
+ static int nb_sockets;
+ 
+-/* empty vmdq configuration structure. Filled in programatically */
++/* empty VMDq configuration structure. Filled in programmatically */
+ static struct rte_eth_conf vmdq_conf_default = {
+ 	.rxmode = {
+ 		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
+@@ -115,7 +120,7 @@ static struct rte_eth_conf vmdq_conf_default = {
+ 		/*
+ 		 * VLAN strip is necessary for 1G NIC such as I350,
+ 		 * this fixes bug of ipv4 forwarding in guest can't
+-		 * forward pakets from one virtio dev to another virtio dev.
++		 * forward packets from one virtio dev to another virtio dev.
+ 		 */
+ 		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
+ 	},
+@@ -259,6 +264,10 @@ port_init(uint16_t port)
+ 
+ 		return retval;
+ 	}
++	if (dev_info.max_vmdq_pools == 0) {
++		RTE_LOG(ERR, VHOST_PORT, "Failed to get VMDq info.\n");
++		return -1;
++	}
+ 
+ 	rxconf = &dev_info.default_rxconf;
+ 	txconf = &dev_info.default_txconf;
+@@ -463,7 +472,7 @@ us_vhost_usage(const char *prgname)
+ 	"		--nb-devices ND\n"
+ 	"		-p PORTMASK: Set mask for ports to be used by application\n"
+ 	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
+-	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
++	"		--rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
+ 	"		--rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
+ 	"		--rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
+ 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
+@@ -473,7 +482,8 @@ us_vhost_usage(const char *prgname)
+ 	"		--tso [0|1] disable/enable TCP segment offload.\n"
+ 	"		--client register a vhost-user socket as client mode.\n"
+ 	"		--dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
+-	"		--dmas register dma channel for specific vhost device.\n",
++	"		--dmas register dma channel for specific vhost device.\n"
++	"		--total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
+ 	       prgname);
+ }
+ 
+@@ -504,6 +514,8 @@ enum {
+ 	OPT_DMA_TYPE_NUM,
+ #define OPT_DMAS                "dmas"
+ 	OPT_DMAS_NUM,
++#define OPT_NUM_MBUFS           "total-num-mbufs"
++	OPT_NUM_MBUFS_NUM,
+ };
+ 
+ /*
+@@ -543,6 +555,8 @@ us_vhost_parse_args(int argc, char **argv)
+ 				NULL, OPT_DMA_TYPE_NUM},
+ 		{OPT_DMAS, required_argument,
+ 				NULL, OPT_DMAS_NUM},
++		{OPT_NUM_MBUFS, required_argument,
++				NULL, OPT_NUM_MBUFS_NUM},
+ 		{NULL, 0, 0, 0},
+ 	};
+ 
+@@ -675,6 +689,19 @@ us_vhost_parse_args(int argc, char **argv)
+ 			async_vhost_driver = 1;
+ 			break;
+ 
++		case OPT_NUM_MBUFS_NUM:
++			ret = parse_num_opt(optarg, INT32_MAX);
++			if (ret == -1) {
++				RTE_LOG(INFO, VHOST_CONFIG,
++					"Invalid argument for total-num-mbufs [0..N]\n");
++				us_vhost_usage(prgname);
++				return -1;
++			}
++
++			if (total_num_mbufs < ret)
++				total_num_mbufs = ret;
++			break;
++
+ 		case OPT_CLIENT_NUM:
+ 			client_mode = 1;
+ 			break;
+@@ -873,31 +900,43 @@ sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+ 	}
+ }
+ 
+-static __rte_always_inline void
+-drain_vhost(struct vhost_dev *vdev)
++static __rte_always_inline uint16_t
++enqueue_pkts(struct vhost_dev *vdev, struct rte_mbuf **pkts, uint16_t rx_count)
+ {
+-	uint16_t ret;
+-	uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
+-	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+-	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
++	uint16_t enqueue_count;
+ 
+ 	if (builtin_net_driver) {
+-		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
++		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, rx_count);
+ 	} else if (async_vhost_driver) {
+ 		uint16_t enqueue_fail = 0;
+ 
+ 		complete_async_pkts(vdev);
+-		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
+-		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
++		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
++					VIRTIO_RXQ, pkts, rx_count);
++		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
+ 
+-		enqueue_fail = nr_xmit - ret;
++		enqueue_fail = rx_count - enqueue_count;
+ 		if (enqueue_fail)
+-			free_pkts(&m[ret], nr_xmit - ret);
++			free_pkts(&pkts[enqueue_count], enqueue_fail);
++
+ 	} else {
+-		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+-						m, nr_xmit);
++		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
++						pkts, rx_count);
+ 	}
+ 
++	return enqueue_count;
++}
++
++static __rte_always_inline void
++drain_vhost(struct vhost_dev *vdev)
++{
++	uint16_t ret;
++	uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
++	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
++	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
++
++	ret = enqueue_pkts(vdev, m, nr_xmit);
++
+ 	if (enable_stats) {
+ 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
+ 				__ATOMIC_SEQ_CST);
+@@ -1190,44 +1229,19 @@ drain_eth_rx(struct vhost_dev *vdev)
+ 	if (!rx_count)
+ 		return;
+ 
+-	/*
+-	 * When "enable_retry" is set, here we wait and retry when there
+-	 * is no enough free slots in the queue to hold @rx_count packets,
+-	 * to diminish packet loss.
+-	 */
+-	if (enable_retry &&
+-	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
+-			VIRTIO_RXQ))) {
+-		uint32_t retry;
++	enqueue_count = enqueue_pkts(vdev, pkts, rx_count);
++
++	/* Retry if necessary */
++	if (enable_retry && unlikely(enqueue_count < rx_count)) {
++		uint32_t retry = 0;
+ 
+-		for (retry = 0; retry < burst_rx_retry_num; retry++) {
++		while (enqueue_count < rx_count && retry++ < burst_rx_retry_num) {
+ 			rte_delay_us(burst_rx_delay_time);
+-			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
+-					VIRTIO_RXQ))
+-				break;
++			enqueue_count += enqueue_pkts(vdev, &pkts[enqueue_count],
++						rx_count - enqueue_count);
+ 		}
+ 	}
+ 
+-	if (builtin_net_driver) {
+-		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
+-						pkts, rx_count);
+-	} else if (async_vhost_driver) {
+-		uint16_t enqueue_fail = 0;
+-
+-		complete_async_pkts(vdev);
+-		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
+-					VIRTIO_RXQ, pkts, rx_count);
+-		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
+-
+-		enqueue_fail = rx_count - enqueue_count;
+-		if (enqueue_fail)
+-			free_pkts(&pkts[enqueue_count], enqueue_fail);
+-
+-	} else {
+-		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+-						pkts, rx_count);
+-	}
+-
+ 	if (enable_stats) {
+ 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
+ 				__ATOMIC_SEQ_CST);
+@@ -1289,7 +1303,7 @@ switch_worker(void *arg __rte_unused)
+ 	struct vhost_dev *vdev;
+ 	struct mbuf_table *tx_q;
+ 
+-	RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
++	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
+ 
+ 	tx_q = &lcore_tx_queue[lcore_id];
+ 	for (i = 0; i < rte_lcore_count(); i++) {
+@@ -1333,7 +1347,7 @@ switch_worker(void *arg __rte_unused)
+ 
+ /*
+  * Remove a device from the specific data core linked list and from the
+- * main linked list. Synchonization  occurs through the use of the
++ * main linked list. Synchronization occurs through the use of the
+  * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
+  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
+  */
+@@ -1606,57 +1620,6 @@ sigint_handler(__rte_unused int signum)
+ 	exit(0);
+ }
+ 
+-/*
+- * While creating an mbuf pool, one key thing is to figure out how
+- * many mbuf entries is enough for our use. FYI, here are some
+- * guidelines:
+- *
+- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
+- *
+- * - For each switch core (A CPU core does the packet switch), we need
+- *   also make some reservation for receiving the packets from virtio
+- *   Tx queue. How many is enough depends on the usage. It's normally
+- *   a simple calculation like following:
+- *
+- *       MAX_PKT_BURST * max packet size / mbuf size
+- *
+- *   So, we definitely need allocate more mbufs when TSO is enabled.
+- *
+- * - Similarly, for each switching core, we should serve @nr_rx_desc
+- *   mbufs for receiving the packets from physical NIC device.
+- *
+- * - We also need make sure, for each switch core, we have allocated
+- *   enough mbufs to fill up the mbuf cache.
+- */
+-static void
+-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
+-	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
+-{
+-	uint32_t nr_mbufs;
+-	uint32_t nr_mbufs_per_core;
+-	uint32_t mtu = 1500;
+-
+-	if (mergeable)
+-		mtu = 9000;
+-	if (enable_tso)
+-		mtu = 64 * 1024;
+-
+-	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
+-			(mbuf_size - RTE_PKTMBUF_HEADROOM);
+-	nr_mbufs_per_core += nr_rx_desc;
+-	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
+-
+-	nr_mbufs  = nr_queues * nr_rx_desc;
+-	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
+-	nr_mbufs *= nr_port;
+-
+-	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
+-					    nr_mbuf_cache, 0, mbuf_size,
+-					    rte_socket_id());
+-	if (mbuf_pool == NULL)
+-		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+-}
+-
+ /*
+  * Main function, does initialisation and calls the per-lcore functions.
+  */
+@@ -1715,8 +1678,11 @@ main(int argc, char *argv[])
+ 	 * many queues here. We probably should only do allocation for
+ 	 * those queues we are going to use.
+ 	 */
+-	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
+-			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
++	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
++					    MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
++					    rte_socket_id());
++	if (mbuf_pool == NULL)
++		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+ 
+ 	if (vm2vm_mode == VM2VM_HARDWARE) {
+ 		/* Enable VT loop back to let L2 switch to do it. */
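
Editor's note: the shape of the reworked Rx path is worth spelling out. Instead of polling rte_vhost_avail_entries() up front and hoping the whole burst fits, the code now enqueues immediately and retries only the leftover tail. A condensed sketch using the names this patch introduces; drain_with_retry is a hypothetical condensation, not a function in the patch.

static void
drain_with_retry(struct vhost_dev *vdev, struct rte_mbuf **pkts,
        uint16_t rx_count)
{
    uint16_t done = enqueue_pkts(vdev, pkts, rx_count);
    uint32_t retry = 0;

    while (done < rx_count && retry++ < burst_rx_retry_num) {
        rte_delay_us(burst_rx_delay_time);
        done += enqueue_pkts(vdev, &pkts[done], rx_count - done);
    }
    /* packets still not enqueued are left for the caller to free */
}
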
+diff --git a/dpdk/examples/vm_power_manager/channel_monitor.c b/dpdk/examples/vm_power_manager/channel_monitor.c
+index d767423a40..97b8def7ca 100644
+--- a/dpdk/examples/vm_power_manager/channel_monitor.c
++++ b/dpdk/examples/vm_power_manager/channel_monitor.c
+@@ -404,7 +404,7 @@ get_pcpu_to_control(struct policy *pol)
+ 
+ 	/*
+ 	 * So now that we're handling virtual and physical cores, we need to
+-	 * differenciate between them when adding them to the branch monitor.
++	 * differentiate between them when adding them to the branch monitor.
+ 	 * Virtual cores need to be converted to physical cores.
+ 	 */
+ 	if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) {
+diff --git a/dpdk/examples/vm_power_manager/power_manager.h b/dpdk/examples/vm_power_manager/power_manager.h
+index d35f8cbe01..d51039e2c6 100644
+--- a/dpdk/examples/vm_power_manager/power_manager.h
++++ b/dpdk/examples/vm_power_manager/power_manager.h
+@@ -224,7 +224,7 @@ int power_manager_enable_turbo_core(unsigned int core_num);
+ int power_manager_disable_turbo_core(unsigned int core_num);
+ 
+ /**
+- * Get the current freuency of the core specified by core_num
++ * Get the current frequency of the core specified by core_num
+  *
+  * @param core_num
+  *  The core number to get the current frequency
+diff --git a/dpdk/examples/vmdq/main.c b/dpdk/examples/vmdq/main.c
+index 2c00a942f1..10410b8783 100644
+--- a/dpdk/examples/vmdq/main.c
++++ b/dpdk/examples/vmdq/main.c
+@@ -62,7 +62,7 @@ static uint8_t rss_enable;
+ 
+ /* Default structure for VMDq. 8< */
+ 
+-/* empty vmdq configuration structure. Filled in programatically */
++/* empty VMDq configuration structure. Filled in programmatically */
+ static const struct rte_eth_conf vmdq_conf_default = {
+ 	.rxmode = {
+ 		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
+diff --git a/dpdk/kernel/freebsd/meson.build b/dpdk/kernel/freebsd/meson.build
+index bf5aa20a55..1f612711be 100644
+--- a/dpdk/kernel/freebsd/meson.build
++++ b/dpdk/kernel/freebsd/meson.build
+@@ -9,8 +9,8 @@ kmods = ['contigmem', 'nic_uio']
+ # right now, which allows us to simplify things. We pull in the sourcer
+ # files from the individual meson.build files, and then use a custom
+ # target to call make, passing in the values as env parameters.
+-kmod_cflags = ['-I' + meson.build_root(),
+-        '-I' + join_paths(meson.source_root(), 'config'),
++kmod_cflags = ['-I' + dpdk_build_root,
++        '-I' + join_paths(dpdk_source_root, 'config'),
+         '-include rte_config.h']
+ 
+ # to avoid warnings due to race conditions with creating the dev_if.h, etc.
+diff --git a/dpdk/kernel/linux/kni/compat.h b/dpdk/kernel/linux/kni/compat.h
+index 664785674f..3a86d12bbc 100644
+--- a/dpdk/kernel/linux/kni/compat.h
++++ b/dpdk/kernel/linux/kni/compat.h
+@@ -141,3 +141,11 @@
+ #if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+ #define HAVE_TSK_IN_GUP
+ #endif
++
++#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE
++#define HAVE_ETH_HW_ADDR_SET
++#endif
++
++#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE
++#define HAVE_NETIF_RX_NI
++#endif
+diff --git a/dpdk/kernel/linux/kni/kni_fifo.h b/dpdk/kernel/linux/kni/kni_fifo.h
+index 5c91b55379..1ba5172002 100644
+--- a/dpdk/kernel/linux/kni/kni_fifo.h
++++ b/dpdk/kernel/linux/kni/kni_fifo.h
+@@ -41,7 +41,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)
+ }
+ 
+ /**
+- * Get up to num elements from the fifo. Return the number actully read
++ * Get up to num elements from the FIFO. Return the number actually read
+  */
+ static inline uint32_t
+ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
+diff --git a/dpdk/kernel/linux/kni/kni_misc.c b/dpdk/kernel/linux/kni/kni_misc.c
+index f10dcd069d..feed12b568 100644
+--- a/dpdk/kernel/linux/kni/kni_misc.c
++++ b/dpdk/kernel/linux/kni/kni_misc.c
+@@ -184,13 +184,17 @@ kni_dev_remove(struct kni_dev *dev)
+ 	if (!dev)
+ 		return -ENODEV;
+ 
++	/*
++	 * The memory of the kni device is allocated and released together
++	 * with the net device. Release the mbufs before freeing the net device.
++	 */
++	kni_net_release_fifo_phy(dev);
++
+ 	if (dev->net_dev) {
+ 		unregister_netdev(dev->net_dev);
+ 		free_netdev(dev->net_dev);
+ 	}
+ 
+-	kni_net_release_fifo_phy(dev);
+-
+ 	return 0;
+ }
+ 
+@@ -220,8 +224,8 @@ kni_release(struct inode *inode, struct file *file)
+ 			dev->pthread = NULL;
+ 		}
+ 
+-		kni_dev_remove(dev);
+ 		list_del(&dev->list);
++		kni_dev_remove(dev);
+ 	}
+ 	up_write(&knet->kni_list_lock);
+ 
+@@ -400,11 +404,16 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
+ 	pr_debug("mbuf_size:    %u\n", kni->mbuf_size);
+ 
+ 	/* if user has provided a valid mac address */
+-	if (is_valid_ether_addr(dev_info.mac_addr))
++	if (is_valid_ether_addr(dev_info.mac_addr)) {
++#ifdef HAVE_ETH_HW_ADDR_SET
++		eth_hw_addr_set(net_dev, dev_info.mac_addr);
++#else
+ 		memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
+-	else
+-		/* Generate random MAC address. */
+-		eth_random_addr(net_dev->dev_addr);
++#endif
++	} else {
++		/* Assign random MAC address. */
++		eth_hw_addr_random(net_dev);
++	}
+ 
+ 	if (dev_info.mtu)
+ 		net_dev->mtu = dev_info.mtu;
+@@ -470,8 +479,8 @@ kni_ioctl_release(struct net *net, uint32_t ioctl_num,
+ 			dev->pthread = NULL;
+ 		}
+ 
+-		kni_dev_remove(dev);
+ 		list_del(&dev->list);
++		kni_dev_remove(dev);
+ 		ret = 0;
+ 		break;
+ 	}
+diff --git a/dpdk/kernel/linux/kni/kni_net.c b/dpdk/kernel/linux/kni/kni_net.c
+index 29e5b9e21f..779ee3451a 100644
+--- a/dpdk/kernel/linux/kni/kni_net.c
++++ b/dpdk/kernel/linux/kni/kni_net.c
+@@ -441,7 +441,11 @@ kni_net_rx_normal(struct kni_dev *kni)
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ 		/* Call netif interface */
++#ifdef HAVE_NETIF_RX_NI
+ 		netif_rx_ni(skb);
++#else
++		netif_rx(skb);
++#endif
+ 
+ 		/* Update statistics */
+ 		dev->stats.rx_bytes += len;
+@@ -779,7 +783,11 @@ kni_net_set_mac(struct net_device *netdev, void *p)
+ 		return -EADDRNOTAVAIL;
+ 
+ 	memcpy(req.mac_addr, addr->sa_data, netdev->addr_len);
++#ifdef HAVE_ETH_HW_ADDR_SET
++	eth_hw_addr_set(netdev, addr->sa_data);
++#else
+ 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
++#endif
+ 
+ 	ret = kni_net_process_request(netdev, &req);
+ 
+diff --git a/dpdk/kernel/linux/kni/meson.build b/dpdk/kernel/linux/kni/meson.build
+index c683fc7b36..4c90069e99 100644
+--- a/dpdk/kernel/linux/kni/meson.build
++++ b/dpdk/kernel/linux/kni/meson.build
+@@ -5,7 +5,7 @@
+ # Ref: https://jira.devtools.intel.com/browse/DPDK-29263
+ kmod_cflags = ''
+ file_path = kernel_source_dir + '/include/linux/netdevice.h'
+-run_cmd = run_command('grep', 'ndo_tx_timeout', file_path)
++run_cmd = run_command('grep', 'ndo_tx_timeout', file_path, check: false)
+ 
+ if run_cmd.stdout().contains('txqueue') == true
+    kmod_cflags = '-DHAVE_ARG_TX_QUEUE'
+@@ -29,10 +29,10 @@ custom_target('rte_kni',
+             'M=' + meson.current_build_dir(),
+             'src=' + meson.current_source_dir(),
+             ' '.join(['MODULE_CFLAGS=', kmod_cflags,'-include '])
+-            + meson.source_root() + '/config/rte_config.h' +
+-            ' -I' + meson.source_root() + '/lib/eal/include' +
+-            ' -I' + meson.source_root() + '/lib/kni' +
+-            ' -I' + meson.build_root() +
++            + dpdk_source_root + '/config/rte_config.h' +
++            ' -I' + dpdk_source_root + '/lib/eal/include' +
++            ' -I' + dpdk_source_root + '/lib/kni' +
++            ' -I' + dpdk_build_root +
+             ' -I' + meson.current_source_dir(),
+             'modules'] + cross_args,
+         depends: kni_mkfile,
+diff --git a/dpdk/kernel/linux/meson.build b/dpdk/kernel/linux/meson.build
+index 0637452e95..d8fb20c1c3 100644
+--- a/dpdk/kernel/linux/meson.build
++++ b/dpdk/kernel/linux/meson.build
+@@ -11,7 +11,7 @@ cross_args = []
+ 
+ if not meson.is_cross_build()
+     # native build
+-    kernel_version = run_command('uname', '-r').stdout().strip()
++    kernel_version = run_command('uname', '-r', check: true).stdout().strip()
+     kernel_install_dir = '/lib/modules/' + kernel_version + '/extra/dpdk'
+     if kernel_build_dir == ''
+         # use default path for native builds
+@@ -24,14 +24,14 @@ if not meson.is_cross_build()
+ 
+     # test running make in kernel directory, using "make kernelversion"
+     make_returncode = run_command('make', '-sC', kernel_build_dir,
+-            'kernelversion').returncode()
++            'kernelversion', check: true).returncode()
+     if make_returncode != 0
+         # backward compatibility:
+         # the headers could still be in the 'build' subdir
+         if not kernel_build_dir.endswith('build') and not kernel_build_dir.endswith('build/')
+             kernel_build_dir = join_paths(kernel_build_dir, 'build')
+             make_returncode = run_command('make', '-sC', kernel_build_dir,
+-                    'kernelversion').returncode()
++                    'kernelversion', check: true).returncode()
+         endif
+     endif
+ 
+@@ -54,7 +54,8 @@ if kernel_build_dir == ''
+ endif
+ cross_compiler = find_program('c').path()
+ if cross_compiler.endswith('gcc')
+-    cross_prefix = run_command([py3, '-c', 'print("' + cross_compiler + '"[:-3])']).stdout().strip()
++    cross_prefix = run_command([py3, '-c', 'print("' + cross_compiler + '"[:-3])'],
++            check: true).stdout().strip()
+ elif cross_compiler.endswith('clang')
+     cross_prefix = ''
+     found_target = false
+diff --git a/dpdk/lib/acl/acl_bld.c b/dpdk/lib/acl/acl_bld.c
+index f316d3e875..2816632803 100644
+--- a/dpdk/lib/acl/acl_bld.c
++++ b/dpdk/lib/acl/acl_bld.c
+@@ -12,6 +12,9 @@
+ /* number of pointers per alloc */
+ #define ACL_PTR_ALLOC	32
+ 
++/* account for the situation when all fields are 8B long */
++#define ACL_MAX_INDEXES	(2 * RTE_ACL_MAX_FIELDS)
++
+ /* macros for dividing rule sets heuristics */
+ #define NODE_MAX	0x4000
+ #define NODE_MIN	0x800
+@@ -80,7 +83,7 @@ struct acl_build_context {
+ 	struct tb_mem_pool        pool;
+ 	struct rte_acl_trie       tries[RTE_ACL_MAX_TRIES];
+ 	struct rte_acl_bld_trie   bld_tries[RTE_ACL_MAX_TRIES];
+-	uint32_t            data_indexes[RTE_ACL_MAX_TRIES][RTE_ACL_MAX_FIELDS];
++	uint32_t            data_indexes[RTE_ACL_MAX_TRIES][ACL_MAX_INDEXES];
+ 
+ 	/* memory free lists for nodes and blocks used for node ptrs */
+ 	struct acl_mem_block      blocks[MEM_BLOCK_NUM];
+@@ -885,7 +888,7 @@ acl_gen_range_trie(struct acl_build_context *context,
+ 		return root;
+ 	}
+ 
+-	/* gather information about divirgent paths */
++	/* gather information about divergent paths */
+ 	lo_00 = 0;
+ 	hi_ff = UINT8_MAX;
+ 	for (k = n - 1; k >= 0; k--) {
+@@ -988,7 +991,7 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
+ 				 */
+ 				uint64_t mask;
+ 				mask = RTE_ACL_MASKLEN_TO_BITMASK(
+-					fld->mask_range.u32,
++					fld->mask_range.u64,
+ 					rule->config->defs[n].size);
+ 
+ 				/* gen a mini-trie for this field */
+@@ -1301,6 +1304,9 @@ acl_build_index(const struct rte_acl_config *config, uint32_t *data_index)
+ 		if (last_header != config->defs[n].input_index) {
+ 			last_header = config->defs[n].input_index;
+ 			data_index[m++] = config->defs[n].offset;
++			if (config->defs[n].size > sizeof(uint32_t))
++				data_index[m++] = config->defs[n].offset +
++					sizeof(uint32_t);
+ 		}
+ 	}
+ 
+@@ -1487,7 +1493,7 @@ acl_set_data_indexes(struct rte_acl_ctx *ctx)
+ 		memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
+ 			n * sizeof(ctx->data_indexes[0]));
+ 		ctx->trie[i].data_index = ctx->data_indexes + ofs;
+-		ofs += RTE_ACL_MAX_FIELDS;
++		ofs += ACL_MAX_INDEXES;
+ 	}
+ }
+ 
+@@ -1643,7 +1649,7 @@ rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
+ 			/* allocate and fill run-time  structures. */
+ 			rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
+ 				bcx.num_tries, bcx.cfg.num_categories,
+-				RTE_ACL_MAX_FIELDS * RTE_DIM(bcx.tries) *
++				ACL_MAX_INDEXES * RTE_DIM(bcx.tries) *
+ 				sizeof(ctx->data_indexes[0]), max_size);
+ 			if (rc == 0) {
+ 				/* set data indexes. */
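
Editor's note: the underlying issue here is that the ACL runtime consumes match data in 32-bit words, so an 8-byte field needs two data indexes (offset and offset + 4), and a rule set where every field is 8B long needs twice RTE_ACL_MAX_FIELDS slots. An illustrative helper (hypothetical name) mirroring the acl_build_index() change:

#include <stddef.h>
#include <stdint.h>

/* Emit the data indexes one field contributes: one 32-bit word for
 * fields up to 4B, two consecutive words for an 8B field. */
static size_t
field_indexes(uint32_t offset, size_t field_size, uint32_t *idx)
{
    size_t n = 0;

    idx[n++] = offset;
    if (field_size > sizeof(uint32_t))
        idx[n++] = offset + sizeof(uint32_t);
    return n;
}
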
+diff --git a/dpdk/lib/acl/acl_run_altivec.h b/dpdk/lib/acl/acl_run_altivec.h
+index 2de6f27b1f..4dfe7a14b4 100644
+--- a/dpdk/lib/acl/acl_run_altivec.h
++++ b/dpdk/lib/acl/acl_run_altivec.h
+@@ -41,7 +41,7 @@ resolve_priority_altivec(uint64_t transition, int n,
+ {
+ 	uint32_t x;
+ 	xmm_t results, priority, results1, priority1;
+-	vector bool int selector;
++	__vector bool int selector;
+ 	xmm_t *saved_results, *saved_priority;
+ 
+ 	for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) {
+@@ -110,8 +110,8 @@ transition4(xmm_t next_input, const uint64_t *trans,
+ 	xmm_t in, node_type, r, t;
+ 	xmm_t dfa_ofs, quad_ofs;
+ 	xmm_t *index_mask, *tp;
+-	vector bool int dfa_msk;
+-	vector signed char zeroes = {};
++	__vector bool int dfa_msk;
++	__vector signed char zeroes = {};
+ 	union {
+ 		uint64_t d64[2];
+ 		uint32_t d32[4];
+@@ -127,7 +127,7 @@ transition4(xmm_t next_input, const uint64_t *trans,
+ 	index_mask = (xmm_t *)&altivec_acl_const.xmm_index_mask.u32;
+ 	t = vec_xor(*index_mask, *index_mask);
+ 	in = vec_perm(next_input, (xmm_t){},
+-		*(vector unsigned char *)&altivec_acl_const.xmm_shuffle_input);
++		*(__vector unsigned char *)&altivec_acl_const.xmm_shuffle_input);
+ 
+ 	/* Calc node type and node addr */
+ 	node_type = vec_and(vec_nor(*index_mask, *index_mask), tr_lo);
+@@ -137,30 +137,30 @@ transition4(xmm_t next_input, const uint64_t *trans,
+ 	dfa_msk = vec_cmpeq(node_type, t);
+ 
+ 	/* DFA calculations. */
+-	r = vec_sr(in, (vector unsigned int){30, 30, 30, 30});
++	r = vec_sr(in, (__vector unsigned int){30, 30, 30, 30});
+ 	tp = (xmm_t *)&altivec_acl_const.range_base.u32;
+ 	r = vec_add(r, *tp);
+-	t = vec_sr(in, (vector unsigned int){24, 24, 24, 24});
++	t = vec_sr(in, (__vector unsigned int){24, 24, 24, 24});
+ 	r = vec_perm(tr_hi, (xmm_t){(uint16_t)0 << 16},
+-		(vector unsigned char)r);
++		(__vector unsigned char)r);
+ 
+ 	dfa_ofs = vec_sub(t, r);
+ 
+-	/* QUAD/SINGLE caluclations. */
+-	t = (xmm_t)vec_cmpgt((vector signed char)in, (vector signed char)tr_hi);
++	/* QUAD/SINGLE calculations. */
++	t = (xmm_t)vec_cmpgt((__vector signed char)in, (__vector signed char)tr_hi);
+ 	t = (xmm_t)vec_sel(
+ 		vec_sel(
+-			(vector signed char)vec_sub(
+-				zeroes, (vector signed char)t),
+-			(vector signed char)t,
+-			vec_cmpgt((vector signed char)t, zeroes)),
++			(__vector signed char)vec_sub(
++				zeroes, (__vector signed char)t),
++			(__vector signed char)t,
++			vec_cmpgt((__vector signed char)t, zeroes)),
+ 		zeroes,
+-		vec_cmpeq((vector signed char)t, zeroes));
++		vec_cmpeq((__vector signed char)t, zeroes));
+ 
+-	t = (xmm_t)vec_msum((vector signed char)t,
+-		(vector unsigned char)t, (xmm_t){});
+-	quad_ofs = (xmm_t)vec_msum((vector signed short)t,
+-		*(vector signed short *)&altivec_acl_const.xmm_ones_16.u16,
++	t = (xmm_t)vec_msum((__vector signed char)t,
++		(__vector unsigned char)t, (xmm_t){});
++	quad_ofs = (xmm_t)vec_msum((__vector signed short)t,
++		*(__vector signed short *)&altivec_acl_const.xmm_ones_16.u16,
+ 		(xmm_t){});
+ 
+ 	/* blend DFA and QUAD/SINGLE. */
+@@ -177,7 +177,7 @@ transition4(xmm_t next_input, const uint64_t *trans,
+ 	*indices2 = (xmm_t){v.d32[0], v.d32[1], v.d32[2], v.d32[3]};
+ 
+ 	return vec_sr(next_input,
+-		(vector unsigned int){CHAR_BIT, CHAR_BIT, CHAR_BIT, CHAR_BIT});
++		(__vector unsigned int){CHAR_BIT, CHAR_BIT, CHAR_BIT, CHAR_BIT});
+ }
+ 
+ /*
+diff --git a/dpdk/lib/acl/acl_run_avx512.c b/dpdk/lib/acl/acl_run_avx512.c
+index 78fbe34f7c..3b8795561b 100644
+--- a/dpdk/lib/acl/acl_run_avx512.c
++++ b/dpdk/lib/acl/acl_run_avx512.c
+@@ -64,7 +64,7 @@ update_flow_mask(const struct acl_flow_avx512 *flow, uint32_t *fmsk,
+ }
+ 
+ /*
+- * Resolve matches for multiple categories (LE 8, use 128b instuctions/regs)
++ * Resolve matches for multiple categories (LE 8, use 128b instructions/regs)
+  */
+ static inline void
+ resolve_mcle8_avx512x1(uint32_t result[],
+diff --git a/dpdk/lib/acl/acl_run_avx512x16.h b/dpdk/lib/acl/acl_run_avx512x16.h
+index 48bb6fed85..f87293eeb7 100644
+--- a/dpdk/lib/acl/acl_run_avx512x16.h
++++ b/dpdk/lib/acl/acl_run_avx512x16.h
+@@ -10,7 +10,7 @@
+  */
+ 
+ /*
+- * This implementation uses 512-bit registers(zmm) and instrincts.
++ * This implementation uses 512-bit registers(zmm) and intrinsics.
+  * So our main SIMD type is 512-bit width and each such variable can
+  * process sizeof(__m512i) / sizeof(uint32_t) == 16 entries in parallel.
+  */
+@@ -25,20 +25,20 @@
+ #define _F_(x)		x##_avx512x16
+ 
+ /*
+- * Same instrincts have different syntaxis (depending on the bit-width),
++ * Same intrinsics have different syntaxes (depending on the bit-width),
+  * so to overcome that few macros need to be defined.
+  */
+ 
+-/* Naming convention for generic epi(packed integers) type instrincts. */
++/* Naming convention for generic epi(packed integers) type intrinsics. */
+ #define _M_I_(x)	_mm512_##x
+ 
+-/* Naming convention for si(whole simd integer) type instrincts. */
++/* Naming convention for si(whole simd integer) type intrinsics. */
+ #define _M_SI_(x)	_mm512_##x##_si512
+ 
+-/* Naming convention for masked gather type instrincts. */
++/* Naming convention for masked gather type intrinsics. */
+ #define _M_MGI_(x)	_mm512_##x
+ 
+-/* Naming convention for gather type instrincts. */
++/* Naming convention for gather type intrinsics. */
+ #define _M_GI_(name, idx, base, scale)	_mm512_##name(idx, base, scale)
+ 
+ /* num/mask of transitions per SIMD regs */
+@@ -239,7 +239,7 @@ _F_(gather_bytes)(__m512i zero, const __m512i p[2], const uint32_t m[2],
+ }
+ 
+ /*
+- * Resolve matches for multiple categories (GT 8, use 512b instuctions/regs)
++ * Resolve matches for multiple categories (GT 8, use 512b instructions/regs)
+  */
+ static inline void
+ resolve_mcgt8_avx512x1(uint32_t result[],
+diff --git a/dpdk/lib/acl/acl_run_avx512x8.h b/dpdk/lib/acl/acl_run_avx512x8.h
+index 61ac9d1b47..5da2bbfdeb 100644
+--- a/dpdk/lib/acl/acl_run_avx512x8.h
++++ b/dpdk/lib/acl/acl_run_avx512x8.h
+@@ -10,7 +10,7 @@
+  */
+ 
+ /*
+- * This implementation uses 256-bit registers(ymm) and instrincts.
++ * This implementation uses 256-bit registers(ymm) and intrinsics.
+  * So our main SIMD type is 256-bit width and each such variable can
+  * process sizeof(__m256i) / sizeof(uint32_t) == 8 entries in parallel.
+  */
+@@ -25,20 +25,20 @@
+ #define _F_(x)		x##_avx512x8
+ 
+ /*
+- * Same instrincts have different syntaxis (depending on the bit-width),
++ * Same intrinsics have different syntaxes (depending on the bit-width),
+  * so to overcome that few macros need to be defined.
+  */
+ 
+-/* Naming convention for generic epi(packed integers) type instrincts. */
++/* Naming convention for generic epi(packed integers) type intrinsics. */
+ #define _M_I_(x)	_mm256_##x
+ 
+-/* Naming convention for si(whole simd integer) type instrincts. */
++/* Naming convention for si(whole simd integer) type intrinsics. */
+ #define _M_SI_(x)	_mm256_##x##_si256
+ 
+-/* Naming convention for masked gather type instrincts. */
++/* Naming convention for masked gather type intrinsics. */
+ #define _M_MGI_(x)	_mm256_m##x
+ 
+-/* Naming convention for gather type instrincts. */
++/* Naming convention for gather type intrinsics. */
+ #define _M_GI_(name, idx, base, scale)	_mm256_##name(base, idx, scale)
+ 
+ /* num/mask of transitions per SIMD regs */
+diff --git a/dpdk/lib/acl/rte_acl_osdep.h b/dpdk/lib/acl/rte_acl_osdep.h
+index b2c262dee7..3c1dc402ca 100644
+--- a/dpdk/lib/acl/rte_acl_osdep.h
++++ b/dpdk/lib/acl/rte_acl_osdep.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_ACL_OSDEP_H_
+ #define _RTE_ACL_OSDEP_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * @file
+  *
+@@ -45,4 +49,8 @@
+ #include <rte_cpuflags.h>
+ #include <rte_debug.h>
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_ACL_OSDEP_H_ */
+diff --git a/dpdk/lib/bpf/bpf_convert.c b/dpdk/lib/bpf/bpf_convert.c
+index db84add7dc..9563274c9c 100644
+--- a/dpdk/lib/bpf/bpf_convert.c
++++ b/dpdk/lib/bpf/bpf_convert.c
+@@ -412,7 +412,7 @@ static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
+ 			BPF_EMIT_JMP;
+ 			break;
+ 
+-			/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
++			/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
+ 		case BPF_LDX | BPF_MSH | BPF_B:
+ 			/* tmp = A */
+ 			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
+@@ -428,7 +428,7 @@ static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
+ 			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
+ 			break;
+ 
+-			/* RET_K is remaped into 2 insns. RET_A case doesn't need an
++			/* RET_K is remapped into 2 insns. RET_A case doesn't need an
+ 			 * extra mov as EBPF_REG_0 is already mapped into BPF_REG_A.
+ 			 */
+ 		case BPF_RET | BPF_A:
+diff --git a/dpdk/lib/bpf/bpf_def.h b/dpdk/lib/bpf/bpf_def.h
+index fa9125307e..f08cd9106b 100644
+--- a/dpdk/lib/bpf/bpf_def.h
++++ b/dpdk/lib/bpf/bpf_def.h
+@@ -7,6 +7,10 @@
+ #ifndef _RTE_BPF_DEF_H_
+ #define _RTE_BPF_DEF_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * @file
+  *
+@@ -140,4 +144,8 @@ struct ebpf_insn {
+  */
+ #define	EBPF_FUNC_MAX_ARGS	(EBPF_REG_6 - EBPF_REG_1)
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* RTE_BPF_DEF_H_ */
+diff --git a/dpdk/lib/bpf/bpf_impl.h b/dpdk/lib/bpf/bpf_impl.h
+index 26d165ad5c..b4d8e87c6d 100644
+--- a/dpdk/lib/bpf/bpf_impl.h
++++ b/dpdk/lib/bpf/bpf_impl.h
+@@ -2,8 +2,8 @@
+  * Copyright(c) 2018 Intel Corporation
+  */
+ 
+-#ifndef _BPF_H_
+-#define _BPF_H_
++#ifndef BPF_IMPL_H
++#define BPF_IMPL_H
+ 
+ #include <rte_bpf.h>
+ #include <sys/mman.h>
+@@ -43,4 +43,4 @@ bpf_size(uint32_t bpf_op_sz)
+ 	return 0;
+ }
+ 
+-#endif /* _BPF_H_ */
++#endif /* BPF_IMPL_H */
+diff --git a/dpdk/lib/compressdev/rte_compressdev_internal.h b/dpdk/lib/compressdev/rte_compressdev_internal.h
+index 22ceac66e2..b3b193e3ee 100644
+--- a/dpdk/lib/compressdev/rte_compressdev_internal.h
++++ b/dpdk/lib/compressdev/rte_compressdev_internal.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_COMPRESSDEV_INTERNAL_H_
+ #define _RTE_COMPRESSDEV_INTERNAL_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /* rte_compressdev_internal.h
+  * This file holds Compressdev private data structures.
+  */
+@@ -18,7 +22,7 @@
+ /* Logging Macros */
+ extern int compressdev_logtype;
+ #define COMPRESSDEV_LOG(level, fmt, args...) \
+-	rte_log(RTE_LOG_ ## level, compressdev_logtype, "%s(): "fmt "\n", \
++	rte_log(RTE_LOG_ ## level, compressdev_logtype, "%s(): " fmt "\n", \
+ 			__func__, ##args)
+ 
+ /**
+@@ -94,7 +98,7 @@ struct rte_compressdev {
+ struct rte_compressdev_data {
+ 	uint8_t dev_id;
+ 	/**< Compress device identifier */
+-	uint8_t socket_id;
++	int socket_id;
+ 	/**< Socket identifier where memory is allocated */
+ 	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ 	/**< Unique identifier name */
+@@ -111,4 +115,9 @@ struct rte_compressdev_data {
+ 	void *dev_private;
+ 	/**< PMD-specific private data */
+ } __rte_cache_aligned;
++
++#ifdef __cplusplus
++}
++#endif
++
+ #endif
+diff --git a/dpdk/lib/cryptodev/cryptodev_pmd.h b/dpdk/lib/cryptodev/cryptodev_pmd.h
+index b9146f652c..56e659b474 100644
+--- a/dpdk/lib/cryptodev/cryptodev_pmd.h
++++ b/dpdk/lib/cryptodev/cryptodev_pmd.h
+@@ -5,6 +5,10 @@
+ #ifndef _CRYPTODEV_PMD_H_
+ #define _CRYPTODEV_PMD_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /** @file
+  * RTE Crypto PMD APIs
+  *
+@@ -640,4 +644,8 @@ set_asym_session_private_data(struct rte_cryptodev_asym_session *sess,
+ 	sess->sess_private_data[driver_id] = private_data;
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _CRYPTODEV_PMD_H_ */
+diff --git a/dpdk/lib/cryptodev/rte_crypto.h b/dpdk/lib/cryptodev/rte_crypto.h
+index a864f5036f..aeb3bf6e38 100644
+--- a/dpdk/lib/cryptodev/rte_crypto.h
++++ b/dpdk/lib/cryptodev/rte_crypto.h
+@@ -123,15 +123,24 @@ struct rte_crypto_op {
+ 	rte_iova_t phys_addr;
+ 	/**< physical address of crypto operation */
+ 
++/* empty structures do not have zero size in C++ leading to compilation errors
++ * with clang about structure/union having different sizes in C and C++.
++ * While things are clearer with an explicit union, since each field is
++ * zero-sized it's not actually needed, so omit it for C++
++ */
++#ifndef __cplusplus
+ 	__extension__
+ 	union {
++#endif
+ 		struct rte_crypto_sym_op sym[0];
+ 		/**< Symmetric operation parameters */
+ 
+ 		struct rte_crypto_asym_op asym[0];
+ 		/**< Asymmetric operation parameters */
+ 
++#ifndef __cplusplus
+ 	}; /**< operation specific parameters */
++#endif
+ };
+ 
+ /**
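
Editor's note: the problem this guard works around can be reproduced in a few lines. Zero-length array members make the anonymous union zero-sized in (GNU) C, but C++ has no zero-sized members, so the same header yields different sizeof() results in the two languages, which clang diagnoses. A minimal sketch with a hypothetical struct name; valid GNU C, with the C++ behaviour described in comments:

struct op_layout {
    int type;
    __extension__
    union {
        char sym[0];  /* zero-sized member: fine in GNU C ...      */
        char asym[0]; /* ... but C++ has no zero-sized members, so */
    };                /* sizeof(struct op_layout) differs per language */
};
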
+diff --git a/dpdk/lib/cryptodev/rte_crypto_asym.h b/dpdk/lib/cryptodev/rte_crypto_asym.h
+index 9c866f553f..9c5bb9233a 100644
+--- a/dpdk/lib/cryptodev/rte_crypto_asym.h
++++ b/dpdk/lib/cryptodev/rte_crypto_asym.h
+@@ -146,7 +146,7 @@ enum rte_crypto_rsa_padding_type {
+ enum rte_crypto_rsa_priv_key_type {
+ 	RTE_RSA_KEY_TYPE_EXP,
+ 	/**< RSA private key is an exponent */
+-	RTE_RSA_KET_TYPE_QT,
++	RTE_RSA_KEY_TYPE_QT,
+ 	/**< RSA private key is in quintuple format
+ 	 * See rte_crypto_rsa_priv_key_qt
+ 	 */
+diff --git a/dpdk/lib/distributor/rte_distributor_single.c b/dpdk/lib/distributor/rte_distributor_single.c
+index b653620688..60ca86152f 100644
+--- a/dpdk/lib/distributor/rte_distributor_single.c
++++ b/dpdk/lib/distributor/rte_distributor_single.c
+@@ -247,8 +247,7 @@ rte_distributor_process_single(struct rte_distributor_single *d,
+ 			 * worker given by the bit-position
+ 			 */
+ 			for (i = 0; i < d->num_workers; i++)
+-				match |= (!(d->in_flight_tags[i] ^ new_tag)
+-					<< i);
++				match |= ((uint64_t)!(d->in_flight_tags[i] ^ new_tag) << i);
+ 
+ 			/* Only turned-on bits are considered as match */
+ 			match &= d->in_flight_bitmask;
+diff --git a/dpdk/lib/dmadev/rte_dmadev.h b/dpdk/lib/dmadev/rte_dmadev.h
+index 9942c6ec21..4abe79c536 100644
+--- a/dpdk/lib/dmadev/rte_dmadev.h
++++ b/dpdk/lib/dmadev/rte_dmadev.h
+@@ -533,7 +533,7 @@ struct rte_dma_port_param {
+ 		 * @note If some fields can not be supported by the
+ 		 * hardware/driver, then the driver ignores those fields.
+ 		 * Please check driver-specific documentation for limitations
+-		 * and capablites.
++		 * and capabilities.
+ 		 */
+ 		__extension__
+ 		struct {
+@@ -731,7 +731,7 @@ enum rte_dma_status_code {
+ 	/** The operation completed successfully. */
+ 	RTE_DMA_STATUS_SUCCESSFUL,
+ 	/** The operation failed to complete due abort by user.
+-	 * This is mainly used when processing dev_stop, user could modidy the
++	 * This is mainly used when processing dev_stop, user could modify the
+ 	 * descriptors (e.g. change one bit to tell hardware abort this job),
+ 	 * it allows outstanding requests to be complete as much as possible,
+ 	 * so reduce the time to stop the device.
+diff --git a/dpdk/lib/dmadev/rte_dmadev_pmd.h b/dpdk/lib/dmadev/rte_dmadev_pmd.h
+index 5316ad5b5f..82ab7a8cc7 100644
+--- a/dpdk/lib/dmadev/rte_dmadev_pmd.h
++++ b/dpdk/lib/dmadev/rte_dmadev_pmd.h
+@@ -14,6 +14,8 @@
+  * by any application.
+  */
+ 
++#include <rte_dev.h>
++
+ #include "rte_dmadev.h"
+ 
+ #ifdef __cplusplus
+diff --git a/dpdk/lib/eal/arm/include/rte_cycles_32.h b/dpdk/lib/eal/arm/include/rte_cycles_32.h
+index f79718ce8c..cec4d69e7a 100644
+--- a/dpdk/lib/eal/arm/include/rte_cycles_32.h
++++ b/dpdk/lib/eal/arm/include/rte_cycles_32.h
+@@ -30,7 +30,7 @@ extern "C" {
+ 
+ /**
+  * This call is easily portable to any architecture, however,
+- * it may require a system call and inprecise for some tasks.
++ * it may require a system call and be imprecise for some tasks.
+  */
+ static inline uint64_t
+ __rte_rdtsc_syscall(void)
+diff --git a/dpdk/lib/eal/common/eal_common_dev.c b/dpdk/lib/eal/common/eal_common_dev.c
+index e1e9976d8d..07f285f862 100644
+--- a/dpdk/lib/eal/common/eal_common_dev.c
++++ b/dpdk/lib/eal/common/eal_common_dev.c
+@@ -185,8 +185,10 @@ local_dev_probe(const char *devargs, struct rte_device **new_dev)
+ 	return ret;
+ 
+ err_devarg:
+-	if (rte_devargs_remove(da) != 0)
++	if (rte_devargs_remove(da) != 0) {
+ 		rte_devargs_reset(da);
++		free(da);
++	}
+ 	return ret;
+ }
+ 
+diff --git a/dpdk/lib/eal/common/eal_common_devargs.c b/dpdk/lib/eal/common/eal_common_devargs.c
+index 8c7650cf6c..184fe676aa 100644
+--- a/dpdk/lib/eal/common/eal_common_devargs.c
++++ b/dpdk/lib/eal/common/eal_common_devargs.c
+@@ -191,6 +191,7 @@ rte_devargs_parse(struct rte_devargs *da, const char *dev)
+ 
+ 	if (da == NULL)
+ 		return -EINVAL;
++	memset(da, 0, sizeof(*da));
+ 
+ 	/* First parse according global device syntax. */
+ 	if (rte_devargs_layers_parse(da, dev) == 0) {
+diff --git a/dpdk/lib/eal/common/eal_common_dynmem.c b/dpdk/lib/eal/common/eal_common_dynmem.c
+index 7c5437ddfa..c1e1889f5c 100644
+--- a/dpdk/lib/eal/common/eal_common_dynmem.c
++++ b/dpdk/lib/eal/common/eal_common_dynmem.c
+@@ -304,6 +304,10 @@ eal_dynmem_hugepage_init(void)
+ 				needed = num_pages - num_pages_alloc;
+ 
+ 				pages = malloc(sizeof(*pages) * needed);
++				if (pages == NULL) {
++					RTE_LOG(ERR, EAL, "Failed to malloc pages\n");
++					return -1;
++				}
+ 
+ 				/* do not request exact number of pages */
+ 				cur_pages = eal_memalloc_alloc_seg_bulk(pages,
+diff --git a/dpdk/lib/eal/common/eal_common_proc.c b/dpdk/lib/eal/common/eal_common_proc.c
+index ebd0f6673b..b33d58ea0a 100644
+--- a/dpdk/lib/eal/common/eal_common_proc.c
++++ b/dpdk/lib/eal/common/eal_common_proc.c
+@@ -282,8 +282,17 @@ read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
+ 	msgh.msg_control = control;
+ 	msgh.msg_controllen = sizeof(control);
+ 
++retry:
+ 	msglen = recvmsg(mp_fd, &msgh, 0);
++
++	/* a zero-length message means the socket was closed */
++	if (msglen == 0)
++		return 0;
++
+ 	if (msglen < 0) {
++		if (errno == EINTR)
++			goto retry;
++
+ 		RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));
+ 		return -1;
+ 	}
+@@ -311,7 +320,7 @@ read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
+ 		RTE_LOG(ERR, EAL, "invalid received data length\n");
+ 		return -1;
+ 	}
+-	return 0;
++	return msglen;
+ }
+ 
+ static void
+@@ -385,8 +394,13 @@ mp_handle(void *arg __rte_unused)
+ 	struct sockaddr_un sa;
+ 
+ 	while (mp_fd >= 0) {
+-		if (read_msg(&msg, &sa) == 0)
+-			process_msg(&msg, &sa);
++		int ret;
++
++		ret = read_msg(&msg, &sa);
++		if (ret <= 0)
++			break;
++
++		process_msg(&msg, &sa);
+ 	}
+ 
+ 	return NULL;
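
The two hunks above change the read_msg() contract from "0 on success" to a byte
count: 0 now signals that the peer closed the control socket, a negative value a
hard error, and an EINTR from recvmsg() is retried instead of tearing down the
handler thread. A standalone sketch of the same wrapper, assuming only POSIX
sockets:

    #include <errno.h>
    #include <sys/socket.h>

    /* Retry on EINTR; surface closure (0) and errors (<0) distinctly so
     * the caller's loop can decide whether to keep running. */
    static ssize_t recv_ctl_msg(int fd, struct msghdr *mh)
    {
        ssize_t n;
        do {
            n = recvmsg(fd, mh, 0);
        } while (n < 0 && errno == EINTR);
        return n;   /* >0 bytes read, 0 peer closed, <0 real error */
    }
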
+diff --git a/dpdk/lib/eal/common/eal_common_trace_utils.c b/dpdk/lib/eal/common/eal_common_trace_utils.c
+index 64f58fb66a..2b55dbec65 100644
+--- a/dpdk/lib/eal/common/eal_common_trace_utils.c
++++ b/dpdk/lib/eal/common/eal_common_trace_utils.c
+@@ -104,13 +104,15 @@ trace_session_name_generate(char *trace_dir)
+ 	rc = rte_strscpy(trace_dir, eal_get_hugefile_prefix(),
+ 			TRACE_PREFIX_LEN);
+ 	if (rc == -E2BIG)
+-		rc = TRACE_PREFIX_LEN;
++		rc = TRACE_PREFIX_LEN - 1;
+ 	trace_dir[rc++] = '-';
+ 
+ 	rc = strftime(trace_dir + rc, TRACE_DIR_STR_LEN - rc,
+ 			"%Y-%m-%d-%p-%I-%M-%S", tm_result);
+-	if (rc == 0)
++	if (rc == 0) {
++		errno = ENOSPC;
+ 		goto fail;
++	}
+ 
+ 	return rc;
+ fail:
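
The clamp to TRACE_PREFIX_LEN - 1 above matters because rte_strscpy() reports
truncation with -E2BIG after writing dsize - 1 characters plus a NUL, so the
next write must land on the NUL's index, not one past it. A sketch under that
documented contract, with hypothetical sizes:

    #include <errno.h>
    #include <rte_string_fns.h>   /* rte_strscpy() */

    #define PREFIX_LEN 16   /* hypothetical */
    #define DIR_LEN    64   /* hypothetical, > PREFIX_LEN */

    static ssize_t build_name(char dir[DIR_LEN], const char *prefix)
    {
        ssize_t rc = rte_strscpy(dir, prefix, PREFIX_LEN);
        if (rc == -E2BIG)
            rc = PREFIX_LEN - 1;   /* index of the terminating NUL */
        dir[rc++] = '-';           /* safe: rc <= PREFIX_LEN - 1 < DIR_LEN */
        return rc;
    }
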
+diff --git a/dpdk/lib/eal/common/malloc_elem.h b/dpdk/lib/eal/common/malloc_elem.h
+index 15d8ba7af2..503fe5c470 100644
+--- a/dpdk/lib/eal/common/malloc_elem.h
++++ b/dpdk/lib/eal/common/malloc_elem.h
+@@ -7,6 +7,8 @@
+ 
+ #include <stdbool.h>
+ 
++#include <rte_common.h>
++
+ #define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)
+ 
+ /* dummy definition of struct so we can use pointers to it in malloc_elem struct */
+@@ -125,12 +127,6 @@ malloc_elem_cookies_ok(const struct malloc_elem *elem)
+ #define ASAN_MEM_TO_SHADOW(mem) \
+ 	RTE_PTR_ADD(ASAN_MEM_SHIFT(mem), ASAN_SHADOW_OFFSET)
+ 
+-#if defined(__clang__)
+-#define __rte_no_asan __attribute__((no_sanitize("address", "hwaddress")))
+-#else
+-#define __rte_no_asan __attribute__((no_sanitize_address))
+-#endif
+-
+ __rte_no_asan
+ static inline void
+ asan_set_shadow(void *addr, char val)
+@@ -270,7 +266,9 @@ old_malloc_size(struct malloc_elem *elem)
+ 
+ #else /* !RTE_MALLOC_ASAN */
+ 
+-#define __rte_no_asan
++static inline void
++asan_set_zone(void *ptr __rte_unused, size_t len __rte_unused,
++		uint32_t val __rte_unused) { }
+ 
+ static inline void
+ asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { }
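
With __rte_no_asan relocated to rte_common.h (see the rte_common.h hunk below),
the non-ASan branch here now supplies an inline no-op asan_set_zone() so call
sites compile unchanged in both configurations. The general shape of the
pattern, with hypothetical names:

    #ifdef HAVE_TRACING                   /* hypothetical feature switch */
    void trace_event(int id);             /* real version lives elsewhere */
    #else
    /* No-op stub: call sites stay free of #ifdef clutter and the
     * compiler can discard the calls entirely. */
    static inline void trace_event(int id) { (void)id; }
    #endif
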
+diff --git a/dpdk/lib/eal/common/malloc_heap.c b/dpdk/lib/eal/common/malloc_heap.c
+index 55aad2711b..55063ccf81 100644
+--- a/dpdk/lib/eal/common/malloc_heap.c
++++ b/dpdk/lib/eal/common/malloc_heap.c
+@@ -402,7 +402,7 @@ try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
+ 	bool callback_triggered = false;
+ 
+ 	alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
+-			MALLOC_ELEM_TRAILER_LEN, pg_sz);
++			MALLOC_ELEM_OVERHEAD, pg_sz);
+ 	n_segs = alloc_sz / pg_sz;
+ 
+ 	/* we can't know in advance how many pages we'll need, so we malloc */
+@@ -860,6 +860,7 @@ malloc_heap_free(struct malloc_elem *elem)
+ 	struct rte_memseg_list *msl;
+ 	unsigned int i, n_segs, before_space, after_space;
+ 	int ret;
++	bool unmapped = false;
+ 	const struct internal_config *internal_conf =
+ 		eal_get_internal_configuration();
+ 
+@@ -1026,6 +1027,9 @@ malloc_heap_free(struct malloc_elem *elem)
+ 		request_to_primary(&req);
+ 	}
+ 
++	/* we didn't exit early, meaning we have unmapped some pages */
++	unmapped = true;
++
+ 	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
+ 		msl->socket_id, aligned_len >> 20ULL);
+ 
+@@ -1033,6 +1037,37 @@ malloc_heap_free(struct malloc_elem *elem)
+ free_unlock:
+ 	asan_set_freezone(asan_ptr, asan_data_len);
+ 
++	/* if we unmapped some memory, we need to do additional work for ASan */
++	if (unmapped) {
++		void *asan_end = RTE_PTR_ADD(asan_ptr, asan_data_len);
++		void *aligned_end = RTE_PTR_ADD(aligned_start, aligned_len);
++		void *aligned_trailer = RTE_PTR_SUB(aligned_start,
++				MALLOC_ELEM_TRAILER_LEN);
++
++		/*
++		 * There was a memory area that was unmapped. This memory area
++		 * will have to be marked as available for ASan, because we will
++		 * want to use it next time it gets mapped again. The OS memory
++		 * protection should trigger a fault on access to these areas
++		 * anyway, so we are not giving up any protection.
++		 */
++		asan_set_zone(aligned_start, aligned_len, 0x00);
++
++		/*
++		 * ...however, when we unmap pages, we create new free elements
++		 * which might have been marked as "freed" with an earlier
++		 * `asan_set_freezone` call. So, if there is an area past the
++		 * unmapped space that was marked as freezone for ASan, we need
++		 * to mark the malloc header as available.
++		 */
++		if (asan_end > aligned_end)
++			asan_set_zone(aligned_end, MALLOC_ELEM_HEADER_LEN, 0x00);
++
++		/* if there's space before unmapped memory, mark as available */
++		if (asan_ptr < aligned_start)
++			asan_set_zone(aligned_trailer, MALLOC_ELEM_TRAILER_LEN, 0x00);
++	}
++
+ 	rte_spinlock_unlock(&(heap->lock));
+ 	return ret;
+ }
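
In ASan's shadow encoding a byte of 0x00 marks the corresponding memory fully
addressable, so the three asan_set_zone(..., 0x00) calls above re-arm unmapped
ranges for reuse. The public sanitizer interface expresses the same intent; a
sketch assuming a compiler that ships asan_interface.h:

    #include <stddef.h>
    #include <sanitizer/asan_interface.h>

    /* Make n bytes at p addressable again, e.g. before the range is
     * remapped later; comparable to asan_set_zone(p, n, 0x00). */
    static void unpoison_range(void *p, size_t n)
    {
        ASAN_UNPOISON_MEMORY_REGION(p, n);
    }
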
+diff --git a/dpdk/lib/eal/common/malloc_mp.c b/dpdk/lib/eal/common/malloc_mp.c
+index 2e597a17a2..774bd1132f 100644
+--- a/dpdk/lib/eal/common/malloc_mp.c
++++ b/dpdk/lib/eal/common/malloc_mp.c
+@@ -251,7 +251,7 @@ handle_alloc_request(const struct malloc_mp_req *m,
+ 	}
+ 
+ 	alloc_sz = RTE_ALIGN_CEIL(ar->align + ar->elt_size +
+-			MALLOC_ELEM_TRAILER_LEN, ar->page_sz);
++			MALLOC_ELEM_OVERHEAD, ar->page_sz);
+ 	n_segs = alloc_sz / ar->page_sz;
+ 
+ 	/* we can't know in advance how many pages we'll need, so we malloc */
+diff --git a/dpdk/lib/eal/common/rte_service.c b/dpdk/lib/eal/common/rte_service.c
+index bd8fb72e78..e76c2baffc 100644
+--- a/dpdk/lib/eal/common/rte_service.c
++++ b/dpdk/lib/eal/common/rte_service.c
+@@ -764,7 +764,9 @@ rte_service_lcore_stop(uint32_t lcore)
+ 		return -EALREADY;
+ 
+ 	uint32_t i;
+-	uint64_t service_mask = lcore_states[lcore].service_mask;
++	struct core_state *cs = &lcore_states[lcore];
++	uint64_t service_mask = cs->service_mask;
++
+ 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ 		int32_t enabled = service_mask & (UINT64_C(1) << i);
+ 		int32_t service_running = rte_service_runstate_get(i);
+@@ -772,6 +774,11 @@ rte_service_lcore_stop(uint32_t lcore)
+ 			__atomic_load_n(&rte_services[i].num_mapped_cores,
+ 				__ATOMIC_RELAXED));
+ 
++		/* Switch off this core for all services, to ensure that future
++		 * calls to may_be_active() know this core is switched off.
++		 */
++		cs->service_active_on_lcore[i] = 0;
++
+ 		/* if the core is mapped, and the service is running, and this
+ 		 * is the only core that is mapped, the service would cease to
+ 		 * run if this core stopped, so fail instead.
+diff --git a/dpdk/lib/eal/freebsd/eal.c b/dpdk/lib/eal/freebsd/eal.c
+index a1cd2462db..414aad3dd3 100644
+--- a/dpdk/lib/eal/freebsd/eal.c
++++ b/dpdk/lib/eal/freebsd/eal.c
+@@ -986,11 +986,11 @@ rte_eal_cleanup(void)
+ 		eal_get_internal_configuration();
+ 	rte_service_finalize();
+ 	rte_mp_channel_cleanup();
++	rte_trace_save();
++	eal_trace_fini();
+ 	/* after this point, any DPDK pointers will become dangling */
+ 	rte_eal_memory_detach();
+ 	rte_eal_alarm_cleanup();
+-	rte_trace_save();
+-	eal_trace_fini();
+ 	eal_cleanup_config(internal_conf);
+ 	return 0;
+ }
+diff --git a/dpdk/lib/eal/freebsd/eal_interrupts.c b/dpdk/lib/eal/freebsd/eal_interrupts.c
+index 10aa91cc09..9f720bdc8f 100644
+--- a/dpdk/lib/eal/freebsd/eal_interrupts.c
++++ b/dpdk/lib/eal/freebsd/eal_interrupts.c
+@@ -234,7 +234,7 @@ rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
+ 
+ 	rte_spinlock_lock(&intr_lock);
+ 
+-	/* check if the insterrupt source for the fd is existent */
++	/* check if an interrupt source exists for the fd */
+ 	TAILQ_FOREACH(src, &intr_sources, next)
+ 		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
+ 			break;
+@@ -288,7 +288,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
+ 
+ 	rte_spinlock_lock(&intr_lock);
+ 
+-	/* check if the insterrupt source for the fd is existent */
++	/* check if an interrupt source exists for the fd */
+ 	TAILQ_FOREACH(src, &intr_sources, next)
+ 		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
+ 			break;
+diff --git a/dpdk/lib/eal/freebsd/eal_memory.c b/dpdk/lib/eal/freebsd/eal_memory.c
+index 78ac142b82..17ab10e0ca 100644
+--- a/dpdk/lib/eal/freebsd/eal_memory.c
++++ b/dpdk/lib/eal/freebsd/eal_memory.c
+@@ -446,8 +446,8 @@ memseg_secondary_init(void)
+ 
+ 		msl = &mcfg->memsegs[msl_idx];
+ 
+-		/* skip empty memseg lists */
+-		if (msl->memseg_arr.len == 0)
++		/* skip empty and external memseg lists */
++		if (msl->memseg_arr.len == 0 || msl->external)
+ 			continue;
+ 
+ 		if (rte_fbarray_attach(&msl->memseg_arr)) {
+diff --git a/dpdk/lib/eal/freebsd/include/rte_os.h b/dpdk/lib/eal/freebsd/include/rte_os.h
+index 9d8a69008c..003468caff 100644
+--- a/dpdk/lib/eal/freebsd/include/rte_os.h
++++ b/dpdk/lib/eal/freebsd/include/rte_os.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_OS_H_
+ #define _RTE_OS_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * This header should contain any definition
+  * which is not supported natively or named differently in FreeBSD.
+@@ -24,6 +28,8 @@
+ 
+ typedef cpuset_t rte_cpuset_t;
+ #define RTE_HAS_CPUSET
++
++#ifdef RTE_EAL_FREEBSD_CPUSET_LEGACY
+ #define RTE_CPU_AND(dst, src1, src2) do \
+ { \
+ 	cpuset_t tmp; \
+@@ -57,6 +63,23 @@ typedef cpuset_t rte_cpuset_t;
+ 	CPU_ANDNOT(&tmp, src); \
+ 	CPU_COPY(&tmp, dst); \
+ } while (0)
++#endif /* CPU_NAND */
++
++#else /* RTE_EAL_FREEBSD_CPUSET_LEGACY */
++
++#define RTE_CPU_AND CPU_AND
++#define RTE_CPU_OR CPU_OR
++#define RTE_CPU_FILL CPU_FILL
++#define RTE_CPU_NOT(dst, src) do { \
++	cpu_set_t tmp; \
++	CPU_FILL(&tmp); \
++	CPU_XOR(dst, src, &tmp); \
++} while (0)
++
++#endif /* RTE_EAL_FREEBSD_CPUSET_LEGACY */
++
++#ifdef __cplusplus
++}
+ #endif
+ 
+ #endif /* _RTE_OS_H_ */
+diff --git a/dpdk/lib/eal/freebsd/meson.build b/dpdk/lib/eal/freebsd/meson.build
+index 398ceab71d..fe9097303a 100644
+--- a/dpdk/lib/eal/freebsd/meson.build
++++ b/dpdk/lib/eal/freebsd/meson.build
+@@ -19,3 +19,14 @@ sources += files(
+ )
+ 
+ deps += ['kvargs', 'telemetry']
++
++# test for version of cpuset macros
++cpuset_test_code = '''
++        #include <sys/types.h>
++        #include <sys/cpuset.h>
++        void cpu_test_or(cpuset_t *s) { CPU_OR(s, s, s); }
++'''
++
++if not cc.compiles(cpuset_test_code, name: 'Detect argument count for CPU_OR')
++    dpdk_conf.set('RTE_EAL_FREEBSD_CPUSET_LEGACY', 1)
++endif
+diff --git a/dpdk/lib/eal/include/generic/rte_pflock.h b/dpdk/lib/eal/include/generic/rte_pflock.h
+index b9de063c89..e7bb29b3c5 100644
+--- a/dpdk/lib/eal/include/generic/rte_pflock.h
++++ b/dpdk/lib/eal/include/generic/rte_pflock.h
+@@ -157,7 +157,7 @@ rte_pflock_write_lock(rte_pflock_t *pf)
+ 	uint16_t ticket, w;
+ 
+ 	/* Acquire ownership of write-phase.
+-	 * This is same as rte_tickelock_lock().
++	 * This is same as rte_ticketlock_lock().
+ 	 */
+ 	ticket = __atomic_fetch_add(&pf->wr.in, 1, __ATOMIC_RELAXED);
+ 	rte_wait_until_equal_16(&pf->wr.out, ticket, __ATOMIC_ACQUIRE);
+diff --git a/dpdk/lib/eal/include/generic/rte_ticketlock.h b/dpdk/lib/eal/include/generic/rte_ticketlock.h
+index c1b8808f51..693c67b517 100644
+--- a/dpdk/lib/eal/include/generic/rte_ticketlock.h
++++ b/dpdk/lib/eal/include/generic/rte_ticketlock.h
+@@ -91,13 +91,13 @@ rte_ticketlock_unlock(rte_ticketlock_t *tl)
+ static inline int
+ rte_ticketlock_trylock(rte_ticketlock_t *tl)
+ {
+-	rte_ticketlock_t old, new;
+-	old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
+-	new.tickets = old.tickets;
+-	new.s.next++;
+-	if (old.s.next == old.s.current) {
+-		if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets,
+-		    new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
++	rte_ticketlock_t oldl, newl;
++	oldl.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
++	newl.tickets = oldl.tickets;
++	newl.s.next++;
++	if (oldl.s.next == oldl.s.current) {
++		if (__atomic_compare_exchange_n(&tl->tickets, &oldl.tickets,
++		    newl.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+ 			return 1;
+ 	}
+ 
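
The rename from old/new to oldl/newl is needed because `new` is a C++ keyword
and these headers are consumed by C++ translation units; behavior is unchanged.
For reference, a usage sketch of the trylock path (RTE_TICKETLOCK_INITIALIZER
is assumed to be the documented initializer):

    static rte_ticketlock_t tl = RTE_TICKETLOCK_INITIALIZER;

    if (rte_ticketlock_trylock(&tl)) {
        /* ... short critical section ... */
        rte_ticketlock_unlock(&tl);
    }   /* else: contended, caller backs off without blocking */
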
+diff --git a/dpdk/lib/eal/include/meson.build b/dpdk/lib/eal/include/meson.build
+index 86468d1a2b..9700494816 100644
+--- a/dpdk/lib/eal/include/meson.build
++++ b/dpdk/lib/eal/include/meson.build
+@@ -60,6 +60,7 @@ generic_headers = files(
+         'generic/rte_mcslock.h',
+         'generic/rte_memcpy.h',
+         'generic/rte_pause.h',
++        'generic/rte_pflock.h',
+         'generic/rte_power_intrinsics.h',
+         'generic/rte_prefetch.h',
+         'generic/rte_rwlock.h',
+diff --git a/dpdk/lib/eal/include/rte_bitops.h b/dpdk/lib/eal/include/rte_bitops.h
+index 141e8ea730..f50dbe4388 100644
+--- a/dpdk/lib/eal/include/rte_bitops.h
++++ b/dpdk/lib/eal/include/rte_bitops.h
+@@ -17,6 +17,10 @@
+ #include <rte_debug.h>
+ #include <rte_compat.h>
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * Get the uint64_t value for a specified bit set.
+  *
+@@ -271,4 +275,8 @@ rte_bit_relaxed_test_and_clear64(unsigned int nr, volatile uint64_t *addr)
+ 	return val & mask;
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_BITOPS_H_ */
+diff --git a/dpdk/lib/eal/include/rte_branch_prediction.h b/dpdk/lib/eal/include/rte_branch_prediction.h
+index 854ef9e5dd..0256a9de60 100644
+--- a/dpdk/lib/eal/include/rte_branch_prediction.h
++++ b/dpdk/lib/eal/include/rte_branch_prediction.h
+@@ -10,6 +10,10 @@
+ #ifndef _RTE_BRANCH_PREDICTION_H_
+ #define _RTE_BRANCH_PREDICTION_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * Check if a branch is likely to be taken.
+  *
+@@ -38,4 +42,8 @@
+ #define unlikely(x)	__builtin_expect(!!(x), 0)
+ #endif /* unlikely */
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_BRANCH_PREDICTION_H_ */
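
The same extern "C" guard recurs across many headers below (rte_compat.h,
rte_dev.h, rte_hypervisor.h, ...). It gives every declaration C linkage when
the header lands in a C++ translation unit, preventing mangled-name link errors
against the C library. The generic shape, with a hypothetical header:

    #ifndef MY_HEADER_H                    /* hypothetical */
    #define MY_HEADER_H

    #ifdef __cplusplus
    extern "C" {                           /* C linkage for C++ users */
    #endif

    int my_c_function(int x);              /* hypothetical declaration */

    #ifdef __cplusplus
    }
    #endif

    #endif /* MY_HEADER_H */
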
+diff --git a/dpdk/lib/eal/include/rte_common.h b/dpdk/lib/eal/include/rte_common.h
+index 4a399cc7c8..6f004f6cb3 100644
+--- a/dpdk/lib/eal/include/rte_common.h
++++ b/dpdk/lib/eal/include/rte_common.h
+@@ -85,6 +85,11 @@ typedef uint16_t unaligned_uint16_t;
+  */
+ #define __rte_packed __attribute__((__packed__))
+ 
++/**
++ * Macro to mark a type that is not subject to type-based aliasing rules
++ */
++#define __rte_may_alias __attribute__((__may_alias__))
++
+ /******* Macro to mark functions and fields scheduled for removal *****/
+ #define __rte_deprecated	__attribute__((__deprecated__))
+ #define __rte_deprecated_msg(msg)	__attribute__((__deprecated__(msg)))
+@@ -242,6 +247,19 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void)
+  */
+ #define __rte_cold __attribute__((cold))
+ 
++/**
++ * Disable AddressSanitizer on some code
++ */
++#ifdef RTE_MALLOC_ASAN
++#ifdef RTE_CC_CLANG
++#define __rte_no_asan __attribute__((no_sanitize("address", "hwaddress")))
++#else
++#define __rte_no_asan __attribute__((no_sanitize_address))
++#endif
++#else /* ! RTE_MALLOC_ASAN */
++#define __rte_no_asan
++#endif
++
+ /*********** Macros for pointer arithmetic ********/
+ 
+ /**
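
Centralizing __rte_no_asan here lets any translation unit opt a single function
out of AddressSanitizer instrumentation, e.g. allocator internals that
deliberately touch poisoned memory. A sketch with a hypothetical list walker:

    struct free_node { struct free_node *next; };   /* hypothetical */

    /* This walker reads memory the allocator has poisoned on purpose,
     * so instrumentation is disabled for it alone, not the whole file. */
    __rte_no_asan
    static void scan_free_list(const struct free_node *head)
    {
        while (head != NULL)
            head = head->next;
    }
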
+diff --git a/dpdk/lib/eal/include/rte_compat.h b/dpdk/lib/eal/include/rte_compat.h
+index 2718612cce..a7dbe23449 100644
+--- a/dpdk/lib/eal/include/rte_compat.h
++++ b/dpdk/lib/eal/include/rte_compat.h
+@@ -6,6 +6,10 @@
+ #ifndef _RTE_COMPAT_H_
+ #define _RTE_COMPAT_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #ifndef ALLOW_EXPERIMENTAL_API
+ 
+ #define __rte_experimental \
+@@ -43,4 +47,8 @@ __attribute__((section(".text.internal")))
+ 
+ #endif
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_COMPAT_H_ */
+diff --git a/dpdk/lib/eal/include/rte_dev.h b/dpdk/lib/eal/include/rte_dev.h
+index 448a41cb0e..e6ff1218f9 100644
+--- a/dpdk/lib/eal/include/rte_dev.h
++++ b/dpdk/lib/eal/include/rte_dev.h
+@@ -320,10 +320,6 @@ rte_dev_iterator_next(struct rte_dev_iterator *it);
+ 	     dev != NULL; \
+ 	     dev = rte_dev_iterator_next(it))
+ 
+-#ifdef __cplusplus
+-}
+-#endif
+-
+ /**
+  * @warning
+  * @b EXPERIMENTAL: this API may change without prior notice
+@@ -496,4 +492,8 @@ int
+ rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
+ 		  size_t len);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_DEV_H_ */
+diff --git a/dpdk/lib/eal/include/rte_hypervisor.h b/dpdk/lib/eal/include/rte_hypervisor.h
+index 5fe719c1d4..1666431ce3 100644
+--- a/dpdk/lib/eal/include/rte_hypervisor.h
++++ b/dpdk/lib/eal/include/rte_hypervisor.h
+@@ -5,6 +5,10 @@
+ #ifndef RTE_HYPERVISOR_H
+ #define RTE_HYPERVISOR_H
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * @file
+  * Hypervisor awareness.
+@@ -30,4 +34,8 @@ rte_hypervisor_get(void);
+ const char *
+ rte_hypervisor_get_name(enum rte_hypervisor id);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* RTE_HYPERVISOR_H */
+diff --git a/dpdk/lib/eal/include/rte_keepalive.h b/dpdk/lib/eal/include/rte_keepalive.h
+index bd25508da8..538fb09095 100644
+--- a/dpdk/lib/eal/include/rte_keepalive.h
++++ b/dpdk/lib/eal/include/rte_keepalive.h
+@@ -11,6 +11,10 @@
+ #ifndef _KEEPALIVE_H_
+ #define _KEEPALIVE_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <rte_config.h>
+ #include <rte_memory.h>
+ 
+@@ -139,4 +143,8 @@ rte_keepalive_register_relay_callback(struct rte_keepalive *keepcfg,
+ 	rte_keepalive_relay_callback_t callback,
+ 	void *data);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _KEEPALIVE_H_ */
+diff --git a/dpdk/lib/eal/include/rte_malloc.h b/dpdk/lib/eal/include/rte_malloc.h
+index ed02e15119..3892519fab 100644
+--- a/dpdk/lib/eal/include/rte_malloc.h
++++ b/dpdk/lib/eal/include/rte_malloc.h
+@@ -58,7 +58,7 @@ rte_malloc(const char *type, size_t size, unsigned align)
+ 	__rte_alloc_size(2);
+ 
+ /**
+- * Allocate zero'ed memory from the heap.
++ * Allocate zeroed memory from the heap.
+  *
+  * Equivalent to rte_malloc() except that the memory zone is
+  * initialised with zeros. In NUMA systems, the memory allocated resides on the
+@@ -189,7 +189,7 @@ rte_malloc_socket(const char *type, size_t size, unsigned align, int socket)
+ 	__rte_alloc_size(2);
+ 
+ /**
+- * Allocate zero'ed memory from the heap.
++ * Allocate zeroed memory from the heap.
+  *
+  * Equivalent to rte_malloc() except that the memory zone is
+  * initialised with zeros.
+diff --git a/dpdk/lib/eal/include/rte_pci_dev_feature_defs.h b/dpdk/lib/eal/include/rte_pci_dev_feature_defs.h
+index e12c22081f..c5bb631286 100644
+--- a/dpdk/lib/eal/include/rte_pci_dev_feature_defs.h
++++ b/dpdk/lib/eal/include/rte_pci_dev_feature_defs.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_PCI_DEV_DEFS_H_
+ #define _RTE_PCI_DEV_DEFS_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /* interrupt mode */
+ enum rte_intr_mode {
+ 	RTE_INTR_MODE_NONE = 0,
+@@ -13,4 +17,8 @@ enum rte_intr_mode {
+ 	RTE_INTR_MODE_MSIX
+ };
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_PCI_DEV_DEFS_H_ */
+diff --git a/dpdk/lib/eal/include/rte_pci_dev_features.h b/dpdk/lib/eal/include/rte_pci_dev_features.h
+index 6104123d27..ee6e10590c 100644
+--- a/dpdk/lib/eal/include/rte_pci_dev_features.h
++++ b/dpdk/lib/eal/include/rte_pci_dev_features.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_PCI_DEV_FEATURES_H
+ #define _RTE_PCI_DEV_FEATURES_H
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <rte_pci_dev_feature_defs.h>
+ 
+ #define RTE_INTR_MODE_NONE_NAME "none"
+@@ -12,4 +16,8 @@
+ #define RTE_INTR_MODE_MSI_NAME "msi"
+ #define RTE_INTR_MODE_MSIX_NAME "msix"
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif
+diff --git a/dpdk/lib/eal/include/rte_time.h b/dpdk/lib/eal/include/rte_time.h
+index 5ad7c8841a..ec25f7b93d 100644
+--- a/dpdk/lib/eal/include/rte_time.h
++++ b/dpdk/lib/eal/include/rte_time.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_TIME_H_
+ #define _RTE_TIME_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <stdint.h>
+ #include <time.h>
+ 
+@@ -98,4 +102,8 @@ rte_ns_to_timespec(uint64_t nsec)
+ 	return ts;
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_TIME_H_ */
+diff --git a/dpdk/lib/eal/include/rte_trace_point.h b/dpdk/lib/eal/include/rte_trace_point.h
+index e226f073f7..0f8700974f 100644
+--- a/dpdk/lib/eal/include/rte_trace_point.h
++++ b/dpdk/lib/eal/include/rte_trace_point.h
+@@ -370,7 +370,7 @@ do { \
+ do { \
+ 	if (unlikely(in == NULL)) \
+ 		return; \
+-	rte_strscpy(mem, in, __RTE_TRACE_EMIT_STRING_LEN_MAX); \
++	rte_strscpy((char *)mem, in, __RTE_TRACE_EMIT_STRING_LEN_MAX); \
+ 	mem = RTE_PTR_ADD(mem, __RTE_TRACE_EMIT_STRING_LEN_MAX); \
+ } while (0)
+ 
+diff --git a/dpdk/lib/eal/include/rte_trace_point_register.h b/dpdk/lib/eal/include/rte_trace_point_register.h
+index 4f5c86552d..2e61439940 100644
+--- a/dpdk/lib/eal/include/rte_trace_point_register.h
++++ b/dpdk/lib/eal/include/rte_trace_point_register.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_TRACE_POINT_REGISTER_H_
+ #define _RTE_TRACE_POINT_REGISTER_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #ifdef _RTE_TRACE_POINT_H_
+ #error for registration, include this file first before <rte_trace_point.h>
+ #endif
+@@ -42,4 +46,8 @@ do { \
+ 		RTE_STR(in)"[32]", "string_bounded_t"); \
+ } while (0)
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_TRACE_POINT_REGISTER_H_ */
+diff --git a/dpdk/lib/eal/linux/eal.c b/dpdk/lib/eal/linux/eal.c
+index 60b4924838..e3d34f7b7c 100644
+--- a/dpdk/lib/eal/linux/eal.c
++++ b/dpdk/lib/eal/linux/eal.c
+@@ -1362,13 +1362,17 @@ rte_eal_cleanup(void)
+ 
+ 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ 		rte_memseg_walk(mark_freeable, NULL);
++
+ 	rte_service_finalize();
++#ifdef VFIO_PRESENT
++	vfio_mp_sync_cleanup();
++#endif
+ 	rte_mp_channel_cleanup();
++	rte_trace_save();
++	eal_trace_fini();
+ 	/* after this point, any DPDK pointers will become dangling */
+ 	rte_eal_memory_detach();
+ 	rte_eal_alarm_cleanup();
+-	rte_trace_save();
+-	eal_trace_fini();
+ 	eal_cleanup_config(internal_conf);
+ 	return 0;
+ }
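
Both this hunk and the FreeBSD one above move rte_trace_save()/eal_trace_fini()
ahead of rte_eal_memory_detach(): trace buffers live in DPDK-managed memory, so
saving them after detach would dereference dangling pointers. The general rule,
sketched with hypothetical helpers:

    /* Hypothetical helpers, named only to show the ordering. */
    void stop_workers(void);
    void flush_trace_buffers(void);   /* reads shared memory */
    void detach_shared_memory(void);  /* pointers dangle after this */
    void free_config(void);

    static void app_teardown(void)
    {
        stop_workers();
        flush_trace_buffers();   /* must run while memory is mapped */
        detach_shared_memory();
        free_config();           /* plain heap state, safe last */
    }
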
+diff --git a/dpdk/lib/eal/linux/eal_dev.c b/dpdk/lib/eal/linux/eal_dev.c
+index bde55a3d92..52fe336572 100644
+--- a/dpdk/lib/eal/linux/eal_dev.c
++++ b/dpdk/lib/eal/linux/eal_dev.c
+@@ -231,13 +231,13 @@ dev_uev_handler(__rte_unused void *param)
+ {
+ 	struct rte_dev_event uevent;
+ 	int ret;
+-	char buf[EAL_UEV_MSG_LEN];
++	char buf[EAL_UEV_MSG_LEN + 1];
+ 	struct rte_bus *bus;
+ 	struct rte_device *dev;
+ 	const char *busname = "";
+ 
+ 	memset(&uevent, 0, sizeof(struct rte_dev_event));
+-	memset(buf, 0, EAL_UEV_MSG_LEN);
++	memset(buf, 0, EAL_UEV_MSG_LEN + 1);
+ 
+ 	if (rte_intr_fd_get(intr_handle) < 0)
+ 		return;
+@@ -384,6 +384,7 @@ rte_dev_event_monitor_stop(void)
+ 	close(rte_intr_fd_get(intr_handle));
+ 	rte_intr_instance_free(intr_handle);
+ 	intr_handle = NULL;
++	ret = 0;
+ 
+ 	monitor_refcount--;
+ 
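
Growing the buffer to EAL_UEV_MSG_LEN + 1 reserves a byte for a NUL terminator
even when the kernel delivers a full EAL_UEV_MSG_LEN bytes, so later string
parsing cannot read past the end. The idiom in isolation, with a hypothetical
length:

    #include <sys/socket.h>

    #define MSG_LEN 4096   /* hypothetical cap */

    /* One spare byte lets any received datagram be NUL-terminated
     * before it is parsed as a string. */
    static ssize_t recv_text(int fd, char buf[MSG_LEN + 1])
    {
        ssize_t n = recv(fd, buf, MSG_LEN, 0);
        if (n >= 0)
            buf[n] = '\0';   /* safe: n <= MSG_LEN */
        return n;
    }
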
+diff --git a/dpdk/lib/eal/linux/eal_interrupts.c b/dpdk/lib/eal/linux/eal_interrupts.c
+index 6e3925efd4..70060bf3ef 100644
+--- a/dpdk/lib/eal/linux/eal_interrupts.c
++++ b/dpdk/lib/eal/linux/eal_interrupts.c
+@@ -589,7 +589,7 @@ rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
+ 
+ 	rte_spinlock_lock(&intr_lock);
+ 
+-	/* check if the insterrupt source for the fd is existent */
++	/* check if an interrupt source exists for the fd */
+ 	TAILQ_FOREACH(src, &intr_sources, next) {
+ 		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
+ 			break;
+@@ -639,7 +639,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
+ 
+ 	rte_spinlock_lock(&intr_lock);
+ 
+-	/* check if the insterrupt source for the fd is existent */
++	/* check if an interrupt source exists for the fd */
+ 	TAILQ_FOREACH(src, &intr_sources, next)
+ 		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
+ 			break;
+diff --git a/dpdk/lib/eal/linux/eal_memalloc.c b/dpdk/lib/eal/linux/eal_memalloc.c
+index 337f2bc739..16b58d861b 100644
+--- a/dpdk/lib/eal/linux/eal_memalloc.c
++++ b/dpdk/lib/eal/linux/eal_memalloc.c
+@@ -308,8 +308,8 @@ get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
+ 		if (fd < 0) {
+ 			fd = open(path, O_CREAT | O_RDWR, 0600);
+ 			if (fd < 0) {
+-				RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
+-					__func__, strerror(errno));
++				RTE_LOG(ERR, EAL, "%s(): open '%s' failed: %s\n",
++					__func__, path, strerror(errno));
+ 				return -1;
+ 			}
+ 			/* take out a read lock and keep it indefinitely */
+@@ -346,8 +346,8 @@ get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
+ 
+ 			fd = open(path, O_CREAT | O_RDWR, 0600);
+ 			if (fd < 0) {
+-				RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
+-					__func__, strerror(errno));
++				RTE_LOG(ERR, EAL, "%s(): open '%s' failed: %s\n",
++					__func__, path, strerror(errno));
+ 				return -1;
+ 			}
+ 			/* take out a read lock */
+diff --git a/dpdk/lib/eal/linux/eal_memory.c b/dpdk/lib/eal/linux/eal_memory.c
+index 03a4f2dd2d..fda6a159d5 100644
+--- a/dpdk/lib/eal/linux/eal_memory.c
++++ b/dpdk/lib/eal/linux/eal_memory.c
+@@ -1883,8 +1883,8 @@ memseg_secondary_init(void)
+ 
+ 		msl = &mcfg->memsegs[msl_idx];
+ 
+-		/* skip empty memseg lists */
+-		if (msl->memseg_arr.len == 0)
++		/* skip empty and external memseg lists */
++		if (msl->memseg_arr.len == 0 || msl->external)
+ 			continue;
+ 
+ 		if (rte_fbarray_attach(&msl->memseg_arr)) {
+diff --git a/dpdk/lib/eal/linux/eal_vfio.h b/dpdk/lib/eal/linux/eal_vfio.h
+index 6ebaca6a0c..bba5c7afa5 100644
+--- a/dpdk/lib/eal/linux/eal_vfio.h
++++ b/dpdk/lib/eal/linux/eal_vfio.h
+@@ -103,7 +103,7 @@ struct vfio_group {
+ typedef int (*vfio_dma_func_t)(int);
+ 
+ /* Custom memory region DMA mapping function prototype.
+- * Takes VFIO container fd, virtual address, phisical address, length and
++ * Takes VFIO container fd, virtual address, physical address, length and
+  * operation type (0 to unmap, 1 to map) as parameters.
+  * Returns 0 on success, -1 on error.
+  **/
+@@ -133,6 +133,7 @@ int
+ vfio_has_supported_extensions(int vfio_container_fd);
+ 
+ int vfio_mp_sync_setup(void);
++void vfio_mp_sync_cleanup(void);
+ 
+ #define EAL_VFIO_MP "eal_vfio_mp_sync"
+ 
+diff --git a/dpdk/lib/eal/linux/eal_vfio_mp_sync.c b/dpdk/lib/eal/linux/eal_vfio_mp_sync.c
+index a2accfab3a..d12bbaee64 100644
+--- a/dpdk/lib/eal/linux/eal_vfio_mp_sync.c
++++ b/dpdk/lib/eal/linux/eal_vfio_mp_sync.c
+@@ -120,4 +120,12 @@ vfio_mp_sync_setup(void)
+ 	return 0;
+ }
+ 
++void
++vfio_mp_sync_cleanup(void)
++{
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++		return;
++
++	rte_mp_action_unregister(EAL_VFIO_MP);
++}
+ #endif
+diff --git a/dpdk/lib/eal/linux/include/rte_os.h b/dpdk/lib/eal/linux/include/rte_os.h
+index 35c07c70cb..c72bf5b7e6 100644
+--- a/dpdk/lib/eal/linux/include/rte_os.h
++++ b/dpdk/lib/eal/linux/include/rte_os.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_OS_H_
+ #define _RTE_OS_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * This header should contain any definition
+  * which is not supported natively or named differently in Linux.
+@@ -42,4 +46,8 @@ typedef cpu_set_t rte_cpuset_t;
+ } while (0)
+ #endif
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_OS_H_ */
+diff --git a/dpdk/lib/eal/ppc/include/rte_cycles.h b/dpdk/lib/eal/ppc/include/rte_cycles.h
+index 5585f9273c..666fc9b0bf 100644
+--- a/dpdk/lib/eal/ppc/include/rte_cycles.h
++++ b/dpdk/lib/eal/ppc/include/rte_cycles.h
+@@ -10,7 +10,10 @@
+ extern "C" {
+ #endif
+ 
++#include <features.h>
++#ifdef __GLIBC__
+ #include <sys/platform/ppc.h>
++#endif
+ 
+ #include "generic/rte_cycles.h"
+ 
+@@ -26,7 +29,11 @@ extern "C" {
+ static inline uint64_t
+ rte_rdtsc(void)
+ {
++#ifdef __GLIBC__
+ 	return __ppc_get_timebase();
++#else
++	return __builtin_ppc_get_timebase();
++#endif
+ }
+ 
+ static inline uint64_t
+diff --git a/dpdk/lib/eal/ppc/include/rte_vect.h b/dpdk/lib/eal/ppc/include/rte_vect.h
+index c1f0b0672c..a5f009b7df 100644
+--- a/dpdk/lib/eal/ppc/include/rte_vect.h
++++ b/dpdk/lib/eal/ppc/include/rte_vect.h
+@@ -17,7 +17,7 @@ extern "C" {
+ 
+ #define RTE_VECT_DEFAULT_SIMD_BITWIDTH RTE_VECT_SIMD_256
+ 
+-typedef vector signed int xmm_t;
++typedef __vector signed int xmm_t;
+ 
+ #define	XMM_SIZE	(sizeof(xmm_t))
+ #define	XMM_MASK	(XMM_SIZE - 1)
+diff --git a/dpdk/lib/eal/ppc/rte_cycles.c b/dpdk/lib/eal/ppc/rte_cycles.c
+index 3180adb0ff..cd4bdff8b8 100644
+--- a/dpdk/lib/eal/ppc/rte_cycles.c
++++ b/dpdk/lib/eal/ppc/rte_cycles.c
+@@ -2,12 +2,51 @@
+  * Copyright (C) IBM Corporation 2019.
+  */
+ 
++#include <features.h>
++#ifdef __GLIBC__
+ #include <sys/platform/ppc.h>
++#elif RTE_EXEC_ENV_LINUX
++#include <string.h>
++#include <stdio.h>
++#endif
+ 
+ #include "eal_private.h"
+ 
+ uint64_t
+ get_tsc_freq_arch(void)
+ {
++#ifdef __GLIBC__
+ 	return __ppc_get_timebase_freq();
++#elif RTE_EXEC_ENV_LINUX
++	static unsigned long base;
++	char buf[512];
++	ssize_t nr;
++	FILE *f;
++
++	if (base != 0)
++		goto out;
++
++	f = fopen("/proc/cpuinfo", "rb");
++	if (f == NULL)
++		goto out;
++
++	while (fgets(buf, sizeof(buf), f) != NULL) {
++		char *ret = strstr(buf, "timebase");
++
++		if (ret == NULL)
++			continue;
++		ret += sizeof("timebase") - 1;
++		ret = strchr(ret, ':');
++		if (ret == NULL)
++			continue;
++		base = strtoul(ret + 1, NULL, 10);
++		break;
++	}
++	fclose(f);
++out:
++	return (uint64_t) base;
++#else
++	return 0;
++#endif
++
+ }
+diff --git a/dpdk/lib/eal/windows/eal_memalloc.c b/dpdk/lib/eal/windows/eal_memalloc.c
+index 55d6dcc71c..aa7589b81d 100644
+--- a/dpdk/lib/eal/windows/eal_memalloc.c
++++ b/dpdk/lib/eal/windows/eal_memalloc.c
+@@ -17,7 +17,7 @@ eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
+ 	RTE_SET_USED(list_idx);
+ 	RTE_SET_USED(seg_idx);
+ 	EAL_LOG_NOT_IMPLEMENTED();
+-	return -1;
++	return -ENOTSUP;
+ }
+ 
+ int
+@@ -28,7 +28,7 @@ eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
+ 	RTE_SET_USED(seg_idx);
+ 	RTE_SET_USED(offset);
+ 	EAL_LOG_NOT_IMPLEMENTED();
+-	return -1;
++	return -ENOTSUP;
+ }
+ 
+ static int
+@@ -428,7 +428,7 @@ eal_memalloc_sync_with_primary(void)
+ {
+ 	/* No multi-process support. */
+ 	EAL_LOG_NOT_IMPLEMENTED();
+-	return -1;
++	return -ENOTSUP;
+ }
+ 
+ int
+diff --git a/dpdk/lib/eal/windows/eal_thread.c b/dpdk/lib/eal/windows/eal_thread.c
+index 54fa93fa62..ff84cb42af 100644
+--- a/dpdk/lib/eal/windows/eal_thread.c
++++ b/dpdk/lib/eal/windows/eal_thread.c
+@@ -150,13 +150,18 @@ eal_thread_create(pthread_t *thread)
+ 
+ 	th = CreateThread(NULL, 0,
+ 		(LPTHREAD_START_ROUTINE)(ULONG_PTR)eal_thread_loop,
+-						NULL, 0, (LPDWORD)thread);
++						NULL, CREATE_SUSPENDED, (LPDWORD)thread);
+ 	if (!th)
+ 		return -1;
+ 
+ 	SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);
+ 	SetThreadPriority(th, THREAD_PRIORITY_NORMAL);
+ 
++	if (ResumeThread(th) == (DWORD)-1) {
++		(void)CloseHandle(th);
++		return -1;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/dpdk/lib/eal/windows/eal_windows.h b/dpdk/lib/eal/windows/eal_windows.h
+index 23ead6d30c..245aa60344 100644
+--- a/dpdk/lib/eal/windows/eal_windows.h
++++ b/dpdk/lib/eal/windows/eal_windows.h
+@@ -63,7 +63,7 @@ unsigned int eal_socket_numa_node(unsigned int socket_id);
+  * @param arg
+  *  Argument to the called function.
+  * @return
+- *  0 on success, netagive error code on failure.
++ *  0 on success, negative error code on failure.
+  */
+ int eal_intr_thread_schedule(void (*func)(void *arg), void *arg);
+ 
+diff --git a/dpdk/lib/eal/windows/include/dirent.h b/dpdk/lib/eal/windows/include/dirent.h
+index 869a598378..34eb077f8c 100644
+--- a/dpdk/lib/eal/windows/include/dirent.h
++++ b/dpdk/lib/eal/windows/include/dirent.h
+@@ -440,7 +440,7 @@ opendir(const char *dirname)
+  * display correctly on console. The problem can be fixed in two ways:
+  * (1) change the character set of console to 1252 using chcp utility
+  * and use Lucida Console font, or (2) use _cprintf function when
+- * writing to console. The _cprinf() will re-encode ANSI strings to the
++ * writing to console. The _cprintf() will re-encode ANSI strings to the
+  * console code page so many non-ASCII characters will display correctly.
+  */
+ static struct dirent*
+@@ -579,7 +579,7 @@ dirent_mbstowcs_s(
+ 			wcstr[n] = 0;
+ 		}
+ 
+-		/* Length of resuting multi-byte string WITH zero
++		/* Length of resulting multi-byte string WITH zero
+ 		 * terminator
+ 		 */
+ 		if (pReturnValue)
+diff --git a/dpdk/lib/eal/windows/include/fnmatch.h b/dpdk/lib/eal/windows/include/fnmatch.h
+index c272f65ccd..c6b226bd5d 100644
+--- a/dpdk/lib/eal/windows/include/fnmatch.h
++++ b/dpdk/lib/eal/windows/include/fnmatch.h
+@@ -26,14 +26,14 @@ extern "C" {
+ #define FNM_PREFIX_DIRS 0x20
+ 
+ /**
+- * This function is used for searhing a given string source
++ * This function is used for searching a given string source
+  * with the given regular expression pattern.
+  *
+  * @param pattern
+  *	regular expression notation describing the pattern to match
+  *
+  * @param string
+- *	source string to searcg for the pattern
++ *	source string to search for the pattern
+  *
+  * @param flag
+  *	containing information about the pattern
+diff --git a/dpdk/lib/eal/windows/include/rte_windows.h b/dpdk/lib/eal/windows/include/rte_windows.h
+index 0063b5d78c..83730c3d2e 100644
+--- a/dpdk/lib/eal/windows/include/rte_windows.h
++++ b/dpdk/lib/eal/windows/include/rte_windows.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_WINDOWS_H_
+ #define _RTE_WINDOWS_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * @file Windows-specific facilities
+  *
+@@ -48,4 +52,8 @@
+ 		RTE_FMT_HEAD(__VA_ARGS__,) "\n", GetLastError(), \
+ 		RTE_FMT_TAIL(__VA_ARGS__,)))
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_WINDOWS_H_ */
+diff --git a/dpdk/lib/eal/x86/include/rte_atomic.h b/dpdk/lib/eal/x86/include/rte_atomic.h
+index 915afd9d27..f2ee1a9ce9 100644
+--- a/dpdk/lib/eal/x86/include/rte_atomic.h
++++ b/dpdk/lib/eal/x86/include/rte_atomic.h
+@@ -60,7 +60,7 @@ extern "C" {
+  * Basic idea is to use lock prefixed add with some dummy memory location
+  * as the destination. From their experiments 128B(2 cache lines) below
+  * current stack pointer looks like a good candidate.
+- * So below we use that techinque for rte_smp_mb() implementation.
++ * So below we use that technique for rte_smp_mb() implementation.
+  */
+ 
+ static __rte_always_inline void
+diff --git a/dpdk/lib/eal/x86/include/rte_memcpy.h b/dpdk/lib/eal/x86/include/rte_memcpy.h
+index 1b6c6e585f..b678b5c942 100644
+--- a/dpdk/lib/eal/x86/include/rte_memcpy.h
++++ b/dpdk/lib/eal/x86/include/rte_memcpy.h
+@@ -45,6 +45,52 @@ extern "C" {
+ static __rte_always_inline void *
+ rte_memcpy(void *dst, const void *src, size_t n);
+ 
++/**
++ * Copy bytes from one location to another,
++ * locations should not overlap.
++ * Use with n <= 15.
++ */
++static __rte_always_inline void *
++rte_mov15_or_less(void *dst, const void *src, size_t n)
++{
++	/**
++	 * Use the following structs to avoid violating C standard
++	 * alignment requirements and to avoid strict aliasing bugs
++	 */
++	struct rte_uint64_alias {
++		uint64_t val;
++	} __rte_packed __rte_may_alias;
++	struct rte_uint32_alias {
++		uint32_t val;
++	} __rte_packed __rte_may_alias;
++	struct rte_uint16_alias {
++		uint16_t val;
++	} __rte_packed __rte_may_alias;
++
++	void *ret = dst;
++	if (n & 8) {
++		((struct rte_uint64_alias *)dst)->val =
++			((const struct rte_uint64_alias *)src)->val;
++		src = (const uint64_t *)src + 1;
++		dst = (uint64_t *)dst + 1;
++	}
++	if (n & 4) {
++		((struct rte_uint32_alias *)dst)->val =
++			((const struct rte_uint32_alias *)src)->val;
++		src = (const uint32_t *)src + 1;
++		dst = (uint32_t *)dst + 1;
++	}
++	if (n & 2) {
++		((struct rte_uint16_alias *)dst)->val =
++			((const struct rte_uint16_alias *)src)->val;
++		src = (const uint16_t *)src + 1;
++		dst = (uint16_t *)dst + 1;
++	}
++	if (n & 1)
++		*(uint8_t *)dst = *(const uint8_t *)src;
++	return ret;
++}
++
+ #if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
+ 
+ #define ALIGNMENT_MASK 0x3F
+@@ -171,8 +217,6 @@ rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
+ static __rte_always_inline void *
+ rte_memcpy_generic(void *dst, const void *src, size_t n)
+ {
+-	uintptr_t dstu = (uintptr_t)dst;
+-	uintptr_t srcu = (uintptr_t)src;
+ 	void *ret = dst;
+ 	size_t dstofss;
+ 	size_t bits;
+@@ -181,24 +225,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
+ 	 * Copy less than 16 bytes
+ 	 */
+ 	if (n < 16) {
+-		if (n & 0x01) {
+-			*(uint8_t *)dstu = *(const uint8_t *)srcu;
+-			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint8_t *)dstu + 1);
+-		}
+-		if (n & 0x02) {
+-			*(uint16_t *)dstu = *(const uint16_t *)srcu;
+-			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint16_t *)dstu + 1);
+-		}
+-		if (n & 0x04) {
+-			*(uint32_t *)dstu = *(const uint32_t *)srcu;
+-			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint32_t *)dstu + 1);
+-		}
+-		if (n & 0x08)
+-			*(uint64_t *)dstu = *(const uint64_t *)srcu;
+-		return ret;
++		return rte_mov15_or_less(dst, src, n);
+ 	}
+ 
+ 	/**
+@@ -379,8 +406,6 @@ rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
+ static __rte_always_inline void *
+ rte_memcpy_generic(void *dst, const void *src, size_t n)
+ {
+-	uintptr_t dstu = (uintptr_t)dst;
+-	uintptr_t srcu = (uintptr_t)src;
+ 	void *ret = dst;
+ 	size_t dstofss;
+ 	size_t bits;
+@@ -389,25 +414,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
+ 	 * Copy less than 16 bytes
+ 	 */
+ 	if (n < 16) {
+-		if (n & 0x01) {
+-			*(uint8_t *)dstu = *(const uint8_t *)srcu;
+-			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint8_t *)dstu + 1);
+-		}
+-		if (n & 0x02) {
+-			*(uint16_t *)dstu = *(const uint16_t *)srcu;
+-			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint16_t *)dstu + 1);
+-		}
+-		if (n & 0x04) {
+-			*(uint32_t *)dstu = *(const uint32_t *)srcu;
+-			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint32_t *)dstu + 1);
+-		}
+-		if (n & 0x08) {
+-			*(uint64_t *)dstu = *(const uint64_t *)srcu;
+-		}
+-		return ret;
++		return rte_mov15_or_less(dst, src, n);
+ 	}
+ 
+ 	/**
+@@ -672,8 +679,6 @@ static __rte_always_inline void *
+ rte_memcpy_generic(void *dst, const void *src, size_t n)
+ {
+ 	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
+-	uintptr_t dstu = (uintptr_t)dst;
+-	uintptr_t srcu = (uintptr_t)src;
+ 	void *ret = dst;
+ 	size_t dstofss;
+ 	size_t srcofs;
+@@ -682,25 +687,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
+ 	 * Copy less than 16 bytes
+ 	 */
+ 	if (n < 16) {
+-		if (n & 0x01) {
+-			*(uint8_t *)dstu = *(const uint8_t *)srcu;
+-			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint8_t *)dstu + 1);
+-		}
+-		if (n & 0x02) {
+-			*(uint16_t *)dstu = *(const uint16_t *)srcu;
+-			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint16_t *)dstu + 1);
+-		}
+-		if (n & 0x04) {
+-			*(uint32_t *)dstu = *(const uint32_t *)srcu;
+-			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+-			dstu = (uintptr_t)((uint32_t *)dstu + 1);
+-		}
+-		if (n & 0x08) {
+-			*(uint64_t *)dstu = *(const uint64_t *)srcu;
+-		}
+-		return ret;
++		return rte_mov15_or_less(dst, src, n);
+ 	}
+ 
+ 	/**
+@@ -818,27 +805,9 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
+ {
+ 	void *ret = dst;
+ 
+-	/* Copy size <= 16 bytes */
++	/* Copy size < 16 bytes */
+ 	if (n < 16) {
+-		if (n & 0x01) {
+-			*(uint8_t *)dst = *(const uint8_t *)src;
+-			src = (const uint8_t *)src + 1;
+-			dst = (uint8_t *)dst + 1;
+-		}
+-		if (n & 0x02) {
+-			*(uint16_t *)dst = *(const uint16_t *)src;
+-			src = (const uint16_t *)src + 1;
+-			dst = (uint16_t *)dst + 1;
+-		}
+-		if (n & 0x04) {
+-			*(uint32_t *)dst = *(const uint32_t *)src;
+-			src = (const uint32_t *)src + 1;
+-			dst = (uint32_t *)dst + 1;
+-		}
+-		if (n & 0x08)
+-			*(uint64_t *)dst = *(const uint64_t *)src;
+-
+-		return ret;
++		return rte_mov15_or_less(dst, src, n);
+ 	}
+ 
+ 	/* Copy 16 <= size <= 32 bytes */
+@@ -882,6 +851,8 @@ rte_memcpy(void *dst, const void *src, size_t n)
+ 		return rte_memcpy_generic(dst, src, n);
+ }
+ 
++#undef ALIGNMENT_MASK
++
+ #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 100000)
+ #pragma GCC diagnostic pop
+ #endif
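
The rte_mov15_or_less() helper above replaces raw pointer-cast stores with
packed, may_alias wrapper structs: casting an arbitrary byte pointer to a plain
uint32_t * and dereferencing it is undefined behavior under strict aliasing and
on alignment-sensitive targets. The idiom in isolation:

    #include <stdint.h>

    /* A packed, may_alias wrapper makes an unaligned, type-punned
     * access well-defined; the compiler still emits one wide load or
     * store where the target allows it. */
    struct u32_alias { uint32_t v; } __attribute__((__packed__, __may_alias__));

    static inline void copy_u32_unaligned(void *dst, const void *src)
    {
        ((struct u32_alias *)dst)->v = ((const struct u32_alias *)src)->v;
    }
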
+diff --git a/dpdk/lib/efd/rte_efd.c b/dpdk/lib/efd/rte_efd.c
+index 86ef46863c..6c794d7750 100644
+--- a/dpdk/lib/efd/rte_efd.c
++++ b/dpdk/lib/efd/rte_efd.c
+@@ -1165,7 +1165,7 @@ rte_efd_update(struct rte_efd_table * const table, const unsigned int socket_id,
+ {
+ 	uint32_t chunk_id = 0, group_id = 0, bin_id = 0;
+ 	uint8_t new_bin_choice = 0;
+-	struct efd_online_group_entry entry;
++	struct efd_online_group_entry entry = {{0}};
+ 
+ 	int status = efd_compute_update(table, socket_id, key, value,
+ 			&chunk_id, &group_id, &bin_id,
+diff --git a/dpdk/lib/ethdev/ethdev_driver.h b/dpdk/lib/ethdev/ethdev_driver.h
+index d95605a355..2822fd8c72 100644
+--- a/dpdk/lib/ethdev/ethdev_driver.h
++++ b/dpdk/lib/ethdev/ethdev_driver.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_ETHDEV_DRIVER_H_
+ #define _RTE_ETHDEV_DRIVER_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * @file
+  *
+@@ -1629,6 +1633,24 @@ rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
+ 				struct rte_hairpin_peer_info *peer_info,
+ 				uint32_t direction);
+ 
++/**
++ * @internal
++ * Get rte_eth_dev from device name. The device name should be specified
++ * as below:
++ * - PCIe address (Domain:Bus:Device.Function), for example 0000:2:00.0
++ * - SoC device name, for example fsl-gmac0
++ * - vdev dpdk name, for example net_[pcap0|null0|tap0]
++ *
++ * @param name
++ *   PCI address or name of the device
++ * @return
++ *   - rte_eth_dev if successful
++ *   - NULL on failure
++ */
++__rte_internal
++struct rte_eth_dev*
++rte_eth_dev_get_by_name(const char *name);
++
+ /**
+  * @internal
+  * Reset the current queue state and configuration to disconnect (unbind) it
+@@ -1750,4 +1772,8 @@ struct rte_eth_tunnel_filter_conf {
+ 	uint16_t queue_id;      /**< Queue assigned to if match */
+ };
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_ETHDEV_DRIVER_H_ */
+diff --git a/dpdk/lib/ethdev/ethdev_pci.h b/dpdk/lib/ethdev/ethdev_pci.h
+index 71aa4b2e98..0549842709 100644
+--- a/dpdk/lib/ethdev/ethdev_pci.h
++++ b/dpdk/lib/ethdev/ethdev_pci.h
+@@ -6,6 +6,10 @@
+ #ifndef _RTE_ETHDEV_PCI_H_
+ #define _RTE_ETHDEV_PCI_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <rte_malloc.h>
+ #include <rte_pci.h>
+ #include <rte_bus_pci.h>
+@@ -46,8 +50,9 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
+ }
+ 
+ static inline int
+-eth_dev_pci_specific_init(struct rte_eth_dev *eth_dev, void *bus_device) {
+-	struct rte_pci_device *pci_dev = bus_device;
++eth_dev_pci_specific_init(struct rte_eth_dev *eth_dev, void *bus_device)
++{
++	struct rte_pci_device *pci_dev = (struct rte_pci_device *)bus_device;
+ 
+ 	if (!pci_dev)
+ 		return -ENODEV;
+@@ -171,4 +176,8 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev,
+ 	return 0;
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_ETHDEV_PCI_H_ */
+diff --git a/dpdk/lib/ethdev/ethdev_profile.h b/dpdk/lib/ethdev/ethdev_profile.h
+index e5ee4df824..881aec1273 100644
+--- a/dpdk/lib/ethdev/ethdev_profile.h
++++ b/dpdk/lib/ethdev/ethdev_profile.h
+@@ -6,6 +6,7 @@
+ #define _RTE_ETHDEV_PROFILE_H_
+ 
+ #include "rte_ethdev.h"
++#include "ethdev_driver.h"
+ 
+ /**
+  * Initialization of the Ethernet device profiling.
+diff --git a/dpdk/lib/ethdev/ethdev_vdev.h b/dpdk/lib/ethdev/ethdev_vdev.h
+index 2b49e9665b..f5f536ce64 100644
+--- a/dpdk/lib/ethdev/ethdev_vdev.h
++++ b/dpdk/lib/ethdev/ethdev_vdev.h
+@@ -6,6 +6,10 @@
+ #ifndef _RTE_ETHDEV_VDEV_H_
+ #define _RTE_ETHDEV_VDEV_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <rte_config.h>
+ #include <rte_malloc.h>
+ #include <rte_bus_vdev.h>
+@@ -52,4 +56,8 @@ rte_eth_vdev_allocate(struct rte_vdev_device *dev, size_t private_data_size)
+ 	return eth_dev;
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_ETHDEV_VDEV_H_ */
+diff --git a/dpdk/lib/ethdev/rte_dev_info.h b/dpdk/lib/ethdev/rte_dev_info.h
+index cb2fe0ae97..67cf0ae526 100644
+--- a/dpdk/lib/ethdev/rte_dev_info.h
++++ b/dpdk/lib/ethdev/rte_dev_info.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_DEV_INFO_H_
+ #define _RTE_DEV_INFO_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <stdint.h>
+ 
+ /*
+@@ -48,4 +52,8 @@ struct rte_eth_dev_module_info {
+ #define RTE_ETH_MODULE_SFF_8436_LEN         256
+ #define RTE_ETH_MODULE_SFF_8436_MAX_LEN     640
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_DEV_INFO_H_ */
+diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c
+index a1d475a292..62e67f006d 100644
+--- a/dpdk/lib/ethdev/rte_ethdev.c
++++ b/dpdk/lib/ethdev/rte_ethdev.c
+@@ -894,6 +894,17 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
+ 	return -ENODEV;
+ }
+ 
++struct rte_eth_dev *
++rte_eth_dev_get_by_name(const char *name)
++{
++	uint16_t pid;
++
++	if (rte_eth_dev_get_port_by_name(name, &pid))
++		return NULL;
++
++	return &rte_eth_devices[pid];
++}
++
+ static int
+ eth_err(uint16_t port_id, int ret)
+ {
+@@ -1879,8 +1890,9 @@ rte_eth_dev_stop(uint16_t port_id)
+ 	/* point fast-path functions to dummy ones */
+ 	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
+ 
+-	dev->data->dev_started = 0;
+ 	ret = (*dev->dev_ops->dev_stop)(dev);
++	if (ret == 0)
++		dev->data->dev_started = 0;
+ 	rte_ethdev_trace_stop(port_id, ret);
+ 
+ 	return ret;
+@@ -1920,7 +1932,13 @@ rte_eth_dev_close(uint16_t port_id)
+ 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ 	dev = &rte_eth_devices[port_id];
+ 
+-	if (dev->data->dev_started) {
++	/*
++	 * A secondary process needs to close the device to release its
++	 * process-private resources, but it should not be obliged to wait
++	 * for the device to stop before closing the ethdev.
++	 */
++	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
++			dev->data->dev_started) {
+ 		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
+ 			       port_id);
+ 		return -EINVAL;
+@@ -4070,6 +4088,7 @@ rte_eth_dev_rss_reta_update(uint16_t port_id,
+ 			    struct rte_eth_rss_reta_entry64 *reta_conf,
+ 			    uint16_t reta_size)
+ {
++	enum rte_eth_rx_mq_mode mq_mode;
+ 	struct rte_eth_dev *dev;
+ 	int ret;
+ 
+@@ -4101,6 +4120,12 @@ rte_eth_dev_rss_reta_update(uint16_t port_id,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
++	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
++		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
++		return -ENOTSUP;
++	}
++
+ 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
+ 	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
+ 							     reta_size));
+@@ -4140,6 +4165,7 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
+ {
+ 	struct rte_eth_dev *dev;
+ 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
++	enum rte_eth_rx_mq_mode mq_mode;
+ 	int ret;
+ 
+ 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+@@ -4165,6 +4191,13 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
+ 			dev_info.flow_type_rss_offloads);
+ 		return -EINVAL;
+ 	}
++
++	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
++	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
++		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
++		return -ENOTSUP;
++	}
++
+ 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
+ 	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
+ 								 rss_conf));
+@@ -6156,6 +6189,8 @@ eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
+ {
+ 	int q;
+ 	struct rte_tel_data *q_data = rte_tel_data_alloc();
++	if (q_data == NULL)
++		return;
+ 	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
+ 	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
+ 		rte_tel_data_add_array_u64(q_data, q_stats[q]);
+@@ -6249,6 +6284,7 @@ eth_dev_handle_port_xstats(const char *cmd __rte_unused,
+ 	for (i = 0; i < num_xstats; i++)
+ 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
+ 				eth_xstats[i].value);
++	free(eth_xstats);
+ 	return 0;
+ }
+ 
+@@ -6295,7 +6331,7 @@ eth_dev_handle_port_info(const char *cmd __rte_unused,
+ 		struct rte_tel_data *d)
+ {
+ 	struct rte_tel_data *rxq_state, *txq_state;
+-	char mac_addr[RTE_ETHER_ADDR_LEN];
++	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
+ 	struct rte_eth_dev *eth_dev;
+ 	char *end_param;
+ 	int port_id, i;
+@@ -6312,16 +6348,16 @@ eth_dev_handle_port_info(const char *cmd __rte_unused,
+ 		return -EINVAL;
+ 
+ 	eth_dev = &rte_eth_devices[port_id];
+-	if (!eth_dev)
+-		return -EINVAL;
+ 
+ 	rxq_state = rte_tel_data_alloc();
+ 	if (!rxq_state)
+ 		return -ENOMEM;
+ 
+ 	txq_state = rte_tel_data_alloc();
+-	if (!txq_state)
++	if (!txq_state) {
++		rte_tel_data_free(rxq_state);
+ 		return -ENOMEM;
++	}
+ 
+ 	rte_tel_data_start_dict(d);
+ 	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
+@@ -6336,13 +6372,8 @@ eth_dev_handle_port_info(const char *cmd __rte_unused,
+ 			eth_dev->data->min_rx_buf_size);
+ 	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
+ 			eth_dev->data->rx_mbuf_alloc_failed);
+-	snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x",
+-			 eth_dev->data->mac_addrs->addr_bytes[0],
+-			 eth_dev->data->mac_addrs->addr_bytes[1],
+-			 eth_dev->data->mac_addrs->addr_bytes[2],
+-			 eth_dev->data->mac_addrs->addr_bytes[3],
+-			 eth_dev->data->mac_addrs->addr_bytes[4],
+-			 eth_dev->data->mac_addrs->addr_bytes[5]);
++	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
++			eth_dev->data->mac_addrs);
+ 	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
+ 	rte_tel_data_add_dict_int(d, "promiscuous",
+ 			eth_dev->data->promiscuous);
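
The new internal rte_eth_dev_get_by_name() above wraps
rte_eth_dev_get_port_by_name() so drivers can resolve a device handle in one
call. Usage sketch (the PCI address is illustrative):

    struct rte_eth_dev *dev = rte_eth_dev_get_by_name("0000:02:00.0");
    if (dev == NULL)
        return -ENODEV;   /* no ethdev registered under that name */
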
+diff --git a/dpdk/lib/ethdev/rte_ethdev.h b/dpdk/lib/ethdev/rte_ethdev.h
+index fa299c8ad7..0be04c5809 100644
+--- a/dpdk/lib/ethdev/rte_ethdev.h
++++ b/dpdk/lib/ethdev/rte_ethdev.h
+@@ -74,7 +74,7 @@
+  * rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
+  * device and then do the reconfiguration before calling rte_eth_dev_start()
+  * again. The transmit and receive functions should not be invoked when the
+- * device is stopped.
++ * device or the queue is stopped.
+  *
+  * Please note that some configuration is not stored between calls to
+  * rte_eth_dev_stop()/rte_eth_dev_start(). The following configuration will
+@@ -1787,7 +1787,7 @@ enum rte_eth_representor_type {
+  * device, etc...
+  */
+ struct rte_eth_dev_info {
+-	struct rte_device *device; /** Generic device information */
++	struct rte_device *device; /**< Generic device information */
+ 	const char *driver_name; /**< Device Driver name. */
+ 	unsigned int if_index; /**< Index to bound host interface, or 0 if none.
+ 		Use if_indextoname() to translate into an interface name. */
+@@ -1801,8 +1801,8 @@ struct rte_eth_dev_info {
+ 	uint16_t max_rx_queues; /**< Maximum number of Rx queues. */
+ 	uint16_t max_tx_queues; /**< Maximum number of Tx queues. */
+ 	uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */
+-	uint32_t max_hash_mac_addrs;
+ 	/** Maximum number of hash MAC addresses for MTA and UTA. */
++	uint32_t max_hash_mac_addrs;
+ 	uint16_t max_vfs; /**< Maximum number of VFs. */
+ 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
+ 	struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/
+diff --git a/dpdk/lib/ethdev/version.map b/dpdk/lib/ethdev/version.map
+index c2fb0669a4..1f7359c846 100644
+--- a/dpdk/lib/ethdev/version.map
++++ b/dpdk/lib/ethdev/version.map
+@@ -267,6 +267,7 @@ INTERNAL {
+ 	rte_eth_dev_callback_process;
+ 	rte_eth_dev_create;
+ 	rte_eth_dev_destroy;
++	rte_eth_dev_get_by_name;
+ 	rte_eth_dev_is_rx_hairpin_queue;
+ 	rte_eth_dev_is_tx_hairpin_queue;
+ 	rte_eth_dev_probing_finish;
+diff --git a/dpdk/lib/eventdev/eventdev_pmd.h b/dpdk/lib/eventdev/eventdev_pmd.h
+index d0b0c00a60..ce469d47a6 100644
+--- a/dpdk/lib/eventdev/eventdev_pmd.h
++++ b/dpdk/lib/eventdev/eventdev_pmd.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_EVENTDEV_PMD_H_
+ #define _RTE_EVENTDEV_PMD_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /** @file
+  * RTE Event PMD APIs
+  *
+diff --git a/dpdk/lib/eventdev/eventdev_pmd_pci.h b/dpdk/lib/eventdev/eventdev_pmd_pci.h
+index 499852db16..24b56faaa9 100644
+--- a/dpdk/lib/eventdev/eventdev_pmd_pci.h
++++ b/dpdk/lib/eventdev/eventdev_pmd_pci.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_EVENTDEV_PMD_PCI_H_
+ #define _RTE_EVENTDEV_PMD_PCI_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /** @file
+  * RTE Eventdev PCI PMD APIs
+  *
+@@ -150,4 +154,8 @@ rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
+ 	return 0;
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_EVENTDEV_PMD_PCI_H_ */
+diff --git a/dpdk/lib/eventdev/eventdev_pmd_vdev.h b/dpdk/lib/eventdev/eventdev_pmd_vdev.h
+index d9ee7277dd..77904910a2 100644
+--- a/dpdk/lib/eventdev/eventdev_pmd_vdev.h
++++ b/dpdk/lib/eventdev/eventdev_pmd_vdev.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_EVENTDEV_PMD_VDEV_H_
+ #define _RTE_EVENTDEV_PMD_VDEV_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /** @file
+  * RTE Eventdev VDEV PMD APIs
+  *
+@@ -99,4 +103,8 @@ rte_event_pmd_vdev_uninit(const char *name)
+ 	return 0;
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_EVENTDEV_PMD_VDEV_H_ */
+diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c
+index 809416d9b7..2356e2a535 100644
+--- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c
+@@ -293,6 +293,30 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+ 	} \
+ } while (0)
+ 
++#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \
++	if (!rxa_validate_id(id)) { \
++		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
++		ret = retval; \
++		goto error; \
++	} \
++} while (0)
++
++#define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \
++	if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \
++		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token\n"); \
++		ret = retval; \
++		goto error; \
++	} \
++} while (0)
++
++#define RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, retval) do { \
++	if (!rte_eth_dev_is_valid_port(port_id)) { \
++		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
++		ret = retval; \
++		goto error; \
++	} \
++} while (0)
++
+ static inline int
+ rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
+ {
+@@ -2984,15 +3008,17 @@ rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+ 		return -EINVAL;
+ 	}
+ 
+-	queue_info = &dev_info->rx_queue[rx_queue_id];
+-	event_buf = queue_info->event_buf;
+-	q_stats = queue_info->stats;
++	if (dev_info->internal_event_port == 0) {
++		queue_info = &dev_info->rx_queue[rx_queue_id];
++		event_buf = queue_info->event_buf;
++		q_stats = queue_info->stats;
+ 
+-	stats->rx_event_buf_count = event_buf->count;
+-	stats->rx_event_buf_size = event_buf->events_size;
+-	stats->rx_packets = q_stats->rx_packets;
+-	stats->rx_poll_count = q_stats->rx_poll_count;
+-	stats->rx_dropped = q_stats->rx_dropped;
++		stats->rx_event_buf_count = event_buf->count;
++		stats->rx_event_buf_size = event_buf->events_size;
++		stats->rx_packets = q_stats->rx_packets;
++		stats->rx_poll_count = q_stats->rx_poll_count;
++		stats->rx_dropped = q_stats->rx_dropped;
++	}
+ 
+ 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ 	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
+@@ -3086,8 +3112,10 @@ rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+ 		return -EINVAL;
+ 	}
+ 
+-	queue_info = &dev_info->rx_queue[rx_queue_id];
+-	rxa_queue_stats_reset(queue_info);
++	if (dev_info->internal_event_port == 0) {
++		queue_info = &dev_info->rx_queue[rx_queue_id];
++		rxa_queue_stats_reset(queue_info);
++	}
+ 
+ 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ 	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
+@@ -3170,11 +3198,11 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
+ 			uint16_t rx_queue_id,
+ 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+ {
++#define TICK2NSEC(_ticks, _freq) (((_ticks) * (1E9)) / (_freq))
+ 	struct rte_eventdev *dev;
+ 	struct event_eth_rx_adapter *rx_adapter;
+ 	struct eth_device_info *dev_info;
+ 	struct eth_rx_queue_info *queue_info;
+-	struct rte_event *qi_ev;
+ 	int ret;
+ 
+ 	if (rxa_memzone_lookup())
+@@ -3205,7 +3233,6 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
+ 	}
+ 
+ 	queue_info = &dev_info->rx_queue[rx_queue_id];
+-	qi_ev = (struct rte_event *)&queue_info->event;
+ 
+ 	memset(queue_conf, 0, sizeof(*queue_conf));
+ 	queue_conf->rx_queue_flags = 0;
+@@ -3214,7 +3241,18 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
+ 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+ 	queue_conf->servicing_weight = queue_info->wt;
+ 
+-	memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
++	queue_conf->ev.event = queue_info->event;
++
++	queue_conf->vector_sz = queue_info->vector_data.max_vector_count;
++	queue_conf->vector_mp = queue_info->vector_data.vector_pool;
++	/* needs to be converted from ticks to ns */
++	queue_conf->vector_timeout_ns = TICK2NSEC(
++		queue_info->vector_data.vector_timeout_ticks, rte_get_timer_hz());
++
++	if (queue_info->event_buf != NULL)
++		queue_conf->event_buf_size = queue_info->event_buf->events_size;
++	else
++		queue_conf->event_buf_size = 0;
+ 
+ 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ 	if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
+@@ -3276,7 +3314,7 @@ handle_rxa_stats_reset(const char *cmd __rte_unused,
+ {
+ 	uint8_t rx_adapter_id;
+ 
+-	if (params == NULL || strlen(params) == 0 || ~isdigit(*params))
++	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+ 		return -1;
+ 
+ 	/* Get Rx adapter ID from parameter string */
+@@ -3299,7 +3337,7 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
+ {
+ 	uint8_t rx_adapter_id;
+ 	uint16_t rx_queue_id;
+-	int eth_dev_id;
++	int eth_dev_id, ret = -1;
+ 	char *token, *l_params;
+ 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+ 
+@@ -3308,33 +3346,37 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
+ 
+ 	/* Get Rx adapter ID from parameter string */
+ 	l_params = strdup(params);
++	if (l_params == NULL)
++		return -ENOMEM;
+ 	token = strtok(l_params, ",");
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 	rx_adapter_id = strtoul(token, NULL, 10);
+-	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
++	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+-	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+-		return -1;
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 
+ 	/* Get device ID from parameter string */
+ 	eth_dev_id = strtoul(token, NULL, 10);
+-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
++	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+-	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+-		return -1;
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 
+ 	/* Get Rx queue ID from parameter string */
+ 	rx_queue_id = strtoul(token, NULL, 10);
+ 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto error;
+ 	}
+ 
+ 	token = strtok(NULL, "\0");
+ 	if (token != NULL)
+ 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+-				 " telemetry command, igrnoring");
++				 " telemetry command, ignoring");
++	/* Parameter parsing finished */
++	free(l_params);
+ 
+ 	if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
+ 						    rx_queue_id, &queue_conf)) {
+@@ -3354,6 +3396,10 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
+ 	RXA_ADD_DICT(queue_conf.ev, flow_id);
+ 
+ 	return 0;
++
++error:
++	free(l_params);
++	return ret;
+ }
+ 
+ static int
+@@ -3363,7 +3409,7 @@ handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+ {
+ 	uint8_t rx_adapter_id;
+ 	uint16_t rx_queue_id;
+-	int eth_dev_id;
++	int eth_dev_id, ret = -1;
+ 	char *token, *l_params;
+ 	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+ 
+@@ -3372,33 +3418,37 @@ handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+ 
+ 	/* Get Rx adapter ID from parameter string */
+ 	l_params = strdup(params);
++	if (l_params == NULL)
++		return -ENOMEM;
+ 	token = strtok(l_params, ",");
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 	rx_adapter_id = strtoul(token, NULL, 10);
+-	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
++	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+-	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+-		return -1;
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 
+ 	/* Get device ID from parameter string */
+ 	eth_dev_id = strtoul(token, NULL, 10);
+-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
++	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+-	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+-		return -1;
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 
+ 	/* Get Rx queue ID from parameter string */
+ 	rx_queue_id = strtoul(token, NULL, 10);
+ 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto error;
+ 	}
+ 
+ 	token = strtok(NULL, "\0");
+ 	if (token != NULL)
+ 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+-				 " telemetry command, igrnoring");
++				 " telemetry command, ignoring");
++	/* Parameter parsing finished */
++	free(l_params);
+ 
+ 	if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+ 						    rx_queue_id, &q_stats)) {
+@@ -3417,6 +3467,10 @@ handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+ 	RXA_ADD_DICT(q_stats, rx_dropped);
+ 
+ 	return 0;
++
++error:
++	free(l_params);
++	return ret;
+ }
+ 
+ static int
+@@ -3426,7 +3480,7 @@ handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+ {
+ 	uint8_t rx_adapter_id;
+ 	uint16_t rx_queue_id;
+-	int eth_dev_id;
++	int eth_dev_id, ret = -1;
+ 	char *token, *l_params;
+ 
+ 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+@@ -3434,33 +3488,37 @@ handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+ 
+ 	/* Get Rx adapter ID from parameter string */
+ 	l_params = strdup(params);
++	if (l_params == NULL)
++		return -ENOMEM;
+ 	token = strtok(l_params, ",");
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 	rx_adapter_id = strtoul(token, NULL, 10);
+-	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
++	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+-	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+-		return -1;
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 
+ 	/* Get device ID from parameter string */
+ 	eth_dev_id = strtoul(token, NULL, 10);
+-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
++	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+-	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+-		return -1;
++	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+ 
+ 	/* Get Rx queue ID from parameter string */
+ 	rx_queue_id = strtoul(token, NULL, 10);
+ 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto error;
+ 	}
+ 
+ 	token = strtok(NULL, "\0");
+ 	if (token != NULL)
+ 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+-				 " telemetry command, igrnoring");
++				 " telemetry command, ignoring");
++	/* Parameter parsing finished */
++	free(l_params);
+ 
+ 	if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+ 						       eth_dev_id,
+@@ -3470,6 +3528,10 @@ handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+ 	}
+ 
+ 	return 0;
++
++error:
++	free(l_params);
++	return ret;
+ }
+ 
+ RTE_INIT(rxa_init_telemetry)
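The three telemetry handlers above share one fix: params is duplicated with strdup(), so every early return leaked the copy, and the new GOTO_ERR_RET macros funnel all failures through a single error label that frees it. A minimal sketch of that pattern follows, with illustrative names rather than the DPDK API:

#include <ctype.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Sketch only: parse "id0,id1" with a single exit path so the
 * strdup()ed copy is freed on every failure branch.
 */
static int
parse_two_ids(const char *params, unsigned long *id0, unsigned long *id1)
{
	char *copy, *token;
	int ret = -1;

	copy = strdup(params);
	if (copy == NULL)
		return -ENOMEM;

	token = strtok(copy, ",");
	if (token == NULL || !isdigit((unsigned char)*token))
		goto error;
	*id0 = strtoul(token, NULL, 10);

	token = strtok(NULL, ",");
	if (token == NULL || !isdigit((unsigned char)*token))
		goto error;
	*id1 = strtoul(token, NULL, 10);

	free(copy);	/* parsing finished, release the copy */
	return 0;

error:
	free(copy);	/* single cleanup point for all failure branches */
	return ret;
}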
+diff --git a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c
+index c17f33f098..b4b37f1cae 100644
+--- a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c
+@@ -224,7 +224,7 @@ txa_service_data_init(void)
+ 	if (txa_service_data_array == NULL) {
+ 		txa_service_data_array =
+ 				txa_memzone_array_get("txa_service_data_array",
+-					sizeof(int),
++					sizeof(*txa_service_data_array),
+ 					RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
+ 		if (txa_service_data_array == NULL)
+ 			return -ENOMEM;
+@@ -806,10 +806,8 @@ txa_service_queue_add(uint8_t id,
+ 
+ 	rte_spinlock_lock(&txa->tx_lock);
+ 
+-	if (txa_service_is_queue_added(txa, eth_dev, tx_queue_id)) {
+-		rte_spinlock_unlock(&txa->tx_lock);
+-		return 0;
+-	}
++	if (txa_service_is_queue_added(txa, eth_dev, tx_queue_id))
++		goto ret_unlock;
+ 
+ 	ret = txa_service_queue_array_alloc(txa, eth_dev->data->port_id);
+ 	if (ret)
+@@ -821,6 +819,8 @@ txa_service_queue_add(uint8_t id,
+ 
+ 	tdi = &txa->txa_ethdev[eth_dev->data->port_id];
+ 	tqi = txa_service_queue(txa, eth_dev->data->port_id, tx_queue_id);
++	if (tqi == NULL)
++		goto err_unlock;
+ 
+ 	txa_retry = &tqi->txa_retry;
+ 	txa_retry->id = txa->id;
+@@ -836,6 +836,10 @@ txa_service_queue_add(uint8_t id,
+ 	tdi->nb_queues++;
+ 	txa->nb_queues++;
+ 
++ret_unlock:
++	rte_spinlock_unlock(&txa->tx_lock);
++	return 0;
++
+ err_unlock:
+ 	if (txa->nb_queues == 0) {
+ 		txa_service_queue_array_free(txa,
+@@ -844,7 +848,7 @@ txa_service_queue_add(uint8_t id,
+ 	}
+ 
+ 	rte_spinlock_unlock(&txa->tx_lock);
+-	return 0;
++	return -1;
+ }
+ 
+ static int
+@@ -887,9 +891,10 @@ txa_service_queue_del(uint8_t id,
+ 
+ 	txa = txa_service_id_to_data(id);
+ 
++	rte_spinlock_lock(&txa->tx_lock);
+ 	tqi = txa_service_queue(txa, port_id, tx_queue_id);
+ 	if (tqi == NULL || !tqi->added)
+-		return 0;
++		goto ret_unlock;
+ 
+ 	tb = tqi->tx_buf;
+ 	tqi->added = 0;
+@@ -899,6 +904,9 @@ txa_service_queue_del(uint8_t id,
+ 	txa->txa_ethdev[port_id].nb_queues--;
+ 
+ 	txa_service_queue_array_free(txa, port_id);
++
++ret_unlock:
++	rte_spinlock_unlock(&txa->tx_lock);
+ 	return 0;
+ }
+ 
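The Tx adapter changes above do two things: queue add now fails cleanly through err_unlock when the per-queue slot cannot be found, and queue delete now takes tx_lock so it cannot race with a concurrent add. A sketch of the unlock-on-every-path shape, with pthread_mutex_t standing in for rte_spinlock_t and purely illustrative types:

#include <pthread.h>

struct queue_table {
	pthread_mutex_t lock;
	int added[16];
};

static int
queue_del(struct queue_table *qt, int q)
{
	pthread_mutex_lock(&qt->lock);
	if (!qt->added[q])
		goto ret_unlock;	/* nothing to do, but still unlock */
	qt->added[q] = 0;
ret_unlock:
	pthread_mutex_unlock(&qt->lock);
	return 0;
}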
+diff --git a/dpdk/lib/eventdev/rte_event_ring.h b/dpdk/lib/eventdev/rte_event_ring.h
+index c0861b0ec2..0b9aefb000 100644
+--- a/dpdk/lib/eventdev/rte_event_ring.h
++++ b/dpdk/lib/eventdev/rte_event_ring.h
+@@ -14,6 +14,10 @@
+ #ifndef _RTE_EVENT_RING_
+ #define _RTE_EVENT_RING_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <stdint.h>
+ 
+ #include <rte_common.h>
+@@ -266,4 +270,9 @@ rte_event_ring_get_capacity(const struct rte_event_ring *r)
+ {
+ 	return rte_ring_get_capacity(&r->r);
+ }
++
++#ifdef __cplusplus
++}
++#endif
++
+ #endif
+diff --git a/dpdk/lib/eventdev/rte_event_timer_adapter.h b/dpdk/lib/eventdev/rte_event_timer_adapter.h
+index 1551741820..e68d02da72 100644
+--- a/dpdk/lib/eventdev/rte_event_timer_adapter.h
++++ b/dpdk/lib/eventdev/rte_event_timer_adapter.h
+@@ -678,4 +678,8 @@ rte_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
+ 	return adapter->cancel_burst(adapter, evtims, nb_evtims);
+ }
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* __RTE_EVENT_TIMER_ADAPTER_H__ */
+diff --git a/dpdk/lib/eventdev/rte_eventdev.h b/dpdk/lib/eventdev/rte_eventdev.h
+index eef47d8acc..476bcbcc21 100644
+--- a/dpdk/lib/eventdev/rte_eventdev.h
++++ b/dpdk/lib/eventdev/rte_eventdev.h
+@@ -986,21 +986,31 @@ struct rte_event_vector {
+ 	};
+ 	/**< Union to hold common attributes of the vector array. */
+ 	uint64_t impl_opaque;
++
++/* Empty structures do not have zero size in C++, leading to compilation
++ * errors with clang about the structure having different sizes in C and
++ * C++. Since these are all zero-sized arrays, we can omit the "union"
++ * wrapper for C++ builds, removing the warning.
++ */
++#ifndef __cplusplus
+ 	/**< Implementation specific opaque value.
+ 	 * An implementation may use this field to hold implementation specific
+ 	 * value to share between dequeue and enqueue operation.
+ 	 * The application should not modify this field.
+ 	 */
+ 	union {
++#endif
+ 		struct rte_mbuf *mbufs[0];
+ 		void *ptrs[0];
+ 		uint64_t *u64s[0];
++#ifndef __cplusplus
+ 	} __rte_aligned(16);
++#endif
+ 	/**< Start of the vector array union. Depending upon the event type the
+ 	 * vector array can be an array of mbufs or pointers or opaque u64
+ 	 * values.
+ 	 */
+-};
++} __rte_aligned(16);
+ 
+ /* Scheduler type definitions */
+ #define RTE_SCHED_TYPE_ORDERED          0
+@@ -1805,7 +1815,7 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+ 		return 0;
+ 	}
+ #endif
+-	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
++	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
+ 	/*
+ 	 * Allow zero cost non burst mode routine invocation if application
+ 	 * requests nb_events as const one
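The rte_event_vector change above works around a C/C++ layout mismatch: a union of zero-length arrays occupies zero bytes in C (as a GNU extension), while an empty aggregate in C++ has a size of at least one byte. A sketch of the guarded layout under those assumptions, with illustrative names:

#include <stdint.h>

struct vec {
	uint64_t impl_opaque;
#ifndef __cplusplus
	union {				/* zero bytes in C */
#endif
		void *ptrs[0];		/* zero-length arrays: GNU extension */
		uint64_t u64s[0];
#ifndef __cplusplus
	} __attribute__((aligned(16)));
#endif
} __attribute__((aligned(16)));		/* alignment kept on the struct for C++ */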
+diff --git a/dpdk/lib/fib/rte_fib.c b/dpdk/lib/fib/rte_fib.c
+index 6ca180d7e7..0cced97a77 100644
+--- a/dpdk/lib/fib/rte_fib.c
++++ b/dpdk/lib/fib/rte_fib.c
+@@ -40,10 +40,10 @@ EAL_REGISTER_TAILQ(rte_fib_tailq)
+ struct rte_fib {
+ 	char			name[RTE_FIB_NAMESIZE];
+ 	enum rte_fib_type	type;	/**< Type of FIB struct */
+-	struct rte_rib		*rib;	/**< RIB helper datastruct */
++	struct rte_rib		*rib;	/**< RIB helper datastructure */
+ 	void			*dp;	/**< pointer to the dataplane struct*/
+-	rte_fib_lookup_fn_t	lookup;	/**< fib lookup function */
+-	rte_fib_modify_fn_t	modify; /**< modify fib datastruct */
++	rte_fib_lookup_fn_t	lookup;	/**< FIB lookup function */
++	rte_fib_modify_fn_t	modify; /**< modify FIB datastructure */
+ 	uint64_t		def_nh;
+ };
+ 
+diff --git a/dpdk/lib/fib/rte_fib.h b/dpdk/lib/fib/rte_fib.h
+index b3c59dfaaa..e592d3251a 100644
+--- a/dpdk/lib/fib/rte_fib.h
++++ b/dpdk/lib/fib/rte_fib.h
+@@ -189,7 +189,7 @@ rte_fib_lookup_bulk(struct rte_fib *fib, uint32_t *ips,
+  *   FIB object handle
+  * @return
+  *   Pointer on the dataplane struct on success
+- *   NULL othervise
++ *   NULL otherwise
+  */
+ void *
+ rte_fib_get_dp(struct rte_fib *fib);
+@@ -201,7 +201,7 @@ rte_fib_get_dp(struct rte_fib *fib);
+  *   FIB object handle
+  * @return
+  *   Pointer on the RIB on success
+- *   NULL othervise
++ *   NULL otherwise
+  */
+ struct rte_rib *
+ rte_fib_get_rib(struct rte_fib *fib);
+diff --git a/dpdk/lib/fib/rte_fib6.c b/dpdk/lib/fib/rte_fib6.c
+index be79efe004..eebee297d6 100644
+--- a/dpdk/lib/fib/rte_fib6.c
++++ b/dpdk/lib/fib/rte_fib6.c
+@@ -40,10 +40,10 @@ EAL_REGISTER_TAILQ(rte_fib6_tailq)
+ struct rte_fib6 {
+ 	char			name[FIB6_NAMESIZE];
+ 	enum rte_fib6_type	type;	/**< Type of FIB struct */
+-	struct rte_rib6		*rib;	/**< RIB helper datastruct */
++	struct rte_rib6		*rib;	/**< RIB helper datastructure */
+ 	void			*dp;	/**< pointer to the dataplane struct*/
+-	rte_fib6_lookup_fn_t	lookup;	/**< fib lookup function */
+-	rte_fib6_modify_fn_t	modify; /**< modify fib datastruct */
++	rte_fib6_lookup_fn_t	lookup;	/**< FIB lookup function */
++	rte_fib6_modify_fn_t	modify; /**< modify FIB datastructure */
+ 	uint64_t		def_nh;
+ };
+ 
+diff --git a/dpdk/lib/fib/rte_fib6.h b/dpdk/lib/fib/rte_fib6.h
+index 95879af96d..cb133719e1 100644
+--- a/dpdk/lib/fib/rte_fib6.h
++++ b/dpdk/lib/fib/rte_fib6.h
+@@ -184,7 +184,7 @@ rte_fib6_lookup_bulk(struct rte_fib6 *fib,
+  *   FIB6 object handle
+  * @return
+  *   Pointer on the dataplane struct on success
+- *   NULL othervise
++ *   NULL otherwise
+  */
+ void *
+ rte_fib6_get_dp(struct rte_fib6 *fib);
+@@ -196,7 +196,7 @@ rte_fib6_get_dp(struct rte_fib6 *fib);
+  *   FIB object handle
+  * @return
+  *   Pointer on the RIB6 on success
+- *   NULL othervise
++ *   NULL otherwise
+  */
+ struct rte_rib6 *
+ rte_fib6_get_rib(struct rte_fib6 *fib);
+diff --git a/dpdk/lib/gpudev/version.map b/dpdk/lib/gpudev/version.map
+index 2e414c65cc..34186ab7f1 100644
+--- a/dpdk/lib/gpudev/version.map
++++ b/dpdk/lib/gpudev/version.map
+@@ -35,4 +35,6 @@ INTERNAL {
+ 	rte_gpu_get_by_name;
+ 	rte_gpu_notify;
+ 	rte_gpu_release;
++
++	local: *;
+ };
+diff --git a/dpdk/lib/graph/rte_graph_worker.h b/dpdk/lib/graph/rte_graph_worker.h
+index eef77f732a..0c0b9c095a 100644
+--- a/dpdk/lib/graph/rte_graph_worker.h
++++ b/dpdk/lib/graph/rte_graph_worker.h
+@@ -155,7 +155,7 @@ rte_graph_walk(struct rte_graph *graph)
+ 	 *	+-----+ <= cir_start + mask
+ 	 */
+ 	while (likely(head != graph->tail)) {
+-		node = RTE_PTR_ADD(graph, cir_start[(int32_t)head++]);
++		node = (struct rte_node *)RTE_PTR_ADD(graph, cir_start[(int32_t)head++]);
+ 		RTE_ASSERT(node->fence == RTE_GRAPH_FENCE);
+ 		objs = node->objs;
+ 		rte_prefetch0(objs);
+diff --git a/dpdk/lib/gro/rte_gro.c b/dpdk/lib/gro/rte_gro.c
+index 8ca4da67e9..7a788523ad 100644
+--- a/dpdk/lib/gro/rte_gro.c
++++ b/dpdk/lib/gro/rte_gro.c
+@@ -33,6 +33,7 @@ static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
+ 
+ #define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
+ 		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \
++		((ptype & RTE_PTYPE_L4_FRAG) != RTE_PTYPE_L4_FRAG) && \
+ 		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))
+ 
+ #define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
+@@ -41,6 +42,7 @@ static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
+ 
+ #define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
+ 		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
++		((ptype & RTE_PTYPE_L4_FRAG) != RTE_PTYPE_L4_FRAG) && \
+ 		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
+ 		 RTE_PTYPE_TUNNEL_VXLAN) && \
+ 		((ptype & RTE_PTYPE_INNER_L4_TCP) == \
+diff --git a/dpdk/lib/ipsec/esp_outb.c b/dpdk/lib/ipsec/esp_outb.c
+index 672e56aba0..28bd58e3c7 100644
+--- a/dpdk/lib/ipsec/esp_outb.c
++++ b/dpdk/lib/ipsec/esp_outb.c
+@@ -197,7 +197,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ 	/* if UDP encap is enabled update the dgram_len */
+ 	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
+ 		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
+-				(ph - sizeof(struct rte_udp_hdr));
++			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
+ 		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
+ 				sa->hdr_l3_off - sa->hdr_len);
+ 	}
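The one-line fix above corrects where the UDP header is found when NAT-T encapsulation is enabled: it is the tail of the prepended tunnel header, hdr_len bytes past the header start minus its own size, not before the header start. A sketch of the arithmetic with an illustrative header type rather than the real rte_udp_hdr:

#include <stdint.h>
#include <stddef.h>

struct udp_hdr {
	uint16_t src_port, dst_port, dgram_len, dgram_cksum;
};

static struct udp_hdr *
natt_udp_hdr(uint8_t *hdr, size_t hdr_len)
{
	/* the UDP header is the last piece of the prepended tunnel header */
	return (struct udp_hdr *)(hdr + hdr_len - sizeof(struct udp_hdr));
}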
+diff --git a/dpdk/lib/ipsec/ipsec_telemetry.c b/dpdk/lib/ipsec/ipsec_telemetry.c
+index b8b08404b6..9a91e47122 100644
+--- a/dpdk/lib/ipsec/ipsec_telemetry.c
++++ b/dpdk/lib/ipsec/ipsec_telemetry.c
+@@ -236,7 +236,7 @@ RTE_INIT(rte_ipsec_telemetry_init)
+ 		"Return list of IPsec SAs with telemetry enabled.");
+ 	rte_telemetry_register_cmd("/ipsec/sa/stats",
+ 		handle_telemetry_cmd_ipsec_sa_stats,
+-		"Returns IPsec SA stastistics. Parameters: int sa_spi");
++		"Returns IPsec SA statistics. Parameters: int sa_spi");
+ 	rte_telemetry_register_cmd("/ipsec/sa/details",
+ 		handle_telemetry_cmd_ipsec_sa_details,
+ 		"Returns IPsec SA configuration. Parameters: int sa_spi");
+diff --git a/dpdk/lib/ipsec/rte_ipsec_group.h b/dpdk/lib/ipsec/rte_ipsec_group.h
+index 60ab297710..62c2bd7217 100644
+--- a/dpdk/lib/ipsec/rte_ipsec_group.h
++++ b/dpdk/lib/ipsec/rte_ipsec_group.h
+@@ -49,10 +49,10 @@ rte_ipsec_ses_from_crypto(const struct rte_crypto_op *cop)
+ 
+ 	if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ 		ss = cop->sym[0].sec_session;
+-		return (void *)(uintptr_t)ss->opaque_data;
++		return (struct rte_ipsec_session *)(uintptr_t)ss->opaque_data;
+ 	} else if (cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ 		cs = cop->sym[0].session;
+-		return (void *)(uintptr_t)cs->opaque_data;
++		return (struct rte_ipsec_session *)(uintptr_t)cs->opaque_data;
+ 	}
+ 	return NULL;
+ }
+diff --git a/dpdk/lib/ipsec/rte_ipsec_sad.h b/dpdk/lib/ipsec/rte_ipsec_sad.h
+index b65d295831..a3ae57df7e 100644
+--- a/dpdk/lib/ipsec/rte_ipsec_sad.h
++++ b/dpdk/lib/ipsec/rte_ipsec_sad.h
+@@ -153,7 +153,7 @@ rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad);
+  * @param keys
+  *   Array of keys to be looked up in the SAD
+  * @param sa
+- *   Pointer assocoated with the keys.
++ *   Pointer associated with the keys.
+  *   If the lookup for the given key failed, then corresponding sa
+  *   will be NULL
+  * @param n
+diff --git a/dpdk/lib/ipsec/sa.c b/dpdk/lib/ipsec/sa.c
+index 1e51482c92..c921699390 100644
+--- a/dpdk/lib/ipsec/sa.c
++++ b/dpdk/lib/ipsec/sa.c
+@@ -362,13 +362,13 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+ 
+ 	memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);
+ 
+-	/* insert UDP header if UDP encapsulation is inabled */
++	/* insert UDP header if UDP encapsulation is enabled */
+ 	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
+ 		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
+ 				&sa->hdr[prm->tun.hdr_len];
+ 		sa->hdr_len += sizeof(struct rte_udp_hdr);
+-		udph->src_port = prm->ipsec_xform.udp.sport;
+-		udph->dst_port = prm->ipsec_xform.udp.dport;
++		udph->src_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.sport);
++		udph->dst_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.dport);
+ 		udph->dgram_cksum = 0;
+ 	}
+ 
+diff --git a/dpdk/lib/kni/rte_kni_common.h b/dpdk/lib/kni/rte_kni_common.h
+index b547ea5501..8d3ee0fa4f 100644
+--- a/dpdk/lib/kni/rte_kni_common.h
++++ b/dpdk/lib/kni/rte_kni_common.h
+@@ -6,6 +6,10 @@
+ #ifndef _RTE_KNI_COMMON_H_
+ #define _RTE_KNI_COMMON_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #ifdef __KERNEL__
+ #include <linux/if.h>
+ #include <asm/barrier.h>
+@@ -136,4 +140,8 @@ struct rte_kni_device_info {
+ #define RTE_KNI_IOCTL_CREATE  _IOWR(0, 2, struct rte_kni_device_info)
+ #define RTE_KNI_IOCTL_RELEASE _IOWR(0, 3, struct rte_kni_device_info)
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_KNI_COMMON_H_ */
+diff --git a/dpdk/lib/lpm/rte_lpm_altivec.h b/dpdk/lib/lpm/rte_lpm_altivec.h
+index 4fbc1b595d..bab8929495 100644
+--- a/dpdk/lib/lpm/rte_lpm_altivec.h
++++ b/dpdk/lib/lpm/rte_lpm_altivec.h
+@@ -19,14 +19,14 @@ static inline void
+ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
+ 	uint32_t defv)
+ {
+-	vector signed int i24;
++	xmm_t i24;
+ 	rte_xmm_t i8;
+ 	uint32_t tbl[4];
+ 	uint64_t idx, pt, pt2;
+ 	const uint32_t *ptbl;
+ 
+ 	const uint32_t mask = UINT8_MAX;
+-	const vector signed int mask8 = (xmm_t){mask, mask, mask, mask};
++	const xmm_t mask8 = (xmm_t){mask, mask, mask, mask};
+ 
+ 	/*
+ 	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
+@@ -46,7 +46,7 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
+ 
+ 	/* get 4 indexes for tbl24[]. */
+ 	i24 = vec_sr((xmm_t) ip,
+-		(vector unsigned int){CHAR_BIT, CHAR_BIT, CHAR_BIT, CHAR_BIT});
++		(__vector unsigned int){CHAR_BIT, CHAR_BIT, CHAR_BIT, CHAR_BIT});
+ 
+ 	/* extract values from tbl24[] */
+ 	idx = (uint32_t)i24[0];
+diff --git a/dpdk/lib/mbuf/rte_mbuf.c b/dpdk/lib/mbuf/rte_mbuf.c
+index 604d77bbda..dce900f28f 100644
+--- a/dpdk/lib/mbuf/rte_mbuf.c
++++ b/dpdk/lib/mbuf/rte_mbuf.c
+@@ -685,6 +685,9 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
+ 	fprintf(f, "  pkt_len=%u, ol_flags=%#"PRIx64", nb_segs=%u, port=%u",
+ 		m->pkt_len, m->ol_flags, m->nb_segs, m->port);
+ 
++	if (m->ol_flags & (RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_TX_QINQ))
++		fprintf(f, ", vlan_tci_outer=%u", m->vlan_tci_outer);
++
+ 	if (m->ol_flags & (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_TX_VLAN))
+ 		fprintf(f, ", vlan_tci=%u", m->vlan_tci);
+ 
+diff --git a/dpdk/lib/mbuf/rte_mbuf_core.h b/dpdk/lib/mbuf/rte_mbuf_core.h
+index 321a419c71..3d6ddd6773 100644
+--- a/dpdk/lib/mbuf/rte_mbuf_core.h
++++ b/dpdk/lib/mbuf/rte_mbuf_core.h
+@@ -8,7 +8,7 @@
+ 
+ /**
+  * @file
+- * This file contains definion of RTE mbuf structure itself,
++ * This file contains definition of RTE mbuf structure itself,
+  * packet offload flags and some related macros.
+  * For majority of DPDK entities, it is not recommended to include
+  * this file directly, use include <rte_mbuf.h> instead.
+diff --git a/dpdk/lib/meson.build b/dpdk/lib/meson.build
+index 018976df17..fbaa6ef7c2 100644
+--- a/dpdk/lib/meson.build
++++ b/dpdk/lib/meson.build
+@@ -3,7 +3,7 @@
+ 
+ 
+ # process all libraries equally, as far as possible
+-# "core" libs first, then others alphebetically as far as possible
++# "core" libs first, then others alphabetically as far as possible
+ # NOTE: for speed of meson runs, the dependencies in the subdirectories
+ # sometimes skip deps that would be implied by others, e.g. if mempool is
+ # given as a dep, no need to mention ring. This is especially true for the
+diff --git a/dpdk/lib/metrics/rte_metrics_telemetry.h b/dpdk/lib/metrics/rte_metrics_telemetry.h
+index 2b6eb1ccc8..09b14d9336 100644
+--- a/dpdk/lib/metrics/rte_metrics_telemetry.h
++++ b/dpdk/lib/metrics/rte_metrics_telemetry.h
+@@ -13,6 +13,9 @@
+ #ifndef _RTE_METRICS_TELEMETRY_H_
+ #define _RTE_METRICS_TELEMETRY_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
+ 
+ enum rte_telemetry_stats_type {
+ 	PORT_STATS = 0,
+@@ -60,4 +63,8 @@ __rte_experimental
+ int32_t
+ rte_metrics_tel_extract_data(struct telemetry_encode_param *ep, json_t *data);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif
+diff --git a/dpdk/lib/net/rte_gtp.h b/dpdk/lib/net/rte_gtp.h
+index dca940c2c5..9849872366 100644
+--- a/dpdk/lib/net/rte_gtp.h
++++ b/dpdk/lib/net/rte_gtp.h
+@@ -75,11 +75,11 @@ struct rte_gtp_psc_generic_hdr {
+ 	uint8_t spare:2;	/**< type specific spare bits */
+ 	uint8_t qfi:6;		/**< Qos Flow Identifier */
+ #else
+-	uint8_t qfi:6;		/**< Qos Flow Identifier */
+-	uint8_t spare:2;	/**< type specific spare bits */
+ 	uint8_t pad:3;		/**< type specific pad bits */
+ 	uint8_t qmp:1;		/**< Qos Monitoring Packet */
+ 	uint8_t type:4;		/**< PDU type */
++	uint8_t qfi:6;		/**< Qos Flow Identifier */
++	uint8_t spare:2;	/**< type specific spare bits */
+ #endif
+ 	uint8_t data[0];	/**< variable length data fields */
+ } __rte_packed;
+@@ -100,12 +100,13 @@ struct rte_gtp_psc_type0_hdr {
+ 	uint8_t rqi:1;		/**< Reflective Qos Indicator */
+ 	uint8_t qfi:6;		/**< Qos Flow Identifier */
+ #else
+-	uint8_t qfi:6;		/**< Qos Flow Identifier */
+-	uint8_t rqi:1;		/**< Reflective Qos Indicator */
+-	uint8_t ppp:1;		/**< Paging policy presence */
+ 	uint8_t spare_dl1:2;	/**< spare down link bits */
+ 	uint8_t snp:1;		/**< Sequence number presence */
++	uint8_t qmp:1;		/**< Qos Monitoring Packet */
+ 	uint8_t type:4;		/**< PDU type */
++	uint8_t qfi:6;		/**< Qos Flow Identifier */
++	uint8_t rqi:1;		/**< Reflective Qos Indicator */
++	uint8_t ppp:1;		/**< Paging policy presence */
+ #endif
+ 	uint8_t data[0];	/**< variable length data fields */
+ } __rte_packed;
+@@ -127,14 +128,14 @@ struct rte_gtp_psc_type1_hdr {
+ 	uint8_t spare_ul2:1;	/**< spare up link bits */
+ 	uint8_t qfi:6;		/**< Qos Flow Identifier */
+ #else
+-	uint8_t qfi:6;		/**< Qos Flow Identifier */
+-	uint8_t spare_ul2:1;	/**< spare up link bits */
+-	uint8_t n_delay_ind:1;	/**< N3/N9 delay result presence */
+ 	uint8_t snp:1;		/**< Sequence number presence ul */
+ 	uint8_t ul_delay_ind:1;	/**< ul delay result presence */
+ 	uint8_t dl_delay_ind:1;	/**< dl delay result presence */
+ 	uint8_t qmp:1;		/**< Qos Monitoring Packet */
+ 	uint8_t type:4;		/**< PDU type */
++	uint8_t qfi:6;		/**< Qos Flow Identifier */
++	uint8_t spare_ul2:1;	/**< spare up link bits */
++	uint8_t n_delay_ind:1;	/**< N3/N9 delay result presence */
+ #endif
+ 	uint8_t data[0];	/**< variable length data fields */
+ } __rte_packed;
+diff --git a/dpdk/lib/net/rte_l2tpv2.h b/dpdk/lib/net/rte_l2tpv2.h
+index b90e36cf12..1f3ad3f03c 100644
+--- a/dpdk/lib/net/rte_l2tpv2.h
++++ b/dpdk/lib/net/rte_l2tpv2.h
+@@ -89,16 +89,6 @@ struct rte_l2tpv2_common_hdr {
+ 		__extension__
+ 		struct {
+ #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+-			uint16_t t:1;		/**< message Type */
+-			uint16_t l:1;		/**< length option bit */
+-			uint16_t res1:2;	/**< reserved */
+-			uint16_t s:1;		/**< ns/nr option bit */
+-			uint16_t res2:1;	/**< reserved */
+-			uint16_t o:1;		/**< offset option bit */
+-			uint16_t p:1;		/**< priority option bit */
+-			uint16_t res3:4;	/**< reserved */
+-			uint16_t ver:4;		/**< protocol version */
+-#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ 			uint16_t ver:4;		/**< protocol version */
+ 			uint16_t res3:4;	/**< reserved */
+ 			uint16_t p:1;		/**< priority option bit */
+@@ -108,6 +98,16 @@ struct rte_l2tpv2_common_hdr {
+ 			uint16_t res1:2;	/**< reserved */
+ 			uint16_t l:1;		/**< length option bit */
+ 			uint16_t t:1;		/**< message Type */
++#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
++			uint16_t t:1;		/**< message Type */
++			uint16_t l:1;		/**< length option bit */
++			uint16_t res1:2;	/**< reserved */
++			uint16_t s:1;		/**< ns/nr option bit */
++			uint16_t res2:1;	/**< reserved */
++			uint16_t o:1;		/**< offset option bit */
++			uint16_t p:1;		/**< priority option bit */
++			uint16_t res3:4;	/**< reserved */
++			uint16_t ver:4;		/**< protocol version */
+ #endif
+ 		};
+ 	};
+@@ -143,7 +143,7 @@ struct rte_l2tpv2_msg_without_length {
+ /**
+  * L2TPv2 message Header contains all options except ns_nr(length,
+  * offset size, offset padding).
+- * Ns and Nr MUST be toghter.
++ * Ns and Nr MUST be together.
+  */
+ struct rte_l2tpv2_msg_without_ns_nr {
+ 	rte_be16_t length;		/**< length(16) */
+@@ -155,7 +155,7 @@ struct rte_l2tpv2_msg_without_ns_nr {
+ 
+ /**
+  * L2TPv2 message Header contains all options except ns_nr(length, ns, nr).
+- * offset size and offset padding MUST be toghter.
++ * offset size and offset padding MUST be together.
+  */
+ struct rte_l2tpv2_msg_without_offset {
+ 	rte_be16_t length;		/**< length(16) */
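The GTP and L2TPv2 reorderings above fix the same class of bug: on the ABIs DPDK targets, successive bit-fields are allocated from the low-order bits on little-endian and from the high-order bits on big-endian, so a wire format must declare its fields in opposite order for the two byte orders. A sketch, assuming a byte whose high nibble is a version field:

#include <stdint.h>

struct hdr_byte {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint8_t ver:4;		/* wire bits 7..4 declared first */
	uint8_t flags:4;	/* wire bits 3..0 */
#else
	uint8_t flags:4;	/* little-endian: low-order bits declared first */
	uint8_t ver:4;
#endif
};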
+diff --git a/dpdk/lib/pcapng/rte_pcapng.c b/dpdk/lib/pcapng/rte_pcapng.c
+index 03edabe73e..0caf3d31f8 100644
+--- a/dpdk/lib/pcapng/rte_pcapng.c
++++ b/dpdk/lib/pcapng/rte_pcapng.c
+@@ -20,6 +20,7 @@
+ #include <rte_ether.h>
+ #include <rte_mbuf.h>
+ #include <rte_pcapng.h>
++#include <rte_reciprocal.h>
+ #include <rte_time.h>
+ 
+ #include "pcapng_proto.h"
+@@ -35,27 +36,63 @@ struct rte_pcapng {
+ };
+ 
+ /* For converting TSC cycles to PCAPNG ns format */
+-struct pcapng_time {
++static struct pcapng_time {
+ 	uint64_t ns;
+ 	uint64_t cycles;
++	uint64_t tsc_hz;
++	struct rte_reciprocal_u64 tsc_hz_inverse;
+ } pcapng_time;
+ 
+-RTE_INIT(pcapng_init)
++static inline void
++pcapng_init(void)
+ {
+ 	struct timespec ts;
+ 
+ 	pcapng_time.cycles = rte_get_tsc_cycles();
+ 	clock_gettime(CLOCK_REALTIME, &ts);
++	pcapng_time.cycles = (pcapng_time.cycles + rte_get_tsc_cycles()) / 2;
+ 	pcapng_time.ns = rte_timespec_to_ns(&ts);
++
++	pcapng_time.tsc_hz = rte_get_tsc_hz();
++	pcapng_time.tsc_hz_inverse = rte_reciprocal_value_u64(pcapng_time.tsc_hz);
+ }
+ 
+ /* PCAPNG timestamps are in nanoseconds */
+ static uint64_t pcapng_tsc_to_ns(uint64_t cycles)
+ {
+-	uint64_t delta;
+-
++	uint64_t delta, secs;
++
++	if (!pcapng_time.tsc_hz)
++		pcapng_init();
++
++	/* In essence the calculation is:
++	 *   delta = (cycles - pcapng_time.cycles) * NSEC_PER_SEC / rte_get_tsc_hz()
++	 * but this overflows within 4 to 8 seconds depending on TSC frequency.
++	 * Instead, if delta >= pcapng_time.tsc_hz:
++	 *   Increase pcapng_time.ns and pcapng_time.cycles by the number of
++	 *   whole seconds in delta and reduce delta accordingly.
++	 * delta will therefore always lie in the interval [0, pcapng_time.tsc_hz),
++	 * which will not overflow when multiplied by NSEC_PER_SEC provided the
++	 * TSC frequency < approx 18.4GHz.
++	 *
++	 * Currently all TSCs operate below 5GHz.
++	 */
+ 	delta = cycles - pcapng_time.cycles;
+-	return pcapng_time.ns + (delta * NSEC_PER_SEC) / rte_get_tsc_hz();
++	if (unlikely(delta >= pcapng_time.tsc_hz)) {
++		if (likely(delta < pcapng_time.tsc_hz * 2)) {
++			delta -= pcapng_time.tsc_hz;
++			pcapng_time.cycles += pcapng_time.tsc_hz;
++			pcapng_time.ns += NSEC_PER_SEC;
++		} else {
++			secs = rte_reciprocal_divide_u64(delta, &pcapng_time.tsc_hz_inverse);
++			delta -= secs * pcapng_time.tsc_hz;
++			pcapng_time.cycles += secs * pcapng_time.tsc_hz;
++			pcapng_time.ns += secs * NSEC_PER_SEC;
++		}
++	}
++
++	return pcapng_time.ns + rte_reciprocal_divide_u64(delta * NSEC_PER_SEC,
++							  &pcapng_time.tsc_hz_inverse);
+ }
+ 
+ /* length of option including padding */
+@@ -177,8 +214,8 @@ pcapng_add_interface(rte_pcapng_t *self, uint16_t port)
+ 			 "%s-%s", dev->bus->name, dev->name);
+ 
+ 	/* DPDK reports in units of Mbps */
+-	rte_eth_link_get(port, &link);
+-	if (link.link_status == RTE_ETH_LINK_UP)
++	if (rte_eth_link_get(port, &link) == 0 &&
++	    link.link_status == RTE_ETH_LINK_UP)
+ 		speed = link.link_speed * PCAPNG_MBPS_SPEED;
+ 
+ 	if (rte_eth_macaddr_get(port, &macaddr) < 0)
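The rewritten conversion above avoids two problems: the naive delta * NSEC_PER_SEC product wraps 64 bits within seconds, and a division per packet is costly, hence rte_reciprocal_divide_u64(). Stripped of the reciprocal (plain division stands in for it) and of the base advancing, the arithmetic is, as a sketch:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t
tsc_to_ns(uint64_t cycles, uint64_t base_cycles, uint64_t base_ns,
	  uint64_t tsc_hz)
{
	uint64_t delta = cycles - base_cycles;
	uint64_t secs = delta / tsc_hz;		/* whole seconds in delta */

	delta -= secs * tsc_hz;			/* now delta < tsc_hz */
	/* delta * NSEC_PER_SEC < 2^64 as long as tsc_hz < ~18.4 GHz */
	return base_ns + secs * NSEC_PER_SEC + delta * NSEC_PER_SEC / tsc_hz;
}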
+diff --git a/dpdk/lib/pipeline/rte_swx_ctl.c b/dpdk/lib/pipeline/rte_swx_ctl.c
+index 1c908e3e3f..f52ccffd75 100644
+--- a/dpdk/lib/pipeline/rte_swx_ctl.c
++++ b/dpdk/lib/pipeline/rte_swx_ctl.c
+@@ -372,18 +372,34 @@ table_entry_check(struct rte_swx_ctl_pipeline *ctl,
+ 
+ 	if (data_check) {
+ 		struct action *a;
++		struct rte_swx_ctl_table_action_info *tai;
+ 		uint32_t i;
+ 
+ 		/* action_id. */
+-		for (i = 0; i < table->info.n_actions; i++)
+-			if (entry->action_id == table->actions[i].action_id)
++		for (i = 0; i < table->info.n_actions; i++) {
++			tai = &table->actions[i];
++
++			if (entry->action_id == tai->action_id)
+ 				break;
++		}
+ 
+ 		CHECK(i < table->info.n_actions, EINVAL);
+ 
+ 		/* action_data. */
+ 		a = &ctl->actions[entry->action_id];
+ 		CHECK(!(a->data_size && !entry->action_data), EINVAL);
++
++		/* When both key_check and data_check are true, we are interested in both the entry
++		 * key and data, which means the operation is _regular_ table entry add.
++		 */
++		if (key_check && !tai->action_is_for_table_entries)
++			return -EINVAL;
++
++		/* When key_check is false while data_check is true, we are only interested in the
++		 * entry data, which means the operation is _default_ table entry add.
++		 */
++		if (!key_check && !tai->action_is_for_default_entry)
++			return -EINVAL;
+ 	}
+ 
+ 	return 0;
+@@ -1005,15 +1021,16 @@ learner_action_data_size_get(struct rte_swx_ctl_pipeline *ctl, struct learner *l
+ static void
+ table_state_free(struct rte_swx_ctl_pipeline *ctl)
+ {
+-	uint32_t i;
++	uint32_t table_base_index, selector_base_index, learner_base_index, i;
+ 
+ 	if (!ctl->ts_next)
+ 		return;
+ 
+ 	/* For each table, free its table state. */
++	table_base_index = 0;
+ 	for (i = 0; i < ctl->info.n_tables; i++) {
+ 		struct table *table = &ctl->tables[i];
+-		struct rte_swx_table_state *ts = &ctl->ts_next[i];
++		struct rte_swx_table_state *ts = &ctl->ts_next[table_base_index + i];
+ 
+ 		/* Default action data. */
+ 		free(ts->default_action_data);
+@@ -1024,8 +1041,9 @@ table_state_free(struct rte_swx_ctl_pipeline *ctl)
+ 	}
+ 
+ 	/* For each selector table, free its table state. */
++	selector_base_index = ctl->info.n_tables;
+ 	for (i = 0; i < ctl->info.n_selectors; i++) {
+-		struct rte_swx_table_state *ts = &ctl->ts_next[i];
++		struct rte_swx_table_state *ts = &ctl->ts_next[selector_base_index + i];
+ 
+ 		/* Table object. */
+ 		if (ts->obj)
+@@ -1033,8 +1051,9 @@ table_state_free(struct rte_swx_ctl_pipeline *ctl)
+ 	}
+ 
+ 	/* For each learner table, free its table state. */
++	learner_base_index = ctl->info.n_tables + ctl->info.n_selectors;
+ 	for (i = 0; i < ctl->info.n_learners; i++) {
+-		struct rte_swx_table_state *ts = &ctl->ts_next[i];
++		struct rte_swx_table_state *ts = &ctl->ts_next[learner_base_index + i];
+ 
+ 		/* Default action data. */
+ 		free(ts->default_action_data);
+@@ -1047,10 +1066,10 @@ table_state_free(struct rte_swx_ctl_pipeline *ctl)
+ static int
+ table_state_create(struct rte_swx_ctl_pipeline *ctl)
+ {
++	uint32_t table_base_index, selector_base_index, learner_base_index, i;
+ 	int status = 0;
+-	uint32_t i;
+ 
+-	ctl->ts_next = calloc(ctl->info.n_tables + ctl->info.n_selectors,
++	ctl->ts_next = calloc(ctl->info.n_tables + ctl->info.n_selectors + ctl->info.n_learners,
+ 			      sizeof(struct rte_swx_table_state));
+ 	if (!ctl->ts_next) {
+ 		status = -ENOMEM;
+@@ -1058,10 +1077,11 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
+ 	}
+ 
+ 	/* Tables. */
++	table_base_index = 0;
+ 	for (i = 0; i < ctl->info.n_tables; i++) {
+ 		struct table *table = &ctl->tables[i];
+-		struct rte_swx_table_state *ts = &ctl->ts[i];
+-		struct rte_swx_table_state *ts_next = &ctl->ts_next[i];
++		struct rte_swx_table_state *ts = &ctl->ts[table_base_index + i];
++		struct rte_swx_table_state *ts_next = &ctl->ts_next[table_base_index + i];
+ 
+ 		/* Table object. */
+ 		if (!table->is_stub && table->ops.add) {
+@@ -1094,9 +1114,10 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
+ 	}
+ 
+ 	/* Selector tables. */
++	selector_base_index = ctl->info.n_tables;
+ 	for (i = 0; i < ctl->info.n_selectors; i++) {
+ 		struct selector *s = &ctl->selectors[i];
+-		struct rte_swx_table_state *ts_next = &ctl->ts_next[ctl->info.n_tables + i];
++		struct rte_swx_table_state *ts_next = &ctl->ts_next[selector_base_index + i];
+ 
+ 		/* Table object. */
+ 		ts_next->obj = rte_swx_table_selector_create(&s->params, NULL, ctl->numa_node);
+@@ -1107,10 +1128,11 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
+ 	}
+ 
+ 	/* Learner tables. */
++	learner_base_index = ctl->info.n_tables + ctl->info.n_selectors;
+ 	for (i = 0; i < ctl->info.n_learners; i++) {
+ 		struct learner *l = &ctl->learners[i];
+-		struct rte_swx_table_state *ts = &ctl->ts[i];
+-		struct rte_swx_table_state *ts_next = &ctl->ts_next[i];
++		struct rte_swx_table_state *ts = &ctl->ts[learner_base_index + i];
++		struct rte_swx_table_state *ts_next = &ctl->ts_next[learner_base_index + i];
+ 
+ 		/* Table object: duplicate from the current table state. */
+ 		ts_next->obj = ts->obj;
+@@ -1446,8 +1468,6 @@ rte_swx_ctl_pipeline_table_entry_add(struct rte_swx_ctl_pipeline *ctl,
+ 	CHECK(entry, EINVAL);
+ 	CHECK(!table_entry_check(ctl, table_id, entry, 1, 1), EINVAL);
+ 
+-	CHECK(table->actions[entry->action_id].action_is_for_table_entries, EINVAL);
+-
+ 	new_entry = table_entry_duplicate(ctl, table_id, entry, 1, 1);
+ 	CHECK(new_entry, ENOMEM);
+ 
+@@ -1653,8 +1673,6 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl,
+ 	CHECK(entry, EINVAL);
+ 	CHECK(!table_entry_check(ctl, table_id, entry, 0, 1), EINVAL);
+ 
+-	CHECK(table->actions[entry->action_id].action_is_for_default_entry, EINVAL);
+-
+ 	new_entry = table_entry_duplicate(ctl, table_id, entry, 0, 1);
+ 	CHECK(new_entry, ENOMEM);
+ 
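The base-index changes above all serve one layout: ts_next is a single array holding the regular tables first, then the selectors, then the learners, so the allocation must cover all three groups and each group must be indexed from its own offset. As a sketch with illustrative names:

#include <stdint.h>
#include <stdlib.h>

struct table_state { void *obj; };

static struct table_state *
states_alloc(uint32_t n_tables, uint32_t n_selectors, uint32_t n_learners)
{
	/* one array sized for all three groups */
	return calloc(n_tables + n_selectors + n_learners,
		      sizeof(struct table_state));
}

static struct table_state *
learner_state(struct table_state *ts, uint32_t n_tables,
	      uint32_t n_selectors, uint32_t i)
{
	/* learners start after the tables and the selectors */
	return &ts[n_tables + n_selectors + i];
}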
+diff --git a/dpdk/lib/pipeline/rte_swx_ctl.h b/dpdk/lib/pipeline/rte_swx_ctl.h
+index 46d05823e1..82e62e70a7 100644
+--- a/dpdk/lib/pipeline/rte_swx_ctl.h
++++ b/dpdk/lib/pipeline/rte_swx_ctl.h
+@@ -369,7 +369,7 @@ struct rte_swx_table_stats {
+ 	uint64_t n_pkts_miss;
+ 
+ 	/** Number of packets (with either lookup hit or miss) per pipeline
+-	 * action. Array of pipeline *n_actions* elements indedex by the
++	 * action. Array of pipeline *n_actions* elements indexed by the
+ 	 * pipeline-level *action_id*, therefore this array has the same size
+ 	 * for all the tables within the same pipeline.
+ 	 */
+@@ -629,7 +629,7 @@ struct rte_swx_learner_stats {
+ 	uint64_t n_pkts_forget;
+ 
+ 	/** Number of packets (with either lookup hit or miss) per pipeline action. Array of
+-	 * pipeline *n_actions* elements indedex by the pipeline-level *action_id*, therefore this
++	 * pipeline *n_actions* elements indexed by the pipeline-level *action_id*, therefore this
+ 	 * array has the same size for all the tables within the same pipeline.
+ 	 */
+ 	uint64_t *n_pkts_action;
+diff --git a/dpdk/lib/pipeline/rte_swx_pipeline.c b/dpdk/lib/pipeline/rte_swx_pipeline.c
+index 2145ca0a42..8d5073cf19 100644
+--- a/dpdk/lib/pipeline/rte_swx_pipeline.c
++++ b/dpdk/lib/pipeline/rte_swx_pipeline.c
+@@ -8531,7 +8531,7 @@ table_state_build(struct rte_swx_pipeline *p)
+ 	struct selector *s;
+ 	struct learner *l;
+ 
+-	p->table_state = calloc(p->n_tables + p->n_selectors,
++	p->table_state = calloc(p->n_tables + p->n_selectors + p->n_learners,
+ 				sizeof(struct rte_swx_table_state));
+ 	CHECK(p->table_state, ENOMEM);
+ 
+diff --git a/dpdk/lib/pipeline/rte_swx_pipeline_internal.h b/dpdk/lib/pipeline/rte_swx_pipeline_internal.h
+index 1921fdcd78..c8fa978580 100644
+--- a/dpdk/lib/pipeline/rte_swx_pipeline_internal.h
++++ b/dpdk/lib/pipeline/rte_swx_pipeline_internal.h
+@@ -309,7 +309,7 @@ enum instruction_type {
+ 	 */
+ 	INSTR_ALU_CKADD_FIELD,    /* src = H */
+ 	INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
+-	INSTR_ALU_CKADD_STRUCT,   /* src = h.hdeader, with any sizeof(header) */
++	INSTR_ALU_CKADD_STRUCT,   /* src = h.header, with any sizeof(header) */
+ 
+ 	/* cksub dst src
+ 	 * dst = dst '- src
+@@ -1562,7 +1562,7 @@ emit_handler(struct thread *t)
+ 		return;
+ 	}
+ 
+-	/* Header encapsulation (optionally, with prior header decasulation). */
++	/* Header encapsulation (optionally, with prior header decapsulation). */
+ 	if ((t->n_headers_out == 2) &&
+ 	    (h1->ptr + h1->n_bytes == t->ptr) &&
+ 	    (h0->ptr == h0->ptr0)) {
+@@ -1820,9 +1820,9 @@ __instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
+ {
+ 	uint64_t valid_headers = t->valid_headers;
+ 	uint32_t n_headers_out = t->n_headers_out;
+-	struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
++	struct header_out_runtime *ho = NULL;
+ 	uint8_t *ho_ptr = NULL;
+-	uint32_t ho_nbytes = 0, first = 1, i;
++	uint32_t ho_nbytes = 0, i;
+ 
+ 	for (i = 0; i < n_emit; i++) {
+ 		uint32_t header_id = ip->io.hdr.header_id[i];
+@@ -1834,18 +1834,21 @@ __instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
+ 
+ 		uint8_t *hi_ptr = t->structs[struct_id];
+ 
+-		if (!MASK64_BIT_GET(valid_headers, header_id))
++		if (!MASK64_BIT_GET(valid_headers, header_id)) {
++			TRACE("[Thread %2u]: emit header %u (invalid)\n",
++			      p->thread_id,
++			      header_id);
++
+ 			continue;
++		}
+ 
+-		TRACE("[Thread %2u]: emit header %u\n",
++		TRACE("[Thread %2u]: emit header %u (valid)\n",
+ 		      p->thread_id,
+ 		      header_id);
+ 
+ 		/* Headers. */
+-		if (first) {
+-			first = 0;
+-
+-			if (!t->n_headers_out) {
++		if (!ho) {
++			if (!n_headers_out) {
+ 				ho = &t->headers_out[0];
+ 
+ 				ho->ptr0 = hi_ptr0;
+@@ -1858,6 +1861,8 @@ __instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
+ 
+ 				continue;
+ 			} else {
++				ho = &t->headers_out[n_headers_out - 1];
++
+ 				ho_ptr = ho->ptr;
+ 				ho_nbytes = ho->n_bytes;
+ 			}
+@@ -1879,7 +1884,8 @@ __instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
+ 		}
+ 	}
+ 
+-	ho->n_bytes = ho_nbytes;
++	if (ho)
++		ho->n_bytes = ho_nbytes;
+ 	t->n_headers_out = n_headers_out;
+ }
+ 
+diff --git a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c b/dpdk/lib/pipeline/rte_swx_pipeline_spec.c
+index 8e9aa44e30..07a7580ac8 100644
+--- a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c
++++ b/dpdk/lib/pipeline/rte_swx_pipeline_spec.c
+@@ -2011,7 +2011,7 @@ rte_swx_pipeline_build_from_spec(struct rte_swx_pipeline *p,
+ 		if (err_line)
+ 			*err_line = 0;
+ 		if (err_msg)
+-			*err_msg = "Null pipeline arument.";
++			*err_msg = "Null pipeline argument.";
+ 		status = -EINVAL;
+ 		goto error;
+ 	}
+diff --git a/dpdk/lib/power/power_cppc_cpufreq.c b/dpdk/lib/power/power_cppc_cpufreq.c
+index 6afd310e4e..25185a791c 100644
+--- a/dpdk/lib/power/power_cppc_cpufreq.c
++++ b/dpdk/lib/power/power_cppc_cpufreq.c
+@@ -621,7 +621,7 @@ power_cppc_enable_turbo(unsigned int lcore_id)
+ 		return -1;
+ 	}
+ 
+-	/* TODO: must set to max once enbling Turbo? Considering add condition:
++	/* TODO: must set to max once enabling Turbo? Considering add condition:
+ 	 * if ((pi->turbo_available) && (pi->curr_idx <= 1))
+ 	 */
+ 	/* Max may have changed, so call to max function */
+diff --git a/dpdk/lib/regexdev/rte_regexdev.h b/dpdk/lib/regexdev/rte_regexdev.h
+index 86f0b231b0..513ce5b67c 100644
+--- a/dpdk/lib/regexdev/rte_regexdev.h
++++ b/dpdk/lib/regexdev/rte_regexdev.h
+@@ -228,6 +228,9 @@ extern int rte_regexdev_logtype;
+ } while (0)
+ 
+ /**
++ * @warning
++ * @b EXPERIMENTAL: this API may change without prior notice.
++ *
+  * Check if dev_id is ready.
+  *
+  * @param dev_id
+@@ -237,6 +240,7 @@ extern int rte_regexdev_logtype;
+  *   - 0 if device state is not in ready state.
+  *   - 1 if device state is ready state.
+  */
++__rte_experimental
+ int rte_regexdev_is_valid_dev(uint16_t dev_id);
+ 
+ /**
+@@ -298,14 +302,14 @@ rte_regexdev_get_dev_id(const char *name);
+  * backtracking positions remembered by any tokens inside the group.
+  * Example RegEx is `a(?>bc|b)c` if the given patterns are `abc` and `abcc` then
+  * `a(bc|b)c` matches both where as `a(?>bc|b)c` matches only abcc because
+- * atomic groups don't allow backtracing back to `b`.
++ * atomic groups don't allow backtracking back to `b`.
+  *
+  * @see struct rte_regexdev_info::regexdev_capa
+  */
+ 
+ #define RTE_REGEXDEV_SUPP_PCRE_BACKTRACKING_CTRL_F (1ULL << 3)
+ /**< RegEx device support PCRE backtracking control verbs.
+- * Some examples of backtracing verbs are (*COMMIT), (*ACCEPT), (*FAIL),
++ * Some examples of backtracking verbs are (*COMMIT), (*ACCEPT), (*FAIL),
+  * (*SKIP), (*PRUNE).
+  *
+  * @see struct rte_regexdev_info::regexdev_capa
+@@ -1015,7 +1019,7 @@ rte_regexdev_rule_db_update(uint8_t dev_id,
+  * @b EXPERIMENTAL: this API may change without prior notice.
+  *
+  * Compile local rule set and burn the complied result to the
+- * RegEx deive.
++ * RegEx device.
+  *
+  * @param dev_id
+  *   RegEx device identifier.
+diff --git a/dpdk/lib/regexdev/rte_regexdev_driver.h b/dpdk/lib/regexdev/rte_regexdev_driver.h
+index 64742016c0..6246b144a6 100644
+--- a/dpdk/lib/regexdev/rte_regexdev_driver.h
++++ b/dpdk/lib/regexdev/rte_regexdev_driver.h
+@@ -32,6 +32,7 @@ extern "C" {
+  *   A pointer to the RegEx device slot case of success,
+  *   NULL otherwise.
+  */
++__rte_internal
+ struct rte_regexdev *rte_regexdev_register(const char *name);
+ 
+ /**
+@@ -41,6 +42,7 @@ struct rte_regexdev *rte_regexdev_register(const char *name);
+  * @param dev
+  *   Device to be released.
+  */
++__rte_internal
+ void rte_regexdev_unregister(struct rte_regexdev *dev);
+ 
+ /**
+@@ -50,6 +52,7 @@ void rte_regexdev_unregister(struct rte_regexdev *dev);
+  * @param name
+  *   The device name.
+  */
++__rte_internal
+ struct rte_regexdev *rte_regexdev_get_device_by_name(const char *name);
+ 
+ #ifdef __cplusplus
+diff --git a/dpdk/lib/regexdev/version.map b/dpdk/lib/regexdev/version.map
+index 8db9b17018..3c6e9fffa1 100644
+--- a/dpdk/lib/regexdev/version.map
++++ b/dpdk/lib/regexdev/version.map
+@@ -1,6 +1,7 @@
+ EXPERIMENTAL {
+ 	global:
+ 
++	rte_regex_devices;
+ 	rte_regexdev_attr_get;
+ 	rte_regexdev_attr_set;
+ 	rte_regexdev_close;
+@@ -11,6 +12,8 @@ EXPERIMENTAL {
+ 	rte_regexdev_enqueue_burst;
+ 	rte_regexdev_get_dev_id;
+ 	rte_regexdev_info_get;
++	rte_regexdev_is_valid_dev;
++	rte_regexdev_logtype;
+ 	rte_regexdev_queue_pair_setup;
+ 	rte_regexdev_rule_db_compile_activate;
+ 	rte_regexdev_rule_db_export;
+@@ -23,4 +26,12 @@ EXPERIMENTAL {
+ 	rte_regexdev_xstats_get;
+ 	rte_regexdev_xstats_names_get;
+ 	rte_regexdev_xstats_reset;
++
++	local: *;
++};
++
++INTERNAL {
++	rte_regexdev_get_device_by_name;
++	rte_regexdev_register;
++	rte_regexdev_unregister;
+ };
+diff --git a/dpdk/lib/rib/rte_rib.c b/dpdk/lib/rib/rte_rib.c
+index 6c29e1c49a..1a4b10d728 100644
+--- a/dpdk/lib/rib/rte_rib.c
++++ b/dpdk/lib/rib/rte_rib.c
+@@ -73,6 +73,8 @@ is_covered(uint32_t ip1, uint32_t ip2, uint8_t depth)
+ static inline struct rte_rib_node *
+ get_nxt_node(struct rte_rib_node *node, uint32_t ip)
+ {
++	if (node->depth == RIB_MAXDEPTH)
++		return NULL;
+ 	return (ip & (1 << (31 - node->depth))) ? node->right : node->left;
+ }
+ 
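The guard above closes an undefined-behaviour corner: for a node of maximum depth (a /32 route), the child-selection shift 1 << (31 - depth) becomes a shift by -1. A sketch of the guarded traversal step, with illustrative names:

#include <stdint.h>
#include <stddef.h>

#define MAXDEPTH 32

struct node {
	uint8_t depth;
	struct node *left, *right;
};

static struct node *
next_child(const struct node *n, uint32_t ip)
{
	if (n->depth == MAXDEPTH)	/* leaf: no address bits left to test */
		return NULL;
	return (ip & (UINT32_C(1) << (31 - n->depth))) ? n->right : n->left;
}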
+diff --git a/dpdk/lib/rib/rte_rib6.h b/dpdk/lib/rib/rte_rib6.h
+index 6f532265c6..d52b0b05cc 100644
+--- a/dpdk/lib/rib/rte_rib6.h
++++ b/dpdk/lib/rib/rte_rib6.h
+@@ -40,12 +40,12 @@ struct rte_rib6_node;
+ /** RIB configuration structure */
+ struct rte_rib6_conf {
+ 	/**
+-	 * Size of extension block inside rte_rib_node.
++	 * Size of extension block inside rte_rib6_node.
+ 	 * This space could be used to store additional user
+ 	 * defined data.
+ 	 */
+ 	size_t	ext_sz;
+-	/* size of rte_rib_node's pool */
++	/* size of rte_rib6_node's pool */
+ 	int	max_nodes;
+ };
+ 
+@@ -307,7 +307,7 @@ rte_rib6_create(const char *name, int socket_id,
+  * Find an existing RIB object and return a pointer to it.
+  *
+  * @param name
+- *  Name of the rib object as passed to rte_rib_create()
++ *  Name of the rib object as passed to rte_rib6_create()
+  * @return
+  *  Pointer to RIB object on success
+  *  NULL otherwise with rte_errno indicating reason for failure.
+diff --git a/dpdk/lib/ring/rte_ring.c b/dpdk/lib/ring/rte_ring.c
+index f17bd966be..6a94a038c4 100644
+--- a/dpdk/lib/ring/rte_ring.c
++++ b/dpdk/lib/ring/rte_ring.c
+@@ -75,7 +75,7 @@ rte_ring_get_memsize_elem(unsigned int esize, unsigned int count)
+ 		return -EINVAL;
+ 	}
+ 
+-	sz = sizeof(struct rte_ring) + count * esize;
++	sz = sizeof(struct rte_ring) + (ssize_t)count * esize;
+ 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+ 	return sz;
+ }
+@@ -267,7 +267,7 @@ rte_ring_create_elem(const char *name, unsigned int esize, unsigned int count,
+ 
+ 	ring_size = rte_ring_get_memsize_elem(esize, count);
+ 	if (ring_size < 0) {
+-		rte_errno = ring_size;
++		rte_errno = -ring_size;
+ 		return NULL;
+ 	}
+ 
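Two small but distinct fixes above: the size product is computed in a wider signed type so an unsigned int count * esize cannot wrap before the addition, and the negative return code is negated before being stored, since rte_errno holds positive error numbers. A sketch of the size helper under those assumptions:

#include <errno.h>
#include <sys/types.h>

static ssize_t
ring_memsize(unsigned int esize, unsigned int count, size_t header)
{
	if (esize % 4 != 0)
		return -EINVAL;		/* negative error code by convention */
	/* widen before multiplying so the product cannot wrap */
	return (ssize_t)header + (ssize_t)count * esize;
}

/* Caller side: a negative result is an error code, so negate it:
 *	ssize_t sz = ring_memsize(esize, count, sizeof(struct ring));
 *	if (sz < 0) { my_errno = (int)-sz; return NULL; }
 */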
+diff --git a/dpdk/lib/ring/rte_ring_core.h b/dpdk/lib/ring/rte_ring_core.h
+index 46ad584f9c..1252ca9546 100644
+--- a/dpdk/lib/ring/rte_ring_core.h
++++ b/dpdk/lib/ring/rte_ring_core.h
+@@ -12,7 +12,7 @@
+ 
+ /**
+  * @file
+- * This file contains definion of RTE ring structure itself,
++ * This file contains definition of RTE ring structure itself,
+  * init flags and some related macros.
+  * For majority of DPDK entities, it is not recommended to include
+  * this file directly, use include <rte_ring.h> or <rte_ring_elem.h>
+diff --git a/dpdk/lib/sched/rte_pie.c b/dpdk/lib/sched/rte_pie.c
+index 934e9aee50..79db6e96b1 100644
+--- a/dpdk/lib/sched/rte_pie.c
++++ b/dpdk/lib/sched/rte_pie.c
+@@ -3,6 +3,7 @@
+  */
+ 
+ #include <stdlib.h>
++#include <string.h>
+ 
+ #include "rte_pie.h"
+ #include <rte_common.h>
+@@ -17,26 +18,11 @@ int
+ rte_pie_rt_data_init(struct rte_pie *pie)
+ {
+ 	if (pie == NULL) {
+-		/* Allocate memory to use the PIE data structure */
+-		pie = rte_malloc(NULL, sizeof(struct rte_pie), 0);
+-
+-		if (pie == NULL)
+-			RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
+-
+-		return -1;
++		RTE_LOG(ERR, SCHED, "%s: Invalid addr for pie\n", __func__);
++		return -EINVAL;
+ 	}
+ 
+-	pie->active = 0;
+-	pie->in_measurement = 0;
+-	pie->departed_bytes_count = 0;
+-	pie->start_measurement = 0;
+-	pie->last_measurement = 0;
+-	pie->qlen = 0;
+-	pie->avg_dq_time = 0;
+-	pie->burst_allowance = 0;
+-	pie->qdelay_old = 0;
+-	pie->drop_prob = 0;
+-	pie->accu_prob = 0;
++	memset(pie, 0, sizeof(*pie));
+ 
+ 	return 0;
+ }
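The rewritten initializer above changes the contract: a NULL pointer is a caller bug and is rejected with -EINVAL (the old path allocated a new structure into a local parameter, leaked it, and returned -1 regardless), and memset() replaces the field-by-field zeroing so newly added members cannot be missed. A sketch with an illustrative subset of the runtime data:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct pie {
	int active;
	uint64_t qlen;
	double drop_prob;
};

static int
pie_init(struct pie *p)
{
	if (p == NULL)
		return -EINVAL;		/* caller owns the storage */
	memset(p, 0, sizeof(*p));	/* zeroes every field, present and future */
	return 0;
}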
+diff --git a/dpdk/lib/sched/rte_pie.h b/dpdk/lib/sched/rte_pie.h
+index dfdf572311..3e2c1ef467 100644
+--- a/dpdk/lib/sched/rte_pie.h
++++ b/dpdk/lib/sched/rte_pie.h
+@@ -218,7 +218,7 @@ _rte_pie_drop(const struct rte_pie_config *pie_cfg,
+ 	struct rte_pie *pie)
+ {
+ 	uint64_t rand_value;
+-	double qdelay = pie_cfg->qdelay_ref * 0.5;
++	uint64_t qdelay = pie_cfg->qdelay_ref / 2;
+ 
+ 	/* PIE is active but the queue is not congested: return 0 */
+ 	if (((pie->qdelay_old < qdelay) && (pie->drop_prob < 0.2)) ||
+@@ -252,7 +252,7 @@ _rte_pie_drop(const struct rte_pie_config *pie_cfg,
+ }
+ 
+ /**
+- * @brief Decides if new packet should be enqeued or dropped for non-empty queue
++ * @brief Decides if new packet should be enqueued or dropped for non-empty queue
+  *
+  * @param pie_cfg [in] config pointer to a PIE configuration parameter structure
+  * @param pie [in,out] data pointer to PIE runtime data
+@@ -319,7 +319,7 @@ rte_pie_enqueue_nonempty(const struct rte_pie_config *pie_cfg,
+ }
+ 
+ /**
+- * @brief Decides if new packet should be enqeued or dropped
++ * @brief Decides if new packet should be enqueued or dropped
+  * Updates run time data and gives verdict whether to enqueue or drop the packet.
+  *
+  * @param pie_cfg [in] config pointer to a PIE configuration parameter structure
+@@ -330,7 +330,7 @@ rte_pie_enqueue_nonempty(const struct rte_pie_config *pie_cfg,
+  *
+  * @return Operation status
+  * @retval 0 enqueue the packet
+- * @retval 1 drop the packet based on drop probility criteria
++ * @retval 1 drop the packet based on drop probability criteria
+  */
+ static inline int
+ __rte_experimental
+diff --git a/dpdk/lib/sched/rte_red.h b/dpdk/lib/sched/rte_red.h
+index 36273cac64..f5843dab1b 100644
+--- a/dpdk/lib/sched/rte_red.h
++++ b/dpdk/lib/sched/rte_red.h
+@@ -303,7 +303,7 @@ __rte_red_drop(const struct rte_red_config *red_cfg, struct rte_red *red)
+ }
+ 
+ /**
+- * @brief Decides if new packet should be enqeued or dropped in queue non-empty case
++ * @brief Decides if new packet should be enqueued or dropped in queue non-empty case
+  *
+  * @param red_cfg [in] config pointer to a RED configuration parameter structure
+  * @param red [in,out] data pointer to RED runtime data
+@@ -361,7 +361,7 @@ rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,
+ }
+ 
+ /**
+- * @brief Decides if new packet should be enqeued or dropped
++ * @brief Decides if new packet should be enqueued or dropped
+  * Updates run time data based on new queue size value.
+  * Based on new queue average and RED configuration parameters
+  * gives verdict whether to enqueue or drop the packet.
+diff --git a/dpdk/lib/sched/rte_sched.c b/dpdk/lib/sched/rte_sched.c
+index ed44808f7b..62b3d2e315 100644
+--- a/dpdk/lib/sched/rte_sched.c
++++ b/dpdk/lib/sched/rte_sched.c
+@@ -239,7 +239,7 @@ struct rte_sched_port {
+ 	int socket;
+ 
+ 	/* Timing */
+-	uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cyles */
++	uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
+ 	uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
+ 	uint64_t time;                /* Current NIC TX time measured in bytes */
+ 	struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
+diff --git a/dpdk/lib/sched/rte_sched.h b/dpdk/lib/sched/rte_sched.h
+index 484dbdcc3d..3c625ba169 100644
+--- a/dpdk/lib/sched/rte_sched.h
++++ b/dpdk/lib/sched/rte_sched.h
+@@ -360,7 +360,7 @@ rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
+  *
+  * Hierarchical scheduler subport bandwidth profile add
+  * Note that this function is safe to use in runtime for adding new
+- * subport bandwidth profile as it doesn't have any impact on hiearchical
++ * subport bandwidth profile as it doesn't have any impact on hierarchical
+  * structure of the scheduler.
+  * @param port
+  *   Handle to port scheduler instance
+diff --git a/dpdk/lib/security/rte_security.h b/dpdk/lib/security/rte_security.h
+index 1228b6c8b1..1a15e95267 100644
+--- a/dpdk/lib/security/rte_security.h
++++ b/dpdk/lib/security/rte_security.h
+@@ -301,9 +301,9 @@ struct rte_security_ipsec_lifetime {
+ 	uint64_t bytes_soft_limit;
+ 	/**< Soft expiry limit in bytes */
+ 	uint64_t packets_hard_limit;
+-	/**< Soft expiry limit in number of packets */
++	/**< Hard expiry limit in number of packets */
+ 	uint64_t bytes_hard_limit;
+-	/**< Soft expiry limit in bytes */
++	/**< Hard expiry limit in bytes */
+ };
+ 
+ /**
+diff --git a/dpdk/lib/stack/meson.build b/dpdk/lib/stack/meson.build
+index 2f53f49677..18177a742f 100644
+--- a/dpdk/lib/stack/meson.build
++++ b/dpdk/lib/stack/meson.build
+@@ -9,4 +9,5 @@ indirect_headers += files(
+         'rte_stack_lf.h',
+         'rte_stack_lf_generic.h',
+         'rte_stack_lf_c11.h',
++        'rte_stack_lf_stubs.h',
+ )
+diff --git a/dpdk/lib/table/rte_swx_table.h b/dpdk/lib/table/rte_swx_table.h
+index f93e5f3f95..c1383c2e57 100644
+--- a/dpdk/lib/table/rte_swx_table.h
++++ b/dpdk/lib/table/rte_swx_table.h
+@@ -216,7 +216,7 @@ typedef int
+  * operations into the same table.
+  *
+  * The typical reason an implementation may choose to split the table lookup
+- * operation into multiple steps is to hide the latency of the inherrent memory
++ * operation into multiple steps is to hide the latency of the inherent memory
+  * read operations: before a read operation with the source data likely not in
+  * the CPU cache, the source data prefetch is issued and the table lookup
+  * operation is postponed in favor of some other unrelated work, which the CPU
+diff --git a/dpdk/lib/table/rte_swx_table_selector.h b/dpdk/lib/table/rte_swx_table_selector.h
+index 62988d2856..05863cc90b 100644
+--- a/dpdk/lib/table/rte_swx_table_selector.h
++++ b/dpdk/lib/table/rte_swx_table_selector.h
+@@ -155,7 +155,7 @@ rte_swx_table_selector_group_set(void *table,
+  * mechanism allows for multiple concurrent select operations into the same table.
+  *
+  * The typical reason an implementation may choose to split the operation into multiple steps is to
+- * hide the latency of the inherrent memory read operations: before a read operation with the
++ * hide the latency of the inherent memory read operations: before a read operation with the
+  * source data likely not in the CPU cache, the source data prefetch is issued and the operation is
+  * postponed in favor of some other unrelated work, which the CPU executes in parallel with the
+  * source data being fetched into the CPU cache; later on, the operation is resumed, this time with
+diff --git a/dpdk/lib/table/rte_table_hash_func.h b/dpdk/lib/table/rte_table_hash_func.h
+index c4c35cc06a..a962ec2f68 100644
+--- a/dpdk/lib/table/rte_table_hash_func.h
++++ b/dpdk/lib/table/rte_table_hash_func.h
+@@ -58,8 +58,8 @@ static inline uint64_t
+ rte_table_hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t crc0;
+ 
+ 	crc0 = rte_crc32_u64(seed, k[0] & m[0]);
+@@ -72,8 +72,8 @@ static inline uint64_t
+ rte_table_hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t k0, crc0, crc1;
+ 
+ 	k0 = k[0] & m[0];
+@@ -91,8 +91,8 @@ static inline uint64_t
+ rte_table_hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t k0, k2, crc0, crc1;
+ 
+ 	k0 = k[0] & m[0];
+@@ -113,8 +113,8 @@ static inline uint64_t
+ rte_table_hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t k0, k2, crc0, crc1, crc2, crc3;
+ 
+ 	k0 = k[0] & m[0];
+@@ -139,8 +139,8 @@ static inline uint64_t
+ rte_table_hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t k0, k2, crc0, crc1, crc2, crc3;
+ 
+ 	k0 = k[0] & m[0];
+@@ -165,8 +165,8 @@ static inline uint64_t
+ rte_table_hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
+ 
+ 	k0 = k[0] & m[0];
+@@ -192,8 +192,8 @@ static inline uint64_t
+ rte_table_hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+ 
+ 	k0 = k[0] & m[0];
+@@ -222,8 +222,8 @@ static inline uint64_t
+ rte_table_hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ 	uint64_t seed)
+ {
+-	uint64_t *k = key;
+-	uint64_t *m = mask;
++	uint64_t *k = (uint64_t *)key;
++	uint64_t *m = (uint64_t *)mask;
+ 	uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+ 
+ 	k0 = k[0] & m[0];
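
The casts added throughout rte_table_hash_func.h exist because C++ (unlike C) forbids implicit conversion from void * to uint64_t *, and these static inline helpers live in a public header that C++ applications may include. A minimal sketch of the pattern, compilable as both C and C++; the mixing step is an assumed placeholder for the rte_crc32_u64() intrinsic used by the real code:

    #include <stdint.h>
    #include <stdio.h>

    /* Hash one 8-byte key through a mask.  The explicit casts keep
     * this compilable as C and C++ alike; a plain
     * "uint64_t *k = key;" is an error in C++. */
    static inline uint64_t
    hash_key8(void *key, void *mask, uint64_t seed)
    {
        uint64_t *k = (uint64_t *)key;
        uint64_t *m = (uint64_t *)mask;
        uint64_t v = seed ^ (k[0] & m[0]);

        v ^= v >> 33;                    /* stand-in for rte_crc32_u64() */
        v *= UINT64_C(0xff51afd7ed558ccd);
        v ^= v >> 33;
        return v;
    }

    int main(void)
    {
        uint64_t key = UINT64_C(0x0123456789abcdef), mask = ~UINT64_C(0);
        printf("%016llx\n", (unsigned long long)hash_key8(&key, &mask, 0));
        return 0;
    }
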
+diff --git a/dpdk/lib/telemetry/rte_telemetry.h b/dpdk/lib/telemetry/rte_telemetry.h
+index 7bca8a9a49..3372b32f38 100644
+--- a/dpdk/lib/telemetry/rte_telemetry.h
++++ b/dpdk/lib/telemetry/rte_telemetry.h
+@@ -9,6 +9,10 @@
+ #ifndef _RTE_TELEMETRY_H_
+ #define _RTE_TELEMETRY_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /** Maximum length for string used in object. */
+ #define RTE_TEL_MAX_STRING_LEN 128
+ /** Maximum length of string. */
+@@ -294,4 +298,8 @@ rte_tel_data_alloc(void);
+ void
+ rte_tel_data_free(struct rte_tel_data *data);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif
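
Several headers in this series gain the same treatment: extern "C" guards so that C++ translation units see the declarations with C linkage and the symbols link without name mangling. The canonical shape, shown for a hypothetical header; note that rte_vhost.h below takes the complementary approach of hiding the non-C++-aware <linux/*> includes behind #ifndef __cplusplus instead:

    /* mylib.h -- hypothetical C header meant to be usable from C and C++. */
    #ifndef MYLIB_H
    #define MYLIB_H

    #ifdef __cplusplus
    extern "C" {            /* C++ callers get C linkage: no name mangling */
    #endif

    int mylib_do_work(int arg);     /* implemented in a C translation unit */

    #ifdef __cplusplus
    }
    #endif

    #endif /* MYLIB_H */
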
+diff --git a/dpdk/lib/telemetry/telemetry.c b/dpdk/lib/telemetry/telemetry.c
+index a7483167d4..e5ccfe47f7 100644
+--- a/dpdk/lib/telemetry/telemetry.c
++++ b/dpdk/lib/telemetry/telemetry.c
+@@ -534,7 +534,7 @@ telemetry_legacy_init(void)
+ 	}
+ 	rc = pthread_create(&t_old, NULL, socket_listener, &v1_socket);
+ 	if (rc != 0) {
+-		TMTY_LOG(ERR, "Error with create legcay socket thread: %s\n",
++		TMTY_LOG(ERR, "Error with create legacy socket thread: %s\n",
+ 			 strerror(rc));
+ 		close(v1_socket.sock);
+ 		v1_socket.sock = -1;
+diff --git a/dpdk/lib/telemetry/telemetry_json.h b/dpdk/lib/telemetry/telemetry_json.h
+index f02a12f5b0..db70690274 100644
+--- a/dpdk/lib/telemetry/telemetry_json.h
++++ b/dpdk/lib/telemetry/telemetry_json.h
+@@ -23,7 +23,7 @@
+ /**
+  * @internal
+  * Copies a value into a buffer if the buffer has enough available space.
+- * Nothing written to buffer if an overflow ocurs.
++ * Nothing written to buffer if an overflow occurs.
+  * This function is not for use for values larger than given buffer length.
+  */
+ __rte_format_printf(3, 4)
+diff --git a/dpdk/lib/vhost/rte_vdpa.h b/dpdk/lib/vhost/rte_vdpa.h
+index 1437f400bf..6ac85d1bbf 100644
+--- a/dpdk/lib/vhost/rte_vdpa.h
++++ b/dpdk/lib/vhost/rte_vdpa.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_VDPA_H_
+ #define _RTE_VDPA_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ /**
+  * @file
+  *
+@@ -183,4 +187,9 @@ rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
+  */
+ int
+ rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid);
++
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_VDPA_H_ */
+diff --git a/dpdk/lib/vhost/rte_vhost.h b/dpdk/lib/vhost/rte_vhost.h
+index b454c05868..2acb31df2d 100644
+--- a/dpdk/lib/vhost/rte_vhost.h
++++ b/dpdk/lib/vhost/rte_vhost.h
+@@ -21,10 +21,12 @@
+ extern "C" {
+ #endif
+ 
++#ifndef __cplusplus
+ /* These are not C++-aware. */
+ #include <linux/vhost.h>
+ #include <linux/virtio_ring.h>
+ #include <linux/virtio_net.h>
++#endif
+ 
+ #define RTE_VHOST_USER_CLIENT		(1ULL << 0)
+ #define RTE_VHOST_USER_NO_RECONNECT	(1ULL << 1)
+diff --git a/dpdk/lib/vhost/rte_vhost_async.h b/dpdk/lib/vhost/rte_vhost_async.h
+index a87ea6ba37..d20152ca7a 100644
+--- a/dpdk/lib/vhost/rte_vhost_async.h
++++ b/dpdk/lib/vhost/rte_vhost_async.h
+@@ -5,6 +5,10 @@
+ #ifndef _RTE_VHOST_ASYNC_H_
+ #define _RTE_VHOST_ASYNC_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include "rte_vhost.h"
+ 
+ /**
+@@ -242,4 +246,8 @@ __rte_experimental
+ uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ 		struct rte_mbuf **pkts, uint16_t count);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _RTE_VHOST_ASYNC_H_ */
+diff --git a/dpdk/lib/vhost/rte_vhost_crypto.h b/dpdk/lib/vhost/rte_vhost_crypto.h
+index f54d731139..b49e389579 100644
+--- a/dpdk/lib/vhost/rte_vhost_crypto.h
++++ b/dpdk/lib/vhost/rte_vhost_crypto.h
+@@ -5,6 +5,10 @@
+ #ifndef _VHOST_CRYPTO_H_
+ #define _VHOST_CRYPTO_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <stdint.h>
+ 
+ #include <rte_compat.h>
+@@ -132,4 +136,8 @@ uint16_t
+ rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+ 		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /**< _VHOST_CRYPTO_H_ */
+diff --git a/dpdk/lib/vhost/socket.c b/dpdk/lib/vhost/socket.c
+index 82963c1e6d..33f54a779b 100644
+--- a/dpdk/lib/vhost/socket.c
++++ b/dpdk/lib/vhost/socket.c
+@@ -501,7 +501,7 @@ vhost_user_reconnect_init(void)
+ 
+ 	ret = pthread_mutex_init(&reconn_list.mutex, NULL);
+ 	if (ret < 0) {
+-		VHOST_LOG_CONFIG(ERR, "failed to initialize mutex");
++		VHOST_LOG_CONFIG(ERR, "failed to initialize mutex\n");
+ 		return ret;
+ 	}
+ 	TAILQ_INIT(&reconn_list.head);
+@@ -509,10 +509,10 @@ vhost_user_reconnect_init(void)
+ 	ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL,
+ 			     vhost_user_client_reconnect, NULL);
+ 	if (ret != 0) {
+-		VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread");
++		VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread\n");
+ 		if (pthread_mutex_destroy(&reconn_list.mutex)) {
+ 			VHOST_LOG_CONFIG(ERR,
+-				"failed to destroy reconnect mutex");
++				"failed to destroy reconnect mutex\n");
+ 		}
+ 	}
+ 
+@@ -1147,7 +1147,7 @@ rte_vhost_driver_start(const char *path)
+ 			&vhost_user.fdset);
+ 		if (ret != 0) {
+ 			VHOST_LOG_CONFIG(ERR,
+-				"failed to create fdset handling thread");
++				"failed to create fdset handling thread\n");
+ 
+ 			fdset_pipe_uninit(&vhost_user.fdset);
+ 			return -1;
+diff --git a/dpdk/lib/vhost/vdpa_driver.h b/dpdk/lib/vhost/vdpa_driver.h
+index fc2d6acedd..7ba9e28e57 100644
+--- a/dpdk/lib/vhost/vdpa_driver.h
++++ b/dpdk/lib/vhost/vdpa_driver.h
+@@ -5,6 +5,10 @@
+ #ifndef _VDPA_DRIVER_H_
+ #define _VDPA_DRIVER_H_
+ 
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ #include <stdbool.h>
+ 
+ #include <rte_compat.h>
+@@ -141,4 +145,8 @@ __rte_internal
+ int
+ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m);
+ 
++#ifdef __cplusplus
++}
++#endif
++
+ #endif /* _VDPA_DRIVER_H_ */
+diff --git a/dpdk/lib/vhost/version.map b/dpdk/lib/vhost/version.map
+index a7ef7f1976..0f315ed2a5 100644
+--- a/dpdk/lib/vhost/version.map
++++ b/dpdk/lib/vhost/version.map
+@@ -87,7 +87,7 @@ EXPERIMENTAL {
+ };
+ 
+ INTERNAL {
+-	global;
++	global:
+ 
+ 	rte_vdpa_register_device;
+ 	rte_vdpa_relay_vring_used;
+diff --git a/dpdk/lib/vhost/vhost.c b/dpdk/lib/vhost/vhost.c
+index 13a9bb9dd1..24f94495c6 100644
+--- a/dpdk/lib/vhost/vhost.c
++++ b/dpdk/lib/vhost/vhost.c
+@@ -1299,11 +1299,15 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
+ 	if (!vq)
+ 		return -1;
+ 
++	rte_spinlock_lock(&vq->access_lock);
++
+ 	if (vq_is_packed(dev))
+ 		vhost_vring_call_packed(dev, vq);
+ 	else
+ 		vhost_vring_call_split(dev, vq);
+ 
++	rte_spinlock_unlock(&vq->access_lock);
++
+ 	return 0;
+ }
+ 
+@@ -1779,26 +1783,22 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+ 	if (vq == NULL)
+ 		return ret;
+ 
+-	ret = 0;
+-
+-	if (!vq->async)
+-		return ret;
+-
+ 	if (!rte_spinlock_trylock(&vq->access_lock)) {
+ 		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ 			"virt queue busy.\n");
+-		return -1;
++		return ret;
+ 	}
+ 
+-	if (vq->async->pkts_inflight_n) {
++	if (!vq->async) {
++		ret = 0;
++	} else if (vq->async->pkts_inflight_n) {
+ 		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ 			"async inflight packets must be completed before unregistration.\n");
+-		ret = -1;
+-		goto out;
++	} else {
++		vhost_free_async_mem(vq);
++		ret = 0;
+ 	}
+ 
+-	vhost_free_async_mem(vq);
+-out:
+ 	rte_spinlock_unlock(&vq->access_lock);
+ 
+ 	return ret;
+@@ -1853,16 +1853,15 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
+ 	if (vq == NULL)
+ 		return ret;
+ 
+-	if (!vq->async)
+-		return ret;
+-
+ 	if (!rte_spinlock_trylock(&vq->access_lock)) {
+ 		VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
+ 			"virt queue busy.\n");
+ 		return ret;
+ 	}
+ 
+-	ret = vq->async->pkts_inflight_n;
++	if (vq->async)
++		ret = vq->async->pkts_inflight_n;
++
+ 	rte_spinlock_unlock(&vq->access_lock);
+ 
+ 	return ret;
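
Both vhost.c hunks above move the vq->async checks under access_lock, making the test and the subsequent work a single critical section; previously the channel could be unregistered between the unlocked check and the locked work. A standalone sketch of the trylock-then-check pattern, assuming POSIX spinlocks in place of rte_spinlock:

    #include <pthread.h>
    #include <stdio.h>

    struct queue {
        pthread_spinlock_t lock;
        int *async_state;        /* NULL when no async channel registered */
        int inflight;
    };

    /* Return the inflight count, or -1 if the queue is busy or has no
     * async channel.  The async_state test runs only while the lock is
     * held, so it cannot race with a concurrent unregister. */
    static int
    queue_get_inflight(struct queue *q)
    {
        int ret = -1;

        if (pthread_spin_trylock(&q->lock) != 0)
            return ret;                      /* EBUSY: queue is in use */

        if (q->async_state)
            ret = q->inflight;

        pthread_spin_unlock(&q->lock);
        return ret;
    }

    int main(void)
    {
        static int state;
        struct queue q = { .async_state = &state, .inflight = 3 };

        pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE);
        printf("inflight: %d\n", queue_get_inflight(&q));
        pthread_spin_destroy(&q.lock);
        return 0;
    }
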
+diff --git a/dpdk/lib/vhost/vhost.h b/dpdk/lib/vhost/vhost.h
+index 7085e0885c..d4586f3341 100644
+--- a/dpdk/lib/vhost/vhost.h
++++ b/dpdk/lib/vhost/vhost.h
+@@ -354,7 +354,8 @@ struct vring_packed_desc_event {
+ 
+ struct guest_page {
+ 	uint64_t guest_phys_addr;
+-	uint64_t host_phys_addr;
++	uint64_t host_iova;
++	uint64_t host_user_addr;
+ 	uint64_t size;
+ };
+ 
+@@ -587,6 +588,20 @@ static __rte_always_inline int guest_page_addrcmp(const void *p1,
+ 	return 0;
+ }
+ 
++static __rte_always_inline int guest_page_rangecmp(const void *p1, const void *p2)
++{
++	const struct guest_page *page1 = (const struct guest_page *)p1;
++	const struct guest_page *page2 = (const struct guest_page *)p2;
++
++	if (page1->guest_phys_addr >= page2->guest_phys_addr) {
++		if (page1->guest_phys_addr < page2->guest_phys_addr + page2->size)
++			return 0;
++		else
++			return 1;
++	} else
++		return -1;
++}
++
+ static __rte_always_inline rte_iova_t
+ gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
+ 	uint64_t gpa_size, uint64_t *hpa_size)
+@@ -597,20 +612,20 @@ gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
+ 
+ 	*hpa_size = gpa_size;
+ 	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+-		key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
++		key.guest_phys_addr = gpa;
+ 		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
+-			       sizeof(struct guest_page), guest_page_addrcmp);
++			       sizeof(struct guest_page), guest_page_rangecmp);
+ 		if (page) {
+ 			if (gpa + gpa_size <=
+ 					page->guest_phys_addr + page->size) {
+ 				return gpa - page->guest_phys_addr +
+-					page->host_phys_addr;
++					page->host_iova;
+ 			} else if (gpa < page->guest_phys_addr +
+ 						page->size) {
+ 				*hpa_size = page->guest_phys_addr +
+ 					page->size - gpa;
+ 				return gpa - page->guest_phys_addr +
+-					page->host_phys_addr;
++					page->host_iova;
+ 			}
+ 		}
+ 	} else {
+@@ -621,13 +636,13 @@ gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
+ 				if (gpa + gpa_size <=
+ 					page->guest_phys_addr + page->size) {
+ 					return gpa - page->guest_phys_addr +
+-						page->host_phys_addr;
++						page->host_iova;
+ 				} else if (gpa < page->guest_phys_addr +
+ 							page->size) {
+ 					*hpa_size = page->guest_phys_addr +
+ 						page->size - gpa;
+ 					return gpa - page->guest_phys_addr +
+-						page->host_phys_addr;
++						page->host_iova;
+ 				}
+ 			}
+ 		}
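
guest_page_addrcmp() matches only exact start addresses, which forced the old lookup to mask the key down to a page boundary -- wrong once merged pages have mixed sizes. The new guest_page_rangecmp() instead reports equality whenever the key falls inside [guest_phys_addr, guest_phys_addr + size), so bsearch() returns the containing page directly. The same trick in miniature:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct range {
        uint64_t start;
        uint64_t size;
    };

    /* "Equal" means: the key lies inside this range.  bsearch() then
     * finds the containing range, not just an exact start match. */
    static int
    range_cmp(const void *k, const void *e)
    {
        const struct range *key = k;
        const struct range *elem = e;

        if (key->start < elem->start)
            return -1;
        if (key->start >= elem->start + elem->size)
            return 1;
        return 0;
    }

    int main(void)
    {
        /* Sorted, non-overlapping, as dev->guest_pages would be. */
        struct range pages[] = {
            { 0x0000, 0x1000 }, { 0x1000, 0x2000 }, { 0x4000, 0x1000 },
        };
        struct range key = { .start = 0x2abc };
        struct range *hit = bsearch(&key, pages, 3, sizeof pages[0],
                                    range_cmp);

        if (hit)
            printf("0x%llx is in [0x%llx, 0x%llx)\n",
                   (unsigned long long)key.start,
                   (unsigned long long)hit->start,
                   (unsigned long long)(hit->start + hit->size));
        return 0;
    }
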
+diff --git a/dpdk/lib/vhost/vhost_crypto.c b/dpdk/lib/vhost/vhost_crypto.c
+index 926b5c0bd9..7d1d6a1861 100644
+--- a/dpdk/lib/vhost/vhost_crypto.c
++++ b/dpdk/lib/vhost/vhost_crypto.c
+@@ -565,94 +565,57 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req,
+ 	return data;
+ }
+ 
+-static __rte_always_inline int
+-copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+-		struct vhost_crypto_desc *head,
+-		struct vhost_crypto_desc **cur_desc,
+-		uint32_t size, uint32_t max_n_descs)
++static __rte_always_inline uint32_t
++copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
++	struct vhost_crypto_desc *desc, uint32_t size)
+ {
+-	struct vhost_crypto_desc *desc = *cur_desc;
+-	uint64_t remain, addr, dlen, len;
+-	uint32_t to_copy;
+-	uint8_t *data = dst_data;
+-	uint8_t *src;
+-	int left = size;
+-
+-	to_copy = RTE_MIN(desc->len, (uint32_t)left);
+-	dlen = to_copy;
+-	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+-			VHOST_ACCESS_RO);
+-	if (unlikely(!src || !dlen))
+-		return -1;
++	uint64_t remain;
++	uint64_t addr;
++
++	remain = RTE_MIN(desc->len, size);
++	addr = desc->addr;
++	do {
++		uint64_t len;
++		void *src;
++
++		len = remain;
++		src = IOVA_TO_VVA(void *, vc_req, addr, &len, VHOST_ACCESS_RO);
++		if (unlikely(src == NULL || len == 0))
++			return 0;
+ 
+-	rte_memcpy((uint8_t *)data, src, dlen);
+-	data += dlen;
++		rte_memcpy(dst, src, len);
++		remain -= len;
++		/* cast is needed for 32-bit architecture */
++		dst = RTE_PTR_ADD(dst, (size_t)len);
++		addr += len;
++	} while (unlikely(remain != 0));
+ 
+-	if (unlikely(dlen < to_copy)) {
+-		remain = to_copy - dlen;
+-		addr = desc->addr + dlen;
++	return RTE_MIN(desc->len, size);
++}
+ 
+-		while (remain) {
+-			len = remain;
+-			src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
+-					VHOST_ACCESS_RO);
+-			if (unlikely(!src || !len)) {
+-				VC_LOG_ERR("Failed to map descriptor");
+-				return -1;
+-			}
+ 
+-			rte_memcpy(data, src, len);
+-			addr += len;
+-			remain -= len;
+-			data += len;
+-		}
+-	}
++static __rte_always_inline int
++copy_data(void *data, struct vhost_crypto_data_req *vc_req,
++	struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
++	uint32_t size, uint32_t max_n_descs)
++{
++	struct vhost_crypto_desc *desc = *cur_desc;
++	uint32_t left = size;
+ 
+-	left -= to_copy;
++	do {
++		uint32_t copied;
+ 
+-	while (desc >= head && desc - head < (int)max_n_descs && left) {
+-		desc++;
+-		to_copy = RTE_MIN(desc->len, (uint32_t)left);
+-		dlen = to_copy;
+-		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+-				VHOST_ACCESS_RO);
+-		if (unlikely(!src || !dlen)) {
+-			VC_LOG_ERR("Failed to map descriptor");
++		copied = copy_data_from_desc(data, vc_req, desc, left);
++		if (copied == 0)
+ 			return -1;
+-		}
+-
+-		rte_memcpy(data, src, dlen);
+-		data += dlen;
+-
+-		if (unlikely(dlen < to_copy)) {
+-			remain = to_copy - dlen;
+-			addr = desc->addr + dlen;
+-
+-			while (remain) {
+-				len = remain;
+-				src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
+-						VHOST_ACCESS_RO);
+-				if (unlikely(!src || !len)) {
+-					VC_LOG_ERR("Failed to map descriptor");
+-					return -1;
+-				}
+-
+-				rte_memcpy(data, src, len);
+-				addr += len;
+-				remain -= len;
+-				data += len;
+-			}
+-		}
+-
+-		left -= to_copy;
+-	}
++		left -= copied;
++		data = RTE_PTR_ADD(data, copied);
++	} while (left != 0 && ++desc < head + max_n_descs);
+ 
+-	if (unlikely(left > 0)) {
+-		VC_LOG_ERR("Incorrect virtio descriptor");
++	if (unlikely(left != 0))
+ 		return -1;
+-	}
+ 
+-	if (unlikely(desc - head == (int)max_n_descs))
++	if (unlikely(desc == head + max_n_descs))
+ 		*cur_desc = NULL;
+ 	else
+ 		*cur_desc = desc + 1;
+@@ -852,6 +815,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+ 	/* iv */
+ 	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
+ 			cipher->para.iv_len, max_n_descs))) {
++		VC_LOG_ERR("Incorrect virtio descriptor");
+ 		ret = VIRTIO_CRYPTO_BADMSG;
+ 		goto error_exit;
+ 	}
+@@ -883,6 +847,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+ 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
+ 				vc_req, head, &desc, cipher->para.src_data_len,
+ 				max_n_descs) < 0)) {
++			VC_LOG_ERR("Incorrect virtio descriptor");
+ 			ret = VIRTIO_CRYPTO_BADMSG;
+ 			goto error_exit;
+ 		}
+@@ -1006,6 +971,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+ 	/* iv */
+ 	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
+ 			chain->para.iv_len, max_n_descs) < 0)) {
++		VC_LOG_ERR("Incorrect virtio descriptor");
+ 		ret = VIRTIO_CRYPTO_BADMSG;
+ 		goto error_exit;
+ 	}
+@@ -1037,6 +1003,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+ 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
+ 				vc_req, head, &desc, chain->para.src_data_len,
+ 				max_n_descs) < 0)) {
++			VC_LOG_ERR("Incorrect virtio descriptor");
+ 			ret = VIRTIO_CRYPTO_BADMSG;
+ 			goto error_exit;
+ 		}
+@@ -1121,6 +1088,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+ 		if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
+ 				chain->para.hash_result_len,
+ 				max_n_descs) < 0)) {
++			VC_LOG_ERR("Incorrect virtio descriptor");
+ 			ret = VIRTIO_CRYPTO_BADMSG;
+ 			goto error_exit;
+ 		}
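
The vhost_crypto.c rewrite folds two nearly identical copy paths into copy_data_from_desc(), whose inner loop tolerates translations that map less than requested: each lookup may shrink len, so the loop advances the destination, the address and the remaining count until the descriptor is drained. A generic sketch, with a hypothetical translate() standing in for IOVA_TO_VVA():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical translation that maps at most to the next 8-byte
     * boundary, mimicking guest memory split into small chunks. */
    static uint8_t backing[64];

    static void *
    translate(uint64_t addr, uint64_t *len)
    {
        uint64_t chunk = 8 - (addr % 8);   /* distance to chunk edge */

        if (addr >= sizeof backing)
            return NULL;
        if (*len > chunk)
            *len = chunk;                  /* mapping shorter than asked */
        return &backing[addr];
    }

    /* Copy 'size' bytes from translated address 'addr' into 'dst',
     * restarting the translation each time it comes up short. */
    static int
    copy_from(void *dst, uint64_t addr, uint64_t size)
    {
        uint64_t remain = size;

        while (remain) {
            uint64_t len = remain;
            void *src = translate(addr, &len);

            if (!src || !len)
                return -1;
            memcpy(dst, src, len);
            dst = (uint8_t *)dst + len;
            addr += len;
            remain -= len;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t out[24];

        for (unsigned i = 0; i < sizeof backing; i++)
            backing[i] = (uint8_t)i;
        if (copy_from(out, 3, sizeof out) == 0)
            printf("out[0]=%u out[23]=%u\n", out[0], out[23]);  /* 3, 26 */
        return 0;
    }
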
+diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c
+index a781346c4d..df780fd7d6 100644
+--- a/dpdk/lib/vhost/vhost_user.c
++++ b/dpdk/lib/vhost/vhost_user.c
+@@ -143,57 +143,59 @@ get_blk_size(int fd)
+ 	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
+ }
+ 
+-static int
+-async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
++static void
++async_dma_map(struct virtio_net *dev, bool do_map)
+ {
+-	uint64_t host_iova;
+ 	int ret = 0;
++	uint32_t i;
++	struct guest_page *page;
+ 
+-	host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
+ 	if (do_map) {
+-		/* Add mapped region into the default container of DPDK. */
+-		ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+-						 region->host_user_addr,
+-						 host_iova,
+-						 region->size);
+-		if (ret) {
+-			/*
+-			 * DMA device may bind with kernel driver, in this case,
+-			 * we don't need to program IOMMU manually. However, if no
+-			 * device is bound with vfio/uio in DPDK, and vfio kernel
+-			 * module is loaded, the API will still be called and return
+-			 * with ENODEV/ENOSUP.
+-			 *
+-			 * DPDK vfio only returns ENODEV/ENOSUP in very similar
+-			 * situations(vfio either unsupported, or supported
+-			 * but no devices found). Either way, no mappings could be
+-			 * performed. We treat it as normal case in async path.
+-			 */
+-			if (rte_errno == ENODEV || rte_errno == ENOTSUP)
+-				return 0;
+-
+-			VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
+-			/* DMA mapping errors won't stop VHST_USER_SET_MEM_TABLE. */
+-			return 0;
++		for (i = 0; i < dev->nr_guest_pages; i++) {
++			page = &dev->guest_pages[i];
++			ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
++							 page->host_user_addr,
++							 page->host_iova,
++							 page->size);
++			if (ret) {
++				/*
++				 * DMA device may bind with kernel driver, in this case,
++				 * we don't need to program IOMMU manually. However, if no
++				 * device is bound with vfio/uio in DPDK, and vfio kernel
++				 * module is loaded, the API will still be called and return
++				 * with ENODEV.
++				 *
++				 * DPDK vfio only returns ENODEV in very similar situations
++				 * (vfio either unsupported, or supported but no devices found).
++				 * Either way, no mappings could be performed. We treat it as
++				 * normal case in async path. This is a workaround.
++				 */
++				if (rte_errno == ENODEV)
++					return;
++
++				/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
++				VHOST_LOG_CONFIG(ERR, "(%s) DMA engine map failed\n",
++					dev->ifname);
++			}
+ 		}
+ 
+ 	} else {
+-		/* Remove mapped region from the default container of DPDK. */
+-		ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+-						   region->host_user_addr,
+-						   host_iova,
+-						   region->size);
+-		if (ret) {
+-			/* like DMA map, ignore the kernel driver case when unmap. */
+-			if (rte_errno == EINVAL)
+-				return 0;
+-
+-			VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
+-			return ret;
++		for (i = 0; i < dev->nr_guest_pages; i++) {
++			page = &dev->guest_pages[i];
++			ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
++							   page->host_user_addr,
++							   page->host_iova,
++							   page->size);
++			if (ret) {
++				/* like DMA map, ignore the kernel driver case when unmap. */
++				if (rte_errno == EINVAL)
++					return;
++
++				VHOST_LOG_CONFIG(ERR, "(%s) DMA engine unmap failed\n",
++					dev->ifname);
++			}
+ 		}
+ 	}
+-
+-	return ret;
+ }
+ 
+ static void
+@@ -205,12 +207,12 @@ free_mem_region(struct virtio_net *dev)
+ 	if (!dev || !dev->mem)
+ 		return;
+ 
++	if (dev->async_copy && rte_vfio_is_enabled("vfio"))
++		async_dma_map(dev, false);
++
+ 	for (i = 0; i < dev->mem->nregions; i++) {
+ 		reg = &dev->mem->regions[i];
+ 		if (reg->host_user_addr) {
+-			if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+-				async_dma_map(reg, false);
+-
+ 			munmap(reg->mmap_addr, reg->mmap_size);
+ 			close(reg->fd);
+ 		}
+@@ -978,7 +980,7 @@ vhost_user_set_vring_base(struct virtio_net **pdev,
+ 
+ static int
+ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
+-		   uint64_t host_phys_addr, uint64_t size)
++		   uint64_t host_iova, uint64_t host_user_addr, uint64_t size)
+ {
+ 	struct guest_page *page, *last_page;
+ 	struct guest_page *old_pages;
+@@ -990,7 +992,8 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
+ 					dev->max_guest_pages * sizeof(*page),
+ 					RTE_CACHE_LINE_SIZE);
+ 		if (dev->guest_pages == NULL) {
+-			VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
++			VHOST_LOG_CONFIG(ERR, "(%s) cannot realloc guest_pages\n",
++				dev->ifname);
+ 			rte_free(old_pages);
+ 			return -1;
+ 		}
+@@ -999,8 +1002,9 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
+ 	if (dev->nr_guest_pages > 0) {
+ 		last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
+ 		/* merge if the two pages are continuous */
+-		if (host_phys_addr == last_page->host_phys_addr +
+-				      last_page->size) {
++		if (host_iova == last_page->host_iova + last_page->size &&
++		    guest_phys_addr == last_page->guest_phys_addr + last_page->size &&
++		    host_user_addr == last_page->host_user_addr + last_page->size) {
+ 			last_page->size += size;
+ 			return 0;
+ 		}
+@@ -1008,7 +1012,8 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
+ 
+ 	page = &dev->guest_pages[dev->nr_guest_pages++];
+ 	page->guest_phys_addr = guest_phys_addr;
+-	page->host_phys_addr  = host_phys_addr;
++	page->host_iova  = host_iova;
++	page->host_user_addr = host_user_addr;
+ 	page->size = size;
+ 
+ 	return 0;
+@@ -1021,14 +1026,15 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
+ 	uint64_t reg_size = reg->size;
+ 	uint64_t host_user_addr  = reg->host_user_addr;
+ 	uint64_t guest_phys_addr = reg->guest_phys_addr;
+-	uint64_t host_phys_addr;
++	uint64_t host_iova;
+ 	uint64_t size;
+ 
+-	host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
++	host_iova = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
+ 	size = page_size - (guest_phys_addr & (page_size - 1));
+ 	size = RTE_MIN(size, reg_size);
+ 
+-	if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
++	if (add_one_guest_page(dev, guest_phys_addr, host_iova,
++			       host_user_addr, size) < 0)
+ 		return -1;
+ 
+ 	host_user_addr  += size;
+@@ -1037,10 +1043,10 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
+ 
+ 	while (reg_size > 0) {
+ 		size = RTE_MIN(reg_size, page_size);
+-		host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
++		host_iova = rte_mem_virt2iova((void *)(uintptr_t)
+ 						  host_user_addr);
+-		if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
+-				size) < 0)
++		if (add_one_guest_page(dev, guest_phys_addr, host_iova,
++				       host_user_addr, size) < 0)
+ 			return -1;
+ 
+ 		host_user_addr  += size;
+@@ -1071,11 +1077,11 @@ dump_guest_pages(struct virtio_net *dev)
+ 		VHOST_LOG_CONFIG(INFO,
+ 			"guest physical page region %u\n"
+ 			"\t guest_phys_addr: %" PRIx64 "\n"
+-			"\t host_phys_addr : %" PRIx64 "\n"
++			"\t host_iova      : %" PRIx64 "\n"
+ 			"\t size           : %" PRIx64 "\n",
+ 			i,
+ 			page->guest_phys_addr,
+-			page->host_phys_addr,
++			page->host_iova,
+ 			page->size);
+ 	}
+ }
+@@ -1115,7 +1121,7 @@ vhost_user_postcopy_region_register(struct virtio_net *dev,
+ 	struct uffdio_register reg_struct;
+ 
+ 	/*
+-	 * Let's register all the mmap'ed area to ensure
++	 * Let's register all the mmapped area to ensure
+ 	 * alignment on page boundary.
+ 	 */
+ 	reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
+@@ -1177,7 +1183,7 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
+ 	msg->fd_num = 0;
+ 	send_vhost_reply(main_fd, msg);
+ 
+-	/* Wait for qemu to acknolwedge it's got the addresses
++	/* Wait for qemu to acknowledge it got the addresses
+ 	 * we've got to wait before we're allowed to generate faults.
+ 	 */
+ 	if (read_vhost_message(main_fd, &ack_msg) <= 0) {
+@@ -1215,7 +1221,6 @@ vhost_user_mmap_region(struct virtio_net *dev,
+ 	uint64_t mmap_size;
+ 	uint64_t alignment;
+ 	int populate;
+-	int ret;
+ 
+ 	/* Check for memory_size + mmap_offset overflow */
+ 	if (mmap_offset >= -region->size) {
+@@ -1274,14 +1279,6 @@ vhost_user_mmap_region(struct virtio_net *dev,
+ 			VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
+ 			return -1;
+ 		}
+-
+-		if (rte_vfio_is_enabled("vfio")) {
+-			ret = async_dma_map(region, true);
+-			if (ret) {
+-				VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
+-				return -1;
+-			}
+-		}
+ 	}
+ 
+ 	VHOST_LOG_CONFIG(INFO,
+@@ -1420,6 +1417,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ 		dev->mem->nregions++;
+ 	}
+ 
++	if (dev->async_copy && rte_vfio_is_enabled("vfio"))
++		async_dma_map(dev, true);
++
+ 	if (vhost_user_postcopy_register(dev, main_fd, msg) < 0)
+ 		goto free_mem_table;
+ 
+@@ -1603,6 +1603,9 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev,
+ 	int numa_node = SOCKET_ID_ANY;
+ 	void *addr;
+ 
++	if (validate_msg_fds(msg, 0) != 0)
++		return RTE_VHOST_MSG_RESULT_ERR;
++
+ 	if (msg->size != sizeof(msg->payload.inflight)) {
+ 		VHOST_LOG_CONFIG(ERR,
+ 			"invalid get_inflight_fd message size is %d\n",
+@@ -1704,6 +1707,9 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
+ 	int fd, i;
+ 	int numa_node = SOCKET_ID_ANY;
+ 
++	if (validate_msg_fds(msg, 1) != 0)
++		return RTE_VHOST_MSG_RESULT_ERR;
++
+ 	fd = msg->fds[0];
+ 	if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
+ 		VHOST_LOG_CONFIG(ERR,
+@@ -2566,8 +2572,11 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ 			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
+ 					len, imsg->perm);
+ 
+-			if (is_vring_iotlb(dev, vq, imsg))
++			if (is_vring_iotlb(dev, vq, imsg)) {
++				rte_spinlock_lock(&vq->access_lock);
+ 				*pdev = dev = translate_ring_addresses(dev, i);
++				rte_spinlock_unlock(&vq->access_lock);
++			}
+ 		}
+ 		break;
+ 	case VHOST_IOTLB_INVALIDATE:
+@@ -2580,8 +2589,11 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ 			vhost_user_iotlb_cache_remove(vq, imsg->iova,
+ 					imsg->size);
+ 
+-			if (is_vring_iotlb(dev, vq, imsg))
++			if (is_vring_iotlb(dev, vq, imsg)) {
++				rte_spinlock_lock(&vq->access_lock);
+ 				vring_invalidate(dev, vq);
++				rte_spinlock_unlock(&vq->access_lock);
++			}
+ 		}
+ 		break;
+ 	default:
+@@ -2873,6 +2885,9 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
+ 	case VHOST_USER_SET_VRING_ADDR:
+ 		vring_idx = msg->payload.addr.index;
+ 		break;
++	case VHOST_USER_SET_INFLIGHT_FD:
++		vring_idx = msg->payload.inflight.num_queues - 1;
++		break;
+ 	default:
+ 		return 0;
+ 	}
+@@ -2961,7 +2976,6 @@ vhost_user_msg_handler(int vid, int fd)
+ 		return -1;
+ 	}
+ 
+-	ret = 0;
+ 	request = msg.request.master;
+ 	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
+ 			vhost_message_str[request]) {
+@@ -3103,9 +3117,11 @@ vhost_user_msg_handler(int vid, int fd)
+ 	} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
+ 		VHOST_LOG_CONFIG(ERR,
+ 			"vhost message handling failed.\n");
+-		return -1;
++		ret = -1;
++		goto unlock;
+ 	}
+ 
++	ret = 0;
+ 	for (i = 0; i < dev->nr_vring; i++) {
+ 		struct vhost_virtqueue *vq = dev->virtqueue[i];
+ 		bool cur_ready = vq_is_ready(dev, vq);
+@@ -3116,10 +3132,11 @@ vhost_user_msg_handler(int vid, int fd)
+ 		}
+ 	}
+ 
++unlock:
+ 	if (unlock_required)
+ 		vhost_user_unlock_all_queue_pairs(dev);
+ 
+-	if (!virtio_is_ready(dev))
++	if (ret != 0 || !virtio_is_ready(dev))
+ 		goto out;
+ 
+ 	/*
+@@ -3146,7 +3163,7 @@ vhost_user_msg_handler(int vid, int fd)
+ 	}
+ 
+ out:
+-	return 0;
++	return ret;
+ }
+ 
+ static int process_slave_message_reply(struct virtio_net *dev,
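
The tail of vhost_user_msg_handler() previously returned -1 on RTE_VHOST_MSG_RESULT_ERR while the queue locks taken earlier in the function were still held; routing failures through the shared unlock: label guarantees the unlock runs on every path, and "ret != 0 || !virtio_is_ready(dev)" then skips the device-ready follow-up. The idiom in isolation, using a pthread mutex as a stand-in:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Single-exit error handling: every path after the lock is taken
     * funnels through "unlock", so no return can leak the mutex. */
    static int
    handle_message(int msg_ok, int dev_ready)
    {
        int ret = 0;

        pthread_mutex_lock(&lock);

        if (!msg_ok) {
            fprintf(stderr, "message handling failed\n");
            ret = -1;
            goto unlock;        /* was: return -1, leaking the lock */
        }

        /* ... per-vring bookkeeping happens here, under the lock ... */

    unlock:
        pthread_mutex_unlock(&lock);

        if (ret != 0 || !dev_ready)
            return ret;         /* skip the "device ready" follow-up */

        /* ... notify that the device became ready ... */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", handle_message(1, 1), handle_message(0, 1));
        return 0;
    }
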
+diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c
+index b3d954aab4..bf4d75b4bd 100644
+--- a/dpdk/lib/vhost/virtio_net.c
++++ b/dpdk/lib/vhost/virtio_net.c
+@@ -415,6 +415,16 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
+ 		csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
+ 
+ 	if (csum_l4) {
++		/*
++		 * Pseudo-header checksum must be set as per Virtio spec.
++		 *
++		 * Note: We don't propagate rte_net_intel_cksum_prepare()
++		 * errors, as it would have an impact on performance, and an
++		 * error would mean the packet is dropped by the guest instead
++		 * of being dropped here.
++		 */
++		rte_net_intel_cksum_prepare(m_buf);
++
+ 		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ 		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
+ 
+@@ -870,20 +880,21 @@ async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 	struct vhost_async *async = vq->async;
+ 	uint64_t mapped_len;
+ 	uint32_t buf_offset = 0;
+-	void *hpa;
++	void *host_iova;
+ 
+ 	while (cpy_len) {
+-		hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
++		host_iova = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+ 				buf_iova + buf_offset, cpy_len, &mapped_len);
+-		if (unlikely(!hpa)) {
+-			VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n", dev->vid, __func__);
++		if (unlikely(!host_iova)) {
++			VHOST_LOG_DATA(ERR, "(%d) %s: failed to get host_iova.\n",
++				       dev->vid, __func__);
+ 			return -1;
+ 		}
+ 
+ 		if (unlikely(async_iter_add_iovec(async,
+ 						(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
+ 							mbuf_offset),
+-						hpa, (size_t)mapped_len)))
++						host_iova, (size_t)mapped_len)))
+ 			return -1;
+ 
+ 		cpy_len -= (uint32_t)mapped_len;
+@@ -1900,16 +1911,22 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ 
+ 	vq = dev->virtqueue[queue_id];
+ 
++	if (!rte_spinlock_trylock(&vq->access_lock)) {
++		VHOST_LOG_DATA(DEBUG,
++			"%s: virtqueue %u is busy.\n",
++			__func__, queue_id);
++		return 0;
++	}
++
+ 	if (unlikely(!vq->async)) {
+ 		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ 			dev->vid, __func__, queue_id);
+-		return 0;
++		goto out;
+ 	}
+ 
+-	rte_spinlock_lock(&vq->access_lock);
+-
+ 	n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+ 
++out:
+ 	rte_spinlock_unlock(&vq->access_lock);
+ 
+ 	return n_pkts_cpl;
+@@ -2305,25 +2322,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 	uint32_t buf_avail, buf_offset;
+ 	uint64_t buf_addr, buf_len;
+ 	uint32_t mbuf_avail, mbuf_offset;
++	uint32_t hdr_remain = dev->vhost_hlen;
+ 	uint32_t cpy_len;
+ 	struct rte_mbuf *cur = m, *prev = m;
+ 	struct virtio_net_hdr tmp_hdr;
+ 	struct virtio_net_hdr *hdr = NULL;
+-	/* A counter to avoid desc dead loop chain */
+-	uint16_t vec_idx = 0;
++	uint16_t vec_idx;
+ 	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ 	int error = 0;
+ 
+-	buf_addr = buf_vec[vec_idx].buf_addr;
+-	buf_len = buf_vec[vec_idx].buf_len;
+-
+-	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
+-		error = -1;
+-		goto out;
+-	}
++	/*
++	 * The caller has checked the descriptors chain is larger than the
++	 * header size.
++	 */
+ 
+ 	if (virtio_net_with_host_offload(dev)) {
+-		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
++		if (unlikely(buf_vec[0].buf_len < sizeof(struct virtio_net_hdr))) {
+ 			/*
+ 			 * No luck, the virtio-net header doesn't fit
+ 			 * in a contiguous virtual area.
+@@ -2331,34 +2345,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 			copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
+ 			hdr = &tmp_hdr;
+ 		} else {
+-			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
++			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_vec[0].buf_addr);
+ 		}
+ 	}
+ 
+-	/*
+-	 * A virtio driver normally uses at least 2 desc buffers
+-	 * for Tx: the first for storing the header, and others
+-	 * for storing the data.
+-	 */
+-	if (unlikely(buf_len < dev->vhost_hlen)) {
+-		buf_offset = dev->vhost_hlen - buf_len;
+-		vec_idx++;
+-		buf_addr = buf_vec[vec_idx].buf_addr;
+-		buf_len = buf_vec[vec_idx].buf_len;
+-		buf_avail  = buf_len - buf_offset;
+-	} else if (buf_len == dev->vhost_hlen) {
+-		if (unlikely(++vec_idx >= nr_vec))
+-			goto out;
+-		buf_addr = buf_vec[vec_idx].buf_addr;
+-		buf_len = buf_vec[vec_idx].buf_len;
++	for (vec_idx = 0; vec_idx < nr_vec; vec_idx++) {
++		if (buf_vec[vec_idx].buf_len > hdr_remain)
++			break;
+ 
+-		buf_offset = 0;
+-		buf_avail = buf_len;
+-	} else {
+-		buf_offset = dev->vhost_hlen;
+-		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
++		hdr_remain -= buf_vec[vec_idx].buf_len;
+ 	}
+ 
++	buf_addr = buf_vec[vec_idx].buf_addr;
++	buf_len = buf_vec[vec_idx].buf_len;
++	buf_offset = hdr_remain;
++	buf_avail = buf_vec[vec_idx].buf_len - hdr_remain;
++
+ 	PRINT_PACKET(dev,
+ 			(uintptr_t)(buf_addr + buf_offset),
+ 			(uint32_t)buf_avail, 0);
+@@ -2551,6 +2553,14 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 
+ 		update_shadow_used_ring_split(vq, head_idx, 0);
+ 
++		if (unlikely(buf_len <= dev->vhost_hlen)) {
++			dropped += 1;
++			i++;
++			break;
++		}
++
++		buf_len -= dev->vhost_hlen;
++
+ 		err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
+ 		if (unlikely(err)) {
+ 			/*
+@@ -2754,6 +2764,11 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
+ 					 VHOST_ACCESS_RO) < 0))
+ 		return -1;
+ 
++	if (unlikely(buf_len <= dev->vhost_hlen))
++		return -1;
++
++	buf_len -= dev->vhost_hlen;
++
+ 	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
+ 		if (!allocerr_warned) {
+ 			VHOST_LOG_DATA(ERR,
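
copy_desc_to_mbuf() used to special-case the virtio-net header being smaller than, equal to, or larger than the first buffer; the hdr_remain loop instead walks the vector once, subtracting each buffer length until one extends past the header, and starts the payload at that offset. Reduced to plain C (the names here are illustrative, not DPDK API):

    #include <stdint.h>
    #include <stdio.h>

    struct buf {
        uint32_t len;
    };

    /* Find where the payload starts once 'hdr_len' header bytes are
     * skipped: returns the index of the first buffer holding payload
     * and stores the offset into it.  Mirrors the hdr_remain loop. */
    static int
    skip_header(const struct buf *vec, int nr, uint32_t hdr_len,
                uint32_t *offset)
    {
        uint32_t remain = hdr_len;
        int i;

        for (i = 0; i < nr; i++) {
            if (vec[i].len > remain)
                break;          /* payload begins inside this buffer */
            remain -= vec[i].len;
        }
        *offset = remain;
        return i;               /* == nr if the chain is all header */
    }

    int main(void)
    {
        struct buf vec[] = { { 4 }, { 4 }, { 64 } };
        uint32_t off;
        int idx = skip_header(vec, 3, 12, &off);  /* 12-byte header */

        printf("payload at buffer %d, offset %u\n", idx, off); /* 2, 4 */
        return 0;
    }
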
+diff --git a/dpdk/meson.build b/dpdk/meson.build
+index 12cb6e0e83..21dc51f00d 100644
+--- a/dpdk/meson.build
++++ b/dpdk/meson.build
+@@ -5,7 +5,7 @@ project('DPDK', 'C',
+         # Get version number from file.
+         # Fallback to "more" for Windows compatibility.
+         version: run_command(find_program('cat', 'more'),
+-            files('VERSION')).stdout().strip(),
++            files('VERSION'), check: true).stdout().strip(),
+         license: 'BSD',
+         default_options: ['buildtype=release', 'default_library=static'],
+         meson_version: '>= 0.49.2'
+@@ -27,6 +27,8 @@ endif
+ 
+ # set up some global vars for compiler, platform, configuration, etc.
+ cc = meson.get_compiler('c')
++dpdk_source_root = meson.current_source_dir()
++dpdk_build_root = meson.current_build_dir()
+ dpdk_conf = configuration_data()
+ dpdk_libraries = []
+ dpdk_static_libraries = []
+diff --git a/include/linux/automake.mk b/include/linux/automake.mk
+index 8f063f482e..f857c7e088 100644
+--- a/include/linux/automake.mk
++++ b/include/linux/automake.mk
+@@ -2,6 +2,7 @@ noinst_HEADERS += \
+ 	include/linux/netlink.h \
+ 	include/linux/netfilter/nf_conntrack_sctp.h \
+ 	include/linux/pkt_cls.h \
++	include/linux/gen_stats.h \
+ 	include/linux/tc_act/tc_mpls.h \
+ 	include/linux/tc_act/tc_pedit.h \
+ 	include/linux/tc_act/tc_skbedit.h \
+diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
+new file mode 100644
+index 0000000000..6fae6f727c
+--- /dev/null
++++ b/include/linux/gen_stats.h
+@@ -0,0 +1,81 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#ifndef __LINUX_GEN_STATS_WRAPPER_H
++#define __LINUX_GEN_STATS_WRAPPER_H 1
++
++#if defined(__KERNEL__) || defined(HAVE_TCA_STATS_PKT64)
++#include_next <linux/gen_stats.h>
++#else
++#include <linux/types.h>
++
++enum {
++	TCA_STATS_UNSPEC,
++	TCA_STATS_BASIC,
++	TCA_STATS_RATE_EST,
++	TCA_STATS_QUEUE,
++	TCA_STATS_APP,
++	TCA_STATS_RATE_EST64,
++	TCA_STATS_PAD,
++	TCA_STATS_BASIC_HW,
++	TCA_STATS_PKT64,
++	__TCA_STATS_MAX,
++};
++#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
++
++/**
++ * struct gnet_stats_basic - byte/packet throughput statistics
++ * @bytes: number of seen bytes
++ * @packets: number of seen packets
++ */
++struct gnet_stats_basic {
++	__u64	bytes;
++	__u32	packets;
++};
++
++/**
++ * struct gnet_stats_rate_est - rate estimator
++ * @bps: current byte rate
++ * @pps: current packet rate
++ */
++struct gnet_stats_rate_est {
++	__u32	bps;
++	__u32	pps;
++};
++
++/**
++ * struct gnet_stats_rate_est64 - rate estimator
++ * @bps: current byte rate
++ * @pps: current packet rate
++ */
++struct gnet_stats_rate_est64 {
++	__u64	bps;
++	__u64	pps;
++};
++
++/**
++ * struct gnet_stats_queue - queuing statistics
++ * @qlen: queue length
++ * @backlog: backlog size of queue
++ * @drops: number of dropped packets
++ * @requeues: number of requeues
++ * @overlimits: number of enqueues over the limit
++ */
++struct gnet_stats_queue {
++	__u32	qlen;
++	__u32	backlog;
++	__u32	drops;
++	__u32	requeues;
++	__u32	overlimits;
++};
++
++/**
++ * struct gnet_estimator - rate estimator configuration
++ * @interval: sampling period
++ * @ewma_log: the log of measurement window weight
++ */
++struct gnet_estimator {
++	signed char	interval;
++	unsigned char	ewma_log;
++};
++
++#endif /* __KERNEL__ || !HAVE_TCA_STATS_PKT64 */
++#endif /* __LINUX_GEN_STATS_WRAPPER_H */
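
The new include/linux/gen_stats.h is a wrapper header: when the build's kernel headers already define TCA_STATS_PKT64 (detected at configure time as HAVE_TCA_STATS_PKT64) it defers to them via #include_next, otherwise it supplies compatible definitions itself. The skeleton of such a wrapper, for a hypothetical symbol and feature macro:

    /* include/linux/foo.h -- hypothetical wrapper for <linux/foo.h>.
     * Built with -Iinclude, so this file shadows the system header. */
    #ifndef __LINUX_FOO_WRAPPER_H
    #define __LINUX_FOO_WRAPPER_H 1

    #ifdef HAVE_FOO_NEW_FIELD
    /* System header is new enough: defer to it.  #include_next (a GNU
     * extension) continues the search past this directory. */
    #include_next <linux/foo.h>
    #else
    /* Old system header: provide the missing pieces ourselves. */
    #include <linux/types.h>

    struct foo_stats {
        __u64 bytes;
        __u64 packets;          /* the field old kernels lacked */
    };
    #endif

    #endif /* __LINUX_FOO_WRAPPER_H */
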
+diff --git a/include/openvswitch/dynamic-string.h b/include/openvswitch/dynamic-string.h
+index ee18217107..1c262b0494 100644
+--- a/include/openvswitch/dynamic-string.h
++++ b/include/openvswitch/dynamic-string.h
+@@ -61,6 +61,8 @@ void ds_put_printable(struct ds *, const char *, size_t);
+ void ds_put_hex(struct ds *ds, const void *buf, size_t size);
+ void ds_put_hex_dump(struct ds *ds, const void *buf_, size_t size,
+                      uintptr_t ofs, bool ascii);
++void ds_put_sparse_hex_dump(struct ds *ds, const void *buf_, size_t size,
++                            uintptr_t ofs, bool ascii);
+ int ds_get_line(struct ds *, FILE *);
+ int ds_get_preprocessed_line(struct ds *, FILE *, int *line_number);
+ int ds_get_test_line(struct ds *, FILE *);
+diff --git a/include/openvswitch/flow.h b/include/openvswitch/flow.h
+index 3054015d93..df10cf579e 100644
+--- a/include/openvswitch/flow.h
++++ b/include/openvswitch/flow.h
+@@ -141,15 +141,14 @@ struct flow {
+     uint8_t nw_tos;             /* IP ToS (including DSCP and ECN). */
+     uint8_t nw_ttl;             /* IP TTL/Hop Limit. */
+     uint8_t nw_proto;           /* IP protocol or low 8 bits of ARP opcode. */
++    /* L4 (64-bit aligned) */
+     struct in6_addr nd_target;  /* IPv6 neighbor discovery (ND) target. */
+     struct eth_addr arp_sha;    /* ARP/ND source hardware address. */
+     struct eth_addr arp_tha;    /* ARP/ND target hardware address. */
+-    ovs_be16 tcp_flags;         /* TCP flags/ICMPv6 ND options type.
+-                                 * With L3 to avoid matching L4. */
++    ovs_be16 tcp_flags;         /* TCP flags/ICMPv6 ND options type. */
+     ovs_be16 pad2;              /* Pad to 64 bits. */
+     struct ovs_key_nsh nsh;     /* Network Service Header keys */
+ 
+-    /* L4 (64-bit aligned) */
+     ovs_be16 tp_src;            /* TCP/UDP/SCTP source port/ICMP type. */
+     ovs_be16 tp_dst;            /* TCP/UDP/SCTP destination port/ICMP code. */
+     ovs_be16 ct_tp_src;         /* CT original tuple source port/ICMP type. */
+@@ -179,7 +178,7 @@ BUILD_ASSERT_DECL(offsetof(struct flow, igmp_group_ip4) + sizeof(uint32_t)
+ enum {
+     FLOW_SEGMENT_1_ENDS_AT = offsetof(struct flow, dl_dst),
+     FLOW_SEGMENT_2_ENDS_AT = offsetof(struct flow, nw_src),
+-    FLOW_SEGMENT_3_ENDS_AT = offsetof(struct flow, tp_src),
++    FLOW_SEGMENT_3_ENDS_AT = offsetof(struct flow, nd_target),
+ };
+ BUILD_ASSERT_DECL(FLOW_SEGMENT_1_ENDS_AT % sizeof(uint64_t) == 0);
+ BUILD_ASSERT_DECL(FLOW_SEGMENT_2_ENDS_AT % sizeof(uint64_t) == 0);
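
Moving the "L4 (64-bit aligned)" boundary up to nd_target redefines which fields the flow segment logic treats as L4, and every FLOW_SEGMENT_*_ENDS_AT must stay 8-byte aligned -- which the BUILD_ASSERT_DECLs verify at compile time. The same guard written with standard C11, on a hypothetical layout:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct flow_like {
        uint32_t l2[4];          /* segment 1 */
        uint32_t l3[6];          /* segment 2 */
        uint64_t nd_target[2];   /* segment 3 starts here */
        uint16_t tp_src, tp_dst;
    };

    /* Fail the build, not the run, if the boundary is misaligned. */
    #define SEG3_ENDS_AT offsetof(struct flow_like, nd_target)
    _Static_assert(SEG3_ENDS_AT % sizeof(uint64_t) == 0,
                   "segment 3 boundary must be 64-bit aligned");

    int main(void)
    {
        printf("segment 3 starts at byte %zu\n", SEG3_ENDS_AT);
        return 0;
    }
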
+diff --git a/include/openvswitch/hmap.h b/include/openvswitch/hmap.h
+index 4e001cc692..beb48295b9 100644
+--- a/include/openvswitch/hmap.h
++++ b/include/openvswitch/hmap.h
+@@ -134,17 +134,17 @@ struct hmap_node *hmap_random_node(const struct hmap *);
+  * without using 'break', NODE will be NULL.  This is true for all of the
+  * HMAP_FOR_EACH_*() macros.
+  */
+-#define HMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, HMAP)               \
+-    for (INIT_CONTAINER(NODE, hmap_first_with_hash(HMAP, HASH), MEMBER); \
+-         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
+-         || ((NODE = NULL), false);                                     \
+-         ASSIGN_CONTAINER(NODE, hmap_next_with_hash(&(NODE)->MEMBER),   \
+-                          MEMBER))
+-#define HMAP_FOR_EACH_IN_BUCKET(NODE, MEMBER, HASH, HMAP)               \
+-    for (INIT_CONTAINER(NODE, hmap_first_in_bucket(HMAP, HASH), MEMBER); \
+-         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
+-         || ((NODE = NULL), false);                                     \
+-         ASSIGN_CONTAINER(NODE, hmap_next_in_bucket(&(NODE)->MEMBER), MEMBER))
++#define HMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, HMAP)                     \
++    for (INIT_MULTIVAR(NODE, MEMBER, hmap_first_with_hash(HMAP, HASH),        \
++                       struct hmap_node);                                     \
++         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
++         UPDATE_MULTIVAR(NODE, hmap_next_with_hash(ITER_VAR(NODE))))
++
++#define HMAP_FOR_EACH_IN_BUCKET(NODE, MEMBER, HASH, HMAP)                     \
++    for (INIT_MULTIVAR(NODE, MEMBER, hmap_first_in_bucket(HMAP, HASH),        \
++                       struct hmap_node);                                     \
++         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
++         UPDATE_MULTIVAR(NODE, hmap_next_in_bucket(ITER_VAR(NODE))))
+ 
+ static inline struct hmap_node *hmap_first_with_hash(const struct hmap *,
+                                                      size_t hash);
+@@ -170,54 +170,80 @@ bool hmap_contains(const struct hmap *, const struct hmap_node *);
+ /* Iterates through every node in HMAP. */
+ #define HMAP_FOR_EACH(NODE, MEMBER, HMAP) \
+     HMAP_FOR_EACH_INIT(NODE, MEMBER, HMAP, (void) 0)
+-#define HMAP_FOR_EACH_INIT(NODE, MEMBER, HMAP, ...)                     \
+-    for (INIT_CONTAINER(NODE, hmap_first(HMAP), MEMBER), __VA_ARGS__;   \
+-         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
+-         || ((NODE = NULL), false);                                     \
+-         ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER))
++#define HMAP_FOR_EACH_INIT(NODE, MEMBER, HMAP, ...)                           \
++    for (INIT_MULTIVAR_EXP(NODE, MEMBER, hmap_first(HMAP), struct hmap_node,  \
++                           __VA_ARGS__);                                      \
++         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
++         UPDATE_MULTIVAR(NODE, hmap_next(HMAP, ITER_VAR(NODE))))
+ 
+ /* Safe when NODE may be freed (not needed when NODE may be removed from the
+  * hash map but its members remain accessible and intact). */
+-#define HMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, HMAP) \
+-    HMAP_FOR_EACH_SAFE_INIT(NODE, NEXT, MEMBER, HMAP, (void) 0)
+-#define HMAP_FOR_EACH_SAFE_INIT(NODE, NEXT, MEMBER, HMAP, ...)          \
+-    for (INIT_CONTAINER(NODE, hmap_first(HMAP), MEMBER), __VA_ARGS__;   \
+-         ((NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))               \
+-          || ((NODE = NULL), false)                                     \
+-          ? INIT_CONTAINER(NEXT, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER), 1 \
+-          : 0);                                                         \
+-         (NODE) = (NEXT))
++#define HMAP_FOR_EACH_SAFE_LONG(NODE, NEXT, MEMBER, HMAP) \
++    HMAP_FOR_EACH_SAFE_LONG_INIT (NODE, NEXT, MEMBER, HMAP, (void) NEXT)
++
++#define HMAP_FOR_EACH_SAFE_LONG_INIT(NODE, NEXT, MEMBER, HMAP, ...)           \
++    for (INIT_MULTIVAR_SAFE_LONG_EXP(NODE, NEXT, MEMBER, hmap_first(HMAP),    \
++                                     struct hmap_node, __VA_ARGS__);          \
++         CONDITION_MULTIVAR_SAFE_LONG(NODE, NEXT, MEMBER,                     \
++                                      ITER_VAR(NODE) != NULL,                 \
++                            ITER_VAR(NEXT) = hmap_next(HMAP, ITER_VAR(NODE)), \
++                                      ITER_VAR(NEXT) != NULL);                \
++         UPDATE_MULTIVAR_SAFE_LONG(NODE, NEXT))
++
++/* Short versions of HMAP_FOR_EACH_SAFE. */
++#define HMAP_FOR_EACH_SAFE_SHORT(NODE, MEMBER, HMAP)                          \
++    HMAP_FOR_EACH_SAFE_SHORT_INIT (NODE, MEMBER, HMAP, (void) 0)
++
++#define HMAP_FOR_EACH_SAFE_SHORT_INIT(NODE, MEMBER, HMAP, ...)                \
++    for (INIT_MULTIVAR_SAFE_SHORT_EXP(NODE, MEMBER, hmap_first(HMAP),         \
++                                      struct hmap_node, __VA_ARGS__);         \
++         CONDITION_MULTIVAR_SAFE_SHORT(NODE, MEMBER,                          \
++                                       ITER_VAR(NODE) != NULL,                \
++                      ITER_NEXT_VAR(NODE) = hmap_next(HMAP, ITER_VAR(NODE))); \
++         UPDATE_MULTIVAR_SAFE_SHORT(NODE))
++
++#define HMAP_FOR_EACH_SAFE(...)                                               \
++    OVERLOAD_SAFE_MACRO(HMAP_FOR_EACH_SAFE_LONG,                              \
++                        HMAP_FOR_EACH_SAFE_SHORT,                             \
++                        4, __VA_ARGS__)
++
+ 
+ /* Continues an iteration from just after NODE. */
+ #define HMAP_FOR_EACH_CONTINUE(NODE, MEMBER, HMAP) \
+     HMAP_FOR_EACH_CONTINUE_INIT(NODE, MEMBER, HMAP, (void) 0)
+-#define HMAP_FOR_EACH_CONTINUE_INIT(NODE, MEMBER, HMAP, ...)            \
+-    for (ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER), \
+-         __VA_ARGS__;                                                   \
+-         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                \
+-         || ((NODE = NULL), false);                                     \
+-         ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER))
++#define HMAP_FOR_EACH_CONTINUE_INIT(NODE, MEMBER, HMAP, ...)                  \
++    for (INIT_MULTIVAR_EXP(NODE, MEMBER, hmap_next(HMAP, &(NODE)->MEMBER),    \
++                           struct hmap_node, __VA_ARGS__);                    \
++         CONDITION_MULTIVAR(NODE, MEMBER, ITER_VAR(NODE) != NULL);            \
++         UPDATE_MULTIVAR(NODE, hmap_next(HMAP, ITER_VAR(NODE))))
++
++struct hmap_pop_helper_iter__ {
++    size_t bucket;
++    struct hmap_node *node;
++};
+ 
+-static inline struct hmap_node *
+-hmap_pop_helper__(struct hmap *hmap, size_t *bucket) {
++static inline void
++hmap_pop_helper__(struct hmap *hmap, struct hmap_pop_helper_iter__ *iter) {
+ 
+-    for (; *bucket <= hmap->mask; (*bucket)++) {
+-        struct hmap_node *node = hmap->buckets[*bucket];
++    for (; iter->bucket <= hmap->mask; (iter->bucket)++) {
++        struct hmap_node *node = hmap->buckets[iter->bucket];
+ 
+         if (node) {
+             hmap_remove(hmap, node);
+-            return node;
++            iter->node = node;
++            return;
+         }
+     }
+-
+-    return NULL;
++    iter->node = NULL;
+ }
+ 
+-#define HMAP_FOR_EACH_POP(NODE, MEMBER, HMAP)                               \
+-    for (size_t bucket__ = 0;                                               \
+-         INIT_CONTAINER(NODE, hmap_pop_helper__(HMAP, &bucket__), MEMBER),  \
+-         (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER))                    \
+-         || ((NODE = NULL), false);)
++#define HMAP_FOR_EACH_POP(NODE, MEMBER, HMAP)                                 \
++    for (struct hmap_pop_helper_iter__ ITER_VAR(NODE) = { 0, NULL };          \
++         hmap_pop_helper__(HMAP, &ITER_VAR(NODE)),                            \
++         (ITER_VAR(NODE).node != NULL) ?                                      \
++            (((NODE) = OBJECT_CONTAINING(ITER_VAR(NODE).node,                 \
++                                         NODE, MEMBER)),1):                   \
++            (((NODE) = NULL), 0);)
+ 
+ static inline struct hmap_node *hmap_first(const struct hmap *);
+ static inline struct hmap_node *hmap_next(const struct hmap *,
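
The rewritten hmap iterators keep the raw cursor in a hidden ITER_VAR() and derive NODE from it only while it is non-NULL, so a loop that runs to completion leaves NODE == NULL instead of a bogus pointer computed by OBJECT_CONTAINING on NULL; HMAP_FOR_EACH_POP gains the same property via the helper struct carrying the bucket index and the popped node together. The NULL-on-exit idea in a much simplified, list-based form:

    #include <stddef.h>
    #include <stdio.h>

    struct node {
        int value;
        struct node *next;
    };

    /* Keep the raw cursor in a shadow variable and derive VAR from it
     * each round; when the loop ends normally, VAR is NULL rather
     * than a garbage pointer. */
    #define FOR_EACH(VAR, HEAD)                                \
        for (struct node *iter_##VAR = (HEAD);                 \
             ((VAR) = iter_##VAR) != NULL;                     \
             iter_##VAR = iter_##VAR->next)

    int main(void)
    {
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *n;

        FOR_EACH (n, &a)
            printf("%d ", n->value);
        printf("\nafter loop: %p\n", (void *)n);  /* NULL, safe to test */
        return 0;
    }
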
+diff --git a/include/openvswitch/list.h b/include/openvswitch/list.h
+index 8ad5eeb327..6272d340cf 100644
+--- a/include/openvswitch/list.h
++++ b/include/openvswitch/list.h
+@@ -72,37 +72,74 @@ static inline bool ovs_list_is_empty(const struct ovs_list *);
+ static inline bool ovs_list_is_singleton(const struct ovs_list *);
+ static inline bool ovs_list_is_short(const struct ovs_list *);
+ 
+-#define LIST_FOR_EACH(ITER, MEMBER, LIST)                               \
+-    for (INIT_CONTAINER(ITER, (LIST)->next, MEMBER);                    \
+-         &(ITER)->MEMBER != (LIST);                                     \
+-         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.next, MEMBER))
+-#define LIST_FOR_EACH_CONTINUE(ITER, MEMBER, LIST)                      \
+-    for (ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.next, MEMBER);             \
+-         &(ITER)->MEMBER != (LIST);                                     \
+-         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.next, MEMBER))
+-#define LIST_FOR_EACH_REVERSE(ITER, MEMBER, LIST)                       \
+-    for (INIT_CONTAINER(ITER, (LIST)->prev, MEMBER);                    \
+-         &(ITER)->MEMBER != (LIST);                                     \
+-         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER))
+-#define LIST_FOR_EACH_REVERSE_SAFE(ITER, PREV, MEMBER, LIST)        \
+-    for (INIT_CONTAINER(ITER, (LIST)->prev, MEMBER);                \
+-         (&(ITER)->MEMBER != (LIST)                                 \
+-          ? INIT_CONTAINER(PREV, (ITER)->MEMBER.prev, MEMBER), 1    \
+-          : 0);                                                     \
+-         (ITER) = (PREV))
+-#define LIST_FOR_EACH_REVERSE_CONTINUE(ITER, MEMBER, LIST)              \
+-    for (ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER);           \
+-         &(ITER)->MEMBER != (LIST);                                     \
+-         ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER))
+-#define LIST_FOR_EACH_SAFE(ITER, NEXT, MEMBER, LIST)               \
+-    for (INIT_CONTAINER(ITER, (LIST)->next, MEMBER);               \
+-         (&(ITER)->MEMBER != (LIST)                                \
+-          ? INIT_CONTAINER(NEXT, (ITER)->MEMBER.next, MEMBER), 1   \
+-          : 0);                                                    \
+-         (ITER) = (NEXT))
+-#define LIST_FOR_EACH_POP(ITER, MEMBER, LIST)                      \
+-    while (!ovs_list_is_empty(LIST)                                    \
+-           && (INIT_CONTAINER(ITER, ovs_list_pop_front(LIST), MEMBER), 1))
++#define LIST_FOR_EACH(VAR, MEMBER, LIST)                                      \
++    for (INIT_MULTIVAR(VAR, MEMBER, (LIST)->next, struct ovs_list);           \
++         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
++         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->next))
++
++#define LIST_FOR_EACH_CONTINUE(VAR, MEMBER, LIST)                             \
++    for (INIT_MULTIVAR(VAR, MEMBER, VAR->MEMBER.next, struct ovs_list);       \
++         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
++         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->next))
++
++#define LIST_FOR_EACH_REVERSE(VAR, MEMBER, LIST)                              \
++    for (INIT_MULTIVAR(VAR, MEMBER, (LIST)->prev, struct ovs_list);           \
++         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
++         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->prev))
++
++#define LIST_FOR_EACH_REVERSE_CONTINUE(VAR, MEMBER, LIST)                     \
++    for (INIT_MULTIVAR(VAR, MEMBER, VAR->MEMBER.prev, struct ovs_list);       \
++         CONDITION_MULTIVAR(VAR, MEMBER, ITER_VAR(VAR) != (LIST));            \
++         UPDATE_MULTIVAR(VAR, ITER_VAR(VAR)->prev))
++
++/* LONG version of SAFE iterators. */
++#define LIST_FOR_EACH_REVERSE_SAFE_LONG(VAR, PREV, MEMBER, LIST)              \
++    for (INIT_MULTIVAR_SAFE_LONG(VAR, PREV, MEMBER, (LIST)->prev,             \
++                                 struct ovs_list);                            \
++         CONDITION_MULTIVAR_SAFE_LONG(VAR, PREV, MEMBER,                      \
++                                      ITER_VAR(VAR) != (LIST),                \
++                                      ITER_VAR(PREV) = ITER_VAR(VAR)->prev,   \
++                                      ITER_VAR(PREV) != (LIST));              \
++         UPDATE_MULTIVAR_SAFE_LONG(VAR, PREV))
++
++#define LIST_FOR_EACH_SAFE_LONG(VAR, NEXT, MEMBER, LIST)                      \
++    for (INIT_MULTIVAR_SAFE_LONG(VAR, NEXT, MEMBER, (LIST)->next,             \
++                                 struct ovs_list);                            \
++         CONDITION_MULTIVAR_SAFE_LONG(VAR, NEXT, MEMBER,                      \
++                                      ITER_VAR(VAR) != (LIST),                \
++                                      ITER_VAR(NEXT) = ITER_VAR(VAR)->next,   \
++                                      ITER_VAR(NEXT) != (LIST));              \
++         UPDATE_MULTIVAR_SAFE_LONG(VAR, NEXT))
++
++/* SHORT version of SAFE iterators. */
++#define LIST_FOR_EACH_REVERSE_SAFE_SHORT(VAR, MEMBER, LIST)                   \
++    for (INIT_MULTIVAR_SAFE_SHORT(VAR, MEMBER, (LIST)->prev, struct ovs_list);\
++         CONDITION_MULTIVAR_SAFE_SHORT(VAR, MEMBER,                           \
++                                       ITER_VAR(VAR) != (LIST),               \
++                                 ITER_NEXT_VAR(VAR) = ITER_VAR(VAR)->prev);   \
++         UPDATE_MULTIVAR_SAFE_SHORT(VAR))
++
++#define LIST_FOR_EACH_SAFE_SHORT(VAR, MEMBER, LIST)                           \
++    for (INIT_MULTIVAR_SAFE_SHORT(VAR, MEMBER, (LIST)->next, struct ovs_list);\
++         CONDITION_MULTIVAR_SAFE_SHORT(VAR, MEMBER,                           \
++                                       ITER_VAR(VAR) != (LIST),               \
++                                 ITER_NEXT_VAR(VAR) = ITER_VAR(VAR)->next);   \
++         UPDATE_MULTIVAR_SAFE_SHORT(VAR))
++
++#define LIST_FOR_EACH_SAFE(...)                      \
++    OVERLOAD_SAFE_MACRO(LIST_FOR_EACH_SAFE_LONG,     \
++                        LIST_FOR_EACH_SAFE_SHORT,    \
++                        4, __VA_ARGS__)
++
++#define LIST_FOR_EACH_REVERSE_SAFE(...)                        \
++    OVERLOAD_SAFE_MACRO(LIST_FOR_EACH_REVERSE_SAFE_LONG,       \
++                        LIST_FOR_EACH_REVERSE_SAFE_SHORT,      \
++                        4, __VA_ARGS__)
++
++#define LIST_FOR_EACH_POP(ITER, MEMBER, LIST)                                 \
++    while (!ovs_list_is_empty(LIST) ?                                         \
++           (INIT_CONTAINER(ITER, ovs_list_pop_front(LIST), MEMBER), 1) :      \
++           (ITER = NULL, 0))
+ 
+ /* Inline implementations. */
+ 
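/* Editorial sketch, not part of the patch: typical use of the reworked list
 * iterators.  'struct item' and 'drop_negative' are hypothetical.  In the new
 * SHORT form the next pointer is tracked internally, and the iteration
 * variable is NULL after the loop terminates normally, so it can no longer be
 * used as a "found it" flag after the loop.  LIST_FOR_EACH_POP likewise now
 * sets ITER to NULL once the list is exhausted. */
#include <stdlib.h>
#include "openvswitch/list.h"

struct item {
    struct ovs_list node;       /* Linked into a 'struct ovs_list' head. */
    int value;
};

static void
drop_negative(struct ovs_list *items)
{
    struct item *it;

    LIST_FOR_EACH_SAFE (it, node, items) {  /* SHORT form: no NEXT needed. */
        if (it->value < 0) {
            ovs_list_remove(&it->node);     /* Safe: next was precalculated. */
            free(it);
        }
    }
}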
+diff --git a/include/openvswitch/ofp-actions.h b/include/openvswitch/ofp-actions.h
+index 41bcb55d20..b7231c7bb3 100644
+--- a/include/openvswitch/ofp-actions.h
++++ b/include/openvswitch/ofp-actions.h
+@@ -218,7 +218,9 @@ struct ofpact *ofpact_next_flattened(const struct ofpact *);
+ static inline struct ofpact *
+ ofpact_end(const struct ofpact *ofpacts, size_t ofpacts_len)
+ {
+-    return ALIGNED_CAST(struct ofpact *, (uint8_t *) ofpacts + ofpacts_len);
++    return ofpacts
++           ? ALIGNED_CAST(struct ofpact *, (uint8_t *) ofpacts + ofpacts_len)
++           : NULL;
+ }
+ 
+ static inline bool
+diff --git a/include/openvswitch/ofpbuf.h b/include/openvswitch/ofpbuf.h
+index 1136ba04c8..32f03ea837 100644
+--- a/include/openvswitch/ofpbuf.h
++++ b/include/openvswitch/ofpbuf.h
+@@ -179,7 +179,11 @@ static inline void ofpbuf_delete(struct ofpbuf *b)
+ static inline void *ofpbuf_at(const struct ofpbuf *b, size_t offset,
+                               size_t size)
+ {
+-    return offset + size <= b->size ? (char *) b->data + offset : NULL;
++    if (offset + size <= b->size) {
++        ovs_assert(b->data);
++        return (char *) b->data + offset;
++    }
++    return NULL;
+ }
+ 
+ /* Returns a pointer to byte 'offset' in 'b', which must contain at least
+@@ -188,20 +192,23 @@ static inline void *ofpbuf_at_assert(const struct ofpbuf *b, size_t offset,
+                                      size_t size)
+ {
+     ovs_assert(offset + size <= b->size);
+-    return ((char *) b->data) + offset;
++    ovs_assert(b->data);
++    return (char *) b->data + offset;
+ }
+ 
+ /* Returns a pointer to byte following the last byte of data in use in 'b'. */
+ static inline void *ofpbuf_tail(const struct ofpbuf *b)
+ {
+-    return (char *) b->data + b->size;
++    ovs_assert(b->data || !b->size);
++    return b->data ? (char *) b->data + b->size : NULL;
+ }
+ 
+ /* Returns a pointer to byte following the last byte allocated for use (but
+  * not necessarily in use) in 'b'. */
+ static inline void *ofpbuf_end(const struct ofpbuf *b)
+ {
+-    return (char *) b->base + b->allocated;
++    ovs_assert(b->base || !b->allocated);
++    return b->base ? (char *) b->base + b->allocated : NULL;
+ }
+ 
+ /* Returns the number of bytes of headroom in 'b', that is, the number of bytes
+@@ -249,6 +256,11 @@ static inline void *ofpbuf_pull(struct ofpbuf *b, size_t size)
+ {
+     ovs_assert(b->size >= size);
+     void *data = b->data;
++
++    if (!size) {
++        return data;
++    }
++
+     b->data = (char*)b->data + size;
+     b->size = b->size - size;
+     return data;
+@@ -270,7 +282,7 @@ static inline struct ofpbuf *ofpbuf_from_list(const struct ovs_list *list)
+ static inline bool ofpbuf_equal(const struct ofpbuf *a, const struct ofpbuf *b)
+ {
+     return a->size == b->size &&
+-           memcmp(a->data, b->data, a->size) == 0;
++           (a->size == 0 || memcmp(a->data, b->data, a->size) == 0);
+ }
+ 
+ static inline bool ofpbuf_oversized(const struct ofpbuf *ofpacts)
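/* Editorial sketch, not part of the patch: what the ofpbuf hunks above fix.
 * An empty ofpbuf may carry data == NULL, and evaluating 'NULL + 0' or
 * memcmp(NULL, NULL, 0) is undefined behavior in C even though the size is
 * zero; sanitizer builds report it.  Assumes ofpbuf_use_const() accepts a
 * NULL buffer of zero size. */
#include "openvswitch/ofpbuf.h"
#include "openvswitch/util.h"

static void
empty_buffer_checks(void)
{
    struct ofpbuf a, b;

    ofpbuf_use_const(&a, NULL, 0);    /* Empty: data == NULL, size == 0. */
    ofpbuf_use_const(&b, NULL, 0);

    ovs_assert(ofpbuf_equal(&a, &b)); /* No memcmp() on NULL anymore. */
    ovs_assert(!ofpbuf_tail(&a));     /* Plain NULL, not NULL + 0. */
    ovs_assert(!ofpbuf_at(&a, 0, 1)); /* Out of range still returns NULL. */
}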
+diff --git a/include/openvswitch/shash.h b/include/openvswitch/shash.h
+index c249e13e1f..4e7badd4dc 100644
+--- a/include/openvswitch/shash.h
++++ b/include/openvswitch/shash.h
+@@ -41,13 +41,24 @@ struct shash {
+                         BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
+                         BUILD_ASSERT_TYPE(SHASH, struct shash *))
+ 
+-#define SHASH_FOR_EACH_SAFE(SHASH_NODE, NEXT, SHASH)        \
+-    HMAP_FOR_EACH_SAFE_INIT (                               \
++#define SHASH_FOR_EACH_SAFE_SHORT(SHASH_NODE, SHASH)        \
++    HMAP_FOR_EACH_SAFE_SHORT_INIT (                         \
++        SHASH_NODE, node, &(SHASH)->map,                    \
++        BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
++        BUILD_ASSERT_TYPE(SHASH, struct shash *))
++
++#define SHASH_FOR_EACH_SAFE_LONG(SHASH_NODE, NEXT, SHASH)   \
++    HMAP_FOR_EACH_SAFE_LONG_INIT (                          \
+         SHASH_NODE, NEXT, node, &(SHASH)->map,              \
+         BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
+         BUILD_ASSERT_TYPE(NEXT, struct shash_node *),       \
+         BUILD_ASSERT_TYPE(SHASH, struct shash *))
+ 
++#define SHASH_FOR_EACH_SAFE(...)                                              \
++    OVERLOAD_SAFE_MACRO(SHASH_FOR_EACH_SAFE_LONG,                             \
++                        SHASH_FOR_EACH_SAFE_SHORT,                            \
++                        3, __VA_ARGS__)
++
+ void shash_init(struct shash *);
+ void shash_destroy(struct shash *);
+ void shash_destroy_free_data(struct shash *);
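/* Editorial sketch, not part of the patch: both spellings of the shash SAFE
 * iterator compile after this change; the 2-argument SHORT form is the new
 * preferred one. */
#include "openvswitch/shash.h"

static void
clear_all(struct shash *sh)
{
    struct shash_node *node;

    SHASH_FOR_EACH_SAFE (node, sh) {  /* SHORT form, no NEXT variable. */
        shash_delete(sh, node);       /* Frees the node; data is ours. */
    }
}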
+diff --git a/include/openvswitch/util.h b/include/openvswitch/util.h
+index 228b185c3a..8e6c46a85f 100644
+--- a/include/openvswitch/util.h
++++ b/include/openvswitch/util.h
+@@ -145,6 +145,150 @@ OVS_NO_RETURN void ovs_assert_failure(const char *, const char *, const char *);
+ #define INIT_CONTAINER(OBJECT, POINTER, MEMBER) \
+     ((OBJECT) = NULL, ASSIGN_CONTAINER(OBJECT, POINTER, MEMBER))
+ 
++/* Multi-variable container iterators.
++ *
++ * The following macros facilitate safe iteration over data structures
++ * contained in objects.  They do so by using an internal iterator variable of
++ * the member object pointer type (i.e., a pointer to the data structure).
++ */
++
++/* Multi-variable iterator variable name.
++ * Returns the name of the internal iterator variable.
++ */
++#define ITER_VAR(NAME) NAME ## __iterator__
++
++/* Multi-variable initialization. Declares an internal iterator variable
++ * initialized to the provided pointer. The type of the iterator variable is
++ * ITER_TYPE*. It must be the same type as &VAR->MEMBER.
++ *
++ * The _EXP version evaluates the extra expressions once.
++ */
++#define INIT_MULTIVAR(VAR, MEMBER, POINTER, ITER_TYPE)                  \
++    INIT_MULTIVAR_EXP(VAR, MEMBER, POINTER, ITER_TYPE, (void) 0)
++
++#define INIT_MULTIVAR_EXP(VAR, MEMBER, POINTER, ITER_TYPE, ...)         \
++    ITER_TYPE *ITER_VAR(VAR) = ( __VA_ARGS__ , (ITER_TYPE *) POINTER)
++
++/* Multi-variable condition.
++ * Evaluates the condition expression (that must be based on the internal
++ * iterator variable). Only if the result of the expression is true is OBJECT
++ * set to the object containing the current value of the iterator variable.
++ *
++ * It is up to the caller to make sure it is safe to run OBJECT_CONTAINING on
++ * the pointers that satisfy the condition.
++ */
++#define CONDITION_MULTIVAR(VAR, MEMBER, EXPR)                                 \
++    ((EXPR) ?                                                                 \
++     (((VAR) = OBJECT_CONTAINING(ITER_VAR(VAR), VAR, MEMBER)), 1) :           \
++     (((VAR) = NULL), 0))
++
++/* Multi-variable update.
++ * Sets the iterator value to NEXT_ITER.
++ */
++#define UPDATE_MULTIVAR(VAR, NEXT_ITER)                                       \
++    (ITER_VAR(VAR) = NEXT_ITER)
++
++/* In the safe version of the multi-variable container iteration, the next
++ * value of the iterator is precalculated in the condition expression.
++ * This allows the iterator to be freed inside the loop.
++ *
++ * Two versions of the macros are provided:
++ *
++ * * In the _SHORT version, the user does not have to provide a variable to
++ * store the next value of the iterator. Instead, a second iterator variable
++ * is declared in the INIT_ macro and its name is determined by
++ * ITER_NEXT_VAR(OBJECT).
++ *
++ * * In the _LONG version, the user provides another variable of the same type
++ * as the iterator object variable to store the next containing object.
++ * We still declare an iterator variable inside the loop, but in this case its
++ * name is derived from the name of the next containing variable.
++ * The value of the next containing object will only be set
++ * (via OBJECT_CONTAINING) if an additional condition is satisfied. This
++ * second condition must ensure it is safe to call OBJECT_CONTAINING on the
++ * next iterator variable.
++ * With respect to the value of the next containing object:
++ *  - Inside of the loop: the variable is either NULL or safe to use.
++ *  - Outside of the loop: the variable is NULL if the loop ends normally.
++ *     If the loop ends with a "break;" statement, the rules for "inside of
++ *     the loop" apply.
++ */
++#define ITER_NEXT_VAR(NAME) NAME ## __iterator__next__
++
++/* Safe initialization declares both iterators. */
++#define INIT_MULTIVAR_SAFE_SHORT(VAR, MEMBER, POINTER, ITER_TYPE)             \
++    INIT_MULTIVAR_SAFE_SHORT_EXP(VAR, MEMBER, POINTER, ITER_TYPE, (void) 0)
++
++#define INIT_MULTIVAR_SAFE_SHORT_EXP(VAR, MEMBER, POINTER, ITER_TYPE, ...)    \
++    ITER_TYPE *ITER_VAR(VAR) = ( __VA_ARGS__ , (ITER_TYPE *) POINTER),        \
++        *ITER_NEXT_VAR(VAR) = NULL
++
++/* Evaluate the condition expression and, if satisfied, update the _next_
++ * iterator with the NEXT_EXPR.
++ * Both EXPR and NEXT_EXPR should only use ITER_VAR(VAR) and
++ * ITER_NEXT_VAR(VAR).
++ */
++#define CONDITION_MULTIVAR_SAFE_SHORT(VAR, MEMBER, EXPR, NEXT_EXPR)           \
++    ((EXPR) ?                                                                 \
++     (((VAR) = OBJECT_CONTAINING(ITER_VAR(VAR), VAR, MEMBER)),                \
++      (NEXT_EXPR), 1) :                                                       \
++     (((VAR) = NULL), 0))
++
++#define UPDATE_MULTIVAR_SAFE_SHORT(VAR)                                       \
++    UPDATE_MULTIVAR(VAR, ITER_NEXT_VAR(VAR))
++
++/* _LONG versions of the macros. */
++
++#define INIT_MULTIVAR_SAFE_LONG(VAR, NEXT_VAR, MEMBER, POINTER, ITER_TYPE)    \
++    INIT_MULTIVAR_SAFE_LONG_EXP(VAR, NEXT_VAR, MEMBER, POINTER, ITER_TYPE,    \
++                                (void) 0)                                     \
++
++#define INIT_MULTIVAR_SAFE_LONG_EXP(VAR, NEXT_VAR, MEMBER, POINTER,           \
++                                    ITER_TYPE, ...)                           \
++    ITER_TYPE  *ITER_VAR(VAR) = ( __VA_ARGS__ , (ITER_TYPE *) POINTER),       \
++        *ITER_VAR(NEXT_VAR) = NULL
++
++/* Evaluate the condition expression and, if satisfied, update the _next_
++ * iterator with the NEXT_EXPR. Afterwards, evaluate the NEXT_COND and, if
++ * satisfied, set the value to NEXT_VAR. NEXT_COND must use ITER_VAR(NEXT_VAR).
++ *
++ * Both EXPR and NEXT_EXPR should only use ITER_VAR(VAR) and
++ * ITER_VAR(NEXT_VAR).
++ */
++#define CONDITION_MULTIVAR_SAFE_LONG(VAR, NEXT_VAR, MEMBER, EXPR, NEXT_EXPR,  \
++                                     NEXT_COND)                               \
++    ((EXPR) ?                                                                 \
++     (((VAR) = OBJECT_CONTAINING(ITER_VAR(VAR), VAR, MEMBER)),                \
++      (NEXT_EXPR), ((NEXT_COND) ?                                             \
++       ((NEXT_VAR) =                                                          \
++        OBJECT_CONTAINING(ITER_VAR(NEXT_VAR), NEXT_VAR, MEMBER)) :            \
++       ((NEXT_VAR) = NULL)), 1) :                                             \
++     (((VAR) = NULL), ((NEXT_VAR) = NULL), 0))
++
++#define UPDATE_MULTIVAR_SAFE_LONG(VAR, NEXT_VAR)                              \
++    UPDATE_MULTIVAR(VAR, ITER_VAR(NEXT_VAR))
++
++/* Helpers to allow overloading the *_SAFE iterator macros and select either
++ * the LONG or the SHORT version depending on the number of arguments.
++ */
++#define GET_SAFE_MACRO2(_1, _2, NAME, ...) NAME
++#define GET_SAFE_MACRO3(_1, _2, _3, NAME, ...) NAME
++#define GET_SAFE_MACRO4(_1, _2, _3, _4, NAME, ...) NAME
++#define GET_SAFE_MACRO5(_1, _2, _3, _4, _5, NAME, ...) NAME
++#define GET_SAFE_MACRO6(_1, _2, _3, _4, _5, _6, NAME, ...) NAME
++#define GET_SAFE_MACRO(MAX_ARGS) GET_SAFE_MACRO ## MAX_ARGS
++
++/* MSVC treats __VA_ARGS__ as a simple token in argument lists. Introduce
++ * a level of indirection to work around that. */
++#define EXPAND_MACRO(name, args) name args
++
++/* Overload the LONG and the SHORT version of the macros. MAX_ARGS is the
++ * maximum number of arguments (i.e., the number of arguments of the LONG
++ * version). */
++#define OVERLOAD_SAFE_MACRO(LONG, SHORT, MAX_ARGS, ...) \
++        EXPAND_MACRO(GET_SAFE_MACRO(MAX_ARGS), \
++                     (__VA_ARGS__, LONG, SHORT))(__VA_ARGS__)
++
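/* Editorial note, not part of the patch: how the overload helper resolves.
 * With MAX_ARGS == 4, OVERLOAD_SAFE_MACRO appends LONG and SHORT after the
 * user's arguments, and GET_SAFE_MACRO4 returns whatever lands in its fifth
 * slot:
 *
 *   LIST_FOR_EACH_SAFE(var, member, list)           -- 3 user args --
 *     GET_SAFE_MACRO4(var, member, list, LONG, SHORT) -> SHORT
 *
 *   LIST_FOR_EACH_SAFE(var, next, member, list)     -- 4 user args --
 *     GET_SAFE_MACRO4(var, next, member, list, LONG, SHORT) -> LONG
 *
 * EXPAND_MACRO adds the extra expansion pass MSVC needs before the argument
 * counting happens. */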
+ /* Returns the number of elements in ARRAY. */
+ #define ARRAY_SIZE(ARRAY) __ARRAY_SIZE(ARRAY)
+ 
+@@ -285,6 +429,9 @@ is_pow2(uintmax_t x)
   * segfault, so it is important to be aware of correct alignment. */
  #define ALIGNED_CAST(TYPE, ATTR) ((TYPE) (void *) (ATTR))
  
@@ -1219,6 +50609,33 @@ index c502d23112..72e2ec5f71 100644
  
  #define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
      for (*(CURSOR) = cmap_cursor_start(CMAP);               \
+diff --git a/lib/conntrack-other.c b/lib/conntrack-other.c
+index d3b4601858..7f3e63c384 100644
+--- a/lib/conntrack-other.c
++++ b/lib/conntrack-other.c
+@@ -48,18 +48,19 @@ other_conn_update(struct conntrack *ct, struct conn *conn_,
+                   struct dp_packet *pkt OVS_UNUSED, bool reply, long long now)
+ {
+     struct conn_other *conn = conn_other_cast(conn_);
+-    enum ct_update_res ret = CT_UPDATE_VALID;
+ 
+     if (reply && conn->state != OTHERS_BIDIR) {
+         conn->state = OTHERS_BIDIR;
+     } else if (conn->state == OTHERS_FIRST) {
+         conn->state = OTHERS_MULTIPLE;
+-        ret = CT_UPDATE_VALID_NEW;
+     }
+ 
+     conn_update_expiration(ct, &conn->up, other_timeouts[conn->state], now);
+ 
+-    return ret;
++    if (conn->state == OTHERS_BIDIR) {
++        return CT_UPDATE_VALID;
++    }
++    return CT_UPDATE_VALID_NEW;
+ }
+ 
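/* Editorial note, not part of the patch: the hunk above changes behavior for
 * connections already in OTHERS_MULTIPLE.  Previously they fell through with
 * the preset CT_UPDATE_VALID; now the return value is derived from the final
 * state, so every non-bidirectional connection consistently reports
 * CT_UPDATE_VALID_NEW and only OTHERS_BIDIR reports CT_UPDATE_VALID. */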
+ static bool
 diff --git a/lib/conntrack.c b/lib/conntrack.c
 index 33a1a92953..0103fb5396 100644
 --- a/lib/conntrack.c
@@ -1578,10 +50995,10 @@ index 7bc1e9e9a5..fb2084392a 100644
      CHECK_LOOKUP_FUNCTION(9, 1, use_vpop);
      CHECK_LOOKUP_FUNCTION(5, 3, use_vpop);
 diff --git a/lib/dpif-netdev-lookup.c b/lib/dpif-netdev-lookup.c
-index bd0a99abe7..b1d2801575 100644
+index bd0a99abe7..8612501474 100644
 --- a/lib/dpif-netdev-lookup.c
 +++ b/lib/dpif-netdev-lookup.c
-@@ -18,10 +18,25 @@
+@@ -18,9 +18,26 @@
  #include <errno.h>
  #include "dpif-netdev-lookup.h"
  
@@ -1589,8 +51006,10 @@ index bd0a99abe7..b1d2801575 100644
  #include "openvswitch/vlog.h"
  
  VLOG_DEFINE_THIS_MODULE(dpif_netdev_lookup);
- 
-+#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#define DPCLS_IMPL_AVX512_CHECK (__x86_64__ && HAVE_AVX512F \
++    && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++
++#if DPCLS_IMPL_AVX512_CHECK
 +static dpcls_subtable_lookup_func
 +dpcls_subtable_avx512_gather_probe(uint32_t u0_bits, uint32_t u1_bits)
 +{
@@ -1603,10 +51022,18 @@ index bd0a99abe7..b1d2801575 100644
 +        cpu_has_isa(OVS_CPU_ISA_X86_VPOPCNTDQ));
 +}
 +#endif
-+
+ 
  /* Actual list of implementations goes here */
  static struct dpcls_subtable_lookup_info_t subtable_lookups[] = {
-     /* The autovalidator implementation will not be used by default, it must
+@@ -43,7 +60,7 @@ static struct dpcls_subtable_lookup_info_t subtable_lookups[] = {
+       .probe = dpcls_subtable_generic_probe,
+       .name = "generic", },
+ 
+-#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if DPCLS_IMPL_AVX512_CHECK
+     /* Only available on x86_64 bit builds with SSE 4.2 used for OVS core. */
+     { .prio = 0,
+       .probe = dpcls_subtable_avx512_gather_probe,
 diff --git a/lib/dpif-netdev-lookup.h b/lib/dpif-netdev-lookup.h
 index 59f51faa0e..5d2d845945 100644
 --- a/lib/dpif-netdev-lookup.h
@@ -1622,10 +51049,10 @@ index 59f51faa0e..5d2d845945 100644
  
  /* Subtable registration and iteration helpers */
 diff --git a/lib/dpif-netdev-private-dpif.c b/lib/dpif-netdev-private-dpif.c
-index 84d4ec156e..5ae119a308 100644
+index 84d4ec156e..ef4cee2bad 100644
 --- a/lib/dpif-netdev-private-dpif.c
 +++ b/lib/dpif-netdev-private-dpif.c
-@@ -22,6 +22,7 @@
+@@ -22,17 +22,33 @@
  #include <errno.h>
  #include <string.h>
  
@@ -1633,11 +51060,17 @@ index 84d4ec156e..5ae119a308 100644
  #include "openvswitch/dynamic-string.h"
  #include "openvswitch/vlog.h"
  #include "util.h"
-@@ -33,6 +34,19 @@ enum dpif_netdev_impl_info_idx {
+ 
+ VLOG_DEFINE_THIS_MODULE(dpif_netdev_impl);
++#define DPIF_NETDEV_IMPL_AVX512_CHECK (__x86_64__ && HAVE_AVX512F \
++    && HAVE_LD_AVX512_GOOD && __SSE4_2__)
+ 
+ enum dpif_netdev_impl_info_idx {
+     DPIF_NETDEV_IMPL_SCALAR,
      DPIF_NETDEV_IMPL_AVX512
  };
  
-+#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if DPIF_NETDEV_IMPL_AVX512_CHECK
 +static int32_t
 +dp_netdev_input_outer_avx512_probe(void)
 +{
@@ -1653,6 +51086,24 @@ index 84d4ec156e..5ae119a308 100644
  /* Actual list of implementations goes here. */
  static struct dpif_netdev_impl_info_t dpif_impls[] = {
      /* The default scalar C code implementation. */
+@@ -40,7 +56,7 @@ static struct dpif_netdev_impl_info_t dpif_impls[] = {
+       .probe = NULL,
+       .name = "dpif_scalar", },
+ 
+-#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if DPIF_NETDEV_IMPL_AVX512_CHECK
+     /* Only available on x86_64 bit builds with SSE 4.2 used for OVS core. */
+     [DPIF_NETDEV_IMPL_AVX512] = { .input_func = dp_netdev_input_outer_avx512,
+       .probe = dp_netdev_input_outer_avx512_probe,
+@@ -59,7 +75,7 @@ dp_netdev_impl_get_default(void)
+         int dpif_idx = DPIF_NETDEV_IMPL_SCALAR;
+ 
+ /* Configure-time overriding to run test suite on all implementations. */
+-#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if DPIF_NETDEV_IMPL_AVX512_CHECK
+ #ifdef DPIF_AVX512_DEFAULT
+         dp_netdev_input_func_probe probe;
+ 
 diff --git a/lib/dpif-netdev-private-dpif.h b/lib/dpif-netdev-private-dpif.h
 index 0da639c55a..3e38630f53 100644
 --- a/lib/dpif-netdev-private-dpif.h
@@ -1670,7 +51121,7 @@ index 0da639c55a..3e38630f53 100644
  dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd,
                               struct dp_packet_batch *packets,
 diff --git a/lib/dpif-netdev-private-extract.c b/lib/dpif-netdev-private-extract.c
-index a29bdcfa78..fe04ea80ff 100644
+index a29bdcfa78..e24e5cf0ed 100644
 --- a/lib/dpif-netdev-private-extract.c
 +++ b/lib/dpif-netdev-private-extract.c
 @@ -19,6 +19,7 @@
@@ -1685,7 +51136,7 @@ index a29bdcfa78..fe04ea80ff 100644
  /* Variable to hold the default MFEX implementation. */
  static ATOMIC(miniflow_extract_func) default_mfex_func;
  
-+#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if MFEX_IMPL_AVX512_CHECK
 +static int32_t
 +avx512_isa_probe(bool needs_vbmi)
 +{
@@ -1725,11 +51176,48 @@ index a29bdcfa78..fe04ea80ff 100644
  /* Implementations of available extract options and
   * the implementations are always in order of preference.
   */
+@@ -54,7 +92,7 @@ static struct dpif_miniflow_extract_impl mfex_impls[] = {
+         .name = "study", },
+ 
+ /* Compile in implementations only if the compiler ISA checks pass. */
+-#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if MFEX_IMPL_AVX512_CHECK
+     [MFEX_IMPL_VMBI_IPv4_UDP] = {
+         .probe = mfex_avx512_vbmi_probe,
+         .extract_func = mfex_avx512_vbmi_ip_udp,
 diff --git a/lib/dpif-netdev-private-extract.h b/lib/dpif-netdev-private-extract.h
-index f9a757ba41..3e06148c5a 100644
+index f9a757ba41..d92090190c 100644
 --- a/lib/dpif-netdev-private-extract.h
 +++ b/lib/dpif-netdev-private-extract.h
-@@ -176,10 +176,8 @@ mfex_study_traffic(struct dp_packet_batch *packets,
+@@ -19,6 +19,9 @@
+ 
+ #include <sys/types.h>
+ 
++#define MFEX_IMPL_AVX512_CHECK (__x86_64__ && HAVE_AVX512F \
++    && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++
+ /* Forward declarations. */
+ struct dp_packet;
+ struct miniflow;
+@@ -81,7 +84,7 @@ enum dpif_miniflow_extract_impl_idx {
+     MFEX_IMPL_AUTOVALIDATOR,
+     MFEX_IMPL_SCALAR,
+     MFEX_IMPL_STUDY,
+-#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if MFEX_IMPL_AVX512_CHECK
+     MFEX_IMPL_VMBI_IPv4_UDP,
+     MFEX_IMPL_IPv4_UDP,
+     MFEX_IMPL_VMBI_IPv4_TCP,
+@@ -99,7 +102,7 @@ extern struct ovs_mutex dp_netdev_mutex;
+ /* Define a index which points to the first traffic optimized MFEX
+  * option from the enum list else holds max value.
+  */
+-#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
++#if MFEX_IMPL_AVX512_CHECK
+ 
+ #define MFEX_IMPL_START_IDX MFEX_IMPL_VMBI_IPv4_UDP
+ #else
+@@ -176,10 +179,8 @@ mfex_study_traffic(struct dp_packet_batch *packets,
  int
  mfex_set_study_pkt_cnt(uint32_t pkt_cmp_count, const char *name);
  
@@ -2032,7 +51520,7 @@ index 9f35713ef5..3d9d8929f7 100644
  }
  
 diff --git a/lib/dpif-netlink.c b/lib/dpif-netlink.c
-index 71e35ccdda..06e1e8ca02 100644
+index 71e35ccdda..484545cfb8 100644
 --- a/lib/dpif-netlink.c
 +++ b/lib/dpif-netlink.c
 @@ -85,7 +85,7 @@ enum { MAX_PORTS = USHRT_MAX };
@@ -2044,8 +51532,214 @@ index 71e35ccdda..06e1e8ca02 100644
  
  /* This PID is not used by the kernel datapath when using dispatch per CPU,
   * but it is required to be set (not zero). */
+@@ -801,14 +801,28 @@ dpif_netlink_set_handler_pids(struct dpif *dpif_, const uint32_t *upcall_pids,
+                               uint32_t n_upcall_pids)
+ {
+     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
++    int largest_cpu_id = ovs_numa_get_largest_core_id();
+     struct dpif_netlink_dp request, reply;
+     struct ofpbuf *bufp;
+-    int error;
+-    int n_cores;
+ 
+-    n_cores = count_cpu_cores();
+-    ovs_assert(n_cores == n_upcall_pids);
+-    VLOG_DBG("Dispatch mode(per-cpu): Number of CPUs is %d", n_cores);
++    uint32_t *corrected;
++    int error, i, n_cores;
++
++    if (largest_cpu_id == OVS_NUMA_UNSPEC) {
++        largest_cpu_id = -1;
++    }
++
++    /* Some systems have non-contiguous cpu core ids.  count_total_cores()
++     * would return an accurate count, but that number alone cannot be used.
++     * For example, if the largest core_id of a system is cpu9 but the system
++     * only has 4 cpus, the OVS kernel module would throw a "CPU mismatch"
++     * warning.  With the MAX() in place, in this example we send an array of
++     * size 10 and prevent the warning.  This has no bearing on the number of
++     * threads created.
++     */
++    n_cores = MAX(count_total_cores(), largest_cpu_id + 1);
++    VLOG_DBG("Dispatch mode(per-cpu): Setting up handler PIDs for %d cores",
++             n_cores);
+ 
+     dpif_netlink_dp_init(&request);
+     request.cmd = OVS_DP_CMD_SET;
+@@ -817,7 +831,12 @@ dpif_netlink_set_handler_pids(struct dpif *dpif_, const uint32_t *upcall_pids,
+     request.user_features = dpif->user_features |
+                             OVS_DP_F_DISPATCH_UPCALL_PER_CPU;
+ 
+-    request.upcall_pids = upcall_pids;
++    corrected = xcalloc(n_cores, sizeof *corrected);
++
++    for (i = 0; i < n_cores; i++) {
++        corrected[i] = upcall_pids[i % n_upcall_pids];
++    }
++    request.upcall_pids = corrected;
+     request.n_upcall_pids = n_cores;
+ 
+     error = dpif_netlink_dp_transact(&request, &reply, &bufp);
+@@ -825,9 +844,10 @@ dpif_netlink_set_handler_pids(struct dpif *dpif_, const uint32_t *upcall_pids,
+         dpif->user_features = reply.user_features;
+         ofpbuf_delete(bufp);
+         if (!dpif_netlink_upcall_per_cpu(dpif)) {
+-            return -EOPNOTSUPP;
++            error = -EOPNOTSUPP;
+         }
+     }
++    free(corrected);
+     return error;
+ }
+ 
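/* Editorial note, not part of the patch: a worked example of the wrap-around
 * fill above.  With 4 handler PIDs (p0..p3) on a system whose largest core id
 * is 9, n_cores becomes 10 and the kernel receives:
 *
 *   corrected[] = { p0, p1, p2, p3, p0, p1, p2, p3, p0, p1 }
 *
 * Every core id then maps to a live handler PID, avoiding the kernel's
 * "CPU mismatch" warning, while still only 4 handler threads exist. */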
+@@ -1074,7 +1094,7 @@ dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
+ 
+             ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
+             for (i = 0; i < 32; i++) {
+-                if (tnl_cfg->exts & (1 << i)) {
++                if (tnl_cfg->exts & (UINT32_C(1) << i)) {
+                     nl_msg_put_flag(&options, i);
+                 }
+             }
+@@ -2237,8 +2257,6 @@ parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
+     size_t left;
+     struct netdev *dev;
+     struct offload_info info;
+-    ovs_be16 dst_port = 0;
+-    uint8_t csum_on = false;
+     int err;
+ 
+     info.tc_modify_flow_deleted = false;
+@@ -2258,10 +2276,9 @@ parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
+         return EOPNOTSUPP;
+     }
+ 
+-    /* Get tunnel dst port */
++    /* Check the output port for a tunnel. */
+     NL_ATTR_FOR_EACH(nla, left, put->actions, put->actions_len) {
+         if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
+-            const struct netdev_tunnel_config *tnl_cfg;
+             struct netdev *outdev;
+             odp_port_t out_port;
+ 
+@@ -2271,19 +2288,10 @@ parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
+                 err = EOPNOTSUPP;
+                 goto out;
+             }
+-            tnl_cfg = netdev_get_tunnel_config(outdev);
+-            if (tnl_cfg && tnl_cfg->dst_port != 0) {
+-                dst_port = tnl_cfg->dst_port;
+-            }
+-            if (tnl_cfg) {
+-                csum_on = tnl_cfg->csum;
+-            }
+             netdev_close(outdev);
+         }
+     }
+ 
+-    info.tp_dst_port = dst_port;
+-    info.tunnel_csum_on = csum_on;
+     info.recirc_id_shared_with_tc = (dpif->user_features
+                                      & OVS_DP_F_TC_RECIRC_SHARING);
+     err = netdev_flow_put(dev, &match,
+@@ -2506,6 +2514,77 @@ dpif_netlink_handler_uninit(struct dpif_handler *handler)
+ }
+ #endif
+ 
++/* Returns true if 'num' is a prime number,
++ * otherwise, returns false.
++ */
++static bool
++is_prime(uint32_t num)
++{
++    if (num == 2) {
++        return true;
++    }
++
++    if (num < 2) {
++        return false;
++    }
++
++    if (num % 2 == 0) {
++        return false;
++    }
++
++    for (uint64_t i = 3; i * i <= num; i += 2) {
++        if (num % i == 0) {
++            return false;
++        }
++    }
++
++    return true;
++}
++
++/* Returns start if start is a prime number.  Otherwise returns the next
++ * prime greater than start.  The search is limited to UINT32_MAX.
++ *
++ * Returns 0 if no prime has been found between start and UINT32_MAX.
++ */
++static uint32_t
++next_prime(uint32_t start)
++{
++    if (start <= 2) {
++        return 2;
++    }
++
++    for (uint32_t i = start; i < UINT32_MAX; i++) {
++        if (is_prime(i)) {
++            return i;
++        }
++    }
++
++    return 0;
++}
++
++/* Calculates and returns the number of handler threads needed, based on
++ * the following formula:
++ *
++ * handlers_n = min(next_prime(active_cores + 1), total_cores)
++ */
++static uint32_t
++dpif_netlink_calculate_n_handlers(void)
++{
++    uint32_t total_cores = count_total_cores();
++    uint32_t n_handlers = count_cpu_cores();
++    uint32_t next_prime_num;
++
++    /* If not all cores are available to OVS, create additional handler
++     * threads to ensure a fairer distribution of load among them.
++     */
++    if (n_handlers < total_cores && total_cores > 2) {
++        next_prime_num = next_prime(n_handlers + 1);
++        n_handlers = MIN(next_prime_num, total_cores);
++    }
++
++    return n_handlers;
++}
++
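/* Editorial note, not part of the patch: worked examples of
 * dpif_netlink_calculate_n_handlers() above:
 *
 *   active == total (e.g. 8/8)  -> condition false, handlers = 8
 *   active 4,  total 16         -> next_prime(5)  = 5,  handlers = 5
 *   active 6,  total 8          -> next_prime(7)  = 7,  handlers = 7
 *   active 10, total 11         -> next_prime(11) = 11, handlers = 11
 *
 * A prime handler count makes modulo-based distribution of core ids over
 * handlers less likely to alias when core ids are strided.  Also note that
 * is_prime() deliberately uses a uint64_t loop counter so that i * i cannot
 * overflow for values near UINT32_MAX. */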
+ static int
+ dpif_netlink_refresh_handlers_cpu_dispatch(struct dpif_netlink *dpif)
+     OVS_REQ_WRLOCK(dpif->upcall_lock)
+@@ -2515,7 +2594,7 @@ dpif_netlink_refresh_handlers_cpu_dispatch(struct dpif_netlink *dpif)
+     uint32_t n_handlers;
+     uint32_t *upcall_pids;
+ 
+-    n_handlers = count_cpu_cores();
++    n_handlers = dpif_netlink_calculate_n_handlers();
+     if (dpif->n_handlers != n_handlers) {
+         VLOG_DBG("Dispatch mode(per-cpu): initializing %d handlers",
+                    n_handlers);
+@@ -2755,7 +2834,7 @@ dpif_netlink_number_handlers_required(struct dpif *dpif_, uint32_t *n_handlers)
+     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
+ 
+     if (dpif_netlink_upcall_per_cpu(dpif)) {
+-        *n_handlers = count_cpu_cores();
++        *n_handlers = dpif_netlink_calculate_n_handlers();
+         return true;
+     }
+ 
 diff --git a/lib/dynamic-string.c b/lib/dynamic-string.c
-index fd0127ed17..3b4520f87c 100644
+index fd0127ed17..8e9555a630 100644
 --- a/lib/dynamic-string.c
 +++ b/lib/dynamic-string.c
 @@ -152,7 +152,10 @@ ds_put_format_valist(struct ds *ds, const char *format, va_list args_)
@@ -2085,6 +51779,69 @@ index fd0127ed17..3b4520f87c 100644
          if (used) {
              ds->length += used;
              return;
+@@ -384,13 +389,9 @@ ds_put_hex(struct ds *ds, const void *buf_, size_t size)
+     }
+ }
+ 
+-/* Writes the 'size' bytes in 'buf' to 'string' as hex bytes arranged 16 per
+- * line.  Numeric offsets are also included, starting at 'ofs' for the first
+- * byte in 'buf'.  If 'ascii' is true then the corresponding ASCII characters
+- * are also rendered alongside. */
+-void
+-ds_put_hex_dump(struct ds *ds, const void *buf_, size_t size,
+-                uintptr_t ofs, bool ascii)
++static void
++ds_put_hex_dump__(struct ds *ds, const void *buf_, size_t size,
++                  uintptr_t ofs, bool ascii, bool skip_zero_lines)
+ {
+     const uint8_t *buf = buf_;
+     const size_t per_line = 16; /* Maximum bytes per line. */
+@@ -406,6 +407,10 @@ ds_put_hex_dump(struct ds *ds, const void *buf_, size_t size,
+             end = start + size;
+         n = end - start;
+ 
++        if (skip_zero_lines && is_all_zeros(&buf[start], n)) {
++            goto next;
++        }
++
+         /* Print line. */
+         ds_put_format(ds, "%08"PRIxMAX"  ",
+                       (uintmax_t) ROUND_DOWN(ofs, per_line));
+@@ -433,13 +438,33 @@ ds_put_hex_dump(struct ds *ds, const void *buf_, size_t size,
+             ds_chomp(ds, ' ');
+         }
+         ds_put_format(ds, "\n");
+-
++next:
+         ofs += n;
+         buf += n;
+         size -= n;
+     }
+ }
+ 
++/* Writes the 'size' bytes in 'buf' to 'string' as hex bytes arranged 16 per
++ * line.  Numeric offsets are also included, starting at 'ofs' for the first
++ * byte in 'buf'.  If 'ascii' is true then the corresponding ASCII characters
++ * are also rendered alongside. */
++void
++ds_put_hex_dump(struct ds *ds, const void *buf_, size_t size,
++                uintptr_t ofs, bool ascii)
++{
++    ds_put_hex_dump__(ds, buf_, size, ofs, ascii, false);
++}
++
++/* Same as 'ds_put_hex_dump', but doesn't print lines that only contain
++ * zero bytes. */
++void
++ds_put_sparse_hex_dump(struct ds *ds, const void *buf_, size_t size,
++                       uintptr_t ofs, bool ascii)
++{
++    ds_put_hex_dump__(ds, buf_, size, ofs, ascii, true);
++}
++
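/* Editorial sketch, not part of the patch: ds_put_sparse_hex_dump() is used
 * later in this patch (lib/netdev-offload-tc.c) to print only the nonzero
 * parts of a mostly-zero flow mask.  Hypothetical usage: */
#include <stdio.h>
#include "openvswitch/dynamic-string.h"

static void
dump_mask(const void *mask, size_t size)
{
    struct ds ds = DS_EMPTY_INITIALIZER;

    /* 16-byte lines that contain only zero bytes are skipped entirely. */
    ds_put_sparse_hex_dump(&ds, mask, size, 0, false);
    printf("%s", ds_cstr(&ds));
    ds_destroy(&ds);
}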
+ int
+ ds_last(const struct ds *ds)
+ {
 diff --git a/lib/fat-rwlock.c b/lib/fat-rwlock.c
 index d913b2088f..771ccc9737 100644
 --- a/lib/fat-rwlock.c
@@ -2106,6 +51863,19 @@ index d913b2088f..771ccc9737 100644
          free_slot(slot);
      }
      ovs_mutex_destroy(&rwlock->mutex);
+diff --git a/lib/flow.c b/lib/flow.c
+index dd523c889b..418c3c4f48 100644
+--- a/lib/flow.c
++++ b/lib/flow.c
+@@ -1950,7 +1950,7 @@ flow_wildcards_init_for_packet(struct flow_wildcards *wc,
+     }
+ 
+     /* IPv4 or IPv6. */
+-    WC_MASK_FIELD(wc, nw_frag);
++    WC_MASK_FIELD_MASK(wc, nw_frag, FLOW_NW_FRAG_MASK);
+     WC_MASK_FIELD(wc, nw_tos);
+     WC_MASK_FIELD(wc, nw_ttl);
+     WC_MASK_FIELD(wc, nw_proto);
 diff --git a/lib/hindex.h b/lib/hindex.h
 index 876c5a9e39..ea7402587e 100644
 --- a/lib/hindex.h
@@ -2370,6 +52140,28 @@ index 89d711225f..3252f17ebf 100644
              member_destroy(member);
          }
  
+diff --git a/lib/libopenvswitch.pc.in b/lib/libopenvswitch.pc.in
+index 2a3f2ca7bc..44fbb1f9fd 100644
+--- a/lib/libopenvswitch.pc.in
++++ b/lib/libopenvswitch.pc.in
+@@ -7,5 +7,5 @@ Name: libopenvswitch
+ Description: Open vSwitch library
+ Version: @VERSION@
+ Libs: -L${libdir} -lopenvswitch
+-Libs.private: @LIBS@
+-Cflags: -I${includedir}/openvswitch
++Libs.private: @LIBS@ @SSL_LIBS@ @CAPNG_LDADD@ @LIBBPF_LDADD@
++Cflags: -I${includedir}
+diff --git a/lib/libsflow.pc.in b/lib/libsflow.pc.in
+index e70a2b7048..34bb7e3d69 100644
+--- a/lib/libsflow.pc.in
++++ b/lib/libsflow.pc.in
+@@ -8,4 +8,4 @@ Description: sFlow library of Open vSwitch
+ Version: @VERSION@
+ Libs: -L${libdir} -lsflow
+ Libs.private: @LIBS@
+-Cflags: -I${includedir}/openvswitch
++Cflags: -I${includedir}
 diff --git a/lib/lldp/lldpd-structs.c b/lib/lldp/lldpd-structs.c
 index 499b441746..a8c7fad098 100644
 --- a/lib/lldp/lldpd-structs.c
@@ -2454,6 +52246,19 @@ index 3fcd7d9b77..a60794fb26 100644
              mac_learning_expire(ml, e);
          }
          hmap_destroy(&ml->table);
+diff --git a/lib/match.c b/lib/match.c
+index 2ad03e044e..0b9dc4278c 100644
+--- a/lib/match.c
++++ b/lib/match.c
+@@ -1737,7 +1737,7 @@ match_format(const struct match *match,
+     format_be32_masked(s, "mpls_lse1", f->mpls_lse[1], wc->masks.mpls_lse[1]);
+     format_be32_masked(s, "mpls_lse2", f->mpls_lse[2], wc->masks.mpls_lse[2]);
+ 
+-    switch (wc->masks.nw_frag) {
++    switch (wc->masks.nw_frag & FLOW_NW_FRAG_MASK) {
+     case FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER:
+         ds_put_format(s, "%snw_frag=%s%s,", colors.param, colors.end,
+                       f->nw_frag & FLOW_NW_FRAG_ANY
 diff --git a/lib/mcast-snooping.c b/lib/mcast-snooping.c
 index 6730301b67..029ca28558 100644
 --- a/lib/mcast-snooping.c
@@ -2975,10 +52780,46 @@ index b6b29c75e3..e28e397d7e 100644
          free(queue);
      }
 diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
-index 620a451dec..2766b3f2bf 100644
+index 620a451dec..1b9c2874ac 100644
 --- a/lib/netdev-linux.c
 +++ b/lib/netdev-linux.c
-@@ -5331,11 +5331,11 @@ static void
+@@ -247,6 +247,14 @@ enum {
+     VALID_NUMA_ID           = 1 << 8,
+ };
+ 
++/* Linux 4.4 introduced the ability to skip the kernel's internal stats
++ * gathering via an external filter mask that can be passed in a netlink
++ * request.
++ */
++#ifndef RTEXT_FILTER_SKIP_STATS
++#define RTEXT_FILTER_SKIP_STATS (1 << 3)
++#endif
++
+ /* Use one for the packet buffer and another for the aux buffer to receive
+  * TSO packets. */
+ #define IOV_STD_SIZE 1
+@@ -682,7 +690,10 @@ netdev_linux_update_lag(struct rtnetlink_change *change)
+                 return;
+             }
+ 
+-            if (is_netdev_linux_class(master_netdev->netdev_class)) {
++            /* If LAG master is not attached to ovs, ingress block on LAG
++             * members shoud not be updated. */
++            if (!master_netdev->auto_classified &&
++                is_netdev_linux_class(master_netdev->netdev_class)) {
+                 block_id = netdev_get_block_id(master_netdev);
+                 if (!block_id) {
+                     netdev_close(master_netdev);
+@@ -2623,7 +2634,7 @@ static void
+ nl_msg_act_police_end_nest(struct ofpbuf *request, size_t offset,
+                            size_t act_offset)
+ {
+-    nl_msg_put_u32(request, TCA_POLICE_RESULT, TC_ACT_PIPE);
++    nl_msg_put_u32(request, TCA_POLICE_RESULT, TC_ACT_UNSPEC);
+     nl_msg_end_nested(request, offset);
+     nl_msg_end_nested(request, act_offset);
+ }
+@@ -5331,11 +5342,11 @@ static void
  hfsc_tc_destroy(struct tc *tc)
  {
      struct hfsc *hfsc;
@@ -2992,7 +52833,7 @@ index 620a451dec..2766b3f2bf 100644
          hmap_remove(&hfsc->tc.queues, &hc->tc_queue.hmap_node);
          free(hc);
      }
-@@ -6295,7 +6295,14 @@ get_stats_via_netlink(const struct netdev *netdev_, struct netdev_stats *stats)
+@@ -6295,7 +6306,14 @@ get_stats_via_netlink(const struct netdev *netdev_, struct netdev_stats *stats)
      if (ofpbuf_try_pull(reply, NLMSG_HDRLEN + sizeof(struct ifinfomsg))) {
          const struct nlattr *a = nl_attr_find(reply, 0, IFLA_STATS64);
          if (a && nl_attr_get_size(a) >= sizeof(struct rtnl_link_stats64)) {
@@ -3008,11 +52849,29 @@ index 620a451dec..2766b3f2bf 100644
              error = 0;
          } else {
              a = nl_attr_find(reply, 0, IFLA_STATS);
+@@ -6411,6 +6429,9 @@ netdev_linux_update_via_netlink(struct netdev_linux *netdev)
+     if (netdev_linux_netnsid_is_remote(netdev)) {
+         nl_msg_put_u32(&request, IFLA_IF_NETNSID, netdev->netnsid);
+     }
++
++    nl_msg_put_u32(&request, IFLA_EXT_MASK, RTEXT_FILTER_SKIP_STATS);
++
+     error = nl_transact(NETLINK_ROUTE, &request, &reply);
+     ofpbuf_uninit(&request);
+     if (error) {
 diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
-index 94dc6a9b74..12d299603a 100644
+index 94dc6a9b74..be6adc32ff 100644
 --- a/lib/netdev-offload-dpdk.c
 +++ b/lib/netdev-offload-dpdk.c
-@@ -363,6 +363,8 @@ dump_flow_pattern(struct ds *s,
+@@ -18,6 +18,7 @@
+ 
+ #include <sys/types.h>
+ #include <netinet/ip6.h>
++#include <rte_ethdev.h>
+ #include <rte_flow.h>
+ #include <rte_gre.h>
+ 
+@@ -363,6 +364,8 @@ dump_flow_pattern(struct ds *s,
  
          ds_put_cstr(s, "eth ");
          if (eth_spec) {
@@ -3021,7 +52880,7 @@ index 94dc6a9b74..12d299603a 100644
              if (!eth_mask) {
                  eth_mask = &rte_flow_item_eth_mask;
              }
-@@ -377,6 +379,9 @@ dump_flow_pattern(struct ds *s,
+@@ -377,6 +380,9 @@ dump_flow_pattern(struct ds *s,
              DUMP_PATTERN_ITEM(eth_mask->type, false, "type", "0x%04"PRIx16,
                                ntohs(eth_spec->type),
                                ntohs(eth_mask->type), 0);
@@ -3031,7 +52890,7 @@ index 94dc6a9b74..12d299603a 100644
          }
          ds_put_cstr(s, "/ ");
      } else if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-@@ -1369,6 +1374,7 @@ parse_flow_match(struct netdev *netdev,
+@@ -1369,6 +1375,7 @@ parse_flow_match(struct netdev *netdev,
                   struct flow_patterns *patterns,
                   struct match *match)
  {
@@ -3039,7 +52898,7 @@ index 94dc6a9b74..12d299603a 100644
      struct flow *consumed_masks;
      uint8_t proto = 0;
  
-@@ -1414,6 +1420,11 @@ parse_flow_match(struct netdev *netdev,
+@@ -1414,6 +1421,11 @@ parse_flow_match(struct netdev *netdev,
          memset(&consumed_masks->dl_src, 0, sizeof consumed_masks->dl_src);
          consumed_masks->dl_type = 0;
  
@@ -3051,7 +52910,7 @@ index 94dc6a9b74..12d299603a 100644
          add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ETH, spec, mask, NULL);
      }
  
-@@ -1427,8 +1438,14 @@ parse_flow_match(struct netdev *netdev,
+@@ -1427,8 +1439,14 @@ parse_flow_match(struct netdev *netdev,
          spec->tci = match->flow.vlans[0].tci & ~htons(VLAN_CFI);
          mask->tci = match->wc.masks.vlans[0].tci & ~htons(VLAN_CFI);
  
@@ -3068,11 +52927,28 @@ index 94dc6a9b74..12d299603a 100644
  
          add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_VLAN, spec, mask, NULL);
      }
+@@ -1697,7 +1715,7 @@ add_flow_mark_rss_actions(struct flow_actions *actions,
+         .conf = (struct rte_flow_action_rss) {
+             .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+             .level = 0,
+-            .types = 0,
++            .types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP,
+             .queue_num = netdev_n_rxq(netdev),
+             .queue = rss_data->queue,
+             .key_len = 0,
 diff --git a/lib/netdev-offload-tc.c b/lib/netdev-offload-tc.c
-index 9845e8d3fe..262faf3c62 100644
+index 9845e8d3fe..93321989a9 100644
 --- a/lib/netdev-offload-tc.c
 +++ b/lib/netdev-offload-tc.c
-@@ -417,11 +417,11 @@ delete_chains_from_netdev(struct netdev *netdev, struct tcf_id *id)
+@@ -44,6 +44,7 @@
+ VLOG_DEFINE_THIS_MODULE(netdev_offload_tc);
+ 
+ static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
++static struct vlog_rate_limit warn_rl = VLOG_RATE_LIMIT_INIT(10, 2);
+ 
+ static struct hmap ufid_to_tc = HMAP_INITIALIZER(&ufid_to_tc);
+ static struct hmap tc_to_ufid = HMAP_INITIALIZER(&tc_to_ufid);
+@@ -417,11 +418,11 @@ delete_chains_from_netdev(struct netdev *netdev, struct tcf_id *id)
  static int
  netdev_tc_flow_flush(struct netdev *netdev)
  {
@@ -3086,7 +52962,7 @@ index 9845e8d3fe..262faf3c62 100644
          if (data->netdev != netdev) {
              continue;
          }
-@@ -481,10 +481,10 @@ netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
+@@ -481,10 +482,10 @@ netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
  
  static void
  parse_flower_rewrite_to_netlink_action(struct ofpbuf *buf,
@@ -3100,7 +52976,57 @@ index 9845e8d3fe..262faf3c62 100644
  
      for (int type = 0; type < ARRAY_SIZE(set_flower_map); type++) {
          char *put = NULL;
-@@ -585,8 +585,10 @@ parse_tc_flower_to_stats(struct tc_flower *flower,
+@@ -550,30 +551,42 @@ flower_tun_opt_to_match(struct match *match, struct tc_flower *flower)
+     struct geneve_opt *opt, *opt_mask;
+     int len, cnt = 0;
+ 
++    /* Options are always in UDPIF format in the 'flower'. */
++    match->flow.tunnel.flags |= FLOW_TNL_F_UDPIF;
++    match->wc.masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
++
++    match->flow.tunnel.metadata.present.len =
++           flower->key.tunnel.metadata.present.len;
++    /* In the 'flower' mask len is an actual length, not a mask.  But in the
++     * 'match' it is an actual mask, so it should be an exact match, as TC
++     * will always match on the exact value. */
++    match->wc.masks.tunnel.metadata.present.len = 0xff;
++
++    if (!flower->key.tunnel.metadata.present.len) {
++        /* No options present. */
++        return;
++    }
++
+     memcpy(match->flow.tunnel.metadata.opts.gnv,
+            flower->key.tunnel.metadata.opts.gnv,
+            flower->key.tunnel.metadata.present.len);
+-    match->flow.tunnel.metadata.present.len =
+-           flower->key.tunnel.metadata.present.len;
+-    match->flow.tunnel.flags |= FLOW_TNL_F_UDPIF;
+     memcpy(match->wc.masks.tunnel.metadata.opts.gnv,
+            flower->mask.tunnel.metadata.opts.gnv,
+            flower->mask.tunnel.metadata.present.len);
+ 
++    /* Fixing up 'length' fields of particular options, since these are
++     * also not masks, but actual lengths in the 'flower' structure. */
+     len = flower->key.tunnel.metadata.present.len;
+     while (len) {
+         opt = &match->flow.tunnel.metadata.opts.gnv[cnt];
+         opt_mask = &match->wc.masks.tunnel.metadata.opts.gnv[cnt];
+ 
++        /* "Exact" match as set in tun_metadata_to_geneve_mask__(). */
+         opt_mask->length = 0x1f;
+ 
+         cnt += sizeof(struct geneve_opt) / 4 + opt->length;
+         len -= sizeof(struct geneve_opt) + opt->length * 4;
+     }
+-
+-    match->wc.masks.tunnel.metadata.present.len =
+-           flower->mask.tunnel.metadata.present.len;
+-    match->wc.masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
+ }
+ 
+ static void
+@@ -585,8 +598,10 @@ parse_tc_flower_to_stats(struct tc_flower *flower,
      }
  
      memset(stats, 0, sizeof *stats);
@@ -3113,7 +53039,47 @@ index 9845e8d3fe..262faf3c62 100644
      stats->used = flower->lastused;
  }
  
-@@ -877,7 +879,7 @@ parse_tc_flower_to_match(struct tc_flower *flower,
+@@ -616,7 +631,8 @@ parse_tc_flower_terse_to_match(struct tc_flower *flower,
+ }
+ 
+ static int
+-parse_tc_flower_to_match(struct tc_flower *flower,
++parse_tc_flower_to_match(const struct netdev *netdev,
++                         struct tc_flower *flower,
+                          struct match *match,
+                          struct nlattr **actions,
+                          struct dpif_flow_stats *stats,
+@@ -803,18 +819,24 @@ parse_tc_flower_to_match(struct tc_flower *flower,
+                                           &flower->key.tunnel.ipv6.ipv6_src,
+                                           &flower->mask.tunnel.ipv6.ipv6_src);
+         }
+-        if (flower->key.tunnel.tos) {
++        if (flower->mask.tunnel.tos) {
+             match_set_tun_tos_masked(match, flower->key.tunnel.tos,
+                                      flower->mask.tunnel.tos);
+         }
+-        if (flower->key.tunnel.ttl) {
++        if (flower->mask.tunnel.ttl) {
+             match_set_tun_ttl_masked(match, flower->key.tunnel.ttl,
+                                      flower->mask.tunnel.ttl);
+         }
+-        if (flower->key.tunnel.tp_dst) {
+-            match_set_tun_tp_dst(match, flower->key.tunnel.tp_dst);
++        if (flower->mask.tunnel.tp_src) {
++            match_set_tun_tp_dst_masked(match, flower->key.tunnel.tp_src,
++                                        flower->mask.tunnel.tp_src);
+         }
+-        if (flower->key.tunnel.metadata.present.len) {
++        if (flower->mask.tunnel.tp_dst) {
++            match_set_tun_tp_dst_masked(match, flower->key.tunnel.tp_dst,
++                                        flower->mask.tunnel.tp_dst);
++        }
++
++        if (!strcmp(netdev_get_type(netdev), "geneve")) {
+             flower_tun_opt_to_match(match, flower);
+         }
+     }
+@@ -877,7 +899,7 @@ parse_tc_flower_to_match(struct tc_flower *flower,
              }
              break;
              case TC_ACT_PEDIT: {
@@ -3122,7 +53088,18 @@ index 9845e8d3fe..262faf3c62 100644
              }
              break;
              case TC_ACT_ENCAP: {
-@@ -1222,8 +1224,8 @@ parse_put_flow_set_masked_action(struct tc_flower *flower,
+@@ -1054,8 +1076,8 @@ netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
+             continue;
+         }
+ 
+-        if (parse_tc_flower_to_match(&flower, match, actions, stats, attrs,
+-                                     wbuffer, dump->terse)) {
++        if (parse_tc_flower_to_match(netdev, &flower, match, actions,
++                                     stats, attrs, wbuffer, dump->terse)) {
+             continue;
+         }
+ 
+@@ -1222,8 +1244,8 @@ parse_put_flow_set_masked_action(struct tc_flower *flower,
      uint64_t set_stub[1024 / 8];
      struct ofpbuf set_buf = OFPBUF_STUB_INITIALIZER(set_stub);
      char *set_data, *set_mask;
@@ -3133,7 +53110,7 @@ index 9845e8d3fe..262faf3c62 100644
      const struct nlattr *attr;
      int i, j, type;
      size_t size;
-@@ -1265,14 +1267,6 @@ parse_put_flow_set_masked_action(struct tc_flower *flower,
+@@ -1265,14 +1287,6 @@ parse_put_flow_set_masked_action(struct tc_flower *flower,
          }
      }
  
@@ -3148,7 +53125,7 @@ index 9845e8d3fe..262faf3c62 100644
      if (hasmask && !is_all_zeros(set_mask, size)) {
          VLOG_DBG_RL(&rl, "unsupported sub attribute of set action type %d",
                      type);
-@@ -1281,6 +1275,8 @@ parse_put_flow_set_masked_action(struct tc_flower *flower,
+@@ -1281,6 +1295,8 @@ parse_put_flow_set_masked_action(struct tc_flower *flower,
      }
  
      ofpbuf_uninit(&set_buf);
@@ -3157,7 +53134,163 @@ index 9845e8d3fe..262faf3c62 100644
      return 0;
  }
  
-@@ -1541,6 +1537,12 @@ parse_match_ct_state_to_flower(struct tc_flower *flower, struct match *match)
+@@ -1288,6 +1304,7 @@ static int
+ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action,
+                           const struct nlattr *set, size_t set_len)
+ {
++    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+     const struct nlattr *tunnel;
+     const struct nlattr *tun_attr;
+     size_t tun_left, tunnel_len;
+@@ -1306,6 +1323,7 @@ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action,
+ 
+     action->type = TC_ACT_ENCAP;
+     action->encap.id_present = false;
++    action->encap.no_csum = 1;
+     flower->action_count++;
+     NL_ATTR_FOR_EACH_UNSAFE(tun_attr, tun_left, tunnel, tunnel_len) {
+         switch (nl_attr_type(tun_attr)) {
+@@ -1330,6 +1348,18 @@ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action,
+             action->encap.ttl = nl_attr_get_u8(tun_attr);
+         }
+         break;
++        case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: {
++            /* XXX: This is wrong!  We're ignoring the DF flag configuration
++             * requested by the user.  However, TC for now has no way to pass
++             * that flag and it is set by default, meaning tunnel offloading
++             * will not work if 'options:df_default=false' is not set.
++             * Keeping incorrect behavior for now. */
++        }
++        break;
++        case OVS_TUNNEL_KEY_ATTR_CSUM: {
++            action->encap.no_csum = 0;
++        }
++        break;
+         case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
+             action->encap.ipv6.ipv6_src =
+                 nl_attr_get_in6_addr(tun_attr);
+@@ -1354,12 +1384,31 @@ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action,
+             action->encap.data.present.len = nl_attr_get_size(tun_attr);
+         }
+         break;
++        default:
++            VLOG_DBG_RL(&rl, "unsupported tunnel key attribute %d",
++                        nl_attr_type(tun_attr));
++            return EOPNOTSUPP;
+         }
+     }
+ 
+     return 0;
+ }
+ 
++static bool
++is_ipv6_fragment_and_masked(const struct flow *key, const struct flow *mask)
++{
++    if (key->dl_type != htons(ETH_P_IPV6)) {
++        return false;
++    }
++    if (mask->nw_proto && key->nw_proto == IPPROTO_FRAGMENT) {
++        return true;
++    }
++    if (key->nw_frag & (mask->nw_frag & FLOW_NW_FRAG_ANY)) {
++        return true;
++    }
++    return false;
++}
++
+ static int
+ test_key_and_mask(struct match *match)
+ {
+@@ -1442,8 +1491,23 @@ test_key_and_mask(struct match *match)
+         return EOPNOTSUPP;
+     }
+ 
++    if (is_ipv6_fragment_and_masked(key, mask)) {
++        VLOG_DBG_RL(&rl, "offloading of IPv6 fragments isn't supported");
++        return EOPNOTSUPP;
++    }
++
+     if (!is_all_zeros(mask, sizeof *mask)) {
+-        VLOG_DBG_RL(&rl, "offloading isn't supported, unknown attribute");
++        if (!VLOG_DROP_DBG(&rl)) {
++            struct ds ds = DS_EMPTY_INITIALIZER;
++
++            ds_put_cstr(&ds,
++                        "offloading isn't supported, unknown attribute\n"
++                        "Unused mask bits:\n");
++            ds_put_sparse_hex_dump(&ds, mask, sizeof *mask, 0, false);
++
++            VLOG_DBG("%s", ds_cstr(&ds));
++            ds_destroy(&ds);
++        }
+         return EOPNOTSUPP;
+     }
+ 
+@@ -1452,18 +1516,51 @@ test_key_and_mask(struct match *match)
+ 
+ static void
+ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl,
+-                        const struct flow_tnl *tnl_mask)
++                        struct flow_tnl *tnl_mask)
+ {
+     struct geneve_opt *opt, *opt_mask;
+     int len, cnt = 0;
+ 
+-    memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv,
+-           tnl->metadata.present.len);
++    /* 'flower' always has an exact match on tunnel metadata length, so having
++     * it in a wrong format is not acceptable unless it is empty. */
++    if (!(tnl->flags & FLOW_TNL_F_UDPIF)) {
++        if (tnl->metadata.present.map) {
++            /* XXX: Add non-UDPIF format parsing here? */
++            VLOG_WARN_RL(&warn_rl, "Tunnel options are in the wrong format.");
++        } else {
++            /* There are no options, that equals for them to be in UDPIF format
++             * with a zero 'len'.  Clearing the 'map' mask as consumed.
++             * No need to explicitly set 'len' to zero in the 'flower'. */
++            tnl_mask->flags &= ~FLOW_TNL_F_UDPIF;
++            memset(&tnl_mask->metadata.present.map, 0,
++                   sizeof tnl_mask->metadata.present.map);
++        }
++        return;
++    }
++
++    tnl_mask->flags &= ~FLOW_TNL_F_UDPIF;
++
+     flower->key.tunnel.metadata.present.len = tnl->metadata.present.len;
++    /* Copying from the key and not from the mask, since in the 'flower'
++     * the length for a mask is not a mask, but the actual length.  TC
++     * will use an exact match for the length. */
++    flower->mask.tunnel.metadata.present.len = tnl->metadata.present.len;
++    memset(&tnl_mask->metadata.present.len, 0,
++           sizeof tnl_mask->metadata.present.len);
+ 
++    if (!tnl->metadata.present.len) {
++        return;
++    }
++
++    memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv,
++           tnl->metadata.present.len);
+     memcpy(flower->mask.tunnel.metadata.opts.gnv, tnl_mask->metadata.opts.gnv,
+            tnl->metadata.present.len);
+ 
++    memset(tnl_mask->metadata.opts.gnv, 0, tnl->metadata.present.len);
++
++    /* Fixing up 'length' fields of particular options, since these are
++     * also not masks, but actual lengths in the 'flower' structure. */
+     len = flower->key.tunnel.metadata.present.len;
+     while (len) {
+         opt = &flower->key.tunnel.metadata.opts.gnv[cnt];
+@@ -1474,8 +1571,6 @@ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl,
+         cnt += sizeof(struct geneve_opt) / 4 + opt->length;
+         len -= sizeof(struct geneve_opt) + opt->length * 4;
+     }
+-
+-    flower->mask.tunnel.metadata.present.len = tnl->metadata.present.len;
+ }
+ 
+ static void
+@@ -1541,6 +1636,12 @@ parse_match_ct_state_to_flower(struct tc_flower *flower, struct match *match)
              flower->key.ct_state &= ~(TCA_FLOWER_KEY_CT_FLAGS_NEW);
              flower->mask.ct_state &= ~(TCA_FLOWER_KEY_CT_FLAGS_NEW);
          }
@@ -3170,7 +53303,68 @@ index 9845e8d3fe..262faf3c62 100644
      }
  
      if (mask->ct_zone) {
-@@ -1638,7 +1640,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
+@@ -1574,7 +1675,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
+     const struct flow *key = &match->flow;
+     struct flow *mask = &match->wc.masks;
+     const struct flow_tnl *tnl = &match->flow.tunnel;
+-    const struct flow_tnl *tnl_mask = &mask->tunnel;
++    struct flow_tnl *tnl_mask = &mask->tunnel;
+     struct tc_action *action;
+     bool recirc_act = false;
+     uint32_t block_id = 0;
+@@ -1615,17 +1716,49 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
+         flower.key.tunnel.ttl = tnl->ip_ttl;
+         flower.key.tunnel.tp_src = tnl->tp_src;
+         flower.key.tunnel.tp_dst = tnl->tp_dst;
++
+         flower.mask.tunnel.ipv4.ipv4_src = tnl_mask->ip_src;
+         flower.mask.tunnel.ipv4.ipv4_dst = tnl_mask->ip_dst;
+         flower.mask.tunnel.ipv6.ipv6_src = tnl_mask->ipv6_src;
+         flower.mask.tunnel.ipv6.ipv6_dst = tnl_mask->ipv6_dst;
+         flower.mask.tunnel.tos = tnl_mask->ip_tos;
+         flower.mask.tunnel.ttl = tnl_mask->ip_ttl;
++        flower.mask.tunnel.tp_src = tnl_mask->tp_src;
++        /* XXX: We should be setting the mask from 'tnl_mask->tp_dst' here, but
++         * some hardware drivers (mlx5) don't support masked matches and will
++         * refuse to offload such flows, keeping them in the software path.
++         * Degrading the flow to an exact match for now as a workaround. */
++        flower.mask.tunnel.tp_dst = OVS_BE16_MAX;
+         flower.mask.tunnel.id = (tnl->flags & FLOW_TNL_F_KEY) ? tnl_mask->tun_id : 0;
+-        flower_match_to_tun_opt(&flower, tnl, tnl_mask);
++
++        memset(&tnl_mask->ip_src, 0, sizeof tnl_mask->ip_src);
++        memset(&tnl_mask->ip_dst, 0, sizeof tnl_mask->ip_dst);
++        memset(&tnl_mask->ipv6_src, 0, sizeof tnl_mask->ipv6_src);
++        memset(&tnl_mask->ipv6_dst, 0, sizeof tnl_mask->ipv6_dst);
++        memset(&tnl_mask->ip_tos, 0, sizeof tnl_mask->ip_tos);
++        memset(&tnl_mask->ip_ttl, 0, sizeof tnl_mask->ip_ttl);
++        memset(&tnl_mask->tp_src, 0, sizeof tnl_mask->tp_src);
++        memset(&tnl_mask->tp_dst, 0, sizeof tnl_mask->tp_dst);
++
++        memset(&tnl_mask->tun_id, 0, sizeof tnl_mask->tun_id);
++        tnl_mask->flags &= ~FLOW_TNL_F_KEY;
++
++        /* XXX: This is wrong!  We're ignoring the DF and CSUM flag
++         * configuration requested by the user.  However, TC currently has
++         * no way to pass these flags in a flower key, and their masks are
++         * set by default, meaning tunnel offloading will not work at all
++         * unless they are cleared.  Keeping the incorrect behavior
++         * for now. */
++        tnl_mask->flags &= ~(FLOW_TNL_F_DONT_FRAGMENT | FLOW_TNL_F_CSUM);
++
++        if (!strcmp(netdev_get_type(netdev), "geneve")) {
++            flower_match_to_tun_opt(&flower, tnl, tnl_mask);
++        }
+         flower.tunnel = true;
++    } else {
++        /* There is no tunnel metadata to match on, but there could be some
++         * mask bits set due to flow translation artifacts.  Clear them. */
++        memset(&mask->tunnel, 0, sizeof mask->tunnel);
+     }
+-    memset(&mask->tunnel, 0, sizeof mask->tunnel);
+ 
+     flower.key.eth_type = key->dl_type;
+     flower.mask.eth_type = mask->dl_type;
+@@ -1638,7 +1771,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
  
      if (mask->vlans[0].tpid && eth_type_vlan(key->vlans[0].tpid)) {
          flower.key.encap_eth_type[0] = flower.key.eth_type;
@@ -3179,7 +53373,16 @@ index 9845e8d3fe..262faf3c62 100644
          flower.key.eth_type = key->vlans[0].tpid;
          flower.mask.eth_type = mask->vlans[0].tpid;
      }
-@@ -1841,7 +1843,25 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
+@@ -1734,7 +1867,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
+             memset(&mask->arp_tha, 0, sizeof mask->arp_tha);
+     }
+ 
+-    if (is_ip_any(key)) {
++    if (is_ip_any(key) && !is_ipv6_fragment_and_masked(key, mask)) {
+         flower.key.ip_proto = key->nw_proto;
+         flower.mask.ip_proto = mask->nw_proto;
+         mask->nw_proto = 0;
+@@ -1841,7 +1974,25 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
                  VLOG_DBG_RL(&rl, "Can't find netdev for output port %d", port);
                  return ENODEV;
              }
@@ -3205,7 +53408,28 @@ index 9845e8d3fe..262faf3c62 100644
              action->out.ingress = is_internal_port(netdev_get_type(outdev));
              action->type = TC_ACT_OUTPUT;
              flower.action_count++;
-@@ -2015,9 +2035,7 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
+@@ -1879,10 +2030,6 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
+             if (err) {
+                 return err;
+             }
+-            if (action->type == TC_ACT_ENCAP) {
+-                action->encap.tp_dst = info->tp_dst_port;
+-                action->encap.no_csum = !info->tunnel_csum_on;
+-            }
+         } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) {
+             const struct nlattr *set = nl_attr_get(nla);
+             const size_t set_len = nl_attr_get_size(nla);
+@@ -1989,7 +2136,8 @@ netdev_tc_flow_get(struct netdev *netdev,
+     }
+ 
+     in_port = netdev_ifindex_to_odp_port(id.ifindex);
+-    parse_tc_flower_to_match(&flower, match, actions, stats, attrs, buf, false);
++    parse_tc_flower_to_match(netdev, &flower, match, actions,
++                             stats, attrs, buf, false);
+ 
+     match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
+     match->flow.in_port.odp_port = in_port;
+@@ -2015,9 +2163,7 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
      if (stats) {
          memset(stats, 0, sizeof *stats);
          if (!tc_get_flower(&id, &flower)) {
@@ -3216,6 +53440,73 @@ index 9845e8d3fe..262faf3c62 100644
          }
      }
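
As a rough illustration of the masked-vs-exact distinction behind both XXX
workarounds in the netdev_tc_flow_put() hunks above (a sketch, not part of
the patch): a masked match accepts a field when every mask-selected bit
agrees, and an exact match is just the all-ones mask, which is what forcing
flower.mask.tunnel.tp_dst to OVS_BE16_MAX produces.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: mask == 0xffff is an exact match; mask == 0
     * wildcards the field entirely. */
    static bool
    masked_match16(uint16_t field, uint16_t value, uint16_t mask)
    {
        return (field & mask) == (value & mask);
    }
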
  
+diff --git a/lib/netdev-offload.h b/lib/netdev-offload.h
+index 8237a85ddb..93eb2df48b 100644
+--- a/lib/netdev-offload.h
++++ b/lib/netdev-offload.h
+@@ -65,9 +65,6 @@ struct netdev_flow_dump {
+ 
+ /* Flow offloading. */
+ struct offload_info {
+-    ovs_be16 tp_dst_port; /* Destination port for tunnel in SET action */
+-    uint8_t tunnel_csum_on; /* Tunnel header with checksum */
+-
+     bool recirc_id_shared_with_tc;  /* Indicates whever tc chains will be in
+                                      * sync with datapath recirc ids. */
+ 
+diff --git a/lib/netdev.c b/lib/netdev.c
+index 8305f6c427..ce0d4117ac 100644
+--- a/lib/netdev.c
++++ b/lib/netdev.c
+@@ -387,25 +387,30 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp)
+     ovs_mutex_lock(&netdev_mutex);
+     netdev = shash_find_data(&netdev_shash, name);
+ 
+-    if (netdev &&
+-        type && type[0] && strcmp(type, netdev->netdev_class->type)) {
+-
+-        if (netdev->auto_classified) {
+-            /* If this device was first created without a classification type,
+-             * for example due to routing or tunneling code, and they keep a
+-             * reference, a "classified" call to open will fail. In this case
+-             * we remove the classless device, and re-add it below. We remove
+-             * the netdev from the shash, and change the sequence, so owners of
+-             * the old classless device can release/cleanup. */
+-            if (netdev->node) {
+-                shash_delete(&netdev_shash, netdev->node);
+-                netdev->node = NULL;
+-                netdev_change_seq_changed(netdev);
+-            }
++    if (netdev && type && type[0]) {
++        if (strcmp(type, netdev->netdev_class->type)) {
++
++            if (netdev->auto_classified) {
++                /* If this device was first created without a classification
++                 * type, for example due to routing or tunneling code, and they
++                 * keep a reference, a "classified" call to open will fail.
++                 * In this case we remove the classless device, and re-add it
++                 * below. We remove the netdev from the shash, and change the
++                 * sequence, so owners of the old classless device can
++                 * release/cleanup. */
++                if (netdev->node) {
++                    shash_delete(&netdev_shash, netdev->node);
++                    netdev->node = NULL;
++                    netdev_change_seq_changed(netdev);
++                }
+ 
+-            netdev = NULL;
+-        } else {
+-            error = EEXIST;
++                netdev = NULL;
++            } else {
++                error = EEXIST;
++            }
++        } else if (netdev->auto_classified) {
++            /* If netdev reopened with type "system", clear auto_classified. */
++            netdev->auto_classified = false;
+         }
+     }
+ 
 diff --git a/lib/odp-util.c b/lib/odp-util.c
 index 9a705cffa3..2d2a6893c6 100644
 --- a/lib/odp-util.c
@@ -3630,11 +53921,59 @@ index 0e536e8c27..661ac4e18a 100644
  void lldp_process_packet(struct lldp *cfg, const struct dp_packet *);
  void lldp_put_packet(struct lldp *lldp, struct dp_packet *packet,
                       const struct eth_addr eth_src);
+diff --git a/lib/ovs-numa.c b/lib/ovs-numa.c
+index 9e3fa54216..6a197772c1 100644
+--- a/lib/ovs-numa.c
++++ b/lib/ovs-numa.c
+@@ -387,6 +387,35 @@ ovs_numa_get_n_cores_on_numa(int numa_id)
+     return OVS_CORE_UNSPEC;
+ }
+ 
++/* Returns the largest core_id.
++ *
++ * Returns OVS_CORE_UNSPEC if core_id information is not found.
++ *
++ * Returning OVS_CORE_UNSPEC comes with a caveat: the caller must
++ * check the return value against OVS_CORE_UNSPEC.  OVS_CORE_UNSPEC
++ * is a positive integer (INT_MAX), which an unchecked caller could
++ * easily mistake for the largest core_id.
++ */
++unsigned
++ovs_numa_get_largest_core_id(void)
++{
++    struct cpu_core *core;
++    unsigned max_id = 0;
++
++    if (!found_numa_and_core) {
++        return OVS_CORE_UNSPEC;
++    }
++
++    HMAP_FOR_EACH (core, hmap_node, &all_cpu_cores) {
++        if (core->core_id > max_id) {
++            max_id = core->core_id;
++        }
++    }
++
++    return max_id;
++}
++
+ static struct ovs_numa_dump *
+ ovs_numa_dump_create(void)
+ {
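
A minimal caller-side sketch of the check that the comment above insists on;
per_core_array_size() is a hypothetical helper, not part of the patch.

    #include <stddef.h>
    #include "ovs-numa.h"  /* ovs_numa_get_largest_core_id(), OVS_CORE_UNSPEC */

    /* Hypothetical helper: size a per-core array, or return 0 when the
     * topology is unknown.  Skipping the OVS_CORE_UNSPEC check would
     * request an INT_MAX-sized array. */
    static size_t
    per_core_array_size(void)
    {
        unsigned int max_core = ovs_numa_get_largest_core_id();

        return max_core == OVS_CORE_UNSPEC ? 0 : (size_t) max_core + 1;
    }
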
 diff --git a/lib/ovs-numa.h b/lib/ovs-numa.h
-index ecc251a7ff..83bd10cca5 100644
+index ecc251a7ff..02c9e84cf5 100644
 --- a/lib/ovs-numa.h
 +++ b/lib/ovs-numa.h
-@@ -68,9 +68,9 @@ void ovs_numa_dump_destroy(struct ovs_numa_dump *);
+@@ -56,6 +56,7 @@ int ovs_numa_get_n_numas(void);
+ int ovs_numa_get_n_cores(void);
+ int ovs_numa_get_numa_id(unsigned core_id);
+ int ovs_numa_get_n_cores_on_numa(int numa_id);
++unsigned ovs_numa_get_largest_core_id(void);
+ struct ovs_numa_dump *ovs_numa_dump_cores_on_numa(int numa_id);
+ struct ovs_numa_dump *ovs_numa_dump_cores_with_cmask(const char *cmask);
+ struct ovs_numa_dump *ovs_numa_dump_n_cores_per_numa(int n);
+@@ -68,9 +69,9 @@ void ovs_numa_dump_destroy(struct ovs_numa_dump *);
  int ovs_numa_thread_setaffinity_core(unsigned core_id);
  
  #define FOR_EACH_CORE_ON_DUMP(ITER, DUMP)                    \
@@ -3769,6 +54108,46 @@ index 34ea163eef..d8ce3c00de 100644
  #ifdef  __cplusplus
  }
  #endif
+diff --git a/lib/ovs-thread.c b/lib/ovs-thread.c
+index 805cba6223..78ed3e9707 100644
+--- a/lib/ovs-thread.c
++++ b/lib/ovs-thread.c
+@@ -663,6 +663,23 @@ count_cpu_cores(void)
+     return n_cores > 0 ? n_cores : 0;
+ }
+ 
++/* Returns the total number of cores on the system, or 0 if the
++ * number cannot be determined. */
++int
++count_total_cores(void)
++{
++    long int n_cores;
++
++#ifndef _WIN32
++    n_cores = sysconf(_SC_NPROCESSORS_CONF);
++#else
++    n_cores = 0;
++    errno = ENOTSUP;
++#endif
++
++    return n_cores > 0 ? n_cores : 0;
++}
++
+ /* Returns 'true' if current thread is PMD thread. */
+ bool
+ thread_is_pmd(void)
+diff --git a/lib/ovs-thread.h b/lib/ovs-thread.h
+index 3b444ccdcc..aac5e19c99 100644
+--- a/lib/ovs-thread.h
++++ b/lib/ovs-thread.h
+@@ -522,6 +522,7 @@ bool may_fork(void);
+ /* Useful functions related to threading. */
+ 
+ int count_cpu_cores(void);
++int count_total_cores(void);
+ bool thread_is_pmd(void);
+ 
+ #endif /* ovs-thread.h */
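
For contrast with count_cpu_cores(), a standalone sketch of what
count_total_cores() reports: processors configured on the system, whether or
not they are currently usable.  This assumes a Linux/glibc system where both
sysconf() names exist.

    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        long online = sysconf(_SC_NPROCESSORS_ONLN);  /* Currently online. */
        long total = sysconf(_SC_NPROCESSORS_CONF);   /* All configured. */

        printf("online %ld / configured %ld\n",
               online > 0 ? online : 0, total > 0 ? total : 0);
        return 0;
    }
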
 diff --git a/lib/ovsdb-cs.c b/lib/ovsdb-cs.c
 index dead31275d..9713c7dc7c 100644
 --- a/lib/ovsdb-cs.c
@@ -4101,7 +54480,7 @@ index 62c4621181..321043282e 100644
      }
      hmap_destroy(&list->hmap);
 diff --git a/lib/packets.c b/lib/packets.c
-index d0fba81766..874066e3c6 100644
+index d0fba81766..1dcd4a6fcd 100644
 --- a/lib/packets.c
 +++ b/lib/packets.c
 @@ -427,9 +427,9 @@ add_mpls(struct dp_packet *packet, ovs_be16 ethtype, ovs_be32 lse,
@@ -4116,6 +54495,66 @@ index d0fba81766..874066e3c6 100644
          packet->l2_5_ofs = 0;
          packet->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                               ntohs(ethtype));
+@@ -1153,7 +1153,7 @@ packet_set_ipv4_addr(struct dp_packet *packet,
+  *
+  * This function assumes that L3 and L4 offsets are set in the packet. */
+ static bool
+-packet_rh_present(struct dp_packet *packet, uint8_t *nexthdr)
++packet_rh_present(struct dp_packet *packet, uint8_t *nexthdr, bool *first_frag)
+ {
+     const struct ovs_16aligned_ip6_hdr *nh;
+     size_t len;
+@@ -1203,6 +1203,8 @@ packet_rh_present(struct dp_packet *packet, uint8_t *nexthdr)
+             const struct ovs_16aligned_ip6_frag *frag_hdr
+                 = ALIGNED_CAST(struct ovs_16aligned_ip6_frag *, data);
+ 
++            *first_frag = !(frag_hdr->ip6f_offlg & IP6F_OFF_MASK) &&
++                           (frag_hdr->ip6f_offlg & IP6F_MORE_FRAG);
+             *nexthdr = frag_hdr->ip6f_nxt;
+             len = sizeof *frag_hdr;
+         } else if (*nexthdr == IPPROTO_ROUTING) {
+@@ -1333,18 +1335,20 @@ packet_set_ipv6(struct dp_packet *packet, const struct in6_addr *src,
+                 uint8_t key_hl)
+ {
+     struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(packet);
++    bool recalc_csum = true;
+     uint8_t proto = 0;
+     bool rh_present;
+ 
+-    rh_present = packet_rh_present(packet, &proto);
++    rh_present = packet_rh_present(packet, &proto, &recalc_csum);
+ 
+     if (memcmp(&nh->ip6_src, src, sizeof(ovs_be32[4]))) {
+-        packet_set_ipv6_addr(packet, proto, nh->ip6_src.be32, src, true);
++        packet_set_ipv6_addr(packet, proto, nh->ip6_src.be32,
++                             src, recalc_csum);
+     }
+ 
+     if (memcmp(&nh->ip6_dst, dst, sizeof(ovs_be32[4]))) {
+         packet_set_ipv6_addr(packet, proto, nh->ip6_dst.be32, dst,
+-                             !rh_present);
++                             !rh_present && recalc_csum);
+     }
+ 
+     packet_set_ipv6_tc(&nh->ip6_flow, key_tc);
+@@ -1705,7 +1709,7 @@ compose_ipv6(struct dp_packet *packet, uint8_t proto,
+              const struct in6_addr *src, const struct in6_addr *dst,
+              uint8_t key_tc, ovs_be32 key_fl, uint8_t key_hl, int size)
+ {
+-    struct ip6_hdr *nh;
++    struct ovs_16aligned_ip6_hdr *nh;
+     void *data;
+ 
+     nh = dp_packet_l3(packet);
+@@ -1843,7 +1847,7 @@ packet_put_ra_prefix_opt(struct dp_packet *b,
+                          const ovs_be128 prefix)
+ {
+     size_t prev_l4_size = dp_packet_l4_size(b);
+-    struct ip6_hdr *nh = dp_packet_l3(b);
++    struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(b);
+     nh->ip6_plen = htons(prev_l4_size + ND_PREFIX_OPT_LEN);
+ 
+     struct ovs_nd_prefix_opt *prefix_opt =
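
The first_frag logic above keys off two standard IPv6 fragment-header bits:
the first fragment has offset zero and the more-fragments flag set, and only
that fragment carries the L4 header whose checksum could be updated.
Restated as a self-contained predicate using the <netinet/ip6.h> definitions:

    #include <stdbool.h>
    #include <netinet/ip6.h>  /* struct ip6_frag, IP6F_OFF_MASK, IP6F_MORE_FRAG */

    /* True for the first fragment of a fragmented packet; later
     * fragments carry no L4 header, so no checksum can be fixed up. */
    static bool
    is_first_fragment(const struct ip6_frag *frag_hdr)
    {
        return !(frag_hdr->ip6f_offlg & IP6F_OFF_MASK)
               && (frag_hdr->ip6f_offlg & IP6F_MORE_FRAG);
    }
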
 diff --git a/lib/pcap-file.c b/lib/pcap-file.c
 index 41835f6f4d..3ed7ea4880 100644
 --- a/lib/pcap-file.c
@@ -4716,10 +55155,25 @@ index fcaddf10ad..71039e24f1 100644
  
  /* Attempts to guess the content type of a stream whose first few bytes were
 diff --git a/lib/tc.c b/lib/tc.c
-index adb2d3182a..bbb8c86f7b 100644
+index adb2d3182a..276cc54b35 100644
 --- a/lib/tc.c
 +++ b/lib/tc.c
-@@ -568,16 +568,17 @@ nl_parse_flower_vlan(struct nlattr **attrs, struct tc_flower *flower)
+@@ -395,8 +395,14 @@ static const struct nl_policy tca_flower_policy[] = {
+     [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .type = NL_A_UNSPEC,
+                                            .min_len = sizeof(struct in6_addr),
+                                            .optional = true, },
++    [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NL_A_U16,
++                                          .optional = true, },
++    [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NL_A_U16,
++                                               .optional = true, },
+     [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NL_A_U16,
+                                           .optional = true, },
++    [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NL_A_U16,
++                                               .optional = true, },
+     [TCA_FLOWER_KEY_FLAGS] = { .type = NL_A_BE32, .optional = true, },
+     [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NL_A_BE32, .optional = true, },
+     [TCA_FLOWER_KEY_IP_TTL] = { .type = NL_A_U8,
+@@ -568,16 +574,17 @@ nl_parse_flower_vlan(struct nlattr **attrs, struct tc_flower *flower)
  
      flower->key.encap_eth_type[0] =
          nl_attr_get_be16(attrs[TCA_FLOWER_KEY_ETH_TYPE]);
@@ -4739,7 +55193,7 @@ index adb2d3182a..bbb8c86f7b 100644
      }
  
      if (!attrs[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
-@@ -590,17 +591,18 @@ nl_parse_flower_vlan(struct nlattr **attrs, struct tc_flower *flower)
+@@ -590,17 +597,18 @@ nl_parse_flower_vlan(struct nlattr **attrs, struct tc_flower *flower)
      }
  
      flower->key.encap_eth_type[1] = flower->key.encap_eth_type[0];
@@ -4760,7 +55214,57 @@ index adb2d3182a..bbb8c86f7b 100644
      }
  }
  
-@@ -937,24 +939,21 @@ nl_parse_flower_ip(struct nlattr **attrs, struct tc_flower *flower) {
+@@ -705,15 +713,17 @@ flower_tun_geneve_opt_check_len(struct tun_metadata *key,
+     const struct geneve_opt *opt, *opt_mask;
+     int len, cnt = 0;
+ 
++    if (key->present.len != mask->present.len) {
++        goto bad_length;
++    }
++
+     len = key->present.len;
+     while (len) {
+         opt = &key->opts.gnv[cnt];
+         opt_mask = &mask->opts.gnv[cnt];
+ 
+         if (opt->length != opt_mask->length) {
+-            VLOG_ERR_RL(&error_rl,
+-                        "failed to parse tun options; key/mask length differ");
+-            return EINVAL;
++            goto bad_length;
+         }
+ 
+         cnt += sizeof(struct geneve_opt) / 4 + opt->length;
+@@ -721,6 +731,11 @@ flower_tun_geneve_opt_check_len(struct tun_metadata *key,
+     }
+ 
+     return 0;
++
++bad_length:
++    VLOG_ERR_RL(&error_rl,
++                "failed to parse tun options; key/mask length differ");
++    return EINVAL;
+ }
+ 
+ static int
+@@ -758,7 +773,15 @@ nl_parse_flower_tunnel(struct nlattr **attrs, struct tc_flower *flower)
+         flower->key.tunnel.ipv6.ipv6_dst =
+             nl_attr_get_in6_addr(attrs[TCA_FLOWER_KEY_ENC_IPV6_DST]);
+     }
+-    if (attrs[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]) {
++    if (attrs[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]) {
++        flower->mask.tunnel.tp_src =
++            nl_attr_get_be16(attrs[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]);
++        flower->key.tunnel.tp_src =
++            nl_attr_get_be16(attrs[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]);
++    }
++    if (attrs[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]) {
++        flower->mask.tunnel.tp_dst =
++            nl_attr_get_be16(attrs[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]);
+         flower->key.tunnel.tp_dst =
+             nl_attr_get_be16(attrs[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]);
+     }
+@@ -937,24 +960,21 @@ nl_parse_flower_ip(struct nlattr **attrs, struct tc_flower *flower) {
              key->icmp_code =
                 nl_attr_get_u8(attrs[TCA_FLOWER_KEY_ICMPV4_CODE]);
              mask->icmp_code =
@@ -4790,7 +55294,7 @@ index adb2d3182a..bbb8c86f7b 100644
              mask->icmp_type =
                  nl_attr_get_u8(attrs[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK]);
          }
-@@ -1006,14 +1005,14 @@ static const struct nl_policy pedit_policy[] = {
+@@ -1006,14 +1026,14 @@ static const struct nl_policy pedit_policy[] = {
  static int
  nl_parse_act_pedit(struct nlattr *options, struct tc_flower *flower)
  {
@@ -4808,7 +55312,7 @@ index adb2d3182a..bbb8c86f7b 100644
      size_t keys_ex_size, left;
      int type, i = 0, err;
  
-@@ -1092,7 +1091,6 @@ nl_parse_act_pedit(struct nlattr *options, struct tc_flower *flower)
+@@ -1092,7 +1112,6 @@ nl_parse_act_pedit(struct nlattr *options, struct tc_flower *flower)
          i++;
      }
  
@@ -4816,7 +55320,7 @@ index adb2d3182a..bbb8c86f7b 100644
      action->type = TC_ACT_PEDIT;
  
      return 0;
-@@ -1314,8 +1312,8 @@ nl_parse_act_gact(struct nlattr *options, struct tc_flower *flower)
+@@ -1314,8 +1333,8 @@ nl_parse_act_gact(struct nlattr *options, struct tc_flower *flower)
      struct nlattr *gact_attrs[ARRAY_SIZE(gact_policy)];
      const struct tc_gact *p;
      struct nlattr *gact_parms;
@@ -4826,7 +55330,7 @@ index adb2d3182a..bbb8c86f7b 100644
  
      if (!nl_parse_nested(options, gact_policy, gact_attrs,
                           ARRAY_SIZE(gact_policy))) {
-@@ -1335,8 +1333,9 @@ nl_parse_act_gact(struct nlattr *options, struct tc_flower *flower)
+@@ -1335,8 +1354,9 @@ nl_parse_act_gact(struct nlattr *options, struct tc_flower *flower)
          return EINVAL;
      }
  
@@ -4838,7 +55342,7 @@ index adb2d3182a..bbb8c86f7b 100644
  
      return 0;
  }
-@@ -1357,9 +1356,9 @@ nl_parse_act_mirred(struct nlattr *options, struct tc_flower *flower)
+@@ -1357,9 +1377,9 @@ nl_parse_act_mirred(struct nlattr *options, struct tc_flower *flower)
      struct nlattr *mirred_attrs[ARRAY_SIZE(mirred_policy)];
      const struct tc_mirred *m;
      const struct nlattr *mirred_parms;
@@ -4849,7 +55353,7 @@ index adb2d3182a..bbb8c86f7b 100644
  
      if (!nl_parse_nested(options, mirred_policy, mirred_attrs,
                           ARRAY_SIZE(mirred_policy))) {
-@@ -1387,8 +1386,8 @@ nl_parse_act_mirred(struct nlattr *options, struct tc_flower *flower)
+@@ -1387,8 +1407,8 @@ nl_parse_act_mirred(struct nlattr *options, struct tc_flower *flower)
      action->type = TC_ACT_OUTPUT;
  
      mirred_tm = mirred_attrs[TCA_MIRRED_TM];
@@ -4860,7 +55364,7 @@ index adb2d3182a..bbb8c86f7b 100644
  
      return 0;
  }
-@@ -1487,7 +1486,9 @@ nl_parse_act_ct(struct nlattr *options, struct tc_flower *flower)
+@@ -1487,7 +1507,9 @@ nl_parse_act_ct(struct nlattr *options, struct tc_flower *flower)
                  if (ipv4_max) {
                      ovs_be32 addr = nl_attr_get_be32(ipv4_max);
  
@@ -4871,7 +55375,7 @@ index adb2d3182a..bbb8c86f7b 100644
                  }
              } else if (ipv6_min) {
                  action->ct.range.ip_family = AF_INET6;
-@@ -1496,7 +1497,9 @@ nl_parse_act_ct(struct nlattr *options, struct tc_flower *flower)
+@@ -1496,7 +1518,9 @@ nl_parse_act_ct(struct nlattr *options, struct tc_flower *flower)
                  if (ipv6_max) {
                      struct in6_addr addr = nl_attr_get_in6_addr(ipv6_max);
  
@@ -4882,7 +55386,7 @@ index adb2d3182a..bbb8c86f7b 100644
                  }
              }
  
-@@ -1504,6 +1507,10 @@ nl_parse_act_ct(struct nlattr *options, struct tc_flower *flower)
+@@ -1504,6 +1528,10 @@ nl_parse_act_ct(struct nlattr *options, struct tc_flower *flower)
                  action->ct.range.port.min = nl_attr_get_be16(port_min);
                  if (port_max) {
                      action->ct.range.port.max = nl_attr_get_be16(port_max);
@@ -4893,7 +55397,7 @@ index adb2d3182a..bbb8c86f7b 100644
                  }
              }
          }
-@@ -1702,6 +1709,9 @@ static const struct nl_policy stats_policy[] = {
+@@ -1702,6 +1730,9 @@ static const struct nl_policy stats_policy[] = {
      [TCA_STATS_BASIC] = { .type = NL_A_UNSPEC,
                            .min_len = sizeof(struct gnet_stats_basic),
                            .optional = false, },
@@ -4903,7 +55407,7 @@ index adb2d3182a..bbb8c86f7b 100644
  };
  
  static int
-@@ -1714,8 +1724,9 @@ nl_parse_single_action(struct nlattr *action, struct tc_flower *flower,
+@@ -1714,8 +1745,9 @@ nl_parse_single_action(struct nlattr *action, struct tc_flower *flower,
      const char *act_kind;
      struct nlattr *action_attrs[ARRAY_SIZE(act_policy)];
      struct nlattr *stats_attrs[ARRAY_SIZE(stats_policy)];
@@ -4915,7 +55419,7 @@ index adb2d3182a..bbb8c86f7b 100644
      int err = 0;
  
      if (!nl_parse_nested(action, act_policy, action_attrs,
-@@ -1771,10 +1782,30 @@ nl_parse_single_action(struct nlattr *action, struct tc_flower *flower,
+@@ -1771,10 +1803,30 @@ nl_parse_single_action(struct nlattr *action, struct tc_flower *flower,
          return EPROTO;
      }
  
@@ -4950,7 +55454,7 @@ index adb2d3182a..bbb8c86f7b 100644
      }
  
      return 0;
-@@ -2399,14 +2430,14 @@ nl_msg_put_act_flags(struct ofpbuf *request) {
+@@ -2399,14 +2451,14 @@ nl_msg_put_act_flags(struct ofpbuf *request) {
   * first_word_mask/last_word_mask - the mask to use for the first/last read
   * (as we read entire words). */
  static void
@@ -4968,7 +55472,7 @@ index adb2d3182a..bbb8c86f7b 100644
  
      max_offset = m->offset + m->size;
      start_offset = ROUND_DOWN(m->offset, 4);
-@@ -2473,7 +2504,8 @@ csum_update_flag(struct tc_flower *flower,
+@@ -2473,7 +2525,8 @@ csum_update_flag(struct tc_flower *flower,
  
  static int
  nl_msg_put_flower_rewrite_pedits(struct ofpbuf *request,
@@ -4978,7 +55482,7 @@ index adb2d3182a..bbb8c86f7b 100644
  {
      struct {
          struct tc_pedit sel;
-@@ -2497,7 +2529,7 @@ nl_msg_put_flower_rewrite_pedits(struct ofpbuf *request,
+@@ -2497,12 +2550,12 @@ nl_msg_put_flower_rewrite_pedits(struct ofpbuf *request,
              continue;
          }
  
@@ -4987,7 +55491,14 @@ index adb2d3182a..bbb8c86f7b 100644
                       &first_word_mask, &mask, &data);
  
          for (j = 0; j < cnt; j++,  mask++, data++, cur_offset += 4) {
-@@ -2556,6 +2588,29 @@ nl_msg_put_flower_acts_release(struct ofpbuf *request, uint16_t act_index)
+-            ovs_be32 mask_word = *mask;
+-            ovs_be32 data_word = *data;
++            ovs_be32 mask_word = get_unaligned_be32(mask);
++            ovs_be32 data_word = get_unaligned_be32(data);
+ 
+             if (j == 0) {
+                 mask_word &= first_word_mask;
+@@ -2556,6 +2609,29 @@ nl_msg_put_flower_acts_release(struct ofpbuf *request, uint16_t act_index)
      nl_msg_end_nested(request, act_offset);
  }
  
@@ -5017,7 +55528,7 @@ index adb2d3182a..bbb8c86f7b 100644
  static int
  nl_msg_put_flower_acts(struct ofpbuf *request, struct tc_flower *flower)
  {
-@@ -2572,20 +2627,22 @@ nl_msg_put_flower_acts(struct ofpbuf *request, struct tc_flower *flower)
+@@ -2572,20 +2648,22 @@ nl_msg_put_flower_acts(struct ofpbuf *request, struct tc_flower *flower)
  
          action = flower->actions;
          for (i = 0; i < flower->action_count; i++, action++) {
@@ -5046,7 +55557,42 @@ index adb2d3182a..bbb8c86f7b 100644
                  }
              }
              break;
-@@ -2914,13 +2971,13 @@ nl_msg_put_flower_options(struct ofpbuf *request, struct tc_flower *flower)
+@@ -2792,13 +2870,16 @@ nl_msg_put_flower_tunnel(struct ofpbuf *request, struct tc_flower *flower)
+     struct in6_addr *ipv6_dst_mask = &flower->mask.tunnel.ipv6.ipv6_dst;
+     struct in6_addr *ipv6_src = &flower->key.tunnel.ipv6.ipv6_src;
+     struct in6_addr *ipv6_dst = &flower->key.tunnel.ipv6.ipv6_dst;
+-    ovs_be16 tp_dst = flower->key.tunnel.tp_dst;
+     ovs_be32 id = be64_to_be32(flower->key.tunnel.id);
++    ovs_be16 tp_src = flower->key.tunnel.tp_src;
++    ovs_be16 tp_dst = flower->key.tunnel.tp_dst;
+     uint8_t tos = flower->key.tunnel.tos;
+     uint8_t ttl = flower->key.tunnel.ttl;
+     uint8_t tos_mask = flower->mask.tunnel.tos;
+     uint8_t ttl_mask = flower->mask.tunnel.ttl;
+     ovs_be64 id_mask = flower->mask.tunnel.id;
++    ovs_be16 tp_src_mask = flower->mask.tunnel.tp_src;
++    ovs_be16 tp_dst_mask = flower->mask.tunnel.tp_dst;
+ 
+     if (ipv4_dst_mask || ipv4_src_mask) {
+         nl_msg_put_be32(request, TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
+@@ -2824,8 +2905,15 @@ nl_msg_put_flower_tunnel(struct ofpbuf *request, struct tc_flower *flower)
+         nl_msg_put_u8(request, TCA_FLOWER_KEY_ENC_IP_TTL, ttl);
+         nl_msg_put_u8(request, TCA_FLOWER_KEY_ENC_IP_TTL_MASK, ttl_mask);
+     }
+-    if (tp_dst) {
++    if (tp_src_mask) {
++        nl_msg_put_be16(request, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, tp_src);
++        nl_msg_put_be16(request, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
++                        tp_src_mask);
++    }
++    if (tp_dst_mask) {
+         nl_msg_put_be16(request, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, tp_dst);
++        nl_msg_put_be16(request, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
++                        tp_dst_mask);
+     }
+     if (id_mask) {
+         nl_msg_put_be32(request, TCA_FLOWER_KEY_ENC_KEY_ID, id);
+@@ -2914,13 +3002,13 @@ nl_msg_put_flower_options(struct ofpbuf *request, struct tc_flower *flower)
              FLOWER_PUT_MASKED_VALUE(icmp_code, TCA_FLOWER_KEY_ICMPV6_CODE);
              FLOWER_PUT_MASKED_VALUE(icmp_type, TCA_FLOWER_KEY_ICMPV6_TYPE);
          }
@@ -5065,7 +55611,7 @@ index adb2d3182a..bbb8c86f7b 100644
      if (host_eth_type == ETH_P_IP) {
              FLOWER_PUT_MASKED_VALUE(ipv4.ipv4_src, TCA_FLOWER_KEY_IPV4_SRC);
              FLOWER_PUT_MASKED_VALUE(ipv4.ipv4_dst, TCA_FLOWER_KEY_IPV4_DST);
-@@ -2993,12 +3050,79 @@ nl_msg_put_flower_options(struct ofpbuf *request, struct tc_flower *flower)
+@@ -2993,12 +3081,79 @@ nl_msg_put_flower_options(struct ofpbuf *request, struct tc_flower *flower)
      return 0;
  }
  
@@ -5086,17 +55632,17 @@ index adb2d3182a..bbb8c86f7b 100644
 +        key_b[i] = ((uint8_t *) &b->key)[i] & mask_b;
 +    }
 +    ds_put_cstr(&s, "\nExpected Mask:\n");
-+    ds_put_hex(&s, &a->mask, sizeof a->mask);
++    ds_put_sparse_hex_dump(&s, &a->mask, sizeof a->mask, 0, false);
 +    ds_put_cstr(&s, "\nReceived Mask:\n");
-+    ds_put_hex(&s, &b->mask, sizeof b->mask);
++    ds_put_sparse_hex_dump(&s, &b->mask, sizeof b->mask, 0, false);
 +    ds_put_cstr(&s, "\nExpected Key:\n");
-+    ds_put_hex(&s, &a->key, sizeof a->key);
++    ds_put_sparse_hex_dump(&s, &a->key, sizeof a->key, 0, false);
 +    ds_put_cstr(&s, "\nReceived Key:\n");
-+    ds_put_hex(&s, &b->key, sizeof b->key);
++    ds_put_sparse_hex_dump(&s, &b->key, sizeof b->key, 0, false);
 +    ds_put_cstr(&s, "\nExpected Masked Key:\n");
-+    ds_put_hex(&s, key_a, sizeof key_a);
++    ds_put_sparse_hex_dump(&s, key_a, sizeof key_a, 0, false);
 +    ds_put_cstr(&s, "\nReceived Masked Key:\n");
-+    ds_put_hex(&s, key_b, sizeof key_b);
++    ds_put_sparse_hex_dump(&s, key_b, sizeof key_b, 0, false);
 +
 +    if (a->action_count != b->action_count) {
 +        /* If action count is not equal, we print all actions to see which
@@ -5146,7 +55692,7 @@ index adb2d3182a..bbb8c86f7b 100644
          return false;
      }
  
-@@ -3011,8 +3135,8 @@ cmp_tc_flower_match_action(const struct tc_flower *a,
+@@ -3011,8 +3166,8 @@ cmp_tc_flower_match_action(const struct tc_flower *a,
          uint8_t key_b = ((uint8_t *)&b->key)[i] & mask;
  
          if (key_a != key_b) {
@@ -5157,7 +55703,7 @@ index adb2d3182a..bbb8c86f7b 100644
              return false;
          }
      }
-@@ -3022,14 +3146,15 @@ cmp_tc_flower_match_action(const struct tc_flower *a,
+@@ -3022,14 +3177,15 @@ cmp_tc_flower_match_action(const struct tc_flower *a,
      const struct tc_action *action_b = b->actions;
  
      if (a->action_count != b->action_count) {
@@ -5348,11 +55894,146 @@ index 7415e6291f..b556762277 100644
          ovs_list_remove(&b->list_node);
          int error = vconn_send_block(vconn, b);
          if (error) {
+diff --git a/m4/ax_func_posix_memalign.m4 b/m4/ax_func_posix_memalign.m4
+index bd60adcbc8..2442ceca74 100644
+--- a/m4/ax_func_posix_memalign.m4
++++ b/m4/ax_func_posix_memalign.m4
+@@ -1,5 +1,5 @@
+ # ===========================================================================
+-#  http://www.gnu.org/software/autoconf-archive/ax_func_posix_memalign.html
++#  https://www.gnu.org/software/autoconf-archive/ax_func_posix_memalign.html
+ # ===========================================================================
+ #
+ # SYNOPSIS
+@@ -22,12 +22,12 @@
+ #   and this notice are preserved. This file is offered as-is, without any
+ #   warranty.
+ 
+-#serial 7
++#serial 9
+ 
+ AC_DEFUN([AX_FUNC_POSIX_MEMALIGN],
+ [AC_CACHE_CHECK([for working posix_memalign],
+   [ax_cv_func_posix_memalign_works],
+-  [AC_TRY_RUN([
++  [AC_RUN_IFELSE([AC_LANG_SOURCE([[
+ #include <stdlib.h>
+ 
+ int
+@@ -39,7 +39,7 @@ main ()
+    * the size word. */
+   exit (posix_memalign (&buffer, sizeof(void *), 123) != 0);
+ }
+-    ],
++    ]])],
+     [ax_cv_func_posix_memalign_works=yes],
+     [ax_cv_func_posix_memalign_works=no],
+     [ax_cv_func_posix_memalign_works=no])])
+diff --git a/m4/openvswitch.m4 b/m4/openvswitch.m4
+index 4c3bace6ef..21808483e9 100644
+--- a/m4/openvswitch.m4
++++ b/m4/openvswitch.m4
+@@ -21,7 +21,7 @@ AC_DEFUN([OVS_CHECK_COVERAGE],
+   [AC_REQUIRE([AC_PROG_CC])
+    AC_ARG_ENABLE(
+      [coverage],
+-     [AC_HELP_STRING([--enable-coverage],
++     [AS_HELP_STRING([--enable-coverage],
+                      [Enable gcov coverage tool.])],
+      [case "${enableval}" in
+         (yes) coverage=true ;;
+@@ -50,7 +50,7 @@ dnl Checks for --enable-ndebug and defines NDEBUG if it is specified.
+ AC_DEFUN([OVS_CHECK_NDEBUG],
+   [AC_ARG_ENABLE(
+      [ndebug],
+-     [AC_HELP_STRING([--enable-ndebug],
++     [AS_HELP_STRING([--enable-ndebug],
+                      [Disable debugging features for max performance])],
+      [case "${enableval}" in
+         (yes) ndebug=true ;;
+@@ -64,7 +64,7 @@ dnl Checks for --enable-usdt-probes and defines HAVE_USDT if it is specified.
+ AC_DEFUN([OVS_CHECK_USDT], [
+   AC_ARG_ENABLE(
+     [usdt-probes],
+-    [AC_HELP_STRING([--enable-usdt-probes],
++    [AS_HELP_STRING([--enable-usdt-probes],
+                     [Enable User Statically Defined Tracing (USDT) probes])],
+     [case "${enableval}" in
+        (yes) usdt=true ;;
+@@ -227,7 +227,7 @@ dnl Checks for libcap-ng.
+ AC_DEFUN([OVS_CHECK_LIBCAPNG],
+   [AC_ARG_ENABLE(
+      [libcapng],
+-     [AC_HELP_STRING([--disable-libcapng], [Disable Linux capability support])],
++     [AS_HELP_STRING([--disable-libcapng], [Disable Linux capability support])],
+      [case "${enableval}" in
+         (yes) libcapng=true ;;
+         (no)  libcapng=false ;;
+@@ -263,7 +263,7 @@ dnl Checks for OpenSSL.
+ AC_DEFUN([OVS_CHECK_OPENSSL],
+   [AC_ARG_ENABLE(
+      [ssl],
+-     [AC_HELP_STRING([--disable-ssl], [Disable OpenSSL support])],
++     [AS_HELP_STRING([--disable-ssl], [Disable OpenSSL support])],
+      [case "${enableval}" in
+         (yes) ssl=true ;;
+         (no)  ssl=false ;;
+@@ -320,7 +320,7 @@ dnl Checks for the directory in which to store the PKI.
+ AC_DEFUN([OVS_CHECK_PKIDIR],
+   [AC_ARG_WITH(
+      [pkidir],
+-     AC_HELP_STRING([--with-pkidir=DIR],
++     AS_HELP_STRING([--with-pkidir=DIR],
+                     [PKI hierarchy directory [[LOCALSTATEDIR/lib/openvswitch/pki]]]),
+      [PKIDIR=$withval],
+      [PKIDIR='${localstatedir}/lib/openvswitch/pki'])
+@@ -330,7 +330,7 @@ dnl Checks for the directory in which to store pidfiles.
+ AC_DEFUN([OVS_CHECK_RUNDIR],
+   [AC_ARG_WITH(
+      [rundir],
+-     AC_HELP_STRING([--with-rundir=DIR],
++     AS_HELP_STRING([--with-rundir=DIR],
+                     [directory used for pidfiles
+                     [[LOCALSTATEDIR/run/openvswitch]]]),
+      [RUNDIR=$withval],
+@@ -341,7 +341,7 @@ dnl Checks for the directory in which to store logs.
+ AC_DEFUN([OVS_CHECK_LOGDIR],
+   [AC_ARG_WITH(
+      [logdir],
+-     AC_HELP_STRING([--with-logdir=DIR],
++     AS_HELP_STRING([--with-logdir=DIR],
+                     [directory used for logs [[LOCALSTATEDIR/log/PACKAGE]]]),
+      [LOGDIR=$withval],
+      [LOGDIR='${localstatedir}/log/${PACKAGE}'])
+@@ -351,7 +351,7 @@ dnl Checks for the directory in which to store the Open vSwitch database.
+ AC_DEFUN([OVS_CHECK_DBDIR],
+   [AC_ARG_WITH(
+      [dbdir],
+-     AC_HELP_STRING([--with-dbdir=DIR],
++     AS_HELP_STRING([--with-dbdir=DIR],
+                     [directory used for conf.db [[SYSCONFDIR/PACKAGE]]]),
+      [DBDIR=$withval],
+      [DBDIR='${sysconfdir}/${PACKAGE}'])
 diff --git a/ofproto/bond.c b/ofproto/bond.c
-index cdfdf0b9d8..845f69e21d 100644
+index cdfdf0b9d8..6ecd6e1c9f 100644
 --- a/ofproto/bond.c
 +++ b/ofproto/bond.c
-@@ -338,7 +338,7 @@ static void
+@@ -185,10 +185,14 @@ static struct bond_member *choose_output_member(const struct bond *,
+                                                 uint16_t vlan)
+     OVS_REQ_RDLOCK(rwlock);
+ static void update_recirc_rules__(struct bond *);
++static bool bond_may_recirc(const struct bond *);
++static void bond_update_post_recirc_rules__(struct bond *, bool force)
++    OVS_REQ_WRLOCK(rwlock);
+ static bool bond_is_falling_back_to_ab(const struct bond *);
+ static void bond_add_lb_output_buckets(const struct bond *);
+ static void bond_del_lb_output_buckets(const struct bond *);
+ 
++
+ /* Attempts to parse 's' as the name of a bond balancing mode.  If successful,
+  * stores the mode in '*balance' and returns true.  Otherwise returns false
+  * without modifying '*balance'. */
+@@ -338,7 +342,7 @@ static void
  update_recirc_rules__(struct bond *bond)
  {
      struct match match;
@@ -5361,7 +56042,7 @@ index cdfdf0b9d8..845f69e21d 100644
      uint64_t ofpacts_stub[128 / 8];
      struct ofpbuf ofpacts;
      int i;
-@@ -372,7 +372,7 @@ update_recirc_rules__(struct bond *bond)
+@@ -372,7 +376,7 @@ update_recirc_rules__(struct bond *bond)
  
      ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
  
@@ -5370,7 +56051,62 @@ index cdfdf0b9d8..845f69e21d 100644
          int error;
          switch (pr_op->op) {
          case ADD:
-@@ -1258,7 +1258,7 @@ insert_bal(struct ovs_list *bals, struct bond_member *member)
+@@ -510,6 +514,12 @@ bond_reconfigure(struct bond *bond, const struct bond_settings *s)
+         bond_entry_reset(bond);
+     }
+ 
++    if (bond->ofproto->backer->rt_support.odp.recirc
++        && bond_may_recirc(bond)) {
++        /* Update rules to reflect possible recirc_id changes. */
++        update_recirc_rules(bond);
++    }
++
+     ovs_rwlock_unlock(&rwlock);
+     return revalidate;
+ }
+@@ -723,6 +733,12 @@ bond_run(struct bond *bond, enum lacp_status lacp_status)
+         bond_choose_active_member(bond);
+     }
+ 
++    if (bond->ofproto->backer->rt_support.odp.recirc
++        && bond_may_recirc(bond)) {
++        /* Update rules to reflect possible link state changes. */
++        bond_update_post_recirc_rules__(bond, false);
++    }
++
+     revalidate = bond->bond_revalidate;
+     bond->bond_revalidate = false;
+     ovs_rwlock_unlock(&rwlock);
+@@ -1038,7 +1054,7 @@ bond_may_recirc(const struct bond *bond)
+ }
+ 
+ static void
+-bond_update_post_recirc_rules__(struct bond* bond, const bool force)
++bond_update_post_recirc_rules__(struct bond* bond, bool force)
+     OVS_REQ_WRLOCK(rwlock)
+ {
+    struct bond_entry *e;
+@@ -1086,6 +1102,19 @@ bond_update_post_recirc_rules(struct bond *bond, uint32_t *recirc_id,
+     }
+ }
+ 
++void
++bond_get_recirc_id_and_hash_basis(struct bond *bond, uint32_t *recirc_id,
++                                  uint32_t *hash_basis)
++{
++    ovs_rwlock_rdlock(&rwlock);
++    if (bond_may_recirc(bond)) {
++        *recirc_id = bond->recirc_id;
++        *hash_basis = bond->basis;
++    } else {
++        *recirc_id = *hash_basis = 0;
++    }
++    ovs_rwlock_unlock(&rwlock);
++}
+ 
+ /* Rebalancing. */
+ 
+@@ -1258,7 +1287,7 @@ insert_bal(struct ovs_list *bals, struct bond_member *member)
              break;
          }
      }
@@ -5379,6 +56115,20 @@ index cdfdf0b9d8..845f69e21d 100644
  }
  
  /* Removes 'member' from its current list and then inserts it into 'bals' so
+diff --git a/ofproto/bond.h b/ofproto/bond.h
+index 1683ec8781..e7f7ea7a95 100644
+--- a/ofproto/bond.h
++++ b/ofproto/bond.h
+@@ -128,6 +128,9 @@ void bond_rebalance(struct bond *);
+ void bond_update_post_recirc_rules(struct bond *, uint32_t *recirc_id,
+                                    uint32_t *hash_basis);
+ 
++void bond_get_recirc_id_and_hash_basis(struct bond *, uint32_t *recirc_id,
++                                       uint32_t *hash_basis);
++
+ bool bond_use_lb_output_action(const struct bond *bond);
+ 
+ #endif /* bond.h */
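
A hedged sketch of how the new read-only accessor pairs with the existing
updater; bond_wants_recirc() is hypothetical, but the zero-means-disabled
convention follows the output_normal() comment further below ("a valid
'recirc_id' can not be zero").

    #include <stdbool.h>
    #include <stdint.h>
    #include "ofproto/bond.h"

    /* Hypothetical caller: read the bond's recirculation configuration
     * without touching datapath rules (no side effects). */
    static bool
    bond_wants_recirc(struct bond *bond, uint32_t *recirc_id,
                      uint32_t *hash_basis)
    {
        bond_get_recirc_id_and_hash_basis(bond, recirc_id, hash_basis);
        return *recirc_id != 0;
    }
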
 diff --git a/ofproto/connmgr.c b/ofproto/connmgr.c
 index fa8f6cd0e8..172a58cfb7 100644
 --- a/ofproto/connmgr.c
@@ -5483,6 +56233,16 @@ index 82d8dfa147..3992251f5f 100644
          switch (rule->op) {
          case ADD:
              ofproto_add_flow(ib->ofproto, &rule->match, rule->priority,
+diff --git a/ofproto/libofproto.pc.in b/ofproto/libofproto.pc.in
+index 2740712505..49894fb695 100644
+--- a/ofproto/libofproto.pc.in
++++ b/ofproto/libofproto.pc.in
+@@ -8,4 +8,4 @@ Description: OpenFlow library of Open vSwitch
+ Version: @VERSION@
+ Libs: -L${libdir} -lofproto
+ Libs.private: @LIBS@
+-Cflags: -I${includedir}/openvswitch
++Cflags: -I${includedir}
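
A sketch of the intended usage, assuming the standard header layout where
the public headers live under ${includedir}/openvswitch/: with the corrected
include path, consumers spell the prefix out.

    /* With Cflags: -I${includedir}, consumers write the conventional: */
    #include <openvswitch/vlog.h>
    #include <openvswitch/json.h>
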
 diff --git a/ofproto/netflow.c b/ofproto/netflow.c
 index ed58de17de..aad9f9c77a 100644
 --- a/ofproto/netflow.c
@@ -5767,6 +56527,64 @@ index 78a54c715d..109940ad2a 100644
              ovs_list_remove(&node->node);
              oftrace_node_destroy(node);
          }
+diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
+index 57f94df544..fe47090584 100644
+--- a/ofproto/ofproto-dpif-upcall.c
++++ b/ofproto/ofproto-dpif-upcall.c
+@@ -362,6 +362,10 @@ static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
+                                      const char *argv[], void *aux);
+ static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
+                                  const char *argv[], void *aux);
++static void upcall_unixctl_pause(struct unixctl_conn *conn, int argc,
++                                 const char *argv[], void *aux);
++static void upcall_unixctl_resume(struct unixctl_conn *conn, int argc,
++                                  const char *argv[], void *aux);
+ 
+ static struct udpif_key *ukey_create_from_upcall(struct upcall *,
+                                                  struct flow_wildcards *);
+@@ -434,6 +438,10 @@ udpif_init(void)
+                                  upcall_unixctl_dump_wait, NULL);
+         unixctl_command_register("revalidator/purge", "", 0, 0,
+                                  upcall_unixctl_purge, NULL);
++        unixctl_command_register("revalidator/pause", NULL, 0, 0,
++                                 upcall_unixctl_pause, NULL);
++        unixctl_command_register("revalidator/resume", NULL, 0, 0,
++                                 upcall_unixctl_resume, NULL);
+         ovsthread_once_done(&once);
+     }
+ }
+@@ -3099,6 +3107,31 @@ upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
+     unixctl_command_reply(conn, "");
+ }
+ 
++static void
++upcall_unixctl_pause(struct unixctl_conn *conn, int argc OVS_UNUSED,
++                     const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
++{
++    struct udpif *udpif;
++
++    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
++        udpif_pause_revalidators(udpif);
++    }
++    unixctl_command_reply(conn, "");
++}
++
++static void
++upcall_unixctl_resume(struct unixctl_conn *conn, int argc OVS_UNUSED,
++                      const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
++{
++    struct udpif *udpif;
++
++    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
++        udpif_resume_revalidators(udpif);
++    }
++    unixctl_command_reply(conn, "");
++}
++
++
+ /* Flows are sorted in the following order:
+  * netdev, flow state (offloaded/kernel path), flow_pps_rate.
+  */
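
The two handlers above differ only in the helper they invoke; a condensed
refactoring sketch (hypothetical, not what the patch adds), typically driven
from the shell as "ovs-appctl revalidator/pause" or "revalidator/resume":

    /* Hypothetical common helper, using the patch's own names. */
    static void
    upcall_unixctl_set_paused(struct unixctl_conn *conn, bool pause)
    {
        struct udpif *udpif;

        LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
            if (pause) {
                udpif_pause_revalidators(udpif);
            } else {
                udpif_resume_revalidators(udpif);
            }
        }
        unixctl_command_reply(conn, "");
    }
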
 diff --git a/ofproto/ofproto-dpif-xlate-cache.c b/ofproto/ofproto-dpif-xlate-cache.c
 index dcc91cb380..9224ee2e6d 100644
 --- a/ofproto/ofproto-dpif-xlate-cache.c
@@ -5804,7 +56622,7 @@ index 114aff8ea3..0fc6d2ea60 100644
      enum xc_type type;
      union {
 diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
-index 578cbfe581..7716c22f49 100644
+index 578cbfe581..e804c4d887 100644
 --- a/ofproto/ofproto-dpif-xlate.c
 +++ b/ofproto/ofproto-dpif-xlate.c
 @@ -865,7 +865,7 @@ xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
@@ -5855,6 +56673,26 @@ index 578cbfe581..7716c22f49 100644
          xlate_xbundle_remove(xcfg, xbundle);
      }
  
+@@ -1515,7 +1515,7 @@ xlate_lookup_ofproto_(const struct dpif_backer *backer,
+         if (OVS_UNLIKELY(!recirc_id_node)) {
+             if (errorp) {
+                 *errorp = xasprintf("no recirculation data for recirc_id "
+-                                    "%"PRIu32, flow->recirc_id);
++                                    "%#"PRIx32, flow->recirc_id);
+             }
+             return NULL;
+         }
+@@ -1556,8 +1556,8 @@ xlate_lookup_ofproto_(const struct dpif_backer *backer,
+         if (errorp) {
+             *errorp = (tnl_port_should_receive(flow)
+                        ? xstrdup("no OpenFlow tunnel port for this packet")
+-                       : xasprintf("no OpenFlow tunnel port for datapath "
+-                                   "port %"PRIu32, flow->in_port.odp_port));
++                       : xasprintf("no OpenFlow port for datapath port "
++                                   "%"PRIu32, flow->in_port.odp_port));
+         }
+         return NULL;
+     }
 @@ -1639,7 +1639,7 @@ xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
  
      xbridges = &xcfg->xbridges;
@@ -5906,7 +56744,29 @@ index 578cbfe581..7716c22f49 100644
  
  
          /* If this mirror selects on the basis of VLAN, and it does not select
-@@ -3015,7 +3037,7 @@ xlate_normal(struct xlate_ctx *ctx)
+@@ -2444,9 +2466,18 @@ output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
+             /* In case recirculation is not actually in use, 'xr.recirc_id'
+              * will be set to '0', since a valid 'recirc_id' can
+              * not be zero.  */
+-            bond_update_post_recirc_rules(out_xbundle->bond,
+-                                          &xr.recirc_id,
+-                                          &xr.hash_basis);
++            if (ctx->xin->allow_side_effects) {
++                bond_update_post_recirc_rules(out_xbundle->bond,
++                                              &xr.recirc_id,
++                                              &xr.hash_basis);
++            } else {
++                /* If side effects are not allowed, only fetch the bond
++                 * configuration.  Rule updates will be handled by the
++                 * main thread later. */
++                bond_get_recirc_id_and_hash_basis(out_xbundle->bond,
++                                                  &xr.recirc_id,
++                                                  &xr.hash_basis);
++            }
+             if (xr.recirc_id) {
+                 /* Use recirculation instead of output. */
+                 use_recirc = true;
+@@ -3015,7 +3046,7 @@ xlate_normal(struct xlate_ctx *ctx)
      bool is_grat_arp = is_gratuitous_arp(flow, wc);
      if (ctx->xin->allow_side_effects
          && flow->packet_type == htonl(PT_ETH)
@@ -5915,7 +56775,7 @@ index 578cbfe581..7716c22f49 100644
      ) {
          update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
                                is_grat_arp);
-@@ -3024,12 +3046,14 @@ xlate_normal(struct xlate_ctx *ctx)
+@@ -3024,12 +3055,14 @@ xlate_normal(struct xlate_ctx *ctx)
          struct xc_entry *entry;
  
          /* Save just enough info to update mac learning table later. */
@@ -5936,7 +56796,7 @@ index 578cbfe581..7716c22f49 100644
      }
  
      /* Determine output bundle. */
-@@ -3048,7 +3072,6 @@ xlate_normal(struct xlate_ctx *ctx)
+@@ -3048,7 +3081,6 @@ xlate_normal(struct xlate_ctx *ctx)
               */
              ctx->xout->slow |= SLOW_ACTION;
  
@@ -5944,7 +56804,7 @@ index 578cbfe581..7716c22f49 100644
              if (mcast_snooping_is_membership(flow->tp_src) ||
                  mcast_snooping_is_query(flow->tp_src)) {
                  if (ctx->xin->allow_side_effects && ctx->xin->packet) {
-@@ -3523,6 +3546,9 @@ propagate_tunnel_data_to_flow__(struct flow *dst_flow,
+@@ -3523,6 +3555,9 @@ propagate_tunnel_data_to_flow__(struct flow *dst_flow,
      dst_flow->dl_dst = dmac;
      dst_flow->dl_src = smac;
  
@@ -5954,7 +56814,7 @@ index 578cbfe581..7716c22f49 100644
      dst_flow->packet_type = htonl(PT_ETH);
      dst_flow->nw_dst = src_flow->tunnel.ip_dst;
      dst_flow->nw_src = src_flow->tunnel.ip_src;
-@@ -3654,14 +3680,27 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
+@@ -3654,14 +3689,27 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
  
      err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
      if (err) {
@@ -5984,7 +56844,7 @@ index 578cbfe581..7716c22f49 100644
          }
          return err;
      }
-@@ -4176,6 +4215,10 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
+@@ -4176,6 +4224,10 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
          if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
              flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                                 ntohs(flow->dl_type));
@@ -5995,7 +56855,7 @@ index 578cbfe581..7716c22f49 100644
          }
      }
  
-@@ -5622,7 +5665,8 @@ xlate_sample_action(struct xlate_ctx *ctx,
+@@ -5622,7 +5674,8 @@ xlate_sample_action(struct xlate_ctx *ctx,
  
      /* Scale the probability from 16-bit to 32-bit while representing
       * the same percentage. */
@@ -6005,7 +56865,18 @@ index 578cbfe581..7716c22f49 100644
  
      /* If ofp_port in flow sample action is equel to ofp_port,
       * this sample action is a input port action. */
-@@ -7784,6 +7828,12 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+@@ -7609,6 +7662,10 @@ xlate_wc_finish(struct xlate_ctx *ctx)
+             ctx->wc->masks.vlans[i].tci = 0;
+         }
+     }
++    /* Clear tunnel wc bits if original packet is non-tunnel. */
++    if (!flow_tnl_dst_is_set(&ctx->xin->upcall_flow->tunnel)) {
++        memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
++    }
+ }
+ 
+ /* Translates the flow, actions, or rule in 'xin' into datapath actions in
+@@ -7784,6 +7841,12 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
          goto exit;
      }
  
@@ -6574,7 +57445,7 @@ index fbcefafc6e..a4f9d38f11 100644
  /ovsdb-client.1
  /ovsdb-doc
 diff --git a/ovsdb/automake.mk b/ovsdb/automake.mk
-index 62cc02686f..3b3140102b 100644
+index 62cc02686f..eba713bb6d 100644
 --- a/ovsdb/automake.mk
 +++ b/ovsdb/automake.mk
 @@ -148,4 +148,25 @@ ovsdb/ovsdb-server.5: \
@@ -6595,7 +57466,7 @@ index 62cc02686f..3b3140102b 100644
 +CLEANFILES += ovsdb/ovsdb.local-config.5
 +man_MANS += ovsdb/ovsdb.local-config.5
 +ovsdb/ovsdb.local-config.5: \
-+	ovsdb/ovsdb-doc ovsdb/ ovsdb/local-config.xml ovsdb/local-config.ovsschema
++	ovsdb/ovsdb-doc ovsdb/local-config.xml ovsdb/local-config.ovsschema
 +	$(AM_V_GEN)$(OVSDB_DOC) \
 +		--version=$(VERSION) \
 +		$(srcdir)/ovsdb/local-config.ovsschema \
@@ -6758,6 +57629,16 @@ index 351c39d8aa..916a1f414e 100644
          ovsdb_jsonrpc_monitor_destroy(m, false);
      }
  }
+diff --git a/ovsdb/libovsdb.pc.in b/ovsdb/libovsdb.pc.in
+index fe367ea7b1..54c9039cfd 100644
+--- a/ovsdb/libovsdb.pc.in
++++ b/ovsdb/libovsdb.pc.in
+@@ -8,4 +8,4 @@ Description: OVSDB library of Open vSwitch
+ Version: @VERSION@
+ Libs: -L${libdir} -lovsdb
+ Libs.private: @LIBS@
+-Cflags: -I${includedir}/openvswitch
++Cflags: -I${includedir}
 diff --git a/ovsdb/local-config.ovsschema b/ovsdb/local-config.ovsschema
 new file mode 100644
 index 0000000000..bd86d0f4f6
@@ -7403,7 +58284,7 @@ index 30760233ee..e685c8103b 100644
          raft_server_destroy(s);
      }
 diff --git a/ovsdb/raft.c b/ovsdb/raft.c
-index 1a3447a8dd..856d083f21 100644
+index 1a3447a8dd..cf9edf35c6 100644
 --- a/ovsdb/raft.c
 +++ b/ovsdb/raft.c
 @@ -74,9 +74,12 @@ enum raft_failure_test {
@@ -7751,6 +58632,15 @@ index 1a3447a8dd..856d083f21 100644
  /* Figure 3.1: "If there exists an N such that N > commitIndex, a
   * majority of matchIndex[i] >= N, and log[N].term == currentTerm, set
   * commitIndex = N (sections 3.5 and 3.6)." */
+@@ -4000,7 +4039,7 @@ raft_write_snapshot(struct raft *raft, struct ovsdb_log *log,
+     if (error) {
+         return error;
+     }
+-    ovsdb_log_mark_base(raft->log);
++    ovsdb_log_mark_base(log);
+ 
+     /* Write log records. */
+     for (uint64_t index = new_log_start; index < raft->log_end; index++) {
 @@ -4142,6 +4181,10 @@ static void
  raft_handle_install_snapshot_request(
      struct raft *raft, const struct raft_install_snapshot_request *rq)
@@ -8006,18 +58896,136 @@ index 726c138bf0..7d3003bca3 100644
 -    struct ovsdb_trigger *t, *next;
 +    struct ovsdb_trigger *t;
  
-     bool run_triggers = db->run_triggers;
-     db->run_triggers_now = db->run_triggers = false;
+     bool run_triggers = db->run_triggers;
+     db->run_triggers_now = db->run_triggers = false;
+ 
+     bool disconnect_all = false;
+ 
+-    LIST_FOR_EACH_SAFE (t, next, node, &db->triggers) {
++    LIST_FOR_EACH_SAFE (t, node, &db->triggers) {
+         if (run_triggers
+             || now - t->created >= t->timeout_msec
+             || t->progress || t->txn_forward) {
+diff --git a/python/ovs/_json.c b/python/ovs/_json.c
+index ef7bb4b8ee..c36a140a8e 100644
+--- a/python/ovs/_json.c
++++ b/python/ovs/_json.c
+@@ -2,10 +2,6 @@
+ #include <openvswitch/json.h>
+ #include "structmember.h"
+ 
+-#if PY_MAJOR_VERSION >= 3
+-#define IS_PY3K
+-#endif
+-
+ typedef struct {
+     PyObject_HEAD
+     struct json_parser *_parser;
+@@ -54,7 +50,7 @@ Parser_feed(json_ParserObject * self, PyObject * args)
+     Py_ssize_t input_sz;
+     PyObject *input;
+     size_t rd;
+-    char *input_str;
++    const char *input_str;
+ 
+     if (self->_parser == NULL) {
+         return NULL;
+@@ -63,21 +59,13 @@ Parser_feed(json_ParserObject * self, PyObject * args)
+     if (!PyArg_UnpackTuple(args, "input", 1, 1, &input)) {
+         return NULL;
+     }
+-#ifdef IS_PY3K
+     if ((input_str = PyUnicode_AsUTF8AndSize(input, &input_sz)) == NULL) {
+-#else
+-    if (PyString_AsStringAndSize(input, &input_str, &input_sz) < 0) {
+-#endif
+         return NULL;
+     }
+ 
+     rd = json_parser_feed(self->_parser, input_str, (size_t) input_sz);
+ 
+-#ifdef IS_PY3K
+     return PyLong_FromSize_t(rd);
+-#else
+-    return PyInt_FromSize_t(rd);
+-#endif
+ }
+ 
+ static PyObject *
+@@ -123,7 +111,7 @@ json_to_python(struct json *json)
+             return dict;
+         }
+     case JSON_ARRAY:{
+-            int i;
++            size_t i;
+             PyObject *arr = PyList_New(json->array.n);
+ 
+             if (arr == NULL) {
+@@ -144,11 +132,7 @@ json_to_python(struct json *json)
+             return PyFloat_FromDouble(json->real);
+         } /* fall through to treat 0 as int */
+     case JSON_INTEGER:
+-#ifdef IS_PY3K
+         return PyLong_FromLong((long) json->integer);
+-#else
+-        return PyInt_FromLong((long) json->integer);
+-#endif
  
-     bool disconnect_all = false;
+     case JSON_STRING:
+         return PyUnicode_FromString(json->string);
+@@ -225,7 +209,6 @@ static PyTypeObject json_ParserType = {
+     Parser_new,                 /* tp_new */
+ };
  
--    LIST_FOR_EACH_SAFE (t, next, node, &db->triggers) {
-+    LIST_FOR_EACH_SAFE (t, node, &db->triggers) {
-         if (run_triggers
-             || now - t->created >= t->timeout_msec
-             || t->progress || t->txn_forward) {
+-#ifdef IS_PY3K
+ static struct PyModuleDef moduledef = {
+     PyModuleDef_HEAD_INIT,
+     "ovs._json",                /* m_name */
+@@ -238,32 +221,25 @@ static struct PyModuleDef moduledef = {
+     0,                          /* m_free */
+ };
+ 
+-#define INITERROR return NULL
+-#else /* !IS_PY3K */
+-#define INITERROR return
+-#endif
+-
+ PyMODINIT_FUNC
+-#ifdef IS_PY3K
+ PyInit__json(void)
+-#else
+-init_json(void)
+-#endif
+ {
+     PyObject *m;
+ 
+     if (PyType_Ready(&json_ParserType) < 0) {
+-        INITERROR;
++        return NULL;
+     }
+-#ifdef IS_PY3K
++
+     m = PyModule_Create(&moduledef);
+-#else
+-    m = Py_InitModule3("ovs._json", NULL, "OVS JSON Parser module");
+-#endif
++    if (!m) {
++        return NULL;
++    }
+ 
+     Py_INCREF(&json_ParserType);
+-    PyModule_AddObject(m, "Parser", (PyObject *) & json_ParserType);
+-#ifdef IS_PY3K
++    if (PyModule_AddObject(m, "Parser", (PyObject *) &json_ParserType) < 0) {
++        Py_DECREF(&json_ParserType);
++        Py_DECREF(m);
++        return NULL;
++    }
+     return m;
+-#endif
+ }
 diff --git a/python/ovs/db/idl.py b/python/ovs/db/idl.py
-index 4ecdcaa197..b87099ff52 100644
+index 4ecdcaa197..8f13d1f55a 100644
 --- a/python/ovs/db/idl.py
 +++ b/python/ovs/db/idl.py
 @@ -140,6 +140,47 @@ class ConditionState(object):
@@ -8026,7 +59034,7 @@ index 4ecdcaa197..b87099ff52 100644
  
 +class IdlTable(object):
 +    def __init__(self, idl, table):
-+        assert(isinstance(table, ovs.db.schema.TableSchema))
++        assert isinstance(table, ovs.db.schema.TableSchema)
 +        self._table = table
 +        self.need_table = False
 +        self.rows = custom_index.IndexedRows(self)
@@ -8048,7 +59056,7 @@ index 4ecdcaa197..b87099ff52 100644
 +
 +    @condition.setter
 +    def condition(self, condition):
-+        assert(isinstance(condition, list))
++        assert isinstance(condition, list)
 +        self.idl.cond_change(self.name, condition)
 +
 +    @classmethod
@@ -8355,11 +59363,110 @@ index c4c6c87e9f..6b0d023ae3 100644
          if deadline is not None:
              remaining = deadline - now
              return max(0, remaining)
+diff --git a/python/ovs/socket_util.py b/python/ovs/socket_util.py
+index 651012bf06..7b41dc44bf 100644
+--- a/python/ovs/socket_util.py
++++ b/python/ovs/socket_util.py
+@@ -23,6 +23,11 @@ import ovs.fatal_signal
+ import ovs.poller
+ import ovs.vlog
+ 
++try:
++    import ssl
++except ImportError:
++    ssl = None
++
+ if sys.platform == 'win32':
+     import ovs.winutils as winutils
+     import win32file
+@@ -178,7 +183,12 @@ def check_connection_completion(sock):
+         if revents & ovs.poller.POLLERR or revents & ovs.poller.POLLHUP:
+             try:
+                 # The following should raise an exception.
+-                sock.send("\0".encode(), socket.MSG_DONTWAIT)
++                if ssl and isinstance(sock, ssl.SSLSocket):
++                    # An SSL-wrapped socket does not allow
++                    # a non-zero optional flag.
++                    sock.send("\0".encode())
++                else:
++                    sock.send("\0".encode(), socket.MSG_DONTWAIT)
+ 
+                 # (Here's where we end up if it didn't.)
+                 # XXX rate-limit
+diff --git a/python/setup.py b/python/setup.py
+index cfe01763f3..36ced65089 100644
+--- a/python/setup.py
++++ b/python/setup.py
+@@ -12,9 +12,13 @@
+ 
+ import sys
+ 
+-from distutils.command.build_ext import build_ext
+-from distutils.errors import CCompilerError, DistutilsExecError, \
+-    DistutilsPlatformError
++from setuptools.command.build_ext import build_ext
++try:
++    from setuptools.errors import CCompilerError, ExecError, PlatformError
++except ImportError:  # Needed for setuptools < 59.0
++    from distutils.errors import CCompilerError
++    from distutils.errors import DistutilsExecError as ExecError
++    from distutils.errors import DistutilsPlatformError as PlatformError
+ 
+ import setuptools
+ 
+@@ -37,7 +41,7 @@ except IOError:
+           file=sys.stderr)
+     sys.exit(-1)
+ 
+-ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
++ext_errors = (CCompilerError, ExecError, PlatformError)
+ if sys.platform == 'win32':
+     ext_errors += (IOError, ValueError)
+ 
+@@ -53,7 +57,7 @@ class try_build_ext(build_ext):
+     def run(self):
+         try:
+             build_ext.run(self)
+-        except DistutilsPlatformError:
++        except PlatformError:
+             raise BuildFailed()
+ 
+     def build_extension(self, ext):
+@@ -102,6 +106,6 @@ except BuildFailed:
+     print("Retrying the build without the C extension.")
+     print("*" * 75)
+ 
+-    del(setup_args['cmdclass'])
+-    del(setup_args['ext_modules'])
++    del setup_args['cmdclass']
++    del setup_args['ext_modules']
+     setuptools.setup(**setup_args)
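The `setup.py` hunk above replaces the deprecated `distutils` imports with their `setuptools` equivalents; the error classes only gained their new home in `setuptools.errors` with setuptools 59, hence the fallback. The same shim works in any `setup.py` that must span both eras:

```python
try:
    # setuptools >= 59 exposes the error classes under new names.
    from setuptools.errors import CCompilerError, ExecError, PlatformError
except ImportError:
    # Older setuptools: fall back to the distutils spellings.
    from distutils.errors import CCompilerError
    from distutils.errors import DistutilsExecError as ExecError
    from distutils.errors import DistutilsPlatformError as PlatformError

# Catching this tuple lets a build degrade gracefully when an optional
# C extension cannot be compiled.
EXT_ERRORS = (CCompilerError, ExecError, PlatformError)
```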
 diff --git a/rhel/openvswitch-fedora.spec.in b/rhel/openvswitch-fedora.spec.in
-index 16ef1ac3ab..d0ae78e4ed 100644
+index 16ef1ac3ab..ddde4f7d12 100644
 --- a/rhel/openvswitch-fedora.spec.in
 +++ b/rhel/openvswitch-fedora.spec.in
-@@ -455,6 +455,7 @@ fi
+@@ -199,20 +199,6 @@ make install DESTDIR=$RPM_BUILD_ROOT
+ install -d -m 0755 $RPM_BUILD_ROOT%{_rundir}/openvswitch
+ install -d -m 0750 $RPM_BUILD_ROOT%{_localstatedir}/log/openvswitch
+ install -d -m 0755 $RPM_BUILD_ROOT%{_sysconfdir}/openvswitch
+-copy_headers() {
+-    src=$1
+-    dst=$RPM_BUILD_ROOT/$2
+-    install -d -m 0755 $dst
+-    install -m 0644 $src/*.h $dst
+-}
+-copy_headers include %{_includedir}/openvswitch
+-copy_headers include/openflow %{_includedir}/openvswitch/openflow
+-copy_headers include/openvswitch %{_includedir}/openvswitch/openvswitch
+-copy_headers include/sparse %{_includedir}/openvswitch/sparse
+-copy_headers include/sparse/arpa %{_includedir}/openvswitch/sparse/arpa
+-copy_headers include/sparse/netinet %{_includedir}/openvswitch/sparse/netinet
+-copy_headers include/sparse/sys %{_includedir}/openvswitch/sparse/sys
+-copy_headers lib %{_includedir}/openvswitch/lib
+ 
+ %if %{with dpdk}
+ install -p -D -m 0644 rhel/usr_lib_udev_rules.d_91-vfio.rules \
+@@ -455,6 +441,7 @@ fi
  %{_datadir}/openvswitch/scripts/ovs-ctl
  %{_datadir}/openvswitch/scripts/ovs-kmod-ctl
  %{_datadir}/openvswitch/scripts/ovs-systemd-reload
@@ -8367,7 +59474,7 @@ index 16ef1ac3ab..d0ae78e4ed 100644
  %config %{_datadir}/openvswitch/vswitch.ovsschema
  %config %{_datadir}/openvswitch/vtep.ovsschema
  %{_bindir}/ovs-appctl
-@@ -476,6 +477,7 @@ fi
+@@ -476,6 +463,7 @@ fi
  %{_mandir}/man1/ovsdb-server.1*
  %{_mandir}/man1/ovsdb-tool.1*
  %{_mandir}/man5/ovsdb-server.5*
@@ -8376,10 +59483,35 @@ index 16ef1ac3ab..d0ae78e4ed 100644
  %{_mandir}/man5/ovsdb.5*
  %{_mandir}/man5/vtep.5*
 diff --git a/rhel/openvswitch.spec.in b/rhel/openvswitch.spec.in
-index 220e5c7472..2d8ff18bb0 100644
+index 220e5c7472..8ee8a99c22 100644
 --- a/rhel/openvswitch.spec.in
 +++ b/rhel/openvswitch.spec.in
-@@ -229,6 +229,7 @@ exit 0
+@@ -110,24 +110,6 @@ install -d -m 0755 $RPM_BUILD_ROOT%{_rundir}/openvswitch
+ install -d -m 0755 $RPM_BUILD_ROOT%{_localstatedir}/log/openvswitch
+ install -d -m 0755 $RPM_BUILD_ROOT/var/lib/openvswitch
+ 
+-copy_headers() {
+-    src=$1
+-    dst=$RPM_BUILD_ROOT/$2
+-    install -d -m 0755 $dst
+-    install -m 0644 $src/*.h $dst
+-}
+-copy_headers include %{_includedir}/openvswitch
+-copy_headers include/openflow %{_includedir}/openvswitch/openflow
+-copy_headers include/openvswitch %{_includedir}/openvswitch/openvswitch
+-copy_headers include/sparse %{_includedir}/openvswitch/sparse
+-copy_headers include/sparse/arpa %{_includedir}/openvswitch/sparse/arpa
+-copy_headers include/sparse/netinet %{_includedir}/openvswitch/sparse/netinet
+-copy_headers include/sparse/sys %{_includedir}/openvswitch/sparse/sys
+-copy_headers lib %{_includedir}/openvswitch/lib
+-
+-install -D -m 0644 lib/.libs/libopenvswitch.a \
+-    $RPM_BUILD_ROOT/%{_libdir}/libopenvswitch.a
+-
+ %check
+ %if %{with check}
+     if make check TESTSUITEFLAGS='%{_smp_mflags}' RECHECK=yes; then :;
+@@ -229,6 +211,7 @@ exit 0
  /usr/share/man/man1/ovsdb-client.1.gz
  /usr/share/man/man1/ovsdb-server.1.gz
  /usr/share/man/man1/ovsdb-tool.1.gz
@@ -8387,7 +59519,7 @@ index 220e5c7472..2d8ff18bb0 100644
  /usr/share/man/man5/ovsdb-server.5.gz
  /usr/share/man/man5/ovs-vswitchd.conf.db.5.gz
  %{_mandir}/man5/ovsdb.5*
-@@ -262,6 +263,7 @@ exit 0
+@@ -262,6 +245,7 @@ exit 0
  /usr/share/openvswitch/scripts/ovs-vtep
  /usr/share/openvswitch/scripts/sysconfig.template
  /usr/share/openvswitch/scripts/ovs-monitor-ipsec
@@ -8472,320 +59604,2019 @@ index 2bef06f39c..922185d61d 100644
 +AT_CHECK([ovs-vsctl set open_vswitch . other_config:pmd-auto-lb-rebal-interval="1000"])
 +CHECK_ALB_PARAM([interval], [1000 mins], [+$LINENUM])
 +
-+# Set Negative value
-+get_log_next_line_num
-+AT_CHECK([ovs-vsctl set open_vswitch . other_config:pmd-auto-lb-rebal-interval="-1"])
-+CHECK_ALB_PARAM([interval], [1 mins], [+$LINENUM])
++# Set Negative value
++get_log_next_line_num
++AT_CHECK([ovs-vsctl set open_vswitch . other_config:pmd-auto-lb-rebal-interval="-1"])
++CHECK_ALB_PARAM([interval], [1 mins], [+$LINENUM])
+ 
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
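The new checks above feed out-of-range values to `pmd-auto-lb-rebal-interval` and expect them to be sanitized: `-1` collapses to the 1-minute floor while `1000` is accepted unchanged. A hedged Python sketch of that observed clamping; the real validation lives in ovs-vswitchd's C code, and the floor constant is inferred from the test output, not from this patch:

```python
MIN_REBAL_INTERVAL_MINS = 1   # inferred from the "-1 -> 1 mins" check

def sanitize_rebal_interval(raw):
    try:
        value = int(raw)
    except ValueError:
        value = 0
    # Anything below the floor (including negatives) is clamped.
    return max(MIN_REBAL_INTERVAL_MINS, value)

assert sanitize_rebal_interval("-1") == 1
assert sanitize_rebal_interval("1000") == 1000
```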
+diff --git a/tests/classifier.at b/tests/classifier.at
+index cdcd72c156..f652b59837 100644
+--- a/tests/classifier.at
++++ b/tests/classifier.at
+@@ -129,6 +129,31 @@ Datapath actions: 3
+ OVS_VSWITCHD_STOP(["/'prefixes' with incompatible field: ipv6_label/d"])
+ AT_CLEANUP
+ 
++AT_SETUP([flow classifier - ipv6 ND dependency])
++OVS_VSWITCHD_START
++add_of_ports br0 1 2
++AT_DATA([flows.txt], [dnl
++ table=0,priority=100,ipv6,ipv6_src=1000::/10 actions=resubmit(,1)
++ table=0,priority=0 actions=NORMAL
++ table=1,priority=110,ipv6,ipv6_dst=1000::3 actions=resubmit(,2)
++ table=1,priority=100,ipv6,ipv6_dst=1000::4 actions=resubmit(,2)
++ table=1,priority=0 actions=NORMAL
++ table=2,priority=120,icmp6,nw_ttl=255,icmp_type=135,icmp_code=0,nd_target=1000::1 actions=NORMAL
++ table=2,priority=100,tcp actions=NORMAL
++ table=2,priority=100,icmp6 actions=NORMAL
++ table=2,priority=0 actions=NORMAL
++])
++AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
++
++# test ICMPv6 echo request (which should have no nd_target field)
++AT_CHECK([ovs-appctl ofproto/trace br0 "in_port=1,eth_src=f6:d2:b0:19:5e:7b,eth_dst=d2:49:19:91:78:fe,dl_type=0x86dd,ipv6_src=1000::3,ipv6_dst=1000::4,nw_proto=58,icmpv6_type=128,icmpv6_code=0"], [0], [stdout])
++AT_CHECK([tail -2 stdout], [0],
++  [Megaflow: recirc_id=0,eth,icmp6,in_port=1,dl_src=f6:d2:b0:19:5e:7b,dl_dst=d2:49:19:91:78:fe,ipv6_src=1000::/10,ipv6_dst=1000::4,nw_ttl=0,nw_frag=no
++Datapath actions: 100,2
++])
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
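The classifier test above traces an ICMPv6 echo request through tables that also match on `nd_target`. The regression being tested: `nd_target` is only defined for Neighbor Solicitation (type 135) and Neighbor Advertisement (type 136), so for an echo request (type 128) the classifier must not un-wildcard ND fields, and the resulting megaflow must omit them. A minimal sketch of that prerequisite:

```python
ND_SOLICIT, ND_ADVERT, ECHO_REQUEST = 135, 136, 128

def nd_fields_apply(icmpv6_type):
    """nd_target is meaningful only for ICMPv6 ND messages."""
    return icmpv6_type in (ND_SOLICIT, ND_ADVERT)

assert nd_fields_apply(ND_SOLICIT)
assert not nd_fields_apply(ECHO_REQUEST)   # echo request: no nd_target
```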
+ AT_BANNER([conjunctive match])
+ 
+ AT_SETUP([single conjunctive match])
+diff --git a/tests/completion.at b/tests/completion.at
+index 00e3a46b8b..b6155af253 100644
+--- a/tests/completion.at
++++ b/tests/completion.at
+@@ -351,22 +351,22 @@ OVS_VSWITCHD_START
+ TMP="$(ovs-vsctl --commands | cut -d',' -f1-2 | tr -d ',[[]]' | tr -s ' ' '\n')
+ $(ovs-vsctl --options | grep -- '--' | sed -e 's/=.*$/=/g')"
+ MATCH="$(PREPARE_MATCH_NOSPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "--db=unix:$OVS_RUNDIR/db.sock "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "--db=unix:$OVS_RUNDIR/db.sock "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # complete ovs-vsctl [TAB]
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test ""],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test ""],
+ [0], [dnl
+ ${MATCH}
+ ])
+ 
+ # complete on global options.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "--dry-run "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "--dry-run "],
+ [0], [dnl
+ ${MATCH}
+ ])
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "--dry-run --pretty "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "--dry-run --pretty "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -374,7 +374,7 @@ ${MATCH}
+ # complete on local options.
+ TMP="$(ovs-vsctl --commands | grep -- '--may-exist' | cut -d',' -f1-2 | tr -d ',[[]]' | tr -s ' ' '\n' | grep -v -- '--may-exist')"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "--may-exist "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "--may-exist "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -385,37 +385,37 @@ ${MATCH}
+ # test !.  no following arguments are expanded.
+ TMP="$(ovsdb-client --no-heading list-tables)"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # test ?.  will show completions for both current and following arguments.
+ ovs-vsctl br-set-external-id br0 bridge-id br0
+ MATCH="$(PREPARE_MATCH_SPACE(bridge-id --))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-get-external-id br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-get-external-id br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # test *.  argument with this prefix could be completed for zero or more times.
+ TMP="$(ovs-vsctl --no-heading --columns=_uuid,name list Bridge | tr -d '\"')"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP} --))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "destroy Bridge "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "destroy Bridge "],
+ [0], [dnl
+ ${MATCH}
+ ])
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "destroy Bridge br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "destroy Bridge br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # test +.  the first time, an argument is required, after that, it becomes '*'.
+ TMP="$(ovsdb-client --no-heading list-columns Open_vSwitch Bridge | awk '/key.*value/ { print $1":"; next } { print $1; next }')"
+ MATCH="$(PREPARE_MATCH_NOSPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Bridge br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Bridge br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ MATCH="$(PREPARE_MATCH_NOSPACE(${TMP} --))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Bridge br0 other_config:random_key=123 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Bridge br0 other_config:random_key=123 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -453,12 +453,12 @@ OVS_VSWITCHD_START(
+ #
+ TMP="$(ovsdb-client --no-heading list-tables)"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ MATCH="$(PREPARE_MATCH_SPACE(Open_vSwitch))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Open"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Open"],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -469,13 +469,13 @@ ${MATCH}
+ #
+ TMP="$(ovs-vsctl --no-heading --columns=_uuid list Open_vSwitch | tr -d '\"')"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Open_vSwitch "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Open_vSwitch "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ TMP="$(ovs-vsctl --no-heading --columns=_uuid,name list Bridge | tr -d '\"')"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Bridge "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Bridge "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -486,13 +486,13 @@ ${MATCH}
+ #
+ TMP="$(ovs-vsctl list-br)"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-to-vlan "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-to-vlan "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # this also helps check the '_ovs_vsctl_check_startswith_string'.
+ MATCH="$(PREPARE_MATCH_SPACE(--weird-br_name))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-to-vlan --"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-to-vlan --"],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -503,14 +503,14 @@ ${MATCH}
+ #
+ TMP="$(ovs-vsctl --no-heading --columns=name list Port | tr -d '\"')"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "port-to-br "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "port-to-br "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # complete on ports in particular bridge.
+ TMP="$(ovs-vsctl list-ports br0)"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "del-port br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "del-port br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -523,7 +523,7 @@ for br in `ovs-vsctl list-br`; do
+     TMP="${TMP} $(ovs-vsctl list-ifaces $br)"
+ done
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "iface-to-br "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "iface-to-br "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -533,7 +533,7 @@ ${MATCH}
+ # test: _ovs_vsctl_complete_bridge_fail_mode
+ #
+ MATCH="$(PREPARE_MATCH_SPACE(standalone secure))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set-fail-mode br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set-fail-mode br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -542,25 +542,25 @@ ${MATCH}
+ #
+ # test: _ovs_vsctl_complete_key
+ #
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-set-external-id br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-set-external-id br0 "],
+ [0], [dnl
+ 
+ ])
+ # since there is no key added yet, we will only get our own input.
+ MATCH="$(PREPARE_MATCH_SPACE(test_key))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-set-external-id br0 test_key"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-set-external-id br0 test_key"],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # now add a key, as we should see it.
+ ovs-vsctl br-set-external-id br0 bridge-id br0
+ MATCH="$(PREPARE_MATCH_SPACE(bridge-id))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-set-external-id br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-set-external-id br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ MATCH="$(PREPARE_MATCH_SPACE(bridge-id --))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-get-external-id br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-get-external-id br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -571,7 +571,7 @@ ${MATCH}
+ #
+ # should just return the user input.
+ MATCH="$(PREPARE_MATCH_SPACE(test_value --))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "br-set-external-id br0 bridge-id test_value"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "br-set-external-id br0 bridge-id test_value"],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -583,13 +583,13 @@ ${MATCH}
+ TMP="$(ovsdb-client --no-heading list-columns Open_vSwitch Open_vSwitch | tr -d ':' | cut -d' ' -f1)"
+ UUID="$(ovs-vsctl --no-heading --columns=_uuid list Open_vSwitch | tr -d ' ')"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "clear Open_vSwitch $UUID "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "clear Open_vSwitch $UUID "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ TMP="$(ovsdb-client --no-heading list-columns Open_vSwitch Bridge | tr -d ':' | cut -d' ' -f1)"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "clear Bridge br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "clear Bridge br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -597,7 +597,7 @@ ${MATCH}
+ # so, with one specified COLUMN 'other_config', it should still complete on
+ # COLUMNs, plus '--'.
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP} --))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "clear Bridge br0 other_config "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "clear Bridge br0 other_config "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -608,19 +608,19 @@ ${MATCH}
+ #
+ # with no key available, should always get user input.
+ MATCH="$(PREPARE_MATCH_NOSPACE(random_key))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add Bridge br0 other_config random_key"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add Bridge br0 other_config random_key"],
+ [0], [dnl
+ ${MATCH}
+ ])
+ MATCH="$(PREPARE_MATCH_NOSPACE(abc))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add Bridge br0 other_config random_key=abc"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add Bridge br0 other_config random_key=abc"],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # now add two random keys.
+ ovs-vsctl set Bridge br0 other_config:random_key1=abc other_config:random_val1=xyz
+ MATCH="$(PREPARE_MATCH_NOSPACE(random_key1= random_val1=))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add Bridge br0 other_config ran"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add Bridge br0 other_config ran"],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -632,25 +632,25 @@ ${MATCH}
+ # at first, we should complete on column.
+ TMP="$(ovsdb-client --no-heading list-columns Open_vSwitch Bridge | awk '/key.*value/ { print $1":"; next } { print $1; next }')"
+ MATCH="$(PREPARE_MATCH_NOSPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Bridge br0 "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Bridge br0 "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ MATCH="$(PREPARE_MATCH_NOSPACE(other_config:))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Bridge br0 other"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Bridge br0 other"],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # then, with the ':' we should complete on key.
+ TMP="$(ovs-vsctl --no-heading --columns=other_config list Bridge br0 | tr -d '{\"}' | tr -s ', ' '\n' | cut -d'=' -f1)"
+ MATCH="$(PREPARE_MATCH_NOSPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Bridge br0 other_config:"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Bridge br0 other_config:"],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # finally, if user fill in some value, we should just complete on user input.
+ MATCH="$(PREPARE_MATCH_NOSPACE(random_val1))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set Bridge br0 other_config:random_val1=12345"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set Bridge br0 other_config:random_val1=12345"],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -661,12 +661,12 @@ ${MATCH}
+ #
+ touch private_key certificate
+ MATCH="$(PREPARE_MATCH_SPACE(private_key))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set-ssl priva"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set-ssl priva"],
+ [0], [dnl
+ ${MATCH}
+ ])
+ MATCH="$(PREPARE_MATCH_SPACE(certificate))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set-ssl private_key cer"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set-ssl private_key cer"],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -676,20 +676,20 @@ ${MATCH}
+ # test: _ovs_vsctl_complete_target
+ #
+ MATCH="$(PREPARE_MATCH_NOSPACE(pssl: ptcp: punix: ssl: tcp: unix:))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set-manager "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set-manager "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # filename completion on unix, punix.
+ MATCH="$(PREPARE_MATCH_NOSPACE(testsuite.log))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set-manager unix:test"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set-manager unix:test"],
+ [0], [dnl
+ ${MATCH}
+ ])
+ # no completion on other type, just return available types.
+ # in real environment, bash will not complete on anything.
+ MATCH="$(PREPARE_MATCH_NOSPACE(pssl: ptcp: punix: tcp: unix:))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set-manager ssl:something"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set-manager ssl:something"],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -699,14 +699,14 @@ ${MATCH}
+ # test: _ovs_vsctl_complete_new
+ #
+ # test 'add-br'
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add-br "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add-br "],
+ [0], [dnl
+ --- BEGIN MESSAGE
+ Enter a new bridge:
+ > ovs-vsctl add-br --- END MESSAGE
+ ])
+ # user input does not change the output.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add-br new-br"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add-br new-br"],
+ [0], [dnl
+ --- BEGIN MESSAGE
+ Enter a new bridge:
+@@ -715,7 +715,7 @@ Enter a new bridge:
+ # after specifying the new bridge name, we should complete on parent bridge.
+ TMP="$(ovs-vsctl list-br)"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add-br new-br "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add-br new-br "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -724,7 +724,7 @@ ${MATCH}
+ # of '*COLUMN?:KEY=VALUE'.
+ TMP="$(ovsdb-client --no-heading list-columns Open_vSwitch Port | awk '/key.*value/ { print $1":"; next } { print $1; next }')"
+ MATCH="$(PREPARE_MATCH_NOSPACE(${TMP} --))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add-port br0 new-port "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add-port br0 new-port "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -736,13 +736,13 @@ ${MATCH}
+ # after '--', there should be no global options available for completion.
+ TMP="$(ovs-vsctl --commands | cut -d',' -f1-2 | tr -d ',[[]]' | tr -s ' ' '\n')"
+ MATCH="$(PREPARE_MATCH_NOSPACE(${TMP}))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "init -- "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "init -- "],
+ [0], [dnl
+ ${MATCH}
+ ])
+ TMP="$(ovs-vsctl --no-heading --columns=name,_uuid list Port | tr -d '\"')"
+ MATCH="$(PREPARE_MATCH_SPACE(${TMP} newp1 newp2))"
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "add-port br0 newp1 -- add-port br1 newp2 -- set Port "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "add-port br0 newp1 -- add-port br1 newp2 -- set Port "],
+ [0], [dnl
+ ${MATCH}
+ ])
+@@ -757,25 +757,25 @@ AT_SKIP_IF([eval 'test ${BASH_VERSINFO[[0]]} -lt 4'])
+ OVS_VSWITCHD_START
+ 
+ # complete non-matching command.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "invalid"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "invalid"],
+ [0], [dnl
+ 
+ ])
+ 
+ # complete after invalid command.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "invalid argu"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "invalid argu"],
+ [0], [dnl
+ 
+ ])
+ 
+ # complete non-matching end argument.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set INVALID_"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set INVALID_"],
+ [0], [dnl
+ 
+ ])
+ 
+ # complete after invalid intermediate argument.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "set INVALID_TBL "],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "set INVALID_TBL "],
+ [1], [dnl
+ --- BEGIN MESSAGE
+ Cannot complete 'INVALID_TBL' at index 3:
+@@ -783,12 +783,12 @@ Cannot complete 'INVALID_TBL' at index 3:
+ 
+ # complete ovs-vsctl --db=wrongdb [TAB]
+ # should return 1 and show nothing.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test "--db=wrongdb"],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test "--db=wrongdb"],
+ [1], [])
+ 
+ OVS_VSWITCHD_STOP
+ # delete ovsdb-server and try again.
+-AT_CHECK_UNQUOTED([ovs-vsctl-bashcomp.bash test ""],
++AT_CHECK_UNQUOTED([bash ovs-vsctl-bashcomp.bash test ""],
+ [1], [])
+ 
+ AT_CLEANUP
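Every completion check above now runs the helper as `bash ovs-vsctl-bashcomp.bash ...` rather than relying on the script resolving and executing on its own. A hedged Python illustration of the same idea for anyone driving the script from a harness; the arguments mirror the tests above:

```python
import subprocess

# Invoking through "bash" removes any dependence on the script's execute
# bit or on "." being in PATH; only the interpreter must be resolvable.
result = subprocess.run(
    ["bash", "ovs-vsctl-bashcomp.bash", "test", "set "],
    capture_output=True, text=True)
print(result.returncode, result.stdout)
```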
+diff --git a/tests/dpif-netdev.at b/tests/dpif-netdev.at
+index a79ebdb618..3179e1645d 100644
+--- a/tests/dpif-netdev.at
++++ b/tests/dpif-netdev.at
+@@ -439,7 +439,7 @@ recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), a
+ 
+    # Check for successful packet matching with installed offloaded flow.
+    AT_CHECK([filter_hw_packet_netdev_dummy < ovs-vswitchd.log | strip_xout], [0], [dnl
+-p1: packet: ip,vlan_tci=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=64 matches with flow: recirc_id=0,eth,ip,vlan_tci=0x0000/0x1fff,nw_frag=no with mark: 1
++p1: packet: ip,vlan_tci=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no matches with flow: recirc_id=0,eth,ip,vlan_tci=0x0000/0x1fff,nw_frag=no with mark: 1
+ ])
+ 
+    ovs-appctl revalidator/wait
+@@ -506,7 +506,7 @@ recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x8100),vlan(vid=99,pcp=
+ 
+    # Check for successful packet matching with installed offloaded flow.
+    AT_CHECK([filter_hw_packet_netdev_dummy < ovs-vswitchd.log | strip_xout], [0], [dnl
+-p1: packet: udp,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=82 dnl
++p1: packet: udp,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=81,tp_dst=82 dnl
+ matches with flow: recirc_id=0,eth,udp,dl_vlan=99,nw_src=127.0.0.1,nw_frag=no,tp_dst=82 with mark: 1
+ ])
+ 
+@@ -530,10 +530,10 @@ p1: flow del: mark: 1
+ 
+    # Check that ip address and udp port were correctly modified in output packets.
+    AT_CHECK([ovs-ofctl parse-pcap p1.pcap], [0], [dnl
+-udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=82
+-udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=192.168.0.7,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=3773
+-udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=82
+-udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=192.168.0.7,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=3773
++udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=81,tp_dst=82
++udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=192.168.0.7,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=81,tp_dst=3773
++udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=81,tp_dst=82
++udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=192.168.0.7,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=81,tp_dst=3773
+ ])
+ 
+    OVS_VSWITCHD_STOP
+diff --git a/tests/drop-stats.at b/tests/drop-stats.at
+index f3e19cd83b..1d3af98dab 100644
+--- a/tests/drop-stats.at
++++ b/tests/drop-stats.at
+@@ -83,6 +83,9 @@ AT_CHECK([
+     ovs-ofctl -Oopenflow13 add-flows br0 flows.txt
+     ovs-ofctl -Oopenflow13 dump-flows br0 | ofctl_strip | sort | grep actions ], [0], [ignore])
+ 
++ovs-appctl time/warp 15000
++AT_CHECK([ovs-appctl revalidator/wait])
++
+ AT_CHECK([
+     ovs-appctl netdev-dummy/receive p1 'in_port(1),packet_type(ns=0,id=0),eth(src=3a:6d:d2:09:9c:ab,dst=1e:2c:e9:2a:66:9e),ipv4(src=192.168.10.10,dst=192.168.10.30,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'
+ ], [0], [ignore])
+diff --git a/tests/library.at b/tests/library.at
+index db4997d8f0..6489be2c15 100644
+--- a/tests/library.at
++++ b/tests/library.at
+@@ -252,7 +252,7 @@ AT_CHECK([ovstest test-barrier], [0], [])
+ AT_CLEANUP
+ 
+ AT_SETUP([rcu])
+-AT_CHECK([ovstest test-rcu-quiesce], [0], [])
++AT_CHECK([ovstest test-rcu], [0], [])
+ AT_CLEANUP
+ 
+ AT_SETUP([stopwatch module])
+diff --git a/tests/mcast-snooping.at b/tests/mcast-snooping.at
+index 757cf7186e..fe475e7b38 100644
+--- a/tests/mcast-snooping.at
++++ b/tests/mcast-snooping.at
+@@ -216,3 +216,70 @@ AT_CHECK([ovs-appctl mdb/show br0], [0], [dnl
+ ])
+ 
+ AT_CLEANUP
++
++
++AT_SETUP([mcast - igmp flood for non-snoop enabled])
++OVS_VSWITCHD_START([])
++
++AT_CHECK([
++    ovs-vsctl set bridge br0 \
++    datapath_type=dummy], [0])
++
++add_of_ports br0 1 2
++
++AT_CHECK([ovs-ofctl add-flow br0 action=normal])
++
++ovs-appctl time/stop
++
++dnl Basic scenario - needs to flood for IGMP followed by unicast ICMP
++dnl in reverse direction
++AT_CHECK([ovs-appctl netdev-dummy/receive p1 \
++    '0101000c29a0aa55aa550001080046c00028000040000102d3494565eb4ae0000016940400002200f9020000000104000000e00000fb000000000000'])
++AT_CHECK([ovs-appctl netdev-dummy/receive p2 \
++    'aa55aa5500010101000c29a008004500001c00010000400164dc0a0101010a0101020800f7ffffffffff'])
++
++
++AT_CHECK([ovs-appctl dpctl/dump-flows | grep -e .*ipv4 | sort | dnl
++          strip_stats | strip_used | strip_recirc | dnl
++          sed -e 's/,packet_type(ns=[[0-9]]*,id=[[0-9]]*),/,/'],
++                     [0], [dnl
++recirc_id(<recirc>),in_port(1),eth(src=aa:55:aa:55:00:01,dst=01:01:00:0c:29:a0),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:never, actions:100,2
++recirc_id(<recirc>),in_port(2),eth(src=01:01:00:0c:29:a0,dst=aa:55:aa:55:00:01),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:never, actions:1
++])
++
++ovs-appctl time/warp 100000
++
++dnl Next we should clear the flows and install a complex case
++AT_CHECK([ovs-ofctl del-flows br0])
++
++AT_DATA([flows.txt], [dnl
++table=0, arp actions=NORMAL
++table=0, ip,in_port=1 actions=ct(table=1,zone=64000)
++table=0, in_port=2 actions=output:1
++table=1, ip,ct_state=+trk+inv actions=drop
++table=1  ip,in_port=1,icmp,ct_state=+trk+new actions=output:2
++table=1, in_port=1,ip,ct_state=+trk+new actions=controller(userdata=00.de.ad.be.ef.ca.fe.01)
++table=1, in_port=1,ip,ct_state=+trk+est actions=output:2
++])
++AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
++
++ovs-appctl time/warp 100000
++
++dnl Send the IGMP, followed by a unicast ICMP - ensure we won't black-hole it
++AT_CHECK([ovs-appctl netdev-dummy/receive p1 \
++    '0101000c29a0aa55aa550001080046c00028000040000102d3494565eb4ae0000016940400002200f9020000000104000000e00000fb000000000000'])
++AT_CHECK([ovs-appctl netdev-dummy/receive p1 \
++    'aa55aa550001aa55aa55000208004500001c00010000400164dc0a0101010a0101020800f7ffffffffff'])
++
++
++AT_CHECK([ovs-appctl dpctl/dump-flows | grep -e .*ipv4 | sort | dnl
++          strip_stats | strip_used | strip_recirc | dnl
++          sed 's/pid=[[0-9]]*,//
++               s/,packet_type(ns=[[0-9]]*,id=[[0-9]]*),/,/'],
++                     [0], [dnl
++ct_state(+new-inv+trk),recirc_id(<recirc>),in_port(1),eth_type(0x0800),ipv4(proto=1,frag=no), packets:0, bytes:0, used:never, actions:2
++ct_state(+new-inv+trk),recirc_id(<recirc>),in_port(1),eth_type(0x0800),ipv4(proto=2,frag=no), packets:0, bytes:0, used:never, actions:userspace(controller(reason=1,dont_send=0,continuation=0,recirc_id=<recirc>,rule_cookie=0,controller_id=0,max_len=65535))
++recirc_id(<recirc>),in_port(1),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:0.0s, actions:ct(zone=64000),recirc(<recirc>)
++])
++
++AT_CLEANUP
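The hex blob injected through `netdev-dummy/receive` above is easier to reason about once decoded: it is an IPv4 packet (with options, hence a 24-byte header) carrying protocol 2, i.e. IGMP, addressed to 224.0.0.22, which a bridge without snooping enabled must simply flood. A stdlib-only decode of that exact frame:

```python
frame = bytes.fromhex(
    "0101000c29a0aa55aa550001080046c00028000040000102d349"
    "4565eb4ae0000016940400002200f9020000000104000000e00000fb"
    "000000000000")

dst_mac = frame[0:6].hex(":")               # Python 3.8+ for the separator
ethertype = int.from_bytes(frame[12:14], "big")
proto = frame[14 + 9]                       # IPv4 protocol field
ip_dst = ".".join(str(b) for b in frame[14 + 16:14 + 20])

assert ethertype == 0x0800 and proto == 2   # IPv4 carrying IGMP
print(dst_mac, ip_dst)                      # 01:01:00:0c:29:a0 224.0.0.22
```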
+diff --git a/tests/nsh.at b/tests/nsh.at
+index 4d49f12017..91ded1445d 100644
+--- a/tests/nsh.at
++++ b/tests/nsh.at
+@@ -27,7 +27,7 @@ AT_CHECK([
+ AT_CHECK([
+     ovs-appctl ofproto/trace br0 'in_port=1,dl_type=0x894f,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x123456,nsh_si=255,nsh_c1=0x11223344,nsh_c2=0x55667788,nsh_c3=0x99aabbcc,nsh_c4=0xddeeff00'
+ ], [0], [dnl
+-Flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=0,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x123456,nsh_si=255,nsh_c1=0x11223344,nsh_c2=0x55667788,nsh_c3=0x99aabbcc,nsh_c4=0xddeeff00,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++Flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=0,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x123456,nsh_si=255,nsh_c1=0x11223344,nsh_c2=0x55667788,nsh_c3=0x99aabbcc,nsh_c4=0xddeeff00,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ 
+ bridge("br0")
+ -------------
+@@ -37,7 +37,7 @@ bridge("br0")
+     set_field:0x44332211->nsh_c1
+     output:2
+ 
+-Final flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=2,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x123456,nsh_si=254,nsh_c1=0x44332211,nsh_c2=0x55667788,nsh_c3=0x99aabbcc,nsh_c4=0xddeeff00,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++Final flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=2,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x123456,nsh_si=254,nsh_c1=0x44332211,nsh_c2=0x55667788,nsh_c3=0x99aabbcc,nsh_c4=0xddeeff00,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ Megaflow: recirc_id=0,eth,in_port=1,dl_type=0x894f,nsh_flags=0,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x123456,nsh_si=255,nsh_c1=0x11223344
+ Datapath actions: set(nsh(flags=2,ttl=63,spi=0x123456,si=254,c1=0x44332211)),2
+ ])
+@@ -85,7 +85,7 @@ AT_CHECK([
+ AT_CHECK([
+     ovs-appctl ofproto/trace br0 'in_port=1,icmp,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_dst=10.10.10.10,nw_src=20.20.20.20'
+ ], [0], [dnl
+-Flow: icmp,in_port=1,vlan_tci=0x0000,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_src=20.20.20.20,nw_dst=10.10.10.10,nw_tos=0,nw_ecn=0,nw_ttl=0,icmp_type=0,icmp_code=0
++Flow: icmp,in_port=1,vlan_tci=0x0000,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_src=20.20.20.20,nw_dst=10.10.10.10,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,icmp_type=0,icmp_code=0
+ 
+ bridge("br0")
+ -------------
+@@ -103,7 +103,7 @@ bridge("br0")
+     decap()
+     decap()
+ 
+-Final flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=11:22:33:44:55:66,dl_type=0x894f,nsh_flags=0,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x1234,nsh_si=255,nsh_c1=0x11223344,nsh_c2=0x0,nsh_c3=0x0,nsh_c4=0x0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++Final flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=11:22:33:44:55:66,dl_type=0x894f,nsh_flags=0,nsh_ttl=63,nsh_mdtype=1,nsh_np=3,nsh_spi=0x1234,nsh_si=255,nsh_c1=0x11223344,nsh_c2=0x0,nsh_c3=0x0,nsh_c4=0x0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ Megaflow: recirc_id=0,eth,ip,in_port=1,dl_dst=66:77:88:99:aa:bb,nw_frag=no
+ Datapath actions: push_nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x1234,si=255,c1=0x11223344,c2=0x0,c3=0x0,c4=0x0),push_eth(src=00:00:00:00:00:00,dst=11:22:33:44:55:66),pop_eth,pop_nsh(),recirc(0x1)
+ ])
+@@ -111,7 +111,7 @@ Datapath actions: push_nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x1234,si=255,c1=0x1
+ AT_CHECK([
+     ovs-appctl ofproto/trace br0 'in_port=4,dl_type=0x894f,nsh_mdtype=1,nsh_np=3,nsh_spi=0x1234,nsh_c1=0x11223344'
+ ], [0], [dnl
+-Flow: in_port=4,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=0,nsh_ttl=0,nsh_mdtype=1,nsh_np=3,nsh_spi=0x1234,nsh_si=0,nsh_c1=0x11223344,nsh_c2=0x0,nsh_c3=0x0,nsh_c4=0x0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++Flow: in_port=4,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=0,nsh_ttl=0,nsh_mdtype=1,nsh_np=3,nsh_spi=0x1234,nsh_si=0,nsh_c1=0x11223344,nsh_c2=0x0,nsh_c3=0x0,nsh_c4=0x0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ 
+ bridge("br0")
+ -------------
+@@ -213,7 +213,7 @@ AT_CHECK([
+ AT_CHECK([
+     ovs-appctl ofproto/trace br0 'in_port=1,icmp,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_dst=10.10.10.10,nw_src=20.20.20.20'
+ ], [0], [dnl
+-Flow: icmp,in_port=1,vlan_tci=0x0000,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_src=20.20.20.20,nw_dst=10.10.10.10,nw_tos=0,nw_ecn=0,nw_ttl=0,icmp_type=0,icmp_code=0
++Flow: icmp,in_port=1,vlan_tci=0x0000,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_src=20.20.20.20,nw_dst=10.10.10.10,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,icmp_type=0,icmp_code=0
+ 
+ bridge("br0")
+ -------------
+@@ -230,7 +230,7 @@ bridge("br0")
+     decap()
+     decap()
+ 
+-Final flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=11:22:33:44:55:66,dl_type=0x894f,nsh_flags=0,nsh_ttl=63,nsh_mdtype=2,nsh_np=3,nsh_spi=0x1234,nsh_si=255,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++Final flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=11:22:33:44:55:66,dl_type=0x894f,nsh_flags=0,nsh_ttl=63,nsh_mdtype=2,nsh_np=3,nsh_spi=0x1234,nsh_si=255,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ Megaflow: recirc_id=0,eth,ip,in_port=1,dl_dst=66:77:88:99:aa:bb,nw_frag=no
+ Datapath actions: push_nsh(flags=0,ttl=63,mdtype=2,np=3,spi=0x1234,si=255,md2=0x10000a041234567820001408fedcba9876543210),push_eth(src=00:00:00:00:00:00,dst=11:22:33:44:55:66),pop_eth,pop_nsh(),recirc(0x1)
+ ])
+@@ -238,7 +238,7 @@ Datapath actions: push_nsh(flags=0,ttl=63,mdtype=2,np=3,spi=0x1234,si=255,md2=0x
+ AT_CHECK([
+     ovs-appctl ofproto/trace br0 'in_port=4,dl_type=0x894f,nsh_mdtype=2,nsh_np=3,nsh_spi=0x1234'
+ ], [0], [dnl
+-Flow: in_port=4,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=0,nsh_ttl=0,nsh_mdtype=2,nsh_np=3,nsh_spi=0x1234,nsh_si=0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++Flow: in_port=4,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x894f,nsh_flags=0,nsh_ttl=0,nsh_mdtype=2,nsh_np=3,nsh_spi=0x1234,nsh_si=0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ 
+ bridge("br0")
+ -------------
+@@ -325,7 +325,7 @@ AT_CHECK([
+ AT_CHECK([
+     ovs-appctl ofproto/trace br0 'in_port=1,icmp,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_dst=10.10.10.10,nw_src=20.20.20.20'
+ ], [0], [dnl
+-Flow: icmp,in_port=1,vlan_tci=0x0000,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_src=20.20.20.20,nw_dst=10.10.10.10,nw_tos=0,nw_ecn=0,nw_ttl=0,icmp_type=0,icmp_code=0
++Flow: icmp,in_port=1,vlan_tci=0x0000,dl_src=00:11:22:33:44:55,dl_dst=66:77:88:99:aa:bb,nw_src=20.20.20.20,nw_dst=10.10.10.10,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,icmp_type=0,icmp_code=0
+ 
+ bridge("br0")
+ -------------
+@@ -381,7 +381,7 @@ Datapath actions: pop_nsh(),recirc(0x2)
+ AT_CHECK([
+     ovs-appctl ofproto/trace br0 'recirc_id=2,in_port=4,ip'
+ ], [0], [dnl
+-Flow: recirc_id=0x2,eth,ip,in_port=4,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,nw_src=0.0.0.0,nw_dst=0.0.0.0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++Flow: recirc_id=0x2,eth,ip,in_port=4,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,nw_src=0.0.0.0,nw_dst=0.0.0.0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ 
+ bridge("br0")
+ -------------
+diff --git a/tests/ofp-print.at b/tests/ofp-print.at
+index 2c7e163bd6..7be6628c34 100644
+--- a/tests/ofp-print.at
++++ b/tests/ofp-print.at
+@@ -471,7 +471,7 @@ c0 a8 00 02 27 2f 00 00 78 50 cc 5b 57 af 42 1e \
+ 50 02 02 00 26 e8 00 00 00 00 00 00 00 00 \
+ "], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=60 in_port=3 (via no_match) data_len=60 buffer=0x00000111
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:06,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=10031,tp_dst=0,tcp_flags=syn tcp_csum:26e8
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:06,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=10031,tp_dst=0,tcp_flags=syn tcp_csum:26e8
+ ])
+ AT_CLEANUP
  
- OVS_VSWITCHD_STOP
+@@ -485,7 +485,7 @@ c0 a8 00 02 27 2f 00 00 78 50 cc 5b 57 af 42 1e \
+ 50 10 02 00 26 e8 00 00 00 00 00 00 00 00 \
+ " 3], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=60 in_port=3 (via no_match) data_len=60 buffer=0x00000111
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:06,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=10031,tp_dst=0,tcp_flags=ack tcp_csum:26e8
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:06,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=10031,tp_dst=0,tcp_flags=ack tcp_csum:26e8
+ 00000000  50 54 00 00 00 06 50 54-00 00 00 05 08 00 45 00
+ 00000010  00 28 bd 12 00 00 40 06-3c 6a c0 a8 00 01 c0 a8
+ 00000020  00 02 27 2f 00 00 78 50-cc 5b 57 af 42 1e 50 10
+@@ -504,7 +504,7 @@ c0 a8 00 02 27 2f 00 00 78 50 cc 5b 57 af 42 1e \
+ 50 02 02 00 26 e8 00 00 00 00 00 00 00 00 \
+ "], [0], [dnl
+ OFPT_PACKET_IN (OF1.1) (xid=0x0): total_len=60 in_port=3 (via no_match) data_len=60 buffer=0x00000111
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:06,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=10031,tp_dst=0,tcp_flags=syn tcp_csum:26e8
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:06,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=10031,tp_dst=0,tcp_flags=syn tcp_csum:26e8
+ ])
  AT_CLEANUP
-diff --git a/tests/classifier.at b/tests/classifier.at
-index cdcd72c156..f652b59837 100644
---- a/tests/classifier.at
-+++ b/tests/classifier.at
-@@ -129,6 +129,31 @@ Datapath actions: 3
- OVS_VSWITCHD_STOP(["/'prefixes' with incompatible field: ipv6_label/d"])
+ 
+@@ -736,7 +736,7 @@ b9 7c c0 a8 00 02 c0 a8 00 01 00 00 2b 60 00 00 \
+ 00 00 00 00 \
+ "], [0], [dnl
+ OFPT_PACKET_OUT (xid=0x0): in_port=1 actions=output:3 data_len=60
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
+ ])
  AT_CLEANUP
  
-+AT_SETUP([flow classifier - ipv6 ND dependency])
-+OVS_VSWITCHD_START
-+add_of_ports br0 1 2
-+AT_DATA([flows.txt], [dnl
-+ table=0,priority=100,ipv6,ipv6_src=1000::/10 actions=resubmit(,1)
-+ table=0,priority=0 actions=NORMAL
-+ table=1,priority=110,ipv6,ipv6_dst=1000::3 actions=resubmit(,2)
-+ table=1,priority=100,ipv6,ipv6_dst=1000::4 actions=resubmit(,2)
-+ table=1,priority=0 actions=NORMAL
-+ table=2,priority=120,icmp6,nw_ttl=255,icmp_type=135,icmp_code=0,nd_target=1000::1 actions=NORMAL
-+ table=2,priority=100,tcp actions=NORMAL
-+ table=2,priority=100,icmp6 actions=NORMAL
-+ table=2,priority=0 actions=NORMAL
+@@ -751,7 +751,7 @@ b9 7c c0 a8 00 02 c0 a8 00 01 00 00 2b 60 00 00 \
+ 00 00 00 00 \
+ " 3], [0], [dnl
+ OFPT_PACKET_OUT (xid=0x0): in_port=1 actions=output:3 data_len=60
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
+ 00000000  50 54 00 00 00 05 50 54-00 00 00 06 08 00 45 00
+ 00000010  00 28 00 00 40 00 40 06-b9 7c c0 a8 00 02 c0 a8
+ 00000020  00 01 00 00 2b 60 00 00-00 00 6a 4f 2b 58 50 14
+@@ -782,7 +782,7 @@ b9 7c c0 a8 00 02 c0 a8 00 01 00 00 2b 60 00 00 \
+ 00 00 00 00 \
+ "], [0], [dnl
+ OFPT_PACKET_OUT (OF1.2) (xid=0x8858dfc5): in_port=LOCAL actions=FLOOD data_len=60
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
+ ])
+ AT_CLEANUP
+ 
+@@ -850,7 +850,7 @@ b9 7c c0 a8 00 02 c0 a8 00 01 00 00 2b 60 00 00 \
+ 00 00 00 00
+ "], [0], [dnl
+ OFPT_PACKET_OUT (OF1.5) (xid=0x11223344): metadata=0x3,in_port=1 actions=FLOOD data_len=60
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
+ ])
+ AT_CLEANUP
+ 
+@@ -3103,7 +3103,7 @@ ff ff ff ff ff ff 00 00 00 00 82 82 82 82 82 82 \
+ 31 6d 00 00 00 00 00 00 00 00 \
+ "], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,metadata=0x5a5a5a5a5a5a5a5a,in_port=1 (via action) data_len=64 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=86,tcp_flags=syn tcp_csum:316d
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=86,tcp_flags=syn tcp_csum:316d
+ ])
+ AT_CLEANUP
+ 
+@@ -3124,7 +3124,7 @@ ff ff ff ff ff ff 00 00 00 00 82 82 82 82 82 82 \
+ 31 6d 00 00 00 00 00 00 00 00 \
+ " 3], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,metadata=0x5a5a5a5a5a5a5a5a,in_port=1 (via action) data_len=64 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=86,tcp_flags=fin tcp_csum:316d
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=86,tcp_flags=fin tcp_csum:316d
+ 00000000  82 82 82 82 82 82 80 81-81 81 81 81 81 00 00 50
+ 00000010  08 00 45 00 00 28 00 00-00 00 00 06 32 05 53 53
+ 00000020  53 53 54 54 54 54 00 55-00 56 00 00 00 00 00 00
+@@ -3151,7 +3151,7 @@ AT_CHECK([ovs-ofctl ofp-print "
+ ], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): table_id=7 cookie=0xfedcba9876543210 total_len=64 metadata=0x5a5a5a5a5a5a5a5a (via action) data_len=48 buffer=0x00000114
+  userdata=01.02.03.04.05
+-ip,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=0.0.0.0,nw_dst=0.0.0.0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0
++ip,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=0.0.0.0,nw_dst=0.0.0.0,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no
+ ])
+ AT_CLEANUP
+ 
+@@ -3869,7 +3869,7 @@ b9 7c c0 a8 00 02 c0 a8 00 01 00 00 2b 60 00 00 \
+ OFPT_BUNDLE_ADD_MESSAGE (OF1.4) (xid=0x3):
+  bundle_id=0x1 flags=atomic
+ OFPT_PACKET_OUT (OF1.4) (xid=0x3): in_port=LOCAL actions=FLOOD data_len=60
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:06,dl_dst=50:54:00:00:00:05,nw_src=192.168.0.2,nw_dst=192.168.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=11104,tcp_flags=rst|ack tcp_csum:6d75
+ ])
+ AT_CLEANUP
+ 
+diff --git a/tests/ofproto-dpif.at b/tests/ofproto-dpif.at
+index 7c2edeb9d4..1a8de7398c 100644
+--- a/tests/ofproto-dpif.at
++++ b/tests/ofproto-dpif.at
+@@ -29,6 +29,39 @@ AT_CHECK([ovs-appctl revalidator/wait])
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
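The test added below checks REV_RECONFIGURE accounting: enabling or disabling `lldp` on an interface must bump the `rev_reconfigure` coverage counter exactly once, while removing an already-disabled setting must not. A hedged sketch of the same probe outside the autotest harness, reusing the commands from the test and assuming a running vswitchd with `ovs-appctl`/`ovs-vsctl` on PATH:

```python
import subprocess

def read_counter(name="rev_reconfigure"):
    out = subprocess.run(["ovs-appctl", "coverage/read-counter", name],
                         capture_output=True, text=True, check=True)
    return int(out.stdout.strip())

before = read_counter()
subprocess.run(["ovs-vsctl", "set", "interface", "p1", "lldp:enable=true"],
               check=True)
subprocess.run(["ovs-appctl", "revalidator/wait"], check=True)
assert read_counter() == before + 1   # lldp toggle forced one revalidation
```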
++AT_SETUP([ofproto-dpif - lldp revalidator event(REV_RECONFIGURE)])
++OVS_VSWITCHD_START(
++    [add-port br0 p1 -- set interface p1 ofport_request=1 type=dummy]
++)
++dnl first revalidation is triggered by adding the interface
++AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
++1
 +])
-+AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
 +
-+# test ICMPv6 echo request (which should have no nd_target field)
-+AT_CHECK([ovs-appctl ofproto/trace br0 "in_port=1,eth_src=f6:d2:b0:19:5e:7b,eth_dst=d2:49:19:91:78:fe,dl_type=0x86dd,ipv6_src=1000::3,ipv6_dst=1000::4,nw_proto=58,icmpv6_type=128,icmpv6_code=0"], [0], [stdout])
-+AT_CHECK([tail -2 stdout], [0],
-+  [Megaflow: recirc_id=0,eth,icmp6,in_port=1,dl_src=f6:d2:b0:19:5e:7b,dl_dst=d2:49:19:91:78:fe,ipv6_src=1000::/10,ipv6_dst=1000::4,nw_ttl=0,nw_frag=no
-+Datapath actions: 100,2
++dnl enable lldp
++AT_CHECK([ovs-vsctl set interface p1 lldp:enable=true])
++AT_CHECK([ovs-appctl revalidator/wait])
++AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
++2
++])
++
++dnl disable lldp
++AT_CHECK([ovs-vsctl set interface p1 lldp:enable=false])
++AT_CHECK([ovs-appctl revalidator/wait])
++AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
++3
++])
++
++dnl remove lldp, no revalidation as lldp was disabled
++AT_CHECK([ovs-vsctl remove interface p1 lldp enable])
++AT_CHECK([ovs-appctl revalidator/wait])
++AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
++3
 +])
++
 +OVS_VSWITCHD_STOP
 +AT_CLEANUP
 +
- AT_BANNER([conjunctive match])
+ AT_SETUP([ofproto-dpif - active-backup bonding (with primary)])
  
- AT_SETUP([single conjunctive match])
-diff --git a/tests/drop-stats.at b/tests/drop-stats.at
-index f3e19cd83b..1d3af98dab 100644
---- a/tests/drop-stats.at
-+++ b/tests/drop-stats.at
-@@ -83,6 +83,9 @@ AT_CHECK([
-     ovs-ofctl -Oopenflow13 add-flows br0 flows.txt
-     ovs-ofctl -Oopenflow13 dump-flows br0 | ofctl_strip | sort | grep actions ], [0], [ignore])
+ dnl Create br0 with members p1, p2 and p7, creating bond0 with p1 and
+@@ -81,11 +114,12 @@ recirc_id(0),in_port(4),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=ff:
  
-+ovs-appctl time/warp 15000
-+AT_CHECK([ovs-appctl revalidator/wait])
-+
- AT_CHECK([
-     ovs-appctl netdev-dummy/receive p1 'in_port(1),packet_type(ns=0,id=0),eth(src=3a:6d:d2:09:9c:ab,dst=1e:2c:e9:2a:66:9e),ipv4(src=192.168.10.10,dst=192.168.10.30,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'
- ], [0], [ignore])
-diff --git a/tests/library.at b/tests/library.at
-index db4997d8f0..6489be2c15 100644
---- a/tests/library.at
-+++ b/tests/library.at
-@@ -252,7 +252,7 @@ AT_CHECK([ovstest test-barrier], [0], [])
- AT_CLEANUP
+ ovs-appctl netdev-dummy/set-admin-state p1 up
+ ovs-appctl time/warp 100
+-OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
+ ---- bond0 ----
+ bond_mode: active-backup
+ bond may use recirculation: no, <del>
+ bond-hash-basis: 0
++lb_output action: disabled, bond-id: -1
+ updelay: 0 ms
+ downdelay: 0 ms
+ lacp_status: off
+@@ -99,7 +133,6 @@ member p1: enabled
  
- AT_SETUP([rcu])
--AT_CHECK([ovstest test-rcu-quiesce], [0], [])
-+AT_CHECK([ovstest test-rcu], [0], [])
- AT_CLEANUP
+ member p2: enabled
+   may_enable: true
+-
+ ])
  
- AT_SETUP([stopwatch module])
-diff --git a/tests/mcast-snooping.at b/tests/mcast-snooping.at
-index 757cf7186e..fe475e7b38 100644
---- a/tests/mcast-snooping.at
-+++ b/tests/mcast-snooping.at
-@@ -216,3 +216,70 @@ AT_CHECK([ovs-appctl mdb/show br0], [0], [dnl
+ OVS_VSWITCHD_STOP
+@@ -126,14 +159,15 @@ dnl bring the primary back and verify that we switched back to the
+ dnl primary.
+ ovs-appctl netdev-dummy/set-admin-state p1 down
+ ovs-appctl time/warp 100
+-OVS_WAIT_UNTIL([test -n "`ovs-appctl bond/show | fgrep 'member p1: disabled'`"])
++OVS_WAIT_UNTIL([test -n "`ovs-appctl bond/show | grep -F 'member p1: disabled'`"])
+ ovs-appctl netdev-dummy/set-admin-state p1 up
+ ovs-appctl time/warp 100
+-OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
+ ---- bond0 ----
+ bond_mode: active-backup
+ bond may use recirculation: no, <del>
+ bond-hash-basis: 0
++lb_output action: disabled, bond-id: -1
+ updelay: 0 ms
+ downdelay: 0 ms
+ lacp_status: off
+@@ -150,14 +184,13 @@ member p2: enabled
+ 
+ member p3: enabled
+   may_enable: true
+-
+ ])
+ 
+ dnl Now delete the primary and verify that the output shows that the
+ dnl primary is no longer an member
+ ovs-vsctl --id=@p1 get Interface p1 -- remove Port bond0 interfaces @p1
+ ovs-appctl time/warp 100
+-OVS_WAIT_UNTIL([test -n "`ovs-appctl bond/show | fgrep 'active-backup primary: p1 (no such member)'`"])
++OVS_WAIT_UNTIL([test -n "`ovs-appctl bond/show | grep -F 'active-backup primary: p1 (no such member)'`"])
+ 
+ dnl Now re-add the primary and verify that the output shows that the
+ dnl primary is available again.
+@@ -171,11 +204,12 @@ ovs-vsctl \
+    --id=@p1 create Interface name=p1 type=dummy options:pstream=punix:$OVS_RUNDIR/p1.sock ofport_request=1 -- \
+    set Port bond0 interfaces="$uuids, @p1]"
+ ovs-appctl time/warp 100
+-OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
+ ---- bond0 ----
+ bond_mode: active-backup
+ bond may use recirculation: no, <del>
+ bond-hash-basis: 0
++lb_output action: disabled, bond-id: -1
+ updelay: 0 ms
+ downdelay: 0 ms
+ lacp_status: off
+@@ -192,17 +226,17 @@ member p2: enabled
+ 
+ member p3: enabled
+   may_enable: true
+-
+ ])
+ 
+ dnl Switch to another primary
+ ovs-vsctl set port bond0 other_config:bond-primary=p2
+ ovs-appctl time/warp 100
+-OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
+ ---- bond0 ----
+ bond_mode: active-backup
+ bond may use recirculation: no, <del>
+ bond-hash-basis: 0
++lb_output action: disabled, bond-id: -1
+ updelay: 0 ms
+ downdelay: 0 ms
+ lacp_status: off
+@@ -211,25 +245,25 @@ active-backup primary: p2
+ <active member mac del>
+ 
+ member p1: enabled
+-  active member
+   may_enable: true
+ 
+ member p2: enabled
++  active member
+   may_enable: true
+ 
+ member p3: enabled
+   may_enable: true
+-
+ ])
+ 
+ dnl Remove the "bond-primary" config directive from the bond.
+ AT_CHECK([ovs-vsctl remove Port bond0 other_config bond-primary])
+ ovs-appctl time/warp 100
+-OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
+ ---- bond0 ----
+ bond_mode: active-backup
+ bond may use recirculation: no, <del>
+ bond-hash-basis: 0
++lb_output action: disabled, bond-id: -1
+ updelay: 0 ms
+ downdelay: 0 ms
+ lacp_status: off
+@@ -238,15 +272,14 @@ active-backup primary: <none>
+ <active member mac del>
+ 
+ member p1: enabled
+-  active member
+   may_enable: true
+ 
+ member p2: enabled
++  active member
+   may_enable: true
+ 
+ member p3: enabled
+   may_enable: true
+-
  ])
  
+ OVS_VSWITCHD_STOP
+@@ -336,9 +369,9 @@ ovs-appctl time/warp 100
+ AT_CHECK([ovs-appctl dpif/dump-flows br1 > br1_flows.txt])
+ # Make sure there is reasonable distribution to all three ports.
+ # We don't want to make this check precise, in case hash function changes.
+-AT_CHECK([test `egrep 'in_port\(4\)' br1_flows.txt |wc -l` -gt 3])
+-AT_CHECK([test `egrep 'in_port\(5\)' br1_flows.txt |wc -l` -gt 3])
+-AT_CHECK([test `egrep 'in_port\(6\)' br1_flows.txt |wc -l` -gt 3])
++AT_CHECK([test `grep -E 'in_port\(4\)' br1_flows.txt |wc -l` -gt 3])
++AT_CHECK([test `grep -E 'in_port\(5\)' br1_flows.txt |wc -l` -gt 3])
++AT_CHECK([test `grep -E 'in_port\(6\)' br1_flows.txt |wc -l` -gt 3])
+ OVS_VSWITCHD_STOP
  AT_CLEANUP
-+
-+
-+AT_SETUP([mcast - igmp flood for non-snoop enabled])
-+OVS_VSWITCHD_START([])
-+
-+AT_CHECK([
-+    ovs-vsctl set bridge br0 \
-+    datapath_type=dummy], [0])
-+
-+add_of_ports br0 1 2
-+
+ 
+@@ -498,6 +531,72 @@ AT_CHECK([sed -n '/member p2/,/^$/p' bond3.txt | grep 'hash'], [0], [ignore])
+ OVS_VSWITCHD_STOP()
+ AT_CLEANUP
+ 
++dnl Regression test for a deadlock / double lock on post-recirculation rule
++dnl updates while processing PACKET_OUT.
++AT_SETUP([ofproto-dpif - balance-tcp bonding rule updates on packet-out])
++dnl Create br0 with interfaces bond0(p1, p2) and p5,
++dnl    and br1 with interfaces bond1(p3, p4) and p6.
++dnl    bond0 <-> bond1
++OVS_VSWITCHD_START(
++  [add-bond br0 bond0 p1 p2 bond_mode=balance-tcp lacp=active dnl
++        other-config:lacp-time=fast other-config:bond-rebalance-interval=1000 -- dnl
++   set interface p1 type=dummy options:pstream=punix:$OVS_RUNDIR/p1.sock ofport_request=1 mtu_request=65535 -- dnl
++   set interface p2 type=dummy options:pstream=punix:$OVS_RUNDIR/p2.sock ofport_request=2 mtu_request=65535 -- dnl
++   add-port br0 p5 -- set interface p5 ofport_request=5 type=dummy mtu_request=65535 -- dnl
++   add-br br1 -- dnl
++   set bridge br1 other-config:hwaddr=aa:66:aa:66:00:00 -- dnl
++   set bridge br1 datapath-type=dummy other-config:datapath-id=1234 dnl
++                  fail-mode=secure -- dnl
++   add-bond br1 bond1 p3 p4 bond_mode=balance-tcp lacp=active dnl
++        other-config:lacp-time=fast other-config:bond-rebalance-interval=1000 -- dnl
++   set interface p3 type=dummy options:stream=unix:$OVS_RUNDIR/p1.sock ofport_request=3 mtu_request=65535 -- dnl
++   set interface p4 type=dummy options:stream=unix:$OVS_RUNDIR/p2.sock ofport_request=4 mtu_request=65535 -- dnl
++   add-port br1 p6 -- set interface p6 ofport_request=6 type=dummy mtu_request=65535 --])
++AT_CHECK([ovs-appctl vlog/set bond:dbg])
++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state up], 0, [OK
++])
 +AT_CHECK([ovs-ofctl add-flow br0 action=normal])
++AT_CHECK([ovs-ofctl add-flow br1 action=normal])
++OVS_WAIT_WHILE([ovs-appctl bond/show | grep "may_enable: false"])
 +
 +ovs-appctl time/stop
++ovs-appctl time/warp 2000 200
 +
-+dnl Basic scenario - needs to flood for IGMP followed by unicast ICMP
-+dnl in reverse direction
-+AT_CHECK([ovs-appctl netdev-dummy/receive p1 \
-+    '0101000c29a0aa55aa550001080046c00028000040000102d3494565eb4ae0000016940400002200f9020000000104000000e00000fb000000000000'])
-+AT_CHECK([ovs-appctl netdev-dummy/receive p2 \
-+    'aa55aa5500010101000c29a008004500001c00010000400164dc0a0101010a0101020800f7ffffffffff'])
-+
++dnl Send some traffic to distribute all the hashes between ports.
++AT_CHECK([SEND_TCP_BOND_PKTS([p5], [5], [65500])])
 +
-+AT_CHECK([ovs-appctl dpctl/dump-flows | grep -e .*ipv4 | sort | dnl
-+          strip_stats | strip_used | strip_recirc | dnl
-+          sed -e 's/,packet_type(ns=[[0-9]]*,id=[[0-9]]*),/,/'],
-+                     [0], [dnl
-+recirc_id(<recirc>),in_port(1),eth(src=aa:55:aa:55:00:01,dst=01:01:00:0c:29:a0),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:never, actions:100,2
-+recirc_id(<recirc>),in_port(2),eth(src=01:01:00:0c:29:a0,dst=aa:55:aa:55:00:01),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:never, actions:1
-+])
++dnl Wait for rebalancing so that per-hash stats are accounted.
++ovs-appctl time/warp 1000 100
 +
-+ovs-appctl time/warp 100000
++dnl Check that p2 handles some hashes.
++ovs-appctl bond/show > bond1.txt
++AT_CHECK([sed -n '/member p2/,/^$/p' bond1.txt | grep 'hash'], [0], [ignore])
 +
-+dnl Next we should clear the flows and install a complex case
-+AT_CHECK([ovs-ofctl del-flows br0])
++dnl Pause revalidators to be sure that they do not update flows while
++dnl the bonding configuration changes.
++ovs-appctl revalidator/pause
 +
-+AT_DATA([flows.txt], [dnl
-+table=0, arp actions=NORMAL
-+table=0, ip,in_port=1 actions=ct(table=1,zone=64000)
-+table=0, in_port=2 actions=output:1
-+table=1, ip,ct_state=+trk+inv actions=drop
-+table=1  ip,in_port=1,icmp,ct_state=+trk+new actions=output:2
-+table=1, in_port=1,ip,ct_state=+trk+new actions=controller(userdata=00.de.ad.be.ef.ca.fe.01)
-+table=1, in_port=1,ip,ct_state=+trk+est actions=output:2
++dnl Move p2 down to trigger an update of the bonding post-recirculation
++dnl rules by forcing all the hashes to move to p1.
++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p2 down], 0, [OK
 +])
-+AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
-+
-+ovs-appctl time/warp 100000
 +
-+dnl Send the IGMP, followed by a unicast ICMP - ensure we won't black hole
-+AT_CHECK([ovs-appctl netdev-dummy/receive p1 \
-+    '0101000c29a0aa55aa550001080046c00028000040000102d3494565eb4ae0000016940400002200f9020000000104000000e00000fb000000000000'])
-+AT_CHECK([ovs-appctl netdev-dummy/receive p1 \
-+    'aa55aa550001aa55aa55000208004500001c00010000400164dc0a0101010a0101020800f7ffffffffff'])
++dnl Send PACKET_OUT that may lead to flow updates since the bonding
++dnl configuration changed.
++packet=ffffffffffff00102030405008004500001c00000000401100000a000002ffffffff0035111100080000
++AT_CHECK([ovs-ofctl packet-out br0 "in_port=p5 packet=$packet actions=resubmit(,0)"])
 +
++dnl Resume revalidators.
++ovs-appctl revalidator/resume
++ovs-appctl revalidator/wait
 +
-+AT_CHECK([ovs-appctl dpctl/dump-flows | grep -e .*ipv4 | sort | dnl
-+          strip_stats | strip_used | strip_recirc | dnl
-+          sed 's/pid=[[0-9]]*,//
-+               s/,packet_type(ns=[[0-9]]*,id=[[0-9]]*),/,/'],
-+                     [0], [dnl
-+ct_state(+new-inv+trk),recirc_id(<recirc>),in_port(1),eth_type(0x0800),ipv4(proto=1,frag=no), packets:0, bytes:0, used:never, actions:2
-+ct_state(+new-inv+trk),recirc_id(<recirc>),in_port(1),eth_type(0x0800),ipv4(proto=2,frag=no), packets:0, bytes:0, used:never, actions:userspace(controller(reason=1,dont_send=0,continuation=0,recirc_id=<recirc>,rule_cookie=0,controller_id=0,max_len=65535))
-+recirc_id(<recirc>),in_port(1),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:0.0s, actions:ct(zone=64000),recirc(<recirc>)
-+])
++ovs-appctl time/warp 200 100
++dnl Check that all hashes moved from p2 and OVS is still working.
++ovs-appctl bond/show > bond2.txt
++AT_CHECK([sed -n '/member p2/,/^$/p' bond2.txt | grep 'hash'], [1], [ignore])
 +
++OVS_VSWITCHD_STOP()
 +AT_CLEANUP
-diff --git a/tests/ofproto-dpif.at b/tests/ofproto-dpif.at
-index 7c2edeb9d4..c923ed6606 100644
---- a/tests/ofproto-dpif.at
-+++ b/tests/ofproto-dpif.at
-@@ -29,6 +29,39 @@ AT_CHECK([ovs-appctl revalidator/wait])
+ 
+ # Makes sure recirculation does not change the way packet is handled.
+ AT_SETUP([ofproto-dpif - balance-tcp bonding, different recirc flow ])
+@@ -570,7 +669,7 @@ table=1 in_port=2 priority=1500 icmp actions=output(17),resubmit(,2)
+ table=1 in_port=3 priority=1500 icmp actions=output(14),resubmit(,2)
+ ])
+ AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=p1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=p1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: 10,11,12,13,14,15,16,17,18,19,20,21
+ ])
+@@ -584,7 +683,7 @@ echo "table=0 in_port=1 actions=output(10),goto_table(1)" > flows.txt
+ for i in `seq 1 63`; do echo "table=$i actions=goto_table($(($i+1)))"; done >> flows.txt
+ echo "table=64 actions=output(11)" >> flows.txt
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: 10,11
+ ])
+@@ -600,7 +699,7 @@ table=1 ip actions=write_actions(output(13)),goto_table(2)
+ table=2 ip actions=set_field:192.168.3.91->ip_src,output(11)
+ ])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -2 stdout], [0],
+   [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_src=192.168.0.1,nw_frag=no
+ Datapath actions: 10,set(ipv4(src=192.168.3.91)),11,set(ipv4(src=192.168.3.90)),13
+@@ -617,7 +716,7 @@ table=1 icmp6 actions=write_actions(output(13)),goto_table(2)
+ table=2 in_port=1,icmp6,icmpv6_type=135 actions=set_field:fe80::4->nd_target,set_field:cc:cc:cc:cc:cc:cc->nd_sll,output(11)
+ ])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,icmp6,ipv6_src=fe80::1,ipv6_dst=fe80::2,nw_tos=0,nw_ttl=128,icmpv6_type=135,nd_target=fe80::2020,nd_sll=66:55:44:33:22:11'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,icmp6,ipv6_src=fe80::1,ipv6_dst=fe80::2,nw_tos=0,nw_ttl=128,nw_frag=no,icmpv6_type=135,nd_target=fe80::2020,nd_sll=66:55:44:33:22:11'], [0], [stdout])
+ AT_CHECK([tail -4 stdout], [0],
+   [Megaflow: recirc_id=0,eth,icmp6,in_port=1,nw_frag=no,icmp_type=0x87/0xff,icmp_code=0x0/0xff,nd_target=fe80::2020,nd_sll=66:55:44:33:22:11
+ Datapath actions: 10,set(nd(target=fe80::4,sll=cc:cc:cc:cc:cc:cc)),11,set(nd(target=fe80::3,sll=aa:aa:aa:aa:aa:aa)),13
+@@ -635,7 +734,7 @@ table=0 in_port=1,ip actions=output(10),write_actions(set_field:192.168.3.90->ip
+ table=1 tcp actions=set_field:91->tp_src,output(11),clear_actions
+ ])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=9'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,nw_frag=no,tp_src=8,tp_dst=9'], [0], [stdout])
+ AT_CHECK([tail -2 stdout], [0],
+   [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_src=8
+ Datapath actions: 10,set(tcp(src=91)),11
+@@ -649,7 +748,7 @@ add_of_ports br0 1 10 11
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=set_field:192.168.3.90->ip_src,group:123,bucket=output:11'])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=123,type=all,bucket=output:10'])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=group:1234'])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),11
+ ])
+@@ -661,7 +760,7 @@ OVS_VSWITCHD_START
+ add_of_ports br0 1 10 11
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=output:10,set_field:192.168.3.90->ip_src,bucket=output:11'])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=group:1234'])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ # Must match on the source address to be able to restore its value for
+ # the second bucket
+ AT_CHECK([tail -2 stdout], [0],
+@@ -676,7 +775,7 @@ OVS_VSWITCHD_START
+ add_of_ports br0 1 10
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 group_id=1234,type=indirect,bucket=output:10])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=group:1234'])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: 10
+ ])
+@@ -708,7 +807,7 @@ OVS_VSWITCHD_START
+ add_of_ports br0 1 10 11
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=output:10,set_field:192.168.3.90->ip_src,bucket=output:11'])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(group:1234)'])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ # Must match on the source address to be able to restore its value for
+ # the third bucket
+ AT_CHECK([tail -2 stdout], [0],
+@@ -723,7 +822,7 @@ OVS_VSWITCHD_START
+ add_of_ports br0 1 10
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 group_id=1234,type=indirect,bucket=output:10])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(group:1234)'])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: 10
+ ])
+@@ -743,11 +842,11 @@ add_of_ports br0 1
+ add_of_ports br1 2
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br1 'ip actions=write_actions(pop_vlan,output:2)'])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=output:10'])
+-AT_CHECK([ovs-appctl ofproto/trace br1 'in_port=20,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_vlan=100,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br1 'in_port=20,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_vlan=100,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: pop_vlan,2
+ ])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_vlan=100,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_vlan=100,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: pop_vlan,2
+ ])
+@@ -1011,7 +1110,7 @@ OVS_VSWITCHD_START
+ add_of_ports br0 1 10 11
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=ff,bucket=watch_port:10,output:10,bucket=watch_port:11,output:11'])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(group:1234)'])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -1 stdout], [0],
+   [Datapath actions: 10
+ ])
+@@ -1142,7 +1241,7 @@ OVS_VSWITCHD_START
+ add_of_ports br0 1 10 11
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=output:10,move:NXM_NX_REG1[[]]->NXM_OF_IP_SRC[[]],bucket=output:11'])
+ AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(load:0xffffffff->NXM_NX_REG1[[]],move:NXM_NX_REG1[[]]->NXM_NX_REG2[[]],group:1234)'])
+-AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0'], [0], [stdout])
++AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0'], [0], [stdout])
+ AT_CHECK([tail -2 stdout], [0],
+   [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_src=192.168.0.1,nw_frag=no
+ Datapath actions: set(ipv4(src=255.255.255.255)),10,set(ipv4(src=192.168.0.1)),11
+@@ -1288,7 +1387,7 @@ table=1 in_port=1 action=dec_ttl,output:3
+ AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=111,tos=0,ttl=2,frag=no)' -generate], [0], [stdout])
+ AT_CHECK([tail -4 stdout], [0], [
+-Final flow: ip,in_port=1,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=111,nw_tos=0,nw_ecn=0,nw_ttl=1
++Final flow: ip,in_port=1,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=111,nw_tos=0,nw_ecn=0,nw_ttl=1,nw_frag=no
+ Megaflow: recirc_id=0,eth,ip,in_port=1,nw_ttl=2,nw_frag=no
+ Datapath actions: set(ipv4(ttl=1)),2,userspace(pid=0,controller(reason=2,dont_send=0,continuation=0,recirc_id=1,rule_cookie=0,controller_id=0,max_len=65535)),4
+ ])
+@@ -1311,7 +1410,7 @@ ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=50:54:00:00:00:05,dst=50:
+ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=34 in_port=1 (via invalid_ttl) data_len=34 (unbuffered)
+-ip,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=111,nw_tos=0,nw_ecn=0,nw_ttl=1
++ip,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=111,nw_tos=0,nw_ecn=0,nw_ttl=1,nw_frag=no
+ ])
  OVS_VSWITCHD_STOP
  AT_CLEANUP
+@@ -1497,13 +1596,13 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 6])
+ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=syn tcp_csum:2e7e
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=syn tcp_csum:2e7e
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=syn tcp_csum:2e7e
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=syn tcp_csum:2e7e
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=syn tcp_csum:2e7e
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=syn tcp_csum:2e7e
+ ])
  
-+AT_SETUP([ofproto-dpif - lldp revalidator event(REV_RECONFIGURE)])
-+OVS_VSWITCHD_START(
-+    [add-port br0 p1 -- set interface p1 ofport_request=1 type=dummy]
-+)
-+dnl first revalidation triggered by add interface
-+AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
-+1
-+])
-+
-+dnl enable lldp
-+AT_CHECK([ovs-vsctl set interface p1 lldp:enable=true])
-+AT_CHECK([ovs-appctl revalidator/wait])
-+AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
-+2
-+])
-+
-+dnl disable lldp
-+AT_CHECK([ovs-vsctl set interface p1 lldp:enable=false])
-+AT_CHECK([ovs-appctl revalidator/wait])
-+AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
-+3
-+])
-+
-+dnl remove lldp, no revalidation as lldp was disabled
-+AT_CHECK([ovs-vsctl remove interface p1 lldp enable])
-+AT_CHECK([ovs-appctl revalidator/wait])
-+AT_CHECK([ovs-appctl coverage/read-counter rev_reconfigure], [0], [dnl
-+3
-+])
-+
-+OVS_VSWITCHD_STOP
-+AT_CLEANUP
-+
- AT_SETUP([ofproto-dpif - active-backup bonding (with primary)])
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -1560,13 +1659,13 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 6])
+ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=rst|urg tcp_csum:2e5c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=rst|urg tcp_csum:2e5c
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=rst|urg tcp_csum:2e5c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=rst|urg tcp_csum:2e5c
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=rst|urg tcp_csum:2e5c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=rst|urg tcp_csum:2e5c
+ ])
+ 
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+@@ -1627,13 +1726,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ ])
+ 
+ dnl Hit table 0, Miss all other tables, sent to controller
+@@ -1647,13 +1746,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ ])
+ 
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+@@ -1690,13 +1789,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=2 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=2 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=2 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ ])
+ 
+ dnl Hit table 1, Miss all other tables, sent to controller
+@@ -1710,13 +1809,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ ])
+ 
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+@@ -1900,13 +1999,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ ])
+ 
+ dnl Singleton controller action.
+@@ -1920,11 +2019,11 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ ])
+ 
+ dnl Modified controller action.
+@@ -1938,13 +2037,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=15,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=30:33:33:33:33:33,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=fin tcp_csum:2e7e
++tcp,dl_vlan=15,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=30:33:33:33:33:33,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=fin tcp_csum:2e7e
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=15,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=30:33:33:33:33:33,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=fin tcp_csum:2e7e
++tcp,dl_vlan=15,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=30:33:33:33:33:33,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=fin tcp_csum:2e7e
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=15,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=30:33:33:33:33:33,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=fin tcp_csum:2e7e
++tcp,dl_vlan=15,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=30:33:33:33:33:33,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=fin tcp_csum:2e7e
+ ])
+ 
+ dnl Modified VLAN controller action.
+@@ -1958,13 +2057,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=38 in_port=1 (via action) data_len=38 (unbuffered)
+-ip,dl_vlan=99,dl_vlan_pcp=1,vlan_tci1=0x0000,dl_src=40:44:44:44:44:41,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64
++ip,dl_vlan=99,dl_vlan_pcp=1,vlan_tci1=0x0000,dl_src=40:44:44:44:44:41,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=38 in_port=1 (via action) data_len=38 (unbuffered)
+-ip,dl_vlan=99,dl_vlan_pcp=1,vlan_tci1=0x0000,dl_src=40:44:44:44:44:41,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64
++ip,dl_vlan=99,dl_vlan_pcp=1,vlan_tci1=0x0000,dl_src=40:44:44:44:44:41,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=38 in_port=1 (via action) data_len=38 (unbuffered)
+-ip,dl_vlan=99,dl_vlan_pcp=1,vlan_tci1=0x0000,dl_src=40:44:44:44:44:41,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64
++ip,dl_vlan=99,dl_vlan_pcp=1,vlan_tci1=0x0000,dl_src=40:44:44:44:44:41,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no
+ ])
+ 
+ dnl Checksum TCP.
+@@ -1978,31 +2077,31 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x1 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x3 total_len=58 reg0=0x1,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=2 cookie=0x4 total_len=58 reg0=0x1,reg1=0x2,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=3 cookie=0x5 total_len=58 reg0=0x1,reg1=0x2,reg2=0x3,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:2e7d
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=4 cookie=0x6 total_len=58 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:4880
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:4880
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=5 cookie=0x7 total_len=58 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:6082
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=11,tcp_flags=fin tcp_csum:6082
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x8 total_len=58 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=85,tp_dst=11,tcp_flags=fin tcp_csum:6035
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=85,tp_dst=11,tcp_flags=fin tcp_csum:6035
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=58 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=85,tp_dst=86,tcp_flags=fin tcp_csum:5fea
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=85,tp_dst=86,tcp_flags=fin tcp_csum:5fea
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=58 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=85,tp_dst=86,tcp_flags=fin tcp_csum:5fea
++tcp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=85,tp_dst=86,tcp_flags=fin tcp_csum:5fea
+ ])
+ 
+ dnl Checksum UDP.
+@@ -2016,31 +2115,31 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x1 total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=8,tp_dst=11 udp_csum:1234
++udp,vlan_tci=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=8,tp_dst=11 udp_csum:1234
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x3 total_len=64 reg0=0x1,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=8,tp_dst=11 udp_csum:1234
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=8,tp_dst=11 udp_csum:1234
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=2 cookie=0x4 total_len=64 reg0=0x1,reg1=0x2,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=8,tp_dst=11 udp_csum:1234
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=8,tp_dst=11 udp_csum:1234
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=3 cookie=0x5 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=8,tp_dst=11 udp_csum:1234
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=8,tp_dst=11 udp_csum:1234
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=4 cookie=0x6 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=8,tp_dst=11 udp_csum:2c37
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=8,tp_dst=11 udp_csum:2c37
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=5 cookie=0x7 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=8,tp_dst=11 udp_csum:4439
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=8,tp_dst=11 udp_csum:4439
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x8 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=11 udp_csum:43ec
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=11 udp_csum:43ec
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=86 udp_csum:43a1
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=86 udp_csum:43a1
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=64 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=64 (unbuffered)
+-udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=86 udp_csum:43a1
++udp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=86 udp_csum:43a1
+ ])
+ 
+ dnl Modified ARP controller action.
+@@ -2087,31 +2186,31 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 18])
+ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x1 total_len=98 in_port=1 (via action) data_len=98 (unbuffered)
+-sctp,vlan_tci=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
++sctp,vlan_tci=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x3 total_len=102 reg0=0x1,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=20:22:22:22:22:22,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=2 cookie=0x4 total_len=102 reg0=0x1,reg1=0x2,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=3 cookie=0x5 total_len=102 reg0=0x1,reg1=0x2,reg2=0x3,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=4 cookie=0x6 total_len=102 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=5 cookie=0x7 total_len=102 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1112,tp_dst=2223 sctp_csum:d9d79157
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x8 total_len=102 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=2223 sctp_csum:dd778f5f
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=2223 sctp_csum:dd778f5f
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=102 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=86 sctp_csum:62051f56
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=86 sctp_csum:62051f56
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=7 cookie=0x9 total_len=102 reg0=0x1,reg1=0x2,reg2=0x3,reg3=0x4,reg4=0x5,tun_id=0x6,in_port=1 (via action) data_len=102 (unbuffered)
+-sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=86 sctp_csum:62051f56
++sctp,dl_vlan=80,dl_vlan_pcp=0,vlan_tci1=0x0000,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=85,tp_dst=86 sctp_csum:62051f56
+ ])
+ 
+ AT_CHECK([ovs-ofctl dump-flows br0 | ofctl_strip | sort], [0], [dnl
+@@ -2151,13 +2250,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via action) data_len=118 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
++tcp,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=9,tcp_flags=ack tcp_csum:4a2c
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -2325,13 +2424,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=34 in_port=1 (via action) data_len=34 (unbuffered)
+-ip,vlan_tci=0x0000,dl_src=41:44:44:44:44:42,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64
++ip,vlan_tci=0x0000,dl_src=41:44:44:44:44:42,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=34 in_port=1 (via action) data_len=34 (unbuffered)
+-ip,vlan_tci=0x0000,dl_src=41:44:44:44:44:42,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64
++ip,vlan_tci=0x0000,dl_src=41:44:44:44:44:42,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=34 in_port=1 (via action) data_len=34 (unbuffered)
+-ip,vlan_tci=0x0000,dl_src=41:44:44:44:44:42,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64
++ip,vlan_tci=0x0000,dl_src=41:44:44:44:44:42,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=16,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no
+ ])
+ 
+ dnl Modified MPLS controller action.
+@@ -2539,13 +2638,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:66:66,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:66:66,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:66:66,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:66:66,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:66:66,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:66:66,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2565,13 +2664,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2591,13 +2690,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:02,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:02,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:02,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:02,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:02,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:02,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2617,13 +2716,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:03,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:03,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:03,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:03,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:03,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:03,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2643,13 +2742,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:04,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:04,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:04,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:04,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:04,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:04,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.2,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7743
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2669,13 +2768,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.0,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7745
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.0,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7745
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.0,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7745
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.0,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7745
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.0,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7745
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.0,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7745
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2695,13 +2794,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:06,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:06,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:06,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:06,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:06,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:06,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2721,13 +2820,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:07,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:07,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:07,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:07,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:07,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:07,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2771,13 +2870,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:09,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=48,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:09,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=48,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:09,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=48,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:09,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=48,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:09,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=48,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:09,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=48,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2797,13 +2896,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0a,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0a,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0a,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0a,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0a,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0a,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2dee
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2823,13 +2922,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0b,dl_dst=50:54:00:00:00:07,nw_src=10.0.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2ded
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0b,dl_dst=50:54:00:00:00:07,nw_src=10.0.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2ded
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0b,dl_dst=50:54:00:00:00:07,nw_src=10.0.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2ded
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0b,dl_dst=50:54:00:00:00:07,nw_src=10.0.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2ded
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0b,dl_dst=50:54:00:00:00:07,nw_src=10.0.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2ded
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:0b,dl_dst=50:54:00:00:00:07,nw_src=10.0.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:2ded
+ ])
  
- dnl Create br0 with members p1, p2 and p7, creating bond0 with p1 and
-@@ -81,11 +114,12 @@ recirc_id(0),in_port(4),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=ff:
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2931,13 +3030,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
  
- ovs-appctl netdev-dummy/set-admin-state p1 up
- ovs-appctl time/warp 100
--OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
-+OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
- ---- bond0 ----
- bond_mode: active-backup
- bond may use recirculation: no, <del>
- bond-hash-basis: 0
-+lb_output action: disabled, bond-id: -1
- updelay: 0 ms
- downdelay: 0 ms
- lacp_status: off
-@@ -99,7 +133,6 @@ member p1: enabled
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:00,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:00,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:00,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:00,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:00,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:00,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ ])
  
- member p2: enabled
-   may_enable: true
--
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2959,13 +3058,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:01,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
  ])
  
- OVS_VSWITCHD_STOP
-@@ -129,11 +162,12 @@ ovs-appctl time/warp 100
- OVS_WAIT_UNTIL([test -n "`ovs-appctl bond/show | fgrep 'member p1: disabled'`"])
- ovs-appctl netdev-dummy/set-admin-state p1 up
- ovs-appctl time/warp 100
--OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
-+OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
- ---- bond0 ----
- bond_mode: active-backup
- bond may use recirculation: no, <del>
- bond-hash-basis: 0
-+lb_output action: disabled, bond-id: -1
- updelay: 0 ms
- downdelay: 0 ms
- lacp_status: off
-@@ -150,7 +184,6 @@ member p2: enabled
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -2986,13 +3085,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
  
- member p3: enabled
-   may_enable: true
--
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:10,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:10,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:10,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:10,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:10,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:02:10,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
  ])
  
- dnl Now delete the primary and verify that the output shows that the
-@@ -171,11 +204,12 @@ ovs-vsctl \
-    --id=@p1 create Interface name=p1 type=dummy options:pstream=punix:$OVS_RUNDIR/p1.sock ofport_request=1 -- \
-    set Port bond0 interfaces="$uuids, @p1]"
- ovs-appctl time/warp 100
--OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
-+OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
- ---- bond0 ----
- bond_mode: active-backup
- bond may use recirculation: no, <del>
- bond-hash-basis: 0
-+lb_output action: disabled, bond-id: -1
- updelay: 0 ms
- downdelay: 0 ms
- lacp_status: off
-@@ -192,17 +226,17 @@ member p2: enabled
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -3314,13 +3413,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
  
- member p3: enabled
-   may_enable: true
--
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.2) (xid=0x0): table_id=1 total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ OFPT_PACKET_IN (OF1.2) (xid=0x0): table_id=1 total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ OFPT_PACKET_IN (OF1.2) (xid=0x0): table_id=1 total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=255,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
  ])
  
- dnl Switch to another primary
- ovs-vsctl set port bond0 other_config:bond-primary=p2
- ovs-appctl time/warp 100
--OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
-+OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
- ---- bond0 ----
- bond_mode: active-backup
- bond may use recirculation: no, <del>
- bond-hash-basis: 0
-+lb_output action: disabled, bond-id: -1
- updelay: 0 ms
- downdelay: 0 ms
- lacp_status: off
-@@ -211,25 +245,25 @@ active-backup primary: p2
- <active member mac del>
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+@@ -3363,13 +3462,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
  
- member p1: enabled
--  active member
-   may_enable: true
+ AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.2) (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ OFPT_PACKET_IN (OF1.2) (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ dnl
+ OFPT_PACKET_IN (OF1.2) (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=254,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
++tcp,vlan_tci=0x0000,dl_src=60:66:66:66:00:08,dl_dst=50:54:00:00:00:01,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=32,nw_ecn=0,nw_ttl=254,nw_frag=no,tp_src=80,tp_dst=0,tcp_flags=0 tcp_csum:7744
+ ])
  
- member p2: enabled
-+  active member
-   may_enable: true
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+@@ -3402,13 +3501,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
  
- member p3: enabled
-   may_enable: true
--
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
  ])
  
- dnl Remove the "bond-primary" config directive from the bond.
- AT_CHECK([ovs-vsctl remove Port bond0 other_config bond-primary])
- ovs-appctl time/warp 100
--OVS_WAIT_UNTIL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [0], [dnl
-+OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
- ---- bond0 ----
- bond_mode: active-backup
- bond may use recirculation: no, <del>
- bond-hash-basis: 0
-+lb_output action: disabled, bond-id: -1
- updelay: 0 ms
- downdelay: 0 ms
- lacp_status: off
-@@ -238,15 +272,14 @@ active-backup primary: <none>
- <active member mac del>
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+@@ -3444,13 +3543,13 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
  
- member p1: enabled
--  active member
-   may_enable: true
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ ])
  
- member p2: enabled
-+  active member
-   may_enable: true
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+@@ -3516,13 +3615,13 @@ send: OFPT_SET_ASYNC (OF1.3) (xid=0x2):
+   REQUESTFORWARD: (off)
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ ])
  
- member p3: enabled
-   may_enable: true
--
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -3558,13 +3657,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -3607,34 +3706,34 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=2 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=2 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=2 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): total_len=54 in_port=ANY (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -3679,34 +3778,34 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): table_id=2 cookie=0x0 total_len=54 in_port=1 (via group) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via action_set) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): table_id=2 cookie=0x0 total_len=54 in_port=1 (via group) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via action_set) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): table_id=2 cookie=0x0 total_len=54 in_port=1 (via group) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via action_set) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ dnl
+ OFPT_PACKET_IN (OF1.4) (xid=0x0): total_len=54 in_port=ANY (via packet_out) data_len=54 (unbuffered)
+-tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
++tcp,vlan_tci=0x0000,dl_src=10:11:11:11:11:11,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=8,tp_dst=10,tcp_flags=syn tcp_csum:2e7d
+ ])
+ 
+ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+@@ -3751,10 +3850,10 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=43 cookie=0x0 total_len=98 metadata=0x67871d4d000000,in_port=1 (via action) data_len=98 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=3a:6d:d2:09:9c:ab,dl_dst=1e:2c:e9:2a:66:9e,nw_src=192.168.10.10,nw_dst=192.168.10.30,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:6f20
++icmp,vlan_tci=0x0000,dl_src=3a:6d:d2:09:9c:ab,dl_dst=1e:2c:e9:2a:66:9e,nw_src=192.168.10.10,nw_dst=192.168.10.30,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:6f20
+ dnl
+ OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=50 cookie=0x0 total_len=98 metadata=0x67871d4d000000,in_port=1 (via no_match) data_len=98 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=3a:6d:d2:09:9c:ab,dl_dst=1e:2c:e9:2a:66:9e,nw_src=192.168.10.10,nw_dst=192.168.10.30,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:6f20
++icmp,vlan_tci=0x0000,dl_src=3a:6d:d2:09:9c:ab,dl_dst=1e:2c:e9:2a:66:9e,nw_src=192.168.10.10,nw_dst=192.168.10.30,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:6f20
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -5007,7 +5106,7 @@ ovs-vsctl \
+ AT_CHECK([ovs-ofctl add-flow br0 action=output:1])
+ 
+ # "in_port" defaults to OFPP_NONE if it's not specified.
+-flow="icmp,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_ttl=128,icmp_type=8,icmp_code=0"
++flow="icmp,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0"
+ AT_CHECK([ovs-appctl ofproto/trace br0 "$flow"], [0], [stdout])
+ AT_CHECK_UNQUOTED([tail -1 stdout], [0],
+   [Datapath actions: 1,2
+@@ -5317,7 +5416,7 @@ ovs-vsctl \
+ AT_CHECK([ovs-ofctl add-flow br0 action=output:1])
+ 
+ # "in_port" defaults to OFPP_NONE if it's not specified.
+-flow="icmp,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_ttl=128,icmp_type=8,icmp_code=0"
++flow="icmp,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0"
+ AT_CHECK([ovs-appctl ofproto/trace br0 "$flow"], [0], [stdout])
+ AT_CHECK_UNQUOTED([tail -1 stdout], [0],
+   [Datapath actions: 1,trunc(100),2
+@@ -5464,7 +5563,7 @@ ovs-vsctl \
+ 
+ flow="in_port=1"
+ AT_CHECK([ovs-appctl ofproto/trace br0 "$flow"], [0], [stdout])
+-AT_CHECK([tail -1 stdout | egrep "trunc\(200\),2,trunc\(300\),3,100|trunc\(300\),3,trunc\(200\),2,100"], [0], [stdout])
++AT_CHECK([tail -1 stdout | grep -E "trunc\(200\),2,trunc\(300\),3,100|trunc\(300\),3,trunc\(200\),2,100"], [0], [stdout])
+ 
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
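The hunk just above swaps egrep for grep -E: GNU grep 3.8 declared the egrep and fgrep wrappers obsolescent and makes them print a deprecation warning on every invocation, which would leak into the test's captured output. A minimal illustration, where the pattern is the test's own truncation check and the echoed string is only a stand-in for real trace output:

    # Obsolescent spelling; newer GNU grep warns that egrep is obsolescent:
    echo 'trunc(200),2,trunc(300),3,100' | egrep 'trunc\(200\)|trunc\(300\)'
    # Equivalent extended-regexp spelling used by the updated test:
    echo 'trunc(200),2,trunc(300),3,100' | grep -E 'trunc\(200\)|trunc\(300\)'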
+@@ -5512,11 +5611,11 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
+ 
+ flow="in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)"
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "$flow" -generate], [0], [stdout])
+-AT_CHECK([grep "Final flow:" stdout], [0], [Final flow: icmp,tun_id=0x6,in_port=1,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=128,icmp_type=8,icmp_code=0
++AT_CHECK([grep "Final flow:" stdout], [0], [Final flow: icmp,tun_id=0x6,in_port=1,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0
+ ])
+ 
+ AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "$flow,recirc_id(1)" -generate], [0], [stdout])
+-AT_CHECK([grep "Final flow:" stdout], [0], [Final flow: recirc_id=0x1,eth,icmp,tun_id=0x6,in_port=1,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=128,icmp_type=8,icmp_code=0
++AT_CHECK([grep "Final flow:" stdout], [0], [Final flow: recirc_id=0x1,eth,icmp,tun_id=0x6,in_port=1,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0
  ])
  
  OVS_VSWITCHD_STOP
-@@ -5573,7 +5606,36 @@ check_flows () {
+@@ -5573,7 +5672,36 @@ check_flows () {
      echo "n_packets=$n"
      test "$n" = 1
  }
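Nearly all of the expected-output hunks in this file make one mechanical change: every IPv4 and IPv6 flow string gains an explicit nw_frag=no field, evidently because the backported flow formatter now prints the fragmentation state for every IP flow instead of omitting it when the packet is not a fragment. A sketch of where the new field appears, reusing a flow string from the tests above (br0 and the addresses are the testsuite's own dummy values):

    # With this patch applied, ofproto/trace echoes nw_frag=no in its
    # flow lines for ordinary, unfragmented IP traffic:
    ovs-appctl ofproto/trace br0 \
        "icmp,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_ttl=128,nw_frag=no,icmp_type=8,icmp_code=0"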
@@ -8823,7 +61654,37 @@ index 7c2edeb9d4..c923ed6606 100644
  
  OVS_VSWITCHD_STOP
  AT_CLEANUP
-@@ -7600,13 +7662,28 @@ dnl configure bridge IPFIX and ensure that sample action generation works at the
+@@ -6202,6 +6330,20 @@ AT_CHECK([tail -2 stderr], [0], [dnl
+ ovs-appctl: ovs-vswitchd: server returned an error
+ ])
+ 
++# Test incorrect command: ofproto/trace with nonexistent port number
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "in_port(42)" ], [2], [stdout], [stderr])
++AT_CHECK([tail -2 stderr], [0], [dnl
++no OpenFlow port for datapath port 42
++ovs-appctl: ovs-vswitchd: server returned an error
++])
++
++# Test incorrect command: ofproto/trace with nonexistent recirc_id
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "recirc_id(0x42)" ], [2], [stdout], [stderr])
++AT_CHECK([tail -2 stderr], [0], [dnl
++no recirculation data for recirc_id 0x42
++ovs-appctl: ovs-vswitchd: server returned an error
++])
++
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
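The two negative tests added above pin down the error handling of ofproto/trace when a datapath flow references state the bridge does not have: an unmapped datapath port number and an unknown recirculation id each make the command fail with exit status 2 and a one-line diagnostic on stderr. A reproduction sketch against the same dummy datapath the testsuite drives:

    # Fails with status 2; stderr ends with
    # "no OpenFlow port for datapath port 42":
    ovs-appctl ofproto/trace ovs-dummy "in_port(42)"
    # Fails with status 2; stderr ends with
    # "no recirculation data for recirc_id 0x42":
    ovs-appctl ofproto/trace ovs-dummy "recirc_id(0x42)"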
+@@ -7031,7 +7173,7 @@ dnl An 170 byte packet
+ AT_CHECK([ovs-appctl netdev-dummy/receive p1 '000c29c8a0a4005056c0000808004500009cb4a6000040019003c0a8da01c0a8da640800cb5fa762000556f431ad0009388e08090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f'])
+ 
+ AT_CHECK([ovs-ofctl parse-pcap p1.pcap], [0], [dnl
+-icmp,in_port=ANY,vlan_tci=0x0000,dl_src=00:50:56:c0:00:08,dl_dst=00:0c:29:c8:a0:a4,nw_src=192.168.218.1,nw_dst=192.168.218.100,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0
++icmp,in_port=ANY,vlan_tci=0x0000,dl_src=00:50:56:c0:00:08,dl_dst=00:0c:29:c8:a0:a4,nw_src=192.168.218.1,nw_dst=192.168.218.100,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0
+ ])
+ 
+ AT_CHECK([ovs-appctl revalidator/purge], [0])
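The parse-pcap hunk applies the same nw_frag=no expectation to packet decoding: reading a capture back with ovs-ofctl now prints the fragment field as part of the flow. Usage, with the p1.pcap file that this test itself produces:

    # Decode a capture into flow syntax; unfragmented IPv4 packets
    # now print with nw_frag=no:
    ovs-ofctl parse-pcap p1.pcap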
+@@ -7600,13 +7742,28 @@ dnl configure bridge IPFIX and ensure that sample action generation works at the
  dnl datapath level.
  AT_SETUP([ofproto-dpif - Bridge IPFIX sanity check])
  OVS_VSWITCHD_START
@@ -8853,6 +61714,238 @@ index 7c2edeb9d4..c923ed6606 100644
  dnl Send some packets that should be sampled.
  for i in `seq 1 3`; do
      AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800)'])
+@@ -8666,7 +8823,7 @@ recirc_id(0),in_port(100),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(src=192.1
+ ])
+ 
+ AT_CHECK([grep -e '|ofproto_dpif_xlate|WARN|' ovs-vswitchd.log | sed "s/^.*|WARN|//"], [0], [dnl
+-stack underflow on bridge br1 while processing icmp,in_port=LOCAL,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0
++stack underflow on bridge br1 while processing icmp,in_port=LOCAL,vlan_tci=0x0000,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0
+ ])
+ 
+ OVS_VSWITCHD_STOP(["/stack underflow/d"])
+@@ -9855,7 +10012,7 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=86 in_port=1 (via no_match) data_len=86 (unbuffered)
+-icmp6,vlan_tci=0x0000,dl_src=00:00:86:05:80:da,dl_dst=00:60:97:07:69:ea,ipv6_src=fe80::200:86ff:fe05:80da,ipv6_dst=fe80::260:97ff:fe07:69ea,ipv6_label=0x00000,nw_tos=0,nw_ecn=0,nw_ttl=255,icmp_type=135,icmp_code=0,nd_target=fe80::260:97ff:fe07:69ea,nd_sll=00:00:86:05:80:da,nd_tll=00:00:00:00:00:00 icmp6_csum:68bd
++icmp6,vlan_tci=0x0000,dl_src=00:00:86:05:80:da,dl_dst=00:60:97:07:69:ea,ipv6_src=fe80::200:86ff:fe05:80da,ipv6_dst=fe80::260:97ff:fe07:69ea,ipv6_label=0x00000,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,icmp_type=135,icmp_code=0,nd_target=fe80::260:97ff:fe07:69ea,nd_sll=00:00:86:05:80:da,nd_tll=00:00:00:00:00:00 icmp6_csum:68bd
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -9906,7 +10063,7 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=86 in_port=1 (via action) data_len=86 (unbuffered)
+-icmp6,vlan_tci=0x0000,dl_src=00:00:86:05:80:da,dl_dst=00:60:97:07:69:ea,ipv6_src=fe80::200:86ff:fe05:80da,ipv6_dst=fe80::260:97ff:fe07:69ea,ipv6_label=0x00000,nw_tos=0,nw_ecn=0,nw_ttl=255,icmp_type=135,icmp_code=0,nd_target=fe80::1,nd_sll=32:21:14:86:11:74,nd_tll=00:00:00:00:00:00 icmp6_csum:19d3
++icmp6,vlan_tci=0x0000,dl_src=00:00:86:05:80:da,dl_dst=00:60:97:07:69:ea,ipv6_src=fe80::200:86ff:fe05:80da,ipv6_dst=fe80::260:97ff:fe07:69ea,ipv6_label=0x00000,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,icmp_type=135,icmp_code=0,nd_target=fe80::1,nd_sll=32:21:14:86:11:74,nd_tll=00:00:00:00:00:00 icmp6_csum:19d3
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10166,10 +10323,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output. We only see the latter two packets, not the first.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x0 total_len=106 reg0=0x1,reg1=0x4d2,reg2=0x1,reg3=0x1,reg4=0x1,in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_zone=1,ct_mark=0x1,ct_label=0x4d2000000000000000000000000,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,reg0=0x1,reg1=0x4d2,reg2=0x1,reg3=0x2,reg4=0x1,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ ])
+ 
+ AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl -P nxt_packet_in --detach --no-chdir --pidfile 2> ofctl_monitor.log])
+@@ -10187,10 +10344,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output. We should see both packets
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x0 total_len=106 reg0=0x1,reg1=0x4d2,reg2=0x1,reg3=0x1,reg4=0x1,in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=3,tp_dst=2 udp_csum:551
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=3,tp_dst=2 udp_csum:551
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_zone=1,ct_mark=0x1,ct_label=0x4d2000000000000000000000000,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=3,ct_tp_dst=2,ip,reg0=0x1,reg1=0x4d2,reg2=0x1,reg3=0x2,reg4=0x1,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=3 udp_csum:551
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=3 udp_csum:551
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10239,10 +10396,10 @@ dnl Note that the first packet doesn't have the ct_state bits set. This
+ dnl happens because the ct_state field is available only after recirc.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ ])
+ 
+ AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl -P nxt_packet_in --detach --no-chdir --pidfile 2> ofctl_monitor.log])
+@@ -10261,10 +10418,10 @@ dnl Note that the first packet doesn't have the ct_state bits set. This
+ dnl happens because the ct_state field is available only after recirc.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=3,tp_dst=4 udp_csum:54f
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=3,tp_dst=4 udp_csum:54f
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=4,tp_dst=3 udp_csum:54f
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=4,tp_dst=3 udp_csum:54f
+ ])
+ 
+ dnl
+@@ -10320,9 +10477,9 @@ dnl Note that the first packet doesn't have the ct_state bits set. This
+ dnl happens because the ct_state field is available only after recirc.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=126 in_port=1 (via action) data_len=126 (unbuffered)
+-udp6,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,ipv6_src=2001:db8::1,ipv6_dst=2001:db8::2,ipv6_label=0x00000,nw_tos=112,nw_ecn=0,nw_ttl=128,tp_src=1,tp_dst=2 udp_csum:bfe2
++udp6,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,ipv6_src=2001:db8::1,ipv6_dst=2001:db8::2,ipv6_label=0x00000,nw_tos=112,nw_ecn=0,nw_ttl=128,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:bfe2
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=126 ct_state=est|rpl|trk,ct_ipv6_src=2001:db8::1,ct_ipv6_dst=2001:db8::2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ipv6,in_port=2 (via action) data_len=126 (unbuffered)
+-udp6,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,ipv6_src=2001:db8::2,ipv6_dst=2001:db8::1,ipv6_label=0x00000,nw_tos=112,nw_ecn=0,nw_ttl=128,tp_src=2,tp_dst=1 udp_csum:bfe2
++udp6,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,ipv6_src=2001:db8::2,ipv6_dst=2001:db8::1,ipv6_label=0x00000,nw_tos=112,nw_ecn=0,nw_ttl=128,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:bfe2
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10433,7 +10590,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output. Only one reply must be there
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ dnl
+ OFPT_ECHO_REQUEST (xid=0x0): 0 bytes of payload
+ ])
+@@ -10467,7 +10624,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=86 ct_state=inv|trk,ipv6,in_port=2 (via action) data_len=86 (unbuffered)
+-icmp6,vlan_tci=0x0000,dl_src=00:00:86:05:80:da,dl_dst=00:60:97:07:69:ea,ipv6_src=fe80::200:86ff:fe05:80da,ipv6_dst=fe80::260:97ff:fe07:69ea,ipv6_label=0x00000,nw_tos=0,nw_ecn=0,nw_ttl=255,icmp_type=135,icmp_code=0,nd_target=fe80::260:97ff:fe07:69ea,nd_sll=00:00:86:05:80:da,nd_tll=00:00:00:00:00:00 icmp6_csum:68bd
++icmp6,vlan_tci=0x0000,dl_src=00:00:86:05:80:da,dl_dst=00:60:97:07:69:ea,ipv6_src=fe80::200:86ff:fe05:80da,ipv6_dst=fe80::260:97ff:fe07:69ea,ipv6_label=0x00000,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,icmp_type=135,icmp_code=0,nd_target=fe80::260:97ff:fe07:69ea,nd_sll=00:00:86:05:80:da,nd_tll=00:00:00:00:00:00 icmp6_csum:68bd
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10523,16 +10680,16 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output. We only see the latter two packets (for each zone), not the first.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=3 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_zone=1,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=4 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10579,10 +10736,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output. We only see the latter two packets, not the first.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10629,10 +10786,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output. We only see the first and the last packet
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=47 ct_state=new|trk,ct_nw_src=172.16.0.1,ct_nw_dst=172.16.0.2,ct_nw_proto=17,ct_tp_src=41614,ct_tp_dst=5555,ip,in_port=1 (via action) data_len=47 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=e6:4c:47:35:28:c9,dl_dst=c6:f9:4e:cb:72:db,nw_src=172.16.0.1,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=41614,tp_dst=5555 udp_csum:2096
++udp,vlan_tci=0x0000,dl_src=e6:4c:47:35:28:c9,dl_dst=c6:f9:4e:cb:72:db,nw_src=172.16.0.1,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=41614,tp_dst=5555 udp_csum:2096
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=75 ct_state=rel|rpl|trk,ct_nw_src=172.16.0.1,ct_nw_dst=172.16.0.2,ct_nw_proto=17,ct_tp_src=41614,ct_tp_dst=5555,ip,in_port=2 (via action) data_len=75 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=c6:f9:4e:cb:72:db,dl_dst=e6:4c:47:35:28:c9,nw_src=172.16.0.2,nw_dst=172.16.0.1,nw_tos=192,nw_ecn=0,nw_ttl=64,icmp_type=3,icmp_code=3 icmp_csum:553f
++icmp,vlan_tci=0x0000,dl_src=c6:f9:4e:cb:72:db,dl_dst=e6:4c:47:35:28:c9,nw_src=172.16.0.2,nw_dst=172.16.0.1,nw_tos=192,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=3,icmp_code=3 icmp_csum:553f
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10681,19 +10838,19 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=3,tp_dst=4 udp_csum:54f
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=3,tp_dst=4 udp_csum:54f
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=5,tp_dst=6 udp_csum:54b
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=5,tp_dst=6 udp_csum:54b
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_mark=0x1,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_mark=0x3,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=3,ct_tp_dst=4,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=4,tp_dst=3 udp_csum:54f
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=4,tp_dst=3 udp_csum:54f
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -10738,10 +10895,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_label=0x1,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_label=0x2,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=3,ct_tp_dst=4,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=4,tp_dst=3 udp_csum:54f
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=4,tp_dst=3 udp_csum:54f
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -11152,16 +11309,16 @@ dnl Note that the first packet doesn't have the ct_state bits set. This
+ dnl happens because the ct_state field is available only after recirc.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=1,tp_dst=2 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ ])
+ 
+ dnl The next test verifies that ct_clear at the datapath only gets executed
+@@ -11235,13 +11392,13 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ dnl Check this output.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.2.100,ct_nw_dst=10.1.2.200,ct_nw_proto=17,ct_tp_src=6,ct_tp_dst=6,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.2.200,nw_dst=10.1.2.100,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=6,tp_dst=6 udp_csum:221
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.2.200,nw_dst=10.1.2.100,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=6,tp_dst=6 udp_csum:221
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=126 ct_state=est|rpl|trk,ct_ipv6_src=2001:db8::1,ct_ipv6_dst=2001:db8::2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ipv6,in_port=2 (via action) data_len=126 (unbuffered)
+-udp6,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,ipv6_src=2001:db8::2,ipv6_dst=2001:db8::1,ipv6_label=0x00000,nw_tos=112,nw_ecn=0,nw_ttl=128,tp_src=2,tp_dst=1 udp_csum:bfe2
++udp6,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,ipv6_src=2001:db8::2,ipv6_dst=2001:db8::1,ipv6_label=0x00000,nw_tos=112,nw_ecn=0,nw_ttl=128,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:bfe2
+ dnl
+ NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=2,tp_dst=1 udp_csum:553
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:0a,dl_dst=50:54:00:00:00:09,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:553
+ ])
+ 
+ OVS_VSWITCHD_STOP
 diff --git a/tests/ofproto-macros.at b/tests/ofproto-macros.at
 index 736d9809cb..b18f0fbc1e 100644
 --- a/tests/ofproto-macros.at
@@ -8895,10 +61988,68 @@ index 736d9809cb..b18f0fbc1e 100644
  /|WARN|/p
  /|ERR|/p
  /|EMER|/p" ${logs}
+diff --git a/tests/ofproto.at b/tests/ofproto.at
+index 156d3e058c..39c3b04704 100644
+--- a/tests/ofproto.at
++++ b/tests/ofproto.at
+@@ -3108,7 +3108,7 @@ vlan_tci=0x0000,dl_src=00:10:20:30:40:50,dl_dst=00:01:02:03:04:05,dl_type=0x1234
+     ovs-ofctl packet-out br0 "in_port=controller packet=002583dfb4000026b98cb0f908004500003eb7e200000011339bac11370dac100002d7730035002b8f6d86fb0100000100000000000006626c702d7873066e696369726103636f6d00000f00 actions=dec_ttl"
+     if test X"$1" = X"OFPR_INVALID_TTL"; then shift;
+         echo >>expout "OFPT_PACKET_IN: total_len=76 in_port=CONTROLLER (via invalid_ttl) data_len=76 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=55155,tp_dst=53 udp_csum:8f6d"
++udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=55155,tp_dst=53 udp_csum:8f6d"
+     fi
+ 
+     # OFPT_PORT_STATUS, OFPPR_ADD
+@@ -3211,7 +3211,7 @@ vlan_tci=0x0000,dl_src=00:10:20:30:40:50,dl_dst=00:01:02:03:04:05,dl_type=0x1234
+     ovs-ofctl -O OpenFlow12 packet-out br0 none dec_ttl '002583dfb4000026b98cb0f908004500003eb7e200000011339bac11370dac100002d7730035002b8f6d86fb0100000100000000000006626c702d7873066e696369726103636f6d00000f00'
+     if test X"$1" = X"OFPR_INVALID_TTL"; then shift;
+         echo >>expout "OFPT_PACKET_IN (OF1.2): total_len=76 in_port=ANY (via invalid_ttl) data_len=76 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=55155,tp_dst=53 udp_csum:8f6d"
++udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=55155,tp_dst=53 udp_csum:8f6d"
+     fi
+ 
+     # OFPT_PORT_STATUS, OFPPR_ADD
+@@ -3325,7 +3325,7 @@ vlan_tci=0x0000,dl_src=00:10:20:30:40:50,dl_dst=00:01:02:03:04:05,dl_type=0x1234
+     ovs-ofctl -O OpenFlow13 packet-out br0 none dec_ttl '002583dfb4000026b98cb0f908004500003eb7e200000011339bac11370dac100002d7730035002b8f6d86fb0100000100000000000006626c702d7873066e696369726103636f6d00000f00'
+     if test X"$1" = X"OFPR_INVALID_TTL"; then shift;
+         echo >>expout "OFPT_PACKET_IN (OF1.3): total_len=76 in_port=ANY (via invalid_ttl) data_len=76 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=55155,tp_dst=53 udp_csum:8f6d"
++udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=55155,tp_dst=53 udp_csum:8f6d"
+     fi
+ 
+     # OFPT_PORT_STATUS, OFPPR_ADD
+@@ -3459,7 +3459,7 @@ vlan_tci=0x0000,dl_src=00:10:20:30:40:50,dl_dst=00:01:02:03:04:05,dl_type=0x1234
+     ovs_ofctl -O OpenFlow14 packet-out br0 none dec_ttl '002583dfb4000026b98cb0f908004500003eb7e200000011339bac11370dac100002d7730035002b8f6d86fb0100000100000000000006626c702d7873066e696369726103636f6d00000f00'
+     if test X"$1" = X"OFPR_INVALID_TTL"; then shift;
+         echo >>expout "OFPT_PACKET_IN (OF1.4): total_len=76 in_port=ANY (via invalid_ttl) data_len=76 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=55155,tp_dst=53 udp_csum:8f6d"
++udp,vlan_tci=0x0000,dl_src=00:26:b9:8c:b0:f9,dl_dst=00:25:83:df:b4:00,nw_src=172.17.55.13,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=55155,tp_dst=53 udp_csum:8f6d"
+     fi
+ 
+ # OFPT_PORT_STATUS, OFPPR_ADD
+@@ -4434,7 +4434,7 @@ ovs-appctl -t ovs-ofctl exit
+ 
+ AT_CHECK([sed 's/ (xid=0x[[0-9a-fA-F]]*)//' monitor.log], [0], [dnl
+ OFPT_PACKET_IN (OF1.3): total_len=32 packet_type=(1,0x800),metadata=0xfafafafa5a5a5a5a,in_port=CONTROLLER (via action) data_len=32 (unbuffered)
+-packet_type=(1,0x800),nw_src=10.0.0.20,nw_dst=10.0.0.30,nw_proto=17,nw_tos=0,nw_ecn=0,nw_ttl=255,tp_src=100,tp_dst=200 udp_csum:ea78
++packet_type=(1,0x800),nw_src=10.0.0.20,nw_dst=10.0.0.30,nw_proto=17,nw_tos=0,nw_ecn=0,nw_ttl=255,nw_frag=no,tp_src=100,tp_dst=200 udp_csum:ea78
+ OFPT_BARRIER_REPLY (OF1.3):
+ ])
+ 
 diff --git a/tests/ovs-macros.at b/tests/ovs-macros.at
-index 66545da572..e6c5bc6e94 100644
+index 66545da572..d09dbb4cd5 100644
 --- a/tests/ovs-macros.at
 +++ b/tests/ovs-macros.at
+@@ -134,7 +134,7 @@ parent_pid () {
+     # e.g. Alpine Linux) is noncompliant, so we use a Linux-specific approach
+     # when it's available.  We check the format of the status file to avoid
+     # the NetBSD file with the same name but different contents.
+-    if egrep '^PPid:[[:space:]]*[0-9]*$' /proc/$1/status > /dev/null 2>&1; then
++    if grep -E '^PPid:[[:space:]]*[0-9]*$' /proc/$1/status > /dev/null 2>&1; then
+         sed -n 's/^PPid:	\([0-9]*\)/\1/p' /proc/$1/status
+     else
+         ps -o ppid= -p $1
 @@ -259,7 +259,20 @@ dnl Executes shell COMMAND in a loop until it returns zero.  If COMMAND does
  dnl not return zero within a reasonable time limit, executes the commands
  dnl in IF-FAILED (if provided) and fails the test.
@@ -8933,6 +62084,23 @@ index 66545da572..e6c5bc6e94 100644
              [AT_LINE], [while $1])])
  
  dnl OVS_APP_EXIT_AND_WAIT(DAEMON)
+diff --git a/tests/ovs-ofctl.at b/tests/ovs-ofctl.at
+index 267711bfa4..c9c67f2b1e 100644
+--- a/tests/ovs-ofctl.at
++++ b/tests/ovs-ofctl.at
+@@ -3243,9 +3243,9 @@ AT_CHECK([ovs-testcontroller -vsyslog:off --detach --no-chdir --pidfile punix:te
+ OVS_WAIT_UNTIL([test -e testcontroller])
+ 
+ dnl check for some of the initial handshake messages
+-OVS_WAIT_UNTIL([egrep "OFPT_FEATURES_REQUEST" snoopbr0.txt >/dev/null 2>&1])
+-OVS_WAIT_UNTIL([egrep "OFPT_FEATURES_REPLY" snoopbr0.txt >/dev/null 2>&1])
+-OVS_WAIT_UNTIL([egrep "OFPT_SET_CONFIG" snoopbr0.txt >/dev/null 2>&1])
++OVS_WAIT_UNTIL([grep -E "OFPT_FEATURES_REQUEST" snoopbr0.txt >/dev/null 2>&1])
++OVS_WAIT_UNTIL([grep -E "OFPT_FEATURES_REPLY" snoopbr0.txt >/dev/null 2>&1])
++OVS_WAIT_UNTIL([grep -E "OFPT_SET_CONFIG" snoopbr0.txt >/dev/null 2>&1])
+ 
+ dnl need to suppress the 'connection failed' WARN message in ovs-vswitchd
+ dnl because we need ovs-vswitchd to have the controller config before starting
 diff --git a/tests/ovs-vswitchd.at b/tests/ovs-vswitchd.at
 index bba4fea2bc..977b2eba1f 100644
 --- a/tests/ovs-vswitchd.at
@@ -9565,7 +62733,7 @@ index 876cb836cd..e672c13b27 100644
              db.tmp], [0], [stdout], [stderr])
  PARSE_LISTENING_PORT([listener.log], [BAD_TCP_PORT])
 diff --git a/tests/pmd.at b/tests/pmd.at
-index a2f9d34a2a..3962dd2bd9 100644
+index a2f9d34a2a..a2832b544c 100644
 --- a/tests/pmd.at
 +++ b/tests/pmd.at
 @@ -199,7 +199,7 @@ pmd thread numa_id <cleared> core_id <cleared>:
@@ -9622,6 +62790,108 @@ index a2f9d34a2a..3962dd2bd9 100644
  AT_SETUP([PMD - stats])
  OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 ofport_request=7 type=dummy-pmd options:n_rxq=4],
                     [], [], [DUMMY_NUMA])
+@@ -460,7 +498,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ AT_CHECK([ovs-vsctl set interface p1 options:n_rxq=4])
+@@ -482,7 +520,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ dnl Check resetting to default number of rx queues after removal from the db.
+@@ -533,9 +571,9 @@ AT_CHECK([ovs-appctl dpctl/dump-flows | flow_dump_prepend_pmd], [0], [dnl
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ OVS_VSWITCHD_STOP
+@@ -568,7 +606,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
+@@ -580,7 +618,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ AT_CHECK([ovs-vsctl set Interface p2 options:numa_id=1])
+@@ -601,7 +639,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
+@@ -613,7 +651,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ AT_CHECK([ovs-vsctl set Interface p1 options:numa_id=8])
+@@ -634,7 +672,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
+@@ -646,7 +684,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ 
+@@ -726,7 +764,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ AT_CHECK([ovs-vsctl del-port br0 p1])
+@@ -741,7 +779,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
++icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0 icmp_csum:13fc
+ ])
+ 
+ OVS_VSWITCHD_STOP
 @@ -1075,15 +1113,15 @@ AT_SETUP([PMD - dpif configuration])
  OVS_VSWITCHD_START([], [], [], [--dummy-numa 0,0])
  AT_CHECK([ovs-vsctl add-port br0 p1 -- set Interface p1 type=dummy-pmd])
@@ -9863,6 +63133,19 @@ index 19a0b125b9..8b9f5c7525 100644
  # OVS_CHECK_VXLAN()
  #
  # Do basic check for vxlan functionality, skip the test if it's not there.
+diff --git a/tests/system-dpdk-macros.at b/tests/system-dpdk-macros.at
+index ef0e84e939..2579098a04 100644
+--- a/tests/system-dpdk-macros.at
++++ b/tests/system-dpdk-macros.at
+@@ -6,7 +6,7 @@
+ m4_define([OVS_DPDK_PRE_CHECK],
+   [dnl Check Hugepages
+    AT_CHECK([cat /proc/meminfo], [], [stdout])
+-   AT_SKIP_IF([egrep 'HugePages_Free: *0' stdout], [], [stdout])
++   AT_SKIP_IF([grep -E 'HugePages_Free: *0' stdout], [], [stdout])
+    AT_CHECK([mount], [], [stdout])
+    AT_CHECK([grep 'hugetlbfs' stdout], [], [stdout], [])
+ 
 diff --git a/tests/system-dpdk.at b/tests/system-dpdk.at
 index c3ee6990ca..7d2715c4a7 100644
 --- a/tests/system-dpdk.at
@@ -9889,6 +63172,46 @@ index c3ee6990ca..7d2715c4a7 100644
  AT_CHECK([ovs-appctl dpif-netdev/miniflow-parser-set autovalidator], [0], [dnl
  Miniflow extract implementation set to autovalidator.
  ])
+diff --git a/tests/system-kmod-macros.at b/tests/system-kmod-macros.at
+index 86d633ac4f..f0aaae63eb 100644
+--- a/tests/system-kmod-macros.at
++++ b/tests/system-kmod-macros.at
+@@ -200,6 +200,13 @@ m4_define([OVS_CHECK_KERNEL_EXCL],
+     AT_SKIP_IF([ ! ( test $version -lt $1 || ( test $version -eq $1 && test $sublevel -lt $2 ) || test $version -gt $3 || ( test $version -eq $3 && test $sublevel -gt $4 ) ) ])
+ ])
+ 
++# CHECK_LATER_IPV6_FRAGMENTS()
++#
++# Upstream kernels between 4.20 and 5.19 do not parse IPv6 fragments
++# correctly.  The issue was also backported into some older distribution
++# kernels, so kernels below 4.20 are not reliable either.
++m4_define([CHECK_LATER_IPV6_FRAGMENTS], [OVS_CHECK_MIN_KERNEL(5, 19)])
++
+ # VSCTL_ADD_DATAPATH_TABLE()
+ #
+ # Create system datapath table "system" for kernel tests in ovsdb
+diff --git a/tests/system-offloads-traffic.at b/tests/system-offloads-traffic.at
+index 80bc1dd5c3..14a332f5ed 100644
+--- a/tests/system-offloads-traffic.at
++++ b/tests/system-offloads-traffic.at
+@@ -90,7 +90,7 @@ AT_CHECK([tc -o -s -d filter show dev ovs-p0 ingress |
+ rate 100Kbit burst 1280b
+ ])
+ AT_CHECK([tc -s -d filter show dev ovs-p0 ingress |
+-  egrep "basic|matchall" > /dev/null], [0])
++  grep -E "basic|matchall" > /dev/null], [0])
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
+@@ -139,7 +139,7 @@ AT_CHECK([tc -o -s -d filter show dev ovs-p0 ingress |
+ pkts_rate 100000 pkts_burst 10000
+ ])
+ AT_CHECK([tc -s -d filter show dev ovs-p0 ingress |
+-  egrep "basic|matchall" > /dev/null], [0])
++  grep -E "basic|matchall" > /dev/null], [0])
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
 diff --git a/tests/system-route.at b/tests/system-route.at
 index 1714273e35..270956d13f 100644
 --- a/tests/system-route.at
@@ -9908,10 +63231,57 @@ index 1714273e35..270956d13f 100644
  dnl Delete ip address.
  AT_CHECK([ip addr del 10.0.0.17/24 dev p1-route], [0], [stdout])
 diff --git a/tests/system-traffic.at b/tests/system-traffic.at
-index f22d86e466..36e10aa4a8 100644
+index f22d86e466..69de604fa0 100644
 --- a/tests/system-traffic.at
 +++ b/tests/system-traffic.at
-@@ -218,6 +218,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -192,6 +192,46 @@ NS_CHECK_EXEC([at_ns0], [ping6 -s 3200 -q -c 3 -i 0.3 -w 2 fc00:1::2 | FORMAT_PI
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([datapath - ping6 between two ports with header modify])
++OVS_TRAFFIC_VSWITCHD_START()
++CHECK_LATER_IPV6_FRAGMENTS()
++
++AT_CHECK([ovs-ofctl add-flow br0 "actions=normal"])
++
++ADD_NAMESPACES(at_ns0, at_ns1)
++
++ADD_VETH(p0, at_ns0, br0, "fc00::1/96", e4:11:22:33:44:55)
++ADD_VETH(p1, at_ns1, br0, "fc00::2/96", e4:11:22:33:44:54)
++NS_CHECK_EXEC([at_ns0], [ip -6 neigh add fc00::3 lladdr e4:11:22:33:44:54 dev p0])
++
++dnl Linux seems to take a little time to get its IPv6 stack in order. Without
++dnl waiting, we get occasional failures due to the following error:
++dnl "connect: Cannot assign requested address"
++OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2])
++OVS_WAIT_UNTIL([ip netns exec at_ns1 ping6 -c 1 fc00::1])
++
++AT_DATA([flows.txt], [dnl
++priority=100,in_port=ovs-p0,ipv6,ipv6_src=fc00::1,ipv6_dst=fc00::3,actions=set_field:fc00::2->ipv6_dst,ovs-p1
++priority=100,in_port=ovs-p1,ipv6,ipv6_src=fc00::2,ipv6_dst=fc00::1,actions=set_field:fc00::3->ipv6_src,ovs-p0
++priority=0,actions=NORMAL
++])
++
++AT_CHECK([ovs-ofctl del-flows br0])
++AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
++
++NS_CHECK_EXEC([at_ns0], [ping6 -q -c 3 -i 0.3 -w 2 fc00::3 | FORMAT_PING], [0], [dnl
++3 packets transmitted, 3 received, 0% packet loss, time 0ms
++])
++NS_CHECK_EXEC([at_ns0], [ping6 -s 1600 -q -c 3 -i 0.3 -w 2 fc00::3 | FORMAT_PING], [0], [dnl
++3 packets transmitted, 3 received, 0% packet loss, time 0ms
++])
++NS_CHECK_EXEC([at_ns0], [ping6 -s 3200 -q -c 3 -i 0.3 -w 2 fc00::3 | FORMAT_PING], [0], [dnl
++3 packets transmitted, 3 received, 0% packet loss, time 0ms
++])
++
++OVS_TRAFFIC_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([datapath - ping over bond])
+ OVS_TRAFFIC_VSWITCHD_START()
+ 
+@@ -218,6 +258,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over vxlan tunnel])
@@ -9919,7 +63289,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_VXLAN()
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -258,7 +259,55 @@ NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PI
+@@ -258,7 +299,55 @@ NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PI
  OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
@@ -9975,7 +63345,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_VXLAN_UDP6ZEROCSUM()
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -302,6 +351,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -302,6 +391,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over gre tunnel])
@@ -9983,7 +63353,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_KERNEL_EXCL(3, 10, 4, 15)
  OVS_CHECK_GRE()
  
-@@ -343,6 +393,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -343,6 +433,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over ip6gre L2 tunnel])
@@ -9991,7 +63361,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_KERNEL_EXCL(3, 10, 4, 15)
  OVS_CHECK_GRE()
  OVS_CHECK_ERSPAN()
-@@ -383,6 +434,7 @@ AT_CLEANUP
+@@ -383,6 +474,7 @@ AT_CLEANUP
  
  
  AT_SETUP([datapath - ping over erspan v1 tunnel])
@@ -9999,7 +63369,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_KERNEL_EXCL(3, 10, 4, 15)
  OVS_CHECK_GRE()
  OVS_CHECK_ERSPAN()
-@@ -419,6 +471,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -419,6 +511,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over erspan v2 tunnel])
@@ -10007,7 +63377,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_KERNEL_EXCL(3, 10, 4, 15)
  OVS_CHECK_GRE()
  OVS_CHECK_ERSPAN()
-@@ -455,6 +508,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -455,6 +548,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over ip6erspan v1 tunnel])
@@ -10015,7 +63385,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_KERNEL_EXCL(3, 10, 4, 15)
  OVS_CHECK_GRE()
  OVS_CHECK_ERSPAN()
-@@ -494,6 +548,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -494,6 +588,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over ip6erspan v2 tunnel])
@@ -10023,7 +63393,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_KERNEL_EXCL(3, 10, 4, 15)
  OVS_CHECK_GRE()
  OVS_CHECK_ERSPAN()
-@@ -534,6 +589,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -534,6 +629,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over geneve tunnel])
@@ -10031,7 +63401,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_GENEVE()
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -575,6 +631,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -575,6 +671,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over geneve tunnel, delete flow regression])
@@ -10039,7 +63409,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_GENEVE()
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -629,6 +686,7 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/|ERR|/d
+@@ -629,6 +726,7 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/|ERR|/d
  AT_CLEANUP
  
  AT_SETUP([datapath - flow resume with geneve tun_metadata])
@@ -10047,7 +63417,16 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_GENEVE()
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -680,6 +738,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -666,7 +764,7 @@ NS_CHECK_EXEC([at_ns0], [ping -q -c 3 10.1.1.100 | FORMAT_PING], [0], [dnl
+ ])
+ 
+ dnl Test OVS handles TLV map modifictions properly when restores frozen state.
+-NS_CHECK_EXEC([at_ns0], [ping 10.1.1.100 > /dev/null &])
++NETNS_DAEMONIZE([at_ns0], [ping 10.1.1.100 > /dev/null], [ping0.pid])
+ 
+ AT_CHECK([ovs-ofctl add-tlv-map br0 "{class=0xffff,type=0x88,len=4}->tun_metadata1"])
+ sleep 1
+@@ -680,6 +778,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over geneve6 tunnel])
@@ -10055,7 +63434,7 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_GENEVE_UDP6ZEROCSUM()
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -723,6 +782,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -723,6 +822,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over gre tunnel by simulated packets])
@@ -10063,7 +63442,19 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_MIN_KERNEL(3, 10)
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -769,6 +829,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -759,16 +859,17 @@ dnl ADD_NATIVE_TUNNEL([gretap], [ns_gre0], [at_ns0], [172.31.1.100], [10.1.1.1/2
+ dnl Now, check the overlay by sending out raw arp and icmp packets.
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff00000003080045000042ec2c4000402ff3bcac1f0101ac1f016400006558fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=NORMAL"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "IP 172.31.1.100 > 172.31.1.1: GREv0, length 46: ARP, Reply 10.1.1.100 is-at f2:ff:00:00:00:01.* length 28" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "IP 172.31.1.100 > 172.31.1.1: GREv0, length 46: ARP, Reply 10.1.1.100 is-at f2:ff:00:00:00:01.* length 28" 2>&1 1>/dev/null])
+ 
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000308004500007aec8e4000402ff322ac1f0101ac1f016400006558f2ff00000001f2ff00000004080045000054548f40004001cfb30a0101010a0101640800e6e829270003e1a3435b00000000ff1a050000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f3031323334353637 actions=NORMAL"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "IP 172.31.1.100 > 172.31.1.1: GREv0, length 102: IP 10.1.1.100 > 10.1.1.1: ICMP echo reply,.* length 64$" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "IP 172.31.1.100 > 172.31.1.1: GREv0, length 102: IP 10.1.1.100 > 10.1.1.1: ICMP echo reply,.* length 64$" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over erspan v1 tunnel by simulated packets])
@@ -10071,7 +63462,25 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_MIN_KERNEL(3, 10)
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -817,6 +878,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -803,20 +904,21 @@ dnl Okay, now send out an arp request from 10.1.1.1 for 10.1.1.100 in erspan.
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000308004500004e151d4000402fcac0ac1f0101ac1f0164100088be000000061000000100000007fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=normal"
+ 
+ dnl 0002 is arp reply, followed by mac address of 10.1.1.100.
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0030:  0806 0001 0800 0604 0002 f2ff 0000 0001" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0040:  0a01 0164 f2ff 0000 0004 0a01 0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0030:  0806 0001 0800 0604 0002 f2ff 0000 0001" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0040:  0a01 0164 f2ff 0000 0004 0a01 0101" 2>&1 1>/dev/null])
+ 
+ dnl Okay, now check the overlay with raw icmp packets.
+-AT_FAIL_IF([cat p0.pcap | egrep "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 122" 2>&1 1>/dev/null])
++AT_FAIL_IF([cat p0.pcap | grep -E "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 122" 2>&1 1>/dev/null])
+ 
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000308004500008e70cb4000402f6ed2ac1f0101ac1f0164100088be000000051000000100000007f2ff00000001f2ff0000000408004500005c4a3340004001da070a0101010a010164080084f238fb0001f36a6b5b0000000021870e0000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f actions=normal"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 122" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 122" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over erspan v2 tunnel by simulated packets])
@@ -10079,7 +63488,28 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_MIN_KERNEL(3, 10)
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -870,6 +932,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -854,22 +956,23 @@ NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -w 2 172.31.1.100 | FORMAT_PING], [
+ dnl Okay, send raw arp request and icmp echo request.
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff00000003080045000052373d4000402fa89cac1f0101ac1f0164100088be00000006200000016f54b41700008078fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=normal"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0030:  0000 0001 0806 0001 0800 0604 0002 f2ff" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0040:  0000 0001 0a01 0164 f2ff 0000 0004 0a01" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0050:  0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0030:  0000 0001 0806 0001 0800 0604 0002 f2ff" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0040:  0000 0001 0a01 0164 f2ff 0000 0004 0a01" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0050:  0101" 2>&1 1>/dev/null])
+ 
+ dnl Because tcpdump might not be able to parse erspan headers, we check icmp echo reply
+ dnl by packet length.
+-AT_FAIL_IF([cat p0.pcap | egrep "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 126" 2>&1 1>/dev/null])
++AT_FAIL_IF([cat p0.pcap | grep -E "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 126" 2>&1 1>/dev/null])
+ 
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000308004500009287e14000402f57b8ac1f0101ac1f0164100088be0000000520000001144cd5a400008078f2ff00000001f2ff0000000408004500005c38d640004001eb640a0101010a01016408005e57585f0001df6c6b5b0000000045bc050000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f actions=normal"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 126" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "IP 172.31.1.100 > 172.31.1.1: GREv0,.* length 126" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over ip6erspan v1 tunnel by simulated packets])
@@ -10087,7 +63517,26 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_MIN_KERNEL(3, 10)
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -925,6 +988,7 @@ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -911,20 +1014,21 @@ dnl Okay, now send raw arp request and icmp echo request.
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000386dd60008531003a2f40fc000100000000000000000000000001fc000100000000000000000000000100100088be000000051000007b00000007fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=normal"
+ 
+ dnl Check arp reply.
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0040:  0000 0001 0806 0001 0800 0604 0002 f2ff" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0050:  0000 0001 0a01 0164 f2ff 0000 0004 0a01" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0060:  0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0040:  0000 0001 0806 0001 0800 0604 0002 f2ff" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0050:  0000 0001 0a01 0164 f2ff 0000 0004 0a01" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0060:  0101" 2>&1 1>/dev/null])
+ 
+-AT_FAIL_IF([cat p0.pcap | egrep "IP6 fc00:100::100 > fc00:100::1: GREv0,.* length 114" 2>&1 1>/dev/null])
++AT_FAIL_IF([cat p0.pcap | grep -E "IP6 fc00:100::100 > fc00:100::1: GREv0,.* length 114" 2>&1 1>/dev/null])
+ 
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000386dd60008531007a3c40fc000100000000000000000000000001fc0001000000000000000000000001002f00040104010100100088be000000061000407b00000007f2ff00000001f2ff0000000408004500005429b640004001fa8c0a0101010a01016408005c2c7526000118d3685b00000000e4aa020000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f3031323334353637 actions=normal"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "IP6 fc00:100::100 > fc00:100::1: GREv0,.* length 114" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "IP6 fc00:100::100 > fc00:100::1: GREv0,.* length 114" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
  AT_SETUP([datapath - ping over ip6erspan v2 tunnel by simulated packets])
@@ -10095,7 +63544,210 @@ index f22d86e466..36e10aa4a8 100644
  OVS_CHECK_MIN_KERNEL(3, 10)
  
  OVS_TRAFFIC_VSWITCHD_START()
-@@ -4100,15 +4164,15 @@ action=normal
+@@ -965,15 +1069,15 @@ NS_CHECK_EXEC([at_ns0], [ping6 -q -c 3 -i 0.3 -w 2 fc00:100::100 | FORMAT_PING],
+ dnl Okay, now send raw arp request and icmp echo request.
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000386dd60008531003e2f40fc000100000000000000000000000001fc000100000000000000000000000100100088be0000000620000079af514f9900008070fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=normal"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0040:  0004 f2ff 0000 0001 0806 0001 0800 0604" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0050:  0002 f2ff 0000 0001 0a01 0164 f2ff 0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "0x0060:  0004 0a01 0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0040:  0004 f2ff 0000 0001 0806 0001 0800 0604" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0050:  0002 f2ff 0000 0001 0a01 0164 f2ff 0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "0x0060:  0004 0a01 0101" 2>&1 1>/dev/null])
+ 
+-AT_FAIL_IF([cat p0.pcap | egrep "IP6 fc00:100::100 > fc00:100::1: GREv0, .* length 118" 2>&1 1>/dev/null])
++AT_FAIL_IF([cat p0.pcap | grep -E "IP6 fc00:100::100 > fc00:100::1: GREv0, .* length 118" 2>&1 1>/dev/null])
+ 
+ ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000386dd60008531007e3c40fc000100000000000000000000000001fc0001000000000000000000000001002f00040104010100100088be0000000720004079af514f9b00008070f2ff00000001f2ff00000004080045000054ffcb4000400124770a0101010a0101640800419e23ac000112d7685b000000004caf0c0000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f3031323334353637 actions=normal"
+ 
+-OVS_WAIT_UNTIL([cat p0.pcap | egrep "IP6 fc00:100::100 > fc00:100::1: GREv0, .* length 118" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p0.pcap | grep -E "IP6 fc00:100::100 > fc00:100::1: GREv0, .* length 118" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -1004,9 +1108,9 @@ NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -w 2 10.1.1.2 | FORMAT_PING], [0],
+ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ 
+ AT_CHECK([cat ofctl_monitor.log | STRIP_MONITOR_CSUM], [0], [dnl
+-icmp,vlan_tci=0x0000,dl_src=ae:c6:7e:54:8d:4d,dl_dst=50:54:00:00:00:0b,nw_src=10.1.1.2,nw_dst=192.168.4.4,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=0,icmp_code=0 icmp_csum: <skip>
+-icmp,vlan_tci=0x0000,dl_src=ae:c6:7e:54:8d:4d,dl_dst=50:54:00:00:00:0b,nw_src=10.1.1.2,nw_dst=192.168.4.4,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=0,icmp_code=0 icmp_csum: <skip>
+-icmp,vlan_tci=0x0000,dl_src=ae:c6:7e:54:8d:4d,dl_dst=50:54:00:00:00:0b,nw_src=10.1.1.2,nw_dst=192.168.4.4,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=0,icmp_code=0 icmp_csum: <skip>
++icmp,vlan_tci=0x0000,dl_src=ae:c6:7e:54:8d:4d,dl_dst=50:54:00:00:00:0b,nw_src=10.1.1.2,nw_dst=192.168.4.4,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=0,icmp_code=0 icmp_csum: <skip>
++icmp,vlan_tci=0x0000,dl_src=ae:c6:7e:54:8d:4d,dl_dst=50:54:00:00:00:0b,nw_src=10.1.1.2,nw_dst=192.168.4.4,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=0,icmp_code=0 icmp_csum: <skip>
++icmp,vlan_tci=0x0000,dl_src=ae:c6:7e:54:8d:4d,dl_dst=50:54:00:00:00:0b,nw_src=10.1.1.2,nw_dst=192.168.4.4,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=0,icmp_code=0 icmp_csum: <skip>
+ ])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -1765,10 +1869,10 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
+ 
+ dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8847 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8847 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -1797,10 +1901,10 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
+ 
+ dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8847 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8847 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -1830,10 +1934,10 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
+ 
+ dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8848 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8848 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -1862,10 +1966,10 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
+ 
+ dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8848 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8848 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -1896,13 +2000,13 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 00 00 00 00 00 02 00 00 00 00 00 01 88 47 00 00 21 40 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
+ 
+ dnl Check the expected decapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000:  *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800 *4500" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010:  *0054 *0344 *4000 *4001 *2161 *0a01 *0101 *0a01" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020:  *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030:  *0000 *500b *0200 *0000 *0000 *1011 *1213 *1415" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0040:  *1617 *1819 *1a1b *1c1d *1e1f *2021 *2223 *2425" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0050:  *2627 *2829 *2a2b *2c2d *2e2f *3031 *3233 *3435" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0060:  *3637" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800 *4500" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *0054 *0344 *4000 *4001 *2161 *0a01 *0101 *0a01" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0000 *500b *0200 *0000 *0000 *1011 *1213 *1415" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040:  *1617 *1819 *1a1b *1c1d *1e1f *2021 *2223 *2425" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050:  *2627 *2829 *2a2b *2c2d *2e2f *3031 *3233 *3435" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0060:  *3637" 2>&1 1>/dev/null])
+ 
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -1933,13 +2037,13 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 00 00 00 00 00 02 00 00 00 00 00 01 88 47 00 00 21 40 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
+ 
+ dnl Check the expected decapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000:  *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800 *4500" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010:  *0054 *0344 *4000 *4001 *2161 *0a01 *0101 *0a01" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020:  *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030:  *0000 *500b *0200 *0000 *0000 *1011 *1213 *1415" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0040:  *1617 *1819 *1a1b *1c1d *1e1f *2021 *2223 *2425" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0050:  *2627 *2829 *2a2b *2c2d *2e2f *3031 *3233 *3435" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0060:  *3637" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800 *4500" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *0054 *0344 *4000 *4001 *2161 *0a01 *0101 *0a01" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0000 *500b *0200 *0000 *0000 *1011 *1213 *1415" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040:  *1617 *1819 *1a1b *1c1d *1e1f *2021 *2223 *2425" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050:  *2627 *2829 *2a2b *2c2d *2e2f *3031 *3233 *3435" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0060:  *3637" 2>&1 1>/dev/null])
+ 
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -1985,9 +2089,9 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ dnl Check this output. We only see the latter two packets, not the first.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): total_len=42 in_port=1 (via action) data_len=42 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1,tp_dst=2 udp_csum:0
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:0
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=42 ct_state=est|rpl|trk,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=42 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=2,tp_dst=1 udp_csum:0
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:0
+ ])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+@@ -2033,9 +2137,9 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ dnl Check this output. We only see the latter two packets, not the first.
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=42 in_port=1 (via action) data_len=42 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=1,tp_dst=2 udp_csum:0
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=1,tp_dst=2 udp_csum:0
+ NXT_PACKET_IN2 (xid=0x0): table_id=1 cookie=0x0 total_len=42 ct_state=new|trk,ct_nw_src=10.1.1.2,ct_nw_dst=10.1.1.1,ct_nw_proto=17,ct_tp_src=2,ct_tp_dst=1,ip,in_port=2 (via action) data_len=42 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=2,tp_dst=1 udp_csum:0
++udp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.1.1.2,nw_dst=10.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=0,nw_frag=no,tp_src=2,tp_dst=1 udp_csum:0
+ ])
+ 
+ dnl
+@@ -2980,6 +3084,15 @@ NXST_FLOW reply:
+  table=1, priority=100,ct_state=+est+trk,in_port=1 actions=output:2
+ ])
+ 
++dnl Send a 3rd UDP packet on port 1
++AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"])
++
++dnl There still should not be any packet that matches the established ct_state.
++AT_CHECK([ovs-ofctl dump-flows br0 "table=1 in_port=1,ct_state=+trk+est" | ofctl_strip], [0], [dnl
++NXST_FLOW reply:
++ table=1, priority=100,ct_state=+est+trk,in_port=1 actions=output:2
++])
++
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
+@@ -3140,11 +3253,11 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
+ dnl Check this output. We only see the latter two packets, not the first.
+ AT_CHECK([cat ofctl_monitor.log | grep -v ff02 | grep -v fe80 | grep -v no_match], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): table_id=1 cookie=0x0 total_len=75 ct_state=inv|trk,ip,in_port=2 (via action) data_len=75 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=c6:f5:4e:cb:72:db,dl_dst=f6:4c:47:35:28:c9,nw_src=172.16.0.4,nw_dst=172.16.0.3,nw_tos=192,nw_ecn=0,nw_ttl=64,icmp_type=3,icmp_code=3 icmp_csum:da49
++icmp,vlan_tci=0x0000,dl_src=c6:f5:4e:cb:72:db,dl_dst=f6:4c:47:35:28:c9,nw_src=172.16.0.4,nw_dst=172.16.0.3,nw_tos=192,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=3,icmp_code=3 icmp_csum:da49
+ NXT_PACKET_IN2 (xid=0x0): table_id=1 cookie=0x0 total_len=47 ct_state=new|trk,ct_nw_src=172.16.0.1,ct_nw_dst=172.16.0.2,ct_nw_proto=17,ct_tp_src=41614,ct_tp_dst=5555,ip,in_port=1 (via action) data_len=47 (unbuffered)
+-udp,vlan_tci=0x0000,dl_src=e6:4c:47:35:28:c9,dl_dst=c6:f9:4e:cb:72:db,nw_src=172.16.0.1,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=41614,tp_dst=5555 udp_csum:2096
++udp,vlan_tci=0x0000,dl_src=e6:4c:47:35:28:c9,dl_dst=c6:f9:4e:cb:72:db,nw_src=172.16.0.1,nw_dst=172.16.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=41614,tp_dst=5555 udp_csum:2096
+ NXT_PACKET_IN2 (xid=0x0): table_id=1 cookie=0x0 total_len=75 ct_state=rel|rpl|trk,ct_nw_src=172.16.0.1,ct_nw_dst=172.16.0.2,ct_nw_proto=17,ct_tp_src=41614,ct_tp_dst=5555,ip,in_port=2 (via action) data_len=75 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=c6:f9:4e:cb:72:db,dl_dst=e6:4c:47:35:28:c9,nw_src=172.16.0.3,nw_dst=172.16.0.1,nw_tos=192,nw_ecn=0,nw_ttl=64,icmp_type=3,icmp_code=3 icmp_csum:553f
++icmp,vlan_tci=0x0000,dl_src=c6:f9:4e:cb:72:db,dl_dst=e6:4c:47:35:28:c9,nw_src=172.16.0.3,nw_dst=172.16.0.1,nw_tos=192,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=3,icmp_code=3 icmp_csum:553f
+ ])
+ 
+ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.1)], [0], [dnl
+@@ -3345,6 +3458,11 @@ AT_CHECK([ovs-ofctl bundle br0 bundle.txt])
+ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl
+ ])
+ 
++dnl Send the second fragment in order to avoid keeping the first fragment
++dnl in the queue until the expiration occurs. Fragments already queued, if resent,
++dnl may lead to failures on the kernel datapath.
++AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1, packet=50540000000a505400000009080045000030000100320011a4860a0101010a01010200010002000800000010203040506070809000010203040506070809, actions=ct(commit)"])
++
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
+@@ -4100,15 +4218,15 @@ action=normal
  
  AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt])
  
@@ -10114,7 +63766,25 @@ index f22d86e466..36e10aa4a8 100644
  "1616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161610a, actions=ct(table=1)"])
  
  AT_CHECK([ovs-appctl dpctl/dump-flows | head -2 | tail -1 | grep -q -e ["]udp[(]src=5001["]])
-@@ -6454,7 +6518,7 @@ on_exit 'ovs-appctl revalidator/purge'
+@@ -5384,7 +5502,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst=
+ udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=<cleared>,dport=<cleared>),reply=(src=10.1.1.2,dst=10.1.1.2XX,sport=<cleared>,dport=<cleared>),mark=1
+ ])
+ 
+-AT_CHECK([tcpdump -v "icmp" -r p0.pcap 2>/dev/null | egrep 'wrong|bad'], [1], [ignore-nolog])
++AT_CHECK([tcpdump -v "icmp" -r p0.pcap 2>/dev/null | grep -E 'wrong|bad'], [1], [ignore-nolog])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -6134,7 +6252,7 @@ sleep 1
+ dnl UDP packets from ns0->ns1 should solicit a "destination unreachable" response.
+ NS_CHECK_EXEC([at_ns0], [bash -c "echo a | nc -6 $NC_EOF_OPT -u fc00::2 1"])
+ 
+-AT_CHECK([tcpdump -v "icmp6" -r p0.pcap 2>/dev/null | egrep 'wrong|bad'], [1], [ignore-nolog])
++AT_CHECK([tcpdump -v "icmp6" -r p0.pcap 2>/dev/null | grep -E 'wrong|bad'], [1], [ignore-nolog])
+ 
+ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fc00::2)], [0], [dnl
+ udp,orig=(src=fc00::1,dst=fc00::2,sport=<cleared>,dport=<cleared>),reply=(src=fc00::2,dst=fc00::240,sport=<cleared>,dport=<cleared>)
+@@ -6454,7 +6572,7 @@ on_exit 'ovs-appctl revalidator/purge'
  on_exit 'ovs-appctl dpif/dump-flows br0'
  
  dnl Should work with the virtual IP address through NAT
@@ -10123,7 +63793,7 @@ index f22d86e466..36e10aa4a8 100644
      echo Request $i
      NS_CHECK_EXEC([at_ns1], [wget 10.1.1.64 -t 5 -T 1 --retry-connrefused -v -o wget$i.log])
  done
-@@ -6743,6 +6807,132 @@ AT_CHECK([ovs-ofctl dump-flows br0 | grep table=2, | OFPROTO_CLEAR_DURATION_IDLE
+@@ -6743,6 +6861,132 @@ AT_CHECK([ovs-ofctl dump-flows br0 | grep table=2, | OFPROTO_CLEAR_DURATION_IDLE
  OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
  
@@ -10256,6 +63926,95 @@ index f22d86e466..36e10aa4a8 100644
  AT_BANNER([802.1ad])
  
  AT_SETUP([802.1ad - vlan_limit])
+@@ -7007,12 +7251,12 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
+ 
+ dnl Check the expected nsh encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *0fc6" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010: *0103 *0012 *34ff *1122 *3344 *0000 *0000 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020: *0000 *0000 *0000 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *0fc6" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0103 *0012 *34ff *1122 *3344 *0000 *0000 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *0000 *0000 *0000 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -7039,10 +7283,10 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 00 64 03 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
+ 
+ dnl Check the expected de-capsulated TCP packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000: *f200 *0000 *0002 *f200 *0000 *0001 *0800 *4500" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010: *0028 *0001 *0000 *4006 *b013 *c0a8 *000a *0a00" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020: *000a *0400 *0800 *0000 *00c8 *0000 *0000 *5002" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030: *2000 *b85e *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f200 *0000 *0002 *f200 *0000 *0001 *0800 *4500" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0028 *0001 *0000 *4006 *b013 *c0a8 *000a *0a00" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *000a *0400 *0800 *0000 *00c8 *0000 *0000 *5002" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *2000 *b85e *0000" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -7072,12 +7316,12 @@ dnl p1(at_ns1) interface
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 01 00 03 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
+ 
+ dnl Check the expected NSH packet with new fields in the header
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000: *f2ff *0000 *0002 *f2ff *0000* 0001 *894f *01c6" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010: *0103 *0001 *0104 *100f *0e0d *0c0b *0a09 *0807" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020: *0605 *0403 *0201 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000* 0001 *894f *01c6" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0103 *0001 *0104 *100f *0e0d *0c0b *0a09 *0807" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *0605 *0403 *0201 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -7106,23 +7350,23 @@ dnl First send packet from at_ns0 --> OVS with SPI=0x100 and SI=2
+ NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 01 00 02 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
+ 
+ dnl Check for the above packet on p1 interface
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *0206" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0010: *0103 *0001 *0002 *0102 *0304 *0506 *0708 *090a" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0020: *0b0c *0d0e *0f10 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | egrep "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *0206" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0103 *0001 *0002 *0102 *0304 *0506 *0708 *090a" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *0b0c *0d0e *0f10 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
+ 
+ dnl Send the second packet from at_ns1 --> OVS with SPI=0x100 and SI=1
+ NS_CHECK_EXEC([at_ns1], [$PYTHON3 $srcdir/sendpkt.py p1 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 01 c6 01 03 00 01 00 01 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
+ 
+ dnl Check for the above packet on p2 interface
+-OVS_WAIT_UNTIL([cat p2.pcap | egrep "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *01c6" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | egrep "0x0010: *0103 *0001 *0001 *0102 *0304 *0506 *0708 *090a" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | egrep "0x0020: *0b0c *0d0e *0f10 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | egrep "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | egrep "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | egrep "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *01c6" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0010: *0103 *0001 *0001 *0102 *0304 *0506 *0708 *090a" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0020: *0b0c *0d0e *0f10 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
 diff --git a/tests/system-tso-macros.at b/tests/system-tso-macros.at
 index 406334f3e0..1a80047619 100644
 --- a/tests/system-tso-macros.at
@@ -10266,6 +64025,22 @@ index 406334f3e0..1a80047619 100644
  )
 +
 +m4_define([CHECK_SYSTEM_TSO], [])
+diff --git a/tests/system-userspace-macros.at b/tests/system-userspace-macros.at
+index f639ba53a2..da3a4caca9 100644
+--- a/tests/system-userspace-macros.at
++++ b/tests/system-userspace-macros.at
+@@ -299,6 +299,11 @@ m4_define([OVS_CHECK_KERNEL_EXCL],
+     AT_SKIP_IF([:])
+ ])
+ 
++# CHECK_LATER_IPV6_FRAGMENTS()
++#
++# Userspace parses later IPv6 fragments correctly.
++m4_define([CHECK_LATER_IPV6_FRAGMENTS], [])
++
+ # VSCTL_ADD_DATAPATH_TABLE()
+ #
+ # Create datapath table "netdev" for userspace tests in ovsdb
 diff --git a/tests/test-cmap.c b/tests/test-cmap.c
 index 0705475606..588a5dea63 100644
 --- a/tests/test-cmap.c
@@ -10456,7 +64231,7 @@ index 9259b0b3fc..e50c7c3807 100644
          hmap_destroy(&hmap);
      }
 diff --git a/tests/test-list.c b/tests/test-list.c
-index 6f1fb059bc..2c6c444488 100644
+index 6f1fb059bc..ac82f2048e 100644
 --- a/tests/test-list.c
 +++ b/tests/test-list.c
 @@ -61,7 +61,7 @@ check_list(struct ovs_list *list, const int values[], size_t n)
@@ -10477,7 +64252,16 @@ index 6f1fb059bc..2c6c444488 100644
      assert(i == n);
  
      assert(ovs_list_is_empty(list) == !n);
-@@ -135,6 +135,13 @@ test_list_for_each_safe(void)
+@@ -106,6 +106,8 @@ test_list_construction(void)
+         int values[MAX_ELEMS];
+         struct ovs_list list;
+ 
++        memset(elements, 0, sizeof elements);
++        memset(values, 0, sizeof values);
+         make_list(&list, elements, values, n);
+         check_list(&list, values, n);
+     }
+@@ -135,6 +137,13 @@ test_list_for_each_safe(void)
              values_idx = 0;
              n_remaining = n;
              LIST_FOR_EACH_SAFE (e, next, node, &list) {
@@ -10491,7 +64275,7 @@ index 6f1fb059bc..2c6c444488 100644
                  assert(i < n);
                  if (pattern & (1ul << i)) {
                      ovs_list_remove(&e->node);
-@@ -148,7 +155,8 @@ test_list_for_each_safe(void)
+@@ -148,7 +157,8 @@ test_list_for_each_safe(void)
                  i++;
              }
              assert(i == n);
@@ -10501,7 +64285,7 @@ index 6f1fb059bc..2c6c444488 100644
  
              for (i = 0; i < n; i++) {
                  if (pattern & (1ul << i)) {
-@@ -156,6 +164,35 @@ test_list_for_each_safe(void)
+@@ -156,6 +166,35 @@ test_list_for_each_safe(void)
                  }
              }
              assert(n == n_remaining);
@@ -10537,6 +64321,264 @@ index 6f1fb059bc..2c6c444488 100644
          }
      }
  }
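The two memset() calls added to test_list_construction() above correspond to the changelog entry "test-list: Fix false-positive build failure with GCC 12" further down: at higher optimization levels GCC 12 can report the stack arrays as possibly used uninitialized even though the helper writes them before any read. A minimal sketch of the workaround, with illustrative names rather than the real test code:

    #include <string.h>

    #define MAX_ELEMS 8

    struct element {
        int value;
    };

    /* Helper that fills both arrays; GCC 12 cannot always prove that
     * every later read is preceded by one of these writes. */
    static void
    make_elements(struct element elements[], int values[], size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            elements[i].value = (int) i;
            values[i] = (int) i;
        }
    }

    int
    main(void)
    {
        for (size_t n = 0; n <= MAX_ELEMS; n++) {
            struct element elements[MAX_ELEMS];
            int values[MAX_ELEMS];

            /* Zero-initialize up front purely to silence the
             * false-positive diagnostic. */
            memset(elements, 0, sizeof elements);
            memset(values, 0, sizeof values);
            make_elements(elements, values, n);
        }
        return 0;
    }
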
+diff --git a/tests/test-ovsdb.c b/tests/test-ovsdb.c
+index ca4e87b811..3194f50f2d 100644
+--- a/tests/test-ovsdb.c
++++ b/tests/test-ovsdb.c
+@@ -294,11 +294,24 @@ print_and_free_ovsdb_error(struct ovsdb_error *error)
+     free(string);
+ }
+ 
++static struct json **json_to_destroy;
++
++static void
++destroy_on_ovsdb_error(struct json **json)
++{
++    json_to_destroy = json;
++}
++
+ static void
+ check_ovsdb_error(struct ovsdb_error *error)
+ {
+     if (error) {
+         char *s = ovsdb_error_to_string_free(error);
++
++        if (json_to_destroy) {
++            json_destroy(*json_to_destroy);
++            json_to_destroy = NULL;
++        }
+         ovs_fatal(0, "%s", s);
+     }
+ }
+@@ -481,6 +494,8 @@ do_diff_data(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     struct ovsdb_datum new, old, diff, reincarnation;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_type_from_json(&type, json));
+     json_destroy(json);
+@@ -556,6 +571,8 @@ do_parse_atomic_type(struct ovs_cmdl_context *ctx)
+     enum ovsdb_atomic_type type;
+     struct json *json;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_atomic_type_from_json(&type, json));
+     json_destroy(json);
+@@ -568,6 +585,8 @@ do_parse_base_type(struct ovs_cmdl_context *ctx)
+     struct ovsdb_base_type base;
+     struct json *json;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_base_type_from_json(&base, json));
+     json_destroy(json);
+@@ -581,6 +600,8 @@ do_parse_type(struct ovs_cmdl_context *ctx)
+     struct ovsdb_type type;
+     struct json *json;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_type_from_json(&type, json));
+     json_destroy(json);
+@@ -595,6 +616,8 @@ do_parse_atoms(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_base_type_from_json(&base, json));
+     json_destroy(json);
+@@ -624,6 +647,8 @@ do_parse_atom_strings(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_base_type_from_json(&base, json));
+     json_destroy(json);
+@@ -669,6 +694,8 @@ do_parse_data__(int argc, char *argv[],
+     struct json *json;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(argv[1]));
+     check_ovsdb_error(ovsdb_type_from_json(&type, json));
+     json_destroy(json);
+@@ -700,6 +727,8 @@ do_parse_data_strings(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_type_from_json(&type, json));
+     json_destroy(json);
+@@ -740,6 +769,8 @@ do_sort_atoms(struct ovs_cmdl_context *ctx)
+     size_t n_atoms;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_base_type_from_json(&base, json));
+     json_destroy(json);
+@@ -779,6 +810,8 @@ do_parse_column(struct ovs_cmdl_context *ctx)
+     struct ovsdb_column *column;
+     struct json *json;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = parse_json(ctx->argv[2]);
+     check_ovsdb_error(ovsdb_column_from_json(json, ctx->argv[1], &column));
+     json_destroy(json);
+@@ -795,6 +828,8 @@ do_parse_table(struct ovs_cmdl_context *ctx)
+ 
+     default_is_root = ctx->argc > 3 && !strcmp(ctx->argv[3], "true");
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = parse_json(ctx->argv[2]);
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, ctx->argv[1], &ts));
+     json_destroy(json);
+@@ -811,6 +846,8 @@ do_parse_rows(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+     json_destroy(json);
+@@ -870,6 +907,8 @@ do_compare_rows(struct ovs_cmdl_context *ctx)
+     int n_rows;
+     int i, j;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+     json_destroy(json);
+@@ -929,6 +968,8 @@ do_parse_conditions(struct ovs_cmdl_context *ctx)
+     int exit_code = 0;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+     json_destroy(json);
+@@ -971,6 +1012,8 @@ do_evaluate_condition__(struct ovs_cmdl_context *ctx, int mode)
+     struct json *json;
+     size_t i, j;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Parse table schema, create table. */
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+@@ -1058,6 +1101,8 @@ do_compare_conditions(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     size_t i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Parse table schema, create table. */
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+@@ -1099,6 +1144,8 @@ do_parse_mutations(struct ovs_cmdl_context *ctx)
+     int exit_code = 0;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+     json_destroy(json);
+@@ -1138,6 +1185,8 @@ do_execute_mutations(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     size_t i, j;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Parse table schema, create table. */
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+@@ -1262,6 +1311,8 @@ do_query(struct ovs_cmdl_context *ctx)
+     int exit_code = 0;
+     size_t i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Parse table schema, create table. */
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+@@ -1356,6 +1407,8 @@ do_query_distinct(struct ovs_cmdl_context *ctx)
+     int exit_code = 0;
+     size_t i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Parse table schema, create table. */
+     json = unbox_json(parse_json(ctx->argv[1]));
+     check_ovsdb_error(ovsdb_table_schema_from_json(json, "mytable", &ts));
+@@ -1483,6 +1536,8 @@ do_parse_schema(struct ovs_cmdl_context *ctx)
+     struct ovsdb_schema *schema;
+     struct json *json;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     json = parse_json(ctx->argv[1]);
+     check_ovsdb_error(ovsdb_schema_from_json(json, &schema));
+     json_destroy(json);
+@@ -1498,6 +1553,8 @@ do_execute__(struct ovs_cmdl_context *ctx, bool ro)
+     struct ovsdb *db;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Create database. */
+     json = parse_json(ctx->argv[1]);
+     check_ovsdb_error(ovsdb_schema_from_json(json, &schema));
+@@ -1564,6 +1621,8 @@ do_trigger(struct ovs_cmdl_context *ctx)
+     int number;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Create database. */
+     json = parse_json(ctx->argv[1]);
+     check_ovsdb_error(ovsdb_schema_from_json(json, &schema));
+@@ -1789,6 +1848,8 @@ do_transact(struct ovs_cmdl_context *ctx)
+     struct json *json;
+     int i;
+ 
++    destroy_on_ovsdb_error(&json);
++
+     /* Create table. */
+     json = parse_json("{\"name\": \"testdb\", "
+                       " \"tables\": "
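The test-ovsdb.c hunks above all apply one pattern: each command registers the address of its local json pointer via destroy_on_ovsdb_error(), and check_ovsdb_error() frees whatever that pointer currently references before the fatal exit, so a parse error aborts the test without tripping the leak detector. A condensed sketch of the idea, using stand-in types and function names rather than the real OVS JSON API:

    #include <stdio.h>
    #include <stdlib.h>

    struct json {
        char *text;   /* stand-in for a parsed JSON tree */
    };

    static struct json **json_to_destroy;

    /* Remember which local pointer to clean up on a fatal error. */
    static void
    destroy_on_error(struct json **json)
    {
        json_to_destroy = json;
    }

    static void
    json_destroy(struct json *json)
    {
        if (json) {
            free(json->text);
            free(json);
        }
    }

    /* On error, free the registered in-flight JSON before exiting, so
     * the leak detector only reports real leaks. */
    static void
    check_error(const char *error)
    {
        if (error) {
            if (json_to_destroy) {
                json_destroy(*json_to_destroy);
                json_to_destroy = NULL;
            }
            fprintf(stderr, "%s\n", error);
            exit(1);
        }
    }

    int
    main(void)
    {
        struct json *json = NULL;

        destroy_on_error(&json);
        /* ... parse into json, calling check_error() after each step ... */
        check_error(NULL);
        json_destroy(json);
        return 0;
    }
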
+diff --git a/tests/test-ovsdb.py b/tests/test-ovsdb.py
+index 853264f22b..402cacbe9d 100644
+--- a/tests/test-ovsdb.py
++++ b/tests/test-ovsdb.py
+@@ -620,7 +620,7 @@ def update_condition(idl, commands):
+     commands = commands[len("condition "):].split(";")
+     for command in commands:
+         command = command.split(" ")
+-        if(len(command) != 2):
++        if len(command) != 2:
+             sys.stderr.write("Error parsing condition %s\n" % command)
+             sys.exit(1)
+ 
 diff --git a/tests/test-rcu.c b/tests/test-rcu.c
 index 965f3c49f3..bb17092bf0 100644
 --- a/tests/test-rcu.c
@@ -10617,8 +64659,21 @@ index f0fd042108..7d899fbbfd 100644
  
          /* Check maximum x such that f(x) == n. */
          check_ctz32(UINT32_MAX << n, n);
+diff --git a/tests/tunnel-push-pop-ipv6.at b/tests/tunnel-push-pop-ipv6.at
+index 3f58e3e8fd..8c5af459e9 100644
+--- a/tests/tunnel-push-pop-ipv6.at
++++ b/tests/tunnel-push-pop-ipv6.at
+@@ -452,7 +452,7 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=98 tun_id=0x7b,tun_ipv6_src=2001:cafe::92,tun_ipv6_dst=2001:cafe::88,tun_metadata0=0xa,in_port=5 (via action) data_len=98 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=be:b6:f4:e1:49:4a,dl_dst=fe:71:d8:83:72:4f,nw_src=30.0.0.1,nw_dst=30.0.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=0,icmp_code=0 icmp_csum:4227
++icmp,vlan_tci=0x0000,dl_src=be:b6:f4:e1:49:4a,dl_dst=fe:71:d8:83:72:4f,nw_src=30.0.0.1,nw_dst=30.0.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=0,icmp_code=0 icmp_csum:4227
+ ])
+ 
+ AT_CHECK([ovs-ofctl dump-ports int-br | grep 'port  5'], [0], [dnl
 diff --git a/tests/tunnel-push-pop.at b/tests/tunnel-push-pop.at
-index 57589758f4..c63344196b 100644
+index 57589758f4..c9a04c76bd 100644
 --- a/tests/tunnel-push-pop.at
 +++ b/tests/tunnel-push-pop.at
 @@ -546,6 +546,28 @@ AT_CHECK([ovs-ofctl dump-ports int-br | grep 'port  [[37]]' | sort], [0], [dnl
@@ -10650,7 +64705,14 @@ index 57589758f4..c63344196b 100644
  dnl Check decapsulation of Geneve packet with options
  AT_CAPTURE_FILE([ofctl_monitor.log])
  AT_CHECK([ovs-ofctl monitor int-br 65534 --detach --no-chdir --pidfile 2> ofctl_monitor.log])
-@@ -565,8 +587,8 @@ icmp,vlan_tci=0x0000,dl_src=be:b6:f4:e1:49:4a,dl_dst=fe:71:d8:83:72:4f,nw_src=30
+@@ -559,14 +581,14 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl)
+ 
+ AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+ NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=98 tun_id=0x7b,tun_src=1.1.2.92,tun_dst=1.1.2.88,tun_metadata0=0xa,in_port=5 (via action) data_len=98 (unbuffered)
+-icmp,vlan_tci=0x0000,dl_src=be:b6:f4:e1:49:4a,dl_dst=fe:71:d8:83:72:4f,nw_src=30.0.0.1,nw_dst=30.0.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=0,icmp_code=0 icmp_csum:4227
++icmp,vlan_tci=0x0000,dl_src=be:b6:f4:e1:49:4a,dl_dst=fe:71:d8:83:72:4f,nw_src=30.0.0.1,nw_dst=30.0.0.2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=0,icmp_code=0 icmp_csum:4227
+ ])
+ 
  AT_CHECK([ovs-ofctl dump-ports int-br | grep 'port  5'], [0], [dnl
    port  5: rx pkts=1, bytes=98, drop=?, errs=?, frame=?, over=?, crc=?
  ])
@@ -10661,6 +64723,24 @@ index 57589758f4..c63344196b 100644
  ])
  
  dnl Receive VXLAN with different MAC and verify that the neigh cache gets updated
+@@ -718,14 +740,14 @@ dnl Output to tunnel from a int-br internal port.
+ dnl Checking that the packet arrived and it was correctly encapsulated.
+ AT_CHECK([ovs-ofctl add-flow int-br "in_port=LOCAL,actions=debug_slow,output:2"])
+ AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}4"])
+-OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | egrep "${encap}${packet}4" | wc -l` -ge 1])
++OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep -E "${encap}${packet}4" | wc -l` -ge 1])
+ dnl Sending again to exercise the non-miss upcall path.
+ AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}4"])
+-OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | egrep "${encap}${packet}4" | wc -l` -ge 2])
++OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep -E "${encap}${packet}4" | wc -l` -ge 2])
+ 
+ dnl Output to tunnel from the controller.
+ AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out int-br CONTROLLER "debug_slow,output:2" "${packet}5"])
+-OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | egrep "${encap}${packet}5" | wc -l` -ge 1])
++OVS_WAIT_UNTIL([test `ovs-pcap p0.pcap | grep -E "${encap}${packet}5" | wc -l` -ge 1])
+ 
+ dnl Datapath actions should not have tunnel push action.
+ AT_CHECK([ovs-appctl dpctl/dump-flows | grep -q tnl_push], [1])
 @@ -842,3 +864,54 @@ Datapath actions: 7
  
  OVS_VSWITCHD_STOP
@@ -10729,6 +64809,19 @@ index b8ae7caa9b..fd482aa872 100644
  ])
  
  OVS_VSWITCHD_STOP
+diff --git a/utilities/bugtool/ovs-bugtool.in b/utilities/bugtool/ovs-bugtool.in
+index fa62cbe949..fee0de8532 100755
+--- a/utilities/bugtool/ovs-bugtool.in
++++ b/utilities/bugtool/ovs-bugtool.in
+@@ -956,7 +956,7 @@ def load_plugins(just_capabilities=False, filter=None):
+                     filters = []
+                 else:
+                     filters = filters_tmp.split(',')
+-                if not(filter is None or filter in filters):
++                if not (filter is None or filter in filters):
+                     continue
+                 if el.tagName == "files":
+                     newest_first = getBoolAttr(el, 'newest_first')
 diff --git a/utilities/gdb/ovs_gdb.py b/utilities/gdb/ovs_gdb.py
 index 62928d50fc..763ece2a78 100644
 --- a/utilities/gdb/ovs_gdb.py
@@ -10743,6 +64836,9 @@ index 62928d50fc..763ece2a78 100644
  
      def extract_pkt(self, pkt):
          pkt_fields = pkt.type.keys()
+diff --git a/utilities/ovs-appctl-bashcomp.bash b/utilities/ovs-appctl-bashcomp.bash
+old mode 100755
+new mode 100644
 diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c
 index ede7f1e61a..6771973ae9 100644
 --- a/utilities/ovs-ofctl.c
@@ -10763,7 +64859,7 @@ index ede7f1e61a..6771973ae9 100644
          ovs_be32 error_xid = error_oh->xid;
          enum ofperr ofperr;
 diff --git a/utilities/ovs-save b/utilities/ovs-save
-index fb2025b765..a190902f4d 100755
+index fb2025b765..67092ecf7e 100755
 --- a/utilities/ovs-save
 +++ b/utilities/ovs-save
 @@ -102,7 +102,7 @@ save_interfaces () {
@@ -10775,6 +64871,15 @@ index fb2025b765..a190902f4d 100755
  }
  
  save_flows () {
+@@ -127,7 +127,7 @@ save_flows () {
+         # Get the highest enabled OpenFlow version
+         ofp_version=$(get_highest_ofp_version "$bridge")
+ 
+-        printf "%s" "ovs-ofctl add-tlv-map ${bridge} '"
++        printf "%s" "ovs-ofctl -O $ofp_version add-tlv-map ${bridge} '"
+         ovs-ofctl dump-tlv-map ${bridge} -O $ofp_version | \
+         awk '/^  *0x/ {if (cnt != 0) printf ","; \
+              cnt++;printf "{class="$1",type="$2",len="$3"}->"$4}'
 diff --git a/utilities/ovs-tcpdump.in b/utilities/ovs-tcpdump.in
 index 82d1bedfa6..7fd26e4055 100755
 --- a/utilities/ovs-tcpdump.in
@@ -10816,6 +64921,9 @@ index 82d1bedfa6..7fd26e4055 100755
          ovsdb.destroy_mirror(interface, ovsdb.port_bridge(interface))
          ovsdb.destroy_port(mirror_interface, ovsdb.port_bridge(interface))
          if tap_created is True:
+diff --git a/utilities/ovs-vsctl-bashcomp.bash b/utilities/ovs-vsctl-bashcomp.bash
+old mode 100755
+new mode 100644
 diff --git a/utilities/ovs-vsctl.c b/utilities/ovs-vsctl.c
 index 37cc72d401..1032089fc2 100644
 --- a/utilities/ovs-vsctl.c
diff --git a/SPECS/openvswitch2.17.spec b/SPECS/openvswitch2.17.spec
index 6978853..bb680e3 100644
--- a/SPECS/openvswitch2.17.spec
+++ b/SPECS/openvswitch2.17.spec
@@ -57,7 +57,7 @@ Summary: Open vSwitch
 Group: System Environment/Daemons daemon/database/utilities
 URL: http://www.openvswitch.org/
 Version: 2.17.0
-Release: 31%{?dist}
+Release: 51%{?dist}
 
 # Nearly all of openvswitch is ASL 2.0.  The bugtool is LGPLv2+, and the
 # lib/sflow*.[ch] files are SISSL
@@ -743,6 +743,892 @@ exit 0
 %endif
 
 %changelog
+* Mon Sep 19 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-51
+- Merging upstream branch-2.17 [RH git: daeab22d1e]
+    Commit list:
+    73d7bf64a7 bond: Avoid deadlock while updating post recirculation rules.
+    70a63391cb ofproto-dpif-upcall: Add debug commands to pause/resume revalidators.
+    cf0e12f8ae test-list: Fix false-positive build failure with GCC 12.
+    5cbed27c87 tests: Fix tests with GNU grep 3.8.
+    a5cd60db0f cirrus: Upgrade to FreeBSD 13.1 image.
+    43ece36f31 netdev-linux: Skip some internal kernel stats gathering.
+    846d6a0c51 ofproto-dpif-xlate: Fix error messages for nonexistent ports/recirc_ids.
+    e8814c9b88 ofproto-dpif-xlate: Clear tunnel wc bits if original packet is non-tunnel.
+
+
+* Wed Sep 07 2022 Timothy Redaelli <tredaelli@redhat.com> - 2.17.0-50
+- redhat: use git rev-parse to get BRANCH_NAME [RH git: fbcf506fb4]
+    git name-rev may return a tag instead of a branch name
+
+
+* Tue Sep 06 2022 Timothy Redaelli <tredaelli@redhat.com> - 2.17.0-49
+- Merging 7bcd45ce82 version: 21.11.2 [RH git: 3073fb2b47]
+    Commit list:
+    7bcd45ce82 version: 21.11.2
+    e12d415556 vhost: fix header spanned across more than two descriptors
+    f167022606 vhost: discard too small descriptor chains
+    25c01bd323 net/mlx5: fix Rx queue recovery mechanism
+    125a65cb03 examples/performance-thread: fix build with GCC 12
+    2a55c38e27 test/crypto: skip oop test for raw api
+    a561d44985 net/vhost: fix null pointer dereference
+    0f80c13b4d version: 21.11.2-rc1
+    84b2018842 app/testpmd: fix GTP PSC raw processing
+    9e7d93ae2f net/iavf: fix GTP-U extension flow
+    b11e955370 vdpa/sfc: resolve race between vhost lib and device conf
+    06b246ead6 vdpa/ifc/base: fix null pointer dereference
+    dbe68f0958 vdpa/mlx5: fix leak on event thread creation
+    9224015451 examples/link_status_interrupt: fix stats refresh rate
+    b941165a00 examples/vhost: fix retry logic on Rx path
+    f169902058 avoid AltiVec keyword vector
+    20ee5fbe91 app/regex: fix mbuf size for multi-segment buffer
+    81a0919f6a app/regex: avoid division by zero
+    e1c3685b21 dma/idxd: fix null dereference in PCI remove
+    5b7a2b5672 dma/idxd: fix partial freeing in PCI close
+    1fd4a985e4 dma/idxd: fix memory leak in PCI close
+    008e1abc82 net/mlx5: reject negative integrity item configuration
+    fa1d93b8c4 common/mlx5: fix non-expandable global MR cache
+    03a6a9f751 common/cnxk: allow changing PTP mode on CN10K
+    8fb51606a7 gro: fix identifying fragmented packets
+    e46eb5a8dc service: fix lingering active status
+    399cbc736c net/igc: support multi-process
+    97e75c37b0 net/iavf: fix VF reset
+    bbc9dcaefa common/cnxk: fix GRE tunnel parsing
+    36f4c8e67f net/virtio-user: fix Rx interrupts with multi-queue
+    c353b1de9f vhost: restore device information in log messages
+    1f963ee7a3 vhost: add some trailing newline in log messages
+    4ef6a79d4a vdpa/sfc: fix sync between QEMU and vhost-user
+    5991d25b74 net/vhost: fix deadlock on vring state change
+    a3ff1d7e9c doc: fix readability in vhost guide
+    19457a68ea net/virtio-user: fix socket non-blocking mode
+    3287afa5e8 net/netvsc: fix vmbus device reference in multi-process
+    b6e4963255 app/testpmd: fix supported RSS offload display
+    49cc0b73a0 eventdev/eth_tx: fix queue delete
+    a1564274cd doc: fix grammar and parameters in l2fwd-crypto guide
+    a8b87a7063 doc: fix grammar and formatting in compressdev guide
+    8017591016 crypto/qat: fix DOCSIS crash
+    f2a62f854e examples/fips_validation: handle empty payload
+    390e956b6d test/crypto: fix SNOW3G vector IV format
+    35b1acf851 test/crypto: fix ZUC vector IV format
+    6706a66aaa test/crypto: fix authentication IV for ZUC SGL
+    f95b184d1e doc: add more instructions for running as non-root
+    a74fd43471 net/bnxt: fix check for autoneg enablement in the PHY FW
+    7a91bb4238 net/bnxt: cleanup MTU setting
+    1ab0afa450 net/bnxt: disallow MTU change when device is started
+    8185654d05 net/bnxt: fix setting forced speed
+    e798345849 net/bnxt: allow Tx only or Rx only
+    cce3a4048e net/bnxt: fix switch domain allocation
+    17d26c7fa0 examples/distributor: fix distributor on Rx core
+    8bbab0b5a5 net/hns3: delete unused code
+    32535f69cd net/hns3: fix descriptors check with SVE
+    5a05333308 net/hns3: fix statistics locking
+    6420d2f828 net/hns3: fix PTP interrupt logging
+    bdabb55ddc net/hns3: support backplane media type
+    a650bf5cfe net/hns3: fix link status capability query from VF
+    44dad33c76 app/testpmd: fix GTP PSC raw processing
+    5dee226e46 net: fix GTP PSC headers
+    95b87a5314 app/testpmd: fix flex parser destroy command
+    2a4ad9bb84 app/testpmd: cleanup port resources after implicit close
+    bbf31ae0fc test: check memory allocation for CRC
+    ed8a477487 app/procinfo: show all non-owned ports
+    6fce2b8067 test/hash: fix out of bound access
+    7181c621fb rib: fix references for IPv6 implementation
+    1b31f49983 dma/idxd: fix non-AVX builds with old compilers
+    d6e109f8aa dma/idxd: fix AVX2 in non-datapath functions
+    b72fa6fd7a raw/ioat: fix build when ioat dmadev enabled
+    d028271a0a raw/ioat: fix build missing errno include
+    a3d0dbcf03 config: fix C++ cross compiler for Arm and PPC
+    3a9c3000f4 vdpa/mlx5: fix maximum number of virtqs
+    45150fc78b vdpa/mlx5: workaround var offset within page
+    0de69e279d doc: fix flow integrity hardware support in mlx5 guide
+    02017fcad3 net/mlx5: fix stack buffer overflow in drop action
+    f8b370bbb4 net/mlx5: fix metering on E-Switch Manager
+    aa8fb4afda net/mlx5: add limitation for E-Switch Manager match
+    fac54fde60 net/mlx5: fix RSS expansion for patterns with ICMP item
+    de9fa7b453 net/mlx5: fix build with clang 14
+    e4939398df net/qede: fix build with GCC 12
+    cca0819d48 net/ice/base: fix build with GCC 12
+    f361d278e7 net/ice: fix race condition in Rx timestamp
+    f294a3dbb0 net/qede: fix build with GCC 13
+    760f94b15a common/cnxk: handle ROC model init failure
+    3a66cbb695 common/cnxk: fix decrypt packet count register update
+    614cd42ac0 net/octeontx: fix port close
+    bee8c21938 malloc: fix allocation of almost hugepage size
+    7b610e0a8d net/virtio: unmap PCI device in secondary process
+    d6e4e0f46e vhost/crypto: fix descriptor processing
+    f69a61bde0 vhost/crypto: fix build with GCC 12
+    361723acef vhost: fix missing enqueue pseudo-header calculation
+    76556a3128 app/testpmd: revert MAC update in checksum forwarding
+    1901dc5492 net/ngbe: add more packet statistics
+    a92e31d35d net/txgbe: fix register polling
+    f48795dea1 app/testpmd: fix bonding slave devices not released
+    b3cfb3db85 app/testpmd: add help messages for multi-process
+    bfaaf994a5 net/hns3: fix TM capability
+    35582af08c net/hns3: fix crash from secondary process
+    ba4aa140b3 net/hns3: fix return value for unsupported tuple
+    332e5fca03 net/hns3: fix code check warning
+    fc61bd5d37 net/hns3: remove duplicate definition
+    e885f508d9 net/hns3: fix an unreasonable memset
+    8854374c9e test/bonding: fix RSS test when disable RSS
+    2b71d44b80 net/bonding: fix RSS inconsistency between ports
+    bd9ffc1961 eventdev/eth_tx: fix adapter creation
+    19591ad643 event/dlb2: fix advertized capabilities
+    f7b34f357c event/cnxk: fix Tx adapter enqueue return for CN10K
+    93b1138ccd event/cnxk: fix QoS parameter handling
+    e6f569043c event/dlb2: fix check of QID in-flight
+    7c0439f319 event/dlb2: rework queue drain handling
+    d2c3d326d0 event/octeontx: fix SSO fast path
+    bf7aa26ddd net/nfp: fix initialization
+    1c770fda6f net/nfp: make sure MTU is never larger than mbuf size
+    fe2cddeb08 net/nfp: update how max MTU is read
+    095d2af061 crypto/cnxk: swap zuc-256 iv
+    38eabfdd0d common/cnxk: swap zuc-256 key
+    d1e2bd80a6 test/ipsec: fix performance test
+    387d7f2a33 test/crypto: fix cipher offset for ZUC
+    bf03e0341c crypto/scheduler: fix queue pair in scheduler failover
+    9445fcf138 test/ipsec: fix build with GCC 12
+    e0bff8480f crypto/cnxk: fix build with GCC 12
+    978835ed87 common/cpt: fix build with GCC 12
+    a9485fd00b examples/ipsec-secgw: fix ESN setting
+    362a219f40 net/iavf: fix NAT-T payload length
+    be3beb946e examples/ipsec-secgw: fix NAT-T header fields
+    d6a5fb4092 ipsec: fix NAT-T ports and length
+    fd2d725ae5 baseband/acc100: add protection for some negative scenario
+    4184a99adb baseband/acc100: update companion PF configure function
+    4fb5429816 eal/x86: drop export of internal alignment macro
+    bba01c7ab8 sched: remove unnecessary floating point
+    03b38f5281 test: drop reference to removed tests
+    b55b2820d2 trace: fix init with long file prefix
+    17615c81fe trace: fix crash when exiting
+    64fdce75b6 net/mlx5: fix RSS hash types adjustment
+    1fb92a1f45 net/bnxt: fix tunnel stateless offloads
+    e10c862914 net/iavf: fix segfaults when calling API after VF reset failed
+    8e8886a0e9 dma/hisilicon: fix includes in header file
+    e027f40cd2 dma/skeleton: fix index returned when no memcpy completed
+    675b5bdf2c app/flow-perf: fix build with GCC 12
+    f85d0fc397 vdpa/ifc: fix build with GCC 12
+    ec6a2fa05c net/ice: fix build with GCC 12
+    ac8e3a7546 net/enetfec: fix build with GCC 12
+    9c1822f59f net/ena: fix build with GCC 12
+    c86456efc9 crypto/ipsec_mb: fix build with GCC 12
+    4cfe560401 kni: use dedicated function to set MAC address
+    9b7982b986 kni: use dedicated function to set random MAC address
+    e731132bca net/tap: fix device freeing
+    63bb35c3f3 net/failsafe: fix device freeing
+    a9062fa2fc app/testpmd: fix multicast address pool leak
+    c18ad5cc3e app/testpmd: fix packet segment allocation
+    8bb9213bbc dma/idxd: fix error code for PCI device commands
+    5215fd05ab doc: fix formatting and link in BPF library guide
+    7133eadc9c bus/fslmc: fix VFIO setup
+    d2d91f50f7 raw/ifpga: unregister interrupt on close
+    56e6acc152 raw/ifpga: remove virtual devices on close
+    e06a55362a eal/ppc: fix compilation for musl
+    c3a48df3d5 dma/hisilicon: enhance CQ scan robustness
+    543121b53e dma/hisilicon: fix index returned when no DMA completed
+    d1461844a6 examples/dma: fix Tx drop statistics
+    6564af3d39 examples/dma: fix MTU configuration
+    5d71b3d9fa common/mlx5: remove unused lcore check
+    94b9525189 net/iavf: remove dead code
+    f0c897ea5a net/iavf: increase reset complete wait count
+    341d13b08b net/iavf: fix device stop
+    eab5e035ce net/iavf: fix device initialization without inline crypto
+    78cf4cbe62 doc: update matching versions in i40e guide
+    d124639aee net/iavf: fix Rx queue interrupt setting
+    4a42ee9346 net/iavf: fix mbuf release in multi-process
+    e1a84de6a9 net/iavf: fix queue start exception handling
+    6730951205 net/i40e: fix max frame size config at port level
+    cfa67fc84b net/ice: fix MTU info for DCF
+    356142f8a2 net/ice/base: fix direction of flow that matches any
+    745563ca3c net/ice/base: fix getting sched node from ID type
+    97f8a95696 net/ixgbe: add option for link up check on pin SDP3
+    111417a49b net/iavf: fix data path selection
+    c8868b3c5c kni: fix build
+    3aeeea257f kni: fix build with Linux 5.18
+    301300a86e net/mlx5: fix statistics read on Linux
+    83abe945a6 net/mlx5: fix Tx recovery
+    f06feb0822 examples/vhost: fix crash when no VMDq
+    a7c72e3e6c vhost: fix deadlock when message handling failed
+    e156da31dd doc: fix vhost multi-queue reconnection
+    17a0ef7be3 vhost: fix async access
+    c8c6eeda37 net/bnxt: fix ULP parser to ignore segment offset
+    ca961550e9 net/bnxt: fix compatibility with some old firmwares
+    ee3b68b408 ethdev: fix port close in secondary process
+    da7caee013 common/sfc_efx/base: convert EFX PCIe INTF to MCDI value
+    959cd86178 net/vmxnet3: fix Rx data ring initialization
+    8c381b1157 app/testpmd: fix help of create meter command
+    237d93b36c net/nfp: fix disabling VLAN stripping
+    b3ef192fec net/txgbe: fix max number of queues for SR-IOV
+    7b5339d563 net/txgbe: fix SGMII mode to link up
+    8bf4f37ede net/ngbe: fix PCIe related operations with bus API
+    512f325928 net/ngbe: fix reading PHY ID
+    ba78db53ee net/ngbe: fix link speed check
+    8e23b06316 ethdev: fix port state when stop
+    54cb103e7b net/memif: fix overwriting of head segment
+    fde361696c net/bonding: fix mbuf fast free usage
+    ce5917f846 app/testpmd: do not poll stopped queues
+    8b28d584d8 app/testpmd: fix use of indirect action after port close
+    28e88ef39e ethdev: prohibit polling stopped queue
+    011122b9e0 app/testpmd: fix metering and policing command for RFC4115
+    e154ece049 app/testpmd: replace hardcoded min mbuf number with macro
+    f7638851b4 net/cnxk: fix possible null dereference in telemetry
+    c05dd44f52 ethdev: fix possible null pointer access
+    e5177f3853 ethdev: fix memory leak in xstats telemetry
+    2104014dbd net/axgbe: fix xstats get return if xstats is null
+    3422f4b58c net/mvpp2: fix xstats get return if xstats is null
+    7a1086a9d5 net/ipn3ke: fix xstats get return if xstats is null
+    bae6c70cd3 net/hns3: fix xstats get return if xstats is null
+    0c48dafbdf app/testpmd: remove useless pointer checks
+    8378498b96 app/testpmd: perform SW IP checksum for GRO/GSO packets
+    65bff89f9a app/testpmd: fix port status of bonding slave device
+    4f9c7fb5af doc: add missing auth algo for IPsec example
+    2d0ec22be8 test/crypto: fix driver name for DPAA raw API test
+    082148b6a5 drivers/crypto: fix warnings for OpenSSL version
+    48dda925a7 test/crypto: fix null check for ZUC authentication
+    c195ec01df examples/ipsec-secgw: fix promiscuous mode option
+    9c33903649 examples/ipsec-secgw: fix uninitialized memory access
+    ea0ab8e686 pcapng: fix timestamp wrapping in output files
+    412da85334 pipeline: fix emit instruction for invalid headers
+    ce7b8e673a devtools: fix null test for NUMA systems
+    84eb565954 doc: fix API index Markdown syntax
+    d55a70f874 mbuf: dump outer VLAN
+    2fcd1cc163 rib: fix traversal with /32 route
+    8c5ab722fb acl: fix rules with 8-byte field size
+    5ffee1e906 test: avoid hang if queues are full and Tx fails
+    c1f49d47a9 eal/freebsd: fix use of newer cpuset macros
+    214462a05b devargs: fix leak on hotplug failure
+    29fa5a6eaf eal/x86: fix unaligned access for small memcpy
+    eeaeb58d56 event/cnxk: fix out of bounds access in test
+    09d859555f eventdev/eth_rx: fix telemetry Rx stats reset
+    ec08dcaf4b doc: fix build with sphinx 4.5
+    671e8fa0c8 net/mlx5: fix no-green metering with RSS
+    6857653625 net/bnxt: fix freeing VNIC filters
+    fa1a893ff7 net/bnxt: recheck FW readiness if in reset process
+    2ff3768d20 net/bnxt: fix link status when port is stopped
+    71ab79d3a7 net/bnxt: force PHY update on certain configurations
+    db239d7290 net/bnxt: fix speed autonegotiation
+    ce36a5d910 net/bnxt: avoid unnecessary endianness conversion
+    8c464cf618 net/bnxt: handle queue stop during RSS flow create
+    c25b1d545e net/bnxt: check duplicate queue IDs
+    3f9914a7b3 net/bnxt: fix ring group on Rx restart
+    821dd9cd43 net/bnxt: fix RSS action
+    3774986bdd net/bnxt: fix Rx configuration
+    d620238a97 net/bnxt: remove unused macro
+    2f66d10615 net/bnxt: fix device capability reporting
+    b174adfcae net/bnxt: fix reordering in NEON Rx
+    7d9f5b3b33 net/cnxk: add barrier after meta batch free in scalar
+    8790891a6d common/cnxk: fix SQ flush sequence
+    08d2d8868e net/cnxk: fix uninitialized variables
+    a10e2ec8ee common/cnxk: fix null pointer dereference
+    d13786763a common/cnxk: fix unaligned access to device memory
+    b117088323 net/cnxk: add message on flow parsing failure
+    caf428f0a0 app/testpmd: fix MTU verification
+    5e1545b730 app/testpmd: check statistics query before printing
+    66b7e330d6 net/hns3: remove unnecessary RSS switch
+    15b794b152 ethdev: fix RSS update when RSS is disabled
+    0cec1c9477 net/hns3: remove redundant RSS tuple field
+    5a6fb3a977 net/hns3: fix rollback on RSS hash update
+    8c193c0b4c net/hns3: fix RSS disable
+    8b00917c08 net/hns3: fix mbuf free on Tx done cleanup
+    74089f471a net/hns3: fix pseudo-sharing between threads
+    827f72e8ce net/hns3: fix MAC and queues HW statistics overflow
+    d96ee7bac0 net/hns3: fix order of clearing imissed register in PF
+    aa2c6d3f69 ethdev: fix build with vtune option
+    b839853e83 net/tap: fix interrupt handler freeing
+    41c0ba64de net/bonding: fix slave stop and remove on port close
+    5a8afc69af net/bonding: fix stopping non-active slaves
+    e856fe9aa6 doc: update matching versions in ice guide
+    82ccc27de5 net/dpaa: fix event queue detach
+    d24d6395d6 vdpa/mlx5: fix dead loop when process interrupted
+    879fb64517 vdpa/mlx5: fix interrupt trash that leads to crash
+    78414da84e vhost: fix missing virtqueue lock protection
+    e3036fbd0a net/vhost: fix TSO feature default disablement
+    4852da727c net/virtio: restore some optimisations with AVX512
+    58d1b856be net/vhost: fix access to freed memory
+    24dabb9d25 net/cxgbe: fix Tx queue stuck with mbuf chain coalescing
+    6627ee48b5 net/cxgbe: fix port ID in Rx mbuf
+    8cf194f699 net/bonding: fix RSS key config with extended key length
+    3192737d10 net/nfp: remove unneeded header inclusion
+    8ab93b06bc net/netvsc: fix hot adding multiple VF PCI devices
+    03e1864411 test/mem: disable ASan when accessing unallocated memory
+    e9b46ab763 net/mlx5: fix LRO configuration in drop Rx queue
+    d5fdf0a2ba net/mlx5: fix LRO validation in Rx setup
+    28ecf49a60 examples/l2fwd-crypto: fix stats refresh rate
+    aeca5959dd common/dpaax: fix short MAC-I IV calculation for ZUC
+    5a9af71a6d crypto/dpaa2_sec: fix operation status for simple FD
+    5e3a3f48d1 crypto/dpaa2_sec: fix crypto operation pointer
+    4644779034 crypto/dpaa_sec: fix secondary process probing
+    15a3ae1a5f crypto/dpaa2_sec: fix chained FD length in raw datapath
+    bee2c296c8 crypto/dpaa_sec: fix chained FD length in raw datapath
+    86ba4e206e crypto/dpaa2_sec: fix buffer pool ID check
+    f72e482fec crypto/dpaa2_sec: fix fle buffer leak
+    8bad3a05f1 crypto/mlx5: fix login cleanup
+    be6637f158 security: fix SA lifetime comments
+    bb386a9f91 crypto/dpaa_sec: fix digest size
+    f343d3b4ed eal: fix C++ include for device event and DMA
+    de48c79f3b malloc: fix ASan handling for unmapped memory
+    804b2e64eb mem: skip attaching external memory in secondary process
+    65855b2d37 test/table: fix buffer overflow on lpm entry
+    c7e0471948 net/mlx5: fix Rx/Tx stats concurrency
+    b0e6a9c183 net/mlx5: fix GTP handling in header modify action
+    b3896dba13 net/mlx5: restrict Rx queue array access to boundary
+    c08c6247f2 net/mlx5: fix counter in non-termination meter
+    99ba358268 net/mlx5: fix probing with secondary bonding member
+    1430ccb1db net/mlx5: fix Tx when inlining is impossible
+    72691359fa common/mlx5: fix memory region range calculation
+    550f0d8288 net/netvsc: fix calculation of checksums based on mbuf flag
+    21edf23c6d net/ice: fix raw flow input pattern parsing
+    aedf24edbb net/ice: refactor parser usage
+    bb6683a89f net/ice: add missing Tx burst mode name
+    91355ad5b2 net/i40e: populate error in flow director parser
+    8ae457cbf5 net/ice: improve performance of Rx timestamp offload
+    a25197930d test/bpf: skip test if libpcap is unavailable
+    6da5f268db examples/bond: fix invalid use of trylock
+    9c267cbd10 net/dpaa2: fix dpdmux default interface
+    d8898f0763 eal/windows: add missing C++ include guards
+    fad1dbc0c5 eal/windows: fix data race when creating threads
+    95e04d4866 doc: fix release note typo
+    592c7bf714 net/af_xdp: make compatible with libbpf >= 0.7.0
+    6721fb14eb net/af_xdp: use libxdp if available
+    fcd039e466 version: 21.11.1
+    2130012318 net/cnxk: fix build with optimization
+    9518bcf700 net/mlx5: fix flex item availability
+    05aa560efc version: 21.11.1-rc1
+    b68dbab7c8 Revert "net/mlx5: fix flex item availability"
+    bb5ce0625c crypto/ipsec_mb: fix GMAC parameters setting
+    cef6bb00ce crypto/ipsec_mb: fix length and offset settings
+    be2edca509 Revert "crypto/ipsec_mb: fix length and offset settings"
+    dec4b1b89e raw/ifpga: fix build with optimization
+    4586b6b8c2 doc: fix telemetry example in cryptodev guide
+    2740b29e48 doc: fix missing note on UIO module in Linux guide
+    713a4bc48c doc: replace characters for (R) symbol in Linux guide
+    a50b228d2d net/mlx5: fix CPU socket ID for Rx queue creation
+    8db2867c79 net/mlx5: fix port matching in sample flow rule
+    7c12be128c eventdev: fix clang C++ include
+    4f263532d0 cryptodev: fix clang C++ include
+    ec8a6dc2e6 compressdev: fix missing space in log macro
+    37232971b0 eal/freebsd: add missing C++ include guards
+    8320df4804 examples/l3fwd: fix buffer overflow in Tx
+    3313fe0301 app/testpmd: fix flow rule with flex input link
+    39d09d7155 app/testpmd: fix GTP header parsing in checksum engine
+    514668e230 app/testpmd: fix show RSS RETA on Windows
+    3a3d4d3332 app/regex: fix number of matches
+    f2a457c605 bpf: fix build with some libpcap version on FreeBSD
+    e84b43b5eb crypto/ipsec_mb: fix GCM requested digest length
+    8c7bebaa38 net/af_xdp: fix custom program loading with multiple queues
+    88dbe7c555 net/qede: fix maximum Rx packet length
+    09891782a4 net/qede: fix Rx bulk
+    506f3198ab net/qede: fix Tx completion
+    268985d32e doc: fix modify field action description for mlx5
+    59a419a416 net/mlx5: fix implicit tag insertion with sample action
+    42cf1850e2 net/mlx5: forbid multiple ASO actions in a single rule
+    dd859e1797 net/mlx5: fix sample flow action on trusted device
+    7680d1d321 net/mlx5: fix VLAN push action validation
+    691ff0b6db net/mlx5: fix NIC egress flow mismatch in switchdev mode
+    6cb68162e4 vhost: fix FD leak with inflight messages
+    4c40d30d2b vhost: fix queue number check when setting inflight FD
+    6ae8ba6b7a build: suppress rte_crypto_asym_op abi check
+    efd091d541 devtools: fix symbols check
+    026470bafa build: hide local symbols in shared libraries
+    89f14be564 common/mlx5: consider local functions as internal
+    6e7f8939f2 regexdev: fix section attribute of symbols
+    6472c2d476 net/iavf: fix potential out-of-bounds access
+    67191a9cb3 net/sfc: reduce log level of tunnel restore info error
+    ee836190a1 net/mlx5: fix meter creation default state
+    a17cea76b7 net/mlx5: fix configuration without Rx queue
+    d31463e0b2 net/mlx5: fix MPLS/GRE Verbs spec ordering
+    48fe9efaf2 net/mlx5: fix flex item availability
+    3bd5cf393d net/mlx5: fix meter policy creation assert
+    c77572d2a1 net/mlx5: remove unused reference counter
+    0036f3941e net/mlx5: fix modify port action validation
+    eebfb74c51 net/mlx5: fix shared RSS destroy
+    5d3ade99bd net/mlx5: fix next protocol RSS expansion
+    4500ec704f net/mlx5: fix inet IPIP protocol type
+    9bdcba122b net/bnxt: fix null dereference in session cleanup
+    4aadf56c66 ethdev: fix doxygen comments for device info struct
+    0c7cbe52f7 build: fix build on FreeBSD with Meson 0.61.1
+    dab4a96be2 devtools: remove event/dlb exception in ABI check
+    4fa43b7bff vhost: fix physical address mapping
+    f03f4b98c9 net/cnxk: fix Rx/Tx function update
+    3a5e1aaee4 net/mlx5: fix initial link status detection
+    295f5022f6 net/mlx5: fix link status change detection
+    be828a8eaf common/mlx5: add Netlink event helpers
+    7214354c52 examples/kni: add missing trailing newline in log
+    f5ba75eb9a examples/l3fwd: make Rx and Tx queue size configurable
+    ef48f23bfd examples/l3fwd: share queue size variables
+    6bf720d7d7 examples/flow_classify: fix failure message
+    2719708908 examples/distributor: reduce Tx queue number to 1
+    7aa3bbafd6 app/dumpcap: check for failure to set promiscuous
+    955a6afc6f test/bpf: skip dump if conversion fails
+    e71f3dc931 pcapng: handle failure of link status query
+    b8222349eb app/pdump: abort on multi-core capture limit
+    8adbf6df92 raw/ifpga: fix monitor thread
+    69da51b405 raw/ifpga: fix interrupt handle allocation
+    d4536cf86a raw/ifpga: fix variable initialization in probing
+    186250df3d gpu/cuda: fix dependency loading path
+    af8ffbba79 sched: remove useless malloc in PIE data init
+    8ebcaf23cb eal/linux: fix device monitor stop return
+    89d84883b0 examples/vhost: fix launch with physical port
+    7a5659dd94 vhost: fix linker script syntax
+    b7f396be62 net/ice: fix Tx offload path choice
+    28acfe550d common/cnxk: fix mbuf data offset for VF
+    51af57d005 common/cnxk: fix bitmap usage for TM
+    43dec151be net/iavf: fix AES-GMAC IV size
+    f314e6acfb net/mlx5: fix flex item header length translation
+    1926a8d8c5 net/mlx5: fix matcher priority with ICMP or ICMPv6
+    1f5aede9bd net/mlx5: reduce flex item flow handle size
+    279cc42d3b net/mlx5: fix GRE item translation in Verbs
+    39cba36e63 doc: fix typos and punctuation in flow API guide
+    41510092eb net/kni: fix config initialization
+    6090ee620d net/txgbe: fix queue statistics mapping
+    8a301f166c net/mlx5: fix check in count action validation
+    c46eaf6f4c net/mlx5: fix shared counter flag in flow validation
+    de3ad851ca net/mlx5: fix destroying empty matchers list
+    6468addfe3 net/mlx5: fix indexed pool fetch overlap
+    ae071e1851 net/iavf: fix function pointer in multi-process
+    b82b6ed613 net/iavf: support NAT-T / UDP encapsulation
+    5f275a0312 net/ixgbe: fix FSP check for X550EM devices
+    aa6f865e7e net/hns3: increase time waiting for PF reset completion
+    94420985c7 net/hns3: fix VF RSS TC mode entry
+    dc3cb423f5 net/hns3: fix RSS TC mode entry
+    772292049b net/hns3: remove duplicate macro definition
+    24939fcc13 compressdev: fix socket ID type
+    30fea0f0a6 app/compress-perf: fix number of queue pairs to setup
+    b2b15ab556 app/compress-perf: fix socket ID type during init
+    8ace98122a compress/mlx5: support out-of-space status
+    d386e37612 app/compress-perf: optimize operations pool allocation
+    c65e648405 app/compress-perf: fix cycle count operations allocation
+    9bb7a3f9df event/dlb2: add shift value check in sparse dequeue
+    d2b19d6346 event/cnxk: fix Rx adapter config check
+    dd8c73295c event/cnxk: fix sub-event clearing mask length
+    170c124998 kni: fix freeing order in device release
+    0617d94900 bus/pci: assign driver pointer before mapping
+    099aba7265 devargs: fix crash with uninitialized parsing
+    dcf545fce1 eal/linux: fix illegal memory access in uevent handler
+    38c59b06b0 distributor: fix potential overflow
+    77b6873f73 efd: fix uninitialized structure
+    b017e1159f test/efd: fix sockets mask size
+    e9100a0196 doc: add CUDA driver features
+    9703132099 app/testpmd: fix build without drivers
+    158012beee app/testpmd: fix raw encap of GENEVE option
+    8c4ce4d7ff net/i40e: fix unintentional integer overflow
+    3334722c21 net/cnxk: fix RSS RETA table update
+    b8bfbcd1a0 net/cnxk: fix build with GCC 12
+    c957e1063b net/cnxk: fix inline IPsec security error handling
+    ee97d867e7 net/cnxk: register callback early to handle initial packets
+    c5124d0ea8 net/cnxk: fix inline device RQ tag mask
+    283f54ba9d mempool/cnxk: fix batch allocation failure path
+    ba9d00afac doc: correct name of BlueField-2 in mlx5 guide
+    9385e97741 doc: replace broken links in mlx guides
+    239796f3dd doc: remove obsolete vector Tx explanations from mlx5 guide
+    2007577b29 net/mlx5: fix E-Switch manager vport ID
+    a600672d1a net/mlx5: fix entry in shared Rx queues list
+    7f982e1320 net/mlx5: fix meter sub-policy creation
+    7b5ea7efc3 net/mlx5: remove unused function
+    aff5b2ee60 net/mlx5: set flow error for hash list create
+    d2e99680f9 common/mlx5: fix queue pair ack timeout configuration
+    233c5aa3e7 net/ena: fix checksum flag for L4
+    f5eff853e4 net/ena: check memory BAR before initializing LLQ
+    70c3e891d0 net/ena: fix meta descriptor DF flag setup
+    867dd857f4 net/ena: fix reset reason being overwritten
+    c443512e3d net/ena: skip timer if reset is triggered
+    4e9e9e29c4 net/ena: remove unused offload variables
+    e63e5c79ce net/ena: remove unused enumeration
+    399b489328 net/txgbe: fix debug logs
+    a8be311dd1 net/ngbe: fix debug logs
+    c1cf1a9735 app/testpmd: fix GENEVE parsing in checksum mode
+    036993974f net/mlx5: fix errno update in shared context creation
+    b6b1c3ad5d net/mlx5: fix ASO CT object release
+    49257a9394 net/mlx5: fix ineffective metadata argument adjustment
+    50f3a03f75 net/mlx5: fix sibling device config check
+    e68285796c net/i40e: enable maximum frame size at port level
+    15ff989ca8 net/iavf: fix segmentation offload buffer size
+    dbb1c53725 net/iavf: fix segmentation offload condition
+    d75be6c28d net/ice: fix overwriting of LSE bit by DCF
+    a628e2bf19 net/af_xdp: ensure socket is deleted on Rx queue setup error
+    ae2f030ad1 net/sfc: fix memory allocation size for cache
+    ea21c6bf4e net/sfc: fix flow tunnel support detection
+    a58ae9af98 common/sfc_efx/base: add missing handler for 1-byte fields
+    4874f1d005 common/sfc_efx/base: fix recirculation ID set in outer rules
+    e4b43ee28c net/cnxk: fix uninitialized local variable
+    f0cfb0e3d1 common/cnxk: fix uninitialized pointer read
+    2f61027cda common/cnxk: fix unintended sign extension
+    7eeb8d37ed common/cnxk: add missing checks of return values
+    dd1851c1de net/af_xdp: add missing trailing newline in logs
+    6a9b64907e common/cnxk: fix NPC key extraction validation
+    87b639b4ed vhost: fix unsafe vring addresses modifications
+    01e3dee29c vhost: fix field naming in guest page struct
+    e09a0094a6 common/cnxk: fix base rule merge
+    1751e87f51 common/cnxk: fix log level during MCAM allocation
+    d91869302f common/cnxk: fix flow deletion
+    450ee57e5f app/testpmd: check starting port is not in bonding
+    387187932f net/bonding: fix slaves initializing on MTU setting
+    c93302dd4f net/cnxk: fix mbuf data length
+    116bfaa14e ethdev: fix MAC address in telemetry device info
+    a42a874599 net/iavf: reset security context pointer on stop
+    496747d389 net/txgbe: reset security context pointer on close
+    223010f1da net/ixgbe: reset security context pointer on close
+    967cb49748 net/nfb: fix multicast/promiscuous mode switching
+    afe8e58fed net/nfb: fix array indexes in deinit functions
+    daf06c45e8 crypto/ipsec_mb: fix length and offset settings
+    cfa7703c8e crypto/ipsec_mb: fix ZUC operation overwrite
+    1170e24b20 crypto/ipsec_mb: fix ZUC authentication verify
+    bbc596578a crypto/ipsec_mb: check missing operation types
+    9c67637c8c crypto/virtio: fix out-of-bounds access
+    301ee2f378 baseband/acc100: avoid out-of-bounds access
+    79247ddc0d examples/l2fwd-crypto: fix port mask overflow
+    5772c7b32e doc: fix FIPS guide
+    ad76dc4e91 examples/ipsec-secgw: fix buffer freeing in vector mode
+    f092922c36 cryptodev: fix RSA key type name
+    c8bcbe8b68 crypto/ipsec_mb: remove useless check
+    fec66e64e5 event/cnxk: fix uninitialized local variables
+    52d824d106 event/cnxk: fix variables casting
+    9a552423fd event/dlb2: poll HW CQ inflights before mapping queue
+    720fb431b4 event/dlb2: update rolling mask used for dequeue
+    7d7a9f161d eventdev/eth_rx: fix queue config query
+    529f3a735e eventdev/eth_rx: fix parameters parsing memory leak
+    d33bb6bd28 examples/qos_sched: fix core mask overflow
+    9970eab8c4 doc: improve configuration examples in idxd guide
+    b254386fad dma/idxd: configure maximum batch size to high value
+    ebc0188ccb test/dma: fix missing checks for device capacity
+    c6aea57d99 dma/hisilicon: use common PCI device naming
+    56d6e5b091 ethdev: fix cast for C++ compatibility
+    5d75eb0924 cryptodev: add missing C++ guards
+    c02f5bcfe9 bpf: add missing C++ guards
+    362921a8e3 vhost: add missing C++ guards
+    fb37e2b3ae kni: add missing C++ guards
+    e4dbb6873a eventdev: add missing C++ guards
+    3d1746c9ac compressdev: add missing C++ guards
+    6fca954338 acl: add missing C++ guards
+    447210e07c metrics: add missing C++ guards
+    b99a45df05 ethdev: add missing C++ guards
+    e7291176c2 telemetry: add missing C++ guards
+    b2f85a808a eal: add missing C++ guards
+    81c40b01d0 dmadev: add missing header include
+    692ae335d6 eventdev/eth_tx: fix queue add error code
+    adfebc59b5 pipeline: fix table state memory allocation
+    1e8aa23aba pipeline: fix annotation checks
+    13ddcf9dee raw/ntb: clear all valid doorbell bits on init
+    0627e93c26 crypto/dpaax_sec: fix auth/cipher xform chain checks
+    0fd24703c6 crypto/cnxk: fix update of number of descriptors
+    2630bff5a4 compress/octeontx: fix null pointer dereference
+    6f9d8df3d1 crypto/qat: fix GEN4 AEAD job in raw data path
+    98ec92641b crypto/ipsec_mb: fix buffer overrun
+    13aab9f493 crypto/ipsec_mb: fix premature dereference
+    98ece68514 test/crypto: fix out-of-place SGL in raw datapath
+    4d5d4d7abc examples/ipsec-secgw: fix offload flag used for TSO IPv6
+    abfad6b59c net/txgbe: fix KR auto-negotiation
+    42960ce408 net/txgbe: fix link up and down
+    59691181a3 net/ngbe: fix packet statistics
+    b9c20ea8f0 net/ngbe: fix Tx hang on queue disable
+    3698c17f42 net/ngbe: fix missed link interrupt
+    cacbd7e4f7 net/ngbe: fix Rx by initializing packet buffer early
+    863d787942 net/bnxt: fix ring calculation for representors
+    e53da2ffbe net/bnxt: set HW coalescing parameters
+    6c8ff52958 net/mlx5: fix inline length for multi-segment TSO
+    3831da6c7b net/mlx5: fix meter capabilities reporting
+    6022babd1d net/mlx5: fix committed bucket size
+    c9a140e15b net/mlx5: fix metadata endianness in modify field action
+    8f821b1135 vdpa/sfc: fix null dereference during removal
+    5dfd488d82 vdpa/sfc: fix null dereference during config
+    7537c99618 net/ice: fix build with 16-byte Rx descriptor
+    607d564355 net/ice: fix pattern check in flow director
+    d422a9cdae net/ice/base: add profile validation on switch filter
+    5dc74f1348 net/iavf: count continuous DD bits for Arm in flex Rx
+    68522027e3 net/iavf: count continuous DD bits for Arm
+    f746bb72ba net/iavf: fix null pointer dereference
+    b832a197fa net/sfc: demand Tx fast free offload on EF10 simple datapath
+    915b0b0b9f net/sfc: do not push fast free offload to default TxQ config
+    6fdd1953b0 ethdev: remove unnecessary null check
+    851b597291 net: fix L2TPv2 common header
+    d594afc792 net/memif: remove pointer dereference before null check
+    273bacf2a8 config: align mempool elements to 128 bytes on CN10K
+    e183e43e2b vfio: cleanup the multiprocess sync handle
+    c32322e508 ipc: end multiprocess thread during cleanup
+    6e1bc26cde test/mbuf: fix mbuf data content check
+    472f790f95 app/fib: fix division by zero
+    1058b2c369 mem: check allocation in dynamic hugepage init
+    c59904ed03 vhost: fix C++ include
+    6afaa0f3d7 table: fix C++ include
+    91b9d6cd34 ipsec: fix C++ include
+    4f328f8e2b graph: fix C++ include
+    3668e54828 eventdev: fix C++ include
+    1fdfd87f14 eal: fix C++ include
+    72334ceaf1 config/arm: add values for native armv7
+    02a96ad251 stack: fix stubs header export
+    6b06137c98 regex/mlx5: fix memory allocation check
+    72487940d1 net/virtio: fix slots number when indirect feature on
+    160769f648 vhost: fix guest to host physical address mapping
+    eaf935f63c net/sfc: fix lock releases
+    ce413e1922 app/testpmd: fix stack overflow for EEPROM display
+    ac180f4d26 net/tap: fix to populate FDs in secondary process
+    721d0bbd16 ethdev: add internal function to device struct from name
+    605d1de0d3 app/testpmd: fix bonding mode set
+    7b71bc2d00 net/bonding: fix reference count on mbufs
+    26f2cc6490 net/bonding: fix promiscuous and allmulticast state
+    30dcde8467 net/ixgbe: check filter init failure
+    e72696baa4 net/hns3: delete duplicated RSS type
+    2ae91ac660 net/hns3: fix operating queue when TCAM table is invalid
+    2b7587ea99 net/hns3: fix double decrement of secondary count
+    10342b22ae net/hns3: fix insecure way to query MAC statistics
+    9b1f69f906 net/hns3: fix RSS key with null
+    d7033074e0 net/hns3: fix max packet size rollback in PF
+    2c27da1e51 net/bonding: fix MTU set for slaves
+    9ac1343c4d net/dpaa2: fix null pointer dereference
+    90386f428c net/enic: fix dereference before null check
+    57b2aa0265 test/mem: fix error check
+    32cb4f09ff eal/windows: fix error code for not supported API
+    5a9f8c2ba4 ring: fix overflow in memory size calculation
+    8b45a1dea3 ring: fix error code when creating ring
+    63cb4ae54f doc: fix KNI PMD name typo
+    4f140c9a9a build: remove deprecated Meson functions
+    aa8ad3e48c build: fix warnings when running external commands
+    1e770ae599 pflock: fix header file installation
+    86f7ed09ae doc: update matching versions in ice guide
+    37d27abc59 net/mlx5: reject jump to root table
+    bc3452d45d common/mlx5: fix probing failure code
+    99f5cd0dc3 net/mlx5: fix mark enabling for Rx
+    d157628041 common/mlx5: fix MR lookup for non-contiguous mempool
+    4c4c0cf459 net/virtio: fix uninitialized RSS key
+    0d2ddde419 net/virtio-user: check FD flags getting failure
+    4210bb89d8 net/virtio-user: fix resource leak on probing failure
+    efc7ea9dd7 vdpa/ifc: fix log info mismatch
+    7c58dbf159 net/virtio: fix Tx queue 0 overridden by queue 128
+    f05bbce185 vdpa/mlx5: workaround queue stop with traffic
+    ad51b31a30 net/hns3: fix using enum as boolean
+    9d6db3c3ad net/nfp: free HW ring memzone on queue release
+    961922eb71 net/bonding: fix RSS with early configure
+    6492c9875d net/hns3: fix vector Rx/Tx when PTP enabled
+    9c10b251a1 net/hns3: fix mailbox wait time
+    e073f410fb net/hns3: fix Rx/Tx functions update
+    581e547a6f net/memif: remove unnecessary Rx interrupt stub
+    5de680a494 raw/ifpga/base: fix port feature ID
+    0f8f337740 net/bnxt: fix VF resource allocation strategy
+    f70203b5c0 net/bnxt: fix memzone allocation per VNIC
+    e44c18821c net/bnxt: handle ring cleanup in case of error
+    a04034b131 net/bnxt: fix check for autoneg enablement
+    72db0cca69 raw/ifpga: fix thread closing
+    7c682d5c05 net/ice: fix link up when starting device
+    b38f8855d6 net/ice: fix mbuf offload flag for Rx timestamp
+    81597d6e20 raw/ifpga/base: fix SPI transaction
+    4599a6179a net/sfc: validate queue span when parsing flow action RSS
+    c935f2719d ethdev: fix Rx queue telemetry memory leak on failure
+    3fd3c3b3b0 common/cnxk: fix error checking
+    2253ed93c3 common/cnxk: fix uninitialized variables
+    38f3a00894 common/cnxk: fix null pointer dereferences
+    1349f9e568 common/cnxk: always use single interrupt ID with NIX
+    cfcdf00068 common/cnxk: reset stale values on error debug registers
+    285183e606 common/cnxk: fix byte order of frag sizes and infos
+    5deff57b9d common/cnxk: fix shift offset for TL3 length disable
+    41569f9deb net/nfp: remove useless range checks
+    23c2f68598 net/nfp: remove duplicated check when setting MAC address
+    85d9e45c32 net/mlx5: fix MPRQ WQE size assertion
+    a9bc2a46c2 net/mlx5: fix maximum packet headers size for TSO
+    70211750cc net/bnxt: restore dependency on kernel modules
+    279f0d75c0 net/dpaa2: fix timestamping for IEEE1588
+    c96ea2bf1c net/dpaa2: fix unregistering interrupt handler
+    699c30f853 net/cxgbe: fix dangling pointer by mailbox access rework
+    ef94549efe app/testpmd: fix external buffer allocation
+    6d5f3984dc app/testpmd: fix dereference before null check
+    83774f8a67 net/bonding: fix mode type mismatch
+    40a4d0544b net/af_xdp: fix build with -Wunused-function
+    181ddedb1a net/axgbe: use PCI root complex device to distinguish device
+    bb1854bc69 app/testpmd: fix Tx scheduling interval
+    96b92d045f net/bonding: fix offloading configuration
+    661587eaae net/cnxk: fix promiscuous mode in multicast enable flow
+    26a3e3e7d3 net/bnxt: check VF representor pointer before access
+    2f9df1413c net/bnxt: fix xstats query
+    b74a60df81 net/bnxt: fix crash by validating pointer
+    2e31b779e8 net/bnxt: fix PAM4 mask setting
+    6c57090c01 net/bnxt: fix ring teardown
+    f2c08d53a1 net/bnxt: fix handling of VF configuration change
+    377a9a8197 net/bnxt: get maximum supported multicast filters count
+    b0fe5e2fa9 net/bnxt: fix flow create when RSS is disabled
+    6b722d7b37 net/bnxt: add null check for mark table
+    a31a8b6a97 net/bnxt: set fast-path pointers only if recovery succeeds
+    6b7c0ce0ce net/bnxt: cap maximum number of unicast MAC addresses
+    a9ea24c80c net/bnxt: fix restoring VLAN filtering after recovery
+    56f92b77e7 net/bnxt: restore RSS configuration after reset recovery
+    880ed79159 net/bnxt: fix queue stop operation
+    f4d1e64dec net/bnxt: fix multicast MAC restore during reset recovery
+    5e35fae222 net/bnxt: fix multicast address set
+    c59f883c24 net/bnxt: fix xstats names query overrun
+    9d1da3652a net/mlx5: relax headroom assertion
+    ab06c7bf9b net/mlx5: fix GCC uninitialized variable warning
+    379079d6cc net/mlx5: fix GRE protocol type translation for Verbs
+    d8d54171bd net/mlx5: fix RSS expansion with explicit next protocol
+    032e27c0b2 net/mlx5: fix assertion on flags set in packet mbuf
+    59f8d27b5f common/mlx5: fix missing validation in devargs parsing
+    407b3ae746 net/mlx5: fix memory socket selection in ASO management
+    751bca90b1 common/mlx5: fix error handling in multi-class probe
+    0832935bf5 net/ixgbe: add vector Rx parameter check
+    1eef1cf7d3 net/ice: fix Tx checksum offload
+    76a729e7ed net/ice: track DCF state of PF
+    85e84c5930 net/iavf: remove git residue symbol
+    3380c428a5 net/ice: fix Tx checksum offload capability
+    a9ff22fc59 net/ice: fix pattern check for flow director parser
+    f1339fd8ec net/qede: fix redundant condition in debug code
+    9ff875ecff common/cnxk: fix nibble parsing order when dumping MCAM
+    23b8e0a337 net/mlx5: fix MPRQ stride devargs adjustment
+    c58aaabede net/mlx5: improve stride parameter names
+    f873364dfe common/mlx5: add minimum WQE size for striding RQ
+    0422d79548 net/mlx5: fix modify field MAC address offset
+    d021a2f9b7 dma/cnxk: fix installing internal headers
+    bb6b3ec4e9 devtools: fix comment detection in forbidden token check
+    19aefaf2cb examples/ipsec-secgw: fix default flow rule creation
+    8c4f0e9e73 examples/ipsec-secgw: fix eventdev start sequence
+    5831db3bab crypto/ipsec_mb: fix tainted data for session
+    4b8475c98e crypto/ipsec_mb: fix queue cleanup null pointer dereference
+    07ee507051 crypto/ipsec_mb: fix queue setup null pointer dereference
+    7823f35581 crypto/cnxk: fix extend tail calculation
+    5977020bc3 crypto/cnxk: fix inflight count calculation
+    99d6741b64 crypto/cnxk: enable allocated queues only
+    de6b483f38 common/cnxk: fix reset of fields
+    7ee503d33f common/cnxk: add workaround for vWQE flush
+    e3b9a8c32f event/cnxk: fix QoS devargs parsing
+    87646d04a1 examples/l3fwd: fix Rx burst size for event mode
+    8d0ffec0cf eventdev/eth_rx: fix missing internal port checks
+    393d0580db doc: fix dlb2 guide
+    19c6e95cd6 eal/linux: log hugepage create errors with filename
+    24e496918b config: add arch define for Arm
+    a8dd54379d gpu/cuda: fix memory list cleanup
+    8e8fe373c0 dma/idxd: fix wrap-around in burst capacity calculation
+    e0f7faeba1 dma/idxd: fix paths to driver sysfs directory
+    cb7d9a39cb dma/idxd: fix burst capacity calculation
+    572305874a bus/ifpga: remove useless check while browsing devices
+    c30f1ec97d doc: remove dependency on findutils on FreeBSD
+    cb2e09a4da buildtools: fix AVX512 check for Python 3.5
+    3b511fdf21 maintainers: update for stable branches
+    b3122779a5 doc: replace deprecated distutils version parsing
+    2be1e5158e fix spelling in comments and strings
+
+
+* Wed Aug 31 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-48
+- Merging upstream branch-2.17 [RH git: ed428149e4]
+    Commit list:
+    dfc3e65c81 raft: Fix unnecessary periodic compactions.
+    6f322ccf8a netdev-offload-tc: Parse tunnel options only for geneve ports.
+
+
+* Mon Aug 29 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-47
+- Merging upstream branch-2.17 [RH git: 080c941dff]
+    Commit list:
+    a9f10a2bdc netdev-offload-tc: Add missing handling of the tunnel source port.
+    ec2e967c1d netdev-offload-tc: Fix ignoring unknown tunnel keys.
+    686984d9a0 netdev-offload-tc: Use masks instead of keys while parsing tunnel attributes.
+    92c072d944 netdev-offload-tc: Explicitly handle mask for the tunnel destination port.
+    87f191a3a3 netdev-offload-tc: Fix the mask for tunnel metadata length.
+
+
+* Tue Aug 16 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-46
+- Merging upstream branch-2.17 [RH git: 020b9deea5]
+    Commit list:
+    cadcea6fea releases: Mark 2.17 as a new LTS release.
+
+
+* Mon Aug 15 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-45
+- Merging upstream branch-2.17 [RH git: 824e124b3b]
+    Commit list:
+    8a1b734480 handlers: Fix handlers mapping.
+    713072fdac handlers: Create additional handler threads when using CPU isolation.
+
+
+* Fri Aug 12 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-44
+- Merging upstream branch-2.17 [RH git: 7e55c5405a]
+    Commit list:
+    84a8910ffe packets: Fix misaligned access to ip6_hdr.
+    fe27e0c884 python: Do not send non-zero flag for a SSL socket. (#2115035)
+    729a872f19 dpif-netdev: Simplify AVX512 build time checks to enhance readability.
+
+
+* Wed Aug 10 2022 Timothy Redaelli <tredaelli@redhat.com> - 2.17.0-43
+- pkgtool: keep %{?dist} before added bz string [RH git: 0c88379419]
+    Signed-off-by: Timothy Redaelli <tredaelli@redhat.com>
+
+
+* Tue Aug 09 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-42
+- Merging upstream branch-2.17 [RH git: 690a14282a]
+    Commit list:
+    1b566f8b80 github: Move CI to ubuntu 20.04 base image.
+
+
+* Mon Aug 08 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-41
+- Merging upstream branch-2.17 [RH git: d208bfac1d]
+    Commit list:
+    86725abe10 netdev-offload-tc: Disable offload of IPv6 fragments.
+    2276daf889 ovs-save: Use right OpenFlow version for add-tlv-map.
+
+
+* Mon Aug 08 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-40
+- Merging upstream branch-2.17 [RH git: e21f40b07e]
+    Commit list:
+    c353e757d7 system-traffic: Fix IPv4 fragmentation test sequence for check-kernel.
+    6f54dc134a system-traffic: Fix incorrect neigh entry in ipv6 header modification test.
+    7848ae6ffb system-traffic: Don't run IPv6 header modification test on kernels < 5.19.
+
+
+* Fri Aug 05 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-39
+- Merging upstream branch-2.17 [RH git: 7a3929ad1f]
+    Commit list:
+    399185865e netdev-linux: set correct action for packets that passed policer
+
+
+* Thu Aug 04 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-38
+- Merging upstream branch-2.17 [RH git: 862609bde4]
+    Commit list:
+    cda60c8558 python: Fix E275 missing whitespace after keyword.
+    3678fb544d tc: Use sparse hex dump while printing inconsistencies.
+    03a0ec82b7 netdev-offload-tc: Print unused mask bits on failure.
+    5b8453a44e dynamic-string: Add function for a sparse hex dump.
+    8d7cb1daf4 dpif-netlink: Fix incorrect bit shift in compat mode.
+    d1cec26869 python: Use setuptools instead of distutils.
+    8d6ecb2592 packets: Re-calculate IPv6 checksum only for first frag upon modify.
+
+
+* Fri Jul 29 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-37
+- Merging upstream branch-2.17 [RH git: 083e7533dc]
+    Commit list:
+    26dbc822d3 test-ovsdb: Fix false-positive leaks from LeakSanitizer.
+    6eab10cf2c m4: Update ax_func_posix_memalign to the latest version.
+    2f51bfd23b m4: Replace obsolete AC_HELP_STRING with AS_HELP_STRING.
+    8ad325aab5 libopenvswitch.pc: Add missing libs for a static build.
+    b64ff3f480 rhel: Stop installing internal headers.
+    b63bbf2dba python-c-ext: Handle initialization failures.
+    4ad02ad047 netdev-linux: Do not touch LAG members if master is not attached to OVS.
+    e6dcd07bc2 netdev: Clear auto_classified if netdev reopened with the type specified.
+
+
+* Mon Jul 25 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-36
+- Merging upstream branch-2.17 [RH git: 73fb18f95f]
+    Commit list:
+    1eedf45e81 system-traffic: Properly stop dangling ping after geneve test.
+    fb8e34bdba conntrack: Fix conntrack multiple new state.
+    af37f41188 python-c-ext: Fix a couple of build warnings.
+    b7d9f76100 python-c-ext: Remove Python 2 support.
+
+
+* Tue Jul 19 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-35
+- Merging upstream branch-2.17 [RH git: 664435a0c0]
+    Commit list:
+    02fb4bfb87 netdev-offload-dpdk: Setting RSS hash types in RSS action.
+    8e8fcf7bda lib: Print nw_frag in flow key.
+    29d8ce1adc ovsdb: Remove extra make target dependency for local-config.5.
+
+
+* Thu Jul 14 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-34
+- Merging upstream branch-2.17 [RH git: 43bbc204f0]
+    Commit list:
+    13ac0bc7c6 tc: Fix misaligned access while creating pedit actions.
+
+
+* Tue Jul 12 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-33
+- Merging upstream branch-2.17 [RH git: 8edacddc16]
+    Commit list:
+    2c85d737a4 utilities/bashcomp: Fix incorrect file mode.
+
+
+* Fri Jul 08 2022 Timothy Redaelli <tredaelli@redhat.com> - 2.17.0-32
+- Fix REPO_URL [RH git: 3c45153e77]
+
+
 * Fri Jul 01 2022 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-31
 - Merging upstream branch-2.17 [RH git: f530505b5e]
     Commit list: