diff --git a/.openvswitch.metadata b/.openvswitch.metadata
new file mode 100644
index 0000000..c54e59a
--- /dev/null
+++ b/.openvswitch.metadata
@@ -0,0 +1,6 @@
+d34f96421a86004aa5d26ecf975edefd09f948b1  SOURCES/Pygments-1.4.tar.gz
+3a11f130c63b057532ca37fe49c8967d0cbae1d5  SOURCES/Sphinx-1.2.3.tar.gz
+002450621b33c5690060345b0aac25bc2426d675  SOURCES/docutils-0.12.tar.gz
+6fa486bc4c6af2bc692fe25f1d0e7737f05fa7d4  SOURCES/openvswitch-3.3.0.tar.gz
+8509a716f9f936526f64fb23f313c5a9baf2f123  SOURCES/pyelftools-0.27.tar.gz
+061198752d3d8b64d33113b7c8c1e272c973403d  SOURCES/dpdk-23.11.tar.xz
diff --git a/SOURCES/openvswitch-3.3.0.patch b/SOURCES/openvswitch-3.3.0.patch
new file mode 100644
index 0000000..ba72404
--- /dev/null
+++ b/SOURCES/openvswitch-3.3.0.patch
@@ -0,0 +1,38920 @@
+diff --git a/.ci/dpdk-build.sh b/.ci/dpdk-build.sh
+index 23f3166a54..698b9e1b14 100755
+--- a/.ci/dpdk-build.sh
++++ b/.ci/dpdk-build.sh
+@@ -40,7 +40,7 @@ function build_dpdk()
+     # any DPDK driver.
+     # check-dpdk unit tests require testpmd and some net/ driver.
+     DPDK_OPTS="$DPDK_OPTS -Denable_apps=test-pmd"
+-    enable_drivers="net/null,net/af_xdp,net/tap,net/virtio,net/pcap"
++    enable_drivers="net/null,net/af_xdp,net/tap,net/virtio"
+     DPDK_OPTS="$DPDK_OPTS -Denable_drivers=$enable_drivers"
+     # OVS depends on the vhost library (and its dependencies).
+     # net/tap depends on the gso library.
+diff --git a/.ci/dpdk-prepare.sh b/.ci/dpdk-prepare.sh
+index f7e6215dda..4424f9eb97 100755
+--- a/.ci/dpdk-prepare.sh
++++ b/.ci/dpdk-prepare.sh
+@@ -8,4 +8,4 @@ set -ev
+ #     https://github.com/pypa/pip/issues/10655
+ pip3 install --disable-pip-version-check --user wheel
+ pip3 install --disable-pip-version-check --user pyelftools
+-pip3 install --user  'meson==0.53.2'
++pip3 install --user  'meson>=1.4,<1.5'
+diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh
+index bf9d6241d5..702feeb3bb 100755
+--- a/.ci/linux-build.sh
++++ b/.ci/linux-build.sh
+@@ -25,7 +25,7 @@ function install_dpdk()
+     export PKG_CONFIG_PATH=$DPDK_LIB/pkgconfig/:$PKG_CONFIG_PATH
+ 
+     # Expose dpdk binaries.
+-    export PATH=$(pwd)/dpdk-dir/build/bin:$PATH
++    export PATH=$(pwd)/dpdk-dir/bin:$PATH
+ 
+     if [ ! -f "${VERSION_FILE}" ]; then
+         echo "Could not find DPDK in $DPDK_INSTALL_DIR"
+diff --git a/.ci/linux-prepare.sh b/.ci/linux-prepare.sh
+index 5028bdc442..5f8a1db6af 100755
+--- a/.ci/linux-prepare.sh
++++ b/.ci/linux-prepare.sh
+@@ -23,7 +23,7 @@ cd ..
+ #     https://github.com/pypa/pip/issues/10655
+ pip3 install --disable-pip-version-check --user wheel
+ pip3 install --disable-pip-version-check --user \
+-    flake8 'hacking>=3.0' netaddr pyparsing sarif-tools sphinx setuptools
++    flake8 netaddr pyparsing sarif-tools==2.0.0 sphinx setuptools
+ 
+ # Install python test dependencies
+ pip3 install -r python/test_requirements.txt
+diff --git a/.cirrus.yml b/.cirrus.yml
+index d8a9722809..d73154a971 100644
+--- a/.cirrus.yml
++++ b/.cirrus.yml
+@@ -2,8 +2,8 @@ freebsd_build_task:
+ 
+   freebsd_instance:
+     matrix:
+-      image_family: freebsd-13-2-snap
+-      image_family: freebsd-14-0-snap
++      image_family: freebsd-13-3-snap
++      image_family: freebsd-14-1-snap
+     cpu: 4
+     memory: 4G
+ 
+diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
+index fc75581486..fba2d16031 100644
+--- a/.github/workflows/build-and-test.yml
++++ b/.github/workflows/build-and-test.yml
+@@ -2,13 +2,16 @@ name: Build and Test
+ 
+ on: [push, pull_request]
+ 
++env:
++  python_default: 3.12
++
+ jobs:
+   build-dpdk:
+     env:
+-      dependencies: gcc libbpf-dev libnuma-dev libpcap-dev ninja-build pkgconf
++      dependencies: gcc libbpf-dev libnuma-dev ninja-build pkgconf
+       CC: gcc
+-      DPDK_GIT: https://dpdk.org/git/dpdk
+-      DPDK_VER: 23.11
++      DPDK_GIT: https://dpdk.org/git/dpdk-stable
++      DPDK_VER: 23.11.2
+     name: dpdk gcc
+     outputs:
+       dpdk_key: ${{ steps.gen_dpdk_key.outputs.key }}
+@@ -54,7 +57,7 @@ jobs:
+       if: steps.dpdk_cache.outputs.cache-hit != 'true'
+       uses: actions/setup-python@v5
+       with:
+-        python-version: '3.9'
++        python-version: ${{ env.python_default }}
+ 
+     - name: update APT cache
+       if: steps.dpdk_cache.outputs.cache-hit != 'true'
+@@ -76,8 +79,7 @@ jobs:
+     env:
+       dependencies: |
+         automake libtool gcc bc libjemalloc2 libjemalloc-dev libssl-dev \
+-        llvm-dev libnuma-dev libpcap-dev selinux-policy-dev libbpf-dev \
+-        lftp libreswan
++        llvm-dev libnuma-dev selinux-policy-dev libbpf-dev lftp libreswan
+       CC:          ${{ matrix.compiler }}
+       DPDK:        ${{ matrix.dpdk }}
+       DPDK_SHARED: ${{ matrix.dpdk_shared }}
+@@ -217,7 +219,7 @@ jobs:
+     - name: set up python
+       uses: actions/setup-python@v5
+       with:
+-        python-version: '3.9'
++        python-version: ${{ env.python_default }}
+ 
+     - name: cache
+       if:   matrix.dpdk != '' || matrix.dpdk_shared != ''
+@@ -268,8 +270,8 @@ jobs:
+     needs: build-dpdk
+     env:
+       dependencies: |
+-        automake bc clang-tools libbpf-dev libnuma-dev libpcap-dev \
+-        libunbound-dev libunwind-dev libssl-dev libtool llvm-dev
++        automake bc clang-tools libbpf-dev libnuma-dev libunbound-dev \
++        libunwind-dev libssl-dev libtool llvm-dev
+       CC:   clang
+       DPDK: dpdk
+       CLANG_ANALYZE: true
+@@ -346,7 +348,7 @@ jobs:
+     - name: set up python
+       uses: actions/setup-python@v5
+       with:
+-        python-version: '3.9'
++        python-version: ${{ env.python_default }}
+ 
+     - name: get cached dpdk-dir
+       uses: actions/cache/restore@v4
+@@ -399,7 +401,7 @@ jobs:
+     - name: set up python
+       uses: actions/setup-python@v5
+       with:
+-        python-version: '3.9'
++        python-version: ${{ env.python_default }}
+     - name: install dependencies
+       run:  brew install automake libtool
+     - name: prepare
+diff --git a/AUTHORS.rst b/AUTHORS.rst
+index aa9284fb16..fe4064ca71 100644
+--- a/AUTHORS.rst
++++ b/AUTHORS.rst
+@@ -245,6 +245,7 @@ Jon Kohler                         jon@nutanix.com
+ Jonathan Vestin                    jonavest@kau.se
+ Jorge Arturo Sauma Vargas          jorge.sauma@hpe.com
+ Jun Nakajima                       jun.nakajima@intel.com
++Jun Wang                           junwang01@cestc.cn
+ JunhanYan                          juyan@redhat.com
+ JunoZhu                            zhunatuzi@gmail.com
+ Justin Pettit                      jpettit@ovn.org
+@@ -588,6 +589,7 @@ David Evans                     davidjoshuaevans@gmail.com
+ David Palma                     palma@onesource.pt
+ David van Moolenbroek           dvmoolenbroek@aimvalley.nl
+ Derek Cormier                   derek.cormier@lab.ntt.co.jp
++Derrick Lim                     derrick.lim@rakuten.com
+ Dhaval Badiani                  dbadiani@vmware.com
+ DK Moon
+ Ding Zhi                        zhi.ding@6wind.com
+diff --git a/Documentation/conf.py b/Documentation/conf.py
+index 085ca2cd67..774eafdb76 100644
+--- a/Documentation/conf.py
++++ b/Documentation/conf.py
+@@ -12,6 +12,7 @@
+ # All configuration values have a default; values that are commented out
+ # serve to show the default.
+ 
++import os
+ import string
+ import sys
+ 
+@@ -108,6 +109,13 @@ html_logo = '_static/logo.png'
+ # so a file named "default.css" will overwrite the builtin "default.css".
+ html_static_path = ['_static']
+ 
++# Define the canonical URL for our domain configured on Read the Docs.
++html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
++
++# Tell Jinja2 templates the build is running on Read the Docs.
++html_context = {}
++if os.environ.get("READTHEDOCS", "") == "True":
++    html_context["READTHEDOCS"] = True
+ 
+ # -- Options for manual page output ---------------------------------------
+ 
+diff --git a/Documentation/faq/releases.rst b/Documentation/faq/releases.rst
+index 49b987b610..13cc635d04 100644
+--- a/Documentation/faq/releases.rst
++++ b/Documentation/faq/releases.rst
+@@ -216,11 +216,11 @@ Q: What DPDK version does each Open vSwitch release work with?
+     2.14.x       19.11.13
+     2.15.x       20.11.6
+     2.16.x       20.11.6
+-    2.17.x       21.11.6
+-    3.0.x        21.11.6
+-    3.1.x        22.11.4
+-    3.2.x        22.11.4
+-    3.3.x        23.11
++    2.17.x       21.11.8
++    3.0.x        21.11.8
++    3.1.x        22.11.6
++    3.2.x        22.11.6
++    3.3.x        23.11.2
+     ============ ========
+ 
+ Q: Are all the DPDK releases that OVS versions work with maintained?
+diff --git a/Documentation/intro/install/dpdk.rst b/Documentation/intro/install/dpdk.rst
+index ad9bdf22c0..e22b5676df 100644
+--- a/Documentation/intro/install/dpdk.rst
++++ b/Documentation/intro/install/dpdk.rst
+@@ -42,7 +42,7 @@ Build requirements
+ In addition to the requirements described in :doc:`general`, building Open
+ vSwitch with DPDK will require the following:
+ 
+-- DPDK 23.11
++- DPDK 23.11.2
+ 
+ - A `DPDK supported NIC`_
+ 
+@@ -73,9 +73,9 @@ Install DPDK
+ #. Download the `DPDK sources`_, extract the file and set ``DPDK_DIR``::
+ 
+        $ cd /usr/src/
+-       $ wget https://fast.dpdk.org/rel/dpdk-23.11.tar.xz
+-       $ tar xf dpdk-23.11.tar.xz
+-       $ export DPDK_DIR=/usr/src/dpdk-23.11
++       $ wget https://fast.dpdk.org/rel/dpdk-23.11.2.tar.xz
++       $ tar xf dpdk-23.11.2.tar.xz
++       $ export DPDK_DIR=/usr/src/dpdk-stable-23.11.2
+        $ cd $DPDK_DIR
+ 
+ #. Configure and install DPDK using Meson
+diff --git a/Documentation/intro/install/general.rst b/Documentation/intro/install/general.rst
+index 19e360d47c..7eb3a5d370 100644
+--- a/Documentation/intro/install/general.rst
++++ b/Documentation/intro/install/general.rst
+@@ -176,10 +176,7 @@ following to obtain better warnings:
+ 
+ - clang, version 3.4 or later
+ 
+-- flake8 along with the hacking flake8 plugin (for Python code). The automatic
+-  flake8 check that runs against Python code has some warnings enabled that
+-  come from the "hacking" flake8 plugin. If it's not installed, the warnings
+-  just won't occur until it's run on a system with "hacking" installed.
++- flake8 (for Python code)
+ 
+ - the python packages listed in "python/test_requirements.txt" (compatible
+   with pip). If they are installed, the pytest-based Python unit tests will
+diff --git a/Documentation/intro/install/windows.rst b/Documentation/intro/install/windows.rst
+index fce099d5dc..efdb8aebce 100644
+--- a/Documentation/intro/install/windows.rst
++++ b/Documentation/intro/install/windows.rst
+@@ -112,7 +112,7 @@ The following explains the steps in some detail.
+   `OpenSSL for Windows <https://wiki.openssl.org/index.php/Binaries>`__
+ 
+   Note down the directory where OpenSSL is installed (e.g.:
+-  ``C:/OpenSSL-Win32``) for later use.
++  ``C:/OpenSSL-Win64``) for later use.
+ 
+ .. note::
+ 
+@@ -182,7 +182,7 @@ To configure with SSL support, add the requisite additional options:
+        --localstatedir="C:/openvswitch/var"
+        --sysconfdir="C:/openvswitch/etc" \
+        --with-pthread="C:/pthread" \
+-       --enable-ssl --with-openssl="C:/OpenSSL-Win32"
++       --enable-ssl --with-openssl="C:/OpenSSL-Win64"
+ 
+ Finally, to the kernel module also:
+ 
+@@ -194,7 +194,7 @@ Finally, to the kernel module also:
+        --localstatedir="C:/openvswitch/var" \
+        --sysconfdir="C:/openvswitch/etc" \
+        --with-pthread="C:/pthread" \
+-       --enable-ssl --with-openssl="C:/OpenSSL-Win32" \
++       --enable-ssl --with-openssl="C:/OpenSSL-Win64" \
+        --with-vstudiotarget="<target type>" \
+        --with-vstudiotargetver="<target versions>"
+ 
+diff --git a/Documentation/ref/ovs-appctl.8.rst b/Documentation/ref/ovs-appctl.8.rst
+index 3ce02e9848..fceafea059 100644
+--- a/Documentation/ref/ovs-appctl.8.rst
++++ b/Documentation/ref/ovs-appctl.8.rst
+@@ -6,9 +6,9 @@ Synopsis
+ ========
+ 
+ ``ovs-appctl``
+-[``--target=``<target> | ``-t`` <target>]
+-[``--timeout=``<secs> | ``-T`` <secs>]
+-<command> [<arg>...]
++[``--target=``\ *target* | ``-t`` *target*]
++[``--timeout=``\ *secs* | ``-T`` *secs*]
++*command* [*arg* ``...``]
+ 
+ ``ovs-appctl --help``
+ 
+@@ -31,11 +31,11 @@ command and prints the daemon's response on standard output.
+ 
+ In normal use only a single option is accepted:
+ 
+-* ``-t`` <target> or ``--target`` <target>
++* ``-t`` *target* or ``--target=``\ *target*
+ 
+   Tells ``ovs-appctl`` which daemon to contact.
+ 
+-  If <target> begins with ``/`` it must name a Unix domain socket on
++  If *target* begins with ``/`` it must name a Unix domain socket on
+   which an Open vSwitch daemon is listening for control channel
+   connections.  By default, each daemon listens on a Unix domain socket
+   in the rundir (e.g. ``/run``) named ``<program>.<pid>.ctl``, where
+@@ -45,26 +45,26 @@ In normal use only a single option is accepted:
+ 
+   Otherwise, ``ovs-appctl`` looks in the rundir for a pidfile, that is,
+   a file whose contents are the process ID of a running process as a
+-  decimal number, named ``<target>.pid``.  (The ``--pidfile`` option
++  decimal number, named *target*\ ``.pid``.  (The ``--pidfile`` option
+   makes an Open vSwitch daemon create a pidfile.)  ``ovs-appctl`` reads
+   the pidfile, then looks in the rundir for a Unix socket named
+-  ``<target>.<pid>.ctl``, where <pid> is replaced by the process ID read
++  *target*\ ``.<pid>.ctl``, where <pid> is replaced by the process ID read
+   from the pidfile, and uses that file as if it had been specified
+   directly as the target.
+ 
+-  On Windows, <target> can be an absolute path to a file that contains a
++  On Windows, *target* can be an absolute path to a file that contains a
+   localhost TCP port on which an Open vSwitch daemon is listening for
+   control channel connections. By default, each daemon writes the TCP
+   port on which it is listening for control connection into the file
+-  ``<program>.ctl`` located inside the rundir. If <target> is not an
++  ``<program>.ctl`` located inside the rundir. If *target* is not an
+   absolute path, ``ovs-appctl`` looks in the rundir for a file named
+-  ``<target>.ctl``.  The default target is ``ovs-vswitchd``.
++  *target*\ ``.ctl``.  The default *target* is ``ovs-vswitchd``.
+ 
+-* ``-T <secs>`` or ``--timeout=<secs>``
++* ``-T`` *secs* or ``--timeout=``\ *secs*
+ 
+-  By default, or with a <secs> of ``0``, ``ovs-appctl`` waits forever to
++  By default, or with a *secs* of ``0``, ``ovs-appctl`` waits forever to
+   connect to the daemon and receive a response.  This option limits
+-  runtime to approximately <secs> seconds.  If the timeout expires,
++  runtime to approximately *secs* seconds.  If the timeout expires,
+   ``ovs-appctl`` exits with a ``SIGALRM`` signal.
+ 
+ Common Commands
+@@ -138,10 +138,10 @@ and adjusting log levels:
+ 
+   Lists logging pattern used for each destination.
+ 
+-* ``vlog/set`` [<spec>]
++* ``vlog/set`` [*spec*]
+ 
+-  Sets logging levels.  Without any <spec>, sets the log level for
+-  every module and destination to ``dbg``.  Otherwise, <spec> is a
++  Sets logging levels.  Without any *spec*, sets the log level for
++  every module and destination to ``dbg``.  Otherwise, *spec* is a
+   list of words separated by spaces or commas or colons, up to one from
+   each category below:
+ 
+@@ -153,7 +153,7 @@ and adjusting log levels:
+     change to only to the system log, to the console, or to a file,
+     respectively.
+ 
+-    On Windows platform, ``syslog`` is only useful if <target> was
++    On Windows platform, ``syslog`` is only useful if *target* was
+     started with the ``--syslog-target`` option (it has no effect
+     otherwise).
+ 
+@@ -162,20 +162,20 @@ and adjusting log levels:
+     will be logged, and messages of lower severity will be filtered out.
+     ``off`` filters out all messages.
+ 
+-  Case is not significant within <spec>.
++  Case is not significant within *spec*.
+ 
+   Regardless of the log levels set for ``file``, logging to a file
+   will not take place unless the target application was invoked with the
+   ``--log-file`` option.
+ 
+   For compatibility with older versions of OVS, ``any`` is accepted
+-  within <spec> but it has no effect.
++  within *spec* but it has no effect.
+ 
+-* ``vlog/set PATTERN:<destination>:<pattern>``
++* ``vlog/set PATTERN:``\ *destination*:*pattern*
+ 
+-  Sets the log pattern for <destination> to <pattern>.  Each time a
+-  message is logged to <destination>, <pattern> determines the
+-  message's formatting.  Most characters in <pattern> are copied
++  Sets the log pattern for *destination* to *pattern*.  Each time a
++  message is logged to *destination*, *pattern* determines the
++  message's formatting.  Most characters in *pattern* are copied
+   literally to the log, but special escapes beginning with ``%`` are
+   expanded as follows:
+ 
+@@ -194,13 +194,13 @@ and adjusting log levels:
+ 
+   * ``%d``
+ 
+-    The current date and time in ISO 8601 format (YYYY-MM-DD HH:MM:SS).
++    The current date and time in ISO 8601 format (``YYYY-MM-DD HH:MM:SS``).
+ 
+-  * ``%d{<format>}``
++  * ``%d{``\ *format*\ ``}``
+ 
+-    The current date and time in the specified <format>, which takes
+-    the same format as the <template> argument to ``strftime(3)``.  As
+-    an extension, any ``#`` characters in <format> will be replaced by
++    The current date and time in the specified *format*, which takes
++    the same format as the ``template`` argument to ``strftime(3)``.  As
++    an extension, any ``#`` characters in *format* will be replaced by
+     fractional seconds, e.g. use ``%H:%M:%S.###`` for the time to the
+     nearest millisecond.  Sub-second times are only approximate and
+     currently decimal places after the third will always be reported
+@@ -208,14 +208,14 @@ and adjusting log levels:
+ 
+   * ``%D``
+ 
+-    The current UTC date and time in ISO 8601 format (YYYY-MM-DD
+-    HH:MM:SS).
++    The current UTC date and time in ISO 8601 format
++    (``YYYY-MM-DD HH:MM:SS``).
+ 
+-  * ``%D{<format>}``
++  * ``%D{``\ *format*\ ``}``
+ 
+-    The current UTC date and time in the specified <format>, which
+-    takes the same format as the <template> argument to
+-    ``strftime``(3).  Supports the same extension for sub-second
++    The current UTC date and time in the specified *format*, which
++    takes the same format as the ``template`` argument to
++    ``strftime(3)``.  Supports the same extension for sub-second
+     resolution as ``%d{...}``.
+ 
+   * ``%E``
+@@ -279,22 +279,23 @@ and adjusting log levels:
+     Pad the field to the field width with ``0`` characters.  Padding
+     with spaces is the default.
+ 
+-  * <width>
++  * *width*
+ 
+     A number specifies the minimum field width.  If the escape expands
+-    to fewer characters than <width> then it is padded to fill the
+-    field width.  (A field wider than <width> is not truncated to
++    to fewer characters than *width* then it is padded to fill the
++    field width.  (A field wider than *width* is not truncated to
+     fit.)
+ 
+-  The default pattern for console and file output is ``%D{%Y-%m-%dT
+-  %H:%M:%SZ}|%05N|%c|%p|%m``; for syslog output, ``%05N|%c|%p|%m``.
++  The default pattern for console and file output is
++  ``%D{%Y-%m-%dT%H:%M:%SZ}|%05N|%c|%p|%m``; for syslog output,
++  ``%05N|%c|%p|%m``.
+ 
+   Daemons written in Python (e.g. ``ovs-monitor-ipsec``) do not allow
+   control over the log pattern.
+ 
+-* ``vlog/set FACILITY:<facility>``
++* ``vlog/set FACILITY:``\ *facility*
+ 
+-  Sets the RFC5424 facility of the log message. <facility> can be one
++  Sets the RFC5424 facility of the log message. *facility* can be one
+   of ``kern``, ``user``, ``mail``, ``daemon``, ``auth``, ``syslog``,
+   ``lpr``, ``news``, ``uucp``, ``clock``, ``ftp``, ``ntp``, ``audit``,
+   ``alert``, ``clock2``, ``local0``, ``local1``, ``local2``,
+diff --git a/Documentation/topics/dpdk/vhost-user.rst b/Documentation/topics/dpdk/vhost-user.rst
+index e952a686b5..89b2116734 100644
+--- a/Documentation/topics/dpdk/vhost-user.rst
++++ b/Documentation/topics/dpdk/vhost-user.rst
+@@ -312,7 +312,7 @@ predictable migration time. Mostly used as a second phase after the normal
+ 
+ More information can be found in QEMU `docs`_.
+ 
+-.. _`docs`: https://git.qemu.org/?p=qemu.git;a=blob;f=docs/devel/migration.rst
++.. _`docs`: https://www.qemu.org/docs/master/devel/migration/postcopy.html
+ 
+ Post-copy support may be enabled via a global config value
+ ``vhost-postcopy-support``. Setting this to ``true`` enables Post-copy support
+@@ -485,7 +485,7 @@ Sample XML
+       </devices>
+     </domain>
+ 
+-.. _QEMU documentation: http://git.qemu-project.org/?p=qemu.git;a=blob;f=docs/specs/vhost-user.txt;h=7890d7169;hb=HEAD
++.. _QEMU documentation: https://www.qemu.org/docs/master/interop/vhost-user.html
+ 
+ Jumbo Frames
+ ------------
+diff --git a/Makefile.am b/Makefile.am
+index 94f488d183..23c3417a70 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -8,6 +8,8 @@
+ AUTOMAKE_OPTIONS = foreign subdir-objects
+ ACLOCAL_AMFLAGS = -I m4
+ 
++AM_DISTCHECK_CONFIGURE_FLAGS = --with-version-suffix="$(VERSION_SUFFIX)"
++
+ AM_CPPFLAGS = $(SSL_CFLAGS)
+ AM_LDFLAGS = $(SSL_LDFLAGS)
+ AM_LDFLAGS += $(OVS_LDFLAGS)
+@@ -161,6 +163,7 @@ SUFFIXES += .in
+ 	    -e 's,[@]PYTHON3[@],$(PYTHON3),g' \
+ 	    -e 's,[@]RUNDIR[@],$(RUNDIR),g' \
+ 	    -e 's,[@]VERSION[@],$(VERSION),g' \
++	    -e 's,[@]VERSION_SUFFIX[@],$(VERSION_SUFFIX),g' \
+ 	    -e 's,[@]localstatedir[@],$(localstatedir),g' \
+ 	    -e 's,[@]pkgdatadir[@],$(pkgdatadir),g' \
+ 	    -e 's,[@]sysconfdir[@],$(sysconfdir),g' \
+@@ -400,16 +403,10 @@ ALL_LOCAL += flake8-check
+ #   F811 redefinition of unused <name> from line <N> (only from flake8 v2.0)
+ # D*** -- warnings from flake8-docstrings plugin
+ # H*** -- warnings from flake8 hacking plugin (custom style checks beyond PEP8)
+-#   H231 Python 3.x incompatible 'except x,y:' construct
+-#   H232 Python 3.x incompatible octal 077 should be written as 0o77
+-#   H233 Python 3.x incompatible use of print operator
+-#   H238 old style class declaration, use new style (inherit from `object`)
+-FLAKE8_SELECT = H231,H232,H233,H238
+ FLAKE8_IGNORE = E121,E123,E125,E126,E127,E128,E129,E131,E203,E722,W503,W504,F811,D,H,I
+ flake8-check: $(FLAKE8_PYFILES)
+ 	$(FLAKE8_WERROR)$(AM_V_GEN) \
+ 	  src='$^' && \
+-	  flake8 $$src --select=$(FLAKE8_SELECT) $(FLAKE8_FLAGS) && \
+ 	  flake8 $$src --ignore=$(FLAKE8_IGNORE) $(FLAKE8_FLAGS) && \
+ 	  touch $@
+ endif
+diff --git a/NEWS b/NEWS
+index 8888fb3ec5..2015657d12 100644
+--- a/NEWS
++++ b/NEWS
+@@ -1,3 +1,20 @@
++v3.3.3 - xx xxx xxxx
++--------------------
++   - DPDK:
++     * OVS validated with DPDK 23.11.2.
++
++v3.3.2 - 27 Aug 2024
++--------------------
++   - Bug fixes
++   - IPsec:
++     * Fixed compatibility between ovs-monitor-ipsec daemon and Libreswan 5.
++
++v3.3.1 - 07 Jun 2024
++--------------------
++   - Bug fixes
++   - DPDK:
++     * OVS validated with DPDK 23.11.1.
++
+ v3.3.0 - 16 Feb 2024
+ --------------------
+    - OVSDB:
+diff --git a/acinclude.m4 b/acinclude.m4
+index f1ba046c23..1ace70c92a 100644
+--- a/acinclude.m4
++++ b/acinclude.m4
+@@ -497,6 +497,18 @@ AC_DEFUN([OVS_CHECK_DPDK], [
+   AM_CONDITIONAL([DPDK_NETDEV], test "$DPDKLIB_FOUND" = true)
+ ])
+ 
++dnl Append a version suffix.
++
++AC_DEFUN([OVS_CHECK_VERSION_SUFFIX], [
++  AC_ARG_WITH([version-suffix],
++              [AS_HELP_STRING([--with-version-suffix=ver_suffix],
++                              [Specify a string that will be appended
++                               to OVS version])])
++  AC_DEFINE_UNQUOTED([VERSION_SUFFIX], ["$with_version_suffix"],
++                     [Package version suffix])
++  AC_SUBST([VERSION_SUFFIX], [$with_version_suffix])
++])
++
+ dnl Checks for net/if_dl.h.
+ dnl
+ dnl (We use this as a proxy for checking whether we're building on FreeBSD
+diff --git a/configure.ac b/configure.ac
+index 05afbb9cc8..f4e75d3c70 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -13,7 +13,7 @@
+ # limitations under the License.
+ 
+ AC_PREREQ(2.63)
+-AC_INIT(openvswitch, 3.3.0, bugs@openvswitch.org)
++AC_INIT(openvswitch, 3.3.3, bugs@openvswitch.org)
+ AC_CONFIG_SRCDIR([vswitchd/ovs-vswitchd.c])
+ AC_CONFIG_MACRO_DIR([m4])
+ AC_CONFIG_AUX_DIR([build-aux])
+@@ -202,6 +202,7 @@ OVS_CHECK_LINUX_SCTP_CT
+ OVS_CHECK_LINUX_VIRTIO_TYPES
+ OVS_CHECK_DPDK
+ OVS_CHECK_PRAGMA_MESSAGE
++OVS_CHECK_VERSION_SUFFIX
+ AC_SUBST([CFLAGS])
+ AC_SUBST([OVS_CFLAGS])
+ AC_SUBST([OVS_LDFLAGS])
+diff --git a/debian/changelog b/debian/changelog
+index 2049ddaa26..0bae589a49 100644
+--- a/debian/changelog
++++ b/debian/changelog
+@@ -1,3 +1,21 @@
++openvswitch (3.3.3-1) unstable; urgency=low
++   [ Open vSwitch team ]
++   * New upstream version
++
++ -- Open vSwitch team <dev@openvswitch.org>  Tue, 27 Aug 2024 14:33:26 +0200
++
++openvswitch (3.3.2-1) unstable; urgency=low
++   [ Open vSwitch team ]
++   * New upstream version
++
++ -- Open vSwitch team <dev@openvswitch.org>  Tue, 27 Aug 2024 14:33:26 +0200
++
++openvswitch (3.3.1-1) unstable; urgency=low
++   [ Open vSwitch team ]
++   * New upstream version
++
++ -- Open vSwitch team <dev@openvswitch.org>  Fri, 07 Jun 2024 15:58:27 +0200
++
+ openvswitch (3.3.0-1) unstable; urgency=low
+ 
+    * New upstream version
+diff --git a/include/openvswitch/compiler.h b/include/openvswitch/compiler.h
+index 878c5c6a70..ecb91801cc 100644
+--- a/include/openvswitch/compiler.h
++++ b/include/openvswitch/compiler.h
+@@ -69,6 +69,17 @@
+ #define OVS_UNLIKELY(CONDITION) (!!(CONDITION))
+ #endif
+ 
++/* Clang 17's implementation of ubsan enables checking that function pointers
++ * match the type of the called function.  This currently breaks ovs-rcu, which
++ * calls multiple different types of callbacks via a generic void *(void*)
++ * function pointer type.  This macro allows disabling that check for specific
++ * functions. */
++#if __clang__ && __has_feature(undefined_behavior_sanitizer)
++#define OVS_NO_SANITIZE_FUNCTION __attribute__((no_sanitize("function")))
++#else
++#define OVS_NO_SANITIZE_FUNCTION
++#endif
++
+ #if __has_feature(c_thread_safety_attributes)
+ /* "clang" annotations for thread safety check.
+  *
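Note on the OVS_NO_SANITIZE_FUNCTION hunk above: the attribute belongs on the function that performs the indirect call, since that is where clang's UBSan instruments the function-type check. A minimal sketch of the intended usage, with illustrative names that are not part of this patch:

    typedef void (*generic_cb)(void *);

    /* Without the annotation, clang >= 17 with UBSan reports a runtime
     * error here whenever 'cb' was cast from a function with a different
     * prototype -- the ovs-rcu callback situation described above. */
    static void OVS_NO_SANITIZE_FUNCTION
    run_cb(generic_cb cb, void *aux)
    {
        cb(aux);
    }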
+diff --git a/include/openvswitch/meta-flow.h b/include/openvswitch/meta-flow.h
+index 3b0220aaa2..fb7d17ebe7 100644
+--- a/include/openvswitch/meta-flow.h
++++ b/include/openvswitch/meta-flow.h
+@@ -2305,6 +2305,7 @@ void mf_set_flow_value_masked(const struct mf_field *,
+                               const union mf_value *mask,
+                               struct flow *);
+ bool mf_is_tun_metadata(const struct mf_field *);
++bool mf_is_any_metadata(const struct mf_field *);
+ bool mf_is_frozen_metadata(const struct mf_field *);
+ bool mf_is_pipeline_field(const struct mf_field *);
+ bool mf_is_set(const struct mf_field *, const struct flow *);
+diff --git a/include/openvswitch/version.h.in b/include/openvswitch/version.h.in
+index 23d8fde4f1..231f61e30c 100644
+--- a/include/openvswitch/version.h.in
++++ b/include/openvswitch/version.h.in
+@@ -19,7 +19,7 @@
+ #define OPENVSWITCH_VERSION_H 1
+ 
+ #define OVS_PACKAGE_STRING  "@PACKAGE_STRING@"
+-#define OVS_PACKAGE_VERSION "@PACKAGE_VERSION@"
++#define OVS_PACKAGE_VERSION "@PACKAGE_VERSION@@VERSION_SUFFIX@"
+ 
+ #define OVS_LIB_VERSION     @LT_CURRENT@
+ #define OVS_LIB_REVISION    @LT_REVISION@
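Together with the acinclude.m4 and Makefile.am hunks above, a suffix given at configure time is compiled into the public version macro. For example, after ./configure --with-version-suffix=-99 (the suffix value is illustrative), the generated header behaves like this standalone approximation:

    #include <stdio.h>

    /* What "@PACKAGE_VERSION@@VERSION_SUFFIX@" expands to after
     * substitution; the concrete string is illustrative. */
    #define OVS_PACKAGE_VERSION "3.3.3-99"

    int main(void)
    {
        printf("Open vSwitch %s\n", OVS_PACKAGE_VERSION);
        return 0;
    }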
+diff --git a/include/sparse/automake.mk b/include/sparse/automake.mk
+index c1229870bb..45e6202c52 100644
+--- a/include/sparse/automake.mk
++++ b/include/sparse/automake.mk
+@@ -1,5 +1,6 @@
+ noinst_HEADERS += \
+         include/sparse/rte_byteorder.h \
++        include/sparse/immintrin.h \
+         include/sparse/xmmintrin.h \
+         include/sparse/arpa/inet.h \
+         include/sparse/bits/floatn.h \
+diff --git a/include/sparse/immintrin.h b/include/sparse/immintrin.h
+new file mode 100644
+index 0000000000..9a23d7f746
+--- /dev/null
++++ b/include/sparse/immintrin.h
+@@ -0,0 +1,34 @@
++/* Copyright (c) 2024 Red Hat, Inc.
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at:
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++#ifndef __CHECKER__
++#error "Use this header only with sparse.  It is not a correct implementation."
++#endif
++
++/* Sparse doesn't know some types used by AVX512 and some other headers.
++ * Mark those headers as already included to avoid failures.  This is fragile,
++ * so may need adjustments with compiler changes. */
++#define _AVX512BF16INTRIN_H_INCLUDED
++#define _AVX512BF16VLINTRIN_H_INCLUDED
++#define _AVXNECONVERTINTRIN_H_INCLUDED
++#define _KEYLOCKERINTRIN_H_INCLUDED
++#define __AVX512FP16INTRIN_H_INCLUDED
++#define __AVX512FP16VLINTRIN_H_INCLUDED
++/* GCC >=14 changed the '__AVX512FP16INTRIN_H_INCLUDED' to have only single
++ * underscore.  We need both to keep compatibility between various GCC
++ * versions. */
++#define _AVX512FP16INTRIN_H_INCLUDED
++
++#include_next <immintrin.h>
+diff --git a/ipsec/ovs-monitor-ipsec.in b/ipsec/ovs-monitor-ipsec.in
+index 7945162f9f..37c509ac68 100755
+--- a/ipsec/ovs-monitor-ipsec.in
++++ b/ipsec/ovs-monitor-ipsec.in
+@@ -457,14 +457,36 @@ conn prevent_unencrypted_vxlan
+     CERTKEY_PREFIX = "ovs_certkey_"
+ 
+     def __init__(self, libreswan_root_prefix, args):
++        # Collect version information
++        self.IPSEC = libreswan_root_prefix + "/usr/sbin/ipsec"
++        self.IPSEC_AUTO = [self.IPSEC]
++        proc = subprocess.Popen([self.IPSEC, "--version"],
++                                stdout=subprocess.PIPE,
++                                encoding="latin1")
++        pout, perr = proc.communicate()
++
++        v = re.match("^Libreswan v?(.*)$", pout)
++        try:
++            version = int(v.group(1).split(".")[0])
++        except (AttributeError, ValueError):
++            version = 0
++
++        if version < 5:
++            # Libreswan 5 removed the "auto" command; however, it is
++            # still required for older versions.
++            self.IPSEC_AUTO.append("auto")
++
++        if version >= 4:
++            ipsec_d = args.ipsec_d if args.ipsec_d else "/var/lib/ipsec/nss"
++        else:
++            ipsec_d = args.ipsec_d if args.ipsec_d else "/etc/ipsec.d"
++
+         ipsec_conf = args.ipsec_conf if args.ipsec_conf else "/etc/ipsec.conf"
+-        ipsec_d = args.ipsec_d if args.ipsec_d else "/etc/ipsec.d"
+         ipsec_secrets = (args.ipsec_secrets if args.ipsec_secrets
+                         else "/etc/ipsec.secrets")
+         ipsec_ctl = (args.ipsec_ctl if args.ipsec_ctl
+                         else "/run/pluto/pluto.ctl")
+ 
+-        self.IPSEC = libreswan_root_prefix + "/usr/sbin/ipsec"
+         self.IPSEC_CONF = libreswan_root_prefix + ipsec_conf
+         self.IPSEC_SECRETS = libreswan_root_prefix + ipsec_secrets
+         self.IPSEC_D = "sql:" + libreswan_root_prefix + ipsec_d
+@@ -577,7 +599,7 @@ conn prevent_unencrypted_vxlan
+ 
+     def refresh(self, monitor):
+         vlog.info("Refreshing LibreSwan configuration")
+-        subprocess.call([self.IPSEC, "auto", "--ctlsocket", self.IPSEC_CTL,
++        subprocess.call(self.IPSEC_AUTO + ["--ctlsocket", self.IPSEC_CTL,
+                         "--config", self.IPSEC_CONF, "--rereadsecrets"])
+         tunnels = set(monitor.tunnels.keys())
+ 
+@@ -605,7 +627,7 @@ conn prevent_unencrypted_vxlan
+ 
+                 if not tunnel or tunnel.version != ver:
+                     vlog.info("%s is outdated %u" % (conn, ver))
+-                    subprocess.call([self.IPSEC, "auto", "--ctlsocket",
++                    subprocess.call(self.IPSEC_AUTO + ["--ctlsocket",
+                                     self.IPSEC_CTL, "--config",
+                                     self.IPSEC_CONF, "--delete", conn])
+                 elif ifname in tunnels:
+@@ -627,44 +649,44 @@ conn prevent_unencrypted_vxlan
+         # Update shunt policy if changed
+         if monitor.conf_in_use["skb_mark"] != monitor.conf["skb_mark"]:
+             if monitor.conf["skb_mark"]:
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--add",
+                             "--asynchronous", "prevent_unencrypted_gre"])
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--add",
+                             "--asynchronous", "prevent_unencrypted_geneve"])
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--add",
+                             "--asynchronous", "prevent_unencrypted_stt"])
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--add",
+                             "--asynchronous", "prevent_unencrypted_vxlan"])
+             else:
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--delete",
+                             "--asynchronous", "prevent_unencrypted_gre"])
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--delete",
+                             "--asynchronous", "prevent_unencrypted_geneve"])
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--delete",
+                             "--asynchronous", "prevent_unencrypted_stt"])
+-                subprocess.call([self.IPSEC, "auto",
+-                            "--config", self.IPSEC_CONF,
++                subprocess.call(self.IPSEC_AUTO +
++                            ["--config", self.IPSEC_CONF,
+                             "--ctlsocket", self.IPSEC_CTL,
+                             "--delete",
+                             "--asynchronous", "prevent_unencrypted_vxlan"])
+@@ -710,8 +732,8 @@ conn prevent_unencrypted_vxlan
+         # the "ipsec auto --start" command is lost. Just retry to make sure
+         # the command is received by LibreSwan.
+         while True:
+-            proc = subprocess.Popen([self.IPSEC, "auto",
+-                                    "--config", self.IPSEC_CONF,
++            proc = subprocess.Popen(self.IPSEC_AUTO +
++                                    ["--config", self.IPSEC_CONF,
+                                     "--ctlsocket", self.IPSEC_CTL,
+                                     "--start",
+                                     "--asynchronous", conn],
+diff --git a/lib/bfd.c b/lib/bfd.c
+index 9af258917b..b8149e7897 100644
+--- a/lib/bfd.c
++++ b/lib/bfd.c
+@@ -1130,10 +1130,11 @@ bfd_set_state(struct bfd *bfd, enum state state, enum diag diag)
+         if (!VLOG_DROP_INFO(&rl)) {
+             struct ds ds = DS_EMPTY_INITIALIZER;
+ 
+-            ds_put_format(&ds, "%s: BFD state change: %s->%s"
+-                          " \"%s\"->\"%s\".\n",
++            ds_put_format(&ds, "%s: BFD state change: (bfd.SessionState: %s,"
++                          " bfd.LocalDiag: \"%s\") -> (bfd.SessionState: %s,"
++                          " bfd.LocalDiag: \"%s\")\n",
+                           bfd->name, bfd_state_str(bfd->state),
+-                          bfd_state_str(state), bfd_diag_str(bfd->diag),
++                          bfd_diag_str(bfd->diag), bfd_state_str(state),
+                           bfd_diag_str(diag));
+             bfd_put_details(&ds, bfd);
+             VLOG_INFO("%s", ds_cstr(&ds));
+diff --git a/lib/conntrack.c b/lib/conntrack.c
+index 013709bd62..cf6e2919ba 100644
+--- a/lib/conntrack.c
++++ b/lib/conntrack.c
+@@ -941,6 +941,18 @@ conn_not_found(struct conntrack *ct, struct dp_packet *pkt,
+             nc->parent_key = alg_exp->parent_key;
+         }
+ 
++        ovs_mutex_init_adaptive(&nc->lock);
++        atomic_flag_clear(&nc->reclaimed);
++        fwd_key_node->dir = CT_DIR_FWD;
++        rev_key_node->dir = CT_DIR_REV;
++
++        if (zl) {
++            nc->admit_zone = zl->czl.zone;
++            nc->zone_limit_seq = zl->czl.zone_limit_seq;
++        } else {
++            nc->admit_zone = INVALID_ZONE;
++        }
++
+         if (nat_action_info) {
+             nc->nat_action = nat_action_info->nat_action;
+ 
+@@ -965,21 +977,15 @@ conn_not_found(struct conntrack *ct, struct dp_packet *pkt,
+             cmap_insert(&ct->conns, &rev_key_node->cm_node, rev_hash);
+         }
+ 
+-        ovs_mutex_init_adaptive(&nc->lock);
+-        atomic_flag_clear(&nc->reclaimed);
+-        fwd_key_node->dir = CT_DIR_FWD;
+-        rev_key_node->dir = CT_DIR_REV;
+         cmap_insert(&ct->conns, &fwd_key_node->cm_node, ctx->hash);
+         conn_expire_push_front(ct, nc);
+         atomic_count_inc(&ct->n_conn);
+-        ctx->conn = nc; /* For completeness. */
++
+         if (zl) {
+-            nc->admit_zone = zl->czl.zone;
+-            nc->zone_limit_seq = zl->czl.zone_limit_seq;
+             atomic_count_inc(&zl->czl.count);
+-        } else {
+-            nc->admit_zone = INVALID_ZONE;
+         }
++
++        ctx->conn = nc; /* For completeness. */
+     }
+ 
+     return nc;
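The reordering in the two conn_not_found() hunks above follows the init-before-publish rule: every field of the new connection, including its mutex, is initialized while the object is still private, and cmap_insert() runs last, because a concurrent reader may use the entry the moment it is reachable from the cmap. A standalone sketch of the pattern (types and names are illustrative, not the OVS API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct entry {
        pthread_mutex_t lock;
        int zone;
    };

    static _Atomic(struct entry *) published;

    static void
    publish_entry(int zone)
    {
        struct entry *e = calloc(1, sizeof *e);

        if (!e) {
            return;
        }
        /* Initialize everything while 'e' is still private... */
        pthread_mutex_init(&e->lock, NULL);
        e->zone = zone;
        /* ...and publish last; other threads may use 'e' from now on. */
        atomic_store_explicit(&published, e, memory_order_release);
    }

    int main(void)
    {
        publish_entry(42);
        return 0;
    }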
+@@ -2290,7 +2296,9 @@ find_addr(const struct conn_key *key, union ct_addr *min,
+           uint32_t hash, bool ipv4,
+           const struct nat_action_info_t *nat_info)
+ {
+-    const union ct_addr zero_ip = {0};
++    union ct_addr zero_ip;
++
++    memset(&zero_ip, 0, sizeof zero_ip);
+ 
+     /* All-zero case. */
+     if (!memcmp(min, &zero_ip, sizeof *min)) {
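The switch from a {0} initializer to memset() in find_addr() is deliberate: zero-initializing a union is only guaranteed to zero its first member, so the bytes of a larger member may remain indeterminate and the bytewise memcmp() against 'zero_ip' could misfire. A standalone illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    union addr {
        unsigned int ipv4;      /* First (smaller) member. */
        unsigned char ipv6[16];
    };

    int main(void)
    {
        union addr a = {0};     /* Zeroes 'ipv4'; the rest is unspecified. */
        union addr b;

        memset(&b, 0, sizeof b);    /* Zeroes every byte of the union. */

        /* Comparing whole unions bytewise is only safe after memset(). */
        printf("%d\n", memcmp(&a, &b, sizeof a));
        return 0;
    }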
+@@ -2382,14 +2390,18 @@ nat_get_unique_tuple(struct conntrack *ct, struct conn *conn,
+ {
+     struct conn_key *fwd_key = &conn->key_node[CT_DIR_FWD].key;
+     struct conn_key *rev_key = &conn->key_node[CT_DIR_REV].key;
+-    union ct_addr min_addr = {0}, max_addr = {0}, addr = {0};
+     bool pat_proto = fwd_key->nw_proto == IPPROTO_TCP ||
+                      fwd_key->nw_proto == IPPROTO_UDP ||
+                      fwd_key->nw_proto == IPPROTO_SCTP;
+     uint16_t min_dport, max_dport, curr_dport;
+     uint16_t min_sport, max_sport, curr_sport;
++    union ct_addr min_addr, max_addr, addr;
+     uint32_t hash;
+ 
++    memset(&min_addr, 0, sizeof min_addr);
++    memset(&max_addr, 0, sizeof max_addr);
++    memset(&addr, 0, sizeof addr);
++
+     hash = nat_range_hash(fwd_key, ct->hash_basis, nat_info);
+     min_addr = nat_info->min_addr;
+     max_addr = nat_info->max_addr;
+@@ -2572,7 +2584,9 @@ tuple_to_conn_key(const struct ct_dpif_tuple *tuple, uint16_t zone,
+         key->src.icmp_type = tuple->icmp_type;
+         key->src.icmp_code = tuple->icmp_code;
+         key->dst.icmp_id = tuple->icmp_id;
+-        key->dst.icmp_type = reverse_icmp_type(tuple->icmp_type);
++        key->dst.icmp_type = (tuple->ip_proto == IPPROTO_ICMP)
++                             ? reverse_icmp_type(tuple->icmp_type)
++                             : reverse_icmp6_type(tuple->icmp_type);
+         key->dst.icmp_code = tuple->icmp_code;
+     } else {
+         key->src.port = tuple->src_port;
+@@ -2637,25 +2651,19 @@ conntrack_dump_start(struct conntrack *ct, struct conntrack_dump *dump,
+ 
+     dump->ct = ct;
+     *ptot_bkts = 1; /* Need to clean up the callers. */
++    dump->cursor = cmap_cursor_start(&ct->conns);
+     return 0;
+ }
+ 
+ int
+ conntrack_dump_next(struct conntrack_dump *dump, struct ct_dpif_entry *entry)
+ {
+-    struct conntrack *ct = dump->ct;
+     long long now = time_msec();
+ 
+-    for (;;) {
+-        struct cmap_node *cm_node = cmap_next_position(&ct->conns,
+-                                                       &dump->cm_pos);
+-        if (!cm_node) {
+-            break;
+-        }
+-        struct conn_key_node *keyn;
+-        struct conn *conn;
++    struct conn_key_node *keyn;
++    struct conn *conn;
+ 
+-        INIT_CONTAINER(keyn, cm_node, cm_node);
++    CMAP_CURSOR_FOR_EACH_CONTINUE (keyn, cm_node, &dump->cursor) {
+         if (keyn->dir != CT_DIR_FWD) {
+             continue;
+         }
+diff --git a/lib/conntrack.h b/lib/conntrack.h
+index 0a888be455..6339701627 100644
+--- a/lib/conntrack.h
++++ b/lib/conntrack.h
+@@ -101,8 +101,8 @@ struct conntrack_dump {
+     struct conntrack *ct;
+     unsigned bucket;
+     union {
+-        struct cmap_position cm_pos;
+         struct hmap_position hmap_pos;
++        struct cmap_cursor cursor;
+     };
+     bool filter_zone;
+     uint16_t zone;
+diff --git a/lib/dp-packet.c b/lib/dp-packet.c
+index 305822293b..df7bf8e6b3 100644
+--- a/lib/dp-packet.c
++++ b/lib/dp-packet.c
+@@ -592,6 +592,18 @@ dp_packet_ol_send_prepare(struct dp_packet *p, uint64_t flags)
+     if (dp_packet_hwol_is_tunnel_geneve(p) ||
+         dp_packet_hwol_is_tunnel_vxlan(p)) {
+         tnl_inner = true;
++
++        /* If the TX interface doesn't support UDP tunnel offload but does
++         * support inner checksum offload and an outer UDP checksum is
++         * required, then we can't offload the inner checksum either, as
++         * that would invalidate the outer checksum. */
++        if (!(flags & NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
++                dp_packet_hwol_is_outer_udp_cksum(p)) {
++            flags &= ~(NETDEV_TX_OFFLOAD_TCP_CKSUM |
++                       NETDEV_TX_OFFLOAD_UDP_CKSUM |
++                       NETDEV_TX_OFFLOAD_SCTP_CKSUM |
++                       NETDEV_TX_OFFLOAD_IPV4_CKSUM);
++        }
+     }
+ 
+     if (dp_packet_hwol_tx_ip_csum(p)) {
+diff --git a/lib/dp-packet.h b/lib/dp-packet.h
+index 2fa17d8140..e816b9f20b 100644
+--- a/lib/dp-packet.h
++++ b/lib/dp-packet.h
+@@ -529,6 +529,16 @@ dp_packet_inner_l3(const struct dp_packet *b)
+            : NULL;
+ }
+ 
++static inline size_t
++dp_packet_inner_l3_size(const struct dp_packet *b)
++{
++    return OVS_LIKELY(b->inner_l3_ofs != UINT16_MAX)
++           ? (const char *) dp_packet_tail(b)
++           - (const char *) dp_packet_inner_l3(b)
++           - dp_packet_l2_pad_size(b)
++           : 0;
++}
++
+ static inline void *
+ dp_packet_inner_l4(const struct dp_packet *b)
+ {
+@@ -604,25 +614,6 @@ dp_packet_get_nd_payload(const struct dp_packet *b)
+ }
+ 
+ #ifdef DPDK_NETDEV
+-static inline void
+-dp_packet_set_l2_len(struct dp_packet *b, size_t l2_len)
+-{
+-    b->mbuf.l2_len = l2_len;
+-}
+-
+-static inline void
+-dp_packet_set_l3_len(struct dp_packet *b, size_t l3_len)
+-{
+-    b->mbuf.l3_len = l3_len;
+-}
+-
+-static inline void
+-dp_packet_set_l4_len(struct dp_packet *b, size_t l4_len)
+-{
+-    b->mbuf.l4_len = l4_len;
+-}
+-
+-
+ static inline uint64_t *
+ dp_packet_ol_flags_ptr(const struct dp_packet *b)
+ {
+@@ -642,24 +633,6 @@ dp_packet_flow_mark_ptr(const struct dp_packet *b)
+ }
+ 
+ #else
+-static inline void
+-dp_packet_set_l2_len(struct dp_packet *b OVS_UNUSED, size_t l2_len OVS_UNUSED)
+-{
+-    /* There is no implementation. */
+-}
+-
+-static inline void
+-dp_packet_set_l3_len(struct dp_packet *b OVS_UNUSED, size_t l3_len OVS_UNUSED)
+-{
+-    /* There is no implementation. */
+-}
+-
+-static inline void
+-dp_packet_set_l4_len(struct dp_packet *b OVS_UNUSED, size_t l4_len OVS_UNUSED)
+-{
+-    /* There is no implementation. */
+-}
+-
+ static inline uint32_t *
+ dp_packet_ol_flags_ptr(const struct dp_packet *b)
+ {
+@@ -1300,6 +1273,14 @@ dp_packet_hwol_set_tunnel_vxlan(struct dp_packet *b)
+     *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TUNNEL_VXLAN;
+ }
+ 
++/* Clears tunnel offloading marks. */
++static inline void
++dp_packet_hwol_reset_tunnel(struct dp_packet *b)
++{
++    *dp_packet_ol_flags_ptr(b) &= ~(DP_PACKET_OL_TX_TUNNEL_VXLAN |
++                                    DP_PACKET_OL_TX_TUNNEL_GENEVE);
++}
++
+ /* Mark packet 'b' as a tunnel packet with outer IPv4 header. */
+ static inline void
+ dp_packet_hwol_set_tx_outer_ipv4(struct dp_packet *b)
+@@ -1419,11 +1400,26 @@ dp_packet_hwol_l3_ipv4(const struct dp_packet *b)
+ static inline void
+ dp_packet_ip_set_header_csum(struct dp_packet *p, bool inner)
+ {
+-    struct ip_header *ip = (inner) ? dp_packet_inner_l3(p) : dp_packet_l3(p);
++    struct ip_header *ip;
++    size_t l3_size;
++    size_t ip_len;
++
++    if (inner) {
++        ip = dp_packet_inner_l3(p);
++        l3_size = dp_packet_inner_l3_size(p);
++    } else {
++        ip = dp_packet_l3(p);
++        l3_size = dp_packet_l3_size(p);
++    }
+ 
+     ovs_assert(ip);
+-    ip->ip_csum = 0;
+-    ip->ip_csum = csum(ip, sizeof *ip);
++
++    ip_len = IP_IHL(ip->ip_ihl_ver) * 4;
++
++    if (OVS_LIKELY(ip_len >= IP_HEADER_LEN && ip_len < l3_size)) {
++        ip->ip_csum = 0;
++        ip->ip_csum = csum(ip, ip_len);
++    }
+ }
+ 
+ /* Returns 'true' if the packet 'p' has good integrity and the
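dp_packet_ip_set_header_csum() above now checksums IP_IHL(ip->ip_ihl_ver) * 4 bytes instead of a fixed sizeof(struct ip_header), so IPv4 options are covered and headers with a bogus IHL are skipped. A standalone sketch of the length computation (IP_IHL mirrors the macro in OVS's lib/packets.h):

    #include <stdint.h>
    #include <stdio.h>

    #define IP_IHL(ip_ihl_ver) ((ip_ihl_ver) & 15)
    #define IP_HEADER_LEN 20

    int main(void)
    {
        uint8_t ip_ihl_ver = 0x46;               /* IPv4, IHL = 6 words. */
        size_t ip_len = IP_IHL(ip_ihl_ver) * 4;  /* 24 = 20 + 4 option bytes. */

        /* The old csum(ip, sizeof *ip) covered only 20 bytes and would
         * produce a wrong checksum for a header like this one. */
        if (ip_len >= IP_HEADER_LEN) {
            printf("checksum over %zu bytes\n", ip_len);
        }
        return 0;
    }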
+diff --git a/lib/dpctl.c b/lib/dpctl.c
+index 34ee7d0e2d..cdff4206e2 100644
+--- a/lib/dpctl.c
++++ b/lib/dpctl.c
+@@ -738,8 +738,8 @@ show_dpif(struct dpif *dpif, struct dpctl_params *dpctl_p)
+                 continue;
+             }
+             error = netdev_get_stats(netdev, &s);
++            netdev_close(netdev);
+             if (!error) {
+-                netdev_close(netdev);
+                 print_stat(dpctl_p, "    RX packets:", s.rx_packets);
+                 print_stat(dpctl_p, " errors:", s.rx_errors);
+                 print_stat(dpctl_p, " dropped:", s.rx_dropped);
+@@ -1359,19 +1359,17 @@ static int
+ dpctl_del_flow_dpif(struct dpif *dpif, const char *key_s,
+                     struct dpctl_params *dpctl_p)
+ {
++    struct dpif_port_dump port_dump;
+     struct dpif_flow_stats stats;
++    bool ufid_generated = false;
+     struct dpif_port dpif_port;
+-    struct dpif_port_dump port_dump;
+-    struct ofpbuf key;
++    bool ufid_present = false;
++    struct simap port_names;
+     struct ofpbuf mask; /* To be ignored. */
+-
++    struct ofpbuf key;
+     ovs_u128 ufid;
+-    bool ufid_generated;
+-    bool ufid_present;
+-    struct simap port_names;
+     int n, error;
+ 
+-    ufid_present = false;
+     n = odp_ufid_from_string(key_s, &ufid);
+     if (n < 0) {
+         dpctl_error(dpctl_p, -n, "parsing flow ufid");
+diff --git a/lib/dpdk.c b/lib/dpdk.c
+index d76d53f8f1..940c43c070 100644
+--- a/lib/dpdk.c
++++ b/lib/dpdk.c
+@@ -337,7 +337,9 @@ dpdk_init__(const struct smap *ovs_other_config)
+     }
+ #endif
+ 
+-    if (args_contains(&args, "-c") || args_contains(&args, "-l")) {
++    if (args_contains(&args, "-c") ||
++        args_contains(&args, "-l") ||
++        args_contains(&args, "--lcores")) {
+         auto_determine = false;
+     }
+ 
+diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
+index 46e24d204d..99ff9b3693 100644
+--- a/lib/dpif-netdev.c
++++ b/lib/dpif-netdev.c
+@@ -115,6 +115,7 @@ COVERAGE_DEFINE(datapath_drop_lock_error);
+ COVERAGE_DEFINE(datapath_drop_userspace_action_error);
+ COVERAGE_DEFINE(datapath_drop_tunnel_push_error);
+ COVERAGE_DEFINE(datapath_drop_tunnel_pop_error);
++COVERAGE_DEFINE(datapath_drop_tunnel_tso_recirc);
+ COVERAGE_DEFINE(datapath_drop_recirc_error);
+ COVERAGE_DEFINE(datapath_drop_invalid_port);
+ COVERAGE_DEFINE(datapath_drop_invalid_bond);
+@@ -8912,6 +8913,34 @@ static void
+ dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
+                       struct dp_packet_batch *packets)
+ {
++    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
++    size_t i, size = dp_packet_batch_size(packets);
++    struct dp_packet *packet;
++
++    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, packets) {
++        if (dp_packet_hwol_is_tunnel_geneve(packet) ||
++                dp_packet_hwol_is_tunnel_vxlan(packet)) {
++
++            if (dp_packet_hwol_is_tso(packet)) {
++                /* Can't perform GSO in the middle of a pipeline. */
++                COVERAGE_INC(datapath_drop_tunnel_tso_recirc);
++                dp_packet_delete(packet);
++                VLOG_WARN_RL(&rl, "Recirculating tunnel packets with "
++                                  "TSO is not supported");
++                continue;
++            }
++            /* Have to fix all the checksums before re-parsing, because the
++             * packet will be treated as having a single set of headers. */
++            dp_packet_ol_send_prepare(packet, 0);
++            /* This packet must not be marked with anything tunnel-related. */
++            dp_packet_hwol_reset_tunnel(packet);
++            /* Clear inner offsets.  Other ones are collateral, but they will
++             * be re-initialized on re-parsing. */
++            dp_packet_reset_offsets(packet);
++        }
++        dp_packet_batch_refill(packets, packet, i);
++    }
++
+     dp_netdev_input__(pmd, packets, true, 0);
+ }
+ 
+diff --git a/lib/dpif-netlink-rtnl.c b/lib/dpif-netlink-rtnl.c
+index 5788294ae0..f7035333e6 100644
+--- a/lib/dpif-netlink-rtnl.c
++++ b/lib/dpif-netlink-rtnl.c
+@@ -566,6 +566,7 @@ dpif_netlink_rtnl_probe_oot_tunnels(void)
+ 
+         tnl_cfg = netdev_get_tunnel_config(netdev);
+         if (!tnl_cfg) {
++            netdev_close(netdev);
+             return true;
+         }
+ 
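This hunk and the show_dpif() hunk in lib/dpctl.c above close the same class of leak: a reference obtained earlier was not released on one of the exit paths. Reduced to standard C, with stdio standing in for netdev references:

    #include <stdio.h>

    static int
    first_byte(const char *path)
    {
        FILE *f = fopen(path, "r");
        int c;

        if (!f) {
            return -1;
        }
        c = getc(f);
        if (c == EOF) {
            fclose(f);      /* The fix: release before the early return. */
            return -1;
        }
        fclose(f);          /* The normal path already released correctly. */
        return c;
    }

    int main(void)
    {
        printf("%d\n", first_byte("/etc/hostname"));
        return 0;
    }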
+diff --git a/lib/flow.c b/lib/flow.c
+index 8e3402388c..9be4375246 100644
+--- a/lib/flow.c
++++ b/lib/flow.c
+@@ -408,7 +408,8 @@ parse_ethertype(const void **datap, size_t *sizep)
+ static inline bool
+ parse_icmpv6(const void **datap, size_t *sizep,
+              const struct icmp6_data_header *icmp6,
+-             ovs_be32 *rso_flags, const struct in6_addr **nd_target,
++             ovs_be32 *rso_flags,
++             const union ovs_16aligned_in6_addr **nd_target,
+              struct eth_addr arp_buf[2], uint8_t *opt_type)
+ {
+     if (icmp6->icmp6_base.icmp6_code != 0 ||
+@@ -1117,7 +1118,7 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
+             }
+         } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
+             if (OVS_LIKELY(size >= sizeof(struct icmp6_data_header))) {
+-                const struct in6_addr *nd_target;
++                const union ovs_16aligned_in6_addr *nd_target;
+                 struct eth_addr arp_buf[2];
+                 /* This will populate whether we received Option 1
+                  * or Option 2. */
+@@ -3420,6 +3421,24 @@ flow_compose(struct dp_packet *p, const struct flow *flow,
+             arp->ar_sha = flow->arp_sha;
+             arp->ar_tha = flow->arp_tha;
+         }
++    } else if (flow->dl_type == htons(ETH_TYPE_NSH)) {
++        struct nsh_hdr *nsh;
++
++        nsh = dp_packet_put_zeros(p, sizeof *nsh);
++        dp_packet_set_l3(p, nsh);
++
++        nsh_set_flags_ttl_len(nsh, flow->nsh.flags, flow->nsh.ttl,
++                              flow->nsh.mdtype == NSH_M_TYPE1
++                              ? NSH_M_TYPE1_LEN : NSH_BASE_HDR_LEN);
++        nsh->next_proto = flow->nsh.np;
++        nsh->md_type = flow->nsh.mdtype;
++        put_16aligned_be32(&nsh->path_hdr, flow->nsh.path_hdr);
++
++        if (flow->nsh.mdtype == NSH_M_TYPE1) {
++            for (size_t i = 0; i < 4; i++) {
++                put_16aligned_be32(&nsh->md1.context[i], flow->nsh.context[i]);
++            }
++        }
+     }
+ 
+     if (eth_type_mpls(flow->dl_type)) {
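flow_compose() stores the NSH path header and context words through put_16aligned_be32() because packet buffers only guarantee 16-bit alignment for these fields; the 16-aligned types keep a 32-bit value as two 16-bit halves so no 4-byte access is ever issued. A minimal model of the technique (not the exact definitions from lib/unaligned.h, and without the network byte order handling):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint16_t hi;
        uint16_t lo;
    } aligned_u32;      /* Simplified stand-in for ovs_16aligned_be32. */

    static void
    put_16aligned(aligned_u32 *x, uint32_t value)
    {
        x->hi = value >> 16;
        x->lo = value & 0xffff;
    }

    static uint32_t
    get_16aligned(const aligned_u32 *x)
    {
        return ((uint32_t) x->hi << 16) | x->lo;
    }

    int main(void)
    {
        aligned_u32 v;

        put_16aligned(&v, 0x12345678);
        printf("%08x\n", get_16aligned(&v));
        return 0;
    }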
+diff --git a/lib/hash.c b/lib/hash.c
+index c722f3c3cc..3d574de9b4 100644
+--- a/lib/hash.c
++++ b/lib/hash.c
+@@ -29,15 +29,16 @@ hash_3words(uint32_t a, uint32_t b, uint32_t c)
+ uint32_t
+ hash_bytes(const void *p_, size_t n, uint32_t basis)
+ {
+-    const uint32_t *p = p_;
++    const uint8_t *p = p_;
+     size_t orig_n = n;
+     uint32_t hash;
+ 
+     hash = basis;
+     while (n >= 4) {
+-        hash = hash_add(hash, get_unaligned_u32(p));
++        hash = hash_add(hash,
++                        get_unaligned_u32(ALIGNED_CAST(const uint32_t *, p)));
+         n -= 4;
+-        p += 1;
++        p += 4;
+     }
+ 
+     if (n) {
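The hash_bytes() fix above walks the buffer with a byte pointer and advances it by 4, so the code never forms a uint32_t * to a potentially misaligned address; the actual load still goes through get_unaligned_u32(). The same access pattern in standalone form, with a memcpy()-based stand-in for get_unaligned_u32():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t
    get_u32(const uint8_t *p)
    {
        uint32_t x;

        memcpy(&x, p, sizeof x);    /* Safe for any alignment. */
        return x;
    }

    int main(void)
    {
        uint8_t buf[9] = "12345678";
        uint32_t acc = 0;

        /* Step a byte pointer 4 bytes at a time, as the patched loop does. */
        for (size_t n = 8, i = 0; n >= 4; n -= 4, i += 4) {
            acc += get_u32(&buf[i]);
        }
        printf("%08x\n", (unsigned) acc);
        return 0;
    }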
+diff --git a/lib/ipf.c b/lib/ipf.c
+index 7d74e2c131..2d715f5e9d 100644
+--- a/lib/ipf.c
++++ b/lib/ipf.c
+@@ -506,13 +506,15 @@ ipf_reassemble_v6_frags(struct ipf_list *ipf_list)
+ }
+ 
+ /* Called when a frag list state transitions to another state. This is
+- * triggered by new fragment for the list being received.*/
+-static void
++ * triggered by a new fragment being received for the list.  Returns a
++ * reassembled packet if this fragment has completed one. */
++static struct reassembled_pkt *
+ ipf_list_state_transition(struct ipf *ipf, struct ipf_list *ipf_list,
+                           bool ff, bool lf, bool v6)
+     OVS_REQUIRES(ipf->ipf_lock)
+ {
+     enum ipf_list_state curr_state = ipf_list->state;
++    struct reassembled_pkt *ret = NULL;
+     enum ipf_list_state next_state;
+     switch (curr_state) {
+     case IPF_LIST_STATE_UNUSED:
+@@ -562,12 +564,15 @@ ipf_list_state_transition(struct ipf *ipf, struct ipf_list *ipf_list,
+                 ipf_reassembled_list_add(&ipf->reassembled_pkt_list, rp);
+                 ipf_expiry_list_remove(ipf_list);
+                 next_state = IPF_LIST_STATE_COMPLETED;
++                ret = rp;
+             } else {
+                 next_state = IPF_LIST_STATE_REASS_FAIL;
+             }
+         }
+     }
+     ipf_list->state = next_state;
++
++    return ret;
+ }
+ 
+ /* Some sanity checks are redundant, but prudent, in case code paths for
+@@ -799,7 +804,8 @@ ipf_is_frag_duped(const struct ipf_frag *frag_list, int last_inuse_idx,
+ static bool
+ ipf_process_frag(struct ipf *ipf, struct ipf_list *ipf_list,
+                  struct dp_packet *pkt, uint16_t start_data_byte,
+-                 uint16_t end_data_byte, bool ff, bool lf, bool v6)
++                 uint16_t end_data_byte, bool ff, bool lf, bool v6,
++                 struct reassembled_pkt **rp)
+     OVS_REQUIRES(ipf->ipf_lock)
+ {
+     bool duped_frag = ipf_is_frag_duped(ipf_list->frag_list,
+@@ -820,7 +826,7 @@ ipf_process_frag(struct ipf *ipf, struct ipf_list *ipf_list,
+             ipf_list->last_inuse_idx++;
+             atomic_count_inc(&ipf->nfrag);
+             ipf_count(ipf, v6, IPF_NFRAGS_ACCEPTED);
+-            ipf_list_state_transition(ipf, ipf_list, ff, lf, v6);
++            *rp = ipf_list_state_transition(ipf, ipf_list, ff, lf, v6);
+         } else {
+             OVS_NOT_REACHED();
+         }
+@@ -853,7 +859,8 @@ ipf_list_init(struct ipf_list *ipf_list, struct ipf_list_key *key,
+  * to a list of fragments. */
+ static bool
+ ipf_handle_frag(struct ipf *ipf, struct dp_packet *pkt, ovs_be16 dl_type,
+-                uint16_t zone, long long now, uint32_t hash_basis)
++                uint16_t zone, long long now, uint32_t hash_basis,
++                struct reassembled_pkt **rp)
+     OVS_REQUIRES(ipf->ipf_lock)
+ {
+     struct ipf_list_key key;
+@@ -922,7 +929,7 @@ ipf_handle_frag(struct ipf *ipf, struct dp_packet *pkt, ovs_be16 dl_type,
+     }
+ 
+     return ipf_process_frag(ipf, ipf_list, pkt, start_data_byte,
+-                            end_data_byte, ff, lf, v6);
++                            end_data_byte, ff, lf, v6, rp);
+ }
+ 
+ /* Filters out fragments from a batch of fragments and adjust the batch. */
+@@ -941,11 +948,17 @@ ipf_extract_frags_from_batch(struct ipf *ipf, struct dp_packet_batch *pb,
+                           ||
+                           (dl_type == htons(ETH_TYPE_IPV6) &&
+                           ipf_is_valid_v6_frag(ipf, pkt)))) {
++            struct reassembled_pkt *rp = NULL;
+ 
+             ovs_mutex_lock(&ipf->ipf_lock);
+-            if (!ipf_handle_frag(ipf, pkt, dl_type, zone, now, hash_basis)) {
++            if (!ipf_handle_frag(ipf, pkt, dl_type, zone, now, hash_basis,
++                                 &rp)) {
+                 dp_packet_batch_refill(pb, pkt, pb_idx);
+             } else {
++                if (rp && !dp_packet_batch_is_full(pb)) {
++                    dp_packet_batch_refill(pb, rp->pkt, pb_idx);
++                    rp->list->reass_execute_ctx = rp->pkt;
++                }
+                 dp_packet_delete(pkt);
+             }
+             ovs_mutex_unlock(&ipf->ipf_lock);
+@@ -1063,6 +1076,9 @@ ipf_send_completed_frags(struct ipf *ipf, struct dp_packet_batch *pb,
+     struct ipf_list *ipf_list;
+ 
+     LIST_FOR_EACH_SAFE (ipf_list, list_node, &ipf->frag_complete_list) {
++        if ((ipf_list->key.dl_type == htons(ETH_TYPE_IPV6)) != v6) {
++            continue;
++        }
+         if (ipf_send_frags_in_list(ipf, ipf_list, pb, IPF_FRAG_COMPLETED_LIST,
+                                    v6, now)) {
+             ipf_completed_list_clean(&ipf->frag_lists, ipf_list);
+@@ -1096,6 +1112,9 @@ ipf_send_expired_frags(struct ipf *ipf, struct dp_packet_batch *pb,
+     size_t lists_removed = 0;
+ 
+     LIST_FOR_EACH_SAFE (ipf_list, list_node, &ipf->frag_exp_list) {
++        if ((ipf_list->key.dl_type == htons(ETH_TYPE_IPV6)) != v6) {
++            continue;
++        }
+         if (now <= ipf_list->expiration ||
+             lists_removed >= IPF_FRAG_LIST_MAX_EXPIRED) {
+             break;
+@@ -1116,7 +1135,8 @@ ipf_send_expired_frags(struct ipf *ipf, struct dp_packet_batch *pb,
+ /* Adds a reassembled packet to a packet batch to be processed by the caller.
+  */
+ static void
+-ipf_execute_reass_pkts(struct ipf *ipf, struct dp_packet_batch *pb)
++ipf_execute_reass_pkts(struct ipf *ipf, struct dp_packet_batch *pb,
++                       ovs_be16 dl_type)
+ {
+     if (ovs_list_is_empty(&ipf->reassembled_pkt_list)) {
+         return;
+@@ -1127,6 +1147,7 @@ ipf_execute_reass_pkts(struct ipf *ipf, struct dp_packet_batch *pb)
+ 
+     LIST_FOR_EACH_SAFE (rp, rp_list_node, &ipf->reassembled_pkt_list) {
+         if (!rp->list->reass_execute_ctx &&
++            rp->list->key.dl_type == dl_type &&
+             ipf_dp_packet_batch_add(pb, rp->pkt, false)) {
+             rp->list->reass_execute_ctx = rp->pkt;
+         }
+@@ -1237,7 +1258,7 @@ ipf_preprocess_conntrack(struct ipf *ipf, struct dp_packet_batch *pb,
+     }
+ 
+     if (ipf_get_enabled(ipf) || atomic_count_get(&ipf->nfrag)) {
+-        ipf_execute_reass_pkts(ipf, pb);
++        ipf_execute_reass_pkts(ipf, pb, dl_type);
+     }
+ }
+ 
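
[Editor's note: the thread running through these ipf.c hunks is an out-parameter. ipf_list_state_transition() now returns the reassembled packet, and the callers pass a struct reassembled_pkt ** down the chain so the batch-level code can re-inject a completed packet into the very batch that carried its last fragment. A skeletal sketch of the pattern with hypothetical names; elided bodies are marked by comments:]

    #include <stdbool.h>
    #include <stddef.h>

    struct pkt;   /* opaque packet handle for the sketch */

    /* Hypothetical: consume one fragment; when it completes a list, hand
     * the reassembled packet back through 'out' besides queuing it. */
    static bool
    process_fragment(struct pkt *frag, struct pkt **out)
    {
        *out = NULL;
        /* ... add 'frag' to its list; on completion: *out = whole; ... */
        (void) frag;
        return true;    /* fragment consumed */
    }

    static void
    handle_fragment(struct pkt *frag)
    {
        struct pkt *whole = NULL;

        if (process_fragment(frag, &whole) && whole) {
            /* Re-inject the reassembled packet into the current batch so
             * it is processed in this pass rather than a later one. */
        }
    }
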
+diff --git a/lib/jhash.c b/lib/jhash.c
+index c59b51b611..a8e3f457b9 100644
+--- a/lib/jhash.c
++++ b/lib/jhash.c
+@@ -96,18 +96,18 @@ jhash_words(const uint32_t *p, size_t n, uint32_t basis)
+ uint32_t
+ jhash_bytes(const void *p_, size_t n, uint32_t basis)
+ {
+-    const uint32_t *p = p_;
++    const uint8_t *p = p_;
+     uint32_t a, b, c;
+ 
+     a = b = c = 0xdeadbeef + n + basis;
+ 
+     while (n >= 12) {
+-        a += get_unaligned_u32(p);
+-        b += get_unaligned_u32(p + 1);
+-        c += get_unaligned_u32(p + 2);
++        a += get_unaligned_u32(ALIGNED_CAST(const uint32_t *, p));
++        b += get_unaligned_u32(ALIGNED_CAST(const uint32_t *, p + 4));
++        c += get_unaligned_u32(ALIGNED_CAST(const uint32_t *, p + 8));
+         jhash_mix(&a, &b, &c);
+         n -= 12;
+-        p += 3;
++        p += 12;
+     }
+ 
+     if (n) {
+diff --git a/lib/match.c b/lib/match.c
+index 0b9dc4278c..9b7e06e0c7 100644
+--- a/lib/match.c
++++ b/lib/match.c
+@@ -1618,7 +1618,7 @@ match_format(const struct match *match,
+         ds_put_char(s, ',');
+     }
+     for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
+-        char str_i[8];
++        char str_i[12];
+ 
+         if (!wc->masks.vlans[i].tci) {
+             break;
+diff --git a/lib/mcast-snooping.c b/lib/mcast-snooping.c
+index dc5164b41c..bf25e6f20a 100644
+--- a/lib/mcast-snooping.c
++++ b/lib/mcast-snooping.c
+@@ -432,7 +432,9 @@ mcast_snooping_add_group(struct mcast_snooping *ms,
+         uint32_t hash = mcast_table_hash(ms, addr, vlan);
+ 
+         if (hmap_count(&ms->table) >= ms->max_entries) {
+-            group_get_lru(ms, &grp);
++            if (!group_get_lru(ms, &grp)) {
++                return false;
++            }
+             mcast_snooping_flush_group(ms, grp);
+         }
+ 
+diff --git a/lib/meta-flow.c b/lib/meta-flow.c
+index aa7cf1fcbb..b03fe7abf1 100644
+--- a/lib/meta-flow.c
++++ b/lib/meta-flow.c
+@@ -1788,6 +1788,115 @@ mf_is_tun_metadata(const struct mf_field *mf)
+            mf->id < MFF_TUN_METADATA0 + TUN_METADATA_NUM_OPTS;
+ }
+ 
++bool
++mf_is_any_metadata(const struct mf_field *mf)
++{
++    switch (mf->id) {
++    case MFF_DP_HASH:
++    case MFF_RECIRC_ID:
++    case MFF_PACKET_TYPE:
++    case MFF_CONJ_ID:
++    case MFF_TUN_ERSPAN_DIR:
++    CASE_MFF_TUN_METADATA:
++    case MFF_METADATA:
++    case MFF_IN_PORT:
++    case MFF_IN_PORT_OXM:
++    case MFF_ACTSET_OUTPUT:
++    case MFF_SKB_PRIORITY:
++    case MFF_PKT_MARK:
++    case MFF_CT_STATE:
++    case MFF_CT_ZONE:
++    case MFF_CT_MARK:
++    case MFF_CT_LABEL:
++    case MFF_CT_NW_PROTO:
++    case MFF_CT_NW_SRC:
++    case MFF_CT_NW_DST:
++    case MFF_CT_IPV6_SRC:
++    case MFF_CT_IPV6_DST:
++    case MFF_CT_TP_SRC:
++    case MFF_CT_TP_DST:
++    CASE_MFF_REGS:
++    CASE_MFF_XREGS:
++    CASE_MFF_XXREGS:
++        return true;
++
++    case MFF_TUN_ID:
++    case MFF_TUN_SRC:
++    case MFF_TUN_DST:
++    case MFF_TUN_IPV6_SRC:
++    case MFF_TUN_IPV6_DST:
++    case MFF_TUN_FLAGS:
++    case MFF_TUN_TTL:
++    case MFF_TUN_TOS:
++    case MFF_TUN_GBP_ID:
++    case MFF_TUN_GBP_FLAGS:
++    case MFF_TUN_ERSPAN_IDX:
++    case MFF_TUN_ERSPAN_VER:
++    case MFF_TUN_ERSPAN_HWID:
++    case MFF_TUN_GTPU_FLAGS:
++    case MFF_TUN_GTPU_MSGTYPE:
++    case MFF_ETH_SRC:
++    case MFF_ETH_DST:
++    case MFF_ETH_TYPE:
++    case MFF_VLAN_TCI:
++    case MFF_DL_VLAN:
++    case MFF_VLAN_VID:
++    case MFF_DL_VLAN_PCP:
++    case MFF_VLAN_PCP:
++    case MFF_MPLS_LABEL:
++    case MFF_MPLS_TC:
++    case MFF_MPLS_BOS:
++    case MFF_MPLS_TTL:
++    case MFF_IPV4_SRC:
++    case MFF_IPV4_DST:
++    case MFF_IPV6_SRC:
++    case MFF_IPV6_DST:
++    case MFF_IPV6_LABEL:
++    case MFF_IP_PROTO:
++    case MFF_IP_DSCP:
++    case MFF_IP_DSCP_SHIFTED:
++    case MFF_IP_ECN:
++    case MFF_IP_TTL:
++    case MFF_IP_FRAG:
++    case MFF_ARP_OP:
++    case MFF_ARP_SPA:
++    case MFF_ARP_TPA:
++    case MFF_ARP_SHA:
++    case MFF_ARP_THA:
++    case MFF_TCP_SRC:
++    case MFF_TCP_DST:
++    case MFF_TCP_FLAGS:
++    case MFF_UDP_SRC:
++    case MFF_UDP_DST:
++    case MFF_SCTP_SRC:
++    case MFF_SCTP_DST:
++    case MFF_ICMPV4_TYPE:
++    case MFF_ICMPV4_CODE:
++    case MFF_ICMPV6_TYPE:
++    case MFF_ICMPV6_CODE:
++    case MFF_ND_TARGET:
++    case MFF_ND_SLL:
++    case MFF_ND_TLL:
++    case MFF_ND_RESERVED:
++    case MFF_ND_OPTIONS_TYPE:
++    case MFF_NSH_FLAGS:
++    case MFF_NSH_MDTYPE:
++    case MFF_NSH_NP:
++    case MFF_NSH_SPI:
++    case MFF_NSH_SI:
++    case MFF_NSH_C1:
++    case MFF_NSH_C2:
++    case MFF_NSH_C3:
++    case MFF_NSH_C4:
++    case MFF_NSH_TTL:
++        return false;
++
++    case MFF_N_IDS:
++    default:
++        OVS_NOT_REACHED();
++    }
++}
++
+ bool
+ mf_is_frozen_metadata(const struct mf_field *mf)
+ {
+@@ -2543,7 +2652,8 @@ mf_set(const struct mf_field *mf,
+         break;
+ 
+     case MFF_IP_FRAG:
+-        match_set_nw_frag_masked(match, value->u8, mask->u8);
++        match_set_nw_frag_masked(match, value->u8,
++                                 mask->u8 & FLOW_NW_FRAG_MASK);
+         break;
+ 
+     case MFF_ARP_SPA:
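
[Editor's note: the new mf_is_any_metadata() above spells out every field id rather than hiding the 'false' group behind a bare default. A sketch of the idiom with hypothetical enum values: under GCC/Clang's -Wswitch-enum, any value missing from the switch is reported even though a default exists, so a newly added field cannot silently skip classification, while the trailing abort catches out-of-range values at run time:]

    #include <stdbool.h>
    #include <stdlib.h>

    enum field_id { FIELD_RECIRC, FIELD_ETH_SRC, FIELD_IPV4_SRC, FIELD_N_IDS };

    static bool
    field_is_metadata(enum field_id id)
    {
        switch (id) {
        case FIELD_RECIRC:          /* pipeline metadata */
            return true;

        case FIELD_ETH_SRC:         /* packet header fields */
        case FIELD_IPV4_SRC:
            return false;

        case FIELD_N_IDS:
        default:
            abort();                /* stand-in for OVS_NOT_REACHED() */
        }
    }
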
+diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
+index 45f61930d4..fd54f34692 100644
+--- a/lib/netdev-dpdk.c
++++ b/lib/netdev-dpdk.c
+@@ -464,9 +464,8 @@ struct netdev_dpdk {
+         bool attached;
+         /* If true, rte_eth_dev_start() was successfully called */
+         bool started;
+-        bool reset_needed;
+-        /* 1 pad byte here. */
+         struct eth_addr hwaddr;
++        /* 2 pad bytes here. */
+         int mtu;
+         int socket_id;
+         int buf_size;
+@@ -607,6 +606,9 @@ int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
+ struct ingress_policer *
+ netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
+ 
++static void netdev_dpdk_mbuf_dump(const char *prefix, const char *message,
++                                  const struct rte_mbuf *);
++
+ static bool
+ is_dpdk_class(const struct netdev_class *class)
+ {
+@@ -1351,6 +1353,21 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev)
+         info.tx_offload_capa &= ~RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
+     }
+ 
++    if (!strcmp(info.driver_name, "net_ice")
++        || !strcmp(info.driver_name, "net_i40e")
++        || !strcmp(info.driver_name, "net_iavf")
++        || !strcmp(info.driver_name, "net_txgbe")) {
++        /* FIXME: The driver advertises the capability but doesn't seem
++         * to actually support it correctly.  This can be removed once
++         * the driver is fixed on the DPDK side. */
++        VLOG_INFO("%s: disabled Tx outer udp checksum offloads for a "
++                  "net/ice, net/i40e, net/iavf or net/txgbe port.",
++                  netdev_get_name(&dev->up));
++        info.tx_offload_capa &= ~RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
++        info.tx_offload_capa &= ~RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
++        info.tx_offload_capa &= ~RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
++    }
++
+     if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
+         dev->hw_ol_features |= NETDEV_TX_IPV4_CKSUM_OFFLOAD;
+     } else {
+@@ -1514,7 +1531,6 @@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
+     dev->virtio_features_state = OVS_VIRTIO_F_CLEAN;
+     dev->attached = false;
+     dev->started = false;
+-    dev->reset_needed = false;
+ 
+     ovsrcu_init(&dev->qos_conf, NULL);
+ 
+@@ -2137,13 +2153,11 @@ netdev_dpdk_run(const struct netdev_class *netdev_class OVS_UNUSED)
+             if (!pending_reset) {
+                 continue;
+             }
+-            atomic_store_relaxed(&netdev_dpdk_pending_reset[port_id], false);
+ 
+             ovs_mutex_lock(&dpdk_mutex);
+             dev = netdev_dpdk_lookup_by_port_id(port_id);
+             if (dev) {
+                 ovs_mutex_lock(&dev->mutex);
+-                dev->reset_needed = true;
+                 netdev_request_reconfigure(&dev->up);
+                 VLOG_DBG_RL(&rl, "%s: Device reset requested.",
+                             netdev_get_name(&dev->up));
+@@ -2364,17 +2378,16 @@ netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args,
+         struct eth_addr mac;
+ 
+         if (!dpdk_port_is_representor(dev)) {
+-            VLOG_WARN_BUF(errp, "'%s' is trying to set the VF MAC '%s' "
+-                          "but 'options:dpdk-vf-mac' is only supported for "
+-                          "VF representors.",
+-                          netdev_get_name(netdev), vf_mac);
++            VLOG_WARN("'%s' is trying to set the VF MAC '%s' "
++                      "but 'options:dpdk-vf-mac' is only supported for "
++                      "VF representors.",
++                      netdev_get_name(netdev), vf_mac);
+         } else if (!eth_addr_from_string(vf_mac, &mac)) {
+-            VLOG_WARN_BUF(errp, "interface '%s': cannot parse VF MAC '%s'.",
+-                          netdev_get_name(netdev), vf_mac);
++            VLOG_WARN("interface '%s': cannot parse VF MAC '%s'.",
++                      netdev_get_name(netdev), vf_mac);
+         } else if (eth_addr_is_multicast(mac)) {
+-            VLOG_WARN_BUF(errp,
+-                          "interface '%s': cannot set VF MAC to multicast "
+-                          "address '%s'.", netdev_get_name(netdev), vf_mac);
++            VLOG_WARN("interface '%s': cannot set VF MAC to multicast "
++                      "address '%s'.", netdev_get_name(netdev), vf_mac);
+         } else if (!eth_addr_equals(dev->requested_hwaddr, mac)) {
+             dev->requested_hwaddr = mac;
+             netdev_request_reconfigure(netdev);
+@@ -2567,73 +2580,133 @@ static bool
+ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
+ {
+     struct dp_packet *pkt = CONTAINER_OF(mbuf, struct dp_packet, mbuf);
+-    struct tcp_header *th;
+-
+-    if (!(mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK
+-                            | RTE_MBUF_F_TX_TCP_SEG))) {
+-        mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6);
++    void *l2;
++    void *l3;
++    void *l4;
++
++    const uint64_t all_inner_requests = (RTE_MBUF_F_TX_IP_CKSUM |
++                                         RTE_MBUF_F_TX_L4_MASK |
++                                         RTE_MBUF_F_TX_TCP_SEG);
++    const uint64_t all_outer_requests = (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
++                                         RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
++    const uint64_t all_requests = all_inner_requests | all_outer_requests;
++    const uint64_t all_inner_marks = (RTE_MBUF_F_TX_IPV4 |
++                                      RTE_MBUF_F_TX_IPV6);
++    const uint64_t all_outer_marks = (RTE_MBUF_F_TX_OUTER_IPV4 |
++                                      RTE_MBUF_F_TX_OUTER_IPV6 |
++                                      RTE_MBUF_F_TX_TUNNEL_MASK);
++    const uint64_t all_marks = all_inner_marks | all_outer_marks;
++
++    if (!(mbuf->ol_flags & all_requests)) {
++        /* No offloads requested, no marks should be set. */
++        mbuf->ol_flags &= ~all_marks;
++
++        uint64_t unexpected = mbuf->ol_flags & RTE_MBUF_F_TX_OFFLOAD_MASK;
++        if (OVS_UNLIKELY(unexpected)) {
++            VLOG_WARN_RL(&rl, "%s: Unexpected Tx offload flags: %#"PRIx64,
++                         netdev_get_name(&dev->up), unexpected);
++            netdev_dpdk_mbuf_dump(netdev_get_name(&dev->up),
++                                  "Packet with unexpected ol_flags", mbuf);
++            return false;
++        }
+         return true;
+     }
+ 
+-    /* If packet is vxlan or geneve tunnel packet, calculate outer
+-     * l2 len and outer l3 len. Inner l2/l3/l4 len are calculated
+-     * before. */
+-    if (mbuf->ol_flags &
+-        (RTE_MBUF_F_TX_TUNNEL_GENEVE | RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
+-        mbuf->outer_l2_len = (char *) dp_packet_l3(pkt) -
+-                 (char *) dp_packet_eth(pkt);
+-        mbuf->outer_l3_len = (char *) dp_packet_l4(pkt) -
+-                 (char *) dp_packet_l3(pkt);
++    const uint64_t tunnel_type = mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
++    if (OVS_UNLIKELY(tunnel_type &&
++                     tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE &&
++                     tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
++        VLOG_WARN_RL(&rl, "%s: Unexpected tunnel type: %#"PRIx64,
++                     netdev_get_name(&dev->up), tunnel_type);
++        netdev_dpdk_mbuf_dump(netdev_get_name(&dev->up),
++                              "Packet with unexpected tunnel type", mbuf);
++        return false;
++    }
++
++    if (tunnel_type && (mbuf->ol_flags & all_inner_requests)) {
++        if (mbuf->ol_flags & all_outer_requests) {
++            mbuf->outer_l2_len = (char *) dp_packet_l3(pkt) -
++                                 (char *) dp_packet_eth(pkt);
++            mbuf->outer_l3_len = (char *) dp_packet_l4(pkt) -
++                                 (char *) dp_packet_l3(pkt);
++
++            /* Inner L2 length must account for the tunnel header length. */
++            l2 = dp_packet_l4(pkt);
++            l3 = dp_packet_inner_l3(pkt);
++            l4 = dp_packet_inner_l4(pkt);
++        } else {
++            /* If no outer offloading is requested, clear outer marks. */
++            mbuf->ol_flags &= ~all_outer_marks;
++            mbuf->outer_l2_len = 0;
++            mbuf->outer_l3_len = 0;
++
++            /* Skip outer headers. */
++            l2 = dp_packet_eth(pkt);
++            l3 = dp_packet_inner_l3(pkt);
++            l4 = dp_packet_inner_l4(pkt);
++        }
+     } else {
+-        mbuf->l2_len = (char *) dp_packet_l3(pkt) -
+-               (char *) dp_packet_eth(pkt);
+-        mbuf->l3_len = (char *) dp_packet_l4(pkt) -
+-               (char *) dp_packet_l3(pkt);
++        if (tunnel_type) {
++            /* No inner offload is requested; fall back to non-tunnel
++             * checksum offloads. */
++            mbuf->ol_flags &= ~all_inner_marks;
++            if (mbuf->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
++                mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
++                mbuf->ol_flags |= RTE_MBUF_F_TX_IPV4;
++            }
++            if (mbuf->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
++                mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
++                mbuf->ol_flags |= mbuf->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4
++                                  ? RTE_MBUF_F_TX_IPV4 : RTE_MBUF_F_TX_IPV6;
++            }
++            mbuf->ol_flags &= ~(all_outer_requests | all_outer_marks);
++        }
+         mbuf->outer_l2_len = 0;
+         mbuf->outer_l3_len = 0;
+-    }
+-    th = dp_packet_l4(pkt);
+ 
+-    if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+-        if (!th) {
+-            VLOG_WARN_RL(&rl, "%s: TCP Segmentation without L4 header"
+-                         " pkt len: %"PRIu32"", dev->up.name, mbuf->pkt_len);
+-            return false;
+-        }
++        l2 = dp_packet_eth(pkt);
++        l3 = dp_packet_l3(pkt);
++        l4 = dp_packet_l4(pkt);
+     }
+ 
+-    if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) {
+-        if (!th) {
+-            VLOG_WARN_RL(&rl, "%s: TCP offloading without L4 header"
+-                         " pkt len: %"PRIu32"", dev->up.name, mbuf->pkt_len);
+-            return false;
+-        }
++    ovs_assert(l4);
+ 
+-        if (mbuf->ol_flags & (RTE_MBUF_F_TX_TUNNEL_GENEVE |
+-            RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
+-            mbuf->tso_segsz = dev->mtu - mbuf->l2_len - mbuf->l3_len -
+-                              mbuf->l4_len - mbuf->outer_l3_len;
++    mbuf->l2_len = (char *) l3 - (char *) l2;
++    mbuf->l3_len = (char *) l4 - (char *) l3;
++
++    if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
++        struct tcp_header *th = l4;
++        uint16_t link_tso_segsz;
++        int hdr_len;
++
++        mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
++        if (tunnel_type) {
++            link_tso_segsz = dev->mtu - mbuf->l2_len - mbuf->l3_len -
++                             mbuf->l4_len - mbuf->outer_l3_len;
+         } else {
+-            mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
+-            mbuf->tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len;
++            link_tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len;
+         }
+ 
+-        if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+-            int hdr_len = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+-            if (OVS_UNLIKELY((hdr_len +
+-                              mbuf->tso_segsz) > dev->max_packet_len)) {
+-                VLOG_WARN_RL(&rl, "%s: Oversized TSO packet. hdr: %"PRIu32", "
+-                             "gso: %"PRIu32", max len: %"PRIu32"",
+-                             dev->up.name, hdr_len, mbuf->tso_segsz,
+-                             dev->max_packet_len);
+-                return false;
+-            }
++        if (mbuf->tso_segsz > link_tso_segsz) {
++            mbuf->tso_segsz = link_tso_segsz;
+         }
+ 
+-        if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV4) {
+-            mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
++        hdr_len = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
++        if (OVS_UNLIKELY((hdr_len + mbuf->tso_segsz) > dev->max_packet_len)) {
++            VLOG_WARN_RL(&rl, "%s: Oversized TSO packet. hdr: %"PRIu32", "
++                         "gso: %"PRIu32", max len: %"PRIu32"",
++                         dev->up.name, hdr_len, mbuf->tso_segsz,
++                         dev->max_packet_len);
++            return false;
+         }
+     }
++
++    /* If an L4 checksum is requested, request the IPv4 checksum too. */
++    if (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK
++        && mbuf->ol_flags & RTE_MBUF_F_TX_IPV4) {
++        mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
++    }
++
+     return true;
+ }
+ 
+@@ -2664,6 +2737,35 @@ netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
+     return cnt;
+ }
+ 
++static void
++netdev_dpdk_mbuf_dump(const char *prefix, const char *message,
++                      const struct rte_mbuf *mbuf)
++{
++    static struct vlog_rate_limit dump_rl = VLOG_RATE_LIMIT_INIT(5, 5);
++    char *response = NULL;
++    FILE *stream;
++    size_t size;
++
++    if (VLOG_DROP_DBG(&dump_rl)) {
++        return;
++    }
++
++    stream = open_memstream(&response, &size);
++    if (!stream) {
++        VLOG_ERR("Unable to open memstream for mbuf dump: %s.",
++                 ovs_strerror(errno));
++        return;
++    }
++
++    rte_pktmbuf_dump(stream, mbuf, rte_pktmbuf_pkt_len(mbuf));
++
++    fclose(stream);
++
++    VLOG_DBG(prefix ? "%s: %s:\n%s" : "%s%s:\n%s",
++             prefix ? prefix : "", message, response);
++    free(response);
++}
++
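
[Editor's note: netdev_dpdk_mbuf_dump() above builds its log message by pointing a stdio stream at an in-memory buffer. A minimal sketch of the POSIX open_memstream() pattern it relies on; fclose() finalizes and NUL-terminates the buffer, which the caller must free:]

    #include <stdio.h>
    #include <stdlib.h>

    static char *
    format_report(int value)
    {
        char *buf = NULL;
        size_t size = 0;
        FILE *stream = open_memstream(&buf, &size);

        if (!stream) {
            return NULL;              /* allocation failed */
        }
        fprintf(stream, "value=%d\n", value);
        fclose(stream);               /* flushes; 'buf' now holds the text */
        return buf;                   /* caller frees */
    }
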
+ /* Tries to transmit 'pkts' to txq 'qid' of device 'dev'.  Takes ownership of
+  * 'pkts', even in case of failure.
+  *
+@@ -2680,6 +2782,8 @@ netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
+         VLOG_WARN_RL(&rl, "%s: Output batch contains invalid packets. "
+                      "Only %u/%u are valid: %s", netdev_get_name(&dev->up),
+                      nb_tx_prep, cnt, rte_strerror(rte_errno));
++        netdev_dpdk_mbuf_dump(netdev_get_name(&dev->up),
++                              "First invalid packet", pkts[nb_tx_prep]);
+     }
+ 
+     while (nb_tx != nb_tx_prep) {
+@@ -4523,10 +4627,11 @@ netdev_dpdk_get_mempool_info(struct unixctl_conn *conn,
+                              int argc, const char *argv[],
+                              void *aux OVS_UNUSED)
+ {
+-    size_t size;
+-    FILE *stream;
+-    char *response = NULL;
+     struct netdev *netdev = NULL;
++    const char *error = NULL;
++    char *response = NULL;
++    FILE *stream;
++    size_t size;
+ 
+     if (argc == 2) {
+         netdev = netdev_from_name(argv[1]);
+@@ -4550,10 +4655,14 @@ netdev_dpdk_get_mempool_info(struct unixctl_conn *conn,
+         ovs_mutex_lock(&dev->mutex);
+         ovs_mutex_lock(&dpdk_mp_mutex);
+ 
+-        rte_mempool_dump(stream, dev->dpdk_mp->mp);
+-        fprintf(stream, "    count: avail (%u), in use (%u)\n",
+-                rte_mempool_avail_count(dev->dpdk_mp->mp),
+-                rte_mempool_in_use_count(dev->dpdk_mp->mp));
++        if (dev->dpdk_mp) {
++            rte_mempool_dump(stream, dev->dpdk_mp->mp);
++            fprintf(stream, "    count: avail (%u), in use (%u)\n",
++                    rte_mempool_avail_count(dev->dpdk_mp->mp),
++                    rte_mempool_in_use_count(dev->dpdk_mp->mp));
++        } else {
++            error = "Not allocated";
++        }
+ 
+         ovs_mutex_unlock(&dpdk_mp_mutex);
+         ovs_mutex_unlock(&dev->mutex);
+@@ -4565,7 +4674,11 @@ netdev_dpdk_get_mempool_info(struct unixctl_conn *conn,
+ 
+     fclose(stream);
+ 
+-    unixctl_command_reply(conn, response);
++    if (error) {
++        unixctl_command_reply_error(conn, error);
++    } else {
++        unixctl_command_reply(conn, response);
++    }
+ out:
+     free(response);
+     netdev_close(netdev);
+@@ -5965,6 +6078,7 @@ static int
+ netdev_dpdk_reconfigure(struct netdev *netdev)
+ {
+     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
++    bool pending_reset;
+     bool try_rx_steer;
+     int err = 0;
+ 
+@@ -5976,6 +6090,9 @@ netdev_dpdk_reconfigure(struct netdev *netdev)
+         dev->requested_n_rxq += 1;
+     }
+ 
++    atomic_read_relaxed(&netdev_dpdk_pending_reset[dev->port_id],
++                        &pending_reset);
++
+     if (netdev->n_txq == dev->requested_n_txq
+         && netdev->n_rxq == dev->requested_n_rxq
+         && dev->rx_steer_flags == dev->requested_rx_steer_flags
+@@ -5985,7 +6102,7 @@ netdev_dpdk_reconfigure(struct netdev *netdev)
+         && dev->txq_size == dev->requested_txq_size
+         && eth_addr_equals(dev->hwaddr, dev->requested_hwaddr)
+         && dev->socket_id == dev->requested_socket_id
+-        && dev->started && !dev->reset_needed) {
++        && dev->started && !pending_reset) {
+         /* Reconfiguration is unnecessary */
+ 
+         goto out;
+@@ -5994,10 +6111,14 @@ netdev_dpdk_reconfigure(struct netdev *netdev)
+ retry:
+     dpdk_rx_steer_unconfigure(dev);
+ 
+-    if (dev->reset_needed) {
++    if (pending_reset) {
++        /* Set to false before the reset to avoid missing a new reset
++         * interrupt event in a race with the event callback. */
++        atomic_store_relaxed(&netdev_dpdk_pending_reset[dev->port_id], false);
+         rte_eth_dev_reset(dev->port_id);
+         if_notifier_manual_report();
+-        dev->reset_needed = false;
+     } else {
+         rte_eth_dev_stop(dev->port_id);
+     }
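
[Editor's note: the reordering above, reading the pending flag early and clearing it only immediately before rte_eth_dev_reset(), closes a window where a reset event arriving mid-reconfigure could be lost. A sketch of the pattern with C11 atomics and hypothetical names:]

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool pending_reset;

    /* Interrupt/event callback: just record that a reset is needed. */
    static void
    on_reset_event(void)
    {
        atomic_store_explicit(&pending_reset, true, memory_order_relaxed);
    }

    static void
    reconfigure(void)
    {
        bool reset = atomic_load_explicit(&pending_reset,
                                          memory_order_relaxed);
        if (reset) {
            /* Clear before acting: an event firing during the reset sets
             * the flag again and triggers another pass, instead of being
             * wiped out by a clear that happens afterwards. */
            atomic_store_explicit(&pending_reset, false,
                                  memory_order_relaxed);
            /* ... perform the device reset here ... */
        }
    }
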
+diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
+index cd7e85a818..e8bbf8d514 100644
+--- a/lib/netdev-dummy.c
++++ b/lib/netdev-dummy.c
+@@ -39,6 +39,7 @@
+ #include "pcap-file.h"
+ #include "openvswitch/poll-loop.h"
+ #include "openvswitch/shash.h"
++#include "ovs-router.h"
+ #include "sset.h"
+ #include "stream.h"
+ #include "unaligned.h"
+@@ -2084,11 +2085,20 @@ netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ 
+     if (netdev && is_dummy_class(netdev->netdev_class)) {
+         struct in_addr ip, mask;
++        struct in6_addr ip6;
++        uint32_t plen;
+         char *error;
+ 
+-        error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
++        error = ip_parse_cidr(argv[2], &ip.s_addr, &plen);
+         if (!error) {
++            mask.s_addr = be32_prefix_mask(plen);
+             netdev_dummy_add_in4(netdev, ip, mask);
++
++            /* Insert local route entry for the new address. */
++            in6_addr_set_mapped_ipv4(&ip6, ip.s_addr);
++            ovs_router_force_insert(0, &ip6, plen + 96, true, argv[1],
++                                    &in6addr_any, &ip6);
++
+             unixctl_command_reply(conn, "OK");
+         } else {
+             unixctl_command_reply_error(conn, error);
+@@ -2118,6 +2128,11 @@ netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ 
+             mask = ipv6_create_mask(plen);
+             netdev_dummy_add_in6(netdev, &ip6, &mask);
++
++            /* Insert local route entry for the new address. */
++            ovs_router_force_insert(0, &ip6, plen, true, argv[1],
++                                    &in6addr_any, &ip6);
++
+             unixctl_command_reply(conn, "OK");
+         } else {
+             unixctl_command_reply_error(conn, error);
+diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
+index bf91ef462e..220825074b 100644
+--- a/lib/netdev-linux.c
++++ b/lib/netdev-linux.c
+@@ -1062,8 +1062,7 @@ netdev_linux_construct_tap(struct netdev *netdev_)
+ 
+     if (tap_supports_vnet_hdr
+         && ioctl(netdev->tap_fd, TUNSETOFFLOAD, oflags) == 0) {
+-        netdev_->ol_flags |= (NETDEV_TX_OFFLOAD_IPV4_CKSUM
+-                              | NETDEV_TX_OFFLOAD_TCP_CKSUM
++        netdev_->ol_flags |= (NETDEV_TX_OFFLOAD_TCP_CKSUM
+                               | NETDEV_TX_OFFLOAD_UDP_CKSUM);
+ 
+         if (userspace_tso_enabled()) {
+@@ -2403,6 +2402,7 @@ static int
+ netdev_linux_read_stringset_info(struct netdev_linux *netdev, uint32_t *len)
+ {
+     union {
++        struct ethtool_cmd ecmd;
+         struct ethtool_sset_info hdr;
+         struct {
+             uint64_t pad[2];
+@@ -2440,9 +2440,12 @@ netdev_linux_read_definitions(struct netdev_linux *netdev,
+     int error = 0;
+ 
+     error = netdev_linux_read_stringset_info(netdev, &len);
+-    if (error || !len) {
++    if (error) {
+         return error;
++    } else if (!len) {
++        return -EOPNOTSUPP;
+     }
++
+     strings = xzalloc(sizeof *strings + len * ETH_GSTRING_LEN);
+ 
+     strings->cmd = ETHTOOL_GSTRINGS;
+@@ -2507,13 +2510,11 @@ netdev_linux_set_ol(struct netdev *netdev_)
+         char *string;
+         uint32_t value;
+     } t_list[] = {
+-        {"tx-checksum-ipv4", NETDEV_TX_OFFLOAD_IPV4_CKSUM |
+-                             NETDEV_TX_OFFLOAD_TCP_CKSUM |
++        {"tx-checksum-ipv4", NETDEV_TX_OFFLOAD_TCP_CKSUM |
+                              NETDEV_TX_OFFLOAD_UDP_CKSUM},
+         {"tx-checksum-ipv6", NETDEV_TX_OFFLOAD_TCP_CKSUM |
+                              NETDEV_TX_OFFLOAD_UDP_CKSUM},
+-        {"tx-checksum-ip-generic", NETDEV_TX_OFFLOAD_IPV4_CKSUM |
+-                                   NETDEV_TX_OFFLOAD_TCP_CKSUM |
++        {"tx-checksum-ip-generic", NETDEV_TX_OFFLOAD_TCP_CKSUM |
+                                    NETDEV_TX_OFFLOAD_UDP_CKSUM},
+         {"tx-checksum-sctp", NETDEV_TX_OFFLOAD_SCTP_CKSUM},
+         {"tx-tcp-segmentation", NETDEV_TX_OFFLOAD_TCP_TSO},
+@@ -2725,6 +2726,7 @@ netdev_linux_get_speed_locked(struct netdev_linux *netdev,
+                               uint32_t *current, uint32_t *max)
+ {
+     if (netdev_linux_netnsid_is_remote(netdev)) {
++        *current = *max = 0;
+         return EOPNOTSUPP;
+     }
+ 
+@@ -2734,6 +2736,8 @@ netdev_linux_get_speed_locked(struct netdev_linux *netdev,
+                    ? 0 : netdev->current_speed;
+         *max = MIN(UINT32_MAX,
+                    netdev_features_to_bps(netdev->supported, 0) / 1000000ULL);
++    } else {
++        *current = *max = 0;
+     }
+     return netdev->get_features_error;
+ }
+@@ -6739,7 +6743,8 @@ get_stats_via_netlink(const struct netdev *netdev_, struct netdev_stats *stats)
+             struct rtnl_link_stats64 aligned_lstats;
+ 
+             if (!IS_PTR_ALIGNED(lstats)) {
+-                memcpy(&aligned_lstats, lstats, sizeof aligned_lstats);
++                memcpy(&aligned_lstats, (void *) lstats,
++                       sizeof aligned_lstats);
+                 lstats = &aligned_lstats;
+             }
+             netdev_stats_from_rtnl_link_stats64(stats, lstats);
+@@ -7199,13 +7204,6 @@ netdev_linux_prepend_vnet_hdr(struct dp_packet *b, int mtu)
+         /* The packet has good L4 checksum. No need to validate again. */
+         vnet->csum_start = vnet->csum_offset = (OVS_FORCE __virtio16) 0;
+         vnet->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+-
+-        /* It is possible that L4 is good but the IPv4 checksum isn't
+-         * complete. For example in the case of UDP encapsulation of an ARP
+-         * packet where the UDP checksum is 0. */
+-        if (dp_packet_hwol_l3_csum_ipv4_ol(b)) {
+-            dp_packet_ip_set_header_csum(b, false);
+-        }
+     } else if (dp_packet_hwol_tx_l4_checksum(b)) {
+         /* The csum calculation is offloaded. */
+         if (dp_packet_hwol_l4_is_tcp(b)) {
+diff --git a/lib/netdev-native-tnl.c b/lib/netdev-native-tnl.c
+index dee9ab344e..74e89ba09e 100644
+--- a/lib/netdev-native-tnl.c
++++ b/lib/netdev-native-tnl.c
+@@ -240,71 +240,31 @@ udp_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl,
+     return udp + 1;
+ }
+ 
+-/* Calculate inner l2 l3 l4 len as tunnel outer header is not
+- * encapsulated now. */
+ static void
+ dp_packet_tnl_ol_process(struct dp_packet *packet,
+                          const struct ovs_action_push_tnl *data)
+ {
+-    struct udp_header *udp = NULL;
+-    uint8_t opt_len = 0;
+-    struct eth_header *eth = NULL;
+     struct ip_header *ip = NULL;
+-    struct genevehdr *gnh = NULL;
+ 
+-    /* l2 l3 l4 len refer to inner len, tunnel outer
+-     * header is not encapsulated here. */
+     if (dp_packet_hwol_l4_mask(packet)) {
+         ip = dp_packet_l3(packet);
+ 
+-        if (ip->ip_proto == IPPROTO_TCP) {
+-            struct tcp_header *th = dp_packet_l4(packet);
+-            dp_packet_set_l4_len(packet, TCP_OFFSET(th->tcp_ctl) * 4);
+-        } else if (ip->ip_proto == IPPROTO_UDP) {
+-            dp_packet_set_l4_len(packet, UDP_HEADER_LEN);
+-        } else if (ip->ip_proto == IPPROTO_SCTP) {
+-            dp_packet_set_l4_len(packet, SCTP_HEADER_LEN);
+-        }
+-
+-        dp_packet_set_l3_len(packet, (char *) dp_packet_l4(packet) -
+-                                     (char *) dp_packet_l3(packet));
+-
+         if (data->tnl_type == OVS_VPORT_TYPE_GENEVE ||
+             data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
+ 
+             if (IP_VER(ip->ip_ihl_ver) == 4) {
+                 dp_packet_hwol_set_tx_ipv4(packet);
+-                dp_packet_hwol_tx_ip_csum(packet);
++                dp_packet_hwol_set_tx_ip_csum(packet);
+             } else if (IP_VER(ip->ip_ihl_ver) == 6) {
+                 dp_packet_hwol_set_tx_ipv6(packet);
+             }
+         }
++    }
+ 
+-        /* Attention please, tunnel inner l2 len is consist of udp header
+-         * len and tunnel header len and inner l2 len. */
+-        if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
+-            eth = (struct eth_header *)(data->header);
+-            ip = (struct ip_header *)(eth + 1);
+-            udp = (struct udp_header *)(ip + 1);
+-            gnh = (struct genevehdr *)(udp + 1);
+-            opt_len = gnh->opt_len * 4;
+-            dp_packet_hwol_set_tunnel_geneve(packet);
+-            dp_packet_set_l2_len(packet, (char *) dp_packet_l3(packet) -
+-                                         (char *) dp_packet_eth(packet) +
+-                                         GENEVE_BASE_HLEN + opt_len);
+-        } else if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
+-            dp_packet_hwol_set_tunnel_vxlan(packet);
+-            dp_packet_set_l2_len(packet, (char *) dp_packet_l3(packet) -
+-                                         (char *) dp_packet_eth(packet) +
+-                                         VXLAN_HLEN);
+-        }
+-    } else {
+-        /* Mark non-l4 packets as tunneled. */
+-        if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
+-            dp_packet_hwol_set_tunnel_geneve(packet);
+-        } else if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
+-            dp_packet_hwol_set_tunnel_vxlan(packet);
+-        }
++    if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
++        dp_packet_hwol_set_tunnel_geneve(packet);
++    } else if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
++        dp_packet_hwol_set_tunnel_vxlan(packet);
+     }
+ }
+ 
+@@ -932,9 +892,9 @@ netdev_srv6_build_header(const struct netdev *netdev,
+                          const struct netdev_tnl_build_header_params *params)
+ {
+     const struct netdev_tunnel_config *tnl_cfg;
++    union ovs_16aligned_in6_addr *s;
+     const struct in6_addr *segs;
+     struct srv6_base_hdr *srh;
+-    struct in6_addr *s;
+     ovs_be16 dl_type;
+     int nr_segs;
+     int i;
+@@ -978,8 +938,7 @@ netdev_srv6_build_header(const struct netdev *netdev,
+         return EOPNOTSUPP;
+     }
+ 
+-    s = ALIGNED_CAST(struct in6_addr *,
+-                     (char *) srh + sizeof *srh);
++    s = (union ovs_16aligned_in6_addr *) (srh + 1);
+     for (i = 0; i < nr_segs; i++) {
+         /* Segment list is written to the header in reverse order. */
+         memcpy(s, &segs[nr_segs - i - 1], sizeof *s);
+@@ -1068,7 +1027,10 @@ netdev_srv6_pop_header(struct dp_packet *packet)
+     }
+ 
+     pkt_metadata_init_tnl(md);
+-    netdev_tnl_ip_extract_tnl_md(packet, tnl, &hlen);
++    if (!netdev_tnl_ip_extract_tnl_md(packet, tnl, &hlen)) {
++        goto err;
++    }
++
+     dp_packet_reset_packet(packet, hlen);
+ 
+     return packet;
+diff --git a/lib/netdev-offload-tc.c b/lib/netdev-offload-tc.c
+index 921d523177..3be1c08d24 100644
+--- a/lib/netdev-offload-tc.c
++++ b/lib/netdev-offload-tc.c
+@@ -400,6 +400,8 @@ get_next_available_prio(ovs_be16 protocol)
+             return TC_RESERVED_PRIORITY_IPV4;
+         } else if (protocol == htons(ETH_P_IPV6)) {
+             return TC_RESERVED_PRIORITY_IPV6;
++        } else if (protocol == htons(ETH_P_8021Q)) {
++            return TC_RESERVED_PRIORITY_VLAN;
+         }
+     }
+ 
+diff --git a/lib/netlink-notifier.c b/lib/netlink-notifier.c
+index dfecb97789..7ea5a41818 100644
+--- a/lib/netlink-notifier.c
++++ b/lib/netlink-notifier.c
+@@ -223,7 +223,7 @@ nln_wait(struct nln *nln)
+     }
+ }
+ 
+-void
++void OVS_NO_SANITIZE_FUNCTION
+ nln_report(const struct nln *nln, void *change, int group)
+ {
+     struct nln_notifier *notifier;
+diff --git a/lib/netlink-protocol.h b/lib/netlink-protocol.h
+index 6eaa7035a4..e4bb28ac9f 100644
+--- a/lib/netlink-protocol.h
++++ b/lib/netlink-protocol.h
+@@ -155,6 +155,11 @@ enum {
+ #define NLA_TYPE_MASK       ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)
+ #endif
+ 
++/* Introduced in v4.4. */
++#ifndef NLM_F_DUMP_FILTERED
++#define NLM_F_DUMP_FILTERED 0x20
++#endif
++
+ /* These were introduced all together in 2.6.14.  (We want our programs to
+  * support the newer kernel features even if compiled with older headers.) */
+ #ifndef NETLINK_ADD_MEMBERSHIP
+@@ -168,6 +173,11 @@ enum {
+ #define NETLINK_LISTEN_ALL_NSID 8
+ #endif
+ 
++/* Strict checking of netlink arguments introduced in Linux kernel v4.20. */
++#ifndef NETLINK_GET_STRICT_CHK
++#define NETLINK_GET_STRICT_CHK 12
++#endif
++
+ /* These were introduced all together in 2.6.23.  (We want our programs to
+  * support the newer kernel features even if compiled with older headers.) */
+ #ifndef CTRL_ATTR_MCAST_GRP_MAX
+diff --git a/lib/netlink-socket.c b/lib/netlink-socket.c
+index 80da20d9f0..5cb1fc89ae 100644
+--- a/lib/netlink-socket.c
++++ b/lib/netlink-socket.c
+@@ -205,6 +205,15 @@ nl_sock_create(int protocol, struct nl_sock **sockp)
+         }
+     }
+ 
++    /* Strict checking is only supported for NETLINK_ROUTE. */
++    if (protocol == NETLINK_ROUTE
++        && setsockopt(sock->fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
++                      &one, sizeof one) < 0) {
++        VLOG_RL(&rl, errno == ENOPROTOOPT ? VLL_DBG : VLL_WARN,
++                "netlink: could not enable strict checking (%s)",
++                ovs_strerror(errno));
++    }
++
+     retval = get_socket_rcvbuf(sock->fd);
+     if (retval < 0) {
+         retval = -retval;
+diff --git a/lib/odp-execute-avx512.c b/lib/odp-execute-avx512.c
+index 50c48bfd47..09eb685cba 100644
+--- a/lib/odp-execute-avx512.c
++++ b/lib/odp-execute-avx512.c
+@@ -366,6 +366,8 @@ avx512_get_delta(__m256i old_header, __m256i new_header)
+                                           0xF, 0xF, 0xF, 0xF);
+     v_delta = _mm256_permutexvar_epi32(v_swap32a, v_delta);
+ 
++    v_delta = _mm256_hadd_epi32(v_delta, v_zeros);
++    v_delta = _mm256_shuffle_epi8(v_delta, v_swap16a);
+     v_delta = _mm256_hadd_epi32(v_delta, v_zeros);
+     v_delta = _mm256_hadd_epi16(v_delta, v_zeros);
+ 
+@@ -471,7 +473,7 @@ action_avx512_ipv4_set_addrs(struct dp_packet_batch *batch,
+          * (v_pkt_masked). */
+         __m256i v_new_hdr = _mm256_or_si256(v_key_shuf, v_pkt_masked);
+ 
+-        if (dp_packet_hwol_tx_ip_csum(packet)) {
++        if (dp_packet_hwol_l3_ipv4(packet)) {
+             dp_packet_ol_reset_ip_csum_good(packet);
+         } else {
+             ovs_be16 old_csum = ~nh->ip_csum;
+@@ -575,6 +577,9 @@ avx512_ipv6_sum_header(__m512i ip6_header)
+                                           0xF, 0xF, 0xF, 0xF);
+ 
+     v_delta = _mm256_permutexvar_epi32(v_swap32a, v_delta);
++
++    v_delta = _mm256_hadd_epi32(v_delta, v_zeros);
++    v_delta = _mm256_shuffle_epi8(v_delta, v_swap16a);
+     v_delta = _mm256_hadd_epi32(v_delta, v_zeros);
+     v_delta = _mm256_hadd_epi16(v_delta, v_zeros);
+ 
+@@ -736,6 +741,14 @@ action_avx512_set_ipv6(struct dp_packet_batch *batch, const struct nlattr *a)
+         }
+         /* Write back the modified IPv6 addresses. */
+         _mm512_mask_storeu_epi64((void *) nh, 0x1F, v_new_hdr);
++
++        /* Scalar method for setting IPv6 tclass field. */
++        if (key->ipv6_tclass) {
++            uint8_t old_tc = ntohl(get_16aligned_be32(&nh->ip6_flow)) >> 20;
++            uint8_t key_tc = key->ipv6_tclass | (old_tc & ~mask->ipv6_tclass);
++
++            packet_set_ipv6_tc(&nh->ip6_flow, key_tc);
++        }
+     }
+ }
+ #endif /* HAVE_AVX512VBMI */
+diff --git a/lib/odp-util.c b/lib/odp-util.c
+index 9306c9b4d4..5e4f34cf74 100644
+--- a/lib/odp-util.c
++++ b/lib/odp-util.c
+@@ -1797,8 +1797,8 @@ ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
+     } else if (ovs_scan_len(s, &n, "srv6(segments_left=%"SCNu8,
+                             &segments_left)) {
+         struct srv6_base_hdr *srh = (struct srv6_base_hdr *) (ip6 + 1);
++        union ovs_16aligned_in6_addr *segs;
+         char seg_s[IPV6_SCAN_LEN + 1];
+-        struct in6_addr *segs;
+         struct in6_addr seg;
+         uint8_t n_segs = 0;
+ 
+@@ -1821,7 +1821,7 @@ ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
+             return -EINVAL;
+         }
+ 
+-        segs = ALIGNED_CAST(struct in6_addr *, srh + 1);
++        segs = (union ovs_16aligned_in6_addr *) (srh + 1);
+         segs += segments_left;
+ 
+         while (ovs_scan_len(s, &n, IPV6_SCAN_FMT, seg_s)
+diff --git a/lib/ofp-prop.c b/lib/ofp-prop.c
+index 0a685750c1..0e54543bdd 100644
+--- a/lib/ofp-prop.c
++++ b/lib/ofp-prop.c
+@@ -21,6 +21,7 @@
+ #include "openvswitch/ofp-errors.h"
+ #include "openvswitch/ofp-prop.h"
+ #include "openvswitch/vlog.h"
++#include "unaligned.h"
+ #include "util.h"
+ #include "uuid.h"
+ 
+@@ -190,11 +191,12 @@ ofpprop_parse_be64(const struct ofpbuf *property, ovs_be64 *value)
+ enum ofperr
+ ofpprop_parse_be128(const struct ofpbuf *property, ovs_be128 *value)
+ {
+-    ovs_be128 *p = property->msg;
++    ovs_32aligned_be128 *p = property->msg;
++
+     if (ofpbuf_msgsize(property) != sizeof *p) {
+         return OFPERR_OFPBPC_BAD_LEN;
+     }
+-    *value = *p;
++    *value = get_32aligned_be128(p);
+     return 0;
+ }
+ 
+@@ -270,12 +272,13 @@ ofpprop_parse_u64(const struct ofpbuf *property, uint64_t *value)
+ enum ofperr
+ ofpprop_parse_u128(const struct ofpbuf *property, ovs_u128 *value)
+ {
+-    ovs_be128 *p = property->msg;
+-    if (ofpbuf_msgsize(property) != sizeof *p) {
+-        return OFPERR_OFPBPC_BAD_LEN;
++    enum ofperr error = ofpprop_parse_be128(property, (ovs_be128 *) value);
++
++    if (!error) {
++        *value = ntoh128(*(ovs_be128 *) value);
+     }
+-    *value = ntoh128(*p);
+-    return 0;
++
++    return error;
+ }
+ 
+ /* Attempts to parse 'property' as a property containing a UUID.  If
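
[Editor's note: the ofp-prop changes above avoid dereferencing a wire pointer as ovs_be128, which is only guaranteed 32-bit alignment inside an OpenFlow message. A generic sketch of the same idea without the OVS helpers, assuming glibc's <endian.h> be64toh():]

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    struct u128 { uint64_t hi, lo; };

    /* Read a big-endian 128-bit value from possibly misaligned memory. */
    static struct u128
    read_be128(const void *wire)
    {
        uint64_t words[2];
        struct u128 v;

        memcpy(words, wire, sizeof words);   /* no alignment assumption */
        v.hi = be64toh(words[0]);
        v.lo = be64toh(words[1]);
        return v;
    }
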
+diff --git a/lib/ofpbuf.c b/lib/ofpbuf.c
+index d3d42b4148..232ebeb97b 100644
+--- a/lib/ofpbuf.c
++++ b/lib/ofpbuf.c
+@@ -197,12 +197,12 @@ ofpbuf_clone_with_headroom(const struct ofpbuf *b, size_t headroom)
+     struct ofpbuf *new_buffer;
+ 
+     new_buffer = ofpbuf_clone_data_with_headroom(b->data, b->size, headroom);
+-    if (b->header) {
++    if (new_buffer->data && b->header) {
+         ptrdiff_t header_offset = (char *) b->header - (char *) b->data;
+ 
+         new_buffer->header = (char *) new_buffer->data + header_offset;
+     }
+-    if (b->msg) {
++    if (new_buffer->data && b->msg) {
+         ptrdiff_t msg_offset = (char *) b->msg - (char *) b->data;
+ 
+         new_buffer->msg = (char *) new_buffer->data + msg_offset;
+diff --git a/lib/ovs-rcu.c b/lib/ovs-rcu.c
+index 9e07d9bab6..49afcc55c9 100644
+--- a/lib/ovs-rcu.c
++++ b/lib/ovs-rcu.c
+@@ -326,7 +326,7 @@ ovsrcu_postpone__(void (*function)(void *aux), void *aux)
+     cb->aux = aux;
+ }
+ 
+-static bool
++static bool OVS_NO_SANITIZE_FUNCTION
+ ovsrcu_call_postponed(void)
+ {
+     struct ovsrcu_cbset *cbset;
+diff --git a/lib/ovs-router.c b/lib/ovs-router.c
+index ca014d80ed..3d84c9a30a 100644
+--- a/lib/ovs-router.c
++++ b/lib/ovs-router.c
+@@ -330,6 +330,20 @@ ovs_router_insert(uint32_t mark, const struct in6_addr *ip_dst, uint8_t plen,
+     }
+ }
+ 
++/* The same as 'ovs_router_insert', but it adds the route even if updates
++ * from the system routing table are disabled.  Used for unit tests. */
++void
++ovs_router_force_insert(uint32_t mark, const struct in6_addr *ip_dst,
++                        uint8_t plen, bool local, const char output_bridge[],
++                        const struct in6_addr *gw,
++                        const struct in6_addr *prefsrc)
++{
++    uint8_t priority = local ? plen + 64 : plen;
++
++    ovs_router_insert__(mark, priority, local, ip_dst, plen,
++                        output_bridge, gw, prefsrc);
++}
++
+ static void
+ rt_entry_delete__(const struct cls_rule *cr)
+ {
+diff --git a/lib/ovs-router.h b/lib/ovs-router.h
+index eb4ff85d9e..d7dc7e55f3 100644
+--- a/lib/ovs-router.h
++++ b/lib/ovs-router.h
+@@ -34,6 +34,11 @@ void ovs_router_insert(uint32_t mark, const struct in6_addr *ip_dst,
+                        uint8_t plen, bool local,
+                        const char output_bridge[], const struct in6_addr *gw,
+                        const struct in6_addr *prefsrc);
++void ovs_router_force_insert(uint32_t mark, const struct in6_addr *ip_dst,
++                             uint8_t plen, bool local,
++                             const char output_bridge[],
++                             const struct in6_addr *gw,
++                             const struct in6_addr *prefsrc);
+ void ovs_router_flush(void);
+ 
+ void ovs_router_disable_system_routing_table(void);
+diff --git a/lib/ovsdb-error.c b/lib/ovsdb-error.c
+index 9ad42b232d..56512fc28d 100644
+--- a/lib/ovsdb-error.c
++++ b/lib/ovsdb-error.c
+@@ -146,7 +146,7 @@ ovsdb_internal_error(struct ovsdb_error *inner_error,
+         ds_put_char(&ds, ')');
+     }
+ 
+-    ds_put_format(&ds, " (%s %s)", program_name, VERSION);
++    ds_put_format(&ds, " (%s %s)", program_name, VERSION VERSION_SUFFIX);
+ 
+     if (inner_error) {
+         char *s = ovsdb_error_to_string_free(inner_error);
+diff --git a/lib/ovsdb-idl.c b/lib/ovsdb-idl.c
+index ba720474b6..d92df28d19 100644
+--- a/lib/ovsdb-idl.c
++++ b/lib/ovsdb-idl.c
+@@ -3783,6 +3783,8 @@ ovsdb_idl_txn_delete(const struct ovsdb_idl_row *row_)
+     ovsdb_idl_remove_from_indexes(row_);
+     if (!row->old_datum) {
+         ovsdb_idl_row_unparse(row);
++        ovsdb_idl_destroy_all_map_op_lists(row);
++        ovsdb_idl_destroy_all_set_op_lists(row);
+         ovsdb_idl_row_clear_new(row);
+         ovs_assert(!row->prereqs);
+         hmap_remove(&row->table->rows, &row->hmap_node);
+diff --git a/lib/packets.c b/lib/packets.c
+index 5803d26f4a..edac30b77b 100644
+--- a/lib/packets.c
++++ b/lib/packets.c
+@@ -1299,7 +1299,7 @@ packet_set_ipv6_flow_label(ovs_16aligned_be32 *flow_label, ovs_be32 flow_key)
+     put_16aligned_be32(flow_label, new_label);
+ }
+ 
+-static void
++void
+ packet_set_ipv6_tc(ovs_16aligned_be32 *flow_label, uint8_t tc)
+ {
+     ovs_be32 old_label = get_16aligned_be32(flow_label);
+diff --git a/lib/packets.h b/lib/packets.h
+index 8b6994809f..a102f81634 100644
+--- a/lib/packets.h
++++ b/lib/packets.h
+@@ -1635,6 +1635,7 @@ void packet_set_ipv6_addr(struct dp_packet *packet, uint8_t proto,
+                           bool recalculate_csum);
+ void packet_set_ipv6_flow_label(ovs_16aligned_be32 *flow_label,
+                                 ovs_be32 flow_key);
++void packet_set_ipv6_tc(ovs_16aligned_be32 *flow_label, uint8_t tc);
+ void packet_set_tcp_port(struct dp_packet *, ovs_be16 src, ovs_be16 dst);
+ void packet_set_udp_port(struct dp_packet *, ovs_be16 src, ovs_be16 dst);
+ void packet_set_sctp_port(struct dp_packet *, ovs_be16 src, ovs_be16 dst);
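
[Editor's note: packet_set_ipv6_tc(), newly exported above, rewrites the 8 traffic-class bits that sit between the version and flow-label fields in the first 32 bits of the IPv6 header. The bit arithmetic, sketched on a host-order word:]

    #include <stdint.h>

    /* First IPv6 word, host order: version (bits 28-31),
     * traffic class (bits 20-27), flow label (bits 0-19). */
    static uint32_t
    ipv6_replace_tc(uint32_t vtc_flow, uint8_t tc)
    {
        vtc_flow &= ~(0xffU << 20);          /* clear the old traffic class */
        return vtc_flow | ((uint32_t) tc << 20);
    }
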
+diff --git a/lib/route-table.c b/lib/route-table.c
+index 9927dcc185..c6cb21394a 100644
+--- a/lib/route-table.c
++++ b/lib/route-table.c
+@@ -26,6 +26,7 @@
+ #include <linux/rtnetlink.h>
+ #include <net/if.h>
+ 
++#include "coverage.h"
+ #include "hash.h"
+ #include "netdev.h"
+ #include "netlink.h"
+@@ -44,6 +45,8 @@
+ 
+ VLOG_DEFINE_THIS_MODULE(route_table);
+ 
++COVERAGE_DEFINE(route_table_dump);
++
+ struct route_data {
+     /* Copied from struct rtmsg. */
+     unsigned char rtm_dst_len;
+@@ -80,9 +83,9 @@ static struct nln_notifier *name_notifier = NULL;
+ 
+ static bool route_table_valid = false;
+ 
+-static int route_table_reset(void);
++static void route_table_reset(void);
+ static void route_table_handle_msg(const struct route_table_msg *);
+-static int route_table_parse(struct ofpbuf *, struct route_table_msg *);
++static int route_table_parse(struct ofpbuf *, void *change);
+ static void route_table_change(const struct route_table_msg *, void *);
+ static void route_map_clear(void);
+ 
+@@ -107,8 +110,7 @@ route_table_init(void)
+     ovs_assert(!route6_notifier);
+ 
+     ovs_router_init();
+-    nln = nln_create(NETLINK_ROUTE, (nln_parse_func *) route_table_parse,
+-                     &rtmsg);
++    nln = nln_create(NETLINK_ROUTE, route_table_parse, &rtmsg);
+ 
+     route_notifier =
+         nln_notifier_create(nln, RTNLGRP_IPV4_ROUTE,
+@@ -153,26 +155,22 @@ route_table_wait(void)
+     ovs_mutex_unlock(&route_table_mutex);
+ }
+ 
+-static int
+-route_table_reset(void)
++static bool
++route_table_dump_one_table(unsigned char id)
+ {
+-    struct nl_dump dump;
+-    struct rtgenmsg *rtgenmsg;
+     uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
+     struct ofpbuf request, reply, buf;
+-
+-    route_map_clear();
+-    netdev_get_addrs_list_flush();
+-    route_table_valid = true;
+-    rt_change_seq++;
++    struct rtmsg *rq_msg;
++    bool filtered = true;
++    struct nl_dump dump;
+ 
+     ofpbuf_init(&request, 0);
+ 
+-    nl_msg_put_nlmsghdr(&request, sizeof *rtgenmsg, RTM_GETROUTE,
+-                        NLM_F_REQUEST);
++    nl_msg_put_nlmsghdr(&request, sizeof *rq_msg, RTM_GETROUTE, NLM_F_REQUEST);
+ 
+-    rtgenmsg = ofpbuf_put_zeros(&request, sizeof *rtgenmsg);
+-    rtgenmsg->rtgen_family = AF_UNSPEC;
++    rq_msg = ofpbuf_put_zeros(&request, sizeof *rq_msg);
++    rq_msg->rtm_family = AF_UNSPEC;
++    rq_msg->rtm_table = id;
+ 
+     nl_dump_start(&dump, NETLINK_ROUTE, &request);
+     ofpbuf_uninit(&request);
+@@ -182,19 +180,51 @@ route_table_reset(void)
+         struct route_table_msg msg;
+ 
+         if (route_table_parse(&reply, &msg)) {
++            struct nlmsghdr *nlmsghdr = nl_msg_nlmsghdr(&reply);
++
++            /* Older kernels do not support filtering. */
++            if (!(nlmsghdr->nlmsg_flags & NLM_F_DUMP_FILTERED)) {
++                filtered = false;
++            }
+             route_table_handle_msg(&msg);
+         }
+     }
+     ofpbuf_uninit(&buf);
++    nl_dump_done(&dump);
++
++    return filtered;
++}
++
++static void
++route_table_reset(void)
++{
++    unsigned char tables[] = {
++        RT_TABLE_DEFAULT,
++        RT_TABLE_MAIN,
++        RT_TABLE_LOCAL,
++    };
+ 
+-    return nl_dump_done(&dump);
++    route_map_clear();
++    netdev_get_addrs_list_flush();
++    route_table_valid = true;
++    rt_change_seq++;
++
++    COVERAGE_INC(route_table_dump);
++
++    for (size_t i = 0; i < ARRAY_SIZE(tables); i++) {
++        if (!route_table_dump_one_table(tables[i])) {
++            /* Got unfiltered reply, no need to dump further. */
++            break;
++        }
++    }
+ }
+ 
+ /* Return RTNLGRP_IPV4_ROUTE or RTNLGRP_IPV6_ROUTE on success, 0 on parse
+  * error. */
+ static int
+-route_table_parse(struct ofpbuf *buf, struct route_table_msg *change)
++route_table_parse(struct ofpbuf *buf, void *change_)
+ {
++    struct route_table_msg *change = change_;
+     bool parsed, ipv4 = false;
+ 
+     static const struct nl_policy policy[] = {
+@@ -203,6 +233,7 @@ route_table_parse(struct ofpbuf *buf, struct route_table_msg *change)
+         [RTA_GATEWAY] = { .type = NL_A_U32, .optional = true },
+         [RTA_MARK] = { .type = NL_A_U32, .optional = true },
+         [RTA_PREFSRC] = { .type = NL_A_U32, .optional = true },
++        [RTA_TABLE] = { .type = NL_A_U32, .optional = true },
+     };
+ 
+     static const struct nl_policy policy6[] = {
+@@ -211,6 +242,7 @@ route_table_parse(struct ofpbuf *buf, struct route_table_msg *change)
+         [RTA_MARK] = { .type = NL_A_U32, .optional = true },
+         [RTA_GATEWAY] = { .type = NL_A_IPV6, .optional = true },
+         [RTA_PREFSRC] = { .type = NL_A_IPV6, .optional = true },
++        [RTA_TABLE] = { .type = NL_A_U32, .optional = true },
+     };
+ 
+     struct nlattr *attrs[ARRAY_SIZE(policy)];
+@@ -232,6 +264,7 @@ route_table_parse(struct ofpbuf *buf, struct route_table_msg *change)
+ 
+     if (parsed) {
+         const struct nlmsghdr *nlmsg;
++        uint32_t table_id;
+         int rta_oif;      /* Output interface index. */
+ 
+         nlmsg = buf->data;
+@@ -247,6 +280,19 @@ route_table_parse(struct ofpbuf *buf, struct route_table_msg *change)
+             rtm->rtm_type != RTN_LOCAL) {
+             change->relevant = false;
+         }
++
++        table_id = rtm->rtm_table;
++        if (attrs[RTA_TABLE]) {
++            table_id = nl_attr_get_u32(attrs[RTA_TABLE]);
++        }
++        /* Do not consider changes in non-standard routing tables. */
++        if (table_id
++            && table_id != RT_TABLE_DEFAULT
++            && table_id != RT_TABLE_MAIN
++            && table_id != RT_TABLE_LOCAL) {
++            change->relevant = false;
++        }
++
+         change->nlmsg_type     = nlmsg->nlmsg_type;
+         change->rd.rtm_dst_len = rtm->rtm_dst_len + (ipv4 ? 96 : 0);
+         change->rd.local = rtm->rtm_type == RTN_LOCAL;
+@@ -312,7 +358,9 @@ static void
+ route_table_change(const struct route_table_msg *change OVS_UNUSED,
+                    void *aux OVS_UNUSED)
+ {
+-    route_table_valid = false;
++    if (!change || change->relevant) {
++        route_table_valid = false;
++    }
+ }
+ 
+ static void
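
[Editor's note: route_table_reset() above now requests one dump per standard routing table and stops early when the kernel's reply was not actually filtered — older kernels ignore the rtm_table filter and return everything on the first pass. The control flow, sketched with a hypothetical stub in place of the netlink plumbing:]

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical: dumps one table; returns true if the kernel honored
     * the table filter (reply carried NLM_F_DUMP_FILTERED). */
    bool dump_one_table(unsigned char table_id);

    static void
    dump_standard_tables(void)
    {
        static const unsigned char tables[] = { 253, 254, 255 };
        /* RT_TABLE_DEFAULT, RT_TABLE_MAIN, RT_TABLE_LOCAL */

        for (size_t i = 0; i < sizeof tables / sizeof tables[0]; i++) {
            if (!dump_one_table(tables[i])) {
                break;  /* unfiltered reply already covered every table */
            }
        }
    }
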
+diff --git a/lib/socket-util.c b/lib/socket-util.c
+index 3eb3a3816b..b3f541b6db 100644
+--- a/lib/socket-util.c
++++ b/lib/socket-util.c
+@@ -546,9 +546,15 @@ inet_parse_active(const char *target_, int default_port,
+     if (!host) {
+         VLOG_ERR("%s: host must be specified", target_);
+         ok = false;
++        if (dns_failure) {
++            *dns_failure = false;
++        }
+     } else if (!port && default_port < 0) {
+         VLOG_ERR("%s: port must be specified", target_);
+         ok = false;
++        if (dns_failure) {
++            *dns_failure = false;
++        }
+     } else {
+         ok = parse_sockaddr_components(ss, host, port, default_port,
+                                        target_, resolve_host, dns_failure);
+@@ -671,6 +677,9 @@ inet_parse_passive(const char *target_, int default_port,
+     if (!port && default_port < 0) {
+         VLOG_ERR("%s: port must be specified", target_);
+         ok = false;
++        if (dns_failure) {
++            *dns_failure = false;
++        }
+     } else {
+         ok = parse_sockaddr_components(ss, host, port, default_port,
+                                        target_, resolve_host, dns_failure);
+diff --git a/lib/table.c b/lib/table.c
+index 48d18b6518..b7addbf390 100644
+--- a/lib/table.c
++++ b/lib/table.c
+@@ -522,7 +522,7 @@ table_print_json__(const struct table *table, const struct table_style *style,
+         json_object_put_string(json, "caption", table->caption);
+     }
+     if (table->timestamp) {
+-        json_object_put_nocopy(
++        json_object_put(
+             json, "time",
+             json_string_create_nocopy(table_format_timestamp__()));
+     }
+diff --git a/lib/tc.c b/lib/tc.c
+index e9bcae4e4b..e55ba3b1bb 100644
+--- a/lib/tc.c
++++ b/lib/tc.c
+@@ -3056,17 +3056,17 @@ nl_msg_put_flower_rewrite_pedits(struct ofpbuf *request,
+                                  struct tc_action *action,
+                                  uint32_t action_pc)
+ {
+-    struct {
++    union {
+         struct tc_pedit sel;
+-        struct tc_pedit_key keys[MAX_PEDIT_OFFSETS];
+-        struct tc_pedit_key_ex keys_ex[MAX_PEDIT_OFFSETS];
+-    } sel = {
+-        .sel = {
+-            .nkeys = 0
+-        }
+-    };
++        uint8_t buffer[sizeof(struct tc_pedit)
++                       + MAX_PEDIT_OFFSETS * sizeof(struct tc_pedit_key)];
++    } sel;
++    struct tc_pedit_key_ex keys_ex[MAX_PEDIT_OFFSETS];
+     int i, j, err;
+ 
++    memset(&sel, 0, sizeof sel);
++    memset(keys_ex, 0, sizeof keys_ex);
++
+     for (i = 0; i < ARRAY_SIZE(flower_pedit_map); i++) {
+         struct flower_key_to_pedit *m = &flower_pedit_map[i];
+         struct tc_pedit_key *pedit_key = NULL;
+@@ -3100,8 +3100,8 @@ nl_msg_put_flower_rewrite_pedits(struct ofpbuf *request,
+                 return EOPNOTSUPP;
+             }
+ 
+-            pedit_key = &sel.keys[sel.sel.nkeys];
+-            pedit_key_ex = &sel.keys_ex[sel.sel.nkeys];
++            pedit_key = &sel.sel.keys[sel.sel.nkeys];
++            pedit_key_ex = &keys_ex[sel.sel.nkeys];
+             pedit_key_ex->cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+             pedit_key_ex->htype = m->htype;
+             pedit_key->off = cur_offset;
+@@ -3121,7 +3121,7 @@ nl_msg_put_flower_rewrite_pedits(struct ofpbuf *request,
+             }
+         }
+     }
+-    nl_msg_put_act_pedit(request, &sel.sel, sel.keys_ex,
++    nl_msg_put_act_pedit(request, &sel.sel, keys_ex,
+                          flower->csum_update_flags ? TC_ACT_PIPE : action_pc);
+ 
+     return 0;
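The tc.c rewrite replaces a struct overlay with a union sized for the flexible array member of struct tc_pedit, so the keys are written through the flexible array itself.  A self-contained sketch of that storage pattern with illustrative types (a struct with a flexible array member inside a union is a widely supported compiler behavior):

    #include <stdio.h>
    #include <string.h>

    #define MAX_KEYS 8

    struct header {
        int nkeys;
        int keys[];        /* Flexible array member. */
    };

    int
    main(void)
    {
        union {
            struct header hdr;
            unsigned char buffer[sizeof(struct header)
                                 + MAX_KEYS * sizeof(int)];
        } sel;

        memset(&sel, 0, sizeof sel);
        sel.hdr.keys[sel.hdr.nkeys++] = 42;  /* In bounds: 'buffer' provides
                                              * the backing storage. */
        printf("nkeys=%d first=%d\n", sel.hdr.nkeys, sel.hdr.keys[0]);
        return 0;
    }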
+diff --git a/lib/tc.h b/lib/tc.h
+index fdbcf4b7cb..8442c8d8b8 100644
+--- a/lib/tc.h
++++ b/lib/tc.h
+@@ -51,6 +51,7 @@ enum tc_flower_reserved_prio {
+     TC_RESERVED_PRIORITY_POLICE,
+     TC_RESERVED_PRIORITY_IPV4,
+     TC_RESERVED_PRIORITY_IPV6,
++    TC_RESERVED_PRIORITY_VLAN,
+     __TC_RESERVED_PRIORITY_MAX
+ };
+ #define TC_RESERVED_PRIORITY_MAX (__TC_RESERVED_PRIORITY_MAX -1)
+diff --git a/lib/util.c b/lib/util.c
+index 3fb3a4b40f..3357717ca7 100644
+--- a/lib/util.c
++++ b/lib/util.c
+@@ -224,6 +224,8 @@ xvasprintf(const char *format, va_list args)
+     size_t needed;
+     char *s;
+ 
++    ovs_assert(format);
++
+     va_copy(args2, args);
+     needed = vsnprintf(NULL, 0, format, args);
+ 
+@@ -617,12 +619,14 @@ ovs_set_program_name(const char *argv0, const char *version)
+     program_name = basename;
+ 
+     free(program_version);
+-    if (!strcmp(version, VERSION)) {
+-        program_version = xasprintf("%s (Open vSwitch) "VERSION"\n",
++    if (!strcmp(version, VERSION VERSION_SUFFIX)) {
++        program_version = xasprintf("%s (Open vSwitch) "VERSION
++                                    VERSION_SUFFIX"\n",
+                                     program_name);
+     } else {
+         program_version = xasprintf("%s %s\n"
+-                                    "Open vSwitch Library "VERSION"\n",
++                                    "Open vSwitch Library "VERSION
++                                    VERSION_SUFFIX"\n",
+                                     program_name, version);
+     }
+ }
+diff --git a/lib/vconn.c b/lib/vconn.c
+index e9603432d2..4b1c262eaa 100644
+--- a/lib/vconn.c
++++ b/lib/vconn.c
+@@ -1017,6 +1017,8 @@ recv_flow_stats_reply(struct vconn *vconn, ovs_be32 send_xid,
+                 VLOG_WARN_RL(&rl, "received bad reply: %s",
+                              ofp_to_string(reply->data, reply->size,
+                                            NULL, NULL, 1));
++                ofpbuf_delete(reply);
++                *replyp = NULL;
+                 return EPROTO;
+             }
+         }
+@@ -1031,9 +1033,9 @@ recv_flow_stats_reply(struct vconn *vconn, ovs_be32 send_xid,
+         case EOF:
+             more = ofpmp_more(reply->header);
+             ofpbuf_delete(reply);
++            *replyp = NULL;
+             reply = NULL;
+             if (!more) {
+-                *replyp = NULL;
+                 return EOF;
+             }
+             break;
+@@ -1041,6 +1043,8 @@ recv_flow_stats_reply(struct vconn *vconn, ovs_be32 send_xid,
+         default:
+             VLOG_WARN_RL(&rl, "parse error in reply (%s)",
+                          ofperr_to_string(retval));
++            ofpbuf_delete(reply);
++            *replyp = NULL;
+             return EPROTO;
+         }
+     }
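The vconn.c hunks restore an ownership invariant on error paths: a function returning a buffer through '*replyp' must free any buffer it still owns and null the out-pointer before every error return.  A minimal sketch of the pattern, with invented names:

    #include <errno.h>
    #include <stdlib.h>

    struct reply { int size; };

    static int
    recv_reply(struct reply **replyp, int simulate_error)
    {
        struct reply *reply = calloc(1, sizeof *reply);
        if (!reply) {
            *replyp = NULL;
            return ENOMEM;
        }
        if (simulate_error) {
            free(reply);     /* Release ownership... */
            *replyp = NULL;  /* ...and leave no dangling pointer behind. */
            return EPROTO;
        }
        *replyp = reply;     /* Success: the caller now owns the buffer. */
        return 0;
    }

    int
    main(void)
    {
        struct reply *reply;
        return recv_reply(&reply, 1) == EPROTO ? 0 : 1;
    }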
+diff --git a/lib/vlog.c b/lib/vlog.c
+index b2653142f3..59b524b097 100644
+--- a/lib/vlog.c
++++ b/lib/vlog.c
+@@ -29,6 +29,7 @@
+ #include <time.h>
+ #include <unistd.h>
+ #include "async-append.h"
++#include "backtrace.h"
+ #include "coverage.h"
+ #include "dirs.h"
+ #include "openvswitch/dynamic-string.h"
+@@ -410,10 +411,10 @@ vlog_set_log_file__(char *new_log_file_name)
+ 
+     /* Close old log file, if any. */
+     ovs_mutex_lock(&log_file_mutex);
++    async_append_destroy(log_writer);
+     if (log_fd >= 0) {
+         close(log_fd);
+     }
+-    async_append_destroy(log_writer);
+     free(log_file_name);
+ 
+     /* Install new log file. */
+@@ -1274,8 +1275,9 @@ vlog_fatal(const struct vlog_module *module, const char *message, ...)
+     va_end(args);
+ }
+ 
+-/* Logs 'message' to 'module' at maximum verbosity, then calls abort().  Always
+- * writes the message to stderr, even if the console destination is disabled.
++/* Attempts to log a stack trace, logs 'message' to 'module' at maximum
++ * verbosity, then calls abort().  Always writes the message to stderr, even
++ * if the console destination is disabled.
+  *
+  * Choose this function instead of vlog_fatal_valist() if the daemon monitoring
+  * facility should automatically restart the current daemon.  */
+@@ -1289,6 +1291,10 @@ vlog_abort_valist(const struct vlog_module *module_,
+      * message written by the later ovs_abort_valist(). */
+     module->levels[VLF_CONSOLE] = VLL_OFF;
+ 
++    /* Print the stack trace before the 'message', because the 'message'
++     * will flush the async log queue (VLL_EMER).  With a different order
++     * we would need to flush the queue manually again. */
++    log_backtrace();
+     vlog_valist(module, VLL_EMER, message, args);
+     ovs_abort_valist(0, message, args);
+ }
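The reordering in vlog_set_log_file__() matters because destroying the async writer may still flush buffered data to the descriptor, so it has to happen before close().  A toy sketch of that teardown-ordering rule (the writer type is a stand-in, not OVS's async-append API):

    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    struct writer { int fd; char pending[64]; };

    static void
    writer_destroy(struct writer *w)  /* Flushes, then frees. */
    {
        if (w) {
            ssize_t n = write(w->fd, w->pending, strlen(w->pending));
            (void) n;                 /* Demo only: ignore short writes. */
            free(w);
        }
    }

    int
    main(void)
    {
        int fd = open("/tmp/vlog-order-demo.log",
                      O_WRONLY | O_CREAT | O_TRUNC, 0600);
        struct writer *w = calloc(1, sizeof *w);

        w->fd = fd;
        strcpy(w->pending, "buffered line\n");
        writer_destroy(w);  /* Correct: flush while 'fd' is still open. */
        close(fd);          /* Closing first would lose the pending data. */
        return 0;
    }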
+diff --git a/m4/ax_check_openssl.m4 b/m4/ax_check_openssl.m4
+index 281d4dc65e..faa5babde2 100644
+--- a/m4/ax_check_openssl.m4
++++ b/m4/ax_check_openssl.m4
+@@ -81,7 +81,8 @@ AC_DEFUN([AX_CHECK_OPENSSL], [
+                 SSL_INCLUDES="-I$ssldir/include"
+                 SSL_LDFLAGS="-L$ssldir/lib"
+                 if test "$WIN32" = "yes"; then
+-                    SSL_LIBS="-lssleay32 -llibeay32"
++                    SSL_LDFLAGS="$SSL_LDFLAGS -L$ssldir/lib/VC/x64/MT"
++                    SSL_LIBS="-llibssl -llibcrypto"
+                     SSL_DIR=/$(echo ${ssldir} | ${SED} -e 's/://')
+                 else
+                     SSL_LIBS="-lssl -lcrypto"
+diff --git a/ofproto/bond.c b/ofproto/bond.c
+index cfdf44f854..45a36fabb9 100644
+--- a/ofproto/bond.c
++++ b/ofproto/bond.c
+@@ -186,13 +186,14 @@ static struct bond_member *choose_output_member(const struct bond *,
+                                                 struct flow_wildcards *,
+                                                 uint16_t vlan)
+     OVS_REQ_RDLOCK(rwlock);
+-static void update_recirc_rules__(struct bond *);
++static void update_recirc_rules(struct bond *) OVS_REQ_WRLOCK(rwlock);
+ static bool bond_may_recirc(const struct bond *);
+ static void bond_update_post_recirc_rules__(struct bond *, bool force)
+     OVS_REQ_WRLOCK(rwlock);
+ static bool bond_is_falling_back_to_ab(const struct bond *);
+ static void bond_add_lb_output_buckets(const struct bond *);
+ static void bond_del_lb_output_buckets(const struct bond *);
++static bool bond_is_balanced(const struct bond *bond) OVS_REQ_RDLOCK(rwlock);
+ 
+ 
+ /* Attempts to parse 's' as the name of a bond balancing mode.  If successful,
+@@ -246,7 +247,7 @@ bond_create(const struct bond_settings *s, struct ofproto_dpif *ofproto)
+     ovs_refcount_init(&bond->ref_cnt);
+     hmap_init(&bond->pr_rule_ops);
+ 
+-    bond->active_member_mac = eth_addr_zero;
++    bond->active_member_mac = s->active_member_mac;
+     bond->active_member_changed = false;
+     bond->primary = NULL;
+ 
+@@ -299,7 +300,10 @@ bond_unref(struct bond *bond)
+     }
+     free(bond->hash);
+     bond->hash = NULL;
+-    update_recirc_rules__(bond);
++
++    ovs_rwlock_wrlock(&rwlock);
++    update_recirc_rules(bond);
++    ovs_rwlock_unlock(&rwlock);
+ 
+     hmap_destroy(&bond->pr_rule_ops);
+     free(bond->primary);
+@@ -331,17 +335,8 @@ add_pr_rule(struct bond *bond, const struct match *match,
+     hmap_insert(&bond->pr_rule_ops, &pr_op->hmap_node, hash);
+ }
+ 
+-/* This function should almost never be called directly.
+- * 'update_recirc_rules()' should be called instead.  Since
+- * this function modifies 'bond->pr_rule_ops', it is only
+- * safe when 'rwlock' is held.
+- *
+- * However, when the 'bond' is the only reference in the system,
+- * calling this function avoid acquiring lock only to satisfy
+- * lock annotation. Currently, only 'bond_unref()' calls
+- * this function directly.  */
+ static void
+-update_recirc_rules__(struct bond *bond)
++update_recirc_rules(struct bond *bond) OVS_REQ_WRLOCK(rwlock)
+ {
+     struct match match;
+     struct bond_pr_rule_op *pr_op;
+@@ -407,6 +402,15 @@ update_recirc_rules__(struct bond *bond)
+ 
+                 VLOG_ERR("failed to remove post recirculation flow %s", err_s);
+                 free(err_s);
++            } else if (bond->hash) {
++                /* If the flow deletion failed, a subsequent call to
++                 * ofproto_dpif_add_internal_flow() would just modify the
++                 * flow, preserving its statistics.  Therefore, only reset
++                 * the entry's byte counter if the deletion succeeds. */
++                uint32_t hash = pr_op->match.flow.dp_hash & BOND_MASK;
++                struct bond_entry *entry = &bond->hash[hash];
++
++                entry->pr_tx_bytes = 0;
+             }
+ 
+             hmap_remove(&bond->pr_rule_ops, &pr_op->hmap_node);
+@@ -421,12 +425,6 @@ update_recirc_rules__(struct bond *bond)
+     ofpbuf_uninit(&ofpacts);
+ }
+ 
+-static void
+-update_recirc_rules(struct bond *bond)
+-    OVS_REQ_RDLOCK(rwlock)
+-{
+-    update_recirc_rules__(bond);
+-}
+ 
+ /* Updates 'bond''s overall configuration to 's'.
+  *
+@@ -552,6 +550,7 @@ bond_find_member_by_mac(const struct bond *bond, const struct eth_addr mac)
+ 
+ static void
+ bond_active_member_changed(struct bond *bond)
++    OVS_REQ_WRLOCK(rwlock)
+ {
+     if (bond->active_member) {
+         struct eth_addr mac;
+@@ -561,6 +560,9 @@ bond_active_member_changed(struct bond *bond)
+         bond->active_member_mac = eth_addr_zero;
+     }
+     bond->active_member_changed = true;
++    if (!bond_is_balanced(bond)) {
++        bond->bond_revalidate = true;
++    }
+     seq_change(connectivity_seq_get());
+ }
+ 
+@@ -1124,7 +1126,7 @@ bond_get_recirc_id_and_hash_basis(struct bond *bond, uint32_t *recirc_id,
+ /* Rebalancing. */
+ 
+ static bool
+-bond_is_balanced(const struct bond *bond) OVS_REQ_RDLOCK(rwlock)
++bond_is_balanced(const struct bond *bond)
+ {
+     return bond->rebalance_interval
+         && (bond->balance == BM_SLB || bond->balance == BM_TCP)
+@@ -1728,7 +1730,6 @@ bond_unixctl_set_active_member(struct unixctl_conn *conn,
+     }
+ 
+     if (bond->active_member != member) {
+-        bond->bond_revalidate = true;
+         bond->active_member = member;
+         VLOG_INFO("bond %s: active member is now %s",
+                   bond->name, member->name);
+diff --git a/ofproto/ofproto-dpif-mirror.c b/ofproto/ofproto-dpif-mirror.c
+index 343b75f0ed..45024580aa 100644
+--- a/ofproto/ofproto-dpif-mirror.c
++++ b/ofproto/ofproto-dpif-mirror.c
+@@ -265,7 +265,7 @@ mirror_set(struct mbridge *mbridge, void *aux, const char *name,
+     {
+         hmapx_destroy(&srcs_map);
+         hmapx_destroy(&dsts_map);
+-        return 0;
++        return ECANCELED;
+     }
+ 
+     /* XXX: Not sure if these need to be thread safe. */
+diff --git a/ofproto/ofproto-dpif-trace.c b/ofproto/ofproto-dpif-trace.c
+index b86e7fe07e..e43d9f88c9 100644
+--- a/ofproto/ofproto-dpif-trace.c
++++ b/ofproto/ofproto-dpif-trace.c
+@@ -102,7 +102,7 @@ oftrace_add_recirc_node(struct ovs_list *recirc_queue,
+     node->flow = *flow;
+     node->flow.recirc_id = recirc_id;
+     node->flow.ct_zone = zone;
+-    node->nat_act = ofn;
++    node->nat_act = ofn ? xmemdup(ofn, sizeof *ofn) : NULL;
+     node->packet = packet ? dp_packet_clone(packet) : NULL;
+ 
+     return true;
+@@ -113,6 +113,7 @@ oftrace_recirc_node_destroy(struct oftrace_recirc_node *node)
+ {
+     if (node) {
+         recirc_free_id(node->recirc_id);
++        free(node->nat_act);
+         dp_packet_delete(node->packet);
+         free(node);
+     }
+@@ -845,17 +846,35 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
+               bool names)
+ {
+     struct ovs_list recirc_queue = OVS_LIST_INITIALIZER(&recirc_queue);
++    int recirculations = 0;
++
+     ofproto_trace__(ofproto, flow, packet, &recirc_queue,
+                     ofpacts, ofpacts_len, output, names);
+ 
+     struct oftrace_recirc_node *recirc_node;
+     LIST_FOR_EACH_POP (recirc_node, node, &recirc_queue) {
++        if (recirculations++ > 4096) {
++            ds_put_cstr(output, "\n\n");
++            ds_put_char_multiple(output, '=', 79);
++            ds_put_cstr(output, "\nTrace reached the recirculation limit."
++                                "  Sopping the trace here.");
++            ds_put_format(output,
++                          "\nQueued but not processed: %"PRIuSIZE
++                          " recirculations.",
++                          ovs_list_size(&recirc_queue) + 1);
++            oftrace_recirc_node_destroy(recirc_node);
++            break;
++        }
+         ofproto_trace_recirc_node(recirc_node, next_ct_states, output);
+         ofproto_trace__(ofproto, &recirc_node->flow, recirc_node->packet,
+                         &recirc_queue, ofpacts, ofpacts_len, output,
+                         names);
+         oftrace_recirc_node_destroy(recirc_node);
+     }
++    /* Destroy remaining recirculation nodes, if any. */
++    LIST_FOR_EACH_POP (recirc_node, node, &recirc_queue) {
++        oftrace_recirc_node_destroy(recirc_node);
++    }
+ }
+ 
+ void
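The recirculation cap above is a bounded-work-queue pattern: process items up to a hard limit, then drain and free whatever remains so nothing leaks.  A standalone sketch using a plain singly linked list rather than ovs_list:

    #include <stdio.h>
    #include <stdlib.h>

    #define RECIRC_LIMIT 4096

    struct node { struct node *next; };

    int
    main(void)
    {
        /* Build a queue longer than the limit. */
        struct node *head = NULL;
        for (int i = 0; i < RECIRC_LIMIT + 10; i++) {
            struct node *n = malloc(sizeof *n);
            n->next = head;
            head = n;
        }

        int processed = 0;
        while (head && processed < RECIRC_LIMIT) {
            struct node *n = head;
            head = n->next;
            /* ... real work would happen here ... */
            free(n);
            processed++;
        }
        if (head) {
            printf("limit reached after %d items; draining\n", processed);
        }
        while (head) {             /* Drain: free without processing. */
            struct node *n = head;
            head = n->next;
            free(n);
        }
        return 0;
    }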
+diff --git a/ofproto/ofproto-dpif-trace.h b/ofproto/ofproto-dpif-trace.h
+index f579a5ca46..f023b10cdf 100644
+--- a/ofproto/ofproto-dpif-trace.h
++++ b/ofproto/ofproto-dpif-trace.h
+@@ -73,7 +73,7 @@ struct oftrace_recirc_node {
+     uint32_t recirc_id;
+     struct flow flow;
+     struct dp_packet *packet;
+-    const struct ofpact_nat *nat_act;
++    struct ofpact_nat *nat_act;
+ };
+ 
+ /* A node within a next_ct_states list. */
+diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
+index b5cbeed878..f122b47f1c 100644
+--- a/ofproto/ofproto-dpif-upcall.c
++++ b/ofproto/ofproto-dpif-upcall.c
+@@ -57,8 +57,10 @@ COVERAGE_DEFINE(dumped_inconsistent_flow);
+ COVERAGE_DEFINE(dumped_new_flow);
+ COVERAGE_DEFINE(handler_duplicate_upcall);
+ COVERAGE_DEFINE(revalidate_missed_dp_flow);
++COVERAGE_DEFINE(revalidate_missing_dp_flow);
+ COVERAGE_DEFINE(ukey_dp_change);
+ COVERAGE_DEFINE(ukey_invalid_stat_reset);
++COVERAGE_DEFINE(ukey_replace_contention);
+ COVERAGE_DEFINE(upcall_flow_limit_grew);
+ COVERAGE_DEFINE(upcall_flow_limit_hit);
+ COVERAGE_DEFINE(upcall_flow_limit_kill);
+@@ -301,6 +303,7 @@ struct udpif_key {
+     uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
+     uint64_t reval_seq OVS_GUARDED;           /* Tracks udpif->reval_seq. */
+     enum ukey_state state OVS_GUARDED;        /* Tracks ukey lifetime. */
++    uint32_t missed_dumps OVS_GUARDED;        /* Missed consecutive dumps. */
+ 
+     /* 'state' debug information. */
+     unsigned int state_thread OVS_GUARDED;    /* Thread that transitions. */
+@@ -1428,8 +1431,6 @@ upcall_cb(const struct dp_packet *packet, const struct flow *flow, ovs_u128 *ufi
+     }
+ 
+     if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
+-        static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 1);
+-        VLOG_WARN_RL(&rll, "upcall_cb failure: ukey installation fails");
+         error = ENOSPC;
+     }
+ out:
+@@ -1927,15 +1928,15 @@ try_ukey_replace(struct umap *umap, struct udpif_key *old_ukey,
+             transition_ukey(old_ukey, UKEY_DELETED);
+             transition_ukey(new_ukey, UKEY_VISIBLE);
+             replaced = true;
++            COVERAGE_INC(upcall_ukey_replace);
++        } else {
++            COVERAGE_INC(handler_duplicate_upcall);
+         }
+         ovs_mutex_unlock(&old_ukey->mutex);
+-    }
+-
+-    if (replaced) {
+-        COVERAGE_INC(upcall_ukey_replace);
+     } else {
+-        COVERAGE_INC(handler_duplicate_upcall);
++        COVERAGE_INC(ukey_replace_contention);
+     }
++
+     return replaced;
+ }
+ 
+@@ -2973,6 +2974,7 @@ revalidator_sweep__(struct revalidator *revalidator, bool purge)
+             /* Handler threads could be holding a ukey lock while it installs a
+              * new flow, so don't hang around waiting for access to it. */
+             if (ovs_mutex_trylock(&ukey->mutex)) {
++                COVERAGE_INC(upcall_ukey_contention);
+                 continue;
+             }
+             ukey_state = ukey->state;
+@@ -2995,6 +2997,20 @@ revalidator_sweep__(struct revalidator *revalidator, bool purge)
+                     result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
+                                              reval_seq, &recircs);
+                 }
++
++                if (ukey->dump_seq != dump_seq) {
++                    ukey->missed_dumps++;
++                    if (ukey->missed_dumps >= 4) {
++                        /* If the flow was not dumped for 4 revalidator rounds,
++                         * we can assume the datapath flow no longer exists
++                         * and the ukey should be deleted. */
++                        COVERAGE_INC(revalidate_missing_dp_flow);
++                        result = UKEY_DELETE;
++                    }
++                } else {
++                    ukey->missed_dumps = 0;
++                }
++
+                 if (result != UKEY_KEEP) {
+                     /* Clears 'recircs' if filled by revalidate_ukey(). */
+                     reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
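A compact sketch of the missed-dump heuristic added above, keeping the threshold of four consecutive rounds but with simplified types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry {
        uint64_t dump_seq;      /* Last global sequence it was seen in. */
        uint32_t missed_dumps;  /* Consecutive rounds it was not seen. */
    };

    static bool
    entry_should_delete(struct entry *e, uint64_t global_dump_seq)
    {
        if (e->dump_seq != global_dump_seq) {
            return ++e->missed_dumps >= 4;
        }
        e->missed_dumps = 0;
        return false;
    }

    int
    main(void)
    {
        struct entry e = { .dump_seq = 1 };
        for (uint64_t seq = 2; seq <= 6; seq++) {
            printf("round %llu: delete=%d\n", (unsigned long long) seq,
                   entry_should_delete(&e, seq));
        }
        return 0;
    }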
+diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
+index 1cf4d5f7c9..1b5ada8305 100644
+--- a/ofproto/ofproto-dpif-xlate.c
++++ b/ofproto/ofproto-dpif-xlate.c
+@@ -677,6 +677,7 @@ static size_t count_skb_priorities(const struct xport *);
+ static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
+                                    uint8_t *dscp);
+ 
++static bool xlate_resubmit_resource_check(struct xlate_ctx *);
+ static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
+ static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
+ static void xlate_xport_init(struct xlate_cfg *, struct xport *);
+@@ -3655,6 +3656,10 @@ compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
+     struct ofpact_output output;
+     struct flow flow;
+ 
++    if (!xlate_resubmit_resource_check(ctx)) {
++        return 0;
++    }
++
+     ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
+     flow_extract(packet, &flow);
+     flow.in_port.ofp_port = out_dev->ofp_port;
+@@ -3663,7 +3668,8 @@ compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
+ 
+     return ofproto_dpif_execute_actions__(xbridge->ofproto, version, &flow,
+                                           NULL, &output.ofpact, sizeof output,
+-                                          ctx->depth, ctx->resubmits, packet);
++                                          ctx->depth + 1, ctx->resubmits,
++                                          packet);
+ }
+ 
+ static void
+@@ -3815,6 +3821,8 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
+ 
+     if (flow->tunnel.ip_src) {
+         in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src);
++    } else if (ipv6_addr_is_set(&flow->tunnel.ipv6_src)) {
++        s_ip6 = flow->tunnel.ipv6_src;
+     }
+ 
+     err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev);
+@@ -5078,10 +5086,37 @@ put_controller_user_action(struct xlate_ctx *ctx,
+                            bool dont_send, bool continuation,
+                            uint32_t recirc_id, int len,
+                            enum ofp_packet_in_reason reason,
++                           uint32_t provider_meter_id,
+                            uint16_t controller_id)
+ {
+     struct user_action_cookie cookie;
+ 
++    /* If the controller action didn't request a meter (indicated by a
++     * 'meter_id' argument equal to NX_CTLR_NO_METER), see if one was
++     * configured through the "controller" virtual meter.
++     *
++     * Internally, ovs-vswitchd uses UINT32_MAX to indicate no meter is
++     * configured. */
++    uint32_t meter_id;
++    if (provider_meter_id == UINT32_MAX) {
++        meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
++    } else {
++        meter_id = provider_meter_id;
++    }
++
++    size_t offset;
++    size_t ac_offset;
++    if (meter_id != UINT32_MAX) {
++        /* If controller meter is configured, generate
++         * clone(meter,userspace) action. */
++        offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
++        nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
++                       UINT32_MAX);
++        ac_offset = nl_msg_start_nested(ctx->odp_actions,
++                                        OVS_SAMPLE_ATTR_ACTIONS);
++        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
++    }
++
+     memset(&cookie, 0, sizeof cookie);
+     cookie.type = USER_ACTION_COOKIE_CONTROLLER;
+     cookie.ofp_in_port = OFPP_NONE,
+@@ -5099,6 +5134,11 @@ put_controller_user_action(struct xlate_ctx *ctx,
+     uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
+     odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
+                              false, ctx->odp_actions, NULL);
++
++    if (meter_id != UINT32_MAX) {
++        nl_msg_end_nested(ctx->odp_actions, ac_offset);
++        nl_msg_end_nested(ctx->odp_actions, offset);
++    }
+ }
+ 
+ static void
+@@ -5143,32 +5183,6 @@ xlate_controller_action(struct xlate_ctx *ctx, int len,
+     }
+     recirc_refs_add(&ctx->xout->recircs, recirc_id);
+ 
+-    /* If the controller action didn't request a meter (indicated by a
+-     * 'meter_id' argument other than NX_CTLR_NO_METER), see if one was
+-     * configured through the "controller" virtual meter.
+-     *
+-     * Internally, ovs-vswitchd uses UINT32_MAX to indicate no meter is
+-     * configured. */
+-    uint32_t meter_id;
+-    if (provider_meter_id == UINT32_MAX) {
+-        meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
+-    } else {
+-        meter_id = provider_meter_id;
+-    }
+-
+-    size_t offset;
+-    size_t ac_offset;
+-    if (meter_id != UINT32_MAX) {
+-        /* If controller meter is configured, generate clone(meter, userspace)
+-         * action. */
+-        offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
+-        nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
+-                       UINT32_MAX);
+-        ac_offset = nl_msg_start_nested(ctx->odp_actions,
+-                                        OVS_SAMPLE_ATTR_ACTIONS);
+-        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
+-    }
+-
+     /* Generate the datapath flows even if we don't send the packet-in
+      * so that debugging more closely represents normal state. */
+     bool dont_send = false;
+@@ -5176,12 +5190,7 @@ xlate_controller_action(struct xlate_ctx *ctx, int len,
+         dont_send = true;
+     }
+     put_controller_user_action(ctx, dont_send, false, recirc_id, len,
+-                               reason, controller_id);
+-
+-    if (meter_id != UINT32_MAX) {
+-        nl_msg_end_nested(ctx->odp_actions, ac_offset);
+-        nl_msg_end_nested(ctx->odp_actions, offset);
+-    }
++                               reason, provider_meter_id, controller_id);
+ }
+ 
+ /* Creates a frozen state, and allocates a unique recirc id for the given
+@@ -5233,6 +5242,7 @@ finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
+         put_controller_user_action(ctx, false, true, recirc_id,
+                                    ctx->pause->max_len,
+                                    ctx->pause->reason,
++                                   ctx->pause->provider_meter_id,
+                                    ctx->pause->controller_id);
+     } else {
+         if (ctx->recirc_update_dp_hash) {
+@@ -7139,7 +7149,7 @@ reset_mirror_ctx(struct xlate_ctx *ctx, const struct flow *flow,
+ 
+         set_field = ofpact_get_SET_FIELD(a);
+         mf = set_field->field;
+-        if (mf_are_prereqs_ok(mf, flow, NULL) && !mf_is_tun_metadata(mf)) {
++        if (mf_are_prereqs_ok(mf, flow, NULL) && !mf_is_any_metadata(mf)) {
+             ctx->mirrors = 0;
+         }
+         return;
+diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
+index f59d69c4d1..f6a80f1aec 100644
+--- a/ofproto/ofproto-dpif.c
++++ b/ofproto/ofproto-dpif.c
+@@ -3669,6 +3669,16 @@ mirror_set__(struct ofproto *ofproto_, void *aux,
+                        s->n_dsts, s->src_vlans,
+                        bundle_lookup(ofproto, s->out_bundle),
+                        s->snaplen, s->out_vlan);
++
++    if (!error) {
++        ofproto->backer->need_revalidate = REV_RECONFIGURE;
++    } else if (error == ECANCELED) {
++        /* The user requested a change that is identical to the current
++         * state, so the reconfiguration was canceled; don't log an error
++         * message about that. */
++        error = 0;
++    }
++
+     free(srcs);
+     free(dsts);
+     return error;
+@@ -3904,15 +3914,21 @@ port_query_by_name(const struct ofproto *ofproto_, const char *devname,
+     int error;
+ 
+     if (sset_contains(&ofproto->ghost_ports, devname)) {
+-        const char *type = netdev_get_type_from_name(devname);
+-
+         /* We may be called before ofproto->up.port_by_name is populated with
+          * the appropriate ofport.  For this reason, we must get the name and
+-         * type from the netdev layer directly. */
+-        if (type) {
+-            const struct ofport *ofport;
++         * type from the netdev layer directly.
++         * However, when a port is deleted, the corresponding netdev is also
++         * removed from netdev_shash.  netdev_get_type_from_name() returns
++         * NULL in that case, so try to get the type from ofport->netdev. */
++        const char *type = netdev_get_type_from_name(devname);
++        const struct ofport *ofport =
++                        shash_find_data(&ofproto->up.port_by_name, devname);
+ 
+-            ofport = shash_find_data(&ofproto->up.port_by_name, devname);
++        if (!type && ofport && ofport->netdev) {
++            type = netdev_get_type(ofport->netdev);
++        }
++
++        if (type) {
+             ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
+             ofproto_port->name = xstrdup(devname);
+             ofproto_port->type = xstrdup(type);
+@@ -5150,8 +5166,10 @@ group_setup_dp_hash_table(struct group_dpif *group, size_t max_hash)
+              min_weight, total_weight);
+ 
+     uint64_t min_slots = DIV_ROUND_UP(total_weight, min_weight);
+-    uint64_t min_slots2 = ROUND_UP_POW2(min_slots);
+-    uint64_t n_hash = MAX(16, min_slots2);
++    uint64_t min_slots2 =
++        MAX(min_slots, MIN(n_buckets * 4, MAX_SELECT_GROUP_HASH_VALUES));
++    uint64_t min_slots3 = ROUND_UP_POW2(min_slots2);
++    uint64_t n_hash = MAX(16, min_slots3);
+     if (n_hash > MAX_SELECT_GROUP_HASH_VALUES ||
+         (max_hash != 0 && n_hash > max_hash)) {
+         VLOG_DBG("  Too many hash values required: %"PRIu64, n_hash);
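The revised dp_hash sizing can be checked in isolation: the hash-value count is raised to at least four slots per bucket (capped at the table maximum), then rounded up to a power of two with a floor of 16.  A sketch of that arithmetic with a portable stand-in for ROUND_UP_POW2; the cap value of 256 is an assumption here, not quoted from the source (3 buckets with weights totaling 30 and a minimum of 10 yield n_hash = 16):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SELECT_GROUP_HASH_VALUES 256  /* Assumed cap for the demo. */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static uint64_t
    round_up_pow2(uint64_t x)
    {
        uint64_t p = 1;
        while (p < x) {
            p <<= 1;
        }
        return p;
    }

    int
    main(void)
    {
        uint64_t n_buckets = 3, total_weight = 30, min_weight = 10;

        uint64_t min_slots = (total_weight + min_weight - 1) / min_weight;
        uint64_t min_slots2 =
            MAX(min_slots, MIN(n_buckets * 4, MAX_SELECT_GROUP_HASH_VALUES));
        uint64_t n_hash = MAX(16, round_up_pow2(min_slots2));

        printf("n_hash=%llu\n", (unsigned long long) n_hash);
        return 0;
    }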
+diff --git a/ovsdb/automake.mk b/ovsdb/automake.mk
+index eba713bb6d..d484fe9deb 100644
+--- a/ovsdb/automake.mk
++++ b/ovsdb/automake.mk
+@@ -114,11 +114,13 @@ $(OVSIDL_BUILT): ovsdb/ovsdb-idlc.in python/ovs/dirs.py
+ 
+ # ovsdb-doc
+ EXTRA_DIST += ovsdb/ovsdb-doc
++FLAKE8_PYFILES += ovsdb/ovsdb-doc
+ OVSDB_DOC = $(run_python) $(srcdir)/ovsdb/ovsdb-doc
+ ovsdb/ovsdb-doc: python/ovs/dirs.py
+ 
+ # ovsdb-dot
+ EXTRA_DIST += ovsdb/ovsdb-dot.in ovsdb/dot2pic
++FLAKE8_PYFILES += ovsdb/ovsdb-dot.in ovsdb/dot2pic
+ noinst_SCRIPTS += ovsdb/ovsdb-dot
+ CLEANFILES += ovsdb/ovsdb-dot
+ OVSDB_DOT = $(run_python) $(srcdir)/ovsdb/ovsdb-dot.in
+diff --git a/ovsdb/dot2pic b/ovsdb/dot2pic
+index 2f858e19d5..3db6444de6 100755
+--- a/ovsdb/dot2pic
++++ b/ovsdb/dot2pic
+@@ -17,6 +17,7 @@
+ import getopt
+ import sys
+ 
++
+ def dot2pic(src, dst):
+     scale = 1.0
+     while True:
+@@ -49,8 +50,8 @@ def dot2pic(src, dst):
+                 dst.write("box at %f,%f wid %f height %f\n"
+                           % (x, y, width, height))
+         elif command == 'edge':
+-            tail = words[1]
+-            head = words[2]
++            # tail = words[1]
++            # head = words[2]
+             n = int(words[3])
+ 
+             # Extract x,y coordinates.
+@@ -114,4 +115,3 @@ else:
+ if font_scale:
+     print(".ps %+d" % font_scale)
+ print(".PE")
+-
+diff --git a/ovsdb/ovsdb-client.c b/ovsdb/ovsdb-client.c
+index 7249805bab..cf2ecfd08a 100644
+--- a/ovsdb/ovsdb-client.c
++++ b/ovsdb/ovsdb-client.c
+@@ -451,8 +451,9 @@ usage(void)
+            "    wait until DATABASE reaches STATE "
+            "(\"added\" or \"connected\" or \"removed\")\n"
+            "    in DATBASE on SERVER.\n"
+-           "\n  dump [SERVER] [DATABASE]\n"
+-           "    dump contents of DATABASE on SERVER to stdout\n"
++           "\n  dump [SERVER] [DATABASE] [TABLE]\n"
++           "    dump contents of TABLE (or all tables) in DATABASE on SERVER\n"
++           "    to stdout\n"
+            "\n  backup [SERVER] [DATABASE] > SNAPSHOT\n"
+            "    dump database contents in the form of a database file\n"
+            "\n  [--force] restore [SERVER] [DATABASE] < SNAPSHOT\n"
+diff --git a/ovsdb/ovsdb-doc b/ovsdb/ovsdb-doc
+index 099770d253..2edf487a28 100755
+--- a/ovsdb/ovsdb-doc
++++ b/ovsdb/ovsdb-doc
+@@ -14,9 +14,7 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+-from datetime import date
+ import getopt
+-import os
+ import sys
+ import xml.dom.minidom
+ 
+@@ -24,10 +22,13 @@ import ovs.json
+ from ovs.db import error
+ import ovs.db.schema
+ 
+-from ovs_build_helpers.nroff import *
++from ovs_build_helpers.nroff import block_xml_to_nroff
++from ovs_build_helpers.nroff import escape_nroff_literal
++from ovs_build_helpers.nroff import text_to_nroff
+ 
+ argv0 = sys.argv[0]
+ 
++
+ def typeAndConstraintsToNroff(column):
+     type = column.type.toEnglish(escape_nroff_literal)
+     constraints = column.type.constraintsToEnglish(escape_nroff_literal,
+@@ -38,6 +39,7 @@ def typeAndConstraintsToNroff(column):
+         type += " (must be unique within table)"
+     return type
+ 
++
+ def columnGroupToNroff(table, groupXml, documented_columns):
+     introNodes = []
+     columnNodes = []
+@@ -49,7 +51,10 @@ def columnGroupToNroff(table, groupXml, documented_columns):
+             if (columnNodes
+                 and not (node.nodeType == node.TEXT_NODE
+                          and node.data.isspace())):
+-                raise error.Error("text follows <column> or <group> inside <group>: %s" % node)
++                raise error.Error(
++                    "text follows <column> or <group> inside <group>: %s"
++                    % node
++                )
+             introNodes += [node]
+ 
+     summary = []
+@@ -65,15 +70,9 @@ def columnGroupToNroff(table, groupXml, documented_columns):
+                 if node.hasAttribute('type'):
+                     type_string = node.attributes['type'].nodeValue
+                     type_json = ovs.json.from_string(str(type_string))
+-                    # py2 -> py3 means str -> bytes and unicode -> str
+-                    try:
+-                        if type(type_json) in (str, unicode):
+-                            raise error.Error("%s %s:%s has invalid 'type': %s" 
+-                                              % (table.name, name, key, type_json))
+-                    except:
+-                        if type(type_json) in (bytes, str):
+-                            raise error.Error("%s %s:%s has invalid 'type': %s" 
+-                                              % (table.name, name, key, type_json))
++                    if type(type_json) in (bytes, str):
++                        raise error.Error("%s %s:%s has invalid 'type': %s"
++                                          % (table.name, name, key, type_json))
+                     type_ = ovs.db.types.BaseType.from_json(type_json)
+                 else:
+                     type_ = column.type.value
+@@ -91,10 +90,11 @@ def columnGroupToNroff(table, groupXml, documented_columns):
+                     else:
+                         if type_.type != column.type.value.type:
+                             type_english = type_.toEnglish()
++                            typeNroff += ", containing "
+                             if type_english[0] in 'aeiou':
+-                                typeNroff += ", containing an %s" % type_english
++                                typeNroff += "an %s" % type_english
+                             else:
+-                                typeNroff += ", containing a %s" % type_english
++                                typeNroff += "a %s" % type_english
+                         constraints = (
+                             type_.constraintsToEnglish(escape_nroff_literal,
+                                                        text_to_nroff))
+@@ -121,6 +121,7 @@ def columnGroupToNroff(table, groupXml, documented_columns):
+             raise error.Error("unknown element %s in <table>" % node.tagName)
+     return summary, intro, body
+ 
++
+ def tableSummaryToNroff(summary, level=0):
+     s = ""
+     for type, name, arg in summary:
+@@ -132,6 +133,7 @@ def tableSummaryToNroff(summary, level=0):
+             s += ".RE\n"
+     return s
+ 
++
+ def tableToNroff(schema, tableXml):
+     tableName = tableXml.attributes['name'].nodeValue
+     table = schema.tables[tableName]
+@@ -156,20 +158,17 @@ def tableToNroff(schema, tableXml):
+ 
+     return s
+ 
++
+ def docsToNroff(schemaFile, xmlFile, erFile, version=None):
+     schema = ovs.db.schema.DbSchema.from_json(ovs.json.from_file(schemaFile))
+     doc = xml.dom.minidom.parse(xmlFile).documentElement
+ 
+-    schemaDate = os.stat(schemaFile).st_mtime
+-    xmlDate = os.stat(xmlFile).st_mtime
+-    d = date.fromtimestamp(max(schemaDate, xmlDate))
+-
+     if doc.hasAttribute('name'):
+         manpage = doc.attributes['name'].nodeValue
+     else:
+         manpage = schema.name
+ 
+-    if version == None:
++    if version is None:
+         version = "UNKNOWN"
+ 
+     # Putting '\" p as the first line tells "man" that the manpage
+@@ -194,7 +193,6 @@ def docsToNroff(schemaFile, xmlFile, erFile, version=None):
+ .PP
+ ''' % (manpage, schema.version, version, text_to_nroff(manpage), schema.name)
+ 
+-    tables = ""
+     introNodes = []
+     tableNodes = []
+     summary = []
+@@ -237,8 +235,8 @@ Purpose
+ """ % (name, text_to_nroff(title))
+ 
+     if erFile:
+-        s += """
+-.\\" check if in troff mode (TTY)
++        s += r"""
++.\" check if in troff mode (TTY)
+ .if t \{
+ .bp
+ .SH "TABLE RELATIONSHIPS"
+@@ -248,8 +246,8 @@ database.  Each node represents a table.  Tables that are part of the
+ ``root set'' are shown with double borders.  Each edge leads from the
+ table that contains it and points to the table that its value
+ represents.  Edges are labeled with their column names, followed by a
+-constraint on the number of allowed values: \\fB?\\fR for zero or one,
+-\\fB*\\fR for zero or more, \\fB+\\fR for one or more.  Thick lines
++constraint on the number of allowed values: \fB?\fR for zero or one,
++\fB*\fR for zero or more, \fB+\fR for one or more.  Thick lines
+ represent strong references; thin lines represent weak references.
+ .RS -1in
+ """
+@@ -263,6 +261,7 @@ represent strong references; thin lines represent weak references.
+         s += tableToNroff(schema, node) + "\n"
+     return s
+ 
++
+ def usage():
+     print("""\
+ %(argv0)s: ovsdb schema documentation generator
+@@ -278,6 +277,7 @@ The following options are also available:
+ """ % {'argv0': argv0})
+     sys.exit(0)
+ 
++
+ if __name__ == "__main__":
+     try:
+         try:
+diff --git a/ovsdb/ovsdb-dot.in b/ovsdb/ovsdb-dot.in
+index 41b986c0ac..f1eefd49cb 100755
+--- a/ovsdb/ovsdb-dot.in
++++ b/ovsdb/ovsdb-dot.in
+@@ -1,15 +1,13 @@
+ #! @PYTHON3@
+ 
+-from datetime import date
+ import ovs.db.error
+ import ovs.db.schema
+ import getopt
+-import os
+-import re
+ import sys
+ 
+ argv0 = sys.argv[0]
+ 
++
+ def printEdge(tableName, type, baseType, label):
+     if baseType.ref_table_name:
+         if type.n_min == 0:
+@@ -31,38 +29,42 @@ def printEdge(tableName, type, baseType, label):
+         options['label'] = '"%s%s"' % (label, arity)
+         if baseType.ref_type == 'weak':
+             options['style'] = 'dotted'
+-        print ("\t%s -> %s [%s];" % (
++        print("\t%s -> %s [%s];" % (
+             tableName,
+             baseType.ref_table_name,
+-            ', '.join(['%s=%s' % (k,v) for k,v in options.items()])))
++            ', '.join(['%s=%s' % (k, v) for k, v in options.items()])))
++
+ 
+ def schemaToDot(schemaFile, arrows):
+     schema = ovs.db.schema.DbSchema.from_json(ovs.json.from_file(schemaFile))
+ 
+-    print ("digraph %s {" % schema.name)
+-    print ('\trankdir=LR;')
+-    print ('\tsize="6.5,4";')
+-    print ('\tmargin="0";')
+-    print ("\tnode [shape=box];")
++    print("digraph %s {" % schema.name)
++    print('\trankdir=LR;')
++    print('\tsize="6.5,4";')
++    print('\tmargin="0";')
++    print("\tnode [shape=box];")
+     if not arrows:
+-        print ("\tedge [dir=none, arrowhead=none, arrowtail=none];")
++        print("\tedge [dir=none, arrowhead=none, arrowtail=none];")
+     for tableName, table in schema.tables.items():
+         options = {}
+         if table.is_root:
+             options['style'] = 'bold'
+-        print ("\t%s [%s];" % (
++        print("\t%s [%s];" % (
+             tableName,
+-            ', '.join(['%s=%s' % (k,v) for k,v in options.items()])))
++            ', '.join(['%s=%s' % (k, v) for k, v in options.items()])))
+         for columnName, column in table.columns.items():
+             if column.type.value:
+-                printEdge(tableName, column.type, column.type.key, "%s key" % columnName)
+-                printEdge(tableName, column.type, column.type.value, "%s value" % columnName)
++                printEdge(tableName, column.type, column.type.key,
++                          "%s key" % columnName)
++                printEdge(tableName, column.type, column.type.value,
++                          "%s value" % columnName)
+             else:
+                 printEdge(tableName, column.type, column.type.key, columnName)
+-    print ("}");
++    print("}")
++
+ 
+ def usage():
+-    print ("""\
++    print("""\
+ %(argv0)s: compiles ovsdb schemas to graphviz format
+ Prints a .dot file that "dot" can render to an entity-relationship diagram
+ usage: %(argv0)s [OPTIONS] SCHEMA
+@@ -75,12 +77,13 @@ The following options are also available:
+ """ % {'argv0': argv0})
+     sys.exit(0)
+ 
++
+ if __name__ == "__main__":
+     try:
+         try:
+             options, args = getopt.gnu_getopt(sys.argv[1:], 'hV',
+                                               ['no-arrows',
+-                                               'help', 'version',])
++                                               'help', 'version'])
+         except getopt.GetoptError as geo:
+             sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
+             sys.exit(1)
+@@ -92,7 +95,7 @@ if __name__ == "__main__":
+             elif key in ['-h', '--help']:
+                 usage()
+             elif key in ['-V', '--version']:
+-                print ("ovsdb-dot (Open vSwitch) @VERSION@")
++                print("ovsdb-dot (Open vSwitch) @VERSION@")
+             else:
+                 sys.exit(0)
+ 
+diff --git a/ovsdb/ovsdb-server.c b/ovsdb/ovsdb-server.c
+index b51fd42fe5..a876f8bcf7 100644
+--- a/ovsdb/ovsdb-server.c
++++ b/ovsdb/ovsdb-server.c
+@@ -816,7 +816,8 @@ main(int argc, char *argv[])
+         /* ovsdb-server is usually a long-running process, in which case it
+          * makes plenty of sense to log the version, but --run makes
+          * ovsdb-server more like a command-line tool, so skip it.  */
+-        VLOG_INFO("%s (Open vSwitch) %s", program_name, VERSION);
++        VLOG_INFO("%s (Open vSwitch) %s", program_name,
++                  VERSION VERSION_SUFFIX);
+     }
+ 
+     unixctl_command_register("exit", "", 0, 0, ovsdb_server_exit, &exiting);
+diff --git a/ovsdb/raft.c b/ovsdb/raft.c
+index f463afcb3d..ac3d37ac40 100644
+--- a/ovsdb/raft.c
++++ b/ovsdb/raft.c
+@@ -81,6 +81,7 @@ enum raft_failure_test {
+     FT_STOP_RAFT_RPC,
+     FT_TRANSFER_LEADERSHIP,
+     FT_TRANSFER_LEADERSHIP_AFTER_SEND_APPEND_REQ,
++    FT_TRANSFER_LEADERSHIP_AFTER_STARTING_TO_ADD,
+ };
+ static enum raft_failure_test failure_test;
+ 
+@@ -280,6 +281,7 @@ struct raft {
+     /* Used for joining a cluster. */
+     bool joining;                 /* Attempting to join the cluster? */
+     struct sset remote_addresses; /* Addresses to try to find other servers. */
++#define RAFT_JOIN_TIMEOUT_MS 1000
+     long long int join_timeout;   /* Time to re-send add server request. */
+ 
+     /* Used for leaving a cluster. */
+@@ -385,6 +387,7 @@ static void raft_get_servers_from_log(struct raft *, enum vlog_level);
+ static void raft_get_election_timer_from_log(struct raft *);
+ 
+ static bool raft_handle_write_error(struct raft *, struct ovsdb_error *);
++static bool raft_has_uncommitted_configuration(const struct raft *);
+ 
+ static void raft_run_reconfigure(struct raft *);
+ 
+@@ -1015,8 +1018,13 @@ raft_conn_update_probe_interval(struct raft *raft, struct raft_conn *r_conn)
+      * inactivity probe follower will just try to initiate election
+      * indefinitely staying in 'candidate' role.  And the leader will continue
+      * to send heartbeats to the dead connection thinking that remote server
+-     * is still part of the cluster. */
+-    int probe_interval = raft->election_timer + ELECTION_RANGE_MSEC;
++     * is still part of the cluster.
++     *
++     * While joining, the real value of the election timeout is not known
++     * to this server, so use the maximum. */
++    int probe_interval = (raft->joining ? ELECTION_MAX_MSEC
++                                        : raft->election_timer)
++                         + ELECTION_RANGE_MSEC;
+ 
+     jsonrpc_session_set_probe_interval(r_conn->js, probe_interval);
+ }
+@@ -1083,7 +1091,7 @@ raft_open(struct ovsdb_log *log, struct raft **raftp)
+             raft_start_election(raft, false, false);
+         }
+     } else {
+-        raft->join_timeout = time_msec() + 1000;
++        raft->join_timeout = time_msec() + RAFT_JOIN_TIMEOUT_MS;
+     }
+ 
+     raft_reset_ping_timer(raft);
+@@ -1261,10 +1269,30 @@ raft_transfer_leadership(struct raft *raft, const char *reason)
+         return;
+     }
+ 
+-    struct raft_server *s;
++    struct raft_server **servers, *s;
++    uint64_t threshold = 0;
++    size_t n = 0, start, i;
++
++    servers = xmalloc(hmap_count(&raft->servers) * sizeof *servers);
++
+     HMAP_FOR_EACH (s, hmap_node, &raft->servers) {
+-        if (!uuid_equals(&raft->sid, &s->sid)
+-            && s->phase == RAFT_PHASE_STABLE) {
++        if (uuid_equals(&raft->sid, &s->sid)
++            || s->phase != RAFT_PHASE_STABLE) {
++            continue;
++        }
++        if (s->match_index > threshold) {
++            threshold = s->match_index;
++        }
++        servers[n++] = s;
++    }
++
++    start = n ? random_range(n) : 0;
++
++retry:
++    for (i = 0; i < n; i++) {
++        s = servers[(start + i) % n];
++
++        if (s->match_index >= threshold) {
+             struct raft_conn *conn = raft_find_conn_by_sid(raft, &s->sid);
+             if (!conn) {
+                 continue;
+@@ -1280,7 +1308,10 @@ raft_transfer_leadership(struct raft *raft, const char *reason)
+                     .term = raft->term,
+                 }
+             };
+-            raft_send_to_conn(raft, &rpc, conn);
++
++            if (!raft_send_to_conn(raft, &rpc, conn)) {
++                continue;
++            }
+ 
+             raft_record_note(raft, "transfer leadership",
+                              "transferring leadership to %s because %s",
+@@ -1288,6 +1319,23 @@ raft_transfer_leadership(struct raft *raft, const char *reason)
+             break;
+         }
+     }
++
++    if (n && i == n && threshold) {
++        if (threshold > raft->commit_index) {
++            /* Failed to transfer to servers with the highest 'match_index'.
++             * Try other servers that are not behind the majority. */
++            threshold = raft->commit_index;
++        } else {
++            /* Try any other server.  It is safe, because they either have all
++             * the append requests queued up for them before the leadership
++             * transfer message or their connection is broken and we will not
++             * transfer anyway. */
++            threshold = 0;
++        }
++        goto retry;
++    }
++
++    free(servers);
+ }
+ 
+ /* Send a RemoveServerRequest to the rest of the servers in the cluster.
+@@ -2078,7 +2126,7 @@ raft_run(struct raft *raft)
+                 raft_start_election(raft, true, false);
+             }
+         } else {
+-            raft_start_election(raft, true, false);
++            raft_start_election(raft, hmap_count(&raft->servers) > 1, false);
+         }
+ 
+     }
+@@ -2088,7 +2136,7 @@ raft_run(struct raft *raft)
+     }
+ 
+     if (raft->joining && time_msec() >= raft->join_timeout) {
+-        raft->join_timeout = time_msec() + 1000;
++        raft->join_timeout = time_msec() + RAFT_JOIN_TIMEOUT_MS;
+         LIST_FOR_EACH (conn, list_node, &raft->conns) {
+             raft_send_add_server_request(raft, conn);
+         }
+@@ -2122,10 +2170,12 @@ raft_run(struct raft *raft)
+         raft_reset_ping_timer(raft);
+     }
+ 
++    uint64_t interval = raft->joining
++                        ? RAFT_JOIN_TIMEOUT_MS
++                        : RAFT_TIMER_THRESHOLD(raft->election_timer);
+     cooperative_multitasking_set(
+         &raft_run_cb, (void *) raft, time_msec(),
+-        RAFT_TIMER_THRESHOLD(raft->election_timer)
+-        + RAFT_TIMER_THRESHOLD(raft->election_timer) / 10, "raft_run");
++        interval + interval / 10, "raft_run");
+ 
+     /* Do this only at the end; if we did it as soon as we set raft->left or
+      * raft->failed in handling the RemoveServerReply, then it could easily
+@@ -2696,15 +2746,22 @@ raft_become_follower(struct raft *raft)
+      * new configuration.  Our AppendEntries processing will properly update
+      * the server configuration later, if necessary.
+      *
++     * However, since we're sending replies about a failure to add, those new
++     * servers have to be cleaned up.  Otherwise, they will be stuck in a
++     * 'CATCHUP' phase in case this server regains leadership before they
++     * join through the current new leader.  They are not yet in
++     * 'raft->servers', so not part of the shared configuration.
++     *
+      * Also we do not complete commands here, as they can still be completed
+      * if their log entries have already been replicated to other servers.
+      * If the entries were actually committed according to the new leader, our
+      * AppendEntries processing will complete the corresponding commands.
+      */
+     struct raft_server *s;
+-    HMAP_FOR_EACH (s, hmap_node, &raft->add_servers) {
++    HMAP_FOR_EACH_POP (s, hmap_node, &raft->add_servers) {
+         raft_send_add_server_reply__(raft, &s->sid, s->address, false,
+                                      RAFT_SERVER_LOST_LEADERSHIP);
++        raft_server_destroy(s);
+     }
+     if (raft->remove_server) {
+         raft_send_remove_server_reply__(raft, &raft->remove_server->sid,
+@@ -2768,6 +2825,13 @@ raft_send_heartbeats(struct raft *raft)
+     raft_reset_ping_timer(raft);
+ }
+ 
++static void
++raft_join_complete(struct raft *raft)
++{
++    raft->joining = false;
++    raft_update_probe_intervals(raft);
++}
++
+ /* Initializes the fields in 's' that represent the leader's view of the
+  * server. */
+ static void
+@@ -2805,6 +2869,18 @@ raft_become_leader(struct raft *raft)
+     raft_reset_election_timer(raft);
+     raft_reset_ping_timer(raft);
+ 
++    if (raft->joining) {
++        /* It is possible that the server committing this one to the list of
++         * servers lost leadership before the entry is committed but after
++         * it was already replicated to majority of servers.  In this case
++         * other servers will recognize this one as a valid cluster member
++         * and may transfer leadership to it and vote for it.  This way
++         * we're becoming a cluster leader without receiving a reply to a
++         * join request and will commit the addition of this server
++         * ourselves. */
++        VLOG_INFO_RL(&rl, "elected as leader while joining");
++        raft_join_complete(raft);
++    }
++
+     struct raft_server *s;
+     HMAP_FOR_EACH (s, hmap_node, &raft->servers) {
+         raft_server_init_leader(raft, s);
+@@ -2963,12 +3039,12 @@ raft_update_commit_index(struct raft *raft, uint64_t new_commit_index)
+     }
+ 
+     while (raft->commit_index < new_commit_index) {
++        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+         uint64_t index = ++raft->commit_index;
+         const struct raft_entry *e = raft_get_entry(raft, index);
+ 
+         if (raft_entry_has_data(e)) {
+             struct raft_command *cmd = raft_find_command_by_eid(raft, &e->eid);
+-            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+ 
+             if (cmd) {
+                 if (!cmd->index && raft->role == RAFT_LEADER) {
+@@ -3012,6 +3088,35 @@ raft_update_commit_index(struct raft *raft, uint64_t new_commit_index)
+              * reallocate raft->entries, which would invalidate 'e', so
+              * this case must be last, after the one for 'e->data'. */
+             raft_run_reconfigure(raft);
++        } else if (e->servers && !raft_has_uncommitted_configuration(raft)) {
++            struct ovsdb_error *error;
++            struct raft_server *s;
++            struct hmap servers;
++
++            error = raft_servers_from_json(e->servers, &servers);
++            ovs_assert(!error);
++            HMAP_FOR_EACH (s, hmap_node, &servers) {
++                struct raft_server *server = raft_find_server(raft, &s->sid);
++
++                if (server && server->phase == RAFT_PHASE_COMMITTING) {
++                    /* This server lost leadership while committing
++                     * server 's', but it was committed later by a
++                     * new leader. */
++                    server->phase = RAFT_PHASE_STABLE;
++                }
++
++                if (raft->joining && uuid_equals(&s->sid, &raft->sid)) {
++                    /* A leadership change happened before the previous
++                     * leader could commit the change to the server list,
++                     * but it was replicated and a new leader committed it. */
++                    VLOG_INFO_RL(&rl,
++                        "added to configuration without reply "
++                        "(eid: "UUID_FMT", commit index: %"PRIu64")",
++                        UUID_ARGS(&e->eid), index);
++                    raft_join_complete(raft);
++                }
++            }
++            raft_servers_destroy(&servers);
+         }
+     }
+ 
+@@ -3938,6 +4043,10 @@ raft_handle_add_server_request(struct raft *raft,
+                  "to cluster "CID_FMT, s->nickname, SID_ARGS(&s->sid),
+                  rq->address, CID_ARGS(&raft->cid));
+     raft_send_append_request(raft, s, 0, "initialize new server");
++
++    if (failure_test == FT_TRANSFER_LEADERSHIP_AFTER_STARTING_TO_ADD) {
++        failure_test = FT_TRANSFER_LEADERSHIP;
++    }
+ }
+ 
+ static void
+@@ -3952,7 +4061,7 @@ raft_handle_add_server_reply(struct raft *raft,
+     }
+ 
+     if (rpy->success) {
+-        raft->joining = false;
++        raft_join_complete(raft);
+ 
+         /* It is tempting, at this point, to check that this server is part of
+          * the current configuration.  However, this is not necessarily the
+@@ -4926,6 +5035,7 @@ raft_get_election_timer_from_log(struct raft *raft)
+             break;
+         }
+     }
++    raft_update_probe_intervals(raft);
+ }
+ 
+ static void
+@@ -5063,6 +5173,8 @@ raft_unixctl_failure_test(struct unixctl_conn *conn OVS_UNUSED,
+     } else if (!strcmp(test,
+                        "transfer-leadership-after-sending-append-request")) {
+         failure_test = FT_TRANSFER_LEADERSHIP_AFTER_SEND_APPEND_REQ;
++    } else if (!strcmp(test, "transfer-leadership-after-starting-to-add")) {
++        failure_test = FT_TRANSFER_LEADERSHIP_AFTER_STARTING_TO_ADD;
+     } else if (!strcmp(test, "transfer-leadership")) {
+         failure_test = FT_TRANSFER_LEADERSHIP;
+     } else if (!strcmp(test, "clear")) {
+diff --git a/ovsdb/transaction.c b/ovsdb/transaction.c
+index 484a88e1cc..3f374341f2 100644
+--- a/ovsdb/transaction.c
++++ b/ovsdb/transaction.c
+@@ -1090,7 +1090,6 @@ ovsdb_txn_precommit(struct ovsdb_txn *txn)
+      * was really a no-op. */
+     error = for_each_txn_row(txn, determine_changes);
+     if (error) {
+-        ovsdb_txn_abort(txn);
+         return OVSDB_WRAP_BUG("can't happen", error);
+     }
+     if (ovs_list_is_empty(&txn->txn_tables)) {
+diff --git a/python/.gitignore b/python/.gitignore
+index 60ace6f05b..ad5486af83 100644
+--- a/python/.gitignore
++++ b/python/.gitignore
+@@ -1,2 +1,3 @@
+ dist/
+ *.egg-info
++setup.py
+diff --git a/python/automake.mk b/python/automake.mk
+index 84cf2eab57..d0523870d6 100644
+--- a/python/automake.mk
++++ b/python/automake.mk
+@@ -75,25 +75,24 @@ EXTRA_DIST += \
+ EXTRA_DIST += \
+ 	python/ovs/compat/sortedcontainers/LICENSE \
+ 	python/README.rst \
+-	python/setup.py \
+ 	python/test_requirements.txt
+ 
+ # C extension support.
+ EXTRA_DIST += python/ovs/_json.c
+ 
+-PYFILES = $(ovs_pyfiles) python/ovs/dirs.py $(ovstest_pyfiles) $(ovs_pytests)
++PYFILES = $(ovs_pyfiles) python/ovs/dirs.py python/setup.py $(ovstest_pyfiles) $(ovs_pytests)
+ 
+ EXTRA_DIST += $(PYFILES)
+ PYCOV_CLEAN_FILES += $(PYFILES:.py=.py,cover)
+ 
+ FLAKE8_PYFILES += \
+-	$(filter-out python/ovs/compat/% python/ovs/dirs.py,$(PYFILES)) \
++	$(filter-out python/ovs/compat/% python/ovs/dirs.py python/setup.py,$(PYFILES)) \
+ 	python/ovs_build_helpers/__init__.py \
+ 	python/ovs_build_helpers/extract_ofp_fields.py \
+ 	python/ovs_build_helpers/nroff.py \
+ 	python/ovs_build_helpers/soutil.py \
+ 	python/ovs/dirs.py.template \
+-	python/setup.py
++	python/setup.py.template
+ 
+ nobase_pkgdata_DATA = $(ovs_pyfiles) $(ovstest_pyfiles)
+ ovs-install-data-local:
+@@ -113,7 +112,7 @@ ovs-install-data-local:
+ 	rm python/ovs/dirs.py.tmp
+ 
+ .PHONY: python-sdist
+-python-sdist: $(srcdir)/python/ovs/version.py $(ovs_pyfiles) python/ovs/dirs.py
++python-sdist: $(srcdir)/python/ovs/version.py $(ovs_pyfiles) python/ovs/dirs.py python/setup.py
+ 	cd python/ && $(PYTHON3) -m build --sdist
+ 
+ .PHONY: pypi-upload
+@@ -129,8 +128,8 @@ ovs-uninstall-local:
+ ALL_LOCAL += $(srcdir)/python/ovs/version.py
+ $(srcdir)/python/ovs/version.py: config.status
+ 	$(AM_V_GEN)$(ro_shell) > $(@F).tmp && \
+-	echo 'VERSION = "$(VERSION)"' >> $(@F).tmp && \
+-	if cmp -s $(@F).tmp $@; then touch $@; rm $(@F).tmp; else mv $(@F).tmp $@; fi
++	echo 'VERSION = "$(VERSION)$(VERSION_SUFFIX)"' >> $(@F).tmp && \
++	if cmp -s $(@F).tmp $@; then touch $@; else cp $(@F).tmp $@; fi; rm $(@F).tmp
+ 
+ ALL_LOCAL += $(srcdir)/python/ovs/dirs.py
+ $(srcdir)/python/ovs/dirs.py: python/ovs/dirs.py.template
+@@ -147,6 +146,15 @@ $(srcdir)/python/ovs/dirs.py: python/ovs/dirs.py.template
+ EXTRA_DIST += python/ovs/dirs.py.template
+ CLEANFILES += python/ovs/dirs.py
+ 
++ALL_LOCAL += $(srcdir)/python/setup.py
++$(srcdir)/python/setup.py: python/setup.py.template config.status
++	$(AM_V_GEN)sed \
++		-e 's,[@]VERSION[@],$(VERSION),g' \
++		< $(srcdir)/python/setup.py.template > $(@F).tmp && \
++	if cmp -s $(@F).tmp $@; then touch $@; else cp $(@F).tmp $@; fi; rm $(@F).tmp
++EXTRA_DIST += python/setup.py.template
++CLEANFILES += python/setup.py
++
+ EXTRA_DIST += python/TODO.rst
+ 
+ $(srcdir)/python/ovs/flow/ofp_fields.py: $(srcdir)/build-aux/gen_ofp_field_decoders include/openvswitch/meta-flow.h
+diff --git a/python/ovs/db/custom_index.py b/python/ovs/db/custom_index.py
+index 587caf5e3e..3fa03d3c95 100644
+--- a/python/ovs/db/custom_index.py
++++ b/python/ovs/db/custom_index.py
+@@ -90,14 +90,21 @@ class IndexedRows(DictBase, object):
+         index = self.indexes[name] = MultiColumnIndex(name)
+         return index
+ 
++    def __getitem__(self, key):
++        return self.data[key][-1]
++
+     def __setitem__(self, key, item):
+-        self.data[key] = item
++        try:
++            self.data[key].append(item)
++        except KeyError:
++            self.data[key] = [item]
+         for index in self.indexes.values():
+             index.add(item)
+ 
+     def __delitem__(self, key):
+-        val = self.data[key]
+-        del self.data[key]
++        val = self.data[key].pop()
++        if len(self.data[key]) == 0:
++            del self.data[key]
+         for index in self.indexes.values():
+             index.remove(val)
+ 
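A standalone sketch (plain Python 3, outside the patch, no OVS imports) of the multi-value mapping that custom_index.py now implements: each key holds a stack of rows, lookups return the newest entry, and deletion pops rather than destroys. The class name is illustrative:

    class MultiValueDict:
        """Maps each key to a stack of values; lookups see the newest."""

        def __init__(self):
            self.data = {}

        def __getitem__(self, key):
            return self.data[key][-1]

        def __setitem__(self, key, item):
            # Append instead of overwrite, so a transient duplicate
            # insert does not lose the value it shadows.
            self.data.setdefault(key, []).append(item)

        def __delitem__(self, key):
            self.data[key].pop()
            if not self.data[key]:
                del self.data[key]

    d = MultiValueDict()
    d["row"] = "old"
    d["row"] = "new"          # shadows, but does not destroy, "old"
    assert d["row"] == "new"
    del d["row"]              # removes only the newest value
    assert d["row"] == "old"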
+diff --git a/python/ovs/db/idl.py b/python/ovs/db/idl.py
+index a80da84e7a..b6d5ed6972 100644
+--- a/python/ovs/db/idl.py
++++ b/python/ovs/db/idl.py
+@@ -35,9 +35,9 @@ ROW_CREATE = "create"
+ ROW_UPDATE = "update"
+ ROW_DELETE = "delete"
+ 
+-OVSDB_UPDATE = 0
+-OVSDB_UPDATE2 = 1
+-OVSDB_UPDATE3 = 2
++OVSDB_UPDATE = "update"
++OVSDB_UPDATE2 = "update2"
++OVSDB_UPDATE3 = "update3"
+ 
+ CLUSTERED = "clustered"
+ RELAY = "relay"
+@@ -77,7 +77,7 @@ class ColumnDefaultDict(dict):
+         return item in self.keys()
+ 
+ 
+-class Monitor(enum.IntEnum):
++class Monitor(enum.Enum):
+     monitor = OVSDB_UPDATE
+     monitor_cond = OVSDB_UPDATE2
+     monitor_cond_since = OVSDB_UPDATE3
+@@ -465,23 +465,18 @@ class Idl(object):
+                 self.__parse_update(msg.params[2], OVSDB_UPDATE3)
+                 self.last_id = msg.params[1]
+             elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
+-                    and msg.method == "update2"
+-                    and len(msg.params) == 2):
+-                # Database contents changed.
+-                self.__parse_update(msg.params[1], OVSDB_UPDATE2)
+-            elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
+-                    and msg.method == "update"
++                    and msg.method in (OVSDB_UPDATE, OVSDB_UPDATE2)
+                     and len(msg.params) == 2):
+                 # Database contents changed.
+                 if msg.params[0] == str(self.server_monitor_uuid):
+-                    self.__parse_update(msg.params[1], OVSDB_UPDATE,
++                    self.__parse_update(msg.params[1], msg.method,
+                                         tables=self.server_tables)
+                     self.change_seqno = previous_change_seqno
+                     if not self.__check_server_db():
+                         self.force_reconnect()
+                         break
+                 else:
+-                    self.__parse_update(msg.params[1], OVSDB_UPDATE)
++                    self.__parse_update(msg.params[1], msg.method)
+             elif self.handle_monitor_canceled(msg):
+                 break
+             elif self.handle_monitor_cancel_reply(msg):
+@@ -540,7 +535,7 @@ class Idl(object):
+                 # Reply to our "monitor" of _Server request.
+                 try:
+                     self._server_monitor_request_id = None
+-                    self.__parse_update(msg.result, OVSDB_UPDATE,
++                    self.__parse_update(msg.result, OVSDB_UPDATE2,
+                                         tables=self.server_tables)
+                     self.change_seqno = previous_change_seqno
+                     if self.__check_server_db():
+@@ -579,6 +574,11 @@ class Idl(object):
+             elif msg.type == ovs.jsonrpc.Message.T_NOTIFY and msg.id == "echo":
+                 # Reply to our echo request.  Ignore it.
+                 pass
++            elif (msg.type == ovs.jsonrpc.Message.T_ERROR and
++                  self.state == self.IDL_S_SERVER_MONITOR_REQUESTED and
++                  msg.id == self._server_monitor_request_id):
++                self._server_monitor_request_id = None
++                self.__send_monitor_request()
+             elif (msg.type == ovs.jsonrpc.Message.T_ERROR and
+                   self.state == (
+                       self.IDL_S_DATA_MONITOR_COND_SINCE_REQUESTED) and
+@@ -912,7 +912,7 @@ class Idl(object):
+         monitor_request = {"columns": columns}
+         monitor_requests[table.name] = [monitor_request]
+         msg = ovs.jsonrpc.Message.create_request(
+-            'monitor', [self._server_db.name,
++            'monitor_cond', [self._server_db.name,
+                              str(self.server_monitor_uuid),
+                              monitor_requests])
+         self._server_monitor_request_id = msg.id
+@@ -1013,7 +1013,9 @@ class Idl(object):
+             if not row:
+                 raise error.Error('Modify non-existing row')
+ 
++            del table.rows[uuid]
+             old_row = self.__apply_diff(table, row, row_update['modify'])
++            table.rows[uuid] = row
+             return Notice(ROW_UPDATE, row, Row(self, table, uuid, old_row))
+         else:
+             raise error.Error('<row-update> unknown operation',
+@@ -1044,9 +1046,10 @@ class Idl(object):
+                 op = ROW_UPDATE
+                 vlog.warn("cannot add existing row %s to table %s"
+                           % (uuid, table.name))
++                del table.rows[uuid]
++
+             changed |= self.__row_update(table, row, new)
+-            if op == ROW_CREATE:
+-                table.rows[uuid] = row
++            table.rows[uuid] = row
+             if changed:
+                 return Notice(ROW_CREATE, row)
+         else:
+@@ -1058,9 +1061,11 @@ class Idl(object):
+                 # XXX rate-limit
+                 vlog.warn("cannot modify missing row %s in table %s"
+                           % (uuid, table.name))
++            else:
++                del table.rows[uuid]
++
+             changed |= self.__row_update(table, row, new)
+-            if op == ROW_CREATE:
+-                table.rows[uuid] = row
++            table.rows[uuid] = row
+             if changed:
+                 return Notice(op, row, Row.from_json(self, table, uuid, old))
+         return False
+@@ -1854,7 +1859,7 @@ class Transaction(object):
+                 if row._data is None:
+                     op["op"] = "insert"
+                     if row._persist_uuid:
+-                        op["uuid"] = row.uuid
++                        op["uuid"] = str(row.uuid)
+                     else:
+                         op["uuid-name"] = _uuid_name_from_uuid(row.uuid)
+ 
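The constant change above swaps integer tags for the literal JSON-RPC method names and, with Monitor becoming enum.Enum, lets a notification's method string be matched and dispatched without translation, as the reworked __parse_update calls show. A rough standalone illustration (plain Python 3; values mirror the hunk):

    import enum

    OVSDB_UPDATE = "update"
    OVSDB_UPDATE2 = "update2"
    OVSDB_UPDATE3 = "update3"

    class Monitor(enum.Enum):
        monitor = OVSDB_UPDATE
        monitor_cond = OVSDB_UPDATE2
        monitor_cond_since = OVSDB_UPDATE3

    # The wire-format method string compares directly against the
    # protocol constants and maps back to the enum by value:
    method = "update2"
    assert method in (OVSDB_UPDATE, OVSDB_UPDATE2)
    assert Monitor(method) is Monitor.monitor_cond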
+diff --git a/python/ovs/fatal_signal.py b/python/ovs/fatal_signal.py
+index cb2e99e87d..16a7e78a03 100644
+--- a/python/ovs/fatal_signal.py
++++ b/python/ovs/fatal_signal.py
+@@ -16,6 +16,7 @@ import atexit
+ import os
+ import signal
+ import sys
++import threading
+ 
+ import ovs.vlog
+ 
+@@ -112,29 +113,29 @@ def _unlink(file_):
+ def _signal_handler(signr, _):
+     _call_hooks(signr)
+ 
+-    # Re-raise the signal with the default handling so that the program
+-    # termination status reflects that we were killed by this signal.
+-    signal.signal(signr, signal.SIG_DFL)
+-    os.kill(os.getpid(), signr)
+-
+ 
+ def _atexit_handler():
+     _call_hooks(0)
+ 
+ 
+-recurse = False
++mutex = threading.Lock()
+ 
+ 
+ def _call_hooks(signr):
+-    global recurse
+-    if recurse:
++    global mutex
++    if not mutex.acquire(blocking=False):
+         return
+-    recurse = True
+ 
+     for hook, cancel, run_at_exit in _hooks:
+         if signr != 0 or run_at_exit:
+             hook()
+ 
++    if signr != 0:
++        # Re-raise the signal with the default handling so that the program
++        # termination status reflects that we were killed by this signal.
++        signal.signal(signr, signal.SIG_DFL)
++        os.kill(os.getpid(), signr)
++
+ 
+ _inited = False
+ 
+@@ -150,7 +151,9 @@ def _init():
+                        signal.SIGALRM]
+ 
+         for signr in signals:
+-            if signal.getsignal(signr) == signal.SIG_DFL:
++            handler = signal.getsignal(signr)
++            if (handler == signal.SIG_DFL or
++                handler == signal.default_int_handler):
+                 signal.signal(signr, _signal_handler)
+         atexit.register(_atexit_handler)
+ 
+@@ -165,7 +168,6 @@ def signal_alarm(timeout):
+ 
+     if sys.platform == "win32":
+         import time
+-        import threading
+ 
+         class Alarm (threading.Thread):
+             def __init__(self, timeout):
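The fatal_signal.py hunk replaces the boolean recursion flag with a lock taken non-blockingly, and re-raises the signal only after the hooks have run. A condensed sketch of the guard (plain Python 3; in the real module the lock is deliberately never released, since the process exits right afterwards):

    import threading

    _hooks_mutex = threading.Lock()

    def _call_hooks_once(hooks):
        # A failed non-blocking acquire means another thread, or a
        # nested signal on this thread, is already running the hooks;
        # bail out instead of running them twice.
        if not _hooks_mutex.acquire(blocking=False):
            return
        for hook in hooks:
            hook()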
+diff --git a/python/ovs/flow/odp.py b/python/ovs/flow/odp.py
+index 7d9b165d46..a8f8c067a9 100644
+--- a/python/ovs/flow/odp.py
++++ b/python/ovs/flow/odp.py
+@@ -365,29 +365,30 @@ class ODPFlow(Flow):
+             is_list=True,
+         )
+ 
+-        return {
+-            **_decoders,
+-            "check_pkt_len": nested_kv_decoder(
+-                KVDecoders(
+-                    {
+-                        "size": decode_int,
+-                        "gt": nested_kv_decoder(
+-                            KVDecoders(
+-                                decoders=_decoders,
+-                                default_free=decode_free_output,
+-                            ),
+-                            is_list=True,
++        _decoders["check_pkt_len"] = nested_kv_decoder(
++            KVDecoders(
++                {
++                    "size": decode_int,
++                    "gt": nested_kv_decoder(
++                        KVDecoders(
++                            decoders=_decoders,
++                            default_free=decode_free_output,
+                         ),
+-                        "le": nested_kv_decoder(
+-                            KVDecoders(
+-                                decoders=_decoders,
+-                                default_free=decode_free_output,
+-                            ),
+-                            is_list=True,
++                        is_list=True,
++                    ),
++                    "le": nested_kv_decoder(
++                        KVDecoders(
++                            decoders=_decoders,
++                            default_free=decode_free_output,
+                         ),
+-                    }
+-                )
+-            ),
++                        is_list=True,
++                    ),
++                }
++            )
++        )
++
++        return {
++            **_decoders,
+         }
+ 
+     @staticmethod
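The odp.py restructuring registers the check_pkt_len decoder inside the same _decoders dict it consults, which is what allows check_pkt_len actions to nest inside their own gt/le branches. A toy model of such a self-referencing dispatch table (plain Python 3; all names are illustrative):

    decoders = {"output": int}

    def decode(key, value):
        return decoders[key](value)

    def decode_check_pkt_len(value):
        # 'value' stands in for the parsed inner key/value pairs.
        return {k: decode(k, v) for k, v in value.items()}

    # Registering the handler in the dict it consults allows nesting
    # to arbitrary depth.
    decoders["check_pkt_len"] = decode_check_pkt_len

    print(decode("check_pkt_len", {"check_pkt_len": {"output": "4"}}))
    # -> {'check_pkt_len': {'output': 4}}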
+diff --git a/python/ovs/tests/test_odp.py b/python/ovs/tests/test_odp.py
+index f19ec386e8..d514e9be32 100644
+--- a/python/ovs/tests/test_odp.py
++++ b/python/ovs/tests/test_odp.py
+@@ -541,6 +541,35 @@ def test_odp_fields(input_string, expected):
+                 ),
+             ],
+         ),
++        (
++            "actions:check_pkt_len(size=200,gt(check_pkt_len(size=400,gt(4),le(2))),le(check_pkt_len(size=100,gt(1),le(drop))))",  # noqa: E501
++            [
++                KeyValue(
++                    "check_pkt_len",
++                    {
++                        "size": 200,
++                        "gt": [
++                            {
++                                "check_pkt_len": {
++                                    "size": 400,
++                                    "gt": [{"output": {"port": 4}}],
++                                    "le": [{"output": {"port": 2}}],
++                                }
++                            }
++                        ],
++                        "le": [
++                            {
++                                "check_pkt_len": {
++                                    "size": 100,
++                                    "gt": [{"output": {"port": 1}}],
++                                    "le": [{"drop": True}],
++                                }
++                            }
++                        ],
++                    },
++                )
++            ],
++        ),
+         (
+             "actions:meter(1),hash(l4(0))",
+             [
+diff --git a/python/setup.py b/python/setup.py.template
+similarity index 87%
+rename from python/setup.py
+rename to python/setup.py.template
+index bcf832ce9b..e7d59f2ca3 100644
+--- a/python/setup.py
++++ b/python/setup.py.template
+@@ -23,24 +23,16 @@ except ImportError:  # Needed for setuptools < 59.0
+ 
+ import setuptools
+ 
+-VERSION = "unknown"
+-
+-try:
+-    # Try to set the version from the generated ovs/version.py
+-    exec(open("ovs/version.py").read())
+-except IOError:
+-    print("Ensure version.py is created by running make python/ovs/version.py",
+-          file=sys.stderr)
+-    sys.exit(-1)
+-
+-try:
+-    # Try to open generated ovs/dirs.py. However, in this case we
+-    # don't need to exec()
+-    open("ovs/dirs.py")
+-except IOError:
+-    print("Ensure dirs.py is created by running make python/ovs/dirs.py",
+-          file=sys.stderr)
+-    sys.exit(-1)
++VERSION = "@VERSION@"
++
++for x in ("version.py", "dirs.py"):
++    try:
++        # Try to open generated ovs/{version,dirs}.py
++        open(f"ovs/{x}")
++    except IOError:
++        print(f"Ensure {x} is created by running make python/ovs/{x}",
++              file=sys.stderr)
++        sys.exit(-1)
+ 
+ ext_errors = (CCompilerError, ExecError, PlatformError)
+ if sys.platform == 'win32':
+diff --git a/python/test_requirements.txt b/python/test_requirements.txt
+index 5043c71e22..a1424506b6 100644
+--- a/python/test_requirements.txt
++++ b/python/test_requirements.txt
+@@ -1,4 +1,5 @@
+ netaddr
++packaging
+ pyftpdlib
+ pyparsing
+ pytest
+diff --git a/rhel/openvswitch-fedora.spec.in b/rhel/openvswitch-fedora.spec.in
+index 5d24ebcda8..650a274bee 100644
+--- a/rhel/openvswitch-fedora.spec.in
++++ b/rhel/openvswitch-fedora.spec.in
+@@ -178,6 +178,7 @@ This package provides IPsec tunneling support for OVS tunnels.
+         --disable-static \
+         --enable-shared \
+         --with-pkidir=%{_sharedstatedir}/openvswitch/pki \
++        --with-version-suffix=-%{release} \
+         PYTHON3=%{__python3}
+ 
+ build-aux/dpdkstrip.py \
+diff --git a/rhel/usr_lib_systemd_system_ovsdb-server.service b/rhel/usr_lib_systemd_system_ovsdb-server.service
+index 49dc06e38c..558632320c 100644
+--- a/rhel/usr_lib_systemd_system_ovsdb-server.service
++++ b/rhel/usr_lib_systemd_system_ovsdb-server.service
+@@ -29,3 +29,4 @@ ExecStop=/usr/share/openvswitch/scripts/ovs-ctl --no-ovs-vswitchd stop
+ ExecReload=/usr/share/openvswitch/scripts/ovs-ctl --no-ovs-vswitchd \
+            ${OVS_USER_OPT} \
+            --no-monitor restart $OPTIONS
++TimeoutSec=300
+diff --git a/selinux/openvswitch-custom.te.in b/selinux/openvswitch-custom.te.in
+index beb0ab0d66..fe2c5bb61a 100644
+--- a/selinux/openvswitch-custom.te.in
++++ b/selinux/openvswitch-custom.te.in
+@@ -49,8 +49,8 @@ require {
+         class fifo_file { getattr read write append ioctl lock open };
+         class filesystem getattr;
+         class lnk_file { read open };
+-        class netlink_audit_socket { create nlmsg_relay audit_write read write };
+-        class netlink_netfilter_socket { create nlmsg_relay audit_write read write };
++        class netlink_audit_socket { create nlmsg_relay read write };
++        class netlink_netfilter_socket { create read write };
+ @begin_dpdk@
+         class netlink_rdma_socket { setopt bind create };
+ @end_dpdk@
+@@ -79,8 +79,8 @@ domtrans_pattern(openvswitch_t, openvswitch_load_module_exec_t, openvswitch_load
+ 
+ #============= openvswitch_t ==============
+ allow openvswitch_t self:capability { dac_override audit_write net_broadcast net_raw };
+-allow openvswitch_t self:netlink_audit_socket { create nlmsg_relay audit_write read write };
+-allow openvswitch_t self:netlink_netfilter_socket { create nlmsg_relay audit_write read write };
++allow openvswitch_t self:netlink_audit_socket { create nlmsg_relay read write };
++allow openvswitch_t self:netlink_netfilter_socket { create read write };
+ @begin_dpdk@
+ allow openvswitch_t self:netlink_rdma_socket { setopt bind create };
+ @end_dpdk@
+diff --git a/tests/atlocal.in b/tests/atlocal.in
+index f321bae55f..8565a0bae9 100644
+--- a/tests/atlocal.in
++++ b/tests/atlocal.in
+@@ -229,18 +229,35 @@ export UBSAN_OPTIONS
+ REQUIREMENT_PATH=$abs_top_srcdir/python/test_requirements.txt $PYTHON3 -c '
+ import os
+ import pathlib
+-import pkg_resources
+ import sys
+ 
++PACKAGING = True
++try:
++    from packaging import requirements
++    from importlib import metadata
++except ModuleNotFoundError:
++    PACKAGING = False
++    import pkg_resources
++
+ with pathlib.Path(os.path.join(os.getenv("REQUIREMENT_PATH"))).open() as reqs:
+-    for req in pkg_resources.parse_requirements(reqs):
+-        try:
+-            pkg_resources.require(str(req))
+-        except pkg_resources.DistributionNotFound:
+-            sys.exit(2)
++    if PACKAGING:
++        for req in reqs.readlines():
++            try:
++                r = requirements.Requirement(req.strip())
++                if metadata.version(r.name) not in r.specifier:
++                    raise metadata.PackageNotFoundError
++            except metadata.PackageNotFoundError:
++                sys.exit(2)
++    else:
++        for req in pkg_resources.parse_requirements(reqs):
++            try:
++                pkg_resources.require(str(req))
++            except pkg_resources.DistributionNotFound:
++                sys.exit(2)
+ '
+ case $? in
+     0) HAVE_PYTEST=yes ;;
+     2) HAVE_PYTEST=no ;;
+-    *) echo "$0: unexpected error probing Python unit test requirements" >&2 ;;
++    *) HAVE_PYTEST=no
++       echo "$0: unexpected error probing Python unit test requirements" >&2 ;;
+ esac
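The probe above now prefers packaging plus importlib.metadata and keeps the deprecated pkg_resources only as a fallback. The new path reduces to a check like the following (plain Python 3 with the third-party 'packaging' distribution installed; the function name is illustrative):

    from importlib import metadata
    from packaging import requirements

    def is_satisfied(req_line):
        """Check one requirements.txt line against what is installed."""
        r = requirements.Requirement(req_line.strip())
        try:
            # An empty specifier (no version pin) accepts any version.
            return metadata.version(r.name) in r.specifier
        except metadata.PackageNotFoundError:
            return False

    print(is_satisfied("pytest"))        # installed, any version?
    print(is_satisfied("pytest>=99.0"))  # must also match the pin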
+diff --git a/tests/dpif-netdev.at b/tests/dpif-netdev.at
+index 790b5a43af..c16bdd0326 100644
+--- a/tests/dpif-netdev.at
++++ b/tests/dpif-netdev.at
+@@ -807,6 +807,41 @@ AT_CHECK([ovs-appctl netdev-dummy/receive p1 ${bad_frame}])
+ AT_CHECK([ovs-pcap p2.pcap > p2.pcap.txt 2>&1])
+ AT_CHECK_UNQUOTED([tail -n 1 p2.pcap.txt], [0], [${good_expected}
+ ])
++
++dnl Test with IP optional fields in a valid packet.  Note that neither this
++dnl packet nor the following one contains a correct checksum.  OVS is
++dnl expected to replace this dummy checksum with a valid one if possible.
++m4_define([OPT_PKT], m4_join([],
++dnl eth(dst=aa:aa:aa:aa:aa:aa,src=bb:bb:bb:bb:bb:bb,type=0x0800)
++[aaaaaaaaaaaabbbbbbbbbbbb0800],
++dnl ipv4(dst=10.0.0.2,src=10.0.0.1,proto=1,len=60,tot_len=68,csum=0xeeee)
++[4f000044abab00004001eeee0a0000010a000002],
++dnl IPv4 Opt: type 7 (Record Route) len 39 + type 0 (EOL).
++[07270c010203040a000003000000000000000000],
++[0000000000000000000000000000000000000000],
++dnl icmp(type=8,code=0), csum 0x3e2f incorrect, should be 0x412f.
++[08003e2fb6d00000]))
++
++dnl IP header indicates optional fields but doesn't contain any.
++m4_define([MICROGRAM], m4_join([],
++dnl eth(dst=aa:aa:aa:aa:aa:aa,src=bb:bb:bb:bb:bb:bb,type=0x0800)
++[aaaaaaaaaaaabbbbbbbbbbbb0800],
++dnl ipv4(dst=10.0.0.2,src=10.0.0.1,proto=1,len=60,tot_len=68,csum=0xeeee)
++[4f000044abab00004001eeee0a0000010a000002]))
++
++AT_CHECK([ovs-vsctl set Interface p1 options:ol_ip_csum=true])
++AT_CHECK([ovs-vsctl set Interface p1 options:ol_ip_csum_set_good=true])
++AT_CHECK([ovs-appctl netdev-dummy/receive p1 OPT_PKT])
++AT_CHECK([ovs-appctl netdev-dummy/receive p1 MICROGRAM])
++AT_CHECK([ovs-pcap p2.pcap > p2.pcap.txt 2>&1])
++
++dnl Build the expected modified packets.  The first packet has a valid IPv4
++dnl checksum and modified destination IP address.  The second packet isn't
++dnl expected to change.
++AT_CHECK([echo "OPT_PKT" | sed -e "s/0a000002/c0a80101/" -e "s/eeee/dd2e/" > expout])
++AT_CHECK([echo "MICROGRAM" >> expout])
++AT_CHECK([tail -n 2 p2.pcap.txt], [0], [expout])
++
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
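The expected packet is produced by rewriting the destination address to c0a80101 and the checksum to dd2e. That checksum can be reproduced with the standard RFC 1071 ones'-complement sum over the 60-byte header with the checksum field zeroed; a quick verification in plain Python 3:

    import struct

    def ipv4_checksum(header: bytes) -> int:
        """RFC 1071 checksum; the checksum field (bytes 10-11) must
        already be zeroed."""
        total = sum(struct.unpack("!%dH" % (len(header) // 2), header))
        while total >> 16:
            total = (total & 0xFFFF) + (total >> 16)
        return ~total & 0xFFFF

    hdr = bytes.fromhex(
        "4f000044abab000040010000"      # header up to the zeroed csum
        "0a000001" "c0a80101"           # src 10.0.0.1, rewritten dst
        "07270c010203040a000003000000000000000000"
        "0000000000000000000000000000000000000000")
    print(f"{ipv4_checksum(hdr):#06x}")  # 0xdd2e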
+@@ -1091,3 +1126,66 @@ OVS_VSWITCHD_STOP(["dnl
+ /Error: unknown miniflow extract implementation superstudy./d
+ /Error: invalid study_pkt_cnt value: -pmd./d"])
+ AT_CLEANUP
++
++AT_SETUP([datapath - Actions Autovalidator Checksum])
++
++OVS_VSWITCHD_START(add-port br0 p0 -- set Interface p0 type=dummy \
++                   -- add-port br0 p1 -- set Interface p1 type=dummy)
++
++AT_CHECK([ovs-appctl odp-execute/action-impl-set autovalidator], [0], [dnl
++Action implementation set to autovalidator.
++])
++
++dnl Add flows to trigger checksum calculation.
++AT_DATA([flows.txt], [dnl
++  in_port=p0,ip,actions=mod_nw_src=10.1.1.1,p1
++  in_port=p0,ipv6,actions=set_field:fc00::100->ipv6_src,p1
++])
++AT_CHECK([ovs-ofctl del-flows br0])
++AT_CHECK([ovs-ofctl -Oopenflow13 add-flows br0 flows.txt])
++
++dnl Make sure checksum won't be offloaded.
++AT_CHECK([ovs-vsctl set Interface p0 options:ol_ip_csum=false])
++AT_CHECK([ovs-vsctl set Interface p0 options:ol_ip_csum_set_good=false])
++
++AT_CHECK([ovs-vsctl set Interface p1 options:pcap=p1.pcap])
++
++dnl IPv4 packet with values that will trigger carry-over addition for checksum.
++flow_s_v4="
++  eth_src=47:42:86:08:17:50,eth_dst=3e:55:b5:9e:3a:fb,dl_type=0x0800,
++  nw_src=229.167.36.90,nw_dst=130.161.64.186,nw_proto=6,nw_ttl=64,nw_frag=no,
++  tp_src=54392,tp_dst=5201,tcp_flags=ack"
++
++good_frame=$(ovs-ofctl compose-packet --bare "${flow_s_v4}")
++AT_CHECK([ovs-appctl netdev-dummy/receive p0 ${good_frame}])
++
++dnl Checksum should change to 0xAC33 with ip_src changed to 10.1.1.1
++dnl by the datapath while processing the packet.
++flow_expected=$(echo "${flow_s_v4}" | sed 's/229.167.36.90/10.1.1.1/g')
++good_expected=$(ovs-ofctl compose-packet --bare "${flow_expected}")
++AT_CHECK([ovs-pcap p1.pcap > p1.pcap.txt 2>&1])
++AT_CHECK_UNQUOTED([tail -n 1 p1.pcap.txt], [0], [${good_expected}
++])
++
++dnl Repeat similar test for IPv6.
++flow_s_v6="
++  eth_src=8a:bf:7e:2f:05:84,eth_dst=0a:8f:39:4f:e0:73,dl_type=0x86dd,
++  ipv6_src=2f8a:2076:3926:9e7:2d47:4bc9:9c7:17f3,
++  ipv6_dst=7287:10dd:2fb9:41d5:3eb2:2c7a:11b0:6258,
++  ipv6_label=0x51ac,nw_proto=6,nw_ttl=142,nw_frag=no,
++  tp_src=20405,tp_dst=20662,tcp_flags=ack"
++
++good_frame_v6=$(ovs-ofctl compose-packet --bare "${flow_s_v6}")
++AT_CHECK([ovs-appctl netdev-dummy/receive p0 ${good_frame_v6}])
++
++dnl Checksum should change to 0x59FD with ipv6_src changed to fc00::100
++dnl by the datapath while processing the packet.
++flow_expected_v6=$(echo "${flow_s_v6}" | \
++  sed 's/2f8a:2076:3926:9e7:2d47:4bc9:9c7:17f3/fc00::100/g')
++good_expected_v6=$(ovs-ofctl compose-packet --bare "${flow_expected_v6}")
++AT_CHECK([ovs-pcap p1.pcap > p1.pcap.txt 2>&1])
++AT_CHECK_UNQUOTED([tail -n 1 p1.pcap.txt], [0], [${good_expected_v6}
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
+diff --git a/tests/library.at b/tests/library.at
+index 7b4acebb8a..d962e1b3fd 100644
+--- a/tests/library.at
++++ b/tests/library.at
+@@ -230,7 +230,9 @@ AT_CHECK([ovstest test-util -voff -vfile:info '-vPATTERN:file:%c|%p|%m' --log-fi
+   [$exit_status], [], [stderr])
+ 
+ AT_CHECK([sed 's/\(opened log file\) .*/\1/
+-s/|[[^|]]*: /|/' test-util.log], [0], [dnl
++s/|[[^|]]*: /|/
++/backtrace/d
++/|.*|/!d' test-util.log], [0], [dnl
+ vlog|INFO|opened log file
+ util|EMER|assertion false failed in test_assert()
+ ])
+diff --git a/tests/nsh.at b/tests/nsh.at
+index 55296e5593..0040a50b36 100644
+--- a/tests/nsh.at
++++ b/tests/nsh.at
+@@ -521,51 +521,45 @@ AT_CHECK([
+         set interface vxlangpe32 type=vxlan options:exts=gpe options:remote_ip=30.0.0.2 options:packet_type=ptap ofport_request=3020
+ 
+     ovs-appctl netdev-dummy/ip4addr br-p1 10.0.0.1/24
+-    ovs-appctl ovs/route/add 10.0.0.0/24 br-p1
+     ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1
+     ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2
+     ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3
+ 
+     ovs-appctl netdev-dummy/ip4addr br-p2 20.0.0.2/24
+-    ovs-appctl ovs/route/add 20.0.0.0/24 br-p2
+     ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1
+     ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2
+     ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3
+ 
+     ovs-appctl netdev-dummy/ip4addr br-p3 30.0.0.3/24
+-    ovs-appctl ovs/route/add 30.0.0.0/24 br-p3
+     ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1
+     ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2
+     ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3
+ ], [0], [stdout])
+ 
+ AT_CHECK([
+-    ovs-appctl ovs/route/add 10.0.0.0/24 br-p1
+     ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1
+     ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2
+     ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3
+ ], [0], [stdout])
+ 
+ AT_CHECK([
+-    ovs-appctl ovs/route/add 20.0.0.0/24 br-p2
+     ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1
+     ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2
+     ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3
+ ], [0], [stdout])
+ 
+ AT_CHECK([
+-    ovs-appctl ovs/route/add 30.0.0.0/24 br-p3
+     ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1
+     ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2
+     ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3
+ ], [0], [stdout])
+ 
+ AT_CHECK([
+-    ovs-appctl ovs/route/show | grep User:
++    ovs-appctl ovs/route/show | grep Cached: | sort
+ ], [0], [dnl
+-User: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1
+-User: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2
+-User: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3
++Cached: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 local
++Cached: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 local
++Cached: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 local
+ ])
+ 
+ AT_CHECK([
+diff --git a/tests/ofproto-dpif.at b/tests/ofproto-dpif.at
+index e305e7b9cd..033f548084 100644
+--- a/tests/ofproto-dpif.at
++++ b/tests/ofproto-dpif.at
+@@ -351,6 +351,49 @@ recirc_id(0),in_port(4),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=ff:
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([ofproto-dpif - active-backup bonding set primary])
++
++OVS_VSWITCHD_START(
++  [add-bond br0 bond0 p1 p2 bond_mode=active-backup \
++                other_config:bond-primary=p1 -- \
++   set bridge br0 other-config:hwaddr=aa:66:aa:66:aa:00 -- \
++   set interface p1 type=dummy options:pstream=punix:$OVS_RUNDIR/p1.sock ofport_request=1 -- \
++   set interface p2 type=dummy options:pstream=punix:$OVS_RUNDIR/p2.sock ofport_request=2 -- \
++   add-port br0 p7 -- set interface p7 ofport_request=7 type=dummy -- \
++   add-br br1 -- \
++   set bridge br1 other-config:hwaddr=aa:66:aa:66:00:00 -- \
++   set bridge br1 datapath-type=dummy -- \
++   add-bond br1 bond1 p3 p4 bond_mode=active-backup \
++                other_config:bond-primary=p3 -- \
++   set interface p3 type=dummy options:stream=unix:$OVS_RUNDIR/p1.sock ofport_request=3 -- \
++   set interface p4 type=dummy options:stream=unix:$OVS_RUNDIR/p2.sock ofport_request=4 -- \
++   add-port br1 p8 -- set interface p8 ofport_request=8 type=dummy])
++
++WAIT_FOR_DUMMY_PORTS([p3], [p4])
++
++AT_CHECK([ovs-ofctl add-flow br0 action=normal])
++AT_CHECK([ovs-ofctl add-flow br1 action=normal])
++
++dnl Create datapath flow with bidirectional traffic.
++AT_CHECK([ovs-appctl netdev-dummy/receive p8 'in_port(8),eth(src=50:54:00:00:00:0a,dst=50:54:00:00:00:09),eth_type(0x0800),ipv4(src=10.0.0.1,dst=10.0.0.2,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
++AT_CHECK([ovs-appctl netdev-dummy/receive p7 'in_port(7),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
++AT_CHECK([ovs-appctl netdev-dummy/receive p8 'in_port(8),eth(src=50:54:00:00:00:0a,dst=50:54:00:00:00:09),eth_type(0x0800),ipv4(src=10.0.0.1,dst=10.0.0.2,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
++AT_CHECK([ovs-appctl netdev-dummy/receive p7 'in_port(7),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
++
++dnl Set p2 and p4 as primary.
++AT_CHECK([ovs-vsctl set port bond0 other_config:bond-primary=p2 -- \
++                    set port bond1 other_config:bond-primary=p4])
++
++OVS_WAIT_UNTIL([ovs-appctl bond/show | grep -q 'active-backup primary: p4'])
++
++AT_CHECK([ovs-appctl revalidator/wait])
++
++AT_CHECK([ovs-appctl dpctl/dump-flows --names | grep -q "actions:p[[13]]"], [1])
++AT_CHECK([ovs-appctl dpctl/dump-flows --names | grep -q "actions:p[[24]]"], [0])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([ofproto-dpif - balance-slb bonding])
+ # Create br0 with members bond0(p1, p2, p3) and p7,
+ #    and br1 with members p4, p5, p6 and p8.
+@@ -547,6 +590,23 @@ ovs-appctl time/warp 1000 100
+ ovs-appctl bond/show > bond3.txt
+ AT_CHECK([sed -n '/member p2/,/^$/p' bond3.txt | grep 'hash'], [0], [ignore])
+ 
++# Check that both ports going down and back up doesn't break statistics.
++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p1 down], 0, [OK
++])
++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p2 down], 0, [OK
++])
++ovs-appctl time/warp 1000 100
++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p1 up], 0, [OK
++])
++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p2 up], 0, [OK
++])
++ovs-appctl time/warp 1000 100
++
++AT_CHECK([SEND_TCP_BOND_PKTS([p5], [5], [65500])])
++# We sent 49125 KB of data total in 3 batches.  No hash should have more
++# than that amount of load. Just checking that it is within 5 digits.
++AT_CHECK([ovs-appctl bond/show | grep -E '[[0-9]]{6}'], [1])
++
+ OVS_VSWITCHD_STOP()
+ AT_CLEANUP
+ 
+@@ -740,6 +800,73 @@ Datapath actions: drop
+ OVS_VSWITCHD_STOP()
+ AT_CLEANUP
+ 
++AT_SETUP([ofproto-dpif - active bond member survives restart])
++dnl Create bond0 with members p1, p2 and p3. Initially, set p2 as active.
++dnl Restart ovs-vswitchd. Check that p2 is still active.
++OVS_VSWITCHD_START(
++  [add-bond br0 bond0 p1 p2 p3 bond_mode=active-backup -- \
++   set interface p1 type=dummy ofport_request=1 -- \
++   set interface p2 type=dummy ofport_request=2 -- \
++   set interface p3 type=dummy ofport_request=3 --])
++AT_CHECK([ovs-appctl bond/set-active-member bond0 p2], [0], [ignore])
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
++---- bond0 ----
++bond_mode: active-backup
++bond may use recirculation: no, <del>
++bond-hash-basis: 0
++lb_output action: disabled, bond-id: -1
++updelay: 0 ms
++downdelay: 0 ms
++lacp_status: off
++lacp_fallback_ab: false
++active-backup primary: <none>
++<active member mac del>
++
++member p1: enabled
++  may_enable: true
++
++member p2: enabled
++  active member
++  may_enable: true
++
++member p3: enabled
++  may_enable: true
++])
++
++dnl Restart ovs-vswitchd with an empty ovs-vswitchd log file.
++OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
++mv ovs-vswitchd.log ovs-vswitchd_1.log
++AT_CHECK([ovs-vswitchd --enable-dummy --disable-system --disable-system-route --detach \
++         --no-chdir --pidfile --log-file -vfile:rconn:dbg -vvconn -vofproto_dpif -vunixctl],
++         [0], [], [stderr])
++
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl bond/show | STRIP_RECIRC_ID | STRIP_ACTIVE_MEMBER_MAC], [dnl
++---- bond0 ----
++bond_mode: active-backup
++bond may use recirculation: no, <del>
++bond-hash-basis: 0
++lb_output action: disabled, bond-id: -1
++updelay: 0 ms
++downdelay: 0 ms
++lacp_status: off
++lacp_fallback_ab: false
++active-backup primary: <none>
++<active member mac del>
++
++member p1: enabled
++  may_enable: true
++
++member p2: enabled
++  active member
++  may_enable: true
++
++member p3: enabled
++  may_enable: true
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([ofproto-dpif - bond - allow duplicated frames])
+ dnl Receiving of duplicated multicast frames should be allowed with 'all_members_active'.
+ OVS_VSWITCHD_START([dnl
+@@ -930,6 +1057,28 @@ AT_CHECK([tail -1 stdout], [0],
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([ofproto-dpif - group with ct and dnat recirculation in action list])
++OVS_VSWITCHD_START
++add_of_ports br0 1 10
++AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 \
++    'group_id=1234,type=all,bucket=ct(nat(dst=10.10.10.7:80),commit,table=2)'])
++AT_DATA([flows.txt], [dnl
++table=0 ip,ct_state=-trk actions=group:1234
++table=2 ip,ct_state=+trk actions=output:10
++])
++AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt])
++AT_CHECK([ovs-appctl ofproto/trace br0 '
++  in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,
++  nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=1,nw_tos=0,nw_ttl=128,nw_frag=no,
++  icmp_type=8,icmp_code=0
++'], [0], [stdout])
++AT_CHECK([grep 'Datapath actions' stdout], [0], [dnl
++Datapath actions: ct(commit,nat(dst=10.10.10.7:80)),recirc(0x1)
++Datapath actions: 10
++])
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([ofproto-dpif - group actions have no effect afterwards])
+ OVS_VSWITCHD_START
+ add_of_ports br0 1 10
+@@ -1132,6 +1281,60 @@ bucket3 >= 500
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([ofproto-dpif - select group with dp_hash and equal weights])
++
++OVS_VSWITCHD_START
++add_of_ports br0 1 10
++
++AT_CHECK([ovs-appctl vlog/set ofproto_dpif:file:dbg vconn:file:info])
++
++AT_DATA([stddev.awk], [
++  {
++    # $1 (target) is a mean value, because all weights are the same.
++    # $2 (hits) is an actual number of hashes assigned to this bucket.
++    n_hashes += $2
++    n_buckets++
++    sum_sq_diff += ($2 - $1) * ($2 - $1)
++  }
++  END {
++    mean = n_hashes / n_buckets
++    stddev = sqrt(sum_sq_diff / n_buckets)
++    stddevp = stddev * 100 / mean
++
++    print "hashes:", n_hashes, "buckets:", n_buckets
++    print "mean:", mean, "stddev:", stddev, "(", stddevp, "% )"
++
++    # Make sure that standard deviation of load between buckets is below 12.5%.
++    # Note: it's not a strict requirement, but a good number that passes tests.
++    if (stddevp <= 12.5) { print "PASS" }
++    else { print "FAIL" }
++  }
++])
++
++m4_define([CHECK_DISTRIBUTION], [
++  AT_CHECK([tail -n $1 ovs-vswitchd.log | grep 'ofproto_dpif|DBG|.*Bucket' \
++                | sed 's/.*target=\([[0-9\.]]*\) hits=\([[0-9]]*\)/\1 \2/' \
++                | awk -f stddev.awk], [0], [stdout])
++  AT_CHECK([grep -q "buckets: $2" stdout])
++  AT_CHECK([grep -q 'PASS' stdout])
++])
++
++m4_define([OF_GROUP], [group_id=$1,type=select,selection_method=dp_hash])
++m4_define([OFG_BUCKET], [bucket=weight=$1,output:10])
++
++dnl Test load distribution in groups with up to 64 equally weighted buckets.
++m4_define([OFG_BUCKETS], [OFG_BUCKET(100)])
++m4_for([id], [1], [64], [1], [
++  get_log_next_line_num
++  AT_CHECK([ovs-ofctl -O OpenFlow15 add-group br0 \
++                "OF_GROUP(id),OFG_BUCKETS()"])
++  CHECK_DISTRIBUTION([+$LINENUM], [id])
++  m4_append([OFG_BUCKETS], [,OFG_BUCKET(100)])
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
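For reference, the awk script's pass criterion restated in Python: the population standard deviation of per-bucket hits as a percentage of the mean, where in the equal-weight case every bucket's target equals the mean:

    import math

    def load_stddev_percent(buckets):
        """buckets: (target, hits) pairs, as logged per bucket."""
        hits = [h for _, h in buckets]
        mean = sum(hits) / len(hits)
        stddev = math.sqrt(sum((h - t) ** 2 for t, h in buckets)
                           / len(buckets))
        return stddev * 100 / mean

    # Two buckets splitting 256 dp_hash values 130/126:
    print(load_stddev_percent([(128.0, 130), (128.0, 126)]))  # 1.5625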
+ AT_SETUP([ofproto-dpif - select group with explicit dp_hash selection method])
+ 
+ OVS_VSWITCHD_START
+@@ -5242,6 +5445,33 @@ AT_CHECK_UNQUOTED([tail -1 stdout], [0],
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([ofproto-dpif - mirroring, metadata modification])
++AT_KEYWORDS([mirror mirrors mirroring])
++OVS_VSWITCHD_START
++add_of_ports br0 1 2 3
++AT_CHECK([ovs-vsctl set Bridge br0 mirrors=@m -- \
++            --id=@p3 get Port p3 -- \
++            --id=@m create Mirror name=mymirror select_all=true output_port=@p3],
++         [0], [ignore])
++
++AT_DATA([flows.txt], [dnl
++in_port=1 actions=load:0x00->NXM_OF_IN_PORT[[]],output:2
++])
++AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
++
++dnl Metadata modified, duplicate packet shouldn't be delivered to mirror.
++m4_define([ICMP_FLOW], [m4_join([,],
++  [in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800)],
++  [ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no)],
++  [icmp(type=8,code=0)])])
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "ICMP_FLOW"], [0], [stdout])
++AT_CHECK_UNQUOTED([tail -1 stdout], [0],
++  [Datapath actions: 3,2
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([ofproto-dpif - mirroring, OFPP_NONE ingress port])
+ AT_KEYWORDS([mirror mirrors mirroring])
+ OVS_VSWITCHD_START
+@@ -6178,6 +6408,57 @@ AT_CHECK([test 1 = `$PYTHON3 "$top_srcdir/utilities/ovs-pcap.in" p2-tx.pcap | wc
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([ofproto-dpif - continuation with meters])
++AT_KEYWORDS([continuations pause meters])
++OVS_VSWITCHD_START
++add_of_ports br0 1 2
++
++dnl Add meter with id=1.
++AT_CHECK([ovs-ofctl -O OpenFlow13 add-meter br0 'meter=1 pktps bands=type=drop rate=1'])
++
++AT_DATA([flows.txt], [dnl
++table=0 dl_dst=50:54:00:00:00:0a actions=goto_table(1)
++table=1 dl_dst=50:54:00:00:00:0a actions=controller(pause,meter_id=1)
++])
++AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br0 flows.txt])
++
++on_exit 'kill $(cat ovs-ofctl.pid)'
++AT_CAPTURE_FILE([ofctl_monitor.log])
++AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl -P nxt_packet_in \
++                    --detach --no-chdir --pidfile 2> ofctl_monitor.log])
++
++AT_CHECK([ovs-appctl netdev-dummy/receive p1 \
++          'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234)'])
++
++OVS_WAIT_UNTIL([test $(wc -l < ofctl_monitor.log) -ge 2])
++OVS_APP_EXIT_AND_WAIT([ovs-ofctl])
++AT_CHECK([cat ofctl_monitor.log], [0], [dnl
++NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=14 in_port=1 (via action) data_len=14 (unbuffered)
++vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,dl_type=0x1234
++])
++
++AT_CHECK([ovs-appctl revalidator/purge], [0])
++AT_CHECK([ovs-ofctl -O OpenFlow13 dump-flows br0 | ofctl_strip | sort], [0], [dnl
++ n_packets=1, n_bytes=14, dl_dst=50:54:00:00:00:0a actions=goto_table:1
++ table=1, n_packets=1, n_bytes=14, dl_dst=50:54:00:00:00:0a actions=controller(pause,meter_id=1)
++OFPST_FLOW reply (OF1.3):
++])
++
++AT_CHECK([ovs-ofctl -O OpenFlow13 dump-meters br0 | ofctl_strip | sort], [0], [dnl
++OFPST_METER_CONFIG reply (OF1.3):
++meter=1 pktps bands=
++type=drop rate=1
++])
++
++AT_CHECK([ovs-ofctl -O OpenFlow13 meter-stats br0 | strip_timers], [0], [dnl
++OFPST_METER reply (OF1.3) (xid=0x2):
++meter:1 flow_count:0 packet_in_count:1 byte_in_count:14 duration:0.0s bands:
++0: packet_count:0 byte_count:0
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([ofproto-dpif - continuation with patch port])
+ AT_KEYWORDS([continuations pause resume])
+ OVS_VSWITCHD_START(
+@@ -7653,12 +7934,14 @@ dummy@ovs-dummy: hit:0 missed:0
+     vm1 5/3: (dummy: ifindex=2011)
+ ])
+ 
+-dnl set up route to 1.1.2.92 via br0 and action=normal
++dnl Add 1.1.2.92 to br0 and action=normal
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK
+-])
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++])
+ 
+ dnl Prime ARP Cache for 1.1.2.92
+ AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)'])
+@@ -7669,10 +7952,13 @@ ovs-vsctl \
+    --id=@sf create sflow targets=\"127.0.0.1:$SFLOW_PORT\" agent=127.0.0.1 \
+      header=128 sampling=1 polling=0
+ 
+-dnl set up route to 192.168.1.2 via br0
++dnl Add 192.168.1.2 to br0.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 192.168.1.1/16], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 192.168.0.0/16 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++Cached: 192.168.0.0/16 dev br0 SRC 192.168.1.1 local
+ ])
+ 
+ dnl add rule for int-br to force packet onto tunnel. There is no ifindex
+@@ -12041,3 +12327,48 @@ AT_CHECK([test 1 = `ovs-ofctl parse-pcap p2-tx.pcap | wc -l`])
+ 
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
++
++AT_SETUP([ofproto-dpif - Cleanup missing datapath flows])
++
++OVS_VSWITCHD_START
++add_of_ports br0 1 2
++
++m4_define([ICMP_PKT], [m4_join([,],
++    [eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800)],
++    [ipv4(src=10.10.10.2,dst=10.10.10.1,proto=1,tos=1,ttl=128,frag=no)],
++    [icmp(type=8,code=0)])])
++
++AT_CHECK([ovs-ofctl del-flows br0])
++AT_CHECK([ovs-ofctl add-flow br0 'actions=normal' ])
++
++AT_CHECK([ovs-appctl netdev-dummy/receive p1 'ICMP_PKT'])
++
++AT_CHECK([ovs-appctl dpctl/dump-flows --names | strip_used | strip_stats | dnl
++          strip_duration | strip_dp_hash | sort], [0], [dnl
++flow-dump from the main thread:
++recirc_id(0),in_port(p1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:never, actions:br0,p2
++])
++
++dnl Make sure the ukey exists.
++AT_CHECK([ovs-appctl upcall/show | grep '(keys' | awk '{print $3}' | \
++            grep -q '1)'], [0])
++
++dnl Delete all datapath flows, and make sure they are gone.
++AT_CHECK([ovs-appctl dpctl/del-flows])
++AT_CHECK([ovs-appctl dpctl/dump-flows --names ], [0], [])
++
++dnl Move forward in time and make sure we have at least 4 * 500ms.
++AT_CHECK([ovs-appctl time/warp 3000 300], [0], [ignore])
++
++dnl Make sure no more ukeys exists.
++AT_CHECK([ovs-appctl upcall/show | grep '(keys' | awk '{print $3}' | \
++            grep -qv '0)'], [1])
++
++dnl Verify coverage counter was hit.
++AT_CHECK([ovs-appctl coverage/read-counter revalidate_missing_dp_flow], [0],
++         [dnl
++1
++])
++
++OVS_VSWITCHD_STOP(["/failed to flow_del (No such file or directory)/d"])
++AT_CLEANUP
+diff --git a/tests/ofproto-macros.at b/tests/ofproto-macros.at
+index c22fb3c79c..3795ca7149 100644
+--- a/tests/ofproto-macros.at
++++ b/tests/ofproto-macros.at
+@@ -169,6 +169,11 @@ strip_recirc() {
+         s/recirc_id=[[x0-9]]*/recirc_id=<recirc>/
+         s/recirc([[x0-9]]*)/recirc(<recirc>)/'
+ }
++
++# Strips dp_hash from output.
++strip_dp_hash() {
++    sed 's/dp_hash([[0-9a-fx/]]*),//'
++}
+ m4_divert_pop([PREPARE_TESTS])
+ 
+ m4_define([TESTABLE_LOG], [-vPATTERN:ANY:'%c|%p|%m'])
+diff --git a/tests/ovs-ofctl.at b/tests/ovs-ofctl.at
+index d03d365003..a9337f6192 100644
+--- a/tests/ovs-ofctl.at
++++ b/tests/ovs-ofctl.at
+@@ -3086,6 +3086,51 @@ AT_CHECK([ovs-ofctl -O OpenFlow14 dump-flows br0 | ofctl_strip | sed '/OFPST_FLO
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([ovs-ofctl replace-flows with fragments])
++OVS_VSWITCHD_START
++
++AT_DATA([frag_flows.txt], [dnl
++ ip,nw_frag=first actions=drop
++ ip,nw_frag=later actions=drop
++ ip,nw_frag=no actions=NORMAL
++ ip,nw_frag=not_later actions=NORMAL
++ ip,nw_frag=yes actions=LOCAL
++])
++AT_DATA([replace_flows.txt], [dnl
++ ip,nw_frag=first actions=NORMAL
++ ip,nw_frag=later actions=LOCAL
++ ip,nw_frag=no actions=drop
++ ip,nw_frag=not_later actions=drop
++ ip,nw_frag=yes actions=drop
++])
++
++AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br0 frag_flows.txt])
++on_exit 'ovs-ofctl -O OpenFlow13 dump-flows br0'
++
++dnl Check that flow replacement works.
++AT_CHECK([ovs-ofctl -vvconn:console:dbg -O OpenFlow13 \
++            replace-flows br0 replace_flows.txt 2>&1 | grep FLOW_MOD \
++            | sed 's/.*\(OFPT_FLOW_MOD.*\)/\1/' | strip_xids | sort], [0], [dnl
++OFPT_FLOW_MOD (OF1.3): ADD ip,nw_frag=first actions=NORMAL
++OFPT_FLOW_MOD (OF1.3): ADD ip,nw_frag=later actions=LOCAL
++OFPT_FLOW_MOD (OF1.3): ADD ip,nw_frag=no actions=drop
++OFPT_FLOW_MOD (OF1.3): ADD ip,nw_frag=not_later actions=drop
++OFPT_FLOW_MOD (OF1.3): ADD ip,nw_frag=yes actions=drop
++])
++
++dnl Check that replacement to the same set doesn't cause flow modifications.
++AT_CHECK([ovs-ofctl -vvconn:console:dbg -O OpenFlow13 \
++            replace-flows br0 replace_flows.txt 2>&1 | grep FLOW_MOD \
++            | sed 's/.*\(OFPT_FLOW_MOD.*\)/\1/' | strip_xids | sort], [0], [])
++
++dnl Compare the flow dump against the expected set.
++cat replace_flows.txt > expout
++AT_CHECK([ovs-ofctl -O OpenFlow13 dump-flows br0 \
++            | ofctl_strip | sed '/OFPST_FLOW/d' | sort], [0], [expout])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([ovs-ofctl replace-flows with --bundle])
+ OVS_VSWITCHD_START
+ 
+diff --git a/tests/ovsdb-cluster.at b/tests/ovsdb-cluster.at
+index 481afc08b3..9d8b4d06a4 100644
+--- a/tests/ovsdb-cluster.at
++++ b/tests/ovsdb-cluster.at
+@@ -473,6 +473,112 @@ done
+ 
+ AT_CLEANUP
+ 
++AT_SETUP([OVSDB cluster - leadership change after replication while joining])
++AT_KEYWORDS([ovsdb server negative unix cluster join])
++
++n=5
++AT_CHECK([ovsdb-tool '-vPATTERN:console:%c|%p|%m' create-cluster s1.db dnl
++              $abs_srcdir/idltest.ovsschema unix:s1.raft], [0], [], [stderr])
++cid=$(ovsdb-tool db-cid s1.db)
++schema_name=$(ovsdb-tool schema-name $abs_srcdir/idltest.ovsschema)
++for i in $(seq 2 $n); do
++    AT_CHECK([ovsdb-tool join-cluster s$i.db $schema_name unix:s$i.raft unix:s1.raft])
++done
++
++on_exit 'kill $(cat *.pid)'
++on_exit "
++  for i in \$(ls $(pwd)/s[[0-$n]]); do
++    ovs-appctl --timeout 1 -t \$i cluster/status $schema_name;
++  done
++"
++
++dnl Start servers one by one, asking all existing servers to transfer
++dnl leadership after the append reply, forcing the joining server to try
++dnl another one that will also transfer leadership.  Since the transfer
++dnl happens after the server's update is replicated to other servers, one
++dnl of the other servers will actually commit it.  It may be a new leader
++dnl from one of the old members or the joining server itself.
++for i in $(seq $n); do
++    dnl Make sure that all already started servers joined the cluster.
++    for j in $(seq $((i - 1)) ); do
++        AT_CHECK([ovsdb_client_wait unix:s$j.ovsdb $schema_name connected])
++    done
++    for j in $(seq $((i - 1)) ); do
++        OVS_WAIT_UNTIL([ovs-appctl -t "$(pwd)"/s$j \
++                          cluster/failure-test \
++                            transfer-leadership-after-sending-append-request \
++                        | grep -q "engaged"])
++    done
++
++    AT_CHECK([ovsdb-server -v -vconsole:off -vsyslog:off \
++                           --detach --no-chdir --log-file=s$i.log \
++                           --pidfile=s$i.pid --unixctl=s$i \
++                           --remote=punix:s$i.ovsdb s$i.db])
++done
++
++dnl Make sure that all servers joined the cluster.
++for i in $(seq $n); do
++    AT_CHECK([ovsdb_client_wait unix:s$i.ovsdb $schema_name connected])
++done
++
++for i in $(seq $n); do
++    OVS_APP_EXIT_AND_WAIT_BY_TARGET([$(pwd)/s$i], [s$i.pid])
++done
++
++AT_CLEANUP
++
++AT_SETUP([OVSDB cluster - leadership change before replication while joining])
++AT_KEYWORDS([ovsdb server negative unix cluster join])
++
++n=5
++AT_CHECK([ovsdb-tool '-vPATTERN:console:%c|%p|%m' create-cluster s1.db dnl
++              $abs_srcdir/idltest.ovsschema unix:s1.raft], [0], [], [stderr])
++cid=$(ovsdb-tool db-cid s1.db)
++schema_name=$(ovsdb-tool schema-name $abs_srcdir/idltest.ovsschema)
++for i in $(seq 2 $n); do
++    AT_CHECK([ovsdb-tool join-cluster s$i.db $schema_name unix:s$i.raft unix:s1.raft])
++done
++
++on_exit 'kill $(cat *.pid)'
++on_exit "
++  for i in \$(ls $(pwd)/s[[0-$n]]); do
++    ovs-appctl --timeout 1 -t \$i cluster/status $schema_name;
++  done
++"
++
++dnl Start servers one by one, asking all existing servers to transfer
++dnl leadership right after starting to add a server.  The joining server
++dnl will need to find a new leader that will also transfer leadership.
++dnl This continues until the same server becomes a leader for the second
++dnl time and is finally able to add the new server.
++for i in $(seq $n); do
++    dnl Make sure that all already started servers joined the cluster.
++    for j in $(seq $((i - 1)) ); do
++        AT_CHECK([ovsdb_client_wait unix:s$j.ovsdb $schema_name connected])
++    done
++    for j in $(seq $((i - 1)) ); do
++        OVS_WAIT_UNTIL([ovs-appctl -t "$(pwd)"/s$j \
++                          cluster/failure-test \
++                            transfer-leadership-after-starting-to-add \
++                        | grep -q "engaged"])
++    done
++
++    AT_CHECK([ovsdb-server -v -vconsole:off -vsyslog:off \
++                           --detach --no-chdir --log-file=s$i.log \
++                           --pidfile=s$i.pid --unixctl=s$i \
++                           --remote=punix:s$i.ovsdb s$i.db])
++done
++
++dnl Make sure that all servers joined the cluster.
++for i in $(seq $n); do
++    AT_CHECK([ovsdb_client_wait unix:s$i.ovsdb $schema_name connected])
++done
++
++for i in $(seq $n); do
++    OVS_APP_EXIT_AND_WAIT_BY_TARGET([$(pwd)/s$i], [s$i.pid])
++done
++
++AT_CLEANUP
+ 
+ 
+ OVS_START_SHELL_HELPERS
+diff --git a/tests/ovsdb-idl.at b/tests/ovsdb-idl.at
+index fb568dd823..0f6ebd4d34 100644
+--- a/tests/ovsdb-idl.at
++++ b/tests/ovsdb-idl.at
+@@ -167,8 +167,17 @@ m4_define([OVSDB_CHECK_IDL_REGISTER_COLUMNS_PY],
+    OVSDB_START_IDLTEST
+    m4_if([$2], [], [],
+      [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])])
+-   AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py  -t10 idl $srcdir/idltest.ovsschema unix:socket ?simple:b,ba,i,ia,r,ra,s,sa,u,ua?simple3:name,uset,uref?simple4:name?simple6:name,weak_ref?link1:i,k,ka,l2?link2:i,l1?singleton:name $3],
+-            [0], [stdout], [ignore])
++   m4_define([REGISTER], m4_joinall([?], [],
++     [simple:b,ba,i,ia,r,ra,s,sa,u,ua],
++     [simple3:name,uset,uref],
++     [simple4:name],
++     [simple6:name,weak_ref],
++     [link1:i,k,ka,l2],
++     [link2:i,l1],
++     [indexed:i],
++     [singleton:name]))
++   AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t10 idl $srcdir/idltest.ovsschema \
++                unix:socket REGISTER $3], [0], [stdout], [ignore])
+    AT_CHECK([sort stdout | uuidfilt]m4_if([$6],,, [[| $6]]),
+             [0], [$4])
+    OVSDB_SERVER_SHUTDOWN
+@@ -747,6 +756,31 @@ OVSDB_CHECK_IDL([simple idl, conditional, multiple tables],
+ 009: done
+ ]])
+ 
++OVSDB_CHECK_IDL([indexed idl, modification and removal],
++  [],
++  [['["idltest",
++      {"op": "insert",
++       "table": "indexed",
++       "row": {"i": 123 }}]' \
++    '["idltest",
++      {"op": "update",
++       "table": "indexed",
++       "where": [["i", "==", 123]],
++       "row": {"i": 456}}]' \
++    '["idltest",
++      {"op": "delete",
++       "table": "indexed",
++       "where": [["i", "==", 456]]}]']],
++  [[000: empty
++001: {"error":null,"result":[{"uuid":["uuid","<0>"]}]}
++002: table indexed: i=123 uuid=<0>
++003: {"error":null,"result":[{"count":1}]}
++004: table indexed: i=456 uuid=<0>
++005: {"error":null,"result":[{"count":1}]}
++006: empty
++007: done
++]])
++
+ OVSDB_CHECK_IDL([self-linking idl, consistent ops],
+   [],
+   [['["idltest",
+@@ -1119,6 +1153,19 @@ OVSDB_CHECK_IDL_FETCH_COLUMNS([simple idl, initially populated],
+ 003: done
+ ]])
+ 
++m4_define([OVSDB_CHECK_IDL_WO_MONITOR_COND_C],
++  [AT_SETUP([$1 - C])
++   AT_KEYWORDS([ovsdb server idl monitor $4])
++   OVSDB_START_IDLTEST
++   AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/disable-monitor-cond])
++
++   AT_CHECK([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t10 idl unix:socket $2],
++            [0], [stdout], [ignore])
++   AT_CHECK([sort stdout | uuidfilt]m4_if([$5],,, [[| $5]]),
++            [0], [$3])
++   OVSDB_SERVER_SHUTDOWN
++   AT_CLEANUP])
++
+ m4_define([OVSDB_CHECK_IDL_WO_MONITOR_COND_PY],
+   [AT_SETUP([$1 - Python3])
+    AT_KEYWORDS([ovsdb server idl Python monitor $4])
+@@ -1132,7 +1179,8 @@ m4_define([OVSDB_CHECK_IDL_WO_MONITOR_COND_PY],
+    AT_CLEANUP])
+ 
+ m4_define([OVSDB_CHECK_IDL_WO_MONITOR_COND],
+-   [OVSDB_CHECK_IDL_WO_MONITOR_COND_PY($@)])
++   [OVSDB_CHECK_IDL_WO_MONITOR_COND_C($@)
++    OVSDB_CHECK_IDL_WO_MONITOR_COND_PY($@)])
+ 
+ 
+ OVSDB_CHECK_IDL_WO_MONITOR_COND([simple idl disable monitor-cond],
+@@ -1274,6 +1322,33 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated],
+ 003: done
+ ]])
+ 
++OVSDB_CHECK_IDL_TRACK([track, indexed idl, modification and removal],
++  [],
++  [['["idltest",
++      {"op": "insert",
++       "table": "indexed",
++       "row": {"i": 123 }}]' \
++    '["idltest",
++      {"op": "update",
++       "table": "indexed",
++       "where": [["i", "==", 123]],
++       "row": {"i": 456}}]' \
++    '["idltest",
++      {"op": "delete",
++       "table": "indexed",
++       "where": [["i", "==", 456]]}]']],
++  [[000: empty
++001: {"error":null,"result":[{"uuid":["uuid","<0>"]}]}
++002: table indexed: inserted row: i=123 uuid=<0>
++002: table indexed: updated columns: i
++003: {"error":null,"result":[{"count":1}]}
++004: table indexed: i=456 uuid=<0>
++004: table indexed: updated columns: i
++005: {"error":null,"result":[{"count":1}]}
++006: empty
++007: done
++]])
++
+ dnl This test creates database with weak references and checks that orphan
+ dnl rows created for weak references are not available for iteration via
+ dnl list of tracked changes.
+@@ -1806,7 +1881,10 @@ OVSDB_CHECK_IDL_PARTIAL_UPDATE_MAP_COLUMN([map, simple2 idl-partial-update-map-c
+ 007: name=String2 smap=[[key2 : value2]] imap=[[3 : myids2]]
+ 008: After trying to delete a deleted element
+ 009: name=String2 smap=[[key2 : value2]] imap=[[3 : myids2]]
+-010: End test
++010: After Create element, update smap and Delete element
++011: name=String2 smap=[[key2 : value2]] imap=[[3 : myids2]]
++012: After update smap and Delete element
++014: End test
+ ]])
+ 
+ OVSDB_CHECK_IDL_PY([partial-map idl],
+@@ -1869,7 +1947,9 @@ OVSDB_CHECK_IDL_PARTIAL_UPDATE_SET_COLUMN([set, simple3 idl-partial-update-set-c
+ 009: table simple3: name=String2 uset=[<0>,<1>,<4>] uref=[] uuid=<2>
+ 010: After add to other table + set of strong ref
+ 011: table simple3: name=String2 uset=[<0>,<1>,<4>] uref=[<5>] uuid=<2>
+-012: End test
++012: After Create element, update set and Delete element
++013: table simple3: name=String2 uset=[<0>,<1>,<4>] uref=[<5>] uuid=<2>
++014: End test
+ ]])
+ 
+ OVSDB_CHECK_IDL_PY([partial-set idl],
+@@ -2022,6 +2102,36 @@ OVSDB_CHECK_IDL_NOTIFY([simple idl verify notify],
+ 015: done
+ ]])
+ 
++OVSDB_CHECK_IDL_NOTIFY([indexed idl, modification and removal notify],
++  [['track-notify' \
++    '["idltest",
++      {"op": "insert",
++       "table": "indexed",
++       "row": {"i": 123 }}]' \
++    '["idltest",
++      {"op": "update",
++       "table": "indexed",
++       "where": [["i", "==", 123]],
++       "row": {"i": 456}}]' \
++    '["idltest",
++      {"op": "delete",
++       "table": "indexed",
++       "where": [["i", "==", 456]]}]']],
++  [[000: empty
++000: event:create, row={}, uuid=<0>, updates=None
++000: event:create, row={}, uuid=<1>, updates=None
++001: {"error":null,"result":[{"uuid":["uuid","<2>"]}]}
++002: event:create, row={i=123}, uuid=<2>, updates=None
++002: table indexed: i=123 uuid=<2>
++003: {"error":null,"result":[{"count":1}]}
++004: event:update, row={i=456}, uuid=<2>, updates={i=123}
++004: table indexed: i=456 uuid=<2>
++005: {"error":null,"result":[{"count":1}]}
++006: empty
++006: event:delete, row={i=456}, uuid=<2>, updates=None
++007: done
++]])
++
+ # Tests to verify the functionality of the one column compound index.
+ # It tests index for one column string and integer indexes.
+ # The run of test-ovsdb generates the output of the display of data using the different indexes defined in
+diff --git a/tests/ovsdb-server.at b/tests/ovsdb-server.at
+index b8ccc4c8e2..ce6d32aee1 100644
+--- a/tests/ovsdb-server.at
++++ b/tests/ovsdb-server.at
+@@ -936,8 +936,10 @@ AT_CHECK_UNQUOTED(
+   [ignore])
+ # The error message for being unable to negotiate a shared ciphersuite
+ # is 'sslv3 alert handshake failure'. This is not the clearest message.
++# In openssl 3.2.0 all the error messages were updated to replace 'sslv3'
++# with 'ssl/tls'.
+ AT_CHECK_UNQUOTED(
+-  [grep "sslv3 alert handshake failure" output], [0],
++  [grep -E "(sslv3|ssl/tls) alert handshake failure" output], [0],
+   [stdout],
+   [ignore])
+ OVSDB_SERVER_SHUTDOWN(["
+diff --git a/tests/packet-type-aware.at b/tests/packet-type-aware.at
+index 14cebf6efa..d634930fd5 100644
+--- a/tests/packet-type-aware.at
++++ b/tests/packet-type-aware.at
+@@ -142,30 +142,27 @@ AT_CHECK([
+ ### Setup GRE tunnels
+ AT_CHECK([
+     ovs-appctl netdev-dummy/ip4addr br-p1 10.0.0.1/24 &&
+-    ovs-appctl ovs/route/add 10.0.0.0/24 br-p1 &&
+     ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1 &&
+     ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2 &&
+     ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3 &&
+ 
+     ovs-appctl netdev-dummy/ip4addr br-p2 20.0.0.2/24 &&
+-    ovs-appctl ovs/route/add 20.0.0.0/24 br-p2 &&
+     ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1 &&
+     ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2 &&
+     ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3 &&
+ 
+     ovs-appctl netdev-dummy/ip4addr br-p3 30.0.0.3/24 &&
+-    ovs-appctl ovs/route/add 30.0.0.0/24 br-p3 &&
+     ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1 &&
+     ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2 &&
+     ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3
+ ], [0], [ignore])
+ 
+ AT_CHECK([
+-    ovs-appctl ovs/route/show | grep User:
++    ovs-appctl ovs/route/show | grep Cached: | sort
+ ], [0], [dnl
+-User: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1
+-User: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2
+-User: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3
++Cached: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 local
++Cached: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 local
++Cached: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 local
+ ])
+ 
+ AT_CHECK([
+@@ -681,14 +678,13 @@ AT_CHECK([
+ 
+ AT_CHECK([
+     ovs-appctl netdev-dummy/ip4addr br2 10.0.0.1/24 &&
+-    ovs-appctl ovs/route/add 10.0.0.0/24 br2 &&
+     ovs-appctl tnl/arp/set br2 10.0.0.2 de:af:be:ef:ba:be
+ ], [0], [ignore])
+ 
+ AT_CHECK([
+-    ovs-appctl ovs/route/show | grep User:
++    ovs-appctl ovs/route/show | grep Cached:
+ ], [0], [dnl
+-User: 10.0.0.0/24 dev br2 SRC 10.0.0.1
++Cached: 10.0.0.0/24 dev br2 SRC 10.0.0.1 local
+ ])
+ 
+ 
+@@ -955,7 +951,6 @@ AT_CHECK([
+ 
+ AT_CHECK([
+     ovs-appctl netdev-dummy/ip4addr br0 20.0.0.1/24 &&
+-    ovs-appctl ovs/route/add 20.0.0.2/24 br0 &&
+     ovs-appctl tnl/neigh/set br0 20.0.0.1 aa:bb:cc:00:00:01 &&
+     ovs-appctl tnl/neigh/set br0 20.0.0.2 aa:bb:cc:00:00:02
+ ], [0], [ignore])
+@@ -963,9 +958,9 @@ AT_CHECK([
+ ovs-appctl time/warp 1000
+ 
+ AT_CHECK([
+-    ovs-appctl ovs/route/show | grep User
++    ovs-appctl ovs/route/show | grep Cached:
+ ],[0], [dnl
+-User: 20.0.0.0/24 dev br0 SRC 20.0.0.1
++Cached: 20.0.0.0/24 dev br0 SRC 20.0.0.1 local
+ ])
+ 
+ AT_CHECK([
+diff --git a/tests/sendpkt.py b/tests/sendpkt.py
+index 49ac45275a..7cbea51654 100755
+--- a/tests/sendpkt.py
++++ b/tests/sendpkt.py
+@@ -48,28 +48,10 @@ if len(args) < 2:
+ if options.packet_type != "eth":
+     parser.error('invalid argument to "-t"/"--type". Allowed value is "eth".')
+ 
+-# store the hex bytes with 0x appended at the beginning
+-# if not present in the user input and validate the hex bytes
+-hex_list = []
+-for a in args[1:]:
+-    if a[:2] != "0x":
+-        hex_byte = "0x" + a
+-    else:
+-        hex_byte = a
+-    try:
+-        temp = int(hex_byte, 0)
+-    except:
+-        parser.error("invalid hex byte " + a)
+-
+-    if temp > 0xff:
+-        parser.error("hex byte " + a + " cannot be greater than 0xff!")
+-
+-    hex_list.append(temp)
+-
+-if sys.version_info < (3, 0):
+-    pkt = "".join(map(chr, hex_list))
+-else:
+-    pkt = bytes(hex_list)
++# Strip '0x' prefixes from hex input, combine into a single string and
++# convert to bytes.
++hex_str = "".join([a[2:] if a.startswith("0x") else a for a in args[1:]])
++pkt = bytes.fromhex(hex_str)
+ 
+ try:
+     sockfd = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
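Editor's note: the rewrite above drops the Python 2 path and the byte-by-byte validation; arguments may now also be longer hex runs (for example, output of ovs-ofctl compose-packet --bare). A standalone sketch of the same conversion:

    # Each argument may be a single byte ("08"), carry a "0x" prefix
    # ("0x08"), or be a longer hex run; all are joined and decoded at once.
    # bytes.fromhex() rejects odd-length or non-hex input with ValueError.
    args = ["0x36", "b1", "ee7c0102"]
    hex_str = "".join(a[2:] if a.startswith("0x") else a for a in args)
    pkt = bytes.fromhex(hex_str)
    assert pkt == b"\x36\xb1\xee\x7c\x01\x02"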
+diff --git a/tests/system-dpdk-macros.at b/tests/system-dpdk-macros.at
+index 7cf9bac170..f8ba766739 100644
+--- a/tests/system-dpdk-macros.at
++++ b/tests/system-dpdk-macros.at
+@@ -102,7 +102,7 @@ m4_define([OVS_DPDK_CHECK_TESTPMD],
+ m4_define([OVS_DPDK_START_TESTPMD],
+   [AT_CHECK([lscpu], [], [stdout])
+    AT_CHECK([cat stdout | grep "NUMA node(s)" | awk '{c=1; while (c++<$(3)) {printf "512,"}; print "512"}' > NUMA_NODE])
+-   eal_options="$DPDK_EAL_OPTIONS --in-memory --socket-mem="$(cat NUMA_NODE)" --single-file-segments --no-pci"
++   eal_options="$DPDK_EAL_OPTIONS --in-memory --socket-mem="$(cat NUMA_NODE)" --single-file-segments --no-pci --file-prefix testpmd"
+    options="$1"
+    test "$options" != "${options%% -- *}" || options="$options -- "
+    eal_options="$eal_options ${options%% -- *}"
+diff --git a/tests/system-dpdk.at b/tests/system-dpdk.at
+index 1c97bf7772..e79c755657 100644
+--- a/tests/system-dpdk.at
++++ b/tests/system-dpdk.at
+@@ -88,6 +88,12 @@ ADD_VHOST_USER_CLIENT_PORT([br10], [dpdkvhostuserclient0], [$OVS_RUNDIR/dpdkvhos
+ AT_CHECK([ovs-vsctl show], [], [stdout])
+ sleep 2
+ 
++dnl Check that no mempool was allocated.
++AT_CHECK([ovs-appctl netdev-dpdk/get-mempool-info dpdkvhostuserclient0], [2], [], [dnl
++Not allocated
++ovs-appctl: ovs-vswitchd: server returned an error
++])
++
+ dnl Clean up
+ AT_CHECK([ovs-vsctl del-port br10 dpdkvhostuserclient0], [], [stdout], [stderr])
+ OVS_DPDK_STOP_VSWITCHD(["dnl
+diff --git a/tests/system-ipsec.at b/tests/system-ipsec.at
+index d3d27133b9..1e155fecea 100644
+--- a/tests/system-ipsec.at
++++ b/tests/system-ipsec.at
+@@ -110,16 +110,16 @@ m4_define([CHECK_LIBRESWAN],
+ dnl IPSEC_STATUS_LOADED([])
+ dnl
+ dnl Get number of loaded connections from ipsec status
+-m4_define([IPSEC_STATUS_LOADED], [ipsec status --rundir $ovs_base/$1 | \
++m4_define([IPSEC_STATUS_LOADED], [ipsec --rundir $ovs_base/$1 status | \
+            grep "Total IPsec connections" | \
+-           sed 's/[[0-9]]* Total IPsec connections: loaded \([[0-2]]\), active \([[0-2]]\).*/\1/m'])
++           sed 's/[[0-9]]* *Total IPsec connections: loaded \([[0-2]]\), active \([[0-2]]\).*/\1/m'])
+ 
+ dnl IPSEC_STATUS_ACTIVE([])
+ dnl
+ dnl Get number of active connections from ipsec status
+-m4_define([IPSEC_STATUS_ACTIVE], [ipsec status --rundir $ovs_base/$1 | \
++m4_define([IPSEC_STATUS_ACTIVE], [ipsec --rundir $ovs_base/$1 status | \
+            grep "Total IPsec connections" | \
+-           sed 's/[[0-9]]* Total IPsec connections: loaded \([[0-2]]\), active \([[0-2]]\).*/\2/m'])
++           sed 's/[[0-9]]* *Total IPsec connections: loaded \([[0-2]]\), active \([[0-2]]\).*/\2/m'])
+ 
+ dnl CHECK_ESP_TRAFFIC()
+ dnl
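Editor's note: the extra ' *' in the sed expressions tolerates variable padding between the numeric prefix and the text, which newer libreswan versions emit. An equivalent extraction sketched in Python (the sample line is illustrative):

    import re

    line = "000  Total IPsec connections: loaded 1, active 1"
    m = re.search(r"[0-9]* *Total IPsec connections: "
                  r"loaded ([0-2]), active ([0-2])", line)
    loaded, active = m.group(1), m.group(2)
    assert (loaded, active) == ("1", "1")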
+diff --git a/tests/system-layer3-tunnels.at b/tests/system-layer3-tunnels.at
+index 6fbdedb64f..5dcdd2afae 100644
+--- a/tests/system-layer3-tunnels.at
++++ b/tests/system-layer3-tunnels.at
+@@ -98,61 +98,6 @@ NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -W 2 10.1.1.2 | FORMAT_PING
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
+-AT_SETUP([layer3 - use non-local port as tunnel endpoint])
+-
+-OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy ofport_request=1])
+-AT_CHECK([ovs-vsctl add-port br0 vtep0 -- set int vtep0 type=dummy], [0])
+-AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy], [0])
+-AT_CHECK([ovs-vsctl add-port int-br t1 -- set Interface t1 type=gre \
+-                    options:remote_ip=1.1.2.92 ofport_request=3], [0])
+-
+-AT_CHECK([ovs-appctl dpif/show], [0], [dnl
+-dummy@ovs-dummy: hit:0 missed:0
+-  br0:
+-    br0 65534/100: (dummy-internal)
+-    p0 1/1: (dummy)
+-    vtep0 2/2: (dummy)
+-  int-br:
+-    int-br 65534/3: (dummy-internal)
+-    t1 3/4: (gre: remote_ip=1.1.2.92)
+-])
+-
+-AT_CHECK([ovs-appctl netdev-dummy/ip4addr vtep0 1.1.2.88/24], [0], [OK
+-])
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 vtep0], [0], [OK
+-])
+-AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+-AT_CHECK([ovs-ofctl add-flow int-br action=normal])
+-
+-dnl Use arp request and reply to achieve tunnel next hop mac binding
+-dnl By default, vtep0's MAC address is aa:55:aa:55:00:03
+-AT_CHECK([ovs-appctl netdev-dummy/receive vtep0 'recirc_id(0),in_port(2),eth(dst=ff:ff:ff:ff:ff:ff,src=aa:55:aa:55:00:03),eth_type(0x0806),arp(tip=1.1.2.92,sip=1.1.2.88,op=1,sha=aa:55:aa:55:00:03,tha=00:00:00:00:00:00)'])
+-AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0806),arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=aa:55:aa:55:00:03)'])
+-
+-AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl
+-1.1.2.92                                      f8:bc:12:44:34:b6   br0
+-])
+-
+-AT_CHECK([ovs-appctl ovs/route/show | tail -n+2 | sort], [0], [dnl
+-User: 1.1.2.0/24 dev vtep0 SRC 1.1.2.88
+-])
+-
+-dnl Check GRE tunnel pop
+-AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0800),ipv4(src=1.1.2.92,dst=1.1.2.88,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout])
+-
+-AT_CHECK([tail -1 stdout], [0],
+-  [Datapath actions: tnl_pop(4)
+-])
+-
+-dnl Check GRE tunnel push
+-AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(3),eth(dst=f9:bc:12:44:34:b6,src=af:55:aa:55:00:03),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.92,proto=1,tos=0,ttl=64,frag=no)'], [0], [stdout])
+-AT_CHECK([tail -1 stdout], [0],
+-  [Datapath actions: tnl_push(tnl_port(4),header(size=38,type=3,eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),gre((flags=0x0,proto=0x6558))),out_port(2)),1
+-])
+-
+-OVS_VSWITCHD_STOP
+-AT_CLEANUP
+-
+ AT_SETUP([layer3 - ping over MPLS Bareudp])
+ OVS_CHECK_BAREUDP()
+ OVS_TRAFFIC_VSWITCHD_START([_ADD_BR([br1])])
+diff --git a/tests/system-route.at b/tests/system-route.at
+index 114aaebc77..c0ecad6cfb 100644
+--- a/tests/system-route.at
++++ b/tests/system-route.at
+@@ -64,3 +64,67 @@ Cached: fc00:db8:beef::13/128 dev br0 GW fc00:db8:cafe::1 SRC fc00:db8:cafe::2])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
++
++dnl Checks that OVS doesn't use routes from non-standard tables.
++AT_SETUP([ovs-route - route tables])
++AT_KEYWORDS([route])
++OVS_TRAFFIC_VSWITCHD_START()
++
++dnl Create tap port.
++on_exit 'ip link del p1-route'
++AT_CHECK([ip tuntap add name p1-route mode tap])
++AT_CHECK([ip link set p1-route up])
++
++dnl Add ip address.
++AT_CHECK([ip addr add 10.0.0.17/24 dev p1-route], [0], [stdout])
++
++dnl Check that OVS catches route updates.
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl ovs/route/show | grep 'p1-route' | sort], [dnl
++Cached: 10.0.0.0/24 dev p1-route SRC 10.0.0.17
++Cached: 10.0.0.17/32 dev p1-route SRC 10.0.0.17 local])
++
++dnl Add a route to the main routing table and check that OVS caches
++dnl this new route.
++AT_CHECK([ip route add 10.0.0.18/32 dev p1-route])
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl ovs/route/show | grep 'p1-route' | sort], [dnl
++Cached: 10.0.0.0/24 dev p1-route SRC 10.0.0.17
++Cached: 10.0.0.17/32 dev p1-route SRC 10.0.0.17 local
++Cached: 10.0.0.18/32 dev p1-route SRC 10.0.0.17])
++
++dnl Add a route to a custom routing table and check that OVS doesn't cache it.
++AT_CHECK([ip route add 10.0.0.19/32 dev p1-route table 42])
++AT_CHECK([ip route show table 42 | grep 'p1-route' | grep -q '10.0.0.19'])
++dnl Give the main thread a chance to act.
++AT_CHECK([ovs-appctl revalidator/wait])
++dnl Check that OVS didn't learn this route.
++AT_CHECK([ovs-appctl ovs/route/show | grep 'p1-route' | sort], [0], [dnl
++Cached: 10.0.0.0/24 dev p1-route SRC 10.0.0.17
++Cached: 10.0.0.17/32 dev p1-route SRC 10.0.0.17 local
++Cached: 10.0.0.18/32 dev p1-route SRC 10.0.0.17
++])
++
++dnl Delete a route from the main table and check that OVS removes the route
++dnl from the cache.
++AT_CHECK([ip route del 10.0.0.18/32 dev p1-route])
++OVS_WAIT_UNTIL_EQUAL([ovs-appctl ovs/route/show | grep 'p1-route' | sort], [dnl
++Cached: 10.0.0.0/24 dev p1-route SRC 10.0.0.17
++Cached: 10.0.0.17/32 dev p1-route SRC 10.0.0.17 local])
++
++dnl Delete a route from a custom routing table and check that the cache
++dnl doesn't change.
++AT_CHECK([ip route del 10.0.0.19/32 dev p1-route table 42])
++dnl Give the main thread a chance to act.
++AT_CHECK([ovs-appctl revalidator/wait])
++dnl Check that the cache is still the same.
++AT_CHECK([ovs-appctl ovs/route/show | grep 'p1-route' | sort], [0], [dnl
++Cached: 10.0.0.0/24 dev p1-route SRC 10.0.0.17
++Cached: 10.0.0.17/32 dev p1-route SRC 10.0.0.17 local
++])
++
++dnl Delete ip address.
++AT_CHECK([ip addr del 10.0.0.17/24 dev p1-route], [0], [stdout])
++dnl Check that routes were removed from OVS.
++OVS_WAIT_UNTIL([test $(ovs-appctl ovs/route/show | grep -c 'p1-route') -eq 0 ])
++
++OVS_TRAFFIC_VSWITCHD_STOP
++AT_CLEANUP
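Editor's note: the OVS_WAIT_UNTIL_EQUAL checks above poll the route cache until it converges, because netlink updates are processed asynchronously by the main thread. A hypothetical Python equivalent of that polling (the command is the one used in the test; the helper itself is illustrative):

    import subprocess
    import time

    def wait_for_routes(dev, expected, timeout=30):
        # Poll "ovs-appctl ovs/route/show" until the cached routes that
        # mention 'dev' equal the expected, sorted list of lines.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            out = subprocess.run(["ovs-appctl", "ovs/route/show"],
                                 capture_output=True, text=True).stdout
            got = sorted(line for line in out.splitlines() if dev in line)
            if got == sorted(expected):
                return True
            time.sleep(0.5)
        return False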
+diff --git a/tests/system-traffic.at b/tests/system-traffic.at
+index 98e494abf4..b6de1ed611 100644
+--- a/tests/system-traffic.at
++++ b/tests/system-traffic.at
+@@ -2359,11 +2359,22 @@ table=20 actions=drop
+ AT_CHECK([ovs-ofctl del-flows br0])
+ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt])
+ 
++m4_define([ND_NS_PKT], [m4_join([,],
++  [eth_src=36:b1:ee:7c:01:03,eth_dst=36:b1:ee:7c:01:02,eth_type=0x86dd],
++  [ipv6_src=fe80::f816:3eff:fe04:6604,ipv6_dst=fe80::f816:3eff:fea7:dd0e],
++  [nw_proto=58,nw_ttl=255,nw_frag=no],
++  [icmpv6_type=136,icmpv6_code=0],
++  [nd_options_type=2,nd_tll=36:b1:ee:7c:01:03])])
++
+ dnl Send a mismatching neighbor discovery.
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 f1 f2 20 00 00 00 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 02 01 36 b1 ee 7c 01 03 > /dev/null])
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ND_NS_PKT,nd_target=3000::1')],
++  [0], [ignore])
+ 
+ dnl Send a matching neighbor discovery.
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 fe 5f 20 00 00 00 20 01 00 00 00 00 00 00 00 00 00 01 00 00 03 92 02 01 36 b1 ee 7c 01 03 > /dev/null])
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ND_NS_PKT,nd_target=2001::1:0:392')],
++  [0], [ignore])
+ 
+ AT_CHECK([ovs-appctl dpctl/dump-flows | strip_stats | strip_used | dnl
+           strip_key32 | strip_ptype | strip_eth | strip_recirc | dnl
+@@ -2375,10 +2386,14 @@ recirc_id(<recirc>),in_port(2),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(ty
+ OVS_WAIT_UNTIL([ovs-appctl dpctl/dump-flows | grep ",nd" | wc -l | grep -E ^0])
+ 
+ dnl Send a matching neighbor discovery.
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 fe 5f 20 00 00 00 20 01 00 00 00 00 00 00 00 00 00 01 00 00 03 92 02 01 36 b1 ee 7c 01 03 > /dev/null])
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ND_NS_PKT,nd_target=2001::1:0:392')],
++  [0], [ignore])
+ 
+ dnl Send a mismatching neighbor discovery.
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 f1 f2 20 00 00 00 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 02 01 36 b1 ee 7c 01 03 > /dev/null])
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ND_NS_PKT,nd_target=3000::1')],
++  [0], [ignore])
+ 
+ AT_CHECK([ovs-appctl dpctl/dump-flows | strip_stats | strip_used | dnl
+           strip_key32 | strip_ptype | strip_eth | strip_recirc | dnl
+@@ -2407,20 +2422,29 @@ dnl The flow will encap a mpls header to the ip packet
+ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"])
+ 
+-rm -rf p1.pcap
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
++
++m4_define([ICMP_PKT], [m4_join([,],
++  [eth_src=36:b1:ee:7c:01:03,eth_dst=36:b1:ee:7c:01:02,eth_type=0x0800],
++  [nw_src=10.1.1.1,nw_dst=10.1.1.2],
++  [nw_proto=1,nw_ttl=64,nw_frag=no],
++  [icmp_type=8,icmp_code=0])])
+ 
+-dnl The hex dump is a icmp packet. pkt=eth/ip/icmp
+ dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
++dnl p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT')], [0], [ignore])
+ 
+-dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8847 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++dnl Check the expected mpls encapsulated packet on the egress interface.
++m4_define([MPLS_HEADER], [m4_join([,],
++  [eth_src=00:00:00:00:00:01,eth_dst=00:00:00:00:00:02,eth_type=0x8847],
++  [mpls_label=2,mpls_ttl=64,mpls_bos=1])])
++
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'MPLS_HEADER'),
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT'), [\$])"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
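Editor's note: the rewritten checks compare whole packets instead of grepping tcpdump's hex dump line by line. ovs-pcap prints one hex string per captured packet, and the expected string is the concatenation of the composed MPLS header and the original ICMP frame, anchored at both ends. The shape of that match, sketched in Python with placeholder hex:

    import re

    # Placeholder hex strings standing in for the two compose-packet
    # outputs; the real test substitutes them via $(...) at run time.
    mpls_hex = "00000000000200000000000188470000213f"
    icmp_hex = "36b1ee7c010236b1ee7c01030800"
    pcap_line = mpls_hex + icmp_hex           # One packet, one line.
    assert re.search("^" + mpls_hex + icmp_hex + "$", pcap_line)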
+@@ -2439,20 +2463,29 @@ dnl The flow will encap a mpls header to the ip packet
+ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"])
+ 
+-rm -rf p1.pcap
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
++
++m4_define([ICMP_PKT], [m4_join([,],
++  [eth_src=36:b1:ee:7c:01:03,eth_dst=36:b1:ee:7c:01:02,eth_type=0x0800],
++  [nw_src=10.1.1.1,nw_dst=10.1.1.2],
++  [nw_proto=1,nw_ttl=64,nw_frag=no],
++  [icmp_type=8,icmp_code=0])])
+ 
+-dnl The hex dump is a icmp packet. pkt=eth/ip/icmp
+ dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
++dnl p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT')], [0], [ignore])
++
++dnl Check the expected mpls encapsulated packet on the egress interface.
++m4_define([MPLS_HEADER], [m4_join([,],
++  [eth_src=00:00:00:00:00:01,eth_dst=00:00:00:00:00:02,eth_type=0x8847],
++  [mpls_label=2,mpls_ttl=64,mpls_bos=1])])
+ 
+-dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8847 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'MPLS_HEADER'),
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT'), [\$])"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -2472,20 +2505,29 @@ dnl The flow will encap a mpls header to the ip packet
+ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls_mc),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"])
+ 
+-rm -rf p1.pcap
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
++
++m4_define([ICMP_PKT], [m4_join([,],
++  [eth_src=36:b1:ee:7c:01:03,eth_dst=36:b1:ee:7c:01:02,eth_type=0x0800],
++  [nw_src=10.1.1.1,nw_dst=10.1.1.2],
++  [nw_proto=1,nw_ttl=64,nw_frag=no],
++  [icmp_type=8,icmp_code=0])])
+ 
+-dnl The hex dump is a icmp packet. pkt=eth/ip/icmp
+ dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
++dnl p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT')], [0], [ignore])
++
++dnl Check the expected mpls encapsulated packet on the egress interface.
++m4_define([MPLS_HEADER], [m4_join([,],
++  [eth_src=00:00:00:00:00:01,eth_dst=00:00:00:00:00:02,eth_type=0x8848],
++  [mpls_label=2,mpls_ttl=64,mpls_bos=1])])
+ 
+-dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8848 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'MPLS_HEADER'),
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT'), [\$])"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -2504,20 +2546,29 @@ dnl The flow will encap a mpls header to the ip packet
+ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls_mc),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"])
+ 
+-rm -rf p1.pcap
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
++
++m4_define([ICMP_PKT], [m4_join([,],
++  [eth_src=36:b1:ee:7c:01:03,eth_dst=36:b1:ee:7c:01:02,eth_type=0x0800],
++  [nw_src=10.1.1.1,nw_dst=10.1.1.2],
++  [nw_proto=1,nw_ttl=64,nw_frag=no],
++  [icmp_type=8,icmp_code=0])])
+ 
+-dnl The hex dump is a icmp packet. pkt=eth/ip/icmp
+ dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
++dnl p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT')], [0], [ignore])
++
++dnl Check the expected mpls encapsulated packet on the egress interface.
++m4_define([MPLS_HEADER], [m4_join([,],
++  [eth_src=00:00:00:00:00:01,eth_dst=00:00:00:00:00:02,eth_type=0x8848],
++  [mpls_label=2,mpls_ttl=64,mpls_bos=1])])
+ 
+-dnl Check the expected mpls encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *0000 *0000 *0002 *0000 *0000 *0001 *8848 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *2140 *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *4500 *0054 *0344 *4000 *4001 *2161 *0a01 *0101" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0a01 *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'MPLS_HEADER'),
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT'), [\$])"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -2538,24 +2589,30 @@ dnl eth/mpls/eth/ip/icmp --> OVS --> eth/ip/icmp
+ 
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x8847,mpls_label=2 actions=decap(),decap(packet_type(ns=0,type=0)),ovs-p1"])
+ 
+-rm -rf p1.pcap
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
+ 
+-dnl The hex dump is an mpls packet encapsulating ethernet packet. pkt=eth/mpls/eth/ip/icmp
+-dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 00 00 00 00 00 02 00 00 00 00 00 01 88 47 00 00 21 40 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
++m4_define([MPLS_HEADER], [m4_join([,],
++  [eth_src=00:00:00:00:00:01,eth_dst=00:00:00:00:00:02,eth_type=0x8847],
++  [mpls_label=2,mpls_ttl=64,mpls_bos=1])])
+ 
+-dnl Check the expected decapsulated on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800 *4500" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *0054 *0344 *4000 *4001 *2161 *0a01 *0101 *0a01" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0000 *500b *0200 *0000 *0000 *1011 *1213 *1415" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040:  *1617 *1819 *1a1b *1c1d *1e1f *2021 *2223 *2425" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050:  *2627 *2829 *2a2b *2c2d *2e2f *3031 *3233 *3435" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0060:  *3637" 2>&1 1>/dev/null])
++m4_define([ICMP_PKT], [m4_join([,],
++  [eth_src=36:b1:ee:7c:01:03,eth_dst=36:b1:ee:7c:01:02,eth_type=0x0800],
++  [nw_src=10.1.1.1,nw_dst=10.1.1.2],
++  [nw_proto=1,nw_ttl=64,nw_frag=no],
++  [icmp_type=8,icmp_code=0])])
+ 
++dnl The packet is an eth/mpls/eth/ip/icmp frame sent from p0(at_ns0) interface
++dnl directed to p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    "$(ovs-ofctl compose-packet --bare 'MPLS_HEADER')"  \
++    "$(ovs-ofctl compose-packet --bare 'ICMP_PKT')"],
++  [0], [ignore])
++
++dnl Check the expected decapsulated packet on the egress interface.
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q \
++    "^$(ovs-ofctl compose-packet --bare 'ICMP_PKT')\$"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -2575,24 +2632,30 @@ dnl eth/mpls/eth/ip/icmp --> OVS --> eth/ip/icmp
+ 
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x8847,mpls_label=2 actions=decap(),decap(packet_type(ns=0,type=0)),ovs-p1"])
+ 
+-rm -rf p1.pcap
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
+ 
+-dnl The hex dump is an mpls packet encapsulating ethernet packet. pkt=eth/mpls/eth/ip/icmp
+-dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 00 00 00 00 00 02 00 00 00 00 00 01 88 47 00 00 21 40 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37  > /dev/null])
++m4_define([MPLS_HEADER], [m4_join([,],
++  [eth_src=00:00:00:00:00:01,eth_dst=00:00:00:00:00:02,eth_type=0x8847],
++  [mpls_label=2,mpls_ttl=64,mpls_bos=1])])
++
++m4_define([ICMP_PKT], [m4_join([,],
++  [eth_src=36:b1:ee:7c:01:03,eth_dst=36:b1:ee:7c:01:02,eth_type=0x0800],
++  [nw_src=10.1.1.1,nw_dst=10.1.1.2],
++  [nw_proto=1,nw_ttl=64,nw_frag=no],
++  [icmp_type=8,icmp_code=0])])
+ 
+-dnl Check the expected decapsulated on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000:  *36b1 *ee7c *0102 *36b1 *ee7c *0103 *0800 *4500" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010:  *0054 *0344 *4000 *4001 *2161 *0a01 *0101 *0a01" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020:  *0102 *0800 *efac *7ce4 *0003 *5b2c *1f61 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030:  *0000 *500b *0200 *0000 *0000 *1011 *1213 *1415" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040:  *1617 *1819 *1a1b *1c1d *1e1f *2021 *2223 *2425" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050:  *2627 *2829 *2a2b *2c2d *2e2f *3031 *3233 *3435" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0060:  *3637" 2>&1 1>/dev/null])
++dnl The packet is an eth/mpls/eth/ip/icmp frame sent from p0(at_ns0) interface
++dnl directed to p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    "$(ovs-ofctl compose-packet --bare 'MPLS_HEADER')"  \
++    "$(ovs-ofctl compose-packet --bare 'ICMP_PKT')"],
++  [0], [ignore])
+ 
++dnl Check the expected decapsulated packet on the egress interface.
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q \
++    "^$(ovs-ofctl compose-packet --bare 'ICMP_PKT')\$"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -3103,7 +3166,10 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl
+ icmp,orig=(src=10.1.1.1,dst=10.1.1.2,id=<cleared>,type=8,code=0),reply=(src=10.1.1.2,dst=10.1.1.1,id=<cleared>,type=0,code=0)
+ ])
+ 
+-AT_CHECK([ovs-appctl dpctl/flush-conntrack])
++AT_CHECK([ovs-appctl dpctl/flush-conntrack 'ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2'])
++
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl
++])
+ 
+ dnl Pings from ns1->ns0 should fail.
+ NS_CHECK_EXEC([at_ns1], [ping -q -c 3 -i 0.3 -w 2 10.1.1.1 | FORMAT_PING], [0], [dnl
+@@ -3244,6 +3310,11 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fc00::2)], [0], [dnl
+ icmpv6,orig=(src=fc00::1,dst=fc00::2,id=<cleared>,type=128,code=0),reply=(src=fc00::2,dst=fc00::1,id=<cleared>,type=129,code=0)
+ ])
+ 
++AT_CHECK([ovs-appctl dpctl/flush-conntrack 'ct_ipv6_src=fc00::1,ct_ipv6_dst=fc00::2'])
++
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fc00::2)], [0], [dnl
++])
++
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
+@@ -6397,11 +6468,11 @@ ADD_NAMESPACES(at_ns0, at_ns1)
+ ADD_VETH(p0, at_ns0, br0, "10.1.1.1/24")
+ NS_CHECK_EXEC([at_ns0], [ip link set dev p0 address 80:88:88:88:88:88])
+ ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24")
++NS_CHECK_EXEC([at_ns1], [ip link set dev p1 address 80:89:89:89:89:89])
+ 
+ dnl Allow any traffic from ns0->ns1. Only allow nd, return traffic from ns1->ns0.
+ AT_DATA([flows.txt], [dnl
+-in_port=1,tcp,action=ct(commit,zone=1,nat(src=10.1.1.240:34568,random)),2
+-in_port=2,ct_state=-trk,tcp,tp_dst=34567,action=ct(table=0,zone=1,nat)
++in_port=1,tcp,action=ct(commit,zone=1,nat(src=10.1.1.240:34568)),2
+ in_port=2,ct_state=-trk,tcp,tp_dst=34568,action=ct(table=0,zone=1,nat)
+ in_port=2,ct_state=+trk,ct_zone=1,tcp,action=1
+ dnl
+@@ -6425,17 +6496,28 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt])
+ 
+ dnl HTTP requests from p0->p1 should work fine.
+ OVS_START_L7([at_ns1], [http])
+-NS_CHECK_EXEC([at_ns0], [wget 10.1.1.2 -t 1 -T 1 --retry-connrefused -v -o wget0.log])
++
++dnl Send a valid SYN to make conntrack pick it up.
++dnl Source port 123 is used to prevent unwanted port reuse by the next HTTP request.
++syn_pkt=$(ovs-ofctl compose-packet --bare "eth_src=80:88:88:88:88:88,eth_dst=80:89:89:89:89:89,\
++  dl_type=0x0800,nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_proto=6,nw_ttl=64,nw_frag=no,tcp_flags=syn,\
++  tcp_src=123,tcp_dst=80")
++AT_CHECK([ovs-ofctl packet-out br0 "packet=${syn_pkt} actions=ct(commit,zone=1,nat(src=10.1.1.240:34568))"])
++
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | uniq], [0], [dnl
++tcp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=<cleared>,dport=<cleared>),reply=(src=10.1.1.2,dst=10.1.1.240,sport=<cleared>,dport=<cleared>),zone=1,protoinfo=(state=<cleared>)
++])
+ 
+ NS_CHECK_EXEC([at_ns0], [wget 10.1.1.2 -t 1 -T 1 --retry-connrefused -v -o wget0.log], [4])
+ 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst=10.1.1.2[[45]][[0-9]]/dst=10.1.1.2XX/' | uniq], [0], [dnl
+-tcp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=<cleared>,dport=<cleared>),reply=(src=10.1.1.2,dst=10.1.1.2XX,sport=<cleared>,dport=<cleared>),zone=1,protoinfo=(state=<cleared>)
++AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | uniq], [0], [dnl
++tcp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=<cleared>,dport=<cleared>),reply=(src=10.1.1.2,dst=10.1.1.240,sport=<cleared>,dport=<cleared>),zone=1,protoinfo=(state=<cleared>)
+ ])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP(["dnl
+ /Unable to NAT due to tuple space exhaustion - if DoS attack, use firewalling and\/or zone partitioning./d
+-/Dropped .* log messages in last .* seconds \(most recently, .* seconds ago\) due to excessive rate/d"])
++/Dropped .* log messages in last .* seconds \(most recently, .* seconds ago\) due to excessive rate/d
++/|WARN|.* execute ct.* failed/d"])
+ AT_CLEANUP
+ 
+ AT_SETUP([conntrack - more complex SNAT])
+@@ -6850,6 +6932,12 @@ dnl Checks the implementation of conntrack with FTP ALGs in combination with
+ dnl NAT, using the provided flow table.
+ m4_define([CHECK_FTP_NAT],
+    [AT_SETUP([conntrack - FTP $1])
++    m4_if(m4_index([$1], [orig tuple]), -1, [], [
++      dnl XXX: 6.8.0-1014-azure #16~22.04.1-Ubuntu kernel in GitHub Actions
++      dnl contains a known conntrack bug, but doesn't have a fix for it:
++      dnl   a23ac973f67f ("openvswitch: get related ct labels from its master
++      dnl                  if it is not confirmed")
++      OVS_CHECK_GITHUB_ACTION()])
+     AT_SKIP_IF([test $HAVE_FTP = no])
+     AT_SKIP_IF([test $HAVE_LFTP = no])
+     CHECK_CONNTRACK()
+@@ -8215,10 +8303,18 @@ table=2,priority=10  ct_state=+trk+est action=drop
+ 
+ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt])
+ 
+-# sending icmp pkts, first and second
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f0 00 00 01 01 02 f0 00 00 01 01 01 08 00 45 00 00 1c 00 01 00 00 40 01 64 dc 0a 01 01 01 0a 01 01 02 08 00 f7 ff ff ff ff ff > /dev/null])
++m4_define([ICMP_PKT], [m4_join([,],
++  [eth_src=f0:00:00:01:01:01,eth_dst=f0:00:00:01:01:02,eth_type=0x0800],
++  [nw_src=10.1.1.1,nw_dst=10.1.1.2],
++  [nw_proto=1,nw_ttl=64,nw_frag=no],
++  [icmp_type=8,icmp_code=0])])
++
++# Sending ICMP packets, first and second.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT' '')], [0], [ignore])
+ 
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f0 00 00 01 01 02 f0 00 00 01 01 01 08 00 45 00 00 1c 00 01 00 00 40 01 64 dc 0a 01 01 01 0a 01 01 02 08 00 f7 ff ff ff ff ff > /dev/null])
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'ICMP_PKT' '')], [0], [ignore])
+ 
+ sleep 1
+ 
+@@ -8389,6 +8485,53 @@ AT_CHECK([ovs-pcap client.pcap | grep 000000002010000000002000], [0], [dnl
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([conntrack - Flush many conntrack entries by port])
++CHECK_CONNTRACK()
++OVS_TRAFFIC_VSWITCHD_START()
++
++ADD_NAMESPACES(at_ns0, at_ns1)
++
++ADD_VETH(p0, at_ns0, br0, "10.1.1.1/24")
++ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24")
++
++AT_DATA([flows.txt], [dnl
++priority=100,in_port=1,udp,action=ct(zone=1,commit),2
++])
++
++AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt])
++
++dnl Send 20 packets from source port 1 and 1 packet from source port 2.
++flow_l3="\
++    eth_src=50:54:00:00:00:09,eth_dst=50:54:00:00:00:0a,dl_type=0x0800,\
++    nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_proto=17,nw_ttl=64,nw_frag=no"
++
++for i in $(seq 1 20); do
++    frame=$(ovs-ofctl compose-packet --bare "$flow_l3, udp_src=1,udp_dst=$i")
++    AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=$frame actions=resubmit(,0)"])
++done
++frame=$(ovs-ofctl compose-packet --bare "$flow_l3, udp_src=2,udp_dst=1")
++AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=$frame actions=resubmit(,0)"])
++
++: > conntrack
++
++for i in $(seq 1 20); do
++    echo "udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=1,dport=${i}),reply=(src=10.1.1.2,dst=10.1.1.1,sport=${i},dport=1),zone=1" >> conntrack
++done
++echo "udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=2,dport=1),reply=(src=10.1.1.2,dst=10.1.1.1,sport=1,dport=2),zone=1" >> conntrack
++
++sort conntrack > expout
++
++AT_CHECK([ovs-appctl dpctl/dump-conntrack zone=1 | grep -F "src=10.1.1.1," | sort ], [0], [expout])
++
++dnl Check that flushing conntrack by port 1 flushes all ct entries for port 1 but keeps the entry for port 2.
++AT_CHECK([ovs-appctl dpctl/flush-conntrack zone=1 'ct_nw_proto=17,ct_tp_src=1'])
++AT_CHECK([ovs-appctl dpctl/dump-conntrack zone=1 | grep -F "src=10.1.1.1," | sort ], [0], [dnl
++udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=2,dport=1),reply=(src=10.1.1.2,dst=10.1.1.1,sport=1,dport=2),zone=1
++])
++
++OVS_TRAFFIC_VSWITCHD_STOP
++AT_CLEANUP
++
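Editor's note: the test above seeds 21 UDP conntrack entries and then flushes only those whose original source port is 1. A tiny Python model of the expected bookkeeping:

    # (sport, dport) pairs: 20 entries from source port 1, one from port 2.
    entries = [(1, d) for d in range(1, 21)] + [(2, 1)]
    # Model of "flush-conntrack zone=1 'ct_nw_proto=17,ct_tp_src=1'".
    remaining = [(s, d) for (s, d) in entries if s != 1]
    assert remaining == [(2, 1)]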
+ AT_BANNER([IGMP])
+ 
+ AT_SETUP([IGMP - flood under normal action])
+@@ -8724,21 +8867,29 @@ dnl The flow will encap a nsh header to the TCP syn packet
+ dnl eth/ip/tcp --> OVS --> eth/nsh/eth/ip/tcp
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,in_port=ovs-p0,ip,actions=encap(nsh(md_type=1)),set_field:0x1234->nsh_spi,set_field:0x11223344->nsh_c1,encap(ethernet),set_field:f2:ff:00:00:00:02->dl_dst,set_field:f2:ff:00:00:00:01->dl_src,ovs-p1"])
+ 
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
+ 
+-dnl The hex dump is a TCP syn packet. pkt=eth/ip/tcp
+-dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
++m4_define([TCP_SYN_PKT], [m4_join([,],
++  [eth_src=f2:00:00:00:00:01,eth_dst=f2:00:00:00:00:02,eth_type=0x0800],
++  [nw_src=192.168.0.10,nw_dst=10.0.0.10],
++  [nw_proto=6,nw_ttl=64,nw_frag=no],
++  [tcp_src=1024,tcp_dst=2048,tcp_flags=syn])])
++
++dnl Send the TCP SYN packet from p0(at_ns0) interface directed to
++dnl p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    $(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT')], [0], [ignore])
++
++m4_define([NSH_HEADER], [m4_join([,],
++  [eth_src=f2:ff:00:00:00:01,eth_dst=f2:ff:00:00:00:02,eth_type=0x894f],
++  [nsh_ttl=63,nsh_np=3,nsh_spi=0x1234,nsh_si=255],
++  [nsh_mdtype=1,nsh_c1=0x11223344])])
+ 
+-dnl Check the expected nsh encapsulated packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *0fc6" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0103 *0012 *34ff *1122 *3344 *0000 *0000 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *0000 *0000 *0000 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'NSH_HEADER'),
++    $(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT'), [\$])"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -8756,19 +8907,31 @@ dnl The flow will decap a nsh header which in turn carries a TCP syn packet
+ dnl eth/nsh/eth/ip/tcp --> OVS --> eth/ip/tcp
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,in_port=ovs-p0,dl_type=0x894f, actions=decap(),decap(), ovs-p1"])
+ 
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
+ 
+-dnl The hex dump is NSH packet with TCP syn payload. pkt=eth/nsh/eth/ip/tcp
+-dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 00 64 03 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
++m4_define([TCP_SYN_PKT], [m4_join([,],
++  [eth_src=f2:00:00:00:00:01,eth_dst=f2:00:00:00:00:02,eth_type=0x0800],
++  [nw_src=192.168.0.10,nw_dst=10.0.0.10],
++  [nw_proto=6,nw_ttl=64,nw_frag=no],
++  [tcp_src=1024,tcp_dst=2048,tcp_flags=syn])])
++
++m4_define([NSH_HEADER], [m4_join([,],
++  [eth_src=f2:ff:00:00:00:01,eth_dst=f2:ff:00:00:00:02,eth_type=0x894f],
++  [nsh_ttl=63,nsh_np=3,nsh_spi=0x1234,nsh_si=255],
++  [nsh_mdtype=1,nsh_c1=0x11223344])])
++
++dnl Send the NSH packet with TCP SYN payload from p0(at_ns0) interface directed
++dnl to p1(at_ns1) interface.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    "$(ovs-ofctl compose-packet --bare 'NSH_HEADER')" \
++    "$(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT')"],
++  [0], [ignore])
+ 
+ dnl Check the expected de-capsulated TCP packet on the egress interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f200 *0000 *0002 *f200 *0000 *0001 *0800 *4500" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0028 *0001 *0000 *4006 *b013 *c0a8 *000a *0a00" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *000a *0400 *0800 *0000 *00c8 *0000 *0000 *5002" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *2000 *b85e *0000" 2>&1 1>/dev/null])
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q \
++    "^$(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT')\$"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -8788,22 +8951,38 @@ dnl The flow will add another NSH header with nsh_spi=0x101, nsh_si=4,
+ dnl nsh_ttl=7 and change the md1 context
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,in_port=ovs-p0,dl_type=0x894f,nsh_spi=0x100,nsh_si=0x03,actions=decap(),decap(),encap(nsh(md_type=1)),set_field:0x07->nsh_ttl,set_field:0x0101->nsh_spi,set_field:0x04->nsh_si,set_field:0x100f0e0d->nsh_c1,set_field:0x0c0b0a09->nsh_c2,set_field:0x08070605->nsh_c3,set_field:0x04030201->nsh_c4,encap(ethernet),set_field:f2:ff:00:00:00:02->dl_dst,set_field:f2:ff:00:00:00:01->dl_src,ovs-p1"])
+ 
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-sleep 1
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
+ 
+-dnl The hex dump is NSH packet with TCP syn payload. pkt=eth/nsh/eth/ip/tcp
+-dnl The nsh_ttl is 8, nsh_spi is 0x100 and nsh_si is 3
+-dnl The packet is sent from p0(at_ns0) interface directed to
+-dnl p1(at_ns1) interface
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 01 00 03 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
++m4_define([TCP_SYN_PKT], [m4_join([,],
++  [eth_src=f2:00:00:00:00:01,eth_dst=f2:00:00:00:00:02,eth_type=0x0800],
++  [nw_src=192.168.0.10,nw_dst=10.0.0.10],
++  [nw_proto=6,nw_ttl=64,nw_frag=no],
++  [tcp_src=1024,tcp_dst=2048,tcp_flags=syn])])
++
++m4_define([NSH_HEADER_1], [m4_join([,],
++  [eth_src=f2:ff:00:00:00:01,eth_dst=f2:ff:00:00:00:02,eth_type=0x894f],
++  [nsh_ttl=8,nsh_np=3,nsh_spi=0x100,nsh_si=3,nsh_mdtype=1],
++  [nsh_c1=0x01020304,nsh_c2=0x05060708,nsh_c3=0x090a0b0c,nsh_c4=0x0d0e0f10])])
+ 
+-dnl Check the expected NSH packet with new fields in the header
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000* 0001 *894f *01c6" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0103 *0001 *0104 *100f *0e0d *0c0b *0a09 *0807" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *0605 *0403 *0201 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
++dnl Send the NSH packet with TCP SYN payload from p0(at_ns0) interface directed
++dnl to p1(at_ns1) interface.
++dnl The nsh_ttl is 8, nsh_spi is 0x100 and nsh_si is 3.
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    "$(ovs-ofctl compose-packet --bare 'NSH_HEADER_1')" \
++    "$(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT')"],
++  [0], [ignore])
++
++m4_define([NSH_HEADER_2], [m4_join([,],
++  [eth_src=f2:ff:00:00:00:01,eth_dst=f2:ff:00:00:00:02,eth_type=0x894f],
++  [nsh_ttl=7,nsh_np=3,nsh_spi=0x101,nsh_si=4,nsh_mdtype=1],
++  [nsh_c1=0x100f0e0d,nsh_c2=0x0c0b0a09,nsh_c3=0x08070605,nsh_c4=0x04030201])])
++
++dnl Check the expected NSH packet with new fields in the header.
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'NSH_HEADER_2'),
++    $(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT'), [\$])"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+@@ -8824,31 +9003,50 @@ dnl packet to to at_ns2.
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x894f,nsh_spi=0x100,nsh_si=0x02,actions=ovs-p1"])
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x894f,nsh_spi=0x100,nsh_si=0x01,actions=ovs-p2"])
+ 
+-NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid])
+-NETNS_DAEMONIZE([at_ns2], [tcpdump -l -n -xx -U -i p2 > p2.pcap], [tcpdump2.pid])
+-sleep 1
+-
+-dnl First send packet from at_ns0 --> OVS with SPI=0x100 and SI=2
+-NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 01 00 02 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
+-
+-dnl Check for the above packet on p1 interface
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *0206" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0010: *0103 *0001 *0002 *0102 *0304 *0506 *0708 *090a" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0020: *0b0c *0d0e *0f10 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p1.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
+-
+-dnl Send the second packet from at_ns1 --> OVS with SPI=0x100 and SI=1
+-NS_CHECK_EXEC([at_ns1], [$PYTHON3 $srcdir/sendpkt.py p1 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 01 c6 01 03 00 01 00 01 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null])
+-
+-dnl Check for the above packet on p2 interface
+-OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0000: *f2ff *0000 *0002 *f2ff *0000 *0001 *894f *01c6" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0010: *0103 *0001 *0001 *0102 *0304 *0506 *0708 *090a" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0020: *0b0c *0d0e *0f10 *f200 *0000 *0002 *f200 *0000" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0030: *0001 *0800 *4500 *0028 *0001 *0000 *4006 *b013" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0040: *c0a8 *000a *0a00 *000a *0400 *0800 *0000 *00c8" 2>&1 1>/dev/null])
+-OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0050: *0000 *0000 *5002 *2000 *b85e *0000" 2>&1 1>/dev/null])
++NETNS_DAEMONIZE([at_ns1],
++  [tcpdump -l -n -xx -U -i p1 -w p1.pcap 2>tcpdump_err], [tcpdump.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
++NETNS_DAEMONIZE([at_ns2],
++  [tcpdump -l -n -xx -U -i p2 -w p2.pcap 2>tcpdump2_err], [tcpdump2.pid])
++OVS_WAIT_UNTIL([grep "listening" tcpdump2_err])
++
++m4_define([TCP_SYN_PKT], [m4_join([,],
++  [eth_src=f2:00:00:00:00:01,eth_dst=f2:00:00:00:00:02,eth_type=0x0800],
++  [nw_src=192.168.0.10,nw_dst=10.0.0.10],
++  [nw_proto=6,nw_ttl=64,nw_frag=no],
++  [tcp_src=1024,tcp_dst=2048,tcp_flags=syn])])
++
++dnl First send packet from at_ns0 --> OVS with SPI=0x100 and SI=2.
++m4_define([NSH_HEADER_1], [m4_join([,],
++  [eth_src=f2:ff:00:00:00:01,eth_dst=f2:ff:00:00:00:02,eth_type=0x894f],
++  [nsh_ttl=8,nsh_np=3,nsh_spi=0x100,nsh_si=2,nsh_mdtype=1],
++  [nsh_c1=0x01020304,nsh_c2=0x05060708,nsh_c3=0x090a0b0c,nsh_c4=0x0d0e0f10])])
++
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 \
++    "$(ovs-ofctl compose-packet --bare 'NSH_HEADER_1')" \
++    "$(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT')"],
++  [0], [ignore])
++
++dnl Check for the above packet on p1 interface.
++OVS_WAIT_UNTIL([ovs-pcap p1.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'NSH_HEADER_1'),
++    $(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT'), [\$])"])
++
++dnl Send the second packet from at_ns1 --> OVS with SPI=0x100 and SI=1.
++m4_define([NSH_HEADER_2], [m4_join([,],
++  [eth_src=f2:ff:00:00:00:01,eth_dst=f2:ff:00:00:00:02,eth_type=0x894f],
++  [nsh_ttl=8,nsh_np=3,nsh_spi=0x100,nsh_si=1,nsh_mdtype=1],
++  [nsh_c1=0x01020304,nsh_c2=0x05060708,nsh_c3=0x090a0b0c,nsh_c4=0x0d0e0f10])])
++
++NS_CHECK_EXEC([at_ns1], [$PYTHON3 $srcdir/sendpkt.py p1 \
++    "$(ovs-ofctl compose-packet --bare 'NSH_HEADER_2')" \
++    "$(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT')"],
++  [0], [ignore])
++
++dnl Check for the above packet on p2 interface.
++OVS_WAIT_UNTIL([ovs-pcap p2.pcap | grep -q "m4_join([], [^],
++    $(ovs-ofctl compose-packet --bare 'NSH_HEADER_2'),
++    $(ovs-ofctl compose-packet --bare 'TCP_SYN_PKT'), [\$])"])
+ 
+ OVS_TRAFFIC_VSWITCHD_STOP
+ AT_CLEANUP
+diff --git a/tests/test-ovsdb.c b/tests/test-ovsdb.c
+index c4ab899d45..710341b655 100644
+--- a/tests/test-ovsdb.c
++++ b/tests/test-ovsdb.c
+@@ -2023,6 +2023,24 @@ print_idl_row_updated_link2(const struct idltest_link2 *l2, int step)
+     }
+ }
+ 
++static void
++print_idl_row_updated_indexed(const struct idltest_indexed *ind, int step)
++{
++    struct ds updates = DS_EMPTY_INITIALIZER;
++
++    for (size_t i = 0; i < IDLTEST_INDEXED_N_COLUMNS; i++) {
++        if (idltest_indexed_is_updated(ind, i)) {
++            ds_put_format(&updates, " %s", idltest_indexed_columns[i].name);
++        }
++    }
++    if (updates.length) {
++        print_and_log("%03d: table %s: updated columns:%s",
++                      step, ind->header_.table->class_->name,
++                      ds_cstr(&updates));
++        ds_destroy(&updates);
++    }
++}
++
+ static void
+ print_idl_row_updated_simple3(const struct idltest_simple3 *s3, int step)
+ {
+@@ -2172,6 +2190,21 @@ print_idl_row_link2(const struct idltest_link2 *l2, int step, bool terse)
+     print_idl_row_updated_link2(l2, step);
+ }
+ 
++static void
++print_idl_row_indexed(const struct idltest_indexed *ind, int step, bool terse)
++{
++    struct ds msg = DS_EMPTY_INITIALIZER;
++
++    ds_put_format(&msg, "i=%"PRId64, ind->i);
++
++    char *row_msg = format_idl_row(&ind->header_, step, ds_cstr(&msg), terse);
++    print_and_log("%s", row_msg);
++    ds_destroy(&msg);
++    free(row_msg);
++
++    print_idl_row_updated_indexed(ind, step);
++}
++
+ static void
+ print_idl_row_simple3(const struct idltest_simple3 *s3, int step, bool terse)
+ {
+@@ -2252,6 +2285,7 @@ print_idl_row_singleton(const struct idltest_singleton *sng, int step,
+ static void
+ print_idl(struct ovsdb_idl *idl, int step, bool terse)
+ {
++    const struct idltest_indexed *ind;
+     const struct idltest_simple3 *s3;
+     const struct idltest_simple4 *s4;
+     const struct idltest_simple6 *s6;
+@@ -2285,6 +2319,10 @@ print_idl(struct ovsdb_idl *idl, int step, bool terse)
+         print_idl_row_simple6(s6, step, terse);
+         n++;
+     }
++    IDLTEST_INDEXED_FOR_EACH (ind, idl) {
++        print_idl_row_indexed(ind, step, terse);
++        n++;
++    }
+     IDLTEST_SINGLETON_FOR_EACH (sng, idl) {
+         print_idl_row_singleton(sng, step, terse);
+         n++;
+@@ -2297,6 +2335,7 @@ print_idl(struct ovsdb_idl *idl, int step, bool terse)
+ static void
+ print_idl_track(struct ovsdb_idl *idl, int step, bool terse)
+ {
++    const struct idltest_indexed *ind;
+     const struct idltest_simple3 *s3;
+     const struct idltest_simple4 *s4;
+     const struct idltest_simple6 *s6;
+@@ -2329,6 +2368,10 @@ print_idl_track(struct ovsdb_idl *idl, int step, bool terse)
+         print_idl_row_simple6(s6, step, terse);
+         n++;
+     }
++    IDLTEST_INDEXED_FOR_EACH (ind, idl) {
++        print_idl_row_indexed(ind, step, terse);
++        n++;
++    }
+ 
+     if (!n) {
+         print_and_log("%03d: empty", step);
+@@ -2977,6 +3020,29 @@ do_idl_partial_update_map_column(struct ovs_cmdl_context *ctx)
+     printf("%03d: After trying to delete a deleted element\n", step++);
+     dump_simple2(idl, myRow, step++);
+ 
++    myTxn = ovsdb_idl_txn_create(idl);
++    myRow = idltest_simple2_insert(myTxn);
++    idltest_simple2_update_smap_setkey(myRow, "key3", "myList3");
++    idltest_simple2_set_name(myRow, "String2");
++    idltest_simple2_delete(myRow);
++    ovsdb_idl_txn_commit_block(myTxn);
++    ovsdb_idl_txn_destroy(myTxn);
++    ovsdb_idl_get_initial_snapshot(idl);
++    printf("%03d: After Create element, update smap and Delete element\n",
++           step++);
++    dump_simple2(idl, myRow, step++);
++
++    myTxn = ovsdb_idl_txn_create(idl);
++    myRow = idltest_simple2_first(idl);
++    idltest_simple2_update_smap_setkey(myRow, "key4", "myList4");
++    idltest_simple2_set_name(myRow, "String3");
++    idltest_simple2_delete(myRow);
++    ovsdb_idl_txn_commit_block(myTxn);
++    ovsdb_idl_txn_destroy(myTxn);
++    ovsdb_idl_get_initial_snapshot(idl);
++    printf("%03d: After update smap and Delete element\n", step++);
++    dump_simple2(idl, myRow, step++);
++
+     ovsdb_idl_destroy(idl);
+     printf("%03d: End test\n", step);
+ }
+@@ -3075,6 +3141,21 @@ do_idl_partial_update_set_column(struct ovs_cmdl_context *ctx)
+     ovsdb_idl_get_initial_snapshot(idl);
+     printf("%03d: After add to other table + set of strong ref\n", step++);
+     dump_simple3(idl, myRow, step++);
++
++    /* Create row, insert key, delete row. */
++    myTxn = ovsdb_idl_txn_create(idl);
++    myRow = idltest_simple3_insert(myTxn);
++    uuid_from_string(&uuid_to_add, "12345678-dd3f-4616-ab6a-83a490bb0991");
++    idltest_simple3_update_uset_addvalue(myRow, uuid_to_add);
++    idltest_simple3_set_name(myRow, "String2");
++    idltest_simple3_delete(myRow);
++    ovsdb_idl_txn_commit_block(myTxn);
++    ovsdb_idl_txn_destroy(myTxn);
++    ovsdb_idl_get_initial_snapshot(idl);
++    printf("%03d: After Create element, update set and Delete element\n",
++           step++);
++    dump_simple3(idl, myRow, step++);
++
+     ovsdb_idl_destroy(idl);
+     printf("%03d: End test\n", step);
+ }
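
[Aside, not part of the patch: the test-ovsdb.c hunks above add printing for a new "indexed" table, which exists to exercise OVSDB IDL client-side indexes. Below is a minimal C sketch of how such an index could be created and queried; the helper names follow the usual IDL codegen convention (idltest_indexed_index_*, idltest_indexed_col_i) and do not appear in this hunk, so treat them as assumptions.]

    /* Sketch only: generated helper names are assumed from the
     * <table>_index_* codegen convention; ovsdb_idl_index_create1()
     * is the underlying OVS IDL API. */
    #include "ovsdb-idl.h"
    #include "tests/idltest.h"   /* Generated header; path may vary. */

    /* The index itself would be created once, after ovsdb_idl_create()
     * and before the first ovsdb_idl_run():
     *
     *     struct ovsdb_idl_index *idx =
     *         ovsdb_idl_index_create1(idl, &idltest_indexed_col_i);
     */
    static const struct idltest_indexed *
    find_by_i(struct ovsdb_idl_index *idx, int64_t i)
    {
        /* Build a transient target row that carries the search key. */
        struct idltest_indexed *target = idltest_indexed_index_init_row(idx);
        idltest_indexed_index_set_i(target, i);

        /* Ordered lookup against the in-memory replica. */
        const struct idltest_indexed *row =
            idltest_indexed_index_find(idx, target);

        idltest_indexed_index_destroy_row(target);
        return row;
    }
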
+diff --git a/tests/test-ovsdb.py b/tests/test-ovsdb.py
+index 48f8ee2d70..67a45f044b 100644
+--- a/tests/test-ovsdb.py
++++ b/tests/test-ovsdb.py
+@@ -228,6 +228,10 @@ def get_link2_table_printable_row(row):
+     return s
+ 
+ 
++def get_indexed_table_printable_row(row):
++    return "i=%s" % row.i
++
++
+ def get_singleton_table_printable_row(row):
+     return "name=%s" % row.name
+ 
+@@ -307,6 +311,14 @@ def print_idl(idl, step, terse=False):
+                       terse)
+             n += 1
+ 
++    if "indexed" in idl.tables:
++        ind = idl.tables["indexed"].rows
++        for row in ind.values():
++            print_row("indexed", row, step,
++                      get_indexed_table_printable_row(row),
++                      terse)
++            n += 1
++
+     if "singleton" in idl.tables:
+         sng = idl.tables["singleton"].rows
+         for row in sng.values():
+@@ -434,7 +446,7 @@ def idl_set(idl, commands, step):
+                 sys.stderr.write('"set" command requires 2 argument\n')
+                 sys.exit(1)
+ 
+-            s = txn.insert(idl.tables["simple"], new_uuid=args[0],
++            s = txn.insert(idl.tables["simple"], new_uuid=uuid.UUID(args[0]),
+                            persist_uuid=True)
+             s.i = int(args[1])
+         elif name == "delete":
+@@ -690,6 +702,9 @@ def do_idl(schema_file, remote, *commands):
+     idl = ovs.db.idl.Idl(remote, schema_helper, leader_only=False)
+     if "simple3" in idl.tables:
+         idl.index_create("simple3", "simple3_by_name")
++    if "indexed" in idl.tables:
++        idx = idl.index_create("indexed", "indexed_by_i")
++        idx.add_column("i")
+ 
+     if commands:
+         remotes = remote.split(',')
+diff --git a/tests/test-util.c b/tests/test-util.c
+index 7d899fbbfd..5d88d38f26 100644
+--- a/tests/test-util.c
++++ b/tests/test-util.c
+@@ -1116,12 +1116,16 @@ test_snprintf(struct ovs_cmdl_context *ctx OVS_UNUSED)
+ {
+     char s[16];
+ 
++    /* GCC 7+ and Clang 18+ warn about the following calls that truncate
++     * a string using snprintf().  We're testing that truncation works
++     * properly, so temporarily disable the warning. */
+ #if __GNUC__ >= 7
+-    /* GCC 7+ warns about the following calls that truncate a string using
+-     * snprintf().  We're testing that truncation works properly, so
+-     * temporarily disable the warning. */
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wformat-truncation"
++#endif
++#if __clang_major__ >= 18
++#pragma clang diagnostic push
++#pragma clang diagnostic ignored "-Wformat-truncation"
+ #endif
+     ovs_assert(snprintf(s, 4, "abcde") == 5);
+     ovs_assert(!strcmp(s, "abc"));
+@@ -1130,6 +1134,9 @@ test_snprintf(struct ovs_cmdl_context *ctx OVS_UNUSED)
+     ovs_assert(!strcmp(s, "abcd"));
+ #if __GNUC__ >= 7
+ #pragma GCC diagnostic pop
++#endif
++#if __clang_major__ >= 18
++#pragma clang diagnostic pop
+ #endif
+ 
+     ovs_assert(snprintf(s, 6, "abcde") == 5);
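
[Aside, not part of the patch: the test-util.c hunk above extends the existing GCC 7+ guard to Clang 18+, which also implements -Wformat-truncation. A standalone sketch of the same guard pattern around an intentional snprintf() truncation; the little program is illustrative, only the pragma pattern mirrors the patch:]

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char s[4];

    #if __GNUC__ >= 7
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wformat-truncation"
    #endif
    #if __clang_major__ >= 18
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wformat-truncation"
    #endif
        /* snprintf() returns the would-be length (5) while writing at
         * most sizeof s - 1 bytes plus the terminating NUL, so only
         * "abc" lands in the buffer. */
        assert(snprintf(s, sizeof s, "abcde") == 5);
        assert(!strcmp(s, "abc"));
    #if __GNUC__ >= 7
    #pragma GCC diagnostic pop
    #endif
    #if __clang_major__ >= 18
    #pragma clang diagnostic pop
    #endif

        return 0;
    }
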
+diff --git a/tests/tunnel-push-pop-ipv6.at b/tests/tunnel-push-pop-ipv6.at
+index a8dd28c5b5..abf9b1d64c 100644
+--- a/tests/tunnel-push-pop-ipv6.at
++++ b/tests/tunnel-push-pop-ipv6.at
+@@ -19,11 +19,12 @@ AT_CHECK([ovs-vsctl add-port int-br3 t3 -- set Interface t3 type=srv6 \
+                        options:srv6_flowlabel=compute \
+                        ], [0])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP address.
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::0/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local
+ ])
+ AT_CHECK([ovs-appctl tnl/neigh/set br0 2001:cafe::91 aa:55:aa:55:00:01], [0], [OK
+ ])
+@@ -105,13 +106,15 @@ dummy@ovs-dummy: hit:0 missed:0
+     t2 2/6: (ip6gre: remote_ip=2001:cafe::92)
+ ])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP addresses.
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK
+ ])
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK
++dnl Checking that local routes for the added IPs were successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local
+ ])
+ 
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+@@ -179,13 +182,15 @@ dummy@ovs-dummy: hit:0 missed:0
+     t3 3/6: (ip6erspan: erspan_dir=1, erspan_hwid=0x7, erspan_ver=2, key=567, remote_ip=2001:cafe::93)
+ ])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP addresses.
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK
+ ])
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK
++dnl Checking that local routes for the added IPs were successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local
+ ])
+ 
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+@@ -316,14 +321,15 @@ srv6_sys (6) ref_cnt=1
+ vxlan_sys_4789 (4789) ref_cnt=2
+ ])
+ 
+-
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP addresses.
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK
+ ])
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK
++dnl Checking that local routes for the added IPs were successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local
+ ])
+ 
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+@@ -636,3 +642,260 @@ Listening ports:
+ 
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
++
++AT_SETUP([tunnel_push_pop_ipv6 - local_ip configuration])
++
++OVS_VSWITCHD_START(
++    [add-port br0 p0 \
++     -- set Interface p0 type=dummy ofport_request=1 \
++                         other-config:hwaddr=aa:55:aa:55:00:00])
++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg])
++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy])
++AT_CHECK([ovs-vsctl add-port int-br t2 \
++          -- set Interface t2 type=geneve \
++                              options:local_ip=2001:beef::88 \
++                              options:remote_ip=2001:cafe::92 \
++                              options:key=123 ofport_request=2])
++
++dnl Setup multiple IP addresses.
++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/64], [0], [OK
++])
++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:beef::88/64], [0], [OK
++])
++dnl Checking that local routes for the added IPs were successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 2001:beef::/64 dev br0 SRC 2001:beef::88 local
++Cached: 2001:cafe::/64 dev br0 SRC 2001:cafe::88 local
++])
++AT_CHECK([ovs-ofctl add-flow br0 action=normal])
++AT_CHECK([ovs-ofctl add-flow int-br action=normal])
++
++dnl This Neighbor Advertisement from p0 has two effects:
++dnl 1. The neighbor cache will learn that 2001:cafe::92 is at f8:bc:12:44:34:b6.
++dnl 2. The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0.
++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl
++ 'recirc_id(0),in_port(1),dnl
++  eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x86dd),dnl
++  ipv6(src=2001:cafe::92,dst=2001:cafe::88,label=0,proto=58,tclass=0,hlimit=255,frag=no),dnl
++  icmpv6(type=136,code=0),dnl
++  nd(target=2001:cafe::92,sll=00:00:00:00:00:00,tll=f8:bc:12:44:34:b6)'
++])
++
++dnl Check that local_ip is used for encapsulation in the trace.
++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \
++                | grep -E 'tunnel|actions'], [0], [dnl
++     -> output to native tunnel
++     -> tunneling to 2001:cafe::92 via br0
++     -> tunneling from aa:55:aa:55:00:00 2001:beef::88 to f8:bc:12:44:34:b6 2001:cafe::92
++Datapath actions: tnl_push(tnl_port(6081),header(size=70,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl
++ipv6(src=2001:beef::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl
++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),1
++])
++
++dnl Now check that the packet actually has the local_ip in the header.
++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap])
++
++packet=50540000000a5054000000091234
++eth=f8bc124434b6aa55aa55000086dd
++ip6=60000000001e11402001beef0000000000000000000000882001cafe000000000000000000000092
++dnl Source port is based on a packet hash, so it may differ depending on the
++dnl compiler flags and CPU type.  Same for UDP checksum.  Masked with '....'.
++udp=....17c1001e....
++geneve=0000655800007b00
++encap=${eth}${ip6}${udp}${geneve}
++dnl Output to tunnel from an int-br internal port.
++dnl Checking that the packet arrived and was correctly encapsulated.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1])
++dnl Sending again to exercise the non-miss upcall path.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2])
++
++dnl Finally, checking that the datapath flow also has a local_ip.
++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \
++            | strip_ufid | strip_used], [0], [dnl
++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl
++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl
++packets:1, bytes:14, used:0.0s, dnl
++actions:tnl_push(tnl_port(6081),header(size=70,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl
++ipv6(src=2001:beef::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl
++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),1
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
++dnl This is a regression test for outer header checksum offloading
++dnl with recirculation.
++AT_SETUP([tunnel_push_pop_ipv6 - recirculation after encapsulation])
++
++OVS_VSWITCHD_START(
++    [add-port br0 p0 \
++     -- set Interface p0 type=dummy ofport_request=1 \
++                         other-config:hwaddr=aa:55:aa:55:00:00])
++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg])
++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy])
++AT_CHECK([ovs-vsctl add-port int-br t2 \
++          -- set Interface t2 type=geneve \
++                              options:remote_ip=2001:cafe::92 \
++                              options:key=123 ofport_request=2])
++
++dnl Setup an IP address.
++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/64], [0], [OK
++])
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 2001:cafe::/64 dev br0 SRC 2001:cafe::88 local
++])
++
++dnl Add a dp-hash selection group.
++AT_CHECK([ovs-ofctl add-group br0 \
++    'group_id=1234,type=select,selection_method=dp_hash,bucket=weight=1,output:p0'])
++AT_CHECK([ovs-ofctl add-flow br0 in_port=br0,action=group:1234])
++AT_CHECK([ovs-ofctl add-flow br0 in_port=p0,action=normal])
++
++AT_CHECK([ovs-ofctl add-flow int-br action=normal])
++
++dnl This Neighbor Advertisement from p0 has two effects:
++dnl 1. The neighbor cache will learn that 2001:cafe::92 is at f8:bc:12:44:34:b6.
++dnl 2. The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0.
++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl
++ 'recirc_id(0),in_port(1),dnl
++  eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x86dd),dnl
++  ipv6(src=2001:cafe::92,dst=2001:cafe::88,label=0,proto=58,tclass=0,hlimit=255,frag=no),dnl
++  icmpv6(type=136,code=0),dnl
++  nd(target=2001:cafe::92,sll=00:00:00:00:00:00,tll=f8:bc:12:44:34:b6)'
++])
++
++dnl Check that selection group is used in the trace.
++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \
++                | grep -E 'tunnel|actions'], [0], [dnl
++     -> output to native tunnel
++     -> tunneling to 2001:cafe::92 via br0
++     -> tunneling from aa:55:aa:55:00:00 2001:cafe::88 to f8:bc:12:44:34:b6 2001:cafe::92
++Datapath actions: tnl_push(tnl_port(6081),header(size=70,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl
++ipv6(src=2001:cafe::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl
++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),dnl
++hash(l4(0)),recirc(0x1)
++])
++
++dnl Now check that the packet is actually encapsulated and delivered.
++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap])
++
++packet=50540000000a5054000000091234
++eth=f8bc124434b6aa55aa55000086dd
++ip6=60000000001e11402001cafe0000000000000000000000882001cafe000000000000000000000092
++dnl Source port is based on a packet hash, so it may differ depending on the
++dnl compiler flags and CPU type.  Same for UDP checksum.  Masked with '....'.
++udp=....17c1001e....
++geneve=0000655800007b00
++encap=${eth}${ip6}${udp}${geneve}
++dnl Output to tunnel from an int-br internal port.
++dnl Checking that the packet arrived and was correctly encapsulated.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1])
++dnl Sending again to exercise the non-miss upcall path.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2])
++
++dnl Finally, checking that the datapath flow is also correct.
++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \
++            | strip_ufid | strip_used], [0], [dnl
++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl
++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl
++packets:1, bytes:14, used:0.0s, dnl
++actions:tnl_push(tnl_port(6081),header(size=70,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl
++ipv6(src=2001:cafe::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl
++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),dnl
++hash(l4(0)),recirc(0x2)
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
++AT_SETUP([tunnel_push_pop_ipv6 - Mirror over tunnels])
++OVS_VSWITCHD_START([dnl
++    add-br br-ext -- set bridge br-ext datapath_type=dummy \
++        other-config:hwaddr=aa:55:aa:55:00:00 \
++    -- add-port br0 t1 -- set Interface t1 type=geneve \
++        options:remote_ip=2001:cafe::91 \
++    -- add-port br0 t2 -- set Interface t2 type=erspan \
++        options:remote_ip=2001:cafe::92 options:key=flow \
++        options:erspan_ver=1 options:erspan_idx=flow \
++    -- add-port br0 p0 -- set Interface p0 type=dummy \
++    -- add-port br0 p1 -- set Interface p1 type=dummy \
++    -- add-port br-ext p-ext -- set Interface p-ext type=dummy \
++        options:pcap=ext.pcap])
++
++dnl Configure mirroring over the UDP and ERSPAN tunnels.
++AT_CHECK([dnl
++    ovs-vsctl \
++        set Bridge br0 mirrors=@m1,@m2 -- \
++        --id=@t1 get Port t1 -- \
++        --id=@t2 get Port t2 -- \
++        --id=@m1 create Mirror name=vxlan select_all=true output_port=@t1 -- \
++        --id=@m2 create Mirror name=erspan select_all=true output_port=@t2],
++    [0], [stdout])
++
++AT_CHECK([ovs-ofctl add-flow br-ext actions=normal])
++AT_CHECK([ovs-ofctl add-flow br0 actions=normal])
++
++dnl Make sure ephemeral ports stay static across tests.
++AT_CHECK([ovs-appctl tnl/egress_port_range 35190 35190], [0], [OK
++])
++
++dnl Setup an IP address.
++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br-ext 2001:cafe::90/64], [0], [OK
++])
++
++dnl Send two ND packets to set up the tunnels' next-hop MAC and port bindings.
++AT_CHECK([ovs-appctl netdev-dummy/receive p-ext dnl
++ 'eth(src=f8:bc:12:44:34:b3,dst=aa:55:aa:55:00:00),eth_type(0x86dd),dnl
++  ipv6(src=2001:cafe::91,dst=2001:cafe::90,label=0,proto=58,tclass=0,hlimit=255,frag=no),dnl
++  icmpv6(type=136,code=0),dnl
++  nd(target=2001:cafe::91,sll=00:00:00:00:00:00,tll=f8:bc:12:44:34:b3)'
++])
++AT_CHECK([ovs-appctl netdev-dummy/receive p-ext dnl
++ 'eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x86dd),dnl
++  ipv6(src=2001:cafe::92,dst=2001:cafe::90,label=0,proto=58,tclass=0,hlimit=255,frag=no),dnl
++  icmpv6(type=136,code=0),dnl
++  nd(target=2001:cafe::92,sll=00:00:00:00:00:00,tll=f8:bc:12:44:34:b6)'
++])
++
++m4_define([FLOW], [m4_join([,],
++  [in_port(p1)],
++  [eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800)],
++  [ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no)],
++  [icmp(type=8,code=0)])])
++
++m4_define([ERSPAN_ACT], [m4_join([,],
++  [clone(tnl_push(tnl_port(erspan_sys)],
++           [header(size=70,type=108],
++                  [eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd)],
++                  [ipv6(src=2001:cafe::90,dst=2001:cafe::92,label=0,proto=47,tclass=0x0,hlimit=64)],
++                  [erspan(ver=1,sid=0x0,idx=0x0))],
++           [out_port(br-ext))],
++         [p-ext)])])
++
++m4_define([GENEVE_ACT], [m4_join([,],
++  [clone(tnl_push(tnl_port(genev_sys_6081)],
++           [header(size=70,type=5],
++                   [eth(dst=f8:bc:12:44:34:b3,src=aa:55:aa:55:00:00,dl_type=0x86dd)],
++                   [ipv6(src=2001:cafe::90,dst=2001:cafe::91,label=0,proto=17,tclass=0x0,hlimit=64)],
++                   [udp(src=0,dst=6081,csum=0xffff)],
++                   [geneve(vni=0x0))],
++           [out_port(br-ext))],
++         [p-ext)])])
++
++dnl Verify packet is mirrored to both tunnels.  Tunnel actions may happen
++dnl in any order.
++AT_CHECK([ovs-appctl ofproto/trace --names ovs-dummy "FLOW"], [0], [stdout])
++AT_CHECK([grep -q "ERSPAN_ACT" stdout])
++AT_CHECK([grep -q "GENEVE_ACT" stdout])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
+diff --git a/tests/tunnel-push-pop.at b/tests/tunnel-push-pop.at
+index b1440f5904..885df07e5a 100644
+--- a/tests/tunnel-push-pop.at
++++ b/tests/tunnel-push-pop.at
+@@ -30,17 +30,15 @@ dummy@ovs-dummy: hit:0 missed:0
+     t4 5/3: (erspan: erspan_dir=flow, erspan_hwid=flow, erspan_idx=flow, erspan_ver=flow, key=56, remote_ip=flow)
+ ])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP addresses.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK
+ ])
+-
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK
+-])
+-
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0 pkt_mark=1234], [0], [OK
++dnl Checking that local routes for the added IPs were successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local
+ ])
+ 
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+@@ -237,18 +235,21 @@ dummy@ovs-dummy: hit:0 missed:0
+     t8 9/2152: (gtpu: key=123, remote_ip=1.1.2.92)
+ ])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP addresses.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK
+ ])
+-
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK
+-])
+-
++dnl Add a static route with a mark.
+ AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0 pkt_mark=1234], [0], [OK
+ ])
++dnl Checking that local routes for the added IPs and the static route with
++dnl a mark were successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep br0 | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local
++User: 1.1.2.0/24 MARK 1234 dev br0 SRC 1.1.2.88
++])
+ 
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+ 
+@@ -690,12 +691,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 -- set Interface t2 type=geneve \
+                        options:remote_ip=1.1.2.92 options:key=123 ofport_request=2 \
+                        ])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP address.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
+ ])
+ 
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+@@ -731,11 +732,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 dnl
+           -- set Interface t2 type=geneve options:remote_ip=1.1.2.92 dnl
+                               options:key=123 ofport_request=2])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP address.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
+ ])
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+ 
+@@ -777,6 +779,88 @@ AT_CHECK([ovs-appctl dpctl/dump-flows | grep -q 'slow_path(action)'], [0])
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
+ 
++AT_SETUP([tunnel_push_pop - local_ip configuration])
++
++OVS_VSWITCHD_START(
++    [add-port br0 p0 \
++     -- set Interface p0 type=dummy ofport_request=1 \
++                         other-config:hwaddr=aa:55:aa:55:00:00])
++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg])
++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy])
++AT_CHECK([ovs-vsctl add-port int-br t2 \
++          -- set Interface t2 type=geneve \
++                              options:local_ip=2.2.2.88 \
++                              options:remote_ip=1.1.2.92 \
++                              options:key=123 ofport_request=2])
++
++dnl Setup multiple IP addresses.
++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
++])
++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 2.2.2.88/24], [0], [OK
++])
++dnl Checking that local routes for the added IPs were successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++Cached: 2.2.2.0/24 dev br0 SRC 2.2.2.88 local
++])
++AT_CHECK([ovs-ofctl add-flow br0 action=normal])
++AT_CHECK([ovs-ofctl add-flow int-br action=normal])
++
++dnl This ARP reply from p0 has two effects:
++dnl 1. The ARP cache will learn that 1.1.2.92 is at f8:bc:12:44:34:b6.
++dnl 2. The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0.
++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl
++ 'recirc_id(0),in_port(1),dnl
++  eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),dnl
++  arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)'
++])
++
++dnl Check that local_ip is used for encapsulation in the trace.
++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \
++                | grep -E 'tunnel|actions'], [0], [dnl
++     -> output to native tunnel
++     -> tunneling to 1.1.2.92 via br0
++     -> tunneling from aa:55:aa:55:00:00 2.2.2.88 to f8:bc:12:44:34:b6 1.1.2.92
++Datapath actions: tnl_push(tnl_port(6081),header(size=50,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl
++ipv4(src=2.2.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl
++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),1
++])
++
++dnl Now check that the packet actually has the local_ip in the header.
++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap])
++
++packet=50540000000a5054000000091234
++eth=f8bc124434b6aa55aa5500000800
++ip4=450000320000400040113305020202580101025c
++dnl Source port is based on a packet hash, so it may differ depending on the
++dnl compiler flags and CPU type.  Masked with '....'.
++udp=....17c1001e0000
++geneve=0000655800007b00
++encap=${eth}${ip4}${udp}${geneve}
++dnl Output to tunnel from an int-br internal port.
++dnl Checking that the packet arrived and was correctly encapsulated.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1])
++dnl Sending again to exercise the non-miss upcall path.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2])
++
++dnl Finally, checking that the datapath flow also has a local_ip.
++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \
++            | strip_ufid | strip_used], [0], [dnl
++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl
++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl
++packets:1, bytes:14, used:0.0s, dnl
++actions:tnl_push(tnl_port(6081),header(size=50,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl
++ipv4(src=2.2.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl
++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),1
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
+ AT_SETUP([tunnel_push_pop - underlay bridge match])
+ 
+ OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy ofport_request=1 other-config:hwaddr=aa:55:aa:55:00:00])
+@@ -796,8 +880,11 @@ dummy@ovs-dummy: hit:0 missed:0
+ 
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
+ ])
++
+ AT_CHECK([ovs-ofctl add-flow br0 'arp,priority=1,action=normal'])
+ 
+ dnl Use arp reply to achieve tunnel next hop mac binding
+@@ -840,11 +927,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 dnl
+           -- set Interface t2 type=geneve options:remote_ip=1.1.2.92 dnl
+                               options:key=123 ofport_request=2])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP address.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
+ ])
+ AT_CHECK([ovs-ofctl add-flow br0 action=normal])
+ 
+@@ -908,10 +996,12 @@ AT_CHECK([ovs-vsctl set port p8  tag=42 dnl
+                  -- set port br0 tag=42 dnl
+                  -- set port p7  tag=200])
+ 
+-dnl Set IP address and route for br0.
++dnl Set an IP address for br0.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 10.0.0.2/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 10.0.0.11/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 10.0.0.0/24 dev br0 SRC 10.0.0.2 local
+ ])
+ 
+ dnl Send an ARP reply to port b8 on br0, so that packets will be forwarded
+@@ -953,10 +1043,12 @@ AT_CHECK([ovs-vsctl add-port ovs-tun0 tun0 dnl
+           -- add-port ovs-tun0 p7 dnl
+           -- set interface p7 type=dummy ofport_request=7])
+ 
+-dnl Set IP address and route for br0.
++dnl Set an IP address for br0.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 10.0.0.2/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 10.0.0.11/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 10.0.0.0/24 dev br0 SRC 10.0.0.2 local
+ ])
+ 
+ dnl Send an ARP reply to port b8 on br0, so that packets will be forwarded
+@@ -993,3 +1085,249 @@ udp(src=0,dst=4789,csum=0x0),vxlan(flags=0x8000000,vni=0x0)),out_port(100)),8),7
+ 
+ OVS_VSWITCHD_STOP
+ AT_CLEANUP
++
++AT_SETUP([tunnel_push_pop - use non-local port as tunnel endpoint])
++
++OVS_VSWITCHD_START([add-port br0 p0 \
++                    -- set Interface p0 type=dummy ofport_request=1])
++
++dnl Adding another port separately to ensure that it gets an
++dnl aa:55:aa:55:00:03 MAC address (dummy port number 3).
++AT_CHECK([ovs-vsctl add-port br0 vtep0 \
++            -- set interface vtep0 type=dummy ofport_request=2])
++AT_CHECK([ovs-vsctl \
++          -- add-br int-br \
++          -- set bridge int-br datapath_type=dummy \
++          -- set Interface int-br ofport_request=3])
++AT_CHECK([ovs-vsctl \
++          -- add-port int-br t1 \
++          -- set Interface t1 type=gre ofport_request=4 \
++                              options:remote_ip=1.1.2.92
++])
++
++AT_CHECK([ovs-appctl dpif/show], [0], [dnl
++dummy@ovs-dummy: hit:0 missed:0
++  br0:
++    br0 65534/100: (dummy-internal)
++    p0 1/1: (dummy)
++    vtep0 2/2: (dummy)
++  int-br:
++    int-br 65534/3: (dummy-internal)
++    t1 4/4: (gre: remote_ip=1.1.2.92)
++])
++
++AT_CHECK([ovs-appctl netdev-dummy/ip4addr vtep0 1.1.2.88/24], [0], [OK
++])
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 1.1.2.0/24 dev vtep0 SRC 1.1.2.88 local
++])
++
++AT_CHECK([ovs-ofctl add-flow br0 action=normal])
++AT_CHECK([ovs-ofctl add-flow int-br action=normal])
++
++dnl Use an ARP request and reply to establish the tunnel next-hop MAC binding.
++dnl By default, vtep0's MAC address is aa:55:aa:55:00:03.
++AT_CHECK([ovs-appctl netdev-dummy/receive vtep0 'recirc_id(0),in_port(2),dnl
++  eth(dst=ff:ff:ff:ff:ff:ff,src=aa:55:aa:55:00:03),eth_type(0x0806),dnl
++  arp(tip=1.1.2.92,sip=1.1.2.88,op=1,sha=aa:55:aa:55:00:03,tha=00:00:00:00:00:00)'])
++AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),dnl
++  eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0806),dnl
++  arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=aa:55:aa:55:00:03)'])
++
++AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl
++1.1.2.92                                      f8:bc:12:44:34:b6   br0
++])
++
++dnl Check GRE tunnel pop.
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),dnl
++  eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0800),dnl
++  ipv4(src=1.1.2.92,dst=1.1.2.88,proto=47,tos=0,ttl=64,frag=no)'],
++[0], [stdout])
++
++AT_CHECK([tail -1 stdout], [0],
++  [Datapath actions: tnl_pop(4)
++])
++
++dnl Check GRE tunnel push.
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(3),dnl
++  eth(dst=f9:bc:12:44:34:b6,src=af:55:aa:55:00:03),eth_type(0x0800),dnl
++  ipv4(src=1.1.3.88,dst=1.1.3.92,proto=1,tos=0,ttl=64,frag=no)'],
++[0], [stdout])
++AT_CHECK([tail -1 stdout], [0],
++  [Datapath actions: tnl_push(tnl_port(4),header(size=38,type=3,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),dnl
++ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),dnl
++gre((flags=0x0,proto=0x6558))),out_port(2)),1
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
++dnl This is a regression test for outer header checksum offloading
++dnl with recirculation.
++AT_SETUP([tunnel_push_pop - recirculation after encapsulation])
++
++OVS_VSWITCHD_START(
++    [add-port br0 p0 \
++     -- set Interface p0 type=dummy ofport_request=1 \
++                         other-config:hwaddr=aa:55:aa:55:00:00])
++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg])
++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy])
++AT_CHECK([ovs-vsctl add-port int-br t2 \
++          -- set Interface t2 type=geneve \
++                              options:remote_ip=1.1.2.92 \
++                              options:key=123 ofport_request=2])
++
++dnl Setup an IP address.
++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK
++])
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl
++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local
++])
++
++dnl Add a dp-hash selection group.
++AT_CHECK([ovs-ofctl add-group br0 \
++    'group_id=1234,type=select,selection_method=dp_hash,bucket=weight=1,output:p0'])
++AT_CHECK([ovs-ofctl add-flow br0 in_port=br0,action=group:1234])
++AT_CHECK([ovs-ofctl add-flow br0 in_port=p0,action=normal])
++
++AT_CHECK([ovs-ofctl add-flow int-br action=normal])
++
++dnl This ARP reply from p0 has two effects:
++dnl 1. The ARP cache will learn that 1.1.2.92 is at f8:bc:12:44:34:b6.
++dnl 2. The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0.
++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl
++ 'recirc_id(0),in_port(1),dnl
++  eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),dnl
++  arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)'
++])
++
++dnl Check that selection group is used in the trace.
++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \
++                | grep -E 'tunnel|actions'], [0], [dnl
++     -> output to native tunnel
++     -> tunneling to 1.1.2.92 via br0
++     -> tunneling from aa:55:aa:55:00:00 1.1.2.88 to f8:bc:12:44:34:b6 1.1.2.92
++Datapath actions: tnl_push(tnl_port(6081),header(size=50,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl
++ipv4(src=1.1.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl
++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),dnl
++hash(l4(0)),recirc(0x1)
++])
++
++dnl Now check that the packet is actually encapsulated and delivered.
++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap])
++
++packet=50540000000a5054000000091234
++eth=f8bc124434b6aa55aa5500000800
++ip4=450000320000400040113406010102580101025c
++dnl Source port is based on a packet hash, so it may differ depending on the
++dnl compiler flags and CPU type.  Masked with '....'.
++udp=....17c1001e0000
++geneve=0000655800007b00
++encap=${eth}${ip4}${udp}${geneve}
++dnl Output to tunnel from an int-br internal port.
++dnl Checking that the packet arrived and was correctly encapsulated.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1])
++
++dnl Sending again to exercise the non-miss upcall path.
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"])
++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2])
++
++dnl Finally, checking that the datapath flow is also correct.
++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \
++            | strip_ufid | strip_used], [0], [dnl
++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl
++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl
++packets:1, bytes:14, used:0.0s, dnl
++actions:tnl_push(tnl_port(6081),header(size=50,type=5,dnl
++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl
++ipv4(src=1.1.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl
++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),dnl
++hash(l4(0)),recirc(0x2)
++])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
++
++AT_SETUP([tunnel_push_pop - Mirror over tunnels])
++OVS_VSWITCHD_START([dnl
++    add-br br-ext -- set bridge br-ext datapath_type=dummy \
++        other-config:hwaddr=aa:55:aa:55:00:00 \
++    -- add-port br0 t1 -- set Interface t1 type=geneve \
++        options:remote_ip=1.1.1.1 \
++    -- add-port br0 t2 -- set Interface t2 type=erspan \
++        options:remote_ip=1.1.1.2 options:key=flow options:erspan_ver=1 \
++        options:erspan_idx=flow \
++    -- add-port br0 p0 -- set Interface p0 type=dummy \
++    -- add-port br0 p1 -- set Interface p1 type=dummy \
++    -- add-port br-ext p-ext -- set Interface p-ext type=dummy \
++        options:pcap=ext.pcap])
++
++dnl Configure mirroring over the UDP and ERSPAN tunnels.
++AT_CHECK([dnl
++    ovs-vsctl \
++        set Bridge br0 mirrors=@m1,@m2 -- \
++        --id=@t1 get Port t1 -- \
++        --id=@t2 get Port t2 -- \
++        --id=@m1 create Mirror name=vxlan select_all=true output_port=@t1 -- \
++        --id=@m2 create Mirror name=erspan select_all=true output_port=@t2],
++    [0], [stdout])
++
++AT_CHECK([ovs-ofctl add-flow br-ext actions=normal])
++AT_CHECK([ovs-ofctl add-flow br0 actions=normal])
++
++dnl Make sure ephemeral ports stay static across tests.
++AT_CHECK([ovs-appctl tnl/egress_port_range 35190 35190], [0], [OK
++])
++
++dnl Setup an IP address for the local side of the tunnel.
++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br-ext 1.1.1.3/24], [0], [OK
++])
++
++dnl Send two ARP replies to populate the ARP table with the tunnel remote endpoints.
++AT_CHECK([ovs-appctl netdev-dummy/receive p-ext dnl
++ 'eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),dnl
++  arp(sip=1.1.1.1,tip=1.1.1.3,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)'
++])
++AT_CHECK([ovs-appctl netdev-dummy/receive p-ext dnl
++ 'eth(src=f8:bc:12:44:34:b3,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),dnl
++  arp(sip=1.1.1.2,tip=1.1.1.3,op=2,sha=f8:bc:12:44:34:b3,tha=00:00:00:00:00:00)'
++])
++
++m4_define([FLOW], [m4_join([,],
++  [in_port(p1)],
++  [eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800)],
++  [ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no)],
++  [icmp(type=8,code=0)])])
++
++m4_define([ERSPAN_ACT], [m4_join([,],
++  [clone(tnl_push(tnl_port(erspan_sys)],
++           [header(size=50,type=107],
++                  [eth(dst=f8:bc:12:44:34:b3,src=aa:55:aa:55:00:00,dl_type=0x0800)],
++                  [ipv4(src=1.1.1.3,dst=1.1.1.2,proto=47,tos=0,ttl=64,frag=0x4000)],
++                  [erspan(ver=1,sid=0x0,idx=0x0))],
++           [out_port(br-ext))],
++         [p-ext)])])
++
++m4_define([GENEVE_ACT], [m4_join([,],
++  [clone(tnl_push(tnl_port(genev_sys_6081)],
++           [header(size=50,type=5],
++                   [eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800)],
++                   [ipv4(src=1.1.1.3,dst=1.1.1.1,proto=17,tos=0,ttl=64,frag=0x4000)],
++                   [udp(src=0,dst=6081,csum=0x0)],
++                   [geneve(vni=0x0))],
++           [out_port(br-ext))],
++         [p-ext)])])
++
++dnl Verify packet is mirrored to both tunnels.  Tunnel actions may happen
++dnl in any order.
++AT_CHECK([ovs-appctl ofproto/trace --names ovs-dummy "FLOW"], [0], [stdout])
++AT_CHECK([grep -q "ERSPAN_ACT" stdout])
++AT_CHECK([grep -q "GENEVE_ACT" stdout])
++
++OVS_VSWITCHD_STOP
++AT_CLEANUP
+diff --git a/tests/tunnel.at b/tests/tunnel.at
+index 282651ac73..9d539ee6f6 100644
+--- a/tests/tunnel.at
++++ b/tests/tunnel.at
+@@ -524,11 +524,12 @@ dummy@ovs-dummy: hit:0 missed:0
+     v2 3/3: (dummy-internal)
+ ])
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP address.
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 172.31.1.1/24], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add 172.31.1.0/24 br0], [0], [OK
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: 172.31.1.0/24 dev br0 SRC 172.31.1.1 local
+ ])
+ 
+ dnl change the flow table to bump the internal table version
+@@ -1268,6 +1269,18 @@ OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
+ OVS_APP_EXIT_AND_WAIT([ovsdb-server])]
+ AT_CLEANUP
+ 
++AT_SETUP([tunnel - re-create port with different name])
++OVS_VSWITCHD_START(
++  [add-port br0 p0 -- set int p0 type=vxlan options:remote_ip=10.10.10.1])
++
++AT_CHECK([ovs-vsctl --if-exists del-port p0 -- \
++          add-port br0 p1 -- \
++          set int p1 type=vxlan options:remote_ip=10.10.10.1])
++
++OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
++OVS_APP_EXIT_AND_WAIT([ovsdb-server])]
++AT_CLEANUP
++
+ AT_SETUP([tunnel - SRV6 basic])
+ OVS_VSWITCHD_START([add-port br0 p1 -- set Interface p1 type=dummy \
+                     ofport_request=1 \
+@@ -1276,15 +1289,12 @@ OVS_VSWITCHD_START([add-port br0 p1 -- set Interface p1 type=dummy \
+                     ofport_request=2])
+ OVS_VSWITCHD_DISABLE_TUNNEL_PUSH_POP
+ 
+-dnl First setup dummy interface IP address, then add the route
+-dnl so that tnl-port table can get valid IP address for the device.
++dnl Setup dummy interface IP address.
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 fc00::1/64], [0], [OK
+ ])
+-AT_CHECK([ovs-appctl ovs/route/add fc00::0/64 br0], [0], [OK
+-])
+-AT_CHECK([ovs-appctl ovs/route/show], [0], [dnl
+-Route Table:
+-User: fc00::/64 dev br0 SRC fc00::1
++dnl Checking that a local route for the added IP was successfully installed.
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl
++Cached: fc00::/64 dev br0 SRC fc00::1 local
+ ])
+ 
+ AT_DATA([flows.txt], [dnl
+diff --git a/tests/vlog.at b/tests/vlog.at
+index 785014956e..2768c07400 100644
+--- a/tests/vlog.at
++++ b/tests/vlog.at
+@@ -8,6 +8,7 @@ AT_CHECK([$PYTHON3 $srcdir/test-vlog.py --log-file log_file \
+ 
+ AT_CHECK([sed -e 's/.*-.*-.*T..:..:..Z |//' \
+ -e 's/File ".*", line [[0-9]][[0-9]]*,/File <name>, line <number>,/' \
++-e '/\^/d' \
+ stderr_log], [0], [dnl
+   0  | module_0 | EMER | emergency
+   1  | module_0 | ERR | error
+diff --git a/utilities/ovs-dpctl-top.in b/utilities/ovs-dpctl-top.in
+index 2c1766eff5..ec57eccd66 100755
+--- a/utilities/ovs-dpctl-top.in
++++ b/utilities/ovs-dpctl-top.in
+@@ -351,7 +351,7 @@ def args_get():
+     # None is a special value indicating to read flows from stdin.
+     # This handles the case
+     #   ovs-dpctl dump-flows | ovs-dpctl-flows.py
+-    parser.add_argument("-v", "--version", version="@VERSION@",
++    parser.add_argument("-v", "--version", version="@VERSION@@VERSION_SUFFIX@",
+                         action="version", help="show version")
+     parser.add_argument("-f", "--flow-file", dest="flowFiles", default=None,
+                         action="append",
+diff --git a/utilities/ovs-lib.in b/utilities/ovs-lib.in
+index 7812a94ee8..d162227dc5 100644
+--- a/utilities/ovs-lib.in
++++ b/utilities/ovs-lib.in
+@@ -70,7 +70,7 @@ ovs_ctl () {
+     esac
+ }
+ 
+-VERSION='@VERSION@'
++VERSION='@VERSION@@VERSION_SUFFIX@'
+ 
+ DAEMON_CWD=/
+ 
+diff --git a/utilities/ovs-parse-backtrace.in b/utilities/ovs-parse-backtrace.in
+index f44f05cd1e..42f831eed5 100755
+--- a/utilities/ovs-parse-backtrace.in
++++ b/utilities/ovs-parse-backtrace.in
+@@ -51,7 +51,7 @@ def addr2line(binary, addr):
+ 
+ 
+ def main():
+-    parser = optparse.OptionParser(version='@VERSION@',
++    parser = optparse.OptionParser(version='@VERSION@@VERSION_SUFFIX@',
+                                    usage="usage: %prog [binary]",
+                                    description="""\
+ Parses the output of ovs-appctl backtrace producing a more human readable
+diff --git a/utilities/ovs-pcap.in b/utilities/ovs-pcap.in
+index 6b5f63399e..d0ca947886 100755
+--- a/utilities/ovs-pcap.in
++++ b/utilities/ovs-pcap.in
+@@ -85,7 +85,7 @@ if __name__ == "__main__":
+             if key in ['-h', '--help']:
+                 usage()
+             elif key in ['-V', '--version']:
+-                print("ovs-pcap (Open vSwitch) @VERSION@")
++                print("ovs-pcap (Open vSwitch) @VERSION@@VERSION_SUFFIX@")
+             else:
+                 sys.exit(0)
+ 
+diff --git a/utilities/ovs-pki.in b/utilities/ovs-pki.in
+index e0ba910f94..d802354df5 100755
+--- a/utilities/ovs-pki.in
++++ b/utilities/ovs-pki.in
+@@ -57,6 +57,77 @@ FreeBSD|NetBSD|Darwin)
+     ;;
+ esac
+ 
++case $(uname -s) in
++MINGW*|MSYS*)
++    chmod()
++    {
++        local PERM=$1
++        local FILE=$2
++        local INH=
++
++        if test -d "${FILE}"; then
++            # Inheritance rules for folders: apply to a folder itself,
++            # subfolders and files within.
++            INH='(OI)(CI)'
++        fi
++
++        case "${PERM}" in
++        *700 | *600)
++            # Reset all own and inherited ACEs and grant full access to the
++            # "Creator Owner".  We're giving full access even for 0600,
++            # because it doesn't matter for a use case of ovs-pki.
++            icacls "${FILE}" /inheritance:r /grant:r "*S-1-3-0:${INH}F"
++            ;;
++        *750)
++            # Reset all own and inherited ACEs, grant full access to the
++            # "Creator Owner" and a read+execute access to the "Creator Group".
++            icacls "${FILE}" /inheritance:r /grant:r \
++                "*S-1-3-0:${INH}F" "*S-1-3-1:${INH}RX"
++            ;;
++        *)
++            echo >&2 "Unable to set ${PERM} mode for ${FILE}."
++            exit 1
++            ;;
++        esac
++    }
++
++    mkdir()
++    {
++        ARG_P=
++        PERM=
++        for arg; do
++            shift
++            case ${arg} in
++            -m?*)
++                PERM=${arg#??}
++                continue
++                ;;
++            -m)
++                PERM=$1
++                shift
++                continue
++                ;;
++            -p)
++                ARG_P=-p
++                continue
++                ;;
++            *)
++                set -- "$@" "${arg}"
++                ;;
++            esac
++        done
++
++        command mkdir ${ARG_P} "$@"
++        if [ -n "${PERM}" ]; then
++            for dir; do
++                shift
++                chmod "${PERM}" "${dir}"
++            done
++        fi
++    }
++    ;;
++esac
++
+ for option; do
+     # This option-parsing mechanism borrowed from a Autoconf-generated
+     # configure script under the following license:
+@@ -118,7 +189,7 @@ EOF
+             exit 0
+             ;;
+         -V|--version)
+-            echo "ovs-pki (Open vSwitch) @VERSION@"
++            echo "ovs-pki (Open vSwitch) @VERSION@@VERSION_SUFFIX@"
+             exit 0
+             ;;
+         --di*=*)
+@@ -466,14 +537,24 @@ CN = $cn
+ [ v3_req ]
+ subjectAltName = DNS:$cn
+ EOF
++    # It is important to create private keys in $TMP because umask doesn't
++    # work on Windows and permissions there are inherited from the folder.
++    # umask itself is still needed though to ensure correct permissions
++    # on non-Windows platforms.
+     if test $keytype = rsa; then
+-        (umask 077 && openssl genrsa -out "$1-privkey.pem" $bits) 1>&3 2>&3 \
+-            || exit $?
++        (umask 077 && openssl genrsa -out "$TMP/privkey.pem" $bits) \
++            1>&3 2>&3 || exit $?
+     else
+         must_exist "$dsaparam"
+-        (umask 077 && openssl gendsa -out "$1-privkey.pem" "$dsaparam") \
++        (umask 077 && openssl gendsa -out "$TMP/privkey.pem" "$dsaparam") \
+             1>&3 2>&3 || exit $?
+     fi
++    # Windows: applying permissions (ACEs) to the file itself, just in case.
++    # 'mv' should technically preserve all the inherited ACEs from a TMP
++    # folder, but it's better to not rely on that.
++    chmod 0600 "$TMP/privkey.pem"
++    mv "$TMP/privkey.pem" "$1-privkey.pem"
++
+     openssl req -config "$TMP/req.cnf" -new -text \
+         -key "$1-privkey.pem" -out "$1-req.pem" 1>&3 2>&3
+ }
+diff --git a/utilities/ovs-tcpdump.in b/utilities/ovs-tcpdump.in
+index 4cbd9a5d31..cb46e43ba8 100755
+--- a/utilities/ovs-tcpdump.in
++++ b/utilities/ovs-tcpdump.in
+@@ -47,7 +47,7 @@ try:
+     from ovs.fatal_signal import add_hook
+ except Exception:
+     print("ERROR: Please install the correct Open vSwitch python support")
+-    print("       libraries (version @VERSION@).")
++    print("       libraries (version @VERSION@@VERSION_SUFFIX@).")
+     print("       Alternatively, check that your PYTHONPATH is pointing to")
+     print("       the correct location.")
+     sys.exit(1)
+@@ -453,7 +453,7 @@ def main():
+         if cur in ['-h', '--help']:
+             usage()
+         elif cur in ['-V', '--version']:
+-            print("ovs-tcpdump (Open vSwitch) @VERSION@")
++            print("ovs-tcpdump (Open vSwitch) @VERSION@@VERSION_SUFFIX@")
+             sys.exit(0)
+         elif cur in ['--db-sock']:
+             db_sock = nxt
+@@ -534,29 +534,19 @@ def main():
+     ovsdb.close_idl()
+ 
+     pipes = _doexec(*([dump_cmd, '-i', mirror_interface] + tcpdargs))
+-    try:
+-        while pipes.poll() is None:
+-            data = pipes.stdout.readline().strip(b'\n')
+-            if len(data) == 0:
+-                raise KeyboardInterrupt
+-            print(data.decode('utf-8'))
+-        raise KeyboardInterrupt
+-    except KeyboardInterrupt:
+-        # If there is a pipe behind ovs-tcpdump (such as ovs-tcpdump
+-        # -i eth0 | grep "192.168.1.1"), the pipe is no longer available
+-        # after received Ctrl+C.
+-        # If we write data to an unavailable pipe, a pipe error will be
+-        # reported, so we turn off stdout to avoid subsequent flushing
+-        # of data into the pipe.
+-        try:
+-            sys.stdout.close()
+-        except IOError:
+-            pass
++    while pipes.poll() is None:
++        data = pipes.stdout.readline().strip(b'\n')
++        if len(data) == 0:
++            break
++        print(data.decode('utf-8'))
+ 
+-        if pipes.poll() is None:
+-            pipes.terminate()
++    try:
++        sys.stdout.close()
++    except IOError:
++        pass
+ 
+-    sys.exit(0)
++    if pipes.poll() is None:
++        pipes.terminate()
+ 
+ 
+ if __name__ == '__main__':
+diff --git a/utilities/ovs-tcpundump.in b/utilities/ovs-tcpundump.in
+index ede5448b49..2a1b08332d 100755
+--- a/utilities/ovs-tcpundump.in
++++ b/utilities/ovs-tcpundump.in
+@@ -46,7 +46,7 @@ if __name__ == "__main__":
+         if key in ['-h', '--help']:
+             usage()
+         elif key in ['-V', '--version']:
+-            print("ovs-tcpundump (Open vSwitch) @VERSION@")
++            print("ovs-tcpundump (Open vSwitch) @VERSION@@VERSION_SUFFIX@")
+             sys.exit(0)
+         else:
+             sys.exit(0)
+diff --git a/utilities/ovs-vlan-test.in b/utilities/ovs-vlan-test.in
+index de3ae16862..3c15e2b135 100755
+--- a/utilities/ovs-vlan-test.in
++++ b/utilities/ovs-vlan-test.in
+@@ -393,7 +393,7 @@ def main():
+             usage()
+             return 0
+         elif key in ['-V', '--version']:
+-            print_safe('ovs-vlan-test (Open vSwitch) @VERSION@')
++            print_safe('ovs-vlan-test (Open vSwitch) @VERSION@@VERSION_SUFFIX@')
+             return 0
+         elif key in ['-s', '--server']:
+             server = True
+diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c
+index 95a65fcdcd..0352030fec 100644
+--- a/vswitchd/bridge.c
++++ b/vswitchd/bridge.c
+@@ -3398,7 +3398,8 @@ bridge_run(void)
+ 
+             vlog_enable_async();
+ 
+-            VLOG_INFO_ONCE("%s (Open vSwitch) %s", program_name, VERSION);
++            VLOG_INFO_ONCE("%s (Open vSwitch) %s", program_name,
++                           VERSION VERSION_SUFFIX);
+         }
+     }
+ 
+diff --git a/dpdk/.github/workflows/build.yml b/dpdk/.github/workflows/build.yml
+index 272a6ffc7f..6b35c56da0 100644
+--- a/dpdk/.github/workflows/build.yml
++++ b/dpdk/.github/workflows/build.yml
+@@ -72,7 +72,7 @@ jobs:
+ 
+     steps:
+     - name: Checkout sources
+-      uses: actions/checkout@v3
++      uses: actions/checkout@v4
+     - name: Generate cache keys
+       id: get_ref_keys
+       run: |
+@@ -80,7 +80,7 @@ jobs:
+         echo 'libabigail=libabigail-${{ env.LIBABIGAIL_VERSION }}-${{ matrix.config.os }}' >> $GITHUB_OUTPUT
+         echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT
+     - name: Retrieve ccache cache
+-      uses: actions/cache@v3
++      uses: actions/cache@v4
+       with:
+         path: ~/.ccache
+         key: ${{ steps.get_ref_keys.outputs.ccache }}-${{ github.ref }}
+@@ -88,13 +88,13 @@ jobs:
+           ${{ steps.get_ref_keys.outputs.ccache }}-refs/heads/main
+     - name: Retrieve libabigail cache
+       id: libabigail-cache
+-      uses: actions/cache@v3
++      uses: actions/cache@v4
+       if: env.ABI_CHECKS == 'true'
+       with:
+         path: libabigail
+         key: ${{ steps.get_ref_keys.outputs.libabigail }}
+     - name: Retrieve ABI reference cache
+-      uses: actions/cache@v3
++      uses: actions/cache@v4
+       if: env.ABI_CHECKS == 'true'
+       with:
+         path: reference
+@@ -143,7 +143,7 @@ jobs:
+       run: .ci/linux-build.sh
+     - name: Upload logs on failure
+       if: failure()
+-      uses: actions/upload-artifact@v3
++      uses: actions/upload-artifact@v4
+       with:
+         name: meson-logs-${{ join(matrix.config.*, '-') }}
+         path: |
+@@ -171,7 +171,7 @@ jobs:
+         echo 'image=image-${{ matrix.config.image }}-'$(date -u +%Y-%m-%d) >> $GITHUB_OUTPUT
+     - name: Retrieve image cache
+       id: image_cache
+-      uses: actions/cache@v3
++      uses: actions/cache@v4
+       with:
+         path: ~/.image
+         key: ${{ steps.get_keys.outputs.image }}
+@@ -218,7 +218,7 @@ jobs:
+ 
+     steps:
+     - name: Checkout sources
+-      uses: actions/checkout@v3
++      uses: actions/checkout@v4
+     - name: Generate various keys
+       id: get_keys
+       run: |
+@@ -226,7 +226,7 @@ jobs:
+         echo 'logs=meson-logs-${{ join(matrix.config.*, '-') }}' | tr -d ':' >> $GITHUB_OUTPUT
+     - name: Retrieve image cache
+       id: image_cache
+-      uses: actions/cache@v3
++      uses: actions/cache@v4
+       with:
+         path: ~/.image
+         key: ${{ needs.prepare-container-images.outputs.image }}
+@@ -236,7 +236,7 @@ jobs:
+         echo 'Image ${{ matrix.config.image }} is not cached.'
+         false
+     - name: Retrieve ccache cache
+-      uses: actions/cache@v3
++      uses: actions/cache@v4
+       with:
+         path: ~/.ccache
+         key: ${{ steps.get_keys.outputs.ccache }}-${{ github.ref }}
+@@ -276,7 +276,7 @@ jobs:
+       run: docker kill dpdk
+     - name: Upload logs on failure
+       if: failure()
+-      uses: actions/upload-artifact@v3
++      uses: actions/upload-artifact@v4
+       with:
+         name: ${{ steps.get_keys.outputs.logs }}
+         path: |
+diff --git a/dpdk/.mailmap b/dpdk/.mailmap
+index ab0742a382..f2883144f3 100644
+--- a/dpdk/.mailmap
++++ b/dpdk/.mailmap
+@@ -2,7 +2,7 @@ Aakash Sasidharan <asasidharan@marvell.com>
+ Aaro Koskinen <aaro.koskinen@nsn.com>
+ Aaron Campbell <aaron@arbor.net>
+ Aaron Conole <aconole@redhat.com>
+-Abdullah Ömer Yamaç <omer.yamac@ceng.metu.edu.tr>
++Abdullah Ömer Yamaç <omer.yamac@ceng.metu.edu.tr> <aomeryamac@gmail.com>
+ Abdullah Sevincer <abdullah.sevincer@intel.com>
+ Abed Kamaluddin <akamaluddin@marvell.com>
+ Abhijit Gangurde <abhijit.gangurde@amd.com>
+@@ -29,10 +29,12 @@ Akash Saxena <akash.saxena@caviumnetworks.com>
+ Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
+ Akhil Goyal <gakhil@marvell.com> <akhil.goyal@nxp.com>
+ Akihiko Odaki <akihiko.odaki@daynix.com>
++Akshay Dorwat <akshay.dorwat@amd.com>
+ Alain Leon <xerebz@gmail.com>
+ Alan Brady <alan.brady@intel.com>
+ Alan Carew <alan.carew@intel.com>
+ Alan Dewar <alan.dewar@att.com> <adewar@brocade.com>
++Alan Elder <alan.elder@microsoft.com>
+ Alan Liu <zaoxingliu@gmail.com>
+ Alan Winkowski <walan@marvell.com>
+ Alejandro Lucero <alejandro.lucero@netronome.com>
+@@ -42,12 +44,14 @@ Aleksandr Miloshenko <a.miloshenko@f5.com>
+ Aleksey Baulin <aleksey.baulin@gmail.com>
+ Aleksey Katargin <gureedo@gmail.com>
+ Ales Musil <amusil@redhat.com>
++Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ Alexander Bechikov <asb.tyum@gmail.com>
+ Alexander Belyakov <abelyako@gmail.com>
+ Alexander Chernavin <achernavin@netgate.com>
+ Alexander Guy <alexander@andern.org>
+ Alexander Kozyrev <akozyrev@nvidia.com> <akozyrev@mellanox.com>
+ Alexander Matushevsky <matua@amazon.com>
++Alexander Skorichenko <askorichenko@netgate.com>
+ Alexander Solganik <solganik@gmail.com>
+ Alexander V Gutkin <alexander.v.gutkin@intel.com>
+ Alexandre Ferrieux <alexandre.ferrieux@orange.com>
+@@ -224,6 +228,7 @@ Cheng Liu <liucheng11@huawei.com>
+ Cheng Peng <cheng.peng5@zte.com.cn>
+ Chengwen Feng <fengchengwen@huawei.com>
+ Chenmin Sun <chenmin.sun@intel.com>
++Chenming Chang <ccm@ccm.ink>
+ Chenxu Di <chenxux.di@intel.com>
+ Chenyu Huang <chenyux.huang@intel.com>
+ Cheryl Houser <chouser@vmware.com>
+@@ -367,6 +372,7 @@ Elad Persiko <eladpe@mellanox.com>
+ Elena Agostini <eagostini@nvidia.com>
+ Eli Britstein <elibr@nvidia.com> <elibr@mellanox.com>
+ Elza Mathew <elza.mathew@intel.com>
++Emi Aoki <embm29@gmail.com>
+ Emma Finn <emma.finn@intel.com>
+ Emma Kenny <emma.kenny@intel.com>
+ Emmanuel Roullit <emmanuel.roullit@gmail.com>
+@@ -401,9 +407,11 @@ Fengtian Guo <fengtian.guo@6wind.com>
+ Ferdinand Thiessen <rpm@fthiessen.de>
+ Ferruh Yigit <ferruh.yigit@amd.com> <ferruh.yigit@intel.com> <ferruh.yigit@xilinx.com> <ferruhy@gmail.com>
+ Fidaullah Noonari <fidaullah.noonari@emumba.com>
++Fidel Castro <fidelcastro.s@hotmail.com>
+ Fiona Trahe <fiona.trahe@intel.com>
+ Flavia Musatescu <flavia.musatescu@intel.com>
+ Flavio Leitner <fbl@redhat.com> <fbl@sysclose.org>
++Flore Norceide <florestecien@gmail.com>
+ Forrest Shi <xuelin.shi@nxp.com>
+ Francesco Mancino <francesco.mancino@tutus.se>
+ Francesco Santoro <francesco.santoro@6wind.com>
+@@ -483,6 +491,7 @@ Hanoch Haim <hhaim@cisco.com>
+ Hanumanth Pothula <hpothula@marvell.com>
+ Hao Chen <chenh@yusur.tech> <chenhao164@huawei.com>
+ Hao Wu <hao.wu@intel.com>
++Haoqian He <haoqian.he@smartx.com>
+ Hari Kumar Vemula <hari.kumarx.vemula@intel.com>
+ Harini Ramakrishnan <harini.ramakrishnan@microsoft.com>
+ Hariprasad Govindharajan <hariprasad.govindharajan@intel.com>
+@@ -518,6 +527,8 @@ Hiral Shah <hshah@marvell.com>
+ Hiroki Shirokura <slank.dev@gmail.com>
+ Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
+ Hiroyuki Mikita <h.mikita89@gmail.com>
++Holly Nichols <hollynichols04@gmail.com>
++Hongbo Li <hongbox.li@intel.com>
+ Hongbo Zheng <zhenghongbo3@huawei.com>
+ Hongjun Ni <hongjun.ni@intel.com>
+ Hongzhi Guo <guohongzhi1@huawei.com>
+@@ -654,6 +665,7 @@ Jin Yu <jin.yu@intel.com>
+ Jiri Slaby <jslaby@suse.cz>
+ Job Abraham <job.abraham@intel.com>
+ Jochen Behrens <jbehrens@vmware.com>
++Joel Kavanagh <joel.kavanagh@intel.com>
+ Joey Xing <joey.xing@jaguarmicro.com>
+ Johan Faltstrom <johan.faltstrom@netinsight.net>
+ Johan Källström <johan.kallstrom@ericsson.com>
+@@ -675,7 +687,7 @@ John Ousterhout <ouster@cs.stanford.edu>
+ John Romein <romein@astron.nl>
+ John W. Linville <linville@tuxdriver.com>
+ Jonas Pfefferle <jpf@zurich.ibm.com> <pepperjo@japf.ch>
+-Jonathan Erb <jonathan.erb@threatblockr.com> <jonathan.erb@banduracyber.com>
++Jonathan Erb <jonathan.erb@threater.com> <jonathan.erb@threatblockr.com> <jonathan.erb@banduracyber.com>
+ Jonathan Tsai <jonathan1.tsai@intel.com>
+ Jon DeVree <nuxi@vault24.org>
+ Jon Loeliger <jdl@netgate.com>
+@@ -707,6 +719,7 @@ Junjie Wan <wanjunjie@bytedance.com>
+ Jun Qiu <jun.qiu@jaguarmicro.com>
+ Jun W Zhou <junx.w.zhou@intel.com>
+ Junxiao Shi <git@mail1.yoursunny.com>
++Jun Wang <junwang01@cestc.cn>
+ Jun Yang <jun.yang@nxp.com>
+ Junyu Jiang <junyux.jiang@intel.com>
+ Juraj Linkeš <juraj.linkes@pantheon.tech>
+@@ -721,7 +734,7 @@ Kamalakshitha Aligeri <kamalakshitha.aligeri@arm.com>
+ Kamil Bednarczyk <kamil.bednarczyk@intel.com>
+ Kamil Chalupnik <kamilx.chalupnik@intel.com>
+ Kamil Rytarowski <kamil.rytarowski@caviumnetworks.com>
+-Kamil Vojanec <xvojan00@stud.fit.vutbr.cz>
++Kamil Vojanec <vojanec@cesnet.cz> <xvojan00@stud.fit.vutbr.cz>
+ Kanaka Durga Kotamarthy <kkotamarthy@marvell.com>
+ Karen Kelly <karen.kelly@intel.com>
+ Karen Sornek <karen.sornek@intel.com>
+@@ -785,9 +798,11 @@ Leszek Zygo <leszek.zygo@intel.com>
+ Levend Sayar <levendsayar@gmail.com>
+ Lev Faerman <lev.faerman@intel.com>
+ Lewei Yang <leweix.yang@intel.com>
++Lewis Donzis <lew@perftech.com>
+ Leyi Rong <leyi.rong@intel.com>
+ Liang Ma <liangma@bytedance.com> <liangma@liangbit.com> <liang.j.ma@intel.com>
+ Liang-Min Larry Wang <liang-min.wang@intel.com>
++Liangxing Wang <liangxing.wang@arm.com>
+ Liang Xu <liang.xu@cinfotech.cn>
+ Liang Zhang <zhangliang@bigo.sg>
+ Li Feng <fengli@smartx.com>
+@@ -853,7 +868,6 @@ Manish Chopra <manishc@marvell.com>
+ Manish Kurup <manish.kurup@broadcom.com>
+ Manish Tomar <manish.tomar@nxp.com>
+ Mao Jiang <maox.jiang@intel.com>
+-Mao YingMing <maoyingming@baidu.com>
+ Marcel Apfelbaum <marcel@redhat.com>
+ Marcel Cornu <marcel.d.cornu@intel.com>
+ Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+@@ -870,6 +884,7 @@ Marcin Wojtas <mw@semihalf.com>
+ Marcin Zapolski <marcinx.a.zapolski@intel.com>
+ Marco Varlese <mvarlese@suse.de>
+ Marc Sune <marcdevel@gmail.com> <marc.sune@bisdn.de>
++Marek Mical <marekx.mical@intel.com>
+ Maria Lingemark <maria.lingemark@ericsson.com>
+ Mario Carrillo <mario.alfredo.c.arevalo@intel.com>
+ Mário Kuka <kuka@cesnet.cz>
+@@ -891,8 +906,9 @@ Martin Klozik <martinx.klozik@intel.com>
+ Martin Spinler <spinler@cesnet.cz>
+ Martin Weiser <martin.weiser@allegro-packets.com>
+ Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@intel.com> <martyna.szapar@intel.com>
+-Maryam Tahhan <maryam.tahhan@intel.com>
++Maryam Tahhan <mtahhan@redhat.com> <maryam.tahhan@intel.com>
+ Masoud Hasanifard <masoudhasanifard@gmail.com>
++Masoumeh Farhadi Nia <masoumeh.farhadinia@gmail.com>
+ Matan Azrad <matan@nvidia.com> <matan@mellanox.com>
+ Matej Vido <matejvido@gmail.com> <vido@cesnet.cz>
+ Mateusz Kowalski <mateusz.kowalski@intel.com>
+@@ -936,6 +952,7 @@ Michael Santana <maicolgabriel@hotmail.com> <msantana@redhat.com>
+ Michael Savisko <michaelsav@nvidia.com>
+ Michael Shamis <michaelsh@marvell.com>
+ Michael S. Tsirkin <mst@redhat.com>
++Michael Theodore Stolarchuk <mike.stolarchuk@arista.com>
+ Michael Wildt <michael.wildt@broadcom.com>
+ Michal Berger <michallinuxstuff@gmail.com>
+ Michal Jastrzebski <michalx.k.jastrzebski@intel.com>
+@@ -1009,7 +1026,8 @@ Nemanja Marjanovic <nemanja.marjanovic@intel.com>
+ Netanel Belgazal <netanel@amazon.com>
+ Netanel Gonen <netanelg@mellanox.com>
+ Niall Power <niall.power@intel.com>
+-Nick Connolly <nick.connolly@mayadata.io>
++Nicholas Pratte <npratte@iol.unh.edu>
++Nick Connolly <nick.connolly@arm.com> <nick.connolly@mayadata.io>
+ Nick Nunley <nicholas.d.nunley@intel.com>
+ Niclas Storm <niclas.storm@ericsson.com>
+ Nicolas Chautru <nicolas.chautru@intel.com>
+@@ -1127,6 +1145,7 @@ Przemyslaw Czesnowicz <przemyslaw.czesnowicz@intel.com>
+ Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
+ Przemyslaw Zegan <przemyslawx.zegan@intel.com>
+ Pu Xu <583493798@qq.com>
++Qian Hao <qi_an_hao@126.com>
+ Qian Xu <qian.q.xu@intel.com>
+ Qiao Liu <qiao.liu@intel.com>
+ Qi Fu <qi.fu@intel.com>
+@@ -1142,6 +1161,7 @@ Quentin Armitage <quentin@armitage.org.uk>
+ Qun Wan <qun.wan@intel.com>
+ Radha Mohan Chintakuntla <radhac@marvell.com>
+ Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
++Radoslaw Tyl <radoslawx.tyl@intel.com>
+ Radu Bulie <radu-andrei.bulie@nxp.com>
+ Radu Nicolau <radu.nicolau@intel.com>
+ Rafael Ávila de Espíndola <espindola@scylladb.com>
+@@ -1205,7 +1225,7 @@ Roman Kapl <rka@sysgo.com>
+ Roman Korynkevych <romanx.korynkevych@intel.com>
+ Roman Storozhenko <roman.storozhenko@intel.com>
+ Roman Zhukov <roman.zhukov@arknetworks.am> <roman.zhukov@oktetlabs.ru>
+-Ronak Doshi <doshir@vmware.com>
++Ronak Doshi <ronak.doshi@broadcom.com> <doshir@vmware.com>
+ Ron Beider <rbeider@amazon.com>
+ Ronghua Zhang <rzhang@vmware.com>
+ RongQiang Xie <xie.rongqiang@zte.com.cn>
+@@ -1275,9 +1295,11 @@ Shahed Shaikh <shshaikh@marvell.com> <shahed.shaikh@cavium.com>
+ Shai Brandes <shaibran@amazon.com>
+ Shailendra Bhatnagar <shailendra.bhatnagar@intel.com>
+ Shally Verma <shallyv@marvell.com> <shally.verma@caviumnetworks.com>
++Shani Peretz <shperetz@nvidia.com>
+ Shannon Nelson <snelson@pensando.io>
+ Shannon Zhao <zhaoshenglong@huawei.com>
+ Shaopeng He <shaopeng.he@intel.com>
++Shaowei Sun <1819846787@qq.com>
+ Sharmila Podury <sharmila.podury@att.com>
+ Sharon Haroni <sharon.haroni@intel.com>
+ Shay Agroskin <shayagr@amazon.com>
+@@ -1298,6 +1320,7 @@ Shiyang He <shiyangx.he@intel.com>
+ Shlomi Gridish <sgridish@marvell.com>
+ Shougang Wang <shougangx.wang@intel.com>
+ Shraddha Joshi <jshraddha@vmware.com>
++Shreesh Adiga <16567adigashreesh@gmail.com>
+ Shreyansh Jain <shreyansh.jain@nxp.com>
+ Shrikrishna Khare <skhare@vmware.com>
+ Shuai Zhu <shuaix.zhu@intel.com>
+@@ -1306,6 +1329,7 @@ Shuki Katzenelson <shuki@lightbitslabs.com>
+ Shun Hao <shunh@nvidia.com>
+ Shu Shen <shu.shen@radisys.com>
+ Shujing Dong <shujing.dong@corigine.com>
++Shuo Li <lishuo02@baidu.com>
+ Shweta Choudaha <shweta.choudaha@att.com>
+ Shyam Kumar Shrivastav <shrivastav.shyam@gmail.com>
+ Shy Shyman <shys@nvidia.com> <shys@mellanox.com>
+@@ -1424,7 +1448,9 @@ Timothy McDaniel <timothy.mcdaniel@intel.com>
+ Timothy Miskell <timothy.miskell@intel.com>
+ Timothy Redaelli <tredaelli@redhat.com>
+ Tim Shearer <tim.shearer@overturenetworks.com>
++Ting-Kai Ku <ting-kai.ku@intel.com>
+ Ting Xu <ting.xu@intel.com>
++Tingting Liao <tingtingx.liao@intel.com>
+ Tiwei Bie <tiwei.bie@intel.com> <btw@mail.ustc.edu.cn>
+ Todd Fujinaka <todd.fujinaka@intel.com>
+ Tomasz Cel <tomaszx.cel@intel.com>
+@@ -1437,6 +1463,7 @@ Tomasz Kulasek <tomaszx.kulasek@intel.com>
+ Tomasz Zawadzki <tomasz.zawadzki@intel.com>
+ Tom Barbette <barbette@kth.se> <tom.barbette@ulg.ac.be>
+ Tom Crugnale <tcrugnale@sandvine.com>
++Tom Jones <thj@freebsd.org>
+ Tom Millington <tmillington@solarflare.com>
+ Tom Rix <trix@redhat.com>
+ Tomer Shmilovich <tshmilovich@nvidia.com>
+@@ -1457,6 +1484,7 @@ Vadim Suraev <vadim.suraev@gmail.com>
+ Vakul Garg <vakul.garg@nxp.com>
+ Vamsi Attunuru <vattunuru@marvell.com>
+ Vanshika Shukla <vanshika.shukla@nxp.com>
++Varun Sethi <v.sethi@nxp.com>
+ Vasily Philipov <vasilyf@mellanox.com>
+ Veerasenareddy Burru <vburru@marvell.com>
+ Venkata Suresh Kumar P <venkata.suresh.kumar.p@intel.com>
+@@ -1485,6 +1513,8 @@ Vincent Guo <guopengfei160@163.com>
+ Vincent Jardin <vincent.jardin@6wind.com>
+ Vincent Li <vincent.mc.li@gmail.com>
+ Vincent S. Cojot <vcojot@redhat.com>
++Vinh Tran <vinh.t.tran10@gmail.com>
++Vipin Padmam Ramesh <vipinp@vmware.com>
+ Vipin Varghese <vipin.varghese@amd.com> <vipin.varghese@intel.com>
+ Vipul Ashri <vipul.ashri@oracle.com>
+ Visa Hankala <visa@hankala.org>
+@@ -1507,6 +1537,7 @@ Walter Heymans <walter.heymans@corigine.com>
+ Wang Sheng-Hui <shhuiw@gmail.com>
+ Wangyu (Eric) <seven.wangyu@huawei.com>
+ Waterman Cao <waterman.cao@intel.com>
++Wathsala Vithanage <wathsala.vithanage@arm.com>
+ Weichun Chen <weichunx.chen@intel.com>
+ Wei Dai <wei.dai@intel.com>
+ Weifeng Li <liweifeng96@126.com>
+@@ -1604,6 +1635,7 @@ Yi Lu <luyi68@live.com>
+ Yilun Xu <yilun.xu@intel.com>
+ Yinan Wang <yinan.wang@intel.com>
+ Ying A Wang <ying.a.wang@intel.com>
++Yingming Mao <maoyingming@baidu.com>
+ Yingya Han <yingyax.han@intel.com>
+ Yinjun Zhang <yinjun.zhang@corigine.com>
+ Yipeng Wang <yipeng1.wang@intel.com>
+diff --git a/dpdk/VERSION b/dpdk/VERSION
+index 94c0153b26..d1bc17f504 100644
+--- a/dpdk/VERSION
++++ b/dpdk/VERSION
+@@ -1 +1 @@
+-23.11.0
++23.11.2
+diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c
+index fc28e2d702..76c7475114 100644
+--- a/dpdk/app/dumpcap/main.c
++++ b/dpdk/app/dumpcap/main.c
+@@ -628,6 +628,11 @@ static void dpdk_init(void)
+ 		eal_argv[i++] = strdup(file_prefix);
+ 	}
+ 
++	for (i = 0; i < (unsigned int)eal_argc; i++) {
++		if (eal_argv[i] == NULL)
++			rte_panic("No memory\n");
++	}
++
+ 	if (rte_eal_init(eal_argc, eal_argv) < 0)
+ 		rte_exit(EXIT_FAILURE, "EAL init failed: is primary process running?\n");
+ }
+@@ -934,6 +939,11 @@ int main(int argc, char **argv)
+ {
+ 	struct rte_ring *r;
+ 	struct rte_mempool *mp;
++	struct sigaction action = {
++		.sa_flags = SA_RESTART,
++		.sa_handler = signal_handler,
++	};
++	struct sigaction origaction;
+ 	dumpcap_out_t out;
+ 	char *p;
+ 
+@@ -959,8 +969,13 @@ int main(int argc, char **argv)
+ 
+ 	compile_filters();
+ 
+-	signal(SIGINT, signal_handler);
+-	signal(SIGPIPE, SIG_IGN);
++	sigemptyset(&action.sa_mask);
++	sigaction(SIGTERM, &action, NULL);
++	sigaction(SIGINT, &action, NULL);
++	sigaction(SIGPIPE, &action, NULL);
++	sigaction(SIGHUP, NULL, &origaction);
++	if (origaction.sa_handler == SIG_DFL)
++		sigaction(SIGHUP, &action, NULL);
+ 
+ 	enable_primary_monitor();
+ 
+diff --git a/dpdk/app/graph/meson.build b/dpdk/app/graph/meson.build
+index 5b0f966d99..8aefdf505c 100644
+--- a/dpdk/app/graph/meson.build
++++ b/dpdk/app/graph/meson.build
+@@ -5,6 +5,7 @@
+ name = 'graph'
+ build = cc.has_header('sys/epoll.h')
+ if not build
++    reason = 'only supported on Linux'
+     subdir_done()
+ endif
+ 
+diff --git a/dpdk/app/meson.build b/dpdk/app/meson.build
+index 8aaed59f39..21b6da29b3 100644
+--- a/dpdk/app/meson.build
++++ b/dpdk/app/meson.build
+@@ -93,7 +93,7 @@ foreach app:apps
+     if not build
+         if reason != ''
+             dpdk_apps_disabled += app
+-            set_variable(app.underscorify() + '_disable_reason', reason)
++            set_variable('app_' + app.underscorify() + '_disable_reason', reason)
+         endif
+         continue
+     endif
+diff --git a/dpdk/app/pdump/main.c b/dpdk/app/pdump/main.c
+index 7a1c7bdf60..3592f8a865 100644
+--- a/dpdk/app/pdump/main.c
++++ b/dpdk/app/pdump/main.c
+@@ -171,6 +171,9 @@ parse_device_id(const char *key __rte_unused, const char *value,
+ 	struct pdump_tuples *pt = extra_args;
+ 
+ 	pt->device_id = strdup(value);
++	if (pt->device_id == NULL)
++		return -1;
++
+ 	pt->dump_by_type = DEVICE_ID;
+ 
+ 	return 0;
+@@ -568,11 +571,9 @@ disable_primary_monitor(void)
+ }
+ 
+ static void
+-signal_handler(int sig_num)
++signal_handler(int sig_num __rte_unused)
+ {
+-	if (sig_num == SIGINT) {
+-		quit_signal = 1;
+-	}
++	quit_signal = 1;
+ }
+ 
+ static inline int
+@@ -972,6 +973,11 @@ enable_primary_monitor(void)
+ int
+ main(int argc, char **argv)
+ {
++	struct sigaction action = {
++		.sa_flags = SA_RESTART,
++		.sa_handler = signal_handler,
++	};
++	struct sigaction origaction;
+ 	int diag;
+ 	int ret;
+ 	int i;
+@@ -980,8 +986,14 @@ main(int argc, char **argv)
+ 	char mp_flag[] = "--proc-type=secondary";
+ 	char *argp[argc + 2];
+ 
+-	/* catch ctrl-c so we can print on exit */
+-	signal(SIGINT, signal_handler);
++	/* catch ctrl-c so we can cleanup on exit */
++	sigemptyset(&action.sa_mask);
++	sigaction(SIGTERM, &action, NULL);
++	sigaction(SIGINT, &action, NULL);
++	sigaction(SIGPIPE, &action, NULL);
++	sigaction(SIGHUP, NULL, &origaction);
++	if (origaction.sa_handler == SIG_DFL)
++		sigaction(SIGHUP, &action, NULL);
+ 
+ 	argp[0] = argv[0];
+ 	argp[1] = n_flag;
+diff --git a/dpdk/app/test-bbdev/test_bbdev_perf.c b/dpdk/app/test-bbdev/test_bbdev_perf.c
+index dcce00aa0a..5c1755ae0d 100644
+--- a/dpdk/app/test-bbdev/test_bbdev_perf.c
++++ b/dpdk/app/test-bbdev/test_bbdev_perf.c
+@@ -94,6 +94,8 @@
+ #define K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */
+ #define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
+ #define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
++#define NUM_SC_PER_RB (12) /* Number of subcarriers in a RB in 3GPP. */
++#define BITS_PER_LLR  (8)  /* Number of bits in a LLR. */
+ 
+ #define HARQ_MEM_TOLERANCE 256
+ static struct test_bbdev_vector test_vector;
+@@ -2131,7 +2133,8 @@ validate_op_chain(struct rte_bbdev_op_data *op,
+ 		uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
+ 		total_data_size += orig_op->segments[i].length;
+ 
+-		if (orig_op->segments[i].length > RTE_BBDEV_LDPC_E_MAX_MBUF)
++		if ((orig_op->segments[i].length + RTE_PKTMBUF_HEADROOM)
++				> RTE_BBDEV_LDPC_E_MAX_MBUF)
+ 			ignore_mbuf = true;
+ 		if (!ignore_mbuf)
+ 			TEST_ASSERT(orig_op->segments[i].length == data_len,
+@@ -2895,8 +2898,14 @@ calc_fft_size(struct rte_bbdev_fft_op *op)
+ static uint32_t
+ calc_mldts_size(struct rte_bbdev_mldts_op *op)
+ {
+-	uint32_t output_size;
+-	output_size = op->mldts.num_layers * op->mldts.num_rbs * op->mldts.c_rep;
++	uint32_t output_size = 0;
++	uint16_t i;
++
++	for (i = 0; i < op->mldts.num_layers; i++)
++		output_size += op->mldts.q_m[i];
++
++	output_size *= NUM_SC_PER_RB * BITS_PER_LLR * op->mldts.num_rbs * (op->mldts.c_rep + 1);
++
+ 	return output_size;
+ }
+ 
+@@ -3399,15 +3408,6 @@ throughput_intr_lcore_ldpc_dec(void *arg)
+ 			if (unlikely(num_to_process - enqueued < num_to_enq))
+ 				num_to_enq = num_to_process - enqueued;
+ 
+-			enq = 0;
+-			do {
+-				enq += rte_bbdev_enqueue_ldpc_dec_ops(
+-						tp->dev_id,
+-						queue_id, &ops[enqueued],
+-						num_to_enq);
+-			} while (unlikely(num_to_enq != enq));
+-			enqueued += enq;
+-
+ 			/* Write to thread burst_sz current number of enqueued
+ 			 * descriptors. It ensures that proper number of
+ 			 * descriptors will be dequeued in callback
+@@ -3417,6 +3417,15 @@ throughput_intr_lcore_ldpc_dec(void *arg)
+ 			 */
+ 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ 
++			enq = 0;
++			do {
++				enq += rte_bbdev_enqueue_ldpc_dec_ops(
++						tp->dev_id,
++						queue_id, &ops[enqueued],
++						num_to_enq);
++			} while (unlikely(num_to_enq != enq));
++			enqueued += enq;
++
+ 			/* Wait until processing of previous batch is
+ 			 * completed
+ 			 */
+@@ -3491,14 +3500,6 @@ throughput_intr_lcore_dec(void *arg)
+ 			if (unlikely(num_to_process - enqueued < num_to_enq))
+ 				num_to_enq = num_to_process - enqueued;
+ 
+-			enq = 0;
+-			do {
+-				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
+-						queue_id, &ops[enqueued],
+-						num_to_enq);
+-			} while (unlikely(num_to_enq != enq));
+-			enqueued += enq;
+-
+ 			/* Write to thread burst_sz current number of enqueued
+ 			 * descriptors. It ensures that proper number of
+ 			 * descriptors will be dequeued in callback
+@@ -3508,6 +3509,14 @@ throughput_intr_lcore_dec(void *arg)
+ 			 */
+ 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ 
++			enq = 0;
++			do {
++				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
++						queue_id, &ops[enqueued],
++						num_to_enq);
++			} while (unlikely(num_to_enq != enq));
++			enqueued += enq;
++
+ 			/* Wait until processing of previous batch is
+ 			 * completed
+ 			 */
+@@ -3577,14 +3586,6 @@ throughput_intr_lcore_enc(void *arg)
+ 			if (unlikely(num_to_process - enqueued < num_to_enq))
+ 				num_to_enq = num_to_process - enqueued;
+ 
+-			enq = 0;
+-			do {
+-				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
+-						queue_id, &ops[enqueued],
+-						num_to_enq);
+-			} while (unlikely(enq != num_to_enq));
+-			enqueued += enq;
+-
+ 			/* Write to thread burst_sz current number of enqueued
+ 			 * descriptors. It ensures that proper number of
+ 			 * descriptors will be dequeued in callback
+@@ -3594,6 +3595,14 @@ throughput_intr_lcore_enc(void *arg)
+ 			 */
+ 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ 
++			enq = 0;
++			do {
++				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
++						queue_id, &ops[enqueued],
++						num_to_enq);
++			} while (unlikely(enq != num_to_enq));
++			enqueued += enq;
++
+ 			/* Wait until processing of previous batch is
+ 			 * completed
+ 			 */
+@@ -3665,15 +3674,6 @@ throughput_intr_lcore_ldpc_enc(void *arg)
+ 			if (unlikely(num_to_process - enqueued < num_to_enq))
+ 				num_to_enq = num_to_process - enqueued;
+ 
+-			enq = 0;
+-			do {
+-				enq += rte_bbdev_enqueue_ldpc_enc_ops(
+-						tp->dev_id,
+-						queue_id, &ops[enqueued],
+-						num_to_enq);
+-			} while (unlikely(enq != num_to_enq));
+-			enqueued += enq;
+-
+ 			/* Write to thread burst_sz current number of enqueued
+ 			 * descriptors. It ensures that proper number of
+ 			 * descriptors will be dequeued in callback
+@@ -3683,6 +3683,15 @@ throughput_intr_lcore_ldpc_enc(void *arg)
+ 			 */
+ 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ 
++			enq = 0;
++			do {
++				enq += rte_bbdev_enqueue_ldpc_enc_ops(
++						tp->dev_id,
++						queue_id, &ops[enqueued],
++						num_to_enq);
++			} while (unlikely(enq != num_to_enq));
++			enqueued += enq;
++
+ 			/* Wait until processing of previous batch is
+ 			 * completed
+ 			 */
+@@ -3754,14 +3763,6 @@ throughput_intr_lcore_fft(void *arg)
+ 			if (unlikely(num_to_process - enqueued < num_to_enq))
+ 				num_to_enq = num_to_process - enqueued;
+ 
+-			enq = 0;
+-			do {
+-				enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
+-						queue_id, &ops[enqueued],
+-						num_to_enq);
+-			} while (unlikely(enq != num_to_enq));
+-			enqueued += enq;
+-
+ 			/* Write to thread burst_sz current number of enqueued
+ 			 * descriptors. It ensures that proper number of
+ 			 * descriptors will be dequeued in callback
+@@ -3771,6 +3772,14 @@ throughput_intr_lcore_fft(void *arg)
+ 			 */
+ 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ 
++			enq = 0;
++			do {
++				enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
++						queue_id, &ops[enqueued],
++						num_to_enq);
++			} while (unlikely(enq != num_to_enq));
++			enqueued += enq;
++
+ 			/* Wait until processing of previous batch is
+ 			 * completed
+ 			 */
+@@ -3837,13 +3846,6 @@ throughput_intr_lcore_mldts(void *arg)
+ 			if (unlikely(num_to_process - enqueued < num_to_enq))
+ 				num_to_enq = num_to_process - enqueued;
+ 
+-			enq = 0;
+-			do {
+-				enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
+-						queue_id, &ops[enqueued], num_to_enq);
+-			} while (unlikely(enq != num_to_enq));
+-			enqueued += enq;
+-
+ 			/* Write to thread burst_sz current number of enqueued
+ 			 * descriptors. It ensures that proper number of
+ 			 * descriptors will be dequeued in callback
+@@ -3853,6 +3855,13 @@ throughput_intr_lcore_mldts(void *arg)
+ 			 */
+ 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ 
++			enq = 0;
++			do {
++				enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
++						queue_id, &ops[enqueued], num_to_enq);
++			} while (unlikely(enq != num_to_enq));
++			enqueued += enq;
++
+ 			/* Wait until processing of previous batch is
+ 			 * completed
+ 			 */
+diff --git a/dpdk/app/test-crypto-perf/cperf_ops.c b/dpdk/app/test-crypto-perf/cperf_ops.c
+index 84945d1313..0e79133310 100644
+--- a/dpdk/app/test-crypto-perf/cperf_ops.c
++++ b/dpdk/app/test-crypto-perf/cperf_ops.c
+@@ -21,7 +21,6 @@ cperf_set_ops_asym(struct rte_crypto_op **ops,
+ 		   uint64_t *tsc_start __rte_unused)
+ {
+ 	uint16_t i;
+-	void *asym_sess = (void *)sess;
+ 
+ 	for (i = 0; i < nb_ops; i++) {
+ 		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
+@@ -31,7 +30,7 @@ cperf_set_ops_asym(struct rte_crypto_op **ops,
+ 		asym_op->modex.base.length = options->modex_data->base.len;
+ 		asym_op->modex.result.data = options->modex_data->result.data;
+ 		asym_op->modex.result.length = options->modex_data->result.len;
+-		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
++		rte_crypto_op_attach_asym_session(ops[i], sess);
+ 	}
+ }
+ 
+@@ -64,7 +63,6 @@ cperf_set_ops_security(struct rte_crypto_op **ops,
+ 
+ 	for (i = 0; i < nb_ops; i++) {
+ 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+-		void *sec_sess = (void *)sess;
+ 		uint32_t buf_sz;
+ 
+ 		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
+@@ -72,7 +70,7 @@ cperf_set_ops_security(struct rte_crypto_op **ops,
+ 		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
+ 
+ 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+-		rte_security_attach_session(ops[i], sec_sess);
++		rte_security_attach_session(ops[i], sess);
+ 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ 							src_buf_offset);
+ 
+@@ -129,7 +127,6 @@ cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
+ 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
+ 		uint64_t *tsc_start)
+ {
+-	void *sec_sess = sess;
+ 	const uint32_t test_buffer_size = options->test_buffer_size;
+ 	const uint32_t headroom_sz = options->headroom_sz;
+ 	const uint32_t segment_sz = options->segment_sz;
+@@ -143,7 +140,7 @@ cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
+ 		struct rte_mbuf *m = sym_op->m_src;
+ 
+ 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+-		rte_security_attach_session(ops[i], sec_sess);
++		rte_security_attach_session(ops[i], sess);
+ 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ 							src_buf_offset);
+ 
+diff --git a/dpdk/app/test-crypto-perf/cperf_options_parsing.c b/dpdk/app/test-crypto-perf/cperf_options_parsing.c
+index 75afedc7fd..8909b5690d 100644
+--- a/dpdk/app/test-crypto-perf/cperf_options_parsing.c
++++ b/dpdk/app/test-crypto-perf/cperf_options_parsing.c
+@@ -516,6 +516,10 @@ parse_test_file(struct cperf_options *opts,
+ 		const char *arg)
+ {
+ 	opts->test_file = strdup(arg);
++	if (opts->test_file == NULL) {
++		RTE_LOG(ERR, USER1, "Dup vector file failed!\n");
++		return -1;
++	}
+ 	if (access(opts->test_file, F_OK) != -1)
+ 		return 0;
+ 	RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n");
+diff --git a/dpdk/app/test-crypto-perf/cperf_test_common.c b/dpdk/app/test-crypto-perf/cperf_test_common.c
+index 932aab16df..6b8ab65731 100644
+--- a/dpdk/app/test-crypto-perf/cperf_test_common.c
++++ b/dpdk/app/test-crypto-perf/cperf_test_common.c
+@@ -49,7 +49,6 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ {
+ 	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
+ 	uint16_t remaining_segments = segments_nb;
+-	struct rte_mbuf *next_mbuf;
+ 	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
+ 			 mbuf_offset + mbuf_hdr_size;
+ 
+@@ -70,15 +69,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ 		m->nb_segs = segments_nb;
+ 		m->port = 0xff;
+ 		rte_mbuf_refcnt_set(m, 1);
+-		next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
+-					mbuf_hdr_size + segment_sz);
+-		m->next = next_mbuf;
+-		m = next_mbuf;
+-		remaining_segments--;
+ 
++		remaining_segments--;
++		if (remaining_segments > 0) {
++			m->next = (struct rte_mbuf *)((uint8_t *) m + mbuf_hdr_size + segment_sz);
++			m = m->next;
++		} else {
++			m->next = NULL;
++		}
+ 	} while (remaining_segments > 0);
+-
+-	m->next = NULL;
+ }
+ 
+ static void
+@@ -150,11 +149,11 @@ cperf_alloc_common_memory(const struct cperf_options *options,
+ 	int ret;
+ 
+ 	/* Calculate the object size */
+-	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
+-		sizeof(struct rte_crypto_sym_op);
++	uint16_t crypto_op_size = sizeof(struct rte_crypto_op);
+ 	uint16_t crypto_op_private_size;
+ 
+ 	if (options->op_type == CPERF_ASYM_MODEX) {
++		crypto_op_size += sizeof(struct rte_crypto_asym_op);
+ 		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
+ 			 rte_socket_id());
+ 		*pool = rte_crypto_op_pool_create(
+@@ -171,6 +170,8 @@ cperf_alloc_common_memory(const struct cperf_options *options,
+ 		return 0;
+ 	}
+ 
++	crypto_op_size += sizeof(struct rte_crypto_sym_op);
++
+ 	/*
+ 	 * If doing AES-CCM, IV field needs to be 16 bytes long,
+ 	 * and AAD field needs to be long enough to have 18 bytes,
+@@ -227,7 +228,8 @@ cperf_alloc_common_memory(const struct cperf_options *options,
+ 				(mbuf_size * segments_nb);
+ 		params.dst_buf_offset = *dst_buf_offset;
+ 		/* Destination buffer will be one segment only */
+-		obj_size += max_size + sizeof(struct rte_mbuf);
++		obj_size += max_size + sizeof(struct rte_mbuf) +
++			options->headroom_sz + options->tailroom_sz;
+ 	}
+ 
+ 	*pool = rte_mempool_create_empty(pool_name,
+@@ -269,7 +271,7 @@ cperf_mbuf_set(struct rte_mbuf *mbuf,
+ 		const struct cperf_options *options,
+ 		const struct cperf_test_vector *test_vector)
+ {
+-	uint32_t segment_sz = options->segment_sz;
++	uint32_t segment_sz = options->segment_sz - options->headroom_sz - options->tailroom_sz;
+ 	uint8_t *mbuf_data;
+ 	uint8_t *test_data;
+ 	uint32_t remaining_bytes = options->max_buffer_size;
+diff --git a/dpdk/app/test-crypto-perf/cperf_test_latency.c b/dpdk/app/test-crypto-perf/cperf_test_latency.c
+index 484bc9eb4e..e55d293db3 100644
+--- a/dpdk/app/test-crypto-perf/cperf_test_latency.c
++++ b/dpdk/app/test-crypto-perf/cperf_test_latency.c
+@@ -121,7 +121,11 @@ store_timestamp(struct rte_crypto_op *op, uint64_t timestamp)
+ {
+ 	struct priv_op_data *priv_data;
+ 
+-	priv_data = (struct priv_op_data *) (op->sym + 1);
++	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
++		priv_data = (struct priv_op_data *) (op->sym + 1);
++	else
++		priv_data = (struct priv_op_data *) (op->asym + 1);
++
+ 	priv_data->result->status = op->status;
+ 	priv_data->result->tsc_end = timestamp;
+ }
+@@ -250,9 +254,13 @@ cperf_latency_test_runner(void *arg)
+ 				ctx->res[tsc_idx].tsc_start = tsc_start;
+ 				/*
+ 				 * Private data structure starts after the end of the
+-				 * rte_crypto_sym_op structure.
++				 * rte_crypto_sym_op (or rte_crypto_asym_op) structure.
+ 				 */
+-				priv_data = (struct priv_op_data *) (ops[i]->sym + 1);
++				if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
++					priv_data = (struct priv_op_data *) (ops[i]->sym + 1);
++				else
++					priv_data = (struct priv_op_data *) (ops[i]->asym + 1);
++
+ 				priv_data->result = (void *)&ctx->res[tsc_idx];
+ 				tsc_idx++;
+ 			}
+diff --git a/dpdk/app/test-crypto-perf/cperf_test_verify.c b/dpdk/app/test-crypto-perf/cperf_test_verify.c
+index a6c0ffe813..10172a53a0 100644
+--- a/dpdk/app/test-crypto-perf/cperf_test_verify.c
++++ b/dpdk/app/test-crypto-perf/cperf_test_verify.c
+@@ -111,8 +111,10 @@ cperf_verify_op(struct rte_crypto_op *op,
+ 	uint32_t len;
+ 	uint16_t nb_segs;
+ 	uint8_t *data;
+-	uint32_t cipher_offset, auth_offset;
+-	uint8_t	cipher, auth;
++	uint32_t cipher_offset, auth_offset = 0;
++	bool cipher = false;
++	bool digest_verify = false;
++	bool is_encrypt = false;
+ 	int res = 0;
+ 
+ 	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+@@ -150,57 +152,54 @@ cperf_verify_op(struct rte_crypto_op *op,
+ 
+ 	switch (options->op_type) {
+ 	case CPERF_CIPHER_ONLY:
+-		cipher = 1;
++		cipher = true;
+ 		cipher_offset = 0;
+-		auth = 0;
+-		auth_offset = 0;
+-		break;
+-	case CPERF_CIPHER_THEN_AUTH:
+-		cipher = 1;
+-		cipher_offset = 0;
+-		auth = 1;
+-		auth_offset = options->test_buffer_size;
++		is_encrypt = options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ 		break;
+ 	case CPERF_AUTH_ONLY:
+-		cipher = 0;
+ 		cipher_offset = 0;
+-		auth = 1;
+-		auth_offset = options->test_buffer_size;
++		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
++			auth_offset = options->test_buffer_size;
++			digest_verify = true;
++		}
+ 		break;
++	case CPERF_CIPHER_THEN_AUTH:
+ 	case CPERF_AUTH_THEN_CIPHER:
+-		cipher = 1;
++		cipher = true;
+ 		cipher_offset = 0;
+-		auth = 1;
+-		auth_offset = options->test_buffer_size;
++		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
++			auth_offset = options->test_buffer_size;
++			digest_verify = true;
++			is_encrypt = true;
++		}
+ 		break;
+ 	case CPERF_AEAD:
+-		cipher = 1;
++		cipher = true;
+ 		cipher_offset = 0;
+-		auth = 1;
+-		auth_offset = options->test_buffer_size;
++		if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
++			auth_offset = options->test_buffer_size;
++			digest_verify = true;
++			is_encrypt = true;
++		}
+ 		break;
+ 	default:
+ 		res = 1;
+ 		goto out;
+ 	}
+ 
+-	if (cipher == 1) {
+-		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+-			res += memcmp(data + cipher_offset,
++	if (cipher) {
++		if (is_encrypt)
++			res += !!memcmp(data + cipher_offset,
+ 					vector->ciphertext.data,
+ 					options->test_buffer_size);
+ 		else
+-			res += memcmp(data + cipher_offset,
++			res += !!memcmp(data + cipher_offset,
+ 					vector->plaintext.data,
+ 					options->test_buffer_size);
+ 	}
+ 
+-	if (auth == 1) {
+-		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+-			res += memcmp(data + auth_offset,
+-					vector->digest.data,
+-					options->digest_sz);
+-	}
++	if (digest_verify)
++		res += !!memcmp(data + auth_offset, vector->digest.data, options->digest_sz);
+ 
+ out:
+ 	rte_free(data);
+@@ -276,7 +275,6 @@ cperf_verify_test_runner(void *test_ctx)
+ 				ops_needed, ctx->sess, ctx->options,
+ 				ctx->test_vector, iv_offset, &imix_idx, NULL);
+ 
+-
+ 		/* Populate the mbuf with the test vector, for verification */
+ 		for (i = 0; i < ops_needed; i++)
+ 			cperf_mbuf_set(ops[i]->sym->m_src,
+@@ -294,6 +292,17 @@ cperf_verify_test_runner(void *test_ctx)
+ 		}
+ #endif /* CPERF_LINEARIZATION_ENABLE */
+ 
++		/**
++		 * When ops_needed is smaller than ops_enqd, the
++		 * unused ops need to be moved to the front for
++		 * next round use.
++		 */
++		if (unlikely(ops_enqd > ops_needed)) {
++			size_t nb_b_to_mov = ops_unused * sizeof(struct rte_crypto_op *);
++
++			memmove(&ops[ops_needed], &ops[ops_enqd], nb_b_to_mov);
++		}
++
+ 		/* Enqueue burst of ops on crypto device */
+ 		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ 				ops, burst_size);
+diff --git a/dpdk/app/test-dma-perf/main.c b/dpdk/app/test-dma-perf/main.c
+index 5f8bab8f45..544784df50 100644
+--- a/dpdk/app/test-dma-perf/main.c
++++ b/dpdk/app/test-dma-perf/main.c
+@@ -206,6 +206,8 @@ parse_lcore_dma(struct test_configure *test_case, const char *value)
+ 		return -1;
+ 
+ 	input = strndup(value, strlen(value) + 1);
++	if (input == NULL)
++		return -1;
+ 	addrs = input;
+ 
+ 	while (*addrs == '\0')
+diff --git a/dpdk/app/test-pmd/bpf_cmd.c b/dpdk/app/test-pmd/bpf_cmd.c
+index 46f6b7d6d2..24d34f983e 100644
+--- a/dpdk/app/test-pmd/bpf_cmd.c
++++ b/dpdk/app/test-pmd/bpf_cmd.c
+@@ -139,7 +139,7 @@ static cmdline_parse_token_string_t cmd_load_bpf_prm =
+ cmdline_parse_inst_t cmd_operate_bpf_ld_parse = {
+ 	.f = cmd_operate_bpf_ld_parsed,
+ 	.data = NULL,
+-	.help_str = "bpf-load rx|tx <port> <queue> <J|M|B> <file_name>",
++	.help_str = "bpf-load rx|tx <port> <queue> <J|M|-> <file_name>",
+ 	.tokens = {
+ 		(void *)&cmd_load_bpf_start,
+ 		(void *)&cmd_load_bpf_dir,
+diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c
+index 9369d3b4c5..d9304e4a32 100644
+--- a/dpdk/app/test-pmd/cmdline.c
++++ b/dpdk/app/test-pmd/cmdline.c
+@@ -3528,6 +3528,8 @@ parse_hdrs_list(const char *str, const char *item_name, unsigned int max_items,
+ 
+ 	nb_item = 0;
+ 	char *str2 = strdup(str);
++	if (str2 == NULL)
++		return nb_item;
+ 	cur = strtok_r(str2, ",", &tmp);
+ 	while (cur != NULL) {
+ 		parsed_items[nb_item] = get_ptype(cur);
+diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c
+index ce71818705..7e6e06a04f 100644
+--- a/dpdk/app/test-pmd/cmdline_flow.c
++++ b/dpdk/app/test-pmd/cmdline_flow.c
+@@ -3520,7 +3520,7 @@ static const struct token token_list[] = {
+ 	[QUEUE_DESTROY] = {
+ 		.name = "destroy",
+ 		.help = "destroy a flow rule",
+-		.next = NEXT(NEXT_ENTRY(QUEUE_DESTROY_ID),
++		.next = NEXT(NEXT_ENTRY(QUEUE_DESTROY_POSTPONE),
+ 			     NEXT_ENTRY(COMMON_QUEUE_ID)),
+ 		.args = ARGS(ARGS_ENTRY(struct buffer, queue)),
+ 		.call = parse_qo_destroy,
+@@ -5543,9 +5543,12 @@ static const struct token token_list[] = {
+ 	[ITEM_CONNTRACK] = {
+ 		.name = "conntrack",
+ 		.help = "conntrack state",
++		.priv = PRIV_ITEM(CONNTRACK,
++				  sizeof(struct rte_flow_item_conntrack)),
+ 		.next = NEXT(NEXT_ENTRY(ITEM_NEXT), NEXT_ENTRY(COMMON_UNSIGNED),
+ 			     item_param),
+ 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_conntrack, flags)),
++		.call = parse_vc,
+ 	},
+ 	[ITEM_PORT_REPRESENTOR] = {
+ 		.name = "port_representor",
+@@ -6905,7 +6908,7 @@ static const struct token token_list[] = {
+ 		.comp = comp_none,
+ 	},
+ 	[ACTION_MODIFY_FIELD_SRC_TAG_INDEX] = {
+-		.name = "stc_tag_index",
++		.name = "src_tag_index",
+ 		.help = "source field tag array",
+ 		.next = NEXT(action_modify_field_src,
+ 			     NEXT_ENTRY(COMMON_UNSIGNED)),
+@@ -7395,11 +7398,13 @@ static const struct token token_list[] = {
+ 		.type = "UNSIGNED",
+ 		.help = "unsigned integer value",
+ 		.call = parse_indlst_id2ptr,
++		.comp = comp_none,
+ 	},
+ 	[INDIRECT_LIST_ACTION_ID2PTR_CONF] = {
+ 		.type = "UNSIGNED",
+ 		.help = "unsigned integer value",
+ 		.call = parse_indlst_id2ptr,
++		.comp = comp_none,
+ 	},
+ 	[ACTION_SHARED_INDIRECT] = {
+ 		.name = "shared_indirect",
+@@ -11334,34 +11339,36 @@ parse_indlst_id2ptr(struct context *ctx, const struct token *token,
+ 	uint32_t id;
+ 	int ret;
+ 
+-	if (!action)
+-		return -1;
+ 	ctx->objdata = 0;
+ 	ctx->object = &id;
+ 	ctx->objmask = NULL;
+ 	ret = parse_int(ctx, token, str, len, ctx->object, sizeof(id));
++	ctx->object = action;
+ 	if (ret != (int)len)
+ 		return ret;
+-	ctx->object = action;
+-	action_conf = (void *)(uintptr_t)action->conf;
+-	action_conf->conf = NULL;
+-	switch (ctx->curr) {
+-	case INDIRECT_LIST_ACTION_ID2PTR_HANDLE:
+-	action_conf->handle = (typeof(action_conf->handle))
+-				port_action_handle_get_by_id(ctx->port, id);
+-		if (!action_conf->handle) {
+-			printf("no indirect list handle for id %u\n", id);
+-			return -1;
++
++	/* set handle and conf */
++	if (action) {
++		action_conf = (void *)(uintptr_t)action->conf;
++		action_conf->conf = NULL;
++		switch (ctx->curr) {
++		case INDIRECT_LIST_ACTION_ID2PTR_HANDLE:
++		action_conf->handle = (typeof(action_conf->handle))
++					port_action_handle_get_by_id(ctx->port, id);
++			if (!action_conf->handle) {
++				printf("no indirect list handle for id %u\n", id);
++				return -1;
++			}
++			break;
++		case INDIRECT_LIST_ACTION_ID2PTR_CONF:
++			indlst_conf = indirect_action_list_conf_get(id);
++			if (!indlst_conf)
++				return -1;
++			action_conf->conf = (const void **)indlst_conf->conf;
++			break;
++		default:
++			break;
+ 		}
+-		break;
+-	case INDIRECT_LIST_ACTION_ID2PTR_CONF:
+-		indlst_conf = indirect_action_list_conf_get(id);
+-		if (!indlst_conf)
+-			return -1;
+-		action_conf->conf = (const void **)indlst_conf->conf;
+-		break;
+-	default:
+-		break;
+ 	}
+ 	return ret;
+ }
+@@ -12609,6 +12616,7 @@ cmd_flow_parsed(const struct buffer *in)
+ 		port_queue_action_handle_create(
+ 				in->port, in->queue, in->postpone,
+ 				in->args.vc.attr.group,
++				in->command == QUEUE_INDIRECT_ACTION_LIST_CREATE,
+ 				&((const struct rte_flow_indir_action_conf) {
+ 					.ingress = in->args.vc.attr.ingress,
+ 					.egress = in->args.vc.attr.egress,
+diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c
+index cad7537bc6..40e4e83fb8 100644
+--- a/dpdk/app/test-pmd/config.c
++++ b/dpdk/app/test-pmd/config.c
+@@ -1891,8 +1891,7 @@ port_action_handle_flush(portid_t port_id)
+ 		/* Poisoning to make sure PMDs update it in case of error. */
+ 		memset(&error, 0x44, sizeof(error));
+ 		if (pia->handle != NULL) {
+-			ret = pia->type ==
+-			      RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
++			ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
+ 			      rte_flow_action_list_handle_destroy
+ 				      (port_id, pia->list_handle, &error) :
+ 			      rte_flow_action_handle_destroy
+@@ -1902,11 +1901,9 @@ port_action_handle_flush(portid_t port_id)
+ 				       pia->id);
+ 				ret = port_flow_complain(&error);
+ 			}
+-			tmp = &pia->next;
+-		} else {
+-			*tmp = pia->next;
+-			free(pia);
+ 		}
++		*tmp = pia->next;
++		free(pia);
+ 	}
+ 	return ret;
+ }
+@@ -2789,8 +2786,7 @@ port_queue_flow_create(portid_t port_id, queueid_t queue_id,
+ 		flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table,
+ 			rule_idx, actions, actions_idx, job, &error);
+ 	if (!flow) {
+-		uint64_t flow_id = pf->id;
+-		port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id);
++		free(pf);
+ 		free(job);
+ 		return port_flow_complain(&error);
+ 	}
+@@ -2997,6 +2993,7 @@ port_queue_flow_update(portid_t port_id, queueid_t queue_id,
+ int
+ port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
+ 				bool postpone, uint32_t id,
++				bool indirect_list,
+ 				const struct rte_flow_indir_action_conf *conf,
+ 				const struct rte_flow_action *action)
+ {
+@@ -3006,8 +3003,6 @@ port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
+ 	int ret;
+ 	struct rte_flow_error error;
+ 	struct queue_job *job;
+-	bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END;
+-
+ 
+ 	ret = action_alloc(port_id, id, &pia);
+ 	if (ret)
+@@ -3029,7 +3024,7 @@ port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
+ 	/* Poisoning to make sure PMDs update it in case of error. */
+ 	memset(&error, 0x88, sizeof(error));
+ 
+-	if (is_indirect_list)
++	if (indirect_list)
+ 		queue_action_list_handle_create(port_id, queue_id, pia, job,
+ 						&attr, conf, action, &error);
+ 	else
+@@ -4611,9 +4606,9 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
+ 				continue;
+ 			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
+ 			       share_group, share_rxq);
+-			printf("  lcore %hhu Port %hu queue %hu\n",
++			printf("  lcore %u Port %hu queue %hu\n",
+ 			       src_lc, src_port, src_rxq);
+-			printf("  lcore %hhu Port %hu queue %hu\n",
++			printf("  lcore %u Port %hu queue %hu\n",
+ 			       lc_id, fs->rx_port, fs->rx_queue);
+ 			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
+ 			       nb_rxq);
+@@ -4794,7 +4789,6 @@ rss_fwd_config_setup(void)
+ 	queueid_t  nb_q;
+ 	streamid_t  sm_id;
+ 	int start;
+-	int end;
+ 
+ 	nb_q = nb_rxq;
+ 	if (nb_q > nb_txq)
+@@ -4802,7 +4796,7 @@ rss_fwd_config_setup(void)
+ 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
+ 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
+ 	cur_fwd_config.nb_fwd_streams =
+-		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
++		(streamid_t) (nb_q / num_procs * cur_fwd_config.nb_fwd_ports);
+ 
+ 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
+ 		cur_fwd_config.nb_fwd_lcores =
+@@ -4824,7 +4818,6 @@ rss_fwd_config_setup(void)
+ 	 * the 2~3 queue for secondary process.
+ 	 */
+ 	start = proc_id * nb_q / num_procs;
+-	end = start + nb_q / num_procs;
+ 	rxp = 0;
+ 	rxq = start;
+ 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
+@@ -4843,8 +4836,6 @@ rss_fwd_config_setup(void)
+ 			continue;
+ 		rxp = 0;
+ 		rxq++;
+-		if (rxq >= end)
+-			rxq = start;
+ 	}
+ }
+ 
+@@ -4989,7 +4980,7 @@ icmp_echo_config_setup(void)
+ 	lcoreid_t lc_id;
+ 	uint16_t  sm_id;
+ 
+-	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
++	if ((lcoreid_t)(nb_txq * nb_fwd_ports) < nb_fwd_lcores)
+ 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
+ 			(nb_txq * nb_fwd_ports);
+ 	else
+diff --git a/dpdk/app/test-pmd/csumonly.c b/dpdk/app/test-pmd/csumonly.c
+index 21210aff43..71add6ca47 100644
+--- a/dpdk/app/test-pmd/csumonly.c
++++ b/dpdk/app/test-pmd/csumonly.c
+@@ -577,21 +577,23 @@ static uint64_t
+ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
+ 	uint64_t tx_offloads, int tso_enabled, struct rte_mbuf *m)
+ {
+-	struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
+-	struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
+ 	struct rte_udp_hdr *udp_hdr;
+ 	uint64_t ol_flags = 0;
+ 
+ 	if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
+-		ipv4_hdr->hdr_checksum = 0;
+ 		ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
+ 
+-		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
++		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
+ 			ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+-		else
++		} else {
++			struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
++
++			ipv4_hdr->hdr_checksum = 0;
+ 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+-	} else
++		}
++	} else {
+ 		ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6;
++	}
+ 
+ 	if (info->outer_l4_proto != IPPROTO_UDP)
+ 		return ol_flags;
+@@ -606,13 +608,6 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
+ 
+ 	/* Skip SW outer UDP checksum generation if HW supports it */
+ 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+-		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
+-			udp_hdr->dgram_cksum
+-				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+-		else
+-			udp_hdr->dgram_cksum
+-				= rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+-
+ 		ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
+ 		return ol_flags;
+ 	}
+@@ -870,16 +865,28 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
+ 
+ 	/* receive a burst of packet */
+ 	nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
+-	if (unlikely(nb_rx == 0))
++	if (unlikely(nb_rx == 0)) {
++#ifndef RTE_LIB_GRO
+ 		return false;
++#else
++		gro_enable = gro_ports[fs->rx_port].enable;
++		/*
++		 * Check if packets need to be flushed in the GRO context
++		 * due to a timeout.
++		 *
++		 * Continue only in GRO heavyweight mode and if there are
++		 * packets in the GRO context.
++		 */
++		if (!gro_enable || (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) ||
++			(rte_gro_get_pkt_count(current_fwd_lcore()->gro_ctx) == 0))
++			return false;
++#endif
++	}
+ 
+ 	rx_bad_ip_csum = 0;
+ 	rx_bad_l4_csum = 0;
+ 	rx_bad_outer_l4_csum = 0;
+ 	rx_bad_outer_ip_csum = 0;
+-#ifdef RTE_LIB_GRO
+-	gro_enable = gro_ports[fs->rx_port].enable;
+-#endif
+ 
+ 	txp = &ports[fs->tx_port];
+ 	tx_offloads = txp->dev_conf.txmode.offloads;
+@@ -1110,6 +1117,7 @@ tunnel_update:
+ 	}
+ 
+ #ifdef RTE_LIB_GRO
++	gro_enable = gro_ports[fs->rx_port].enable;
+ 	if (unlikely(gro_enable)) {
+ 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
+ 			nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
+@@ -1129,6 +1137,8 @@ tunnel_update:
+ 						gro_pkts_num);
+ 				fs->gro_times = 0;
+ 			}
++			if (nb_rx == 0)
++				return false;
+ 		}
+ 
+ 		pkts_ip_csum_recalc(pkts_burst, nb_rx, tx_offloads);
+diff --git a/dpdk/app/test-pmd/ieee1588fwd.c b/dpdk/app/test-pmd/ieee1588fwd.c
+index 386d9f10e6..52ae551e2f 100644
+--- a/dpdk/app/test-pmd/ieee1588fwd.c
++++ b/dpdk/app/test-pmd/ieee1588fwd.c
+@@ -197,14 +197,23 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
+ static int
+ port_ieee1588_fwd_begin(portid_t pi)
+ {
+-	rte_eth_timesync_enable(pi);
+-	return 0;
++	int ret;
++
++	ret = rte_eth_timesync_enable(pi);
++	if (ret)
++		printf("Port %u enable PTP failed, ret = %d\n", pi, ret);
++
++	return ret;
+ }
+ 
+ static void
+ port_ieee1588_fwd_end(portid_t pi)
+ {
+-	rte_eth_timesync_disable(pi);
++	int ret;
++
++	ret = rte_eth_timesync_disable(pi);
++	if (ret)
++		printf("Port %u disable PTP failed, ret = %d\n", pi, ret);
+ }
+ 
+ static void
+diff --git a/dpdk/app/test-pmd/parameters.c b/dpdk/app/test-pmd/parameters.c
+index a9ca58339d..7e23393392 100644
+--- a/dpdk/app/test-pmd/parameters.c
++++ b/dpdk/app/test-pmd/parameters.c
+@@ -100,10 +100,6 @@ usage(char* progname)
+ 	       "of peer ports.\n");
+ 	printf("  --eth-peer=X,M:M:M:M:M:M: set the MAC address of the X peer "
+ 	       "port (0 <= X < %d).\n", RTE_MAX_ETHPORTS);
+-#endif
+-#ifdef RTE_LIB_LATENCYSTATS
+-	printf("  --latencystats=N: enable latency and jitter statistics "
+-	       "monitoring on forwarding lcore id N.\n");
+ #endif
+ 	printf("  --disable-crc-strip: disable CRC stripping by hardware.\n");
+ 	printf("  --enable-scatter: enable scattered Rx.\n");
+@@ -167,8 +163,14 @@ usage(char* progname)
+ 	printf("  --disable-device-start: do not automatically start port\n");
+ 	printf("  --no-lsc-interrupt: disable link status change interrupt.\n");
+ 	printf("  --no-rmv-interrupt: disable device removal interrupt.\n");
++#ifdef RTE_LIB_BITRATESTATS
+ 	printf("  --bitrate-stats=N: set the logical core N to perform "
+ 		"bit-rate calculation.\n");
++#endif
++#ifdef RTE_LIB_LATENCYSTATS
++	printf("  --latencystats=N: enable latency and jitter statistics "
++	       "monitoring on forwarding lcore id N.\n");
++#endif
+ 	printf("  --print-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|flow_aged|err_recovering|recovery_success|recovery_failed|all>: "
+ 	       "enable print of designated event or all of them.\n");
+ 	printf("  --mask-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|flow_aged|err_recovering|recovery_success|recovery_failed||all>: "
+@@ -774,7 +776,7 @@ launch_args_parse(int argc, char** argv)
+ 				n = strtoul(optarg, &end, 10);
+ 				if ((optarg[0] == '\0') || (end == NULL) ||
+ 						(*end != '\0'))
+-					break;
++					rte_exit(EXIT_FAILURE, "Invalid stats-period value\n");
+ 
+ 				stats_period = n;
+ 				break;
+@@ -875,8 +877,8 @@ launch_args_parse(int argc, char** argv)
+ 			}
+ 			if (!strcmp(lgopts[opt_idx].name, "nb-cores")) {
+ 				n = atoi(optarg);
+-				if (n > 0 && n <= nb_lcores)
+-					nb_fwd_lcores = (uint8_t) n;
++				if (n > 0 && (lcoreid_t)n <= nb_lcores)
++					nb_fwd_lcores = (lcoreid_t) n;
+ 				else
+ 					rte_exit(EXIT_FAILURE,
+ 						 "nb-cores should be > 0 and <= %d\n",
+@@ -1126,7 +1128,9 @@ launch_args_parse(int argc, char** argv)
+ 								0,
+ 								&dev_info);
+ 					if (ret != 0)
+-						return;
++						rte_exit(EXIT_FAILURE, "Failed to get driver "
++							"recommended burst size, please provide a "
++							"value between 1 and %d\n", MAX_PKT_BURST);
+ 
+ 					rec_nb_pkts = dev_info
+ 						.default_rxportconf.burst_size;
+@@ -1493,7 +1497,7 @@ launch_args_parse(int argc, char** argv)
+ 			break;
+ 		default:
+ 			usage(argv[0]);
+-			fprintf(stderr, "Invalid option: %s\n", argv[optind]);
++			fprintf(stderr, "Invalid option: %s\n", argv[optind - 1]);
+ 			rte_exit(EXIT_FAILURE,
+ 				 "Command line is incomplete or incorrect\n");
+ 			break;
+diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h
+index 9b10a9ea1c..db6d0d35f4 100644
+--- a/dpdk/app/test-pmd/testpmd.h
++++ b/dpdk/app/test-pmd/testpmd.h
+@@ -84,7 +84,7 @@ extern volatile uint8_t f_quit;
+ /* Maximum number of pools supported per Rx queue */
+ #define MAX_MEMPOOL 8
+ 
+-typedef uint8_t  lcoreid_t;
++typedef uint32_t lcoreid_t;
+ typedef uint16_t portid_t;
+ typedef uint16_t queueid_t;
+ typedef uint16_t streamid_t;
+@@ -996,6 +996,7 @@ int port_queue_flow_update(portid_t port_id, queueid_t queue_id,
+ 			   const struct rte_flow_action *actions);
+ int port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
+ 			bool postpone, uint32_t id,
++			bool indirect_list,
+ 			const struct rte_flow_indir_action_conf *conf,
+ 			const struct rte_flow_action *action);
+ int port_queue_action_handle_destroy(portid_t port_id,
+diff --git a/dpdk/app/test/process.h b/dpdk/app/test/process.h
+index af7bc3e0de..9fb2bf481c 100644
+--- a/dpdk/app/test/process.h
++++ b/dpdk/app/test/process.h
+@@ -17,6 +17,7 @@
+ #include <dirent.h>
+ 
+ #include <rte_string_fns.h> /* strlcpy */
++#include <rte_devargs.h>
+ 
+ #ifdef RTE_EXEC_ENV_FREEBSD
+ #define self "curproc"
+@@ -34,6 +35,34 @@ extern uint16_t flag_for_send_pkts;
+ #endif
+ #endif
+ 
++#define PREFIX_ALLOW "--allow="
++
++static int
++add_parameter_allow(char **argv, int max_capacity)
++{
++	struct rte_devargs *devargs;
++	int count = 0;
++
++	RTE_EAL_DEVARGS_FOREACH(NULL, devargs) {
++		if (strlen(devargs->name) == 0)
++			continue;
++
++		if (devargs->data == NULL || strlen(devargs->data) == 0) {
++			if (asprintf(&argv[count], PREFIX_ALLOW"%s", devargs->name) < 0)
++				break;
++		} else {
++			if (asprintf(&argv[count], PREFIX_ALLOW"%s,%s",
++					 devargs->name, devargs->data) < 0)
++				break;
++		}
++
++		if (++count == max_capacity)
++			break;
++	}
++
++	return count;
++}
++
+ /*
+  * launches a second copy of the test process using the given argv parameters,
+  * which should include argv[0] as the process name. To identify in the
+@@ -43,8 +72,10 @@ extern uint16_t flag_for_send_pkts;
+ static inline int
+ process_dup(const char *const argv[], int numargs, const char *env_value)
+ {
+-	int num;
+-	char *argv_cpy[numargs + 1];
++	int num = 0;
++	char **argv_cpy;
++	int allow_num;
++	int argv_num;
+ 	int i, status;
+ 	char path[32];
+ #ifdef RTE_LIB_PDUMP
+@@ -58,11 +89,21 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
+ 	if (pid < 0)
+ 		return -1;
+ 	else if (pid == 0) {
++		allow_num = rte_devargs_type_count(RTE_DEVTYPE_ALLOWED);
++		argv_num = numargs + allow_num + 1;
++		argv_cpy = calloc(argv_num, sizeof(char *));
++		if (!argv_cpy)
++			rte_panic("Memory allocation failed\n");
++
+ 		/* make a copy of the arguments to be passed to exec */
+-		for (i = 0; i < numargs; i++)
++		for (i = 0; i < numargs; i++) {
+ 			argv_cpy[i] = strdup(argv[i]);
+-		argv_cpy[i] = NULL;
+-		num = numargs;
++			if (argv_cpy[i] == NULL)
++				rte_panic("Error dup args\n");
++		}
++		if (allow_num > 0)
++			num = add_parameter_allow(&argv_cpy[i], allow_num);
++		num += numargs;
+ 
+ #ifdef RTE_EXEC_ENV_LINUX
+ 		{
+diff --git a/dpdk/app/test/test.c b/dpdk/app/test/test.c
+index bfa9ea52e3..7b882a59de 100644
+--- a/dpdk/app/test/test.c
++++ b/dpdk/app/test/test.c
+@@ -375,11 +375,13 @@ unit_test_suite_runner(struct unit_test_suite *suite)
+ 
+ 			if (test_success == TEST_SUCCESS)
+ 				suite->succeeded++;
+-			else if (test_success == TEST_SKIPPED)
++			else if (test_success == TEST_SKIPPED) {
+ 				suite->skipped++;
+-			else if (test_success == -ENOTSUP)
++				suite->executed--;
++			} else if (test_success == -ENOTSUP) {
+ 				suite->unsupported++;
+-			else
++				suite->executed--;
++			} else
+ 				suite->failed++;
+ 		} else if (test_success == -ENOTSUP) {
+ 			suite->unsupported++;
+diff --git a/dpdk/app/test/test_cfgfile.c b/dpdk/app/test/test_cfgfile.c
+index 2f596affee..a5e3d8699c 100644
+--- a/dpdk/app/test/test_cfgfile.c
++++ b/dpdk/app/test/test_cfgfile.c
+@@ -168,7 +168,7 @@ test_cfgfile_invalid_section_header(void)
+ 	struct rte_cfgfile *cfgfile;
+ 
+ 	cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/invalid_section.ini", 0);
+-	TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
++	TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
+ 
+ 	return 0;
+ }
+@@ -185,7 +185,7 @@ test_cfgfile_invalid_comment(void)
+ 
+ 	cfgfile = rte_cfgfile_load_with_params(CFG_FILES_ETC "/sample2.ini", 0,
+ 					       &params);
+-	TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
++	TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
+ 
+ 	return 0;
+ }
+@@ -196,7 +196,7 @@ test_cfgfile_invalid_key_value_pair(void)
+ 	struct rte_cfgfile *cfgfile;
+ 
+ 	cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/empty_key_value.ini", 0);
+-	TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
++	TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
+ 
+ 	return 0;
+ }
+@@ -236,7 +236,7 @@ test_cfgfile_missing_section(void)
+ 	struct rte_cfgfile *cfgfile;
+ 
+ 	cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/missing_section.ini", 0);
+-	TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
++	TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c
+index 58561ededf..6cd38aefae 100644
+--- a/dpdk/app/test/test_cryptodev.c
++++ b/dpdk/app/test/test_cryptodev.c
+@@ -196,6 +196,8 @@ post_process_raw_dp_op(void *user_data,	uint32_t index __rte_unused,
+ static struct crypto_testsuite_params testsuite_params = { NULL };
+ struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
+ static struct crypto_unittest_params unittest_params;
++static bool enq_cb_called;
++static bool deq_cb_called;
+ 
+ int
+ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
+@@ -9121,7 +9123,7 @@ static int test_pdcp_proto(int i, int oop, enum rte_crypto_cipher_operation opc,
+ 	/* Out of place support */
+ 	if (oop) {
+ 		/*
+-		 * For out-op-place we need to alloc another mbuf
++		 * For out-of-place we need to alloc another mbuf
+ 		 */
+ 		ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ 		rte_pktmbuf_append(ut_params->obuf, output_vec_len);
+@@ -9330,7 +9332,7 @@ test_pdcp_proto_SGL(int i, int oop,
+ 	/* Out of place support */
+ 	if (oop) {
+ 		/*
+-		 * For out-op-place we need to alloc another mbuf
++		 * For out-of-place we need to alloc another mbuf
+ 		 */
+ 		ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ 		rte_pktmbuf_append(ut_params->obuf, frag_size_oop);
+@@ -13540,6 +13542,7 @@ test_enq_callback(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
+ 	RTE_SET_USED(ops);
+ 	RTE_SET_USED(user_param);
+ 
++	enq_cb_called = true;
+ 	printf("crypto enqueue callback called\n");
+ 	return nb_ops;
+ }
+@@ -13553,21 +13556,58 @@ test_deq_callback(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
+ 	RTE_SET_USED(ops);
+ 	RTE_SET_USED(user_param);
+ 
++	deq_cb_called = true;
+ 	printf("crypto dequeue callback called\n");
+ 	return nb_ops;
+ }
+ 
+ /*
+- * Thread using enqueue/dequeue callback with RCU.
++ * Process an enqueue/dequeue of a NULL crypto request to verify the callbacks with RCU.
+  */
+ static int
+-test_enqdeq_callback_thread(void *arg)
++test_enqdeq_callback_null_cipher(void)
+ {
+-	RTE_SET_USED(arg);
+-	/* DP thread calls rte_cryptodev_enqueue_burst()/
+-	 * rte_cryptodev_dequeue_burst() and invokes callback.
+-	 */
+-	test_null_burst_operation();
++	struct crypto_testsuite_params *ts_params = &testsuite_params;
++	struct crypto_unittest_params *ut_params = &unittest_params;
++
++	/* Setup Cipher Parameters */
++	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
++	ut_params->cipher_xform.next = &ut_params->auth_xform;
++
++	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
++	ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
++
++	/* Setup Auth Parameters */
++	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
++	ut_params->auth_xform.next = NULL;
++
++	ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
++	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
++
++	/* Create Crypto session */
++	ut_params->sess = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
++				&ut_params->auth_xform, ts_params->session_mpool);
++	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
++
++	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
++	TEST_ASSERT_NOT_NULL(ut_params->op, "Failed to allocate symmetric crypto op");
++
++	/* Allocate mbuf */
++	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
++	TEST_ASSERT_NOT_NULL(ut_params->ibuf, "Failed to allocate mbuf");
++
++	/* Append some random data */
++	TEST_ASSERT_NOT_NULL(rte_pktmbuf_append(ut_params->ibuf, sizeof(unsigned int)),
++			"no room to append data");
++
++	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
++
++	ut_params->op->sym->m_src = ut_params->ibuf;
++
++	/* Process crypto operation */
++	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0], ut_params->op),
++			"failed to process sym crypto op");
++
+ 	return 0;
+ }
+ 
+@@ -13575,6 +13615,7 @@ static int
+ test_enq_callback_setup(void)
+ {
+ 	struct crypto_testsuite_params *ts_params = &testsuite_params;
++	struct rte_cryptodev_sym_capability_idx cap_idx;
+ 	struct rte_cryptodev_info dev_info;
+ 	struct rte_cryptodev_qp_conf qp_conf = {
+ 		.nb_descriptors = MAX_NUM_OPS_INFLIGHT
+@@ -13582,6 +13623,19 @@ test_enq_callback_setup(void)
+ 
+ 	struct rte_cryptodev_cb *cb;
+ 	uint16_t qp_id = 0;
++	int j = 0;
++
++	/* Verify that the device supports the NULL algorithms used for enqueue/dequeue. */
++	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
++	cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL;
++	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
++			&cap_idx) == NULL)
++		return TEST_SKIPPED;
++	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
++	cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_NULL;
++	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
++			&cap_idx) == NULL)
++		return TEST_SKIPPED;
+ 
+ 	/* Stop the device in case it's started so it can be configured */
+ 	rte_cryptodev_stop(ts_params->valid_devs[0]);
+@@ -13605,9 +13659,16 @@ test_enq_callback_setup(void)
+ 			qp_conf.nb_descriptors, qp_id,
+ 			ts_params->valid_devs[0]);
+ 
++	enq_cb_called = false;
+ 	/* Test with invalid crypto device */
+ 	cb = rte_cryptodev_add_enq_callback(RTE_CRYPTO_MAX_DEVS,
+ 			qp_id, test_enq_callback, NULL);
++	if (rte_errno == ENOTSUP) {
++		RTE_LOG(ERR, USER1, "%s line %d: "
++			"rte_cryptodev_add_enq_callback() "
++			"Not supported, skipped\n", __func__, __LINE__);
++		return TEST_SKIPPED;
++	}
+ 	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+ 			"cryptodev %u did not fail",
+ 			qp_id, RTE_CRYPTO_MAX_DEVS);
+@@ -13637,12 +13698,11 @@ test_enq_callback_setup(void)
+ 
+ 	rte_cryptodev_start(ts_params->valid_devs[0]);
+ 
+-	/* Launch a thread */
+-	rte_eal_remote_launch(test_enqdeq_callback_thread, NULL,
+-				rte_get_next_lcore(-1, 1, 0));
++	TEST_ASSERT_SUCCESS(test_enqdeq_callback_null_cipher(), "Crypto processing failed");
+ 
+-	/* Wait until reader exited. */
+-	rte_eal_mp_wait_lcore();
++	/* Wait until the enqueue callback is called. */
++	while (!enq_cb_called && (j++ < 10))
++		rte_delay_ms(10);
+ 
+ 	/* Test with invalid crypto device */
+ 	TEST_ASSERT_FAIL(rte_cryptodev_remove_enq_callback(
+@@ -13667,6 +13727,8 @@ test_enq_callback_setup(void)
+ 			"qp %u on cryptodev %u",
+ 			qp_id, ts_params->valid_devs[0]);
+ 
++	TEST_ASSERT(enq_cb_called == true, "Crypto enqueue callback not called");
++
+ 	return TEST_SUCCESS;
+ }
+ 
+@@ -13674,6 +13736,7 @@ static int
+ test_deq_callback_setup(void)
+ {
+ 	struct crypto_testsuite_params *ts_params = &testsuite_params;
++	struct rte_cryptodev_sym_capability_idx cap_idx;
+ 	struct rte_cryptodev_info dev_info;
+ 	struct rte_cryptodev_qp_conf qp_conf = {
+ 		.nb_descriptors = MAX_NUM_OPS_INFLIGHT
+@@ -13681,6 +13744,19 @@ test_deq_callback_setup(void)
+ 
+ 	struct rte_cryptodev_cb *cb;
+ 	uint16_t qp_id = 0;
++	int j = 0;
++
++	/* Verify that the device supports the NULL algorithms used for enqueue/dequeue. */
++	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
++	cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL;
++	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
++			&cap_idx) == NULL)
++		return TEST_SKIPPED;
++	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
++	cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_NULL;
++	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
++			&cap_idx) == NULL)
++		return TEST_SKIPPED;
+ 
+ 	/* Stop the device in case it's started so it can be configured */
+ 	rte_cryptodev_stop(ts_params->valid_devs[0]);
+@@ -13704,9 +13780,16 @@ test_deq_callback_setup(void)
+ 			qp_conf.nb_descriptors, qp_id,
+ 			ts_params->valid_devs[0]);
+ 
++	deq_cb_called = false;
+ 	/* Test with invalid crypto device */
+ 	cb = rte_cryptodev_add_deq_callback(RTE_CRYPTO_MAX_DEVS,
+ 			qp_id, test_deq_callback, NULL);
++	if (rte_errno == ENOTSUP) {
++		RTE_LOG(ERR, USER1, "%s line %d: "
++			"rte_cryptodev_add_deq_callback() "
++			"Not supported, skipped\n", __func__, __LINE__);
++		return TEST_SKIPPED;
++	}
+ 	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+ 			"cryptodev %u did not fail",
+ 			qp_id, RTE_CRYPTO_MAX_DEVS);
+@@ -13736,12 +13819,11 @@ test_deq_callback_setup(void)
+ 
+ 	rte_cryptodev_start(ts_params->valid_devs[0]);
+ 
+-	/* Launch a thread */
+-	rte_eal_remote_launch(test_enqdeq_callback_thread, NULL,
+-				rte_get_next_lcore(-1, 1, 0));
++	TEST_ASSERT_SUCCESS(test_enqdeq_callback_null_cipher(), "Crypto processing failed");
+ 
+-	/* Wait until reader exited. */
+-	rte_eal_mp_wait_lcore();
++	/* Wait until the dequeue callback is called. */
++	while (!deq_cb_called && (j++ < 10))
++		rte_delay_ms(10);
+ 
+ 	/* Test with invalid crypto device */
+ 	TEST_ASSERT_FAIL(rte_cryptodev_remove_deq_callback(
+@@ -13766,6 +13848,8 @@ test_deq_callback_setup(void)
+ 			"qp %u on cryptodev %u",
+ 			qp_id, ts_params->valid_devs[0]);
+ 
++	TEST_ASSERT(deq_cb_called == true, "Crypto dequeue callback not called");
++
+ 	return TEST_SUCCESS;
+ }
+ 
+@@ -15486,7 +15570,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
+ 	}
+ 
+ 	/*
+-	 * For out-op-place we need to alloc another mbuf
++	 * For out-of-place we need to alloc another mbuf
+ 	 */
+ 	if (oop) {
+ 		ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+diff --git a/dpdk/app/test/test_cryptodev_asym.c b/dpdk/app/test/test_cryptodev_asym.c
+index 17daf734e8..3b57a0249b 100644
+--- a/dpdk/app/test/test_cryptodev_asym.c
++++ b/dpdk/app/test/test_cryptodev_asym.c
+@@ -547,8 +547,6 @@ ut_setup_asym(void)
+ 			qp_id, ts_params->valid_devs[0]);
+ 	}
+ 
+-	rte_cryptodev_stats_reset(ts_params->valid_devs[0]);
+-
+ 	/* Start the device */
+ 	TEST_ASSERT_SUCCESS(rte_cryptodev_start(ts_params->valid_devs[0]),
+ 						"Failed to start cryptodev %u",
+@@ -561,7 +559,6 @@ static void
+ ut_teardown_asym(void)
+ {
+ 	struct crypto_testsuite_params_asym *ts_params = &testsuite_params;
+-	struct rte_cryptodev_stats stats;
+ 	uint8_t dev_id = ts_params->valid_devs[0];
+ 
+ 	if (self->sess != NULL)
+@@ -571,8 +568,6 @@ ut_teardown_asym(void)
+ 	self->op = NULL;
+ 	self->result_op = NULL;
+ 
+-	rte_cryptodev_stats_get(ts_params->valid_devs[0], &stats);
+-
+ 	/* Stop the device */
+ 	rte_cryptodev_stop(ts_params->valid_devs[0]);
+ }
+@@ -631,7 +626,7 @@ test_capability(void)
+ 				RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)) {
+ 		RTE_LOG(INFO, USER1,
+ 				"Device doesn't support asymmetric. Test Skipped\n");
+-		return TEST_SUCCESS;
++		return TEST_SKIPPED;
+ 	}
+ 
+ 	/* print xform capability */
+@@ -646,6 +641,7 @@ test_capability(void)
+ 			capa = rte_cryptodev_asym_capability_get(dev_id,
+ 				(const struct
+ 				rte_cryptodev_asym_capability_idx *) &idx);
++			TEST_ASSERT_NOT_NULL(capa, "Failed to get asymmetric capability");
+ 			print_asym_capa(capa);
+ 			}
+ 	}
+@@ -3200,6 +3196,32 @@ static int send_one(void)
+ 	return TEST_SUCCESS;
+ }
+ 
++static int
++modular_cmpeq(const uint8_t *a, size_t a_len, const uint8_t *b, size_t b_len)
++{
++	const uint8_t *new_a, *new_b;
++	size_t i, j;
++
++	/* Strip leading NUL bytes */
++	for (i = 0; i < a_len; i++)
++		if (a[i] != 0)
++			break;
++
++	for (j = 0; j < b_len; j++)
++		if (b[j] != 0)
++			break;
++
++	if (a_len - i != b_len - j)
++		return 1;
++
++	new_a = &a[i];
++	new_b = &b[j];
++	if (memcmp(new_a, new_b, a_len - i))
++		return 1;
++
++	return 0;
++}
++
+ static int
+ modular_exponentiation(const void *test_data)
+ {
+@@ -3234,9 +3256,9 @@ modular_exponentiation(const void *test_data)
+ 
+ 	TEST_ASSERT_SUCCESS(send_one(),
+ 		"Failed to process crypto op");
+-	TEST_ASSERT_BUFFERS_ARE_EQUAL(vector->reminder.data,
++	TEST_ASSERT_SUCCESS(modular_cmpeq(vector->reminder.data, vector->reminder.len,
+ 			self->result_op->asym->modex.result.data,
+-			self->result_op->asym->modex.result.length,
++			self->result_op->asym->modex.result.length),
+ 			"operation verification failed\n");
+ 
+ 	return TEST_SUCCESS;
+@@ -3292,11 +3314,8 @@ modular_multiplicative_inverse(const void *test_data)
+ 	arg.qt.coef.data = coef; \
+ 	arg.qt.coef.length = vector->coef.len
+ 
+-typedef void (*rsa_key_init_t)(struct rte_crypto_asym_xform *,
+-	const struct rsa_test_data_2 *);
+-
+ static int
+-RSA_Encrypt(const struct rsa_test_data_2 *vector, uint8_t *cipher_buf)
++rsa_encrypt(const struct rsa_test_data_2 *vector, uint8_t *cipher_buf)
+ {
+ 	self->result_op = NULL;
+ 	/* Compute encryption on the test vector */
+@@ -3314,7 +3333,7 @@ RSA_Encrypt(const struct rsa_test_data_2 *vector, uint8_t *cipher_buf)
+ }
+ 
+ static int
+-RSA_Decrypt(const struct rsa_test_data_2 *vector, uint8_t *plaintext,
++rsa_decrypt(const struct rsa_test_data_2 *vector, uint8_t *plaintext,
+ 		const int use_op)
+ {
+ 	uint8_t cipher[TEST_DATA_SIZE] = { 0 };
+@@ -3335,41 +3354,14 @@ RSA_Decrypt(const struct rsa_test_data_2 *vector, uint8_t *plaintext,
+ 	return 0;
+ }
+ 
+-static void
+-RSA_key_init_Exp(struct rte_crypto_asym_xform *xform,
+-		const struct rsa_test_data_2 *vector)
+-{
+-	SET_RSA_PARAM(xform->rsa, vector, n);
+-	SET_RSA_PARAM(xform->rsa, vector, e);
+-	SET_RSA_PARAM(xform->rsa, vector, d);
+-	xform->rsa.key_type = RTE_RSA_KEY_TYPE_EXP;
+-}
+-
+-static void
+-RSA_key_init_CRT(struct rte_crypto_asym_xform *xform,
+-		const struct rsa_test_data_2 *vector)
+-{
+-	SET_RSA_PARAM(xform->rsa, vector, n);
+-	SET_RSA_PARAM(xform->rsa, vector, e);
+-	SET_RSA_PARAM_QT(xform->rsa, vector, p);
+-	SET_RSA_PARAM_QT(xform->rsa, vector, q);
+-	SET_RSA_PARAM_QT(xform->rsa, vector, dP);
+-	SET_RSA_PARAM_QT(xform->rsa, vector, dQ);
+-	SET_RSA_PARAM_QT(xform->rsa, vector, qInv);
+-	xform->rsa.key_type = RTE_RSA_KEY_TYPE_QT;
+-}
+-
+ static int
+-RSA_Init_Session(const struct rsa_test_data_2 *vector,
+-	rsa_key_init_t key_init)
++rsa_init_session(struct rte_crypto_asym_xform *xform)
+ {
+ 	const uint8_t dev_id = params->valid_devs[0];
+ 	struct rte_cryptodev_info dev_info;
+-	struct rte_crypto_asym_xform xform = { };
+ 	int ret = 0;
+ 
+-	key_init(&xform, vector);
+-	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_RSA;
++	xform->xform_type = RTE_CRYPTO_ASYM_XFORM_RSA;
+ 
+ 	rte_cryptodev_info_get(dev_id, &dev_info);
+ 	if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT)) {
+@@ -3377,7 +3369,7 @@ RSA_Init_Session(const struct rsa_test_data_2 *vector,
+ 			"Device doesn't support decrypt op with quintuple key type. Test skipped\n");
+ 		return TEST_SKIPPED;
+ 	}
+-	ret = rte_cryptodev_asym_session_create(dev_id, &xform,
++	ret = rte_cryptodev_asym_session_create(dev_id, xform,
+ 		params->session_mpool, &self->sess);
+ 	if (ret < 0) {
+ 		RTE_LOG(ERR, USER1,
+@@ -3388,17 +3380,23 @@ RSA_Init_Session(const struct rsa_test_data_2 *vector,
+ }
+ 
+ static int
+-KAT_RSA_Encrypt(const void *data)
++kat_rsa_encrypt(const void *data)
+ {
+ 	uint8_t cipher_buf[TEST_DATA_SIZE] = {0};
+ 	const struct rsa_test_data_2 *vector = data;
+-	int ret = RSA_Init_Session(vector, RSA_key_init_Exp);
++	struct rte_crypto_asym_xform xform = { };
++
++	SET_RSA_PARAM(xform.rsa, vector, n);
++	SET_RSA_PARAM(xform.rsa, vector, e);
++	SET_RSA_PARAM(xform.rsa, vector, d);
++	xform.rsa.key_type = RTE_RSA_KEY_TYPE_EXP;
++	int ret = rsa_init_session(&xform);
+ 
+ 	if (ret) {
+ 		RTE_LOG(ERR, USER1, "Failed to init session for RSA\n");
+ 		return ret;
+ 	}
+-	TEST_ASSERT_SUCCESS(RSA_Encrypt(vector, cipher_buf),
++	TEST_ASSERT_SUCCESS(rsa_encrypt(vector, cipher_buf),
+ 		"RSA: Failed to encrypt");
+ 	TEST_ASSERT_BUFFERS_ARE_EQUAL(vector->cipher.data,
+ 		self->result_op->asym->rsa.cipher.data,
+@@ -3408,17 +3406,26 @@ KAT_RSA_Encrypt(const void *data)
+ }
+ 
+ static int
+-KAT_RSA_Encrypt_CRT(const void *data)
++kat_rsa_encrypt_crt(const void *data)
+ {
+ 	uint8_t cipher_buf[TEST_DATA_SIZE] = {0};
+ 	const struct rsa_test_data_2 *vector = data;
+-	int ret = RSA_Init_Session(vector, RSA_key_init_CRT);
++	struct rte_crypto_asym_xform xform = { };
+ 
++	SET_RSA_PARAM(xform.rsa, vector, n);
++	SET_RSA_PARAM(xform.rsa, vector, e);
++	SET_RSA_PARAM_QT(xform.rsa, vector, p);
++	SET_RSA_PARAM_QT(xform.rsa, vector, q);
++	SET_RSA_PARAM_QT(xform.rsa, vector, dP);
++	SET_RSA_PARAM_QT(xform.rsa, vector, dQ);
++	SET_RSA_PARAM_QT(xform.rsa, vector, qInv);
++	xform.rsa.key_type = RTE_RSA_KEY_TYPE_QT;
++	int ret = rsa_init_session(&xform);
+ 	if (ret) {
+ 		RTE_LOG(ERR, USER1, "Failed to init session for RSA\n");
+ 		return ret;
+ 	}
+-	TEST_ASSERT_SUCCESS(RSA_Encrypt(vector, cipher_buf),
++	TEST_ASSERT_SUCCESS(rsa_encrypt(vector, cipher_buf),
+ 		"RSA: Failed to encrypt");
+ 	TEST_ASSERT_BUFFERS_ARE_EQUAL(vector->cipher.data,
+ 		self->result_op->asym->rsa.cipher.data,
+@@ -3428,17 +3435,23 @@ KAT_RSA_Encrypt_CRT(const void *data)
+ }
+ 
+ static int
+-KAT_RSA_Decrypt(const void *data)
++kat_rsa_decrypt(const void *data)
+ {
+ 	uint8_t message[TEST_DATA_SIZE] = {0};
+ 	const struct rsa_test_data_2 *vector = data;
+-	int ret = RSA_Init_Session(vector, RSA_key_init_Exp);
++	struct rte_crypto_asym_xform xform = { };
++
++	SET_RSA_PARAM(xform.rsa, vector, n);
++	SET_RSA_PARAM(xform.rsa, vector, e);
++	SET_RSA_PARAM(xform.rsa, vector, d);
++	xform.rsa.key_type = RTE_RSA_KEY_TYPE_EXP;
++	int ret = rsa_init_session(&xform);
+ 
+ 	if (ret) {
+ 		RTE_LOG(ERR, USER1, "Failed to init session for RSA\n");
+ 		return ret;
+ 	}
+-	TEST_ASSERT_SUCCESS(RSA_Decrypt(vector, message, 0),
++	TEST_ASSERT_SUCCESS(rsa_decrypt(vector, message, 0),
+ 		"RSA: Failed to encrypt");
+ 	TEST_ASSERT_BUFFERS_ARE_EQUAL(vector->message.data,
+ 		self->result_op->asym->rsa.message.data,
+@@ -3448,17 +3461,26 @@ KAT_RSA_Decrypt(const void *data)
+ }
+ 
+ static int
+-KAT_RSA_Decrypt_CRT(const void *data)
++kat_rsa_decrypt_crt(const void *data)
+ {
+ 	uint8_t message[TEST_DATA_SIZE] = {0};
+ 	const struct rsa_test_data_2 *vector = data;
+-	int ret = RSA_Init_Session(vector, RSA_key_init_CRT);
++	struct rte_crypto_asym_xform xform = { };
+ 
++	SET_RSA_PARAM(xform.rsa, vector, n);
++	SET_RSA_PARAM(xform.rsa, vector, e);
++	SET_RSA_PARAM_QT(xform.rsa, vector, p);
++	SET_RSA_PARAM_QT(xform.rsa, vector, q);
++	SET_RSA_PARAM_QT(xform.rsa, vector, dP);
++	SET_RSA_PARAM_QT(xform.rsa, vector, dQ);
++	SET_RSA_PARAM_QT(xform.rsa, vector, qInv);
++	xform.rsa.key_type = RTE_RSA_KEY_TYPE_QT;
++	int ret = rsa_init_session(&xform);
+ 	if (ret) {
+ 		RTE_LOG(ERR, USER1, "Failed to init session for RSA\n");
+ 		return ret;
+ 	}
+-	TEST_ASSERT_SUCCESS(RSA_Decrypt(vector, message, 0),
++	TEST_ASSERT_SUCCESS(rsa_decrypt(vector, message, 0),
+ 		"RSA: Failed to encrypt");
+ 	TEST_ASSERT_BUFFERS_ARE_EQUAL(vector->message.data,
+ 		self->result_op->asym->rsa.message.data,
+@@ -3535,20 +3557,20 @@ static struct unit_test_suite cryptodev_qat_asym_testsuite  = {
+ 		TEST_CASE_NAMED_WITH_DATA(
+ 			"RSA Encryption (n=128, pt=20, e=3) EXP, Padding: NONE",
+ 			ut_setup_asym, ut_teardown_asym,
+-			KAT_RSA_Encrypt, &RSA_vector_128_20_3_None),
++			kat_rsa_encrypt, &rsa_vector_128_20_3_none),
+ 		TEST_CASE_NAMED_WITH_DATA(
+ 			"RSA Decryption (n=128, pt=20, e=3) EXP, Padding: NONE",
+ 			ut_setup_asym, ut_teardown_asym,
+-			KAT_RSA_Decrypt, &RSA_vector_128_20_3_None),
++			kat_rsa_decrypt, &rsa_vector_128_20_3_none),
+ 		/* RSA CRT */
+ 		TEST_CASE_NAMED_WITH_DATA(
+ 			"RSA Encryption (n=128, pt=20, e=3) CRT, Padding: NONE",
+ 			ut_setup_asym, ut_teardown_asym,
+-			KAT_RSA_Encrypt_CRT, &RSA_vector_128_20_3_None),
++			kat_rsa_encrypt_crt, &rsa_vector_128_20_3_none),
+ 		TEST_CASE_NAMED_WITH_DATA(
+ 			"RSA Decryption (n=128, pt=20, e=3) CRT, Padding: NONE",
+ 			ut_setup_asym, ut_teardown_asym,
+-			KAT_RSA_Decrypt_CRT, &RSA_vector_128_20_3_None),
++			kat_rsa_decrypt_crt, &rsa_vector_128_20_3_none),
+ 		TEST_CASES_END() /**< NULL terminate unit test array */
+ 	}
+ };
+diff --git a/dpdk/app/test/test_cryptodev_rsa_test_vectors.h b/dpdk/app/test/test_cryptodev_rsa_test_vectors.h
+index b4982014a2..89981f13f0 100644
+--- a/dpdk/app/test/test_cryptodev_rsa_test_vectors.h
++++ b/dpdk/app/test/test_cryptodev_rsa_test_vectors.h
+@@ -72,7 +72,7 @@ struct rsa_test_data_2 {
+ };
+ 
+ static const struct
+-rsa_test_data_2 RSA_vector_128_20_3_None = {
++rsa_test_data_2 rsa_vector_128_20_3_none = {
+ 	.description =
+ 		"RSA Encryption Decryption (n=128, pt=20, e=3) EXP, QT",
+ 	.xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+diff --git a/dpdk/app/test/test_event_eth_tx_adapter.c b/dpdk/app/test/test_event_eth_tx_adapter.c
+index dbd22f6800..482b8e69e3 100644
+--- a/dpdk/app/test/test_event_eth_tx_adapter.c
++++ b/dpdk/app/test/test_event_eth_tx_adapter.c
+@@ -484,6 +484,10 @@ tx_adapter_service(void)
+ 	int internal_port;
+ 	uint32_t cap;
+ 
++	/* Initialize mbufs */
++	for (i = 0; i < RING_SIZE; i++)
++		rte_pktmbuf_reset(&bufs[i]);
++
+ 	memset(&dev_conf, 0, sizeof(dev_conf));
+ 	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+ 						&cap);
+diff --git a/dpdk/app/test/test_eventdev.c b/dpdk/app/test/test_eventdev.c
+index 71de947ce4..e4e234dc98 100644
+--- a/dpdk/app/test/test_eventdev.c
++++ b/dpdk/app/test/test_eventdev.c
+@@ -33,9 +33,15 @@ testsuite_setup(void)
+ 	uint8_t count;
+ 	count = rte_event_dev_count();
+ 	if (!count) {
++		int ret;
++
+ 		printf("Failed to find a valid event device,"
+-			" testing with event_skeleton device\n");
+-		return rte_vdev_init("event_skeleton", NULL);
++			" trying with event_skeleton device\n");
++		ret = rte_vdev_init("event_skeleton", NULL);
++		if (ret != 0) {
++			printf("No event device, skipping\n");
++			return TEST_SKIPPED;
++		}
+ 	}
+ 	return TEST_SUCCESS;
+ }
+diff --git a/dpdk/app/test/test_fbarray.c b/dpdk/app/test/test_fbarray.c
+index 26a51e2a3e..09f6907fb1 100644
+--- a/dpdk/app/test/test_fbarray.c
++++ b/dpdk/app/test/test_fbarray.c
+@@ -21,23 +21,41 @@ struct fbarray_testsuite_params {
+ };
+ 
+ static struct fbarray_testsuite_params param;
++static struct fbarray_testsuite_params unaligned;
+ 
+ #define FBARRAY_TEST_ARR_NAME "fbarray_autotest"
+ #define FBARRAY_TEST_LEN 256
++#define FBARRAY_UNALIGNED_TEST_ARR_NAME "fbarray_unaligned_autotest"
++#define FBARRAY_UNALIGNED_TEST_LEN 60
+ #define FBARRAY_TEST_ELT_SZ (sizeof(int))
+ 
+ static int autotest_setup(void)
+ {
+-	return rte_fbarray_init(&param.arr, FBARRAY_TEST_ARR_NAME,
++	int ret;
++
++	ret = rte_fbarray_init(&param.arr, FBARRAY_TEST_ARR_NAME,
+ 			FBARRAY_TEST_LEN, FBARRAY_TEST_ELT_SZ);
++	if (ret) {
++		printf("Failed to initialize test array\n");
++		return -1;
++	}
++	ret = rte_fbarray_init(&unaligned.arr, FBARRAY_UNALIGNED_TEST_ARR_NAME,
++			FBARRAY_UNALIGNED_TEST_LEN, FBARRAY_TEST_ELT_SZ);
++	if (ret) {
++		printf("Failed to initialize unaligned test array\n");
++		rte_fbarray_destroy(&param.arr);
++		return -1;
++	}
++	return 0;
+ }
+ 
+ static void autotest_teardown(void)
+ {
+ 	rte_fbarray_destroy(&param.arr);
++	rte_fbarray_destroy(&unaligned.arr);
+ }
+ 
+-static int init_array(void)
++static int init_aligned(void)
+ {
+ 	int i;
+ 	for (i = param.start; i <= param.end; i++) {
+@@ -47,11 +65,35 @@ static int init_array(void)
+ 	return 0;
+ }
+ 
+-static void reset_array(void)
++static int init_unaligned(void)
++{
++	int i;
++	for (i = unaligned.start; i <= unaligned.end; i++) {
++		if (rte_fbarray_set_used(&unaligned.arr, i))
++			return -1;
++	}
++	return 0;
++}
++
++static void reset_aligned(void)
+ {
+ 	int i;
+ 	for (i = 0; i < FBARRAY_TEST_LEN; i++)
+ 		rte_fbarray_set_free(&param.arr, i);
++	/* reset param as well */
++	param.start = -1;
++	param.end = -1;
++}
++
++static void reset_unaligned(void)
++{
++	int i;
++	for (i = 0; i < FBARRAY_UNALIGNED_TEST_LEN; i++)
++		rte_fbarray_set_free(&unaligned.arr, i);
++	/* reset param as well */
++	unaligned.start = -1;
++	unaligned.end = -1;
++
+ }
+ 
+ static int first_msk_test_setup(void)
+@@ -59,7 +101,7 @@ static int first_msk_test_setup(void)
+ 	/* put all within first mask */
+ 	param.start = 3;
+ 	param.end = 10;
+-	return init_array();
++	return init_aligned();
+ }
+ 
+ static int cross_msk_test_setup(void)
+@@ -67,7 +109,7 @@ static int cross_msk_test_setup(void)
+ 	/* put all within second and third mask */
+ 	param.start = 70;
+ 	param.end = 160;
+-	return init_array();
++	return init_aligned();
+ }
+ 
+ static int multi_msk_test_setup(void)
+@@ -75,7 +117,7 @@ static int multi_msk_test_setup(void)
+ 	/* put all within first and last mask */
+ 	param.start = 3;
+ 	param.end = FBARRAY_TEST_LEN - 20;
+-	return init_array();
++	return init_aligned();
+ }
+ 
+ static int last_msk_test_setup(void)
+@@ -83,7 +125,7 @@ static int last_msk_test_setup(void)
+ 	/* put all within last mask */
+ 	param.start = FBARRAY_TEST_LEN - 20;
+ 	param.end = FBARRAY_TEST_LEN - 1;
+-	return init_array();
++	return init_aligned();
+ }
+ 
+ static int full_msk_test_setup(void)
+@@ -91,16 +133,31 @@ static int full_msk_test_setup(void)
+ 	/* fill entire mask */
+ 	param.start = 0;
+ 	param.end = FBARRAY_TEST_LEN - 1;
+-	return init_array();
++	return init_aligned();
+ }
+ 
+-static int empty_msk_test_setup(void)
++static int lookahead_test_setup(void)
+ {
+-	/* do not fill anything in */
+-	reset_array();
+-	param.start = -1;
+-	param.end = -1;
+-	return 0;
++	/* set index 64 as used */
++	param.start = 64;
++	param.end = 64;
++	return init_aligned();
++}
++
++static int lookbehind_test_setup(void)
++{
++	/* set index 63 as used */
++	param.start = 63;
++	param.end = 63;
++	return init_aligned();
++}
++
++static int unaligned_test_setup(void)
++{
++	unaligned.start = 0;
++	/* leave one free bit at the end */
++	unaligned.end = FBARRAY_UNALIGNED_TEST_LEN - 2;
++	return init_unaligned();
+ }
+ 
+ static int test_invalid(void)
+@@ -454,7 +511,7 @@ static int test_basic(void)
+ 	if (check_free())
+ 		return TEST_FAILED;
+ 
+-	reset_array();
++	reset_aligned();
+ 
+ 	return TEST_SUCCESS;
+ }
+@@ -697,6 +754,26 @@ static int test_find(void)
+ 	return TEST_SUCCESS;
+ }
+ 
++static int test_find_unaligned(void)
++{
++	TEST_ASSERT_EQUAL((int)unaligned.arr.count, unaligned.end - unaligned.start + 1,
++			"Wrong element count\n");
++	/* ensure space is free before start */
++	if (ensure_correct(&unaligned.arr, 0, unaligned.start - 1, false))
++		return TEST_FAILED;
++	/* ensure space is occupied where it's supposed to be */
++	if (ensure_correct(&unaligned.arr, unaligned.start, unaligned.end, true))
++		return TEST_FAILED;
++	/* ensure space after end is free as well */
++	if (ensure_correct(&unaligned.arr, unaligned.end + 1, FBARRAY_UNALIGNED_TEST_LEN - 1,
++			false))
++		return TEST_FAILED;
++	/* test that the find_biggest APIs work correctly */
++	if (test_biggest(&unaligned.arr, unaligned.start, unaligned.end))
++		return TEST_FAILED;
++	return TEST_SUCCESS;
++}
++
+ static int test_empty(void)
+ {
+ 	TEST_ASSERT_EQUAL((int)param.arr.count, 0, "Wrong element count\n");
+@@ -709,6 +786,87 @@ static int test_empty(void)
+ 	return TEST_SUCCESS;
+ }
+ 
++static int test_lookahead(void)
++{
++	int ret;
++
++	/* run regular test first */
++	ret = test_find();
++	if (ret != TEST_SUCCESS)
++		return ret;
++
++	/* test that we can find a free chunk when not starting from index 0 */
++	TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(&param.arr, 1, param.start),
++			param.start + 1, "Free chunk index is wrong\n");
++	return TEST_SUCCESS;
++}
++
++static int test_lookbehind(void)
++{
++	int ret, free_len = 2;
++
++	/* run regular test first */
++	ret = test_find();
++	if (ret != TEST_SUCCESS)
++		return ret;
++
++	/* test that we can find a free chunk spanning a mask boundary */
++	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(&param.arr, param.start + 1, free_len),
++			param.start - free_len, "Free chunk index is wrong\n");
++	return TEST_SUCCESS;
++}
++
++static int test_lookahead_mask(void)
++{
++	/*
++	 * There is a certain type of lookahead behavior we want to test here,
++	 * namely masking of bits that were scanned with lookahead but that we
++	 * know do not match our criteria. This is achieved in the following steps:
++	 *
++	 *   0. Look for a big enough chunk of free space (say, 62 elements)
++	 *   1. Trigger lookahead by breaking a run somewhere inside mask 0
++	 *      (indices 0-63)
++	 *   2. Fail lookahead by breaking the run somewhere inside mask 1
++	 *      (indices 64-127)
++	 *   3. Ensure that we can still find free space in mask 1 afterwards
++	 */
++
++	/* break run on first mask */
++	rte_fbarray_set_used(&param.arr, 61);
++	/* break run on second mask */
++	rte_fbarray_set_used(&param.arr, 70);
++
++	/* we expect to find free space at 71 */
++	TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(&param.arr, 0, 62),
++			71, "Free chunk index is wrong\n");
++	return TEST_SUCCESS;
++}
++
++static int test_lookbehind_mask(void)
++{
++	/*
++	 * There is a certain type of lookbehind behavior we want to test here,
++	 * namely masking of bits that were scanned with lookbehind but that we
++	 * know do not match our criteria. This is achieved in the following steps:
++	 *
++	 *   0. Look for a big enough chunk of free space (say, 62 elements)
++	 *   1. Trigger lookbehind by breaking a run somewhere inside mask 2
++	 *      (indices 128-191)
++	 *   2. Fail lookbehind by breaking the run somewhere inside mask 1
++	 *      (indices 64-127)
++	 *   3. Ensure that we can still find free space in mask 1 afterwards
++	 */
++
++	/* break run on mask 2 */
++	rte_fbarray_set_used(&param.arr, 130);
++	/* break run on mask 1 */
++	rte_fbarray_set_used(&param.arr, 70);
++
++	/* starting from 190, we expect to find free space at index 8 */
++	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(&param.arr, 190, 62),
++			8, "Free chunk index is wrong\n");
++	return TEST_SUCCESS;
++}
+ 
+ static struct unit_test_suite fbarray_test_suite = {
+ 	.suite_name = "fbarray autotest",
+@@ -717,12 +875,19 @@ static struct unit_test_suite fbarray_test_suite = {
+ 	.unit_test_cases = {
+ 		TEST_CASE(test_invalid),
+ 		TEST_CASE(test_basic),
+-		TEST_CASE_ST(first_msk_test_setup, reset_array, test_find),
+-		TEST_CASE_ST(cross_msk_test_setup, reset_array, test_find),
+-		TEST_CASE_ST(multi_msk_test_setup, reset_array, test_find),
+-		TEST_CASE_ST(last_msk_test_setup, reset_array, test_find),
+-		TEST_CASE_ST(full_msk_test_setup, reset_array, test_find),
+-		TEST_CASE_ST(empty_msk_test_setup, reset_array, test_empty),
++		TEST_CASE_ST(first_msk_test_setup, reset_aligned, test_find),
++		TEST_CASE_ST(cross_msk_test_setup, reset_aligned, test_find),
++		TEST_CASE_ST(multi_msk_test_setup, reset_aligned, test_find),
++		TEST_CASE_ST(last_msk_test_setup, reset_aligned, test_find),
++		TEST_CASE_ST(full_msk_test_setup, reset_aligned, test_find),
++		/* empty test does not need setup */
++		TEST_CASE_ST(NULL, reset_aligned, test_empty),
++		TEST_CASE_ST(lookahead_test_setup, reset_aligned, test_lookahead),
++		TEST_CASE_ST(lookbehind_test_setup, reset_aligned, test_lookbehind),
++		/* setup for these tests is more complex so do it in test func */
++		TEST_CASE_ST(NULL, reset_aligned, test_lookahead_mask),
++		TEST_CASE_ST(NULL, reset_aligned, test_lookbehind_mask),
++		TEST_CASE_ST(unaligned_test_setup, reset_unaligned, test_find_unaligned),
+ 		TEST_CASES_END()
+ 	}
+ };
+diff --git a/dpdk/app/test/test_graph.c b/dpdk/app/test/test_graph.c
+index 3dd017ebfb..eb4f9a61e3 100644
+--- a/dpdk/app/test/test_graph.c
++++ b/dpdk/app/test/test_graph.c
+@@ -695,6 +695,77 @@ test_graph_clone(void)
+ 	return ret;
+ }
+ 
++static int
++test_graph_id_collisions(void)
++{
++	static const char *node_patterns[] = {"test_node_source1", "test_node00"};
++	struct rte_graph_param gconf = {
++		.socket_id = SOCKET_ID_ANY,
++		.nb_node_patterns = 2,
++		.node_patterns = node_patterns,
++	};
++	rte_graph_t g1, g2, g3, g4;
++
++	g1 = rte_graph_create("worker1", &gconf);
++	if (g1 == RTE_GRAPH_ID_INVALID) {
++		printf("Graph 1 creation failed with error = %d\n", rte_errno);
++		return -1;
++	}
++	g2 = rte_graph_create("worker2", &gconf);
++	if (g2 == RTE_GRAPH_ID_INVALID) {
++		printf("Graph 2 creation failed with error = %d\n", rte_errno);
++		return -1;
++	}
++	g3 = rte_graph_create("worker3", &gconf);
++	if (g3 == RTE_GRAPH_ID_INVALID) {
++		printf("Graph 3 creation failed with error = %d\n", rte_errno);
++		return -1;
++	}
++	if (g1 == g2 || g2 == g3 || g1 == g3) {
++		printf("Graph ids should be different\n");
++		return -1;
++	}
++	if (rte_graph_destroy(g2) < 0) {
++		printf("Graph 2 destruction failed\n");
++		return -1;
++	}
++	g4 = rte_graph_create("worker4", &gconf);
++	if (g4 == RTE_GRAPH_ID_INVALID) {
++		printf("Graph 4 creation failed with error = %d\n", rte_errno);
++		return -1;
++	}
++	if (g1 == g3 || g1 == g4 || g3 == g4) {
++		printf("Graph ids should be different\n");
++		return -1;
++	}
++	g2 = rte_graph_clone(g1, "worker2", &gconf);
++	if (g2 == RTE_GRAPH_ID_INVALID) {
++		printf("Graph 2 cloning failed with error = %d\n", rte_errno);
++		return -1;
++	}
++	if (g1 == g2 || g1 == g3 || g1 == g4 || g2 == g3 || g2 == g4 || g3 == g4) {
++		printf("Graph ids should be different\n");
++		return -1;
++	}
++	if (rte_graph_destroy(g1) < 0) {
++		printf("Graph 1 destruction failed\n");
++		return -1;
++	}
++	if (rte_graph_destroy(g2) < 0) {
++		printf("Graph 2 destruction failed\n");
++		return -1;
++	}
++	if (rte_graph_destroy(g3) < 0) {
++		printf("Graph 3 destruction failed\n");
++		return -1;
++	}
++	if (rte_graph_destroy(g4) < 0) {
++		printf("Graph 4 destruction failed\n");
++		return -1;
++	}
++	return 0;
++}
++
+ static int
+ test_graph_model_mcore_dispatch_node_lcore_affinity_set(void)
+ {
+@@ -976,6 +1047,7 @@ static struct unit_test_suite graph_testsuite = {
+ 		TEST_CASE(test_lookup_functions),
+ 		TEST_CASE(test_create_graph),
+ 		TEST_CASE(test_graph_clone),
++		TEST_CASE(test_graph_id_collisions),
+ 		TEST_CASE(test_graph_model_mcore_dispatch_node_lcore_affinity_set),
+ 		TEST_CASE(test_graph_model_mcore_dispatch_core_bind_unbind),
+ 		TEST_CASE(test_graph_worker_model_set_get),
+diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c
+index d7393df7eb..a39288a5f8 100644
+--- a/dpdk/app/test/test_mbuf.c
++++ b/dpdk/app/test/test_mbuf.c
+@@ -2345,16 +2345,13 @@ test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
+ 		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
+ 				__func__);
+ 
+-	/* allocate one more mbuf */
++	/* allocate one more mbuf; it is attached to the same external buffer */
+ 	clone = rte_pktmbuf_clone(m, pktmbuf_pool);
+ 	if (clone == NULL)
+ 		GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
+ 	if (rte_pktmbuf_pkt_len(clone) != 0)
+ 		GOTO_FAIL("%s: Bad packet length\n", __func__);
+ 
+-	/* attach the same external buffer to the cloned mbuf */
+-	rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
+-			ret_shinfo);
+ 	if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
+ 		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
+ 				__func__);
+diff --git a/dpdk/app/test/test_power.c b/dpdk/app/test/test_power.c
+index f1e80299d3..403adc22d6 100644
+--- a/dpdk/app/test/test_power.c
++++ b/dpdk/app/test/test_power.c
+@@ -143,7 +143,7 @@ test_power(void)
+ 		/* Test setting a valid environment */
+ 		ret = rte_power_set_env(envs[i]);
+ 		if (ret != 0) {
+-			printf("Unexpectedly unsucceeded on setting a valid environment\n");
++			printf("Unexpectedly failed to set a valid environment\n");
+ 			return -1;
+ 		}
+ 
+diff --git a/dpdk/app/test/test_power_intel_uncore.c b/dpdk/app/test/test_power_intel_uncore.c
+index 80b45ce46e..049658627d 100644
+--- a/dpdk/app/test/test_power_intel_uncore.c
++++ b/dpdk/app/test/test_power_intel_uncore.c
+@@ -17,14 +17,12 @@ test_power_intel_uncore(void)
+ #include <rte_power_uncore.h>
+ #include <power_common.h>
+ 
+-#define MAX_UNCORE_FREQS 32
+-
+ #define VALID_PKG 0
+ #define VALID_DIE 0
+ #define INVALID_PKG (rte_power_uncore_get_num_pkgs() + 1)
+ #define INVALID_DIE (rte_power_uncore_get_num_dies(VALID_PKG) + 1)
+ #define VALID_INDEX 1
+-#define INVALID_INDEX (MAX_UNCORE_FREQS + 1)
++#define INVALID_INDEX (RTE_MAX_UNCORE_FREQS + 1)
+ 
+ static int check_power_uncore_init(void)
+ {
+diff --git a/dpdk/buildtools/dpdk-cmdline-gen.py b/dpdk/buildtools/dpdk-cmdline-gen.py
+index 49b03bee4a..30d32ac183 100755
+--- a/dpdk/buildtools/dpdk-cmdline-gen.py
++++ b/dpdk/buildtools/dpdk-cmdline-gen.py
+@@ -71,8 +71,8 @@ def process_command(lineno, tokens, comment):
+         elif t_type in ["IP", "IP_ADDR", "IPADDR"]:
+             result_struct.append(f"\tcmdline_ipaddr_t {t_name};")
+             initializers.append(
+-                f"cmdline_parse_token_ipaddr_t cmd_{name}_{t_name}_tok =\n"
+-                f"\tTOKEN_IPV4_INITIALIZER(struct cmd_{name}_result, {t_name});"
++                f"static cmdline_parse_token_ipaddr_t cmd_{name}_{t_name}_tok =\n"
++                f"\tTOKEN_IPADDR_INITIALIZER(struct cmd_{name}_result, {t_name});"
+             )
+         elif t_type.startswith("(") and t_type.endswith(")"):
+             result_struct.append(f"\tcmdline_fixed_string_t {t_name};")
+diff --git a/dpdk/buildtools/get-test-suites.py b/dpdk/buildtools/get-test-suites.py
+index 574c233aa8..c61f6a273f 100644
+--- a/dpdk/buildtools/get-test-suites.py
++++ b/dpdk/buildtools/get-test-suites.py
+@@ -19,7 +19,7 @@ def get_fast_test_params(test_name, ln):
+     return f":{nohuge.strip().lower()}:{asan.strip().lower()}"
+ 
+ for fname in input_list:
+-    with open(fname) as f:
++    with open(fname, "r", encoding="utf-8") as f:
+         contents = [ln.strip() for ln in f.readlines()]
+         test_lines = [ln for ln in contents if test_def_regex.match(ln)]
+         non_suite_tests.extend([non_suite_regex.match(ln).group(1)
+diff --git a/dpdk/buildtools/map-list-symbol.sh b/dpdk/buildtools/map-list-symbol.sh
+index a834399816..878c5880df 100755
+--- a/dpdk/buildtools/map-list-symbol.sh
++++ b/dpdk/buildtools/map-list-symbol.sh
+@@ -5,6 +5,7 @@
+ section=all
+ symbol=all
+ quiet=
++version=
+ 
+ while getopts 'S:s:qV:' name; do
+ 	case $name in
+diff --git a/dpdk/buildtools/meson.build b/dpdk/buildtools/meson.build
+index 72447b60a0..3adf34e1a8 100644
+--- a/dpdk/buildtools/meson.build
++++ b/dpdk/buildtools/meson.build
+@@ -5,7 +5,11 @@ pkgconf = find_program('pkg-config', 'pkgconf', required: false)
+ check_symbols = find_program('check-symbols.sh')
+ ldflags_ibverbs_static = find_program('options-ibverbs-static.sh')
+ 
+-python3 = import('python').find_installation(required: false)
++python3_required_modules = []
++if host_machine.system() != 'windows'
++    python3_required_modules = ['elftools']
++endif
++python3 = import('python').find_installation('python3', required: false, modules: python3_required_modules)
+ if python3.found()
+     py3 = [python3]
+ else
+@@ -45,16 +49,3 @@ else
+     pmdinfogen += 'elf'
+ endif
+ 
+-# TODO: starting from Meson 0.51.0 use
+-#     python3 = import('python').find_installation('python',
+-#        modules : python3_required_modules)
+-python3_required_modules = []
+-if host_machine.system() != 'windows'
+-    python3_required_modules = ['elftools']
+-endif
+-foreach module : python3_required_modules
+-    script = 'import importlib.util; import sys; exit(importlib.util.find_spec("@0@") is None)'
+-    if run_command(py3, '-c', script.format(module), check: false).returncode() != 0
+-        error('missing python module: @0@'.format(module))
+-    endif
+-endforeach
+diff --git a/dpdk/buildtools/pmdinfogen.py b/dpdk/buildtools/pmdinfogen.py
+index 2a44f17bda..dfb89500c0 100755
+--- a/dpdk/buildtools/pmdinfogen.py
++++ b/dpdk/buildtools/pmdinfogen.py
+@@ -6,6 +6,7 @@
+ import argparse
+ import ctypes
+ import json
++import re
+ import sys
+ import tempfile
+ 
+@@ -66,11 +67,11 @@ class ELFImage:
+                 return [symbol]
+         return None
+ 
+-    def find_by_prefix(self, prefix):
+-        prefix = prefix.encode("utf-8") if self._legacy_elftools else prefix
++    def find_by_pattern(self, pattern):
++        pattern = pattern.encode("utf-8") if self._legacy_elftools else pattern
+         for i in range(self._symtab.num_symbols()):
+             symbol = self._symtab.get_symbol(i)
+-            if symbol.name.startswith(prefix):
++            if re.match(pattern, symbol.name):
+                 yield ELFSymbol(self._image, symbol)
+ 
+ 
+@@ -97,9 +98,9 @@ class COFFImage:
+     def is_big_endian(self):
+         return False
+ 
+-    def find_by_prefix(self, prefix):
++    def find_by_pattern(self, pattern):
+         for symbol in self._image.symbols:
+-            if symbol.name.startswith(prefix):
++            if re.match(pattern, symbol.name):
+                 yield COFFSymbol(self._image, symbol)
+ 
+     def find_by_name(self, name):
+@@ -199,7 +200,7 @@ class Driver:
+ 
+ def load_drivers(image):
+     drivers = []
+-    for symbol in image.find_by_prefix("this_pmd_name"):
++    for symbol in image.find_by_pattern("^this_pmd_name[0-9]+$"):
+         drivers.append(Driver.load(image, symbol))
+     return drivers
+ 
+diff --git a/dpdk/buildtools/subproject/meson.build b/dpdk/buildtools/subproject/meson.build
+index 3192efaa40..203c5d36c6 100644
+--- a/dpdk/buildtools/subproject/meson.build
++++ b/dpdk/buildtools/subproject/meson.build
+@@ -2,18 +2,23 @@
+ # Copyright(c) 2022 Intel Corporation
+ 
+ message('DPDK subproject linking: ' + get_option('default_library'))
++subproject_cflags = ['-include', 'rte_config.h'] + machine_args
++if is_freebsd
++    subproject_cflags += ['-D__BSD_VISIBLE']
++endif
+ if get_option('default_library') == 'static'
+     dpdk_dep = declare_dependency(
+             version: meson.project_version(),
+             dependencies: dpdk_static_lib_deps,
++            compile_args: subproject_cflags,
+             # static library deps in DPDK build don't include "link_with" parameters,
+             # so explicitly link-in both libs and drivers
+-            link_with: dpdk_static_libraries,
+-            link_whole: dpdk_drivers,
++            link_whole: dpdk_static_libraries + dpdk_drivers,
+             link_args: dpdk_extra_ldflags)
+ else
+     dpdk_dep = declare_dependency(
+             version: meson.project_version(),
++            compile_args: subproject_cflags,
+             # shared library deps include all necessary linking parameters
+             dependencies: dpdk_shared_lib_deps)
+ endif
+diff --git a/dpdk/config/arm/arm32_armv8_linux_gcc b/dpdk/config/arm/arm32_armv8_linux_gcc
+index 269a60ba19..abcb182b16 100644
+--- a/dpdk/config/arm/arm32_armv8_linux_gcc
++++ b/dpdk/config/arm/arm32_armv8_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'arm-linux-gnueabihf-g++']
+ ar = 'arm-linux-gnueabihf-gcc-ar'
+ strip = 'arm-linux-gnueabihf-strip'
+ pkgconfig = 'arm-linux-gnueabihf-pkg-config'
++pkg-config = 'arm-linux-gnueabihf-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_altra_linux_gcc b/dpdk/config/arm/arm64_altra_linux_gcc
+index ce0667ebe2..769503d936 100644
+--- a/dpdk/config/arm/arm64_altra_linux_gcc
++++ b/dpdk/config/arm/arm64_altra_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_ampereone_linux_gcc b/dpdk/config/arm/arm64_ampereone_linux_gcc
+index 8964432a74..bb6acfc4cf 100644
+--- a/dpdk/config/arm/arm64_ampereone_linux_gcc
++++ b/dpdk/config/arm/arm64_ampereone_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_armada_linux_gcc b/dpdk/config/arm/arm64_armada_linux_gcc
+index 635b4946a3..8f36d895da 100644
+--- a/dpdk/config/arm/arm64_armada_linux_gcc
++++ b/dpdk/config/arm/arm64_armada_linux_gcc
+@@ -5,6 +5,7 @@ ar = 'aarch64-linux-gnu-ar'
+ as = 'aarch64-linux-gnu-as'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu
+index 86ae43937b..e34fabebe5 100644
+--- a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu
++++ b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu
+@@ -6,6 +6,7 @@ strip = 'llvm-strip'
+ llvm-config = 'llvm-config'
+ pcap-config = 'llvm-config'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ 
+ [host_machine]
+ system = 'linux'
+diff --git a/dpdk/config/arm/arm64_armv8_linux_gcc b/dpdk/config/arm/arm64_armv8_linux_gcc
+index 529694b49d..a9b136cf48 100644
+--- a/dpdk/config/arm/arm64_armv8_linux_gcc
++++ b/dpdk/config/arm/arm64_armv8_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_bluefield3_linux_gcc b/dpdk/config/arm/arm64_bluefield3_linux_gcc
+index 775cf5883d..d900e72a6d 100644
+--- a/dpdk/config/arm/arm64_bluefield3_linux_gcc
++++ b/dpdk/config/arm/arm64_bluefield3_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_bluefield_linux_gcc b/dpdk/config/arm/arm64_bluefield_linux_gcc
+index 1286227915..bcffb6534b 100644
+--- a/dpdk/config/arm/arm64_bluefield_linux_gcc
++++ b/dpdk/config/arm/arm64_bluefield_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_cdx_linux_gcc b/dpdk/config/arm/arm64_cdx_linux_gcc
+index 8e6d619dae..bf494b776e 100644
+--- a/dpdk/config/arm/arm64_cdx_linux_gcc
++++ b/dpdk/config/arm/arm64_cdx_linux_gcc
+@@ -5,6 +5,7 @@ ar = 'aarch64-linux-gnu-ar'
+ as = 'aarch64-linux-gnu-as'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_centriq2400_linux_gcc b/dpdk/config/arm/arm64_centriq2400_linux_gcc
+index bc8737e072..33cb5ef675 100644
+--- a/dpdk/config/arm/arm64_centriq2400_linux_gcc
++++ b/dpdk/config/arm/arm64_centriq2400_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_cn10k_linux_gcc b/dpdk/config/arm/arm64_cn10k_linux_gcc
+index fa904af5d0..63fcca9cbe 100644
+--- a/dpdk/config/arm/arm64_cn10k_linux_gcc
++++ b/dpdk/config/arm/arm64_cn10k_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ cmake = 'cmake'
+ 
+diff --git a/dpdk/config/arm/arm64_cn9k_linux_gcc b/dpdk/config/arm/arm64_cn9k_linux_gcc
+index 646ce4b5d3..fa4618e370 100644
+--- a/dpdk/config/arm/arm64_cn9k_linux_gcc
++++ b/dpdk/config/arm/arm64_cn9k_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ cmake = 'cmake'
+ 
+diff --git a/dpdk/config/arm/arm64_dpaa_linux_gcc b/dpdk/config/arm/arm64_dpaa_linux_gcc
+index 8465b5097b..bf0eab18e2 100644
+--- a/dpdk/config/arm/arm64_dpaa_linux_gcc
++++ b/dpdk/config/arm/arm64_dpaa_linux_gcc
+@@ -5,6 +5,7 @@ ar = 'aarch64-linux-gnu-ar'
+ as = 'aarch64-linux-gnu-as'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_emag_linux_gcc b/dpdk/config/arm/arm64_emag_linux_gcc
+index 248169ed68..3e8c15f8a4 100644
+--- a/dpdk/config/arm/arm64_emag_linux_gcc
++++ b/dpdk/config/arm/arm64_emag_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_ft2000plus_linux_gcc b/dpdk/config/arm/arm64_ft2000plus_linux_gcc
+index ae9f779056..4ecb251d3f 100644
+--- a/dpdk/config/arm/arm64_ft2000plus_linux_gcc
++++ b/dpdk/config/arm/arm64_ft2000plus_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_graviton2_linux_gcc b/dpdk/config/arm/arm64_graviton2_linux_gcc
+index fdb298bb11..b5f681bc27 100644
+--- a/dpdk/config/arm/arm64_graviton2_linux_gcc
++++ b/dpdk/config/arm/arm64_graviton2_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_graviton3_linux_gcc b/dpdk/config/arm/arm64_graviton3_linux_gcc
+index 19b422075d..77b5168836 100644
+--- a/dpdk/config/arm/arm64_graviton3_linux_gcc
++++ b/dpdk/config/arm/arm64_graviton3_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_hip10_linux_gcc b/dpdk/config/arm/arm64_hip10_linux_gcc
+index 2943e4abbf..85bcd05887 100644
+--- a/dpdk/config/arm/arm64_hip10_linux_gcc
++++ b/dpdk/config/arm/arm64_hip10_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_kunpeng920_linux_gcc b/dpdk/config/arm/arm64_kunpeng920_linux_gcc
+index 193fb48a61..6e7d8ac667 100644
+--- a/dpdk/config/arm/arm64_kunpeng920_linux_gcc
++++ b/dpdk/config/arm/arm64_kunpeng920_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_kunpeng930_linux_gcc b/dpdk/config/arm/arm64_kunpeng930_linux_gcc
+index e4281ceb4f..61fe482a2e 100644
+--- a/dpdk/config/arm/arm64_kunpeng930_linux_gcc
++++ b/dpdk/config/arm/arm64_kunpeng930_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_n1sdp_linux_gcc b/dpdk/config/arm/arm64_n1sdp_linux_gcc
+index 2806a4241b..c3c12098d8 100644
+--- a/dpdk/config/arm/arm64_n1sdp_linux_gcc
++++ b/dpdk/config/arm/arm64_n1sdp_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_n2_linux_gcc b/dpdk/config/arm/arm64_n2_linux_gcc
+index 7404bd197b..89200861c4 100644
+--- a/dpdk/config/arm/arm64_n2_linux_gcc
++++ b/dpdk/config/arm/arm64_n2_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_stingray_linux_gcc b/dpdk/config/arm/arm64_stingray_linux_gcc
+index 08148b5c3d..358f3b43d3 100644
+--- a/dpdk/config/arm/arm64_stingray_linux_gcc
++++ b/dpdk/config/arm/arm64_stingray_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_thunderx2_linux_gcc b/dpdk/config/arm/arm64_thunderx2_linux_gcc
+index 32ae938e95..124a97da01 100644
+--- a/dpdk/config/arm/arm64_thunderx2_linux_gcc
++++ b/dpdk/config/arm/arm64_thunderx2_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_thunderxt83_linux_gcc b/dpdk/config/arm/arm64_thunderxt83_linux_gcc
+index e9d9e62d44..433c3cb4e3 100644
+--- a/dpdk/config/arm/arm64_thunderxt83_linux_gcc
++++ b/dpdk/config/arm/arm64_thunderxt83_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_thunderxt88_linux_gcc b/dpdk/config/arm/arm64_thunderxt88_linux_gcc
+index c6e5a5656a..81975aea14 100644
+--- a/dpdk/config/arm/arm64_thunderxt88_linux_gcc
++++ b/dpdk/config/arm/arm64_thunderxt88_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/arm/arm64_tys2500_linux_gcc b/dpdk/config/arm/arm64_tys2500_linux_gcc
+index fce85fb0d8..24e6539a15 100644
+--- a/dpdk/config/arm/arm64_tys2500_linux_gcc
++++ b/dpdk/config/arm/arm64_tys2500_linux_gcc
+@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++']
+ ar = 'aarch64-linux-gnu-gcc-ar'
+ strip = 'aarch64-linux-gnu-strip'
+ pkgconfig = 'aarch64-linux-gnu-pkg-config'
++pkg-config = 'aarch64-linux-gnu-pkg-config'
+ pcap-config = ''
+ 
+ [host_machine]
+diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build
+index a9ccd56deb..898b719929 100644
+--- a/dpdk/config/meson.build
++++ b/dpdk/config/meson.build
+@@ -121,13 +121,14 @@ else
+             cpu_instruction_set = 'generic'
+         endif
+     endif
++    if platform == 'native'
++        if cpu_instruction_set == 'auto'
++            cpu_instruction_set = 'native'
++        endif
++    endif
+ endif
+ 
+-if platform == 'native'
+-    if cpu_instruction_set == 'auto'
+-        cpu_instruction_set = 'native'
+-    endif
+-elif platform == 'generic'
++if platform == 'generic'
+     if cpu_instruction_set == 'auto'
+         cpu_instruction_set = 'generic'
+     endif
+@@ -188,6 +189,9 @@ dpdk_conf.set('RTE_ARCH_32', cc.sizeof('void *') == 4)
+ 
+ if not is_windows
+     add_project_link_arguments('-Wl,--no-as-needed', language: 'c')
++    if cc.has_link_argument('-Wl,--undefined-version')
++        add_project_link_arguments('-Wl,--undefined-version', language: 'c')
++    endif
+ endif
+ 
+ # use pthreads if available for the platform
+diff --git a/dpdk/config/x86/cross-mingw b/dpdk/config/x86/cross-mingw
+index cddebda5b5..11597eaa26 100644
+--- a/dpdk/config/x86/cross-mingw
++++ b/dpdk/config/x86/cross-mingw
+@@ -5,6 +5,7 @@ ld = 'x86_64-w64-mingw32-ld'
+ ar = 'x86_64-w64-mingw32-ar'
+ strip = 'x86_64-w64-mingw32-strip'
+ pkgconfig = 'x86_64-w64-mingw32-pkg-config'
++pkg-config = 'x86_64-w64-mingw32-pkg-config'
+ objdump = 'x86_64-w64-mingw32-objdump'
+ 
+ [host_machine]
+diff --git a/dpdk/doc/api/doxy-api-index.md b/dpdk/doc/api/doxy-api-index.md
+index a6a768bd7c..29eaad6523 100644
+--- a/dpdk/doc/api/doxy-api-index.md
++++ b/dpdk/doc/api/doxy-api-index.md
+@@ -102,6 +102,7 @@ The public API headers are grouped by topics:
+   [service cores](@ref rte_service.h),
+   [keepalive](@ref rte_keepalive.h),
+   [power/freq](@ref rte_power.h),
++  [power/uncore](@ref rte_power_uncore.h),
+   [PMD power](@ref rte_power_pmd_mgmt.h)
+ 
+ - **layers**:
+diff --git a/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst b/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst
+index 956dd6bed5..99fc936829 100644
+--- a/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst
++++ b/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst
+@@ -100,7 +100,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
+       uint8_t dl_bandwidth;
+       uint8_t ul_load_balance;
+       uint8_t dl_load_balance;
+-      uint16_t flr_time_out;
+   };
+ 
+ - ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. PF and
+@@ -126,10 +125,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
+   If all hardware queues exceeds the watermark, no code blocks will be
+   streamed in from UL/DL code block FIFO.
+ 
+-- ``flr_time_out``: specifies how many 16.384us to be FLR time out. The
+-  time_out = flr_time_out x 16.384us. For instance, if you want to set 10ms for
+-  the FLR time out then set this setting to 0x262=610.
+-
+ 
+ An example configuration code calling the function ``rte_fpga_5gnr_fec_configure()`` is shown
+ below:
+@@ -154,7 +149,7 @@ below:
+   /* setup FPGA PF */
+   ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf);
+   TEST_ASSERT_SUCCESS(ret,
+-      "Failed to configure 4G FPGA PF for bbdev %s",
++      "Failed to configure 5GNR FPGA PF for bbdev %s",
+       info->dev_name);
+ 
+ 
+diff --git a/dpdk/doc/guides/cryptodevs/cnxk.rst b/dpdk/doc/guides/cryptodevs/cnxk.rst
+index fbe67475be..89a6556c1d 100644
+--- a/dpdk/doc/guides/cryptodevs/cnxk.rst
++++ b/dpdk/doc/guides/cryptodevs/cnxk.rst
+@@ -197,7 +197,7 @@ Debugging Options
+     +---+------------+-------------------------------------------------------+
+     | # | Component  | EAL log command                                       |
+     +===+============+=======================================================+
+-    | 1 | CPT        | --log-level='pmd\.crypto\.cnxk,8'                     |
++    | 1 | CPT        | --log-level='pmd\.common\.cnxk\.crypto,8'             |
+     +---+------------+-------------------------------------------------------+
+ 
+ Testing
+diff --git a/dpdk/doc/guides/cryptodevs/overview.rst b/dpdk/doc/guides/cryptodevs/overview.rst
+index d754b0cfc6..b068d0d19c 100644
+--- a/dpdk/doc/guides/cryptodevs/overview.rst
++++ b/dpdk/doc/guides/cryptodevs/overview.rst
+@@ -20,17 +20,17 @@ Supported Feature Flags
+    - "OOP SGL In SGL Out" feature flag stands for
+      "Out-of-place Scatter-gather list Input, Scatter-gather list Output",
+      which means PMD supports different scatter-gather styled input and output buffers
+-     (i.e. both can consists of multiple segments).
++     (i.e. both can consist of multiple segments).
+ 
+    - "OOP SGL In LB Out" feature flag stands for
+      "Out-of-place Scatter-gather list Input, Linear Buffers Output",
+-     which means PMD supports input from scatter-gathered styled buffers,
++     which means PMD supports input from scatter-gather styled buffers,
+      outputting linear buffers (i.e. single segment).
+ 
+    - "OOP LB In SGL Out" feature flag stands for
+      "Out-of-place Linear Buffers Input, Scatter-gather list Output",
+      which means PMD supports input from linear buffer, outputting
+-     scatter-gathered styled buffers.
++     scatter-gather styled buffers.
+ 
+    - "OOP LB In LB Out" feature flag stands for
+      "Out-of-place Linear Buffers Input, Linear Buffers Output",
+diff --git a/dpdk/doc/guides/dmadevs/hisilicon.rst b/dpdk/doc/guides/dmadevs/hisilicon.rst
+index 8c1f0f8886..974bc49376 100644
+--- a/dpdk/doc/guides/dmadevs/hisilicon.rst
++++ b/dpdk/doc/guides/dmadevs/hisilicon.rst
+@@ -13,7 +13,6 @@ Supported Kunpeng SoCs
+ ----------------------
+ 
+ * Kunpeng 920
+-* Kunpeng 930
+ 
+ 
+ Device Setup
+diff --git a/dpdk/doc/guides/eventdevs/cnxk.rst b/dpdk/doc/guides/eventdevs/cnxk.rst
+index cccb8a0304..c259d37109 100644
+--- a/dpdk/doc/guides/eventdevs/cnxk.rst
++++ b/dpdk/doc/guides/eventdevs/cnxk.rst
+@@ -208,9 +208,9 @@ Debugging Options
+    +---+------------+-------------------------------------------------------+
+    | # | Component  | EAL log command                                       |
+    +===+============+=======================================================+
+-   | 1 | SSO        | --log-level='pmd\.event\.cnxk,8'                      |
++   | 1 | SSO        | --log-level='pmd\.common\.cnxk\.event,8'              |
+    +---+------------+-------------------------------------------------------+
+-   | 2 | TIM        | --log-level='pmd\.event\.cnxk\.timer,8'               |
++   | 2 | TIM        | --log-level='pmd\.common\.cnxk\.timer,8'              |
+    +---+------------+-------------------------------------------------------+
+ 
+ Limitations
+diff --git a/dpdk/doc/guides/howto/af_xdp_cni.rst b/dpdk/doc/guides/howto/af_xdp_cni.rst
+deleted file mode 100644
+index a1a6d5b99c..0000000000
+--- a/dpdk/doc/guides/howto/af_xdp_cni.rst
++++ /dev/null
+@@ -1,253 +0,0 @@
+-.. SPDX-License-Identifier: BSD-3-Clause
+-   Copyright(c) 2023 Intel Corporation.
+-
+-Using a CNI with the AF_XDP driver
+-==================================
+-
+-Introduction
+-------------
+-
+-CNI, the Container Network Interface, is a technology for configuring
+-container network interfaces
+-and which can be used to setup Kubernetes networking.
+-AF_XDP is a Linux socket Address Family that enables an XDP program
+-to redirect packets to a memory buffer in userspace.
+-
+-This document explains how to enable the `AF_XDP Plugin for Kubernetes`_ within
+-a DPDK application using the :doc:`../nics/af_xdp` to connect and use these technologies.
+-
+-.. _AF_XDP Plugin for Kubernetes: https://github.com/intel/afxdp-plugins-for-kubernetes
+-
+-
+-Background
+-----------
+-
+-The standard :doc:`../nics/af_xdp` initialization process involves loading an eBPF program
+-onto the kernel netdev to be used by the PMD.
+-This operation requires root or escalated Linux privileges
+-and thus prevents the PMD from working in an unprivileged container.
+-The AF_XDP CNI plugin handles this situation
+-by providing a device plugin that performs the program loading.
+-
+-At a technical level the CNI opens a Unix Domain Socket and listens for a client
+-to make requests over that socket.
+-A DPDK application acting as a client connects and initiates a configuration "handshake".
+-The client then receives a file descriptor which points to the XSKMAP
+-associated with the loaded eBPF program.
+-The XSKMAP is a BPF map of AF_XDP sockets (XSK).
+-The client can then proceed with creating an AF_XDP socket
+-and inserting that socket into the XSKMAP pointed to by the descriptor.
+-
+-The EAL vdev argument ``use_cni`` is used to indicate that the user wishes
+-to run the PMD in unprivileged mode and to receive the XSKMAP file descriptor
+-from the CNI.
+-When this flag is set,
+-the ``XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD`` libbpf flag
+-should be used when creating the socket
+-to instruct libbpf not to load the default libbpf program on the netdev.
+-Instead the loading is handled by the CNI.
+-
+-.. note::
+-
+-   The Unix Domain Socket file path appear in the end user is "/tmp/afxdp.sock".
+-
+-
+-Prerequisites
+--------------
+-
+-Docker and container prerequisites:
+-
+-* Set up the device plugin
+-  as described in the instructions for `AF_XDP Plugin for Kubernetes`_.
+-
+-* The Docker image should contain the libbpf and libxdp libraries,
+-  which are dependencies for AF_XDP,
+-  and should include support for the ``ethtool`` command.
+-
+-* The Pod should have enabled the capabilities ``CAP_NET_RAW`` and ``CAP_BPF``
+-  for AF_XDP along with support for hugepages.
+-
+-* Increase locked memory limit so containers have enough memory for packet buffers.
+-  For example:
+-
+-  .. code-block:: console
+-
+-     cat << EOF | sudo tee /etc/systemd/system/containerd.service.d/limits.conf
+-     [Service]
+-     LimitMEMLOCK=infinity
+-     EOF
+-
+-* dpdk-testpmd application should have AF_XDP feature enabled.
+-
+-  For further information see the docs for the: :doc:`../../nics/af_xdp`.
+-
+-
+-Example
+--------
+-
+-Howto run dpdk-testpmd with CNI plugin:
+-
+-* Clone the CNI plugin
+-
+-  .. code-block:: console
+-
+-     # git clone https://github.com/intel/afxdp-plugins-for-kubernetes.git
+-
+-* Build the CNI plugin
+-
+-  .. code-block:: console
+-
+-     # cd afxdp-plugins-for-kubernetes/
+-     # make build
+-
+-  .. note::
+-
+-     CNI plugin has a dependence on the config.json.
+-
+-  Sample Config.json
+-
+-  .. code-block:: json
+-
+-     {
+-        "logLevel":"debug",
+-        "logFile":"afxdp-dp-e2e.log",
+-        "pools":[
+-           {
+-              "name":"e2e",
+-              "mode":"primary",
+-              "timeout":30,
+-              "ethtoolCmds" : ["-L -device- combined 1"],
+-              "devices":[
+-                 {
+-                    "name":"ens785f0"
+-                 }
+-              ]
+-           }
+-        ]
+-     }
+-
+-  For further reference please use the `config.json`_
+-
+-  .. _config.json: https://github.com/intel/afxdp-plugins-for-kubernetes/blob/v0.0.2/test/e2e/config.json
+-
+-* Create the Network Attachment definition
+-
+-  .. code-block:: console
+-
+-     # kubectl create -f nad.yaml
+-
+-  Sample nad.yml
+-
+-  .. code-block:: yaml
+-
+-      apiVersion: "k8s.cni.cncf.io/v1"
+-      kind: NetworkAttachmentDefinition
+-      metadata:
+-        name: afxdp-e2e-test
+-        annotations:
+-          k8s.v1.cni.cncf.io/resourceName: afxdp/e2e
+-      spec:
+-        config: '{
+-            "cniVersion": "0.3.0",
+-            "type": "afxdp",
+-            "mode": "cdq",
+-            "logFile": "afxdp-cni-e2e.log",
+-            "logLevel": "debug",
+-            "ipam": {
+-              "type": "host-local",
+-              "subnet": "192.168.1.0/24",
+-              "rangeStart": "192.168.1.200",
+-              "rangeEnd": "192.168.1.216",
+-              "routes": [
+-                { "dst": "0.0.0.0/0" }
+-              ],
+-              "gateway": "192.168.1.1"
+-            }
+-          }'
+-
+-  For further reference please use the `nad.yaml`_
+-
+-  .. _nad.yaml: https://github.com/intel/afxdp-plugins-for-kubernetes/blob/v0.0.2/test/e2e/nad.yaml
+-
+-* Build the Docker image
+-
+-  .. code-block:: console
+-
+-     # docker build -t afxdp-e2e-test -f Dockerfile .
+-
+-  Sample Dockerfile:
+-
+-  .. code-block:: console
+-
+-     FROM ubuntu:20.04
+-     RUN apt-get update -y
+-     RUN apt install build-essential libelf-dev -y
+-     RUN apt-get install iproute2  acl -y
+-     RUN apt install python3-pyelftools ethtool -y
+-     RUN apt install libnuma-dev libjansson-dev libpcap-dev net-tools -y
+-     RUN apt-get install clang llvm -y
+-     COPY ./libbpf<version>.tar.gz /tmp
+-     RUN cd /tmp && tar -xvmf libbpf<version>.tar.gz && cd libbpf/src && make install
+-     COPY ./libxdp<version>.tar.gz /tmp
+-     RUN cd /tmp && tar -xvmf libxdp<version>.tar.gz && cd libxdp && make install
+-
+-  .. note::
+-
+-     All the files that need to COPY-ed should be in the same directory as the Dockerfile
+-
+-* Run the Pod
+-
+-  .. code-block:: console
+-
+-     # kubectl create -f pod.yaml
+-
+-  Sample pod.yaml:
+-
+-  .. code-block:: yaml
+-
+-     apiVersion: v1
+-     kind: Pod
+-     metadata:
+-       name: afxdp-e2e-test
+-       annotations:
+-         k8s.v1.cni.cncf.io/networks: afxdp-e2e-test
+-     spec:
+-       containers:
+-       - name: afxdp
+-         image: afxdp-e2e-test:latest
+-         imagePullPolicy: Never
+-         env:
+-         - name: LD_LIBRARY_PATH
+-           value: /usr/lib64/:/usr/local/lib/
+-         command: ["tail", "-f", "/dev/null"]
+-         securityContext:
+-          capabilities:
+-             add:
+-               - CAP_NET_RAW
+-               - CAP_BPF
+-         resources:
+-           requests:
+-             hugepages-2Mi: 2Gi
+-             memory: 2Gi
+-             afxdp/e2e: '1'
+-           limits:
+-             hugepages-2Mi: 2Gi
+-             memory: 2Gi
+-             afxdp/e2e: '1'
+-
+-  For further reference please use the `pod.yaml`_
+-
+-  .. _pod.yaml: https://github.com/intel/afxdp-plugins-for-kubernetes/blob/v0.0.2/test/e2e/pod-1c1d.yaml
+-
+-* Run DPDK with a command like the following:
+-
+-  .. code-block:: console
+-
+-     kubectl exec -i <Pod name> --container <containers name> -- \
+-           /<Path>/dpdk-testpmd -l 0,1 --no-pci \
+-           --vdev=net_af_xdp0,use_cni=1,iface=<interface name> \
+-           -- --no-mlockall --in-memory
+-
+-For further reference please use the `e2e`_ test case in `AF_XDP Plugin for Kubernetes`_
+-
+-  .. _e2e: https://github.com/intel/afxdp-plugins-for-kubernetes/tree/v0.0.2/test/e2e
+diff --git a/dpdk/doc/guides/howto/af_xdp_dp.rst b/dpdk/doc/guides/howto/af_xdp_dp.rst
+new file mode 100644
+index 0000000000..4ca5462c69
+--- /dev/null
++++ b/dpdk/doc/guides/howto/af_xdp_dp.rst
+@@ -0,0 +1,323 @@
++.. SPDX-License-Identifier: BSD-3-Clause
++   Copyright(c) 2023 Intel Corporation.
++
++Using the AF_XDP driver in Kubernetes
++=====================================
++
++Introduction
++------------
++
++Two infrastructure components are needed to provision a pod
++that uses the AF_XDP PMD in Kubernetes:
++
++1. AF_XDP Device Plugin (DP).
++2. AF_XDP Container Network Interface (CNI) binary.
++
++Both of these components are available through
++the `AF_XDP Device Plugin for Kubernetes`_ repository.
++
++The AF_XDP DP provisions and advertises networking interfaces to Kubernetes,
++while the CNI configures and plumbs network interfaces for the Pod.
++
++This document explains how to use the `AF_XDP Device Plugin for Kubernetes`_
++with a DPDK application using the :doc:`../nics/af_xdp`.
++
++.. _AF_XDP Device Plugin for Kubernetes: https://github.com/redhat-et/afxdp-plugins-for-kubernetes
++
++
++Background
++----------
++
++The standard :doc:`../nics/af_xdp` initialization process involves loading an eBPF program
++onto the kernel netdev to be used by the PMD.
++This operation requires root or escalated Linux privileges
++and thus prevents the PMD from working in an unprivileged container.
++The AF_XDP Device Plugin handles this situation
++by managing the eBPF program(s) on behalf of the Pod, outside of the pod context.
++
++At a technical level the AF_XDP Device Plugin opens a Unix Domain Socket (UDS)
++and listens for a client to make requests over that socket.
++A DPDK application acting as a client connects and initiates a configuration "handshake".
++After some validation on the Device Plugin side,
++the client receives a file descriptor which points to the XSKMAP
++associated with the loaded eBPF program.
++The XSKMAP is an eBPF map of AF_XDP sockets (XSK).
++The client can then proceed with creating an AF_XDP socket
++and inserting that socket into the XSKMAP pointed to by the descriptor.
++
++The EAL vdev argument ``use_cni`` is used to indicate that the user wishes
++to run the PMD in unprivileged mode and to receive the XSKMAP file descriptor
++from the AF_XDP Device Plugin.
++When this flag is set,
++the ``XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD`` libbpf flag
++should be used when creating the socket
++to instruct libbpf not to load the default libbpf program on the netdev.
++Instead the loading is handled by the AF_XDP Device Plugin.
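++
++As a rough illustration of this flow (not the PMD's exact implementation),
++the client-side steps could look like the sketch below. It assumes the XSKMAP
++file descriptor ``xskmap_fd`` was already received over the UDS handshake and
++that ``ifname``, ``queue_id``, ``umem`` and the ``rx``/``tx`` rings are set up
++as usual. Depending on the libbpf/libxdp version, the xsk helpers live in
++``<bpf/xsk.h>`` or ``<xdp/xsk.h>`` (with ``XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD``).
++
++.. code-block:: c
++
++   #include <bpf/bpf.h>   /* bpf_map_update_elem() */
++   #include <bpf/xsk.h>   /* AF_XDP socket helpers */
++
++   struct xsk_socket *xsk;
++   struct xsk_socket_config cfg = {
++       .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
++       .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
++       /* The Device Plugin owns the eBPF program; do not load one here. */
++       .libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
++   };
++
++   if (xsk_socket__create(&xsk, ifname, queue_id, umem, &rx, &tx, &cfg) == 0) {
++       /* Insert the new AF_XDP socket into the XSKMAP we were handed. */
++       int fd = xsk_socket__fd(xsk);
++       bpf_map_update_elem(xskmap_fd, &queue_id, &fd, 0);
++   }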
++
++The EAL vdev argument ``dp_path`` is used alongside the ``use_cni`` argument
++to explicitly tell the AF_XDP PMD where to find the UDS
++to interact with the AF_XDP Device Plugin.
++If this argument is not passed alongside the ``use_cni`` argument
++then the AF_XDP PMD configures it internally.
++
++.. note::
++
++   DPDK AF_XDP PMD <= v23.11 will only work with
++   the AF_XDP Device Plugin <= commit id `38317c2`_.
++
++.. note::
++
++   DPDK AF_XDP PMD > v23.11 will work with the latest version of the AF_XDP Device Plugin
++   through a combination of the ``dp_path`` and/or the ``use_cni`` parameters.
++   In these versions of the PMD, if a user doesn't explicitly set the ``dp_path`` parameter
++   when using ``use_cni``, then that path is transparently configured in the AF_XDP PMD
++   to the default `AF_XDP Device Plugin for Kubernetes`_ mount point path.
++   The path can be overridden by explicitly setting the ``dp_path`` parameter.
++
++.. note::
++
++   DPDK AF_XDP PMD > v23.11 is backwards compatible
++   with (older) versions of the AF_XDP DP <= commit id `38317c2`_
++   by explicitly setting ``dp_path`` to ``/tmp/afxdp.sock``.
++
++.. _38317c2: https://github.com/redhat-et/afxdp-plugins-for-kubernetes/commit/38317c256b5c7dfb39e013a0f76010c2ded03669
++
++Prerequisites
++-------------
++
++Device Plugin and DPDK container prerequisites:
++
++* Create a DPDK container image.
++
++* Set up the device plugin and prepare the Pod Spec as described in
++  the instructions for `AF_XDP Device Plugin for Kubernetes`_.
++
++* The Docker image should contain the libbpf and libxdp libraries,
++  which are dependencies for AF_XDP,
++  and should include support for the ``ethtool`` command.
++
++* The Pod should have the capabilities
++  ``CAP_NET_RAW`` (for AF_XDP socket creation),
++  ``IPC_LOCK`` (for umem creation) and
++  ``CAP_BPF`` (for kernels < 5.19) enabled, along with support for hugepages.
++
++  .. note::
++
++     For kernel versions < 5.19, all BPF system calls require CAP_BPF
++     in order to access maps shared between the eBPF program and the userspace program.
++     Kernels >= 5.19 only require CAP_BPF for map creation (BPF_MAP_CREATE)
++     and for loading programs (BPF_PROG_LOAD).
++
++* Increase locked memory limit so containers have enough memory for packet buffers.
++  For example:
++
++  .. code-block:: console
++
++     cat << EOF | sudo tee /etc/systemd/system/containerd.service.d/limits.conf
++     [Service]
++     LimitMEMLOCK=infinity
++     EOF
++
++* The dpdk-testpmd application should have the AF_XDP feature enabled.
++
++  For further information see the docs for the :doc:`../nics/af_xdp`.
++
++
++Example
++-------
++
++Build a DPDK container image (using Docker)
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++1. Create a Dockerfile (to be placed in the top-level DPDK directory):
++
++   .. code-block:: console
++
++      FROM fedora:38
++
++      # Setup container to build DPDK applications
++      RUN dnf -y upgrade && dnf -y install \
++          libbsd-devel \
++          numactl-libs \
++          libbpf-devel \
++          libbpf \
++          meson \
++          ninja-build \
++          libxdp-devel \
++          libxdp \
++          numactl-devel \
++          python3-pyelftools \
++          python38 \
++          iproute
++      RUN dnf groupinstall -y 'Development Tools'
++
++      # Create DPDK dir and copy over sources
++      COPY ./ /dpdk
++      WORKDIR /dpdk
++
++      # Build DPDK
++      RUN meson setup build
++      RUN ninja -C build
++
++2. Build a DPDK container image (using Docker)
++
++   .. code-block:: console
++
++      # docker build -t dpdk -f Dockerfile .
++
++Run dpdk-testpmd with the AF_XDP Device Plugin + CNI
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++* Clone the AF_XDP Device plugin and CNI
++
++  .. code-block:: console
++
++     # git clone https://github.com/redhat-et/afxdp-plugins-for-kubernetes.git
++
++  .. note::
++
++     Ensure you have the AF_XDP Device Plugin + CNI prerequisites installed.
++
++* Build the AF_XDP Device plugin and CNI
++
++  .. code-block:: console
++
++     # cd afxdp-plugins-for-kubernetes/
++     # make image
++
++* Make sure to modify the image used by the `daemonset.yml`_ file
++  in the deployments directory with the following configuration:
++
++  .. _daemonset.yml : https://github.com/redhat-et/afxdp-plugins-for-kubernetes/blob/main/deployments/daemonset.yml
++
++  .. code-block:: yaml
++
++     image: afxdp-device-plugin:latest
++
++  .. note::
++
++     This will select the AF_XDP DP image that was built locally.
++     Detailed configuration options can be found in the AF_XDP Device Plugin `readme`_ .
++
++  .. _readme: https://github.com/redhat-et/afxdp-plugins-for-kubernetes#readme
++
++* Deploy the AF_XDP Device Plugin and CNI
++
++  .. code-block:: console
++
++     # kubectl create -f deployments/daemonset.yml
++
++* Create the Network Attachment definition
++
++  .. code-block:: console
++
++     # kubectl create -f nad.yaml
++
++  Sample nad.yaml:
++
++  .. code-block:: yaml
++
++     apiVersion: "k8s.cni.cncf.io/v1"
++     kind: NetworkAttachmentDefinition
++     metadata:
++       name: afxdp-network
++       annotations:
++         k8s.v1.cni.cncf.io/resourceName: afxdp/myPool
++     spec:
++       config: '{
++           "cniVersion": "0.3.0",
++           "type": "afxdp",
++           "mode": "primary",
++           "logFile": "afxdp-cni.log",
++           "logLevel": "debug",
++           "ethtoolCmds" : ["-N -device- rx-flow-hash udp4 fn",
++                            "-N -device- flow-type udp4 dst-port 2152 action 22"
++                         ],
++           "ipam": {
++             "type": "host-local",
++             "subnet": "192.168.1.0/24",
++             "rangeStart": "192.168.1.200",
++             "rangeEnd": "192.168.1.220",
++             "routes": [
++               { "dst": "0.0.0.0/0" }
++             ],
++             "gateway": "192.168.1.1"
++           }
++         }'
++
++  For further reference please use the example provided by the AF_XDP DP `nad.yaml`_
++
++  .. _nad.yaml: https://github.com/redhat-et/afxdp-plugins-for-kubernetes/blob/main/examples/network-attachment-definition.yaml
++
++* Run the Pod
++
++  .. code-block:: console
++
++     # kubectl create -f pod.yaml
++
++  Sample pod.yaml:
++
++  .. code-block:: yaml
++
++     apiVersion: v1
++     kind: Pod
++     metadata:
++       name: dpdk
++       annotations:
++         k8s.v1.cni.cncf.io/networks: afxdp-network
++     spec:
++       containers:
++       - name: testpmd
++         image: dpdk:latest
++         command: ["tail", "-f", "/dev/null"]
++         securityContext:
++           capabilities:
++             add:
++               - NET_RAW
++               - IPC_LOCK
++         resources:
++           requests:
++             afxdp/myPool: '1'
++           limits:
++             hugepages-1Gi: 2Gi
++             cpu: 2
++             memory: 256Mi
++             afxdp/myPool: '1'
++         volumeMounts:
++         - name: hugepages
++           mountPath: /dev/hugepages
++       volumes:
++       - name: hugepages
++         emptyDir:
++           medium: HugePages
++
++  For further reference please see the `pod.yaml`_
++
++  .. _pod.yaml: https://github.com/redhat-et/afxdp-plugins-for-kubernetes/blob/main/examples/pod-spec.yaml
++
++* Run DPDK with a command like the following:
++
++  .. code-block:: console
++
++     kubectl exec -i <Pod name> --container <containers name> -- \
++           /<Path>/dpdk-testpmd -l 0,1 --no-pci \
++           --vdev=net_af_xdp0,use_cni=1,iface=<interface name> \
++           --no-mlockall --in-memory \
++           -- -i -a --nb-cores=2 --rxq=1 --txq=1 --forward-mode=macswap
++
++  Or
++
++  .. code-block:: console
++
++     kubectl exec -i <Pod name> --container <containers name> -- \
++           /<Path>/dpdk-testpmd -l 0,1 --no-pci \
++           --vdev=net_af_xdp0,use_cni=1,iface=<interface name>,dp_path="/tmp/afxdp_dp/<interface name>/afxdp.sock" \
++           --no-mlockall --in-memory \
++           -- -i -a --nb-cores=2 --rxq=1 --txq=1 --forward-mode=macswap
++
++.. note::
++
++   If the ``dp_path`` parameter isn't explicitly set (as in the first example
++   above), the AF_XDP PMD will set the parameter value to
++   ``/tmp/afxdp_dp/<interface name>/afxdp.sock``.
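++
++As a final, purely illustrative note: a custom DPDK application (rather than
++dpdk-testpmd) running inside the pod can pass the same vdev definition to EAL
++programmatically. The interface name and socket path below are placeholders.
++
++.. code-block:: c
++
++   #include <rte_common.h>
++   #include <rte_eal.h>
++
++   int main(void)
++   {
++       char *eal_args[] = {
++           "app", "-l", "0,1", "--no-pci", "--in-memory", "--no-mlockall",
++           "--vdev=net_af_xdp0,use_cni=1,iface=eth0,"
++           "dp_path=/tmp/afxdp_dp/eth0/afxdp.sock",
++       };
++
++       /* Same effect as passing the arguments on the command line. */
++       if (rte_eal_init(RTE_DIM(eal_args), eal_args) < 0)
++           return -1;
++       /* ... port setup and packet processing ... */
++       return rte_eal_cleanup();
++   }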
+diff --git a/dpdk/doc/guides/howto/index.rst b/dpdk/doc/guides/howto/index.rst
+index 71a3381c36..a7692e8a97 100644
+--- a/dpdk/doc/guides/howto/index.rst
++++ b/dpdk/doc/guides/howto/index.rst
+@@ -8,7 +8,7 @@ HowTo Guides
+     :maxdepth: 2
+     :numbered:
+ 
+-    af_xdp_cni
++    af_xdp_dp
+     lm_bond_virtio_sriov
+     lm_virtio_vhost_user
+     flow_bifurcation
+diff --git a/dpdk/doc/guides/linux_gsg/enable_func.rst b/dpdk/doc/guides/linux_gsg/enable_func.rst
+index 2344d97403..5511640cb8 100644
+--- a/dpdk/doc/guides/linux_gsg/enable_func.rst
++++ b/dpdk/doc/guides/linux_gsg/enable_func.rst
+@@ -85,8 +85,7 @@ need to be adjusted in order to ensure normal DPDK operation:
+ The above limits can usually be adjusted by editing
+ ``/etc/security/limits.conf`` file, and rebooting.
+ 
+-See `Hugepage Mapping <hugepage_mapping>`_
+-section to learn how these limits affect EAL.
++See :ref:`Hugepage Mapping <hugepage_mapping>` section to learn how these limits affect EAL.
+ 
+ Device Control
+ ~~~~~~~~~~~~~~
+diff --git a/dpdk/doc/guides/mempool/cnxk.rst b/dpdk/doc/guides/mempool/cnxk.rst
+index d883b83f7b..ffd9a6de86 100644
+--- a/dpdk/doc/guides/mempool/cnxk.rst
++++ b/dpdk/doc/guides/mempool/cnxk.rst
+@@ -72,7 +72,7 @@ Debugging Options
+    +---+------------+-------------------------------------------------------+
+    | # | Component  | EAL log command                                       |
+    +===+============+=======================================================+
+-   | 1 | NPA        | --log-level='pmd\.mempool.cnxk,8'                     |
++   | 1 | NPA        | --log-level='pmd\.common\.cnxk\.mempool,8'            |
+    +---+------------+-------------------------------------------------------+
+ 
+ Standalone mempool device
+diff --git a/dpdk/doc/guides/mldevs/cnxk.rst b/dpdk/doc/guides/mldevs/cnxk.rst
+index ae9e1fae5f..fc1bcd9cdb 100644
+--- a/dpdk/doc/guides/mldevs/cnxk.rst
++++ b/dpdk/doc/guides/mldevs/cnxk.rst
+@@ -383,7 +383,7 @@ Debugging Options
+    +---+------------+-------------------------------------------------------+
+    | # | Component  | EAL log command                                       |
+    +===+============+=======================================================+
+-   | 1 | ML         | --log-level='pmd\.ml\.cnxk,8'                         |
++   | 1 | ML         | --log-level='pmd\.common\.cnxk\.ml,8'                 |
+    +---+------------+-------------------------------------------------------+
+ 
+ 
+diff --git a/dpdk/doc/guides/nics/af_xdp.rst b/dpdk/doc/guides/nics/af_xdp.rst
+index 1932525d4d..60a88c467e 100644
+--- a/dpdk/doc/guides/nics/af_xdp.rst
++++ b/dpdk/doc/guides/nics/af_xdp.rst
+@@ -155,9 +155,9 @@ use_cni
+ ~~~~~~~
+ 
+ The EAL vdev argument ``use_cni`` is used to indicate that the user wishes to
+-enable the `AF_XDP Plugin for Kubernetes`_ within a DPDK application.
++enable the `AF_XDP Device Plugin for Kubernetes`_ with a DPDK application/pod.
+ 
+-.. _AF_XDP Plugin for Kubernetes: https://github.com/intel/afxdp-plugins-for-kubernetes
++.. _AF_XDP Device Plugin for Kubernetes: https://github.com/redhat-et/afxdp-plugins-for-kubernetes
+ 
+ .. code-block:: console
+ 
+@@ -171,6 +171,21 @@ enable the `AF_XDP Plugin for Kubernetes`_ within a DPDK application.
+    so enabling and disabling of the promiscuous mode through the DPDK application
+    is also not supported.
+ 
++dp_path
++~~~~~~~
++
++The EAL vdev argument ``dp_path`` is used alongside the ``use_cni`` argument
++to explicitly tell the AF_XDP PMD where to find the UDS
++to interact with the `AF_XDP Device Plugin for Kubernetes`_.
++If this argument is not passed alongside the ``use_cni`` argument
++then the AF_XDP PMD configures it internally.
++
++.. _AF_XDP Device Plugin for Kubernetes: https://github.com/redhat-et/afxdp-plugins-for-kubernetes
++
++.. code-block:: console
++
++   --vdev=net_af_xdp0,use_cni=1,dp_path="/tmp/afxdp_dp/<interface name>/afxdp.sock"
++
+ Limitations
+ -----------
+ 
+diff --git a/dpdk/doc/guides/nics/cnxk.rst b/dpdk/doc/guides/nics/cnxk.rst
+index 9ec52e380f..501ef1f826 100644
+--- a/dpdk/doc/guides/nics/cnxk.rst
++++ b/dpdk/doc/guides/nics/cnxk.rst
+@@ -416,6 +416,18 @@ Runtime Config Options
+    With the above configuration, PMD would allocate meta buffers of size 512 for
+    inline inbound IPsec processing second pass.
+ 
++- ``NPC MCAM Aging poll frequency in seconds`` (default ``10``)
++
++   The poll frequency for the aging control thread can be specified with the
++   ``aging_poll_freq`` devargs parameter.
++
++   For example::
++
++      -a 0002:01:00.2,aging_poll_freq=50
++
++   With the above configuration, the driver would poll for aging flows
++   every 50 seconds.
++
+ .. note::
+ 
+    Above devarg parameters are configurable per device, user needs to pass the
+@@ -601,18 +613,6 @@ Runtime Config Options for inline device
+    With the above configuration, driver would poll for soft expiry events every
+    1000 usec.
+ 
+-- ``NPC MCAM Aging poll frequency in seconds`` (default ``10``)
+-
+-   Poll frequency for aging control thread can be specified by
+-   ``aging_poll_freq`` ``devargs`` parameter.
+-
+-   For example::
+-
+-      -a 0002:01:00.2,aging_poll_freq=50
+-
+-   With the above configuration, driver would poll for aging flows every 50
+-   seconds.
+-
+ Debugging Options
+ -----------------
+ 
+@@ -623,7 +623,7 @@ Debugging Options
+    +---+------------+-------------------------------------------------------+
+    | # | Component  | EAL log command                                       |
+    +===+============+=======================================================+
+-   | 1 | NIX        | --log-level='pmd\.net.cnxk,8'                         |
++   | 1 | NIX        | --log-level='pmd\.common\.cnxk\.nix,8'                |
+    +---+------------+-------------------------------------------------------+
+-   | 2 | NPC        | --log-level='pmd\.net.cnxk\.flow,8'                   |
++   | 2 | NPC        | --log-level='pmd\.common\.cnxk\.flow,8'               |
+    +---+------------+-------------------------------------------------------+
+diff --git a/dpdk/doc/guides/nics/features.rst b/dpdk/doc/guides/nics/features.rst
+index f7d9980849..cf9fabb8b8 100644
+--- a/dpdk/doc/guides/nics/features.rst
++++ b/dpdk/doc/guides/nics/features.rst
+@@ -34,6 +34,17 @@ Supports getting the speed capabilities that the current device is capable of.
+ * **[related]  API**: ``rte_eth_dev_info_get()``.
+ 
+ 
++.. _nic_features_link_speeds_config:
++
++Link speed configuration
++------------------------
++
++Supports configuring fixed speed and link autonegotiation.
++
++* **[uses]     user config**: ``dev_conf.link_speeds:RTE_ETH_LINK_SPEED_*``.
++* **[related]  API**: ``rte_eth_dev_configure()``.
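++
++A minimal sketch of what this looks like from the application side
++(port 0 and a fixed 10G link chosen arbitrarily for illustration):
++
++.. code-block:: c
++
++   #include <rte_ethdev.h>
++
++   struct rte_eth_conf conf = { 0 };
++
++   /* Omit RTE_ETH_LINK_SPEED_FIXED to request autonegotiation instead. */
++   conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
++   rte_eth_dev_configure(0, 1, 1, &conf);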
++
++
+ .. _nic_features_link_status:
+ 
+ Link status
+@@ -751,6 +762,19 @@ Supports congestion management.
+   ``rte_eth_cman_config_set()``, ``rte_eth_cman_config_get()``.
+ 
+ 
++.. _nic_features_traffic_manager:
++
++Traffic manager
++---------------
++
++Supports traffic management.
++
++* **[implements] rte_tm_ops**: ``capabilities_get``, ``shaper_profile_add``,
++  ``hierarchy_commit`` and so on.
++* **[related]    API**: ``rte_tm_capabilities_get()``, ``rte_tm_shaper_profile_add()``,
++  ``rte_tm_hierarchy_commit()`` and so on.
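++
++As a rough illustration, an application can probe this feature through the
++generic API (error handling elided):
++
++.. code-block:: c
++
++   #include <rte_tm.h>
++
++   struct rte_tm_capabilities cap;
++   struct rte_tm_error error;
++
++   /* Query the traffic manager capabilities of port 0. */
++   rte_tm_capabilities_get(0, &cap, &error);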
++
++
+ .. _nic_features_fw_version:
+ 
+ FW version
+diff --git a/dpdk/doc/guides/nics/features/atlantic.ini b/dpdk/doc/guides/nics/features/atlantic.ini
+index ef4155027c..29969c1493 100644
+--- a/dpdk/doc/guides/nics/features/atlantic.ini
++++ b/dpdk/doc/guides/nics/features/atlantic.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Queue start/stop     = Y
+diff --git a/dpdk/doc/guides/nics/features/bnxt.ini b/dpdk/doc/guides/nics/features/bnxt.ini
+index bd4e2295dc..c33889663d 100644
+--- a/dpdk/doc/guides/nics/features/bnxt.ini
++++ b/dpdk/doc/guides/nics/features/bnxt.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+diff --git a/dpdk/doc/guides/nics/features/cnxk.ini b/dpdk/doc/guides/nics/features/cnxk.ini
+index ac7de9a0f0..f85813ab52 100644
+--- a/dpdk/doc/guides/nics/features/cnxk.ini
++++ b/dpdk/doc/guides/nics/features/cnxk.ini
+@@ -28,6 +28,7 @@ RSS key update       = Y
+ RSS reta update      = Y
+ Inner RSS            = Y
+ Congestion management = Y
++Traffic manager      = Y
+ Inline protocol      = Y
+ Flow control         = Y
+ Scattered Rx         = Y
+diff --git a/dpdk/doc/guides/nics/features/default.ini b/dpdk/doc/guides/nics/features/default.ini
+index 806cb033ff..c30702c72e 100644
+--- a/dpdk/doc/guides/nics/features/default.ini
++++ b/dpdk/doc/guides/nics/features/default.ini
+@@ -8,6 +8,7 @@
+ ;
+ [Features]
+ Speed capabilities   =
++Link speed configuration =
+ Link status          =
+ Link status event    =
+ Removal event        =
+@@ -42,6 +43,7 @@ VLAN filter          =
+ Flow control         =
+ Rate limitation      =
+ Congestion management =
++Traffic manager      =
+ Inline crypto        =
+ Inline protocol      =
+ CRC offload          =
+diff --git a/dpdk/doc/guides/nics/features/dpaa.ini b/dpdk/doc/guides/nics/features/dpaa.ini
+index a382c7160c..b136ed191a 100644
+--- a/dpdk/doc/guides/nics/features/dpaa.ini
++++ b/dpdk/doc/guides/nics/features/dpaa.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Burst mode info      = Y
+diff --git a/dpdk/doc/guides/nics/features/dpaa2.ini b/dpdk/doc/guides/nics/features/dpaa2.ini
+index 26dc8c2178..f02da463d9 100644
+--- a/dpdk/doc/guides/nics/features/dpaa2.ini
++++ b/dpdk/doc/guides/nics/features/dpaa2.ini
+@@ -17,6 +17,7 @@ Unicast MAC filter   = Y
+ RSS hash             = Y
+ VLAN filter          = Y
+ Flow control         = Y
++Traffic manager      = Y
+ VLAN offload         = Y
+ L3 checksum offload  = Y
+ L4 checksum offload  = Y
+diff --git a/dpdk/doc/guides/nics/features/hns3.ini b/dpdk/doc/guides/nics/features/hns3.ini
+index 338b4e6864..8b623d3077 100644
+--- a/dpdk/doc/guides/nics/features/hns3.ini
++++ b/dpdk/doc/guides/nics/features/hns3.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+@@ -28,6 +29,7 @@ RSS reta update      = Y
+ DCB                  = Y
+ VLAN filter          = Y
+ Flow control         = Y
++Traffic manager      = Y
+ CRC offload          = Y
+ VLAN offload         = Y
+ FEC                  = Y
+diff --git a/dpdk/doc/guides/nics/features/i40e.ini b/dpdk/doc/guides/nics/features/i40e.ini
+index e241dad047..ef7514c44b 100644
+--- a/dpdk/doc/guides/nics/features/i40e.ini
++++ b/dpdk/doc/guides/nics/features/i40e.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+@@ -27,6 +28,7 @@ SR-IOV               = Y
+ DCB                  = Y
+ VLAN filter          = Y
+ Flow control         = Y
++Traffic manager      = Y
+ CRC offload          = Y
+ VLAN offload         = Y
+ QinQ offload         = P
+diff --git a/dpdk/doc/guides/nics/features/iavf.ini b/dpdk/doc/guides/nics/features/iavf.ini
+index db4f92ce71..ce9860e963 100644
+--- a/dpdk/doc/guides/nics/features/iavf.ini
++++ b/dpdk/doc/guides/nics/features/iavf.ini
+@@ -25,17 +25,18 @@ RSS hash             = Y
+ RSS key update       = Y
+ RSS reta update      = Y
+ VLAN filter          = Y
++Traffic manager      = Y
++Inline crypto        = Y
+ CRC offload          = Y
+ VLAN offload         = P
+ L3 checksum offload  = Y
+ L4 checksum offload  = Y
+ Timestamp offload    = Y
+ Inner L3 checksum    = Y
+-Inner L4 checksum    = Y
++Inner L4 checksum    = P
+ Packet type parsing  = Y
+ Rx descriptor status = Y
+ Tx descriptor status = Y
+-Inline crypto        = Y
+ Basic stats          = Y
+ Multiprocess aware   = Y
+ FreeBSD              = Y
+diff --git a/dpdk/doc/guides/nics/features/ice.ini b/dpdk/doc/guides/nics/features/ice.ini
+index 13f8871dcc..62869ef0a0 100644
+--- a/dpdk/doc/guides/nics/features/ice.ini
++++ b/dpdk/doc/guides/nics/features/ice.ini
+@@ -8,6 +8,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+@@ -26,6 +27,7 @@ RSS hash             = Y
+ RSS key update       = Y
+ RSS reta update      = Y
+ VLAN filter          = Y
++Traffic manager      = Y
+ CRC offload          = Y
+ VLAN offload         = Y
+ QinQ offload         = P
+diff --git a/dpdk/doc/guides/nics/features/ice_dcf.ini b/dpdk/doc/guides/nics/features/ice_dcf.ini
+index 3b11622d4c..0e86338990 100644
+--- a/dpdk/doc/guides/nics/features/ice_dcf.ini
++++ b/dpdk/doc/guides/nics/features/ice_dcf.ini
+@@ -22,6 +22,7 @@ Promiscuous mode     = Y
+ Allmulticast mode    = Y
+ Unicast MAC filter   = Y
+ VLAN filter          = Y
++Traffic manager      = Y
+ VLAN offload         = Y
+ Extended stats       = Y
+ Basic stats          = Y
+diff --git a/dpdk/doc/guides/nics/features/igb.ini b/dpdk/doc/guides/nics/features/igb.ini
+index 7b4af6f86c..ee2408f3ee 100644
+--- a/dpdk/doc/guides/nics/features/igb.ini
++++ b/dpdk/doc/guides/nics/features/igb.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = P
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+diff --git a/dpdk/doc/guides/nics/features/igc.ini b/dpdk/doc/guides/nics/features/igc.ini
+index 47d9344435..d6db18c1e8 100644
+--- a/dpdk/doc/guides/nics/features/igc.ini
++++ b/dpdk/doc/guides/nics/features/igc.ini
+@@ -4,6 +4,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ FW version           = Y
+diff --git a/dpdk/doc/guides/nics/features/ionic.ini b/dpdk/doc/guides/nics/features/ionic.ini
+index af0fc5462a..64b2316288 100644
+--- a/dpdk/doc/guides/nics/features/ionic.ini
++++ b/dpdk/doc/guides/nics/features/ionic.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Fast mbuf free       = Y
+diff --git a/dpdk/doc/guides/nics/features/ipn3ke.ini b/dpdk/doc/guides/nics/features/ipn3ke.ini
+index 1f6b780273..e412978820 100644
+--- a/dpdk/doc/guides/nics/features/ipn3ke.ini
++++ b/dpdk/doc/guides/nics/features/ipn3ke.ini
+@@ -25,6 +25,7 @@ SR-IOV               = Y
+ DCB                  = Y
+ VLAN filter          = Y
+ Flow control         = Y
++Traffic manager      = Y
+ CRC offload          = Y
+ VLAN offload         = Y
+ QinQ offload         = Y
+diff --git a/dpdk/doc/guides/nics/features/ixgbe.ini b/dpdk/doc/guides/nics/features/ixgbe.ini
+index 8590ac857f..cb9331dbcd 100644
+--- a/dpdk/doc/guides/nics/features/ixgbe.ini
++++ b/dpdk/doc/guides/nics/features/ixgbe.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+@@ -27,6 +28,7 @@ DCB                  = Y
+ VLAN filter          = Y
+ Flow control         = Y
+ Rate limitation      = Y
++Traffic manager      = Y
+ Inline crypto        = Y
+ CRC offload          = P
+ VLAN offload         = P
+diff --git a/dpdk/doc/guides/nics/features/mvpp2.ini b/dpdk/doc/guides/nics/features/mvpp2.ini
+index 653c9d08cb..ccc2c2d4f8 100644
+--- a/dpdk/doc/guides/nics/features/mvpp2.ini
++++ b/dpdk/doc/guides/nics/features/mvpp2.ini
+@@ -12,8 +12,9 @@ Allmulticast mode    = Y
+ Unicast MAC filter   = Y
+ Multicast MAC filter = Y
+ RSS hash             = Y
+-Flow control         = Y
+ VLAN filter          = Y
++Flow control         = Y
++Traffic manager      = Y
+ CRC offload          = Y
+ L3 checksum offload  = Y
+ L4 checksum offload  = Y
+diff --git a/dpdk/doc/guides/nics/features/ngbe.ini b/dpdk/doc/guides/nics/features/ngbe.ini
+index 2701c5f051..1dfd92e96b 100644
+--- a/dpdk/doc/guides/nics/features/ngbe.ini
++++ b/dpdk/doc/guides/nics/features/ngbe.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Free Tx mbuf on demand = Y
+diff --git a/dpdk/doc/guides/nics/features/octeontx.ini b/dpdk/doc/guides/nics/features/octeontx.ini
+index fa1e18b120..46ae8318a9 100644
+--- a/dpdk/doc/guides/nics/features/octeontx.ini
++++ b/dpdk/doc/guides/nics/features/octeontx.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Lock-free Tx queue   = Y
+diff --git a/dpdk/doc/guides/nics/features/sfc.ini b/dpdk/doc/guides/nics/features/sfc.ini
+index 8a9198adcb..f9654e69ed 100644
+--- a/dpdk/doc/guides/nics/features/sfc.ini
++++ b/dpdk/doc/guides/nics/features/sfc.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+diff --git a/dpdk/doc/guides/nics/features/thunderx.ini b/dpdk/doc/guides/nics/features/thunderx.ini
+index b33bb37c82..2ab8db7239 100644
+--- a/dpdk/doc/guides/nics/features/thunderx.ini
++++ b/dpdk/doc/guides/nics/features/thunderx.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Queue start/stop     = Y
+diff --git a/dpdk/doc/guides/nics/features/txgbe.ini b/dpdk/doc/guides/nics/features/txgbe.ini
+index e21083052c..be0af3dfad 100644
+--- a/dpdk/doc/guides/nics/features/txgbe.ini
++++ b/dpdk/doc/guides/nics/features/txgbe.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities   = Y
++Link speed configuration = Y
+ Link status          = Y
+ Link status event    = Y
+ Rx interrupt         = Y
+@@ -26,6 +27,7 @@ DCB                  = Y
+ VLAN filter          = Y
+ Flow control         = Y
+ Rate limitation      = Y
++Traffic manager      = Y
+ Inline crypto        = Y
+ CRC offload          = P
+ VLAN offload         = P
+diff --git a/dpdk/doc/guides/nics/hns3.rst b/dpdk/doc/guides/nics/hns3.rst
+index 3b0613fc1b..3e84d1ff1c 100644
+--- a/dpdk/doc/guides/nics/hns3.rst
++++ b/dpdk/doc/guides/nics/hns3.rst
+@@ -6,7 +6,7 @@ HNS3 Poll Mode Driver
+ 
+ The hns3 PMD (**librte_net_hns3**) provides poll mode driver support
+ for the inbuilt HiSilicon Network Subsystem(HNS) network engine
+-found in the HiSilicon Kunpeng 920 SoC and Kunpeng 930 SoC .
++found in the HiSilicon Kunpeng 920 SoC (HIP08) and Kunpeng 930 SoC (HIP09/HIP10).
+ 
+ Features
+ --------
+diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst
+index 6b52fb93c5..2c59b24d78 100644
+--- a/dpdk/doc/guides/nics/mlx5.rst
++++ b/dpdk/doc/guides/nics/mlx5.rst
+@@ -245,6 +245,26 @@ Limitations
+   - Matching on ICMP6 following IPv6 routing extension header,
+     should match ``ipv6_routing_ext_next_hdr`` instead of ICMP6.
+ 
++  - The supported actions order is as below::
++
++          MARK (a)
++          *_DECAP (b)
++          OF_POP_VLAN
++          COUNT | AGE
++          METER_MARK | CONNTRACK
++          OF_PUSH_VLAN
++          MODIFY_FIELD
++          *_ENCAP (c)
++          JUMP | DROP | RSS (a) | QUEUE (a) | REPRESENTED_PORT (d)
++
++    a. Only supported on ingress.
++    b. Any decapsulation action, including the combination of RAW_ENCAP and RAW_DECAP actions
++       which results in L3 decapsulation.
++       Not supported on egress.
++    c. Any encapsulation action, including the combination of RAW_ENCAP and RAW_DECAP actions
++       which results in L3 encapsulation.
++    d. Only in transfer (switchdev) mode.
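++
++    As an illustrative sketch (plain rte_flow types, nothing mlx5-specific),
++    an action array honouring this order with MARK, COUNT and a QUEUE fate
++    action would be:
++
++    .. code-block:: c
++
++       #include <rte_flow.h>
++
++       struct rte_flow_action_mark mark = { .id = 0x25 };
++       struct rte_flow_action_queue queue = { .index = 0 };
++       struct rte_flow_action actions[] = {
++           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },   /* ingress only (a) */
++           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
++           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue }, /* ingress only (a) */
++           { .type = RTE_FLOW_ACTION_TYPE_END },
++       };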
++
+ - When using Verbs flow engine (``dv_flow_en`` = 0), flow pattern without any
+   specific VLAN will match for VLAN packets as well:
+ 
+@@ -708,8 +728,8 @@ Limitations
+ 
+   - Cannot co-exist with ASO meter, ASO age action in a single flow rule.
+   - Flow rules insertion rate and memory consumption need more optimization.
+-  - 256 ports maximum.
+-  - 4M connections maximum with ``dv_flow_en`` 1 mode. 16M with ``dv_flow_en`` 2.
++  - 16 ports maximum.
++  - 32M connections maximum.
+ 
+ - Multi-thread flow insertion:
+ 
+diff --git a/dpdk/doc/guides/nics/nfp.rst b/dpdk/doc/guides/nics/nfp.rst
+index fee1860f4a..b577229bda 100644
+--- a/dpdk/doc/guides/nics/nfp.rst
++++ b/dpdk/doc/guides/nics/nfp.rst
+@@ -299,10 +299,6 @@ be wrote N times in the heads. It is the same with NFD3.
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    |                        Data for field 5                       |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+-   |                        Data for field 6                       |
+-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+-   |                        Data for field 7                       |
+-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    |                          Packet Data                          |
+    |                              ...                              |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+diff --git a/dpdk/doc/guides/platform/cnxk.rst b/dpdk/doc/guides/platform/cnxk.rst
+index 70065e3d96..0e61bc91d9 100644
+--- a/dpdk/doc/guides/platform/cnxk.rst
++++ b/dpdk/doc/guides/platform/cnxk.rst
+@@ -250,9 +250,9 @@ Debugging Options
+    +---+------------+-------------------------------------------------------+
+    | # | Component  | EAL log command                                       |
+    +===+============+=======================================================+
+-   | 1 | Common     | --log-level='pmd\.cnxk\.base,8'                       |
++   | 1 | Common     | --log-level='pmd\.common\.cnxk\.base,8'               |
+    +---+------------+-------------------------------------------------------+
+-   | 2 | Mailbox    | --log-level='pmd\.cnxk\.mbox,8'                       |
++   | 2 | Mailbox    | --log-level='pmd\.common\.cnxk\.mbox,8'               |
+    +---+------------+-------------------------------------------------------+
+ 
+ Debugfs support
+diff --git a/dpdk/doc/guides/platform/mlx5.rst b/dpdk/doc/guides/platform/mlx5.rst
+index 400000e284..d64599699e 100644
+--- a/dpdk/doc/guides/platform/mlx5.rst
++++ b/dpdk/doc/guides/platform/mlx5.rst
+@@ -230,7 +230,7 @@ DevX SDK Installation
+ The DevX SDK must be installed on the machine building the Windows PMD.
+ Additional information can be found at
+ `How to Integrate Windows DevX in Your Development Environment
+-<https://docs.nvidia.com/networking/display/winof2v260/RShim+Drivers+and+Usage#RShimDriversandUsage-DevXInterface>`_.
++<https://docs.nvidia.com/networking/display/winof2v290/devx+interface>`_.
+ The minimal supported WinOF2 version is 2.60.
+ 
+ 
+diff --git a/dpdk/doc/guides/prog_guide/img/mbuf1.svg b/dpdk/doc/guides/prog_guide/img/mbuf1.svg
+index a08bf3b6c0..111a874c00 100644
+--- a/dpdk/doc/guides/prog_guide/img/mbuf1.svg
++++ b/dpdk/doc/guides/prog_guide/img/mbuf1.svg
+@@ -487,7 +487,7 @@
+          sodipodi:role="line"
+          id="tspan5256"
+          x="59.842155"
+-         y="282.37683">m-&gt;pkt.next = NULL</tspan></text>
++         y="282.37683">m-&gt;next = NULL</tspan></text>
+     <text
+        xml:space="preserve"
+        style="font-size:10px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+diff --git a/dpdk/doc/guides/prog_guide/img/mbuf2.svg b/dpdk/doc/guides/prog_guide/img/mbuf2.svg
+index f6fdb54002..6a80cbc200 100644
+--- a/dpdk/doc/guides/prog_guide/img/mbuf2.svg
++++ b/dpdk/doc/guides/prog_guide/img/mbuf2.svg
+@@ -1074,7 +1074,7 @@
+          sodipodi:role="line"
+          id="tspan5256-8"
+          x="527.19458"
+-         y="628.45935">m-&gt;pkt.next = NULL</tspan></text>
++         y="628.45935">m-&gt;next = NULL</tspan></text>
+     <text
+        xml:space="preserve"
+        style="font-size:10px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+@@ -1084,7 +1084,7 @@
+          sodipodi:role="line"
+          id="tspan5256-8-3"
+          x="330.50363"
+-         y="628.45935">m-&gt;pkt.next = mseg3</tspan></text>
++         y="628.45935">m-&gt;next = mseg3</tspan></text>
+     <text
+        xml:space="preserve"
+        style="font-size:10px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+@@ -1094,7 +1094,7 @@
+          sodipodi:role="line"
+          id="tspan5256-8-3-4"
+          x="144.79388"
+-         y="628.45935">m-&gt;pkt.next = mseg2</tspan></text>
++         y="628.45935">m-&gt;next = mseg2</tspan></text>
+     <text
+        xml:space="preserve"
+        style="font-size:10px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+diff --git a/dpdk/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst b/dpdk/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst
+index 314d4adbb8..b14289eb73 100644
+--- a/dpdk/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst
++++ b/dpdk/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst
+@@ -43,7 +43,7 @@ Note that all update/lookup operations on Fragment Table are not thread safe.
+ So if different execution contexts (threads/processes) will access the same table simultaneously,
+ then some external syncing mechanism have to be provided.
+ 
+-Each table entry can hold information about packets consisting of up to RTE_LIBRTE_IP_FRAG_MAX (by default: 4) fragments.
++Each table entry can hold information about packets consisting of up to RTE_LIBRTE_IP_FRAG_MAX (by default: 8) fragments.
+ 
+ Code example, that demonstrates creation of a new Fragment table:
+ 
+diff --git a/dpdk/doc/guides/prog_guide/mbuf_lib.rst b/dpdk/doc/guides/prog_guide/mbuf_lib.rst
+index 049357c755..749f9c97a8 100644
+--- a/dpdk/doc/guides/prog_guide/mbuf_lib.rst
++++ b/dpdk/doc/guides/prog_guide/mbuf_lib.rst
+@@ -134,7 +134,7 @@ a vxlan-encapsulated tcp packet:
+ 
+     mb->l2_len = len(out_eth)
+     mb->l3_len = len(out_ip)
+-    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM
++    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM
+     set out_ip checksum to 0 in the packet
+ 
+   This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
+@@ -143,7 +143,7 @@ a vxlan-encapsulated tcp packet:
+ 
+     mb->l2_len = len(out_eth)
+     mb->l3_len = len(out_ip)
+-    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM | RTE_MBUF_F_TX_UDP_CKSUM
++    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM
+     set out_ip checksum to 0 in the packet
+     set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
+ 
+@@ -154,7 +154,7 @@ a vxlan-encapsulated tcp packet:
+ 
+     mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
+     mb->l3_len = len(in_ip)
+-    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM
++    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM
+     set in_ip checksum to 0 in the packet
+ 
+   This is similar to case 1), but l2_len is different. It is supported
+@@ -165,7 +165,7 @@ a vxlan-encapsulated tcp packet:
+ 
+     mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
+     mb->l3_len = len(in_ip)
+-    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM | RTE_MBUF_F_TX_TCP_CKSUM
++    mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM
+     set in_ip checksum to 0 in the packet
+     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
+ 
+diff --git a/dpdk/doc/guides/prog_guide/packet_framework.rst b/dpdk/doc/guides/prog_guide/packet_framework.rst
+index ebc69d8c3e..9987ead6c5 100644
+--- a/dpdk/doc/guides/prog_guide/packet_framework.rst
++++ b/dpdk/doc/guides/prog_guide/packet_framework.rst
+@@ -509,7 +509,7 @@ the number of L2 or L3 cache memory misses is greatly reduced, hence one of the
+ This is because the cost of L2/L3 cache memory miss on memory read accesses is high, as usually due to data dependency between instructions,
+ the CPU execution units have to stall until the read operation is completed from L3 cache memory or external DRAM memory.
+ By using prefetch instructions, the latency of memory read accesses is hidden,
+-provided that it is preformed early enough before the respective data structure is actually used.
++provided that it is performed early enough before the respective data structure is actually used.
+ 
+ By splitting the processing into several stages that are executed on different packets (the packets from the input burst are interlaced),
+ enough work is created to allow the prefetch instructions to complete successfully (before the prefetched data structures are actually accessed) and
+diff --git a/dpdk/doc/guides/prog_guide/profile_app.rst b/dpdk/doc/guides/prog_guide/profile_app.rst
+index 14292d4c25..a6b5fb4d5e 100644
+--- a/dpdk/doc/guides/prog_guide/profile_app.rst
++++ b/dpdk/doc/guides/prog_guide/profile_app.rst
+@@ -59,7 +59,7 @@ addition to the standard events, ``perf`` can be used to profile arm64
+ specific PMU (Performance Monitor Unit) events through raw events (``-e``
+ ``-rXX``).
+ 
+-For more derails refer to the
++For more details refer to the
+ `ARM64 specific PMU events enumeration <http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.100095_0002_04_en/way1382543438508.html>`_.
+ 
+ 
+diff --git a/dpdk/doc/guides/rel_notes/deprecation.rst b/dpdk/doc/guides/rel_notes/deprecation.rst
+index 81b93515cb..10630ba255 100644
+--- a/dpdk/doc/guides/rel_notes/deprecation.rst
++++ b/dpdk/doc/guides/rel_notes/deprecation.rst
+@@ -27,10 +27,6 @@ Deprecation Notices
+ * kvargs: The function ``rte_kvargs_process`` will get a new parameter
+   for returning key match count. It will ease handling of no-match case.
+ 
+-* cmdline: The function ``cmdline_poll`` does not work correctly on either
+-  Linux or Windows and is unused by any part of DPDK.
+-  This function is now deprecated and will be removed in DPDK 23.11.
+-
+ * telemetry: The functions ``rte_tel_data_add_array_u64`` and ``rte_tel_data_add_dict_u64``,
+   used by telemetry callbacks for adding unsigned integer values to be returned to the user,
+   are renamed to ``rte_tel_data_add_array_uint`` and ``rte_tel_data_add_dict_uint`` respectively.
+diff --git a/dpdk/doc/guides/rel_notes/release_23_11.rst b/dpdk/doc/guides/rel_notes/release_23_11.rst
+index 6d83682d73..538e06aede 100644
+--- a/dpdk/doc/guides/rel_notes/release_23_11.rst
++++ b/dpdk/doc/guides/rel_notes/release_23_11.rst
+@@ -669,3 +669,744 @@ Tested Platforms
+   * OFED:
+ 
+     * MLNX_OFED 23.07-0.5.1.2
++
++23.11.1 Release Notes
++---------------------
++
++
++23.11.1 Fixes
++~~~~~~~~~~~~~
++
++* 23.11.1-rc1
++* app/crypto-perf: add missing op resubmission
++* app/crypto-perf: fix copy segment size
++* app/crypto-perf: fix data comparison
++* app/crypto-perf: fix encrypt operation verification
++* app/crypto-perf: fix next segment mbuf
++* app/crypto-perf: fix out-of-place mbuf size
++* app/crypto-perf: verify strdup return
++* app/dma-perf: verify strdup return
++* app/dumpcap: verify strdup return
++* app/graph: fix build reason
++* app/pdump: verify strdup return
++* app/testpmd: fix --stats-period option check
++* app/testpmd: fix GRO packets flush on timeout
++* app/testpmd: fix async flow create failure handling
++* app/testpmd: fix async indirect action list creation
++* app/testpmd: fix auto-completion for indirect action list
++* app/testpmd: fix burst option parsing
++* app/testpmd: fix crash in multi-process forwarding
++* app/testpmd: fix error message for invalid option
++* app/testpmd: fix flow modify tag typo
++* app/testpmd: hide --bitrate-stats in help if disabled
++* app/testpmd: return if no packets in GRO heavy weight mode
++* app/testpmd: verify strdup return
++* build: fix linker warnings about undefined symbols
++* build: fix reasons conflict
++* build: link static libs with whole-archive in subproject
++* build: pass cflags in subproject
++* buildtools/cmdline: fix IP address initializer
++* buildtools/cmdline: fix generated code for IP addresses
++* bus/dpaa: verify strdup return
++* bus/fslmc: verify strdup return
++* bus/vdev: fix devargs in secondary process
++* bus/vdev: verify strdup return
++* ci: update versions of actions in GHA
++* common/cnxk: fix RSS RETA configuration
++* common/cnxk: fix Tx MTU configuration
++* common/cnxk: fix VLAN check for inner header
++* common/cnxk: fix inline device pointer check
++* common/cnxk: fix link config for SDP
++* common/cnxk: fix mbox region copy
++* common/cnxk: fix mbox struct attributes
++* common/cnxk: fix memory leak in CPT init
++* common/cnxk: fix possible out-of-bounds access
++* common/cnxk: remove CN9K inline IPsec FP opcodes
++* common/cnxk: remove dead code
++* common/mlx5: fix calloc parameters
++* common/mlx5: fix duplicate read of general capabilities
++* common/mlx5: fix query sample info capability
++* common/qat: fix legacy flag
++* common/sfc_efx/base: use C11 static assert
++* config: fix CPU instruction set for cross-build
++* crypto/cnxk: fix CN9K ECDH public key verification
++* crypto/qat: fix crash with CCM null AAD pointer
++* cryptodev: remove unused extern variable
++* dma/dpaa2: fix logtype register
++* dma/idxd: verify strdup return
++* dmadev: fix calloc parameters
++* doc: add --latencystats option in testpmd guide
++* doc: add link speeds configuration in features table
++* doc: add traffic manager in features table
++* doc: fix aging poll frequency option in cnxk guide
++* doc: fix commands in eventdev test tool guide
++* doc: fix configuration in baseband 5GNR driver guide
++* doc: fix default IP fragments maximum in programmer guide
++* doc: fix typo in packet framework guide
++* doc: fix typo in profiling guide
++* doc: fix typos in cryptodev overview
++* doc: remove cmdline polling mode deprecation notice
++* doc: update link to Windows DevX in mlx5 guide
++* drivers/net: fix buffer overflow for packet types list
++* dts: fix smoke tests driver regex
++* dts: strip whitespaces from stdout and stderr
++* eal/x86: add AMD vendor check for TSC calibration
++* eal: verify strdup return
++* ethdev: fix NVGRE encap flow action description
++* event/cnxk: fix dequeue timeout configuration
++* event/cnxk: verify strdup return
++* event/dlb2: remove superfluous memcpy
++* eventdev/crypto: fix enqueueing
++* eventdev: fix Doxygen processing of vector struct
++* eventdev: fix calloc parameters
++* eventdev: improve Doxygen comments on configure struct
++* examples/ipsec-secgw: fix Rx queue ID in Rx callback
++* examples/ipsec-secgw: fix cryptodev to SA mapping
++* examples/ipsec-secgw: fix typo in error message
++* examples/ipsec-secgw: fix width of variables
++* examples/l3fwd: fix Rx over not ready port
++* examples/l3fwd: fix Rx queue configuration
++* examples/packet_ordering: fix Rx with reorder mode disabled
++* examples/qos_sched: fix memory leak in args parsing
++* examples/vhost: verify strdup return
++* gro: fix reordering of packets
++* hash: remove some dead code
++* kernel/freebsd: fix module build on FreeBSD 14
++* lib: add newline in logs
++* lib: remove redundant newline from logs
++* lib: use dedicated logtypes and macros
++* ml/cnxk: fix xstats calculation
++* net/af_xdp: fix leak on XSK configuration failure
++* net/af_xdp: fix memzone leak on config failure
++* net/bnx2x: fix calloc parameters
++* net/bnx2x: fix warnings about memcpy lengths
++* net/bnxt: fix 50G and 100G forced speed
++* net/bnxt: fix array overflow
++* net/bnxt: fix backward firmware compatibility
++* net/bnxt: fix deadlock in ULP timer callback
++* net/bnxt: fix null pointer dereference
++* net/bnxt: fix number of Tx queues being created
++* net/bnxt: fix speed change from 200G to 25G on Thor
++* net/bnxt: modify locking for representor Tx
++* net/bonding: fix flow count query
++* net/cnxk: add cookies check for multi-segment offload
++* net/cnxk: fix MTU limit
++* net/cnxk: fix Rx packet format check condition
++* net/cnxk: fix aged flow query
++* net/cnxk: fix buffer size configuration
++* net/cnxk: fix flow RSS configuration
++* net/cnxk: fix indirect mbuf handling in Tx
++* net/cnxk: fix mbuf fields in multi-segment Tx
++* net/cnxk: improve Tx performance for SW mbuf free
++* net/ena/base: fix metrics excessive memory consumption
++* net/ena/base: limit exponential backoff
++* net/ena/base: restructure interrupt handling
++* net/ena: fix fast mbuf free
++* net/ena: fix mbuf double free in fast free mode
++* net/failsafe: fix memory leak in args parsing
++* net/gve: fix DQO for chained descriptors
++* net/hns3: enable PFC for all user priorities
++* net/hns3: fix VF multiple count on one reset
++* net/hns3: fix disable command with firmware
++* net/hns3: fix reset level comparison
++* net/hns3: refactor PF mailbox message struct
++* net/hns3: refactor VF mailbox message struct
++* net/hns3: refactor handle mailbox function
++* net/hns3: refactor send mailbox function
++* net/hns3: remove QinQ insert support for VF
++* net/hns3: support new device
++* net/i40e: remove incorrect 16B descriptor read block
++* net/i40e: remove redundant judgment in flow parsing
++* net/iavf: fix crash on VF start
++* net/iavf: fix memory leak on security context error
++* net/iavf: fix no polling mode switching
++* net/iavf: remove error logs for VLAN offloading
++* net/iavf: remove incorrect 16B descriptor read block
++* net/ice: fix link update
++* net/ice: fix memory leaks
++* net/ice: fix tunnel TSO capabilities
++* net/ice: remove incorrect 16B descriptor read block
++* net/igc: fix timesync disable
++* net/ionic: fix RSS query
++* net/ionic: fix device close
++* net/ionic: fix missing volatile type for cqe pointers
++* net/ixgbe: fix memory leak after device init failure
++* net/ixgbe: increase VF reset timeout
++* net/ixgbevf: fix RSS init for x550 NICs
++* net/mana: fix memory leak on MR allocation
++* net/mana: handle MR cache expansion failure
++* net/mana: prevent values overflow returned from RDMA layer
++* net/memif: fix crash with Tx burst larger than 255
++* net/memif: fix extra mbuf refcnt update in zero copy Tx
++* net/mlx5/hws: check not supported fields in VXLAN
++* net/mlx5/hws: enable multiple integrity items
++* net/mlx5/hws: fix ESP flow matching validation
++* net/mlx5/hws: fix VLAN inner type
++* net/mlx5/hws: fix VLAN item in non-relaxed mode
++* net/mlx5/hws: fix direct index insert on depend WQE
++* net/mlx5/hws: fix memory access in L3 decapsulation
++* net/mlx5/hws: fix port ID for root table
++* net/mlx5/hws: fix tunnel protocol checks
++* net/mlx5/hws: skip item when inserting rules by index
++* net/mlx5: fix DR context release ordering
++* net/mlx5: fix GENEVE TLV option management
++* net/mlx5: fix GENEVE option item translation
++* net/mlx5: fix HWS meter actions availability
++* net/mlx5: fix HWS registers initialization
++* net/mlx5: fix IP-in-IP tunnels recognition
++* net/mlx5: fix VLAN ID in flow modify
++* net/mlx5: fix VLAN handling in meter split
++* net/mlx5: fix age position in hairpin split
++* net/mlx5: fix async flow create error handling
++* net/mlx5: fix condition of LACP miss flow
++* net/mlx5: fix connection tracking action validation
++* net/mlx5: fix conntrack action handle representation
++* net/mlx5: fix counters map in bonding mode
++* net/mlx5: fix drop action release timing
++* net/mlx5: fix error packets drop in regular Rx
++* net/mlx5: fix flow action template expansion
++* net/mlx5: fix flow configure validation
++* net/mlx5: fix flow counter cache starvation
++* net/mlx5: fix flow tag modification
++* net/mlx5: fix indirect action async job initialization
++* net/mlx5: fix jump action validation
++* net/mlx5: fix meter policy priority
++* net/mlx5: fix modify flex item
++* net/mlx5: fix non-masked indirect list meter translation
++* net/mlx5: fix parameters verification in HWS table create
++* net/mlx5: fix rollback on failed flow configure
++* net/mlx5: fix stats query crash in secondary process
++* net/mlx5: fix sync flow meter action
++* net/mlx5: fix sync meter processing in HWS
++* net/mlx5: fix template clean up of FDB control flow rule
++* net/mlx5: fix use after free when releasing Tx queues
++* net/mlx5: fix warning about copy length
++* net/mlx5: prevent ioctl failure log flooding
++* net/mlx5: prevent querying aged flows on uninit port
++* net/mlx5: remove GENEVE options length limitation
++* net/mlx5: remove device status check in flow creation
++* net/mlx5: remove duplication of L3 flow item validation
++* net/netvsc: fix VLAN metadata parsing
++* net/nfp: fix IPsec data endianness
++* net/nfp: fix NFD3 metadata process
++* net/nfp: fix NFDk metadata process
++* net/nfp: fix Rx descriptor
++* net/nfp: fix Rx memory leak
++* net/nfp: fix calloc parameters
++* net/nfp: fix device close
++* net/nfp: fix device resource freeing
++* net/nfp: fix getting firmware VNIC version
++* net/nfp: fix initialization failure flow
++* net/nfp: fix resource leak for CoreNIC firmware
++* net/nfp: fix resource leak for PF initialization
++* net/nfp: fix resource leak for VF
++* net/nfp: fix resource leak for device initialization
++* net/nfp: fix resource leak for exit of CoreNIC firmware
++* net/nfp: fix resource leak for exit of flower firmware
++* net/nfp: fix resource leak for flower firmware
++* net/nfp: fix switch domain free check
++* net/nfp: fix uninitialized variable
++* net/nfp: free switch domain ID on close
++* net/nfp: verify strdup return
++* net/sfc: fix calloc parameters
++* net/softnic: fix include of log library
++* net/tap: do not overwrite flow API errors
++* net/tap: fix traffic control handle calculation
++* net/thunderx: fix DMAC control register update
++* net/virtio: fix vDPA device init advertising control queue
++* net/virtio: remove duplicate queue xstats
++* net/vmxnet3: fix initialization on FreeBSD
++* net/vmxnet3: ignore Rx queue interrupt setup on FreeBSD
++* net: add macros for VLAN metadata parsing
++* net: fix TCP/UDP checksum with padding data
++* pipeline: fix calloc parameters
++* rawdev: fix calloc parameters
++* rcu: fix acked token in debug log
++* rcu: use atomic operation on acked token
++* regexdev: fix logtype register
++* telemetry: fix connected clients count
++* telemetry: fix empty JSON dictionaries
++* test/cfgfile: fix typo in error messages
++* test/event: fix crash in Tx adapter freeing
++* test/event: skip test if no driver is present
++* test/mbuf: fix external mbuf case with assert enabled
++* test/power: fix typo in error message
++* test: assume C source files are UTF-8 encoded
++* test: do not count skipped tests as executed
++* test: fix probing in secondary process
++* test: verify strdup return
++* vdpa/mlx5: fix queue enable drain CQ
++* version: 23.11.1-rc2
++* vhost: fix VDUSE device destruction failure
++* vhost: fix deadlock during vDPA SW live migration
++* vhost: fix memory leak in Virtio Tx split path
++* vhost: fix virtqueue access check in VDUSE setup
++* vhost: fix virtqueue access check in datapath
++* vhost: fix virtqueue access check in vhost-user setup
++
++23.11.1 Validation
++~~~~~~~~~~~~~~~~~~
++
++* RedHat Testing:
++
++    * Test scenarios:
++
++        * VM with device assignment(PF) throughput testing(1G hugepage size)
++        * VM with device assignment(PF) throughput testing(2M hugepage size)
++        * VM with device assignment(VF) throughput testing
++        * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
++        * PVP vhost-user 2Q throughput testing
++        * PVP vhost-user 1Q - cross numa node throughput testing
++        * VM with vhost-user 2 queues throughput testing
++        * vhost-user reconnect with dpdk-client, qemu-server qemu reconnect
++        * vhost-user reconnect with dpdk-client, qemu-server ovs reconnect
++* PVP reconnect with dpdk-client, qemu-server
++        * PVP 1Q live migration testing
++        * PVP 1Q cross numa node live migration testing
++        * VM with ovs+dpdk+vhost-user 1Q live migration testing
++        * VM with ovs+dpdk+vhost-user 1Q live migration testing (2M)
++        * VM with ovs+dpdk+vhost-user 2Q live migration testing
++        * VM with ovs+dpdk+vhost-user 4Q live migration testing
++        * Host PF + DPDK testing
++        * Host VF + DPDK testing
++
++    * Test Versions and device:
++
++        * RHEL 9.4
++        * qemu-kvm-8.2.0
++        * kernel 5.14
++        * libvirt 10.0
++        * X540-AT2 NIC(ixgbe, 10G)
++
++* Nvidia(R) Testing:
++
++    * Test scenarios:
++
++        * Send and receive multiple types of traffic.
++        * testpmd xstats counter test.
++        * testpmd timestamp test.
++        * Changing/checking link status through testpmd.
++        * rte_flow tests (https://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads)
++        * RSS tests.
++        * VLAN filtering, stripping, and insertion tests.
++        * Checksum and TSO tests.
++        * ptype tests.
++        * link_status_interrupt example application tests.
++        * l3fwd-power example application tests.
++        * Multi-process example applications tests.
++        * Hardware LRO tests.
++        * Buffer Split tests.
++        * Tx scheduling tests.
++
++    * Test platform:
++
++        * NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-24.04-0.6.6.0 / Firmware: 22.41.1000
++        * NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-24.04-0.6.6.0 / Firmware: 28.41.1000
++        * DPU: BlueField-2 / DOCA SW version: 2.7.0 / Firmware: 24.41.1000
++
++    * OS/driver combinations:
++
++        * Debian 12 with MLNX_OFED_LINUX-24.01-0.3.3.1.
++        * Ubuntu 20.04.6 with MLNX_OFED_LINUX-24.01-0.3.3.1.
++        * Ubuntu 22.04.4 with MLNX_OFED_LINUX-24.04-0.6.6.0.
++        * Ubuntu 20.04.6 with rdma-core master (311c591).
++        * Ubuntu 20.04.6 with rdma-core v28.0.
++        * Fedora 40 with rdma-core v48.0.
++        * Fedora 41 (Rawhide) with rdma-core v51.0.
++        * OpenSUSE Leap 15.5 with rdma-core v42.0.
++        * Windows Server 2019 with Clang 16.0.6.
++
++* Intel(R) Testing:
++
++    * Basic NIC testing
++
++        * Build & CFLAG compile: cover the build test combinations with the latest GCC/Clang versions and popular OS revisions such as Ubuntu23.10, Ubuntu22.04, Fedora39, RHEL8.9, RHEL9.2, FreeBSD14.0, SUSE15, CentOS7.9, openEuler22.03-SP2, OpenAnolis8.8, etc.
++        * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++        * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++        * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
++        * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc.
++        * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
++
++    * Basic cryptodev and virtio testing
++
++        * Virtio: both functional and performance tests are covered, such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 8.0, etc.
++        * Cryptodev Function test: Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
++        * Cryptodev Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc.
++
++23.11.1 Known Issues
++~~~~~~~~~~~~~~~~~~~~
++
++* Starting dpdk-pdump in a VM with the virtio-0.95 protocol fails
++
++    Fix available in upstream.
++
++23.11.2 Release Notes
++---------------------
++
++
++23.11.2 Fixes
++~~~~~~~~~~~~~
++
++* app/bbdev: fix MLD output size computation
++* app/bbdev: fix TB logic
++* app/bbdev: fix interrupt tests
++* app/crypto-perf: fix result for asymmetric
++* app/crypto-perf: remove redundant local variable
++* app/dumpcap: handle SIGTERM and SIGHUP
++* app/pdump: handle SIGTERM and SIGHUP
++* app/testpmd: add postpone option to async flow destroy
++* app/testpmd: fix build on signed comparison
++* app/testpmd: fix help string of BPF load command
++* app/testpmd: fix indirect action flush
++* app/testpmd: fix lcore ID restriction
++* app/testpmd: fix outer IP checksum offload
++* app/testpmd: fix parsing for connection tracking item
++* app/testpmd: handle IEEE1588 init failure
++* baseband/acc: fix memory barrier
++* baseband/la12xx: forbid secondary process
++* bpf: disable on 32-bit x86
++* bpf: fix MOV instruction evaluation
++* bpf: fix load hangs with six IPv6 addresses
++* build: use builtin helper for python dependencies
++* buildtools: fix build with clang 17 and ASan
++* bus/dpaa: fix bus scan for DMA devices
++* bus/dpaa: fix memory leak in bus scan
++* bus/dpaa: remove redundant file descriptor check
++* bus/pci: fix FD in secondary process
++* bus/pci: fix UIO resource mapping in secondary process
++* bus/pci: fix build with musl 1.2.4 / Alpine 3.19
++* bus/vdev: fix device reinitialization
++* common/cnxk: fix flow aging cleanup
++* common/cnxk: fix flow aging on application exit
++* common/cnxk: fix integer overflow
++* common/cnxk: fix segregation of logs based on module
++* common/dpaax/caamflib: fix PDCP AES-AES watchdog error
++* common/dpaax/caamflib: fix PDCP-SDAP watchdog error
++* common/dpaax: fix IOVA table cleanup
++* common/dpaax: fix node array overrun
++* common/idpf: fix PTP message validation
++* common/idpf: fix flex descriptor mask
++* common/mlx5: fix PRM structs
++* common/mlx5: fix unsigned/signed mismatch
++* common/mlx5: remove unneeded field when modify RQ table
++* config: fix warning for cross build with meson >= 1.3.0
++* crypto/cnxk: fix ECDH public key verification
++* crypto/cnxk: fix minimal input normalization
++* crypto/cnxk: fix out-of-bound access
++* crypto/dpaa2_sec: fix event queue user context
++* crypto/dpaa_sec: fix IPsec descriptor
++* crypto/ipsec_mb: fix function comment
++* crypto/openssl: fix GCM and CCM thread unsafe contexts
++* crypto/openssl: make per-QP auth context clones
++* crypto/openssl: make per-QP cipher context clones
++* crypto/openssl: optimize 3DES-CTR context init
++* crypto/openssl: set cipher padding once
++* crypto/qat: fix GEN4 write
++* crypto/qat: fix log message typo
++* crypto/qat: fix placement of OOP offset
++* cryptodev: fix build without crypto callbacks
++* cryptodev: validate crypto callbacks from next node
++* devtools: fix symbol listing
++* dma/hisilicon: remove support for HIP09 platform
++* dma/idxd: fix setup with Ubuntu 24.04
++* dmadev: fix structure alignment
++* doc: add baseline mode in l3fwd-power guide
++* doc: add power uncore in API index
++* doc: describe mlx5 HWS actions order
++* doc: fix AF_XDP device plugin howto
++* doc: fix DMA performance test invocation
++* doc: fix link to hugepage mapping from Linux guide
++* doc: fix mbuf flags
++* doc: fix testpmd ring size command
++* doc: fix typo in l2fwd-crypto guide
++* doc: remove empty section from testpmd guide
++* doc: remove reference to mbuf pkt field
++* doc: update AF_XDP device plugin repository
++* doc: update metadata description in nfp guide
++* eal/linux: lower log level on allocation attempt failure
++* eal/unix: support ZSTD compression for firmware
++* eal/windows: install sched.h file
++* eal: fix type in destructor macro for MSVC
++* ethdev: fix GENEVE option item conversion
++* ethdev: fix device init without socket-local memory
++* ethdev: fix strict aliasing in link up
++* event/sw: fix warning from useless snprintf
++* eventdev/crypto: fix opaque field handling
++* examples/fips_validation: fix dereference and out-of-bound
++* examples/ipsec-secgw: fix SA salt endianness
++* examples/ipsec-secgw: revert SA salt endianness
++* examples/l3fwd: fix crash in ACL mode for mixed traffic
++* examples/l3fwd: fix crash on multiple sockets
++* examples: fix lcore ID restriction
++* examples: fix port ID restriction
++* examples: fix queue ID restriction
++* fbarray: fix finding for unaligned length
++* fbarray: fix incorrect lookahead behavior
++* fbarray: fix incorrect lookbehind behavior
++* fbarray: fix lookahead ignore mask handling
++* fbarray: fix lookbehind ignore mask handling
++* graph: fix ID collisions
++* graph: fix mcore dispatch walk
++* graph: fix stats retrieval while destroying a graph
++* hash: check name when creating a hash
++* hash: fix RCU reclamation size
++* hash: fix return code description in Doxygen
++* latencystats: fix literal float suffix
++* malloc: fix multi-process wait condition handling
++* mbuf: fix dynamic fields copy
++* net/af_packet: align Rx/Tx structs to cache line
++* net/af_xdp: count mbuf allocation failures
++* net/af_xdp: fix multi-interface support for k8s
++* net/af_xdp: fix port ID in Rx mbuf
++* net/af_xdp: fix stats reset
++* net/af_xdp: remove unused local statistic
++* net/ark: fix index arithmetic
++* net/axgbe: check only minimum speed for cables
++* net/axgbe: delay AN timeout during KR training
++* net/axgbe: disable RRC for yellow carp devices
++* net/axgbe: disable interrupts during device removal
++* net/axgbe: enable PLL control for fixed PHY modes only
++* net/axgbe: fix MDIO access for non-zero ports and CL45 PHYs
++* net/axgbe: fix SFP codes check for DAC cables
++* net/axgbe: fix Tx flow on 30H HW
++* net/axgbe: fix connection for SFP+ active cables
++* net/axgbe: fix fluctuations for 1G Bel Fuse SFP
++* net/axgbe: fix linkup in PHY status
++* net/axgbe: reset link when link never comes back
++* net/axgbe: update DMA coherency values
++* net/bonding: fix failover time of LACP with mode 4
++* net/cnxk: fix RSS config
++* net/cnxk: fix extbuf handling for multisegment packet
++* net/cnxk: fix outbound security with higher packet burst
++* net/cnxk: fix promiscuous state after MAC change
++* net/cnxk: update SA userdata and keep original cookie
++* net/cpfl: add checks on control queue messages
++* net/cpfl: fix 32-bit build
++* net/dpaa: forbid MTU configuration for shared interface
++* net/e1000/base: fix link power down
++* net/ena: fix bad checksum handling
++* net/ena: fix checksum handling
++* net/ena: fix return value check
++* net/fm10k: fix cleanup during init failure
++* net/gve: fix RSS hash endianness in DQO format
++* net/gve: fix Tx queue state on queue start
++* net/hns3: check Rx DMA address alignment
++* net/hns3: disable SCTP verification tag for RSS hash input
++* net/hns3: fix Rx timestamp flag
++* net/hns3: fix double free for Rx/Tx queue
++* net/hns3: fix offload flag of IEEE 1588
++* net/hns3: fix uninitialized variable in FEC query
++* net/hns3: fix variable overflow
++* net/i40e: fix outer UDP checksum offload for X710
++* net/iavf: fix VF reset when using DCF
++* net/iavf: remove outer UDP checksum offload for X710 VF
++* net/ice/base: fix GCS descriptor field offsets
++* net/ice/base: fix board type definition
++* net/ice/base: fix check for existing switch rule
++* net/ice/base: fix masking when reading context
++* net/ice/base: fix memory leak in firmware version check
++* net/ice/base: fix pointer to variable outside scope
++* net/ice/base: fix potential TLV length overflow
++* net/ice/base: fix preparing PHY for timesync command
++* net/ice/base: fix resource leak
++* net/ice/base: fix return type of bitmap hamming weight
++* net/ice/base: fix sign extension
++* net/ice/base: fix size when allocating children arrays
++* net/ice/base: fix temporary failures reading NVM
++* net/ice: fix VLAN stripping in double VLAN mode
++* net/ice: fix check for outer UDP checksum offload
++* net/ice: fix memory leaks in raw pattern parsing
++* net/ice: fix return value for raw pattern parsing
++* net/ionic: fix mbuf double-free when emptying array
++* net/ixgbe/base: fix 5G link speed reported on VF
++* net/ixgbe/base: fix PHY ID for X550
++* net/ixgbe/base: revert advertising for X550 2.5G/5G
++* net/ixgbe: do not create delayed interrupt handler twice
++* net/ixgbe: do not update link status in secondary process
++* net/mana: fix uninitialized return value
++* net/mlx5/hws: add template match none flag
++* net/mlx5/hws: decrease log level for creation failure
++* net/mlx5/hws: extend tag saving for match and jumbo
++* net/mlx5/hws: fix action template dump
++* net/mlx5/hws: fix check of range templates
++* net/mlx5/hws: fix deletion of action vport
++* net/mlx5/hws: fix function comment
++* net/mlx5/hws: fix matcher reconnect
++* net/mlx5/hws: fix memory leak in modify header
++* net/mlx5/hws: fix port ID on root item convert
++* net/mlx5/hws: fix spinlock release on context open
++* net/mlx5/hws: remove unused variable
++* net/mlx5/hws: set default miss when replacing table
++* net/mlx5: break flow resource release loop
++* net/mlx5: fix Arm build with GCC 9.1
++* net/mlx5: fix MTU configuration
++* net/mlx5: fix access to flow template operations
++* net/mlx5: fix crash on counter pool destroy
++* net/mlx5: fix disabling E-Switch default flow rules
++* net/mlx5: fix end condition of reading xstats
++* net/mlx5: fix flow template indirect action failure
++* net/mlx5: fix hash Rx queue release in flow sample
++* net/mlx5: fix indexed pool with invalid index
++* net/mlx5: fix shared Rx queue data access race
++* net/mlx5: fix start without duplicate flow patterns
++* net/mlx5: fix uplink port probing in bonding mode
++* net/mlx5: support jump in meter hierarchy
++* net/netvsc: fix MTU set
++* net/netvsc: use ethdev API to set VF MTU
++* net/nfp: adapt reverse sequence card
++* net/nfp: disable ctrl VNIC queues on close
++* net/nfp: fix IPv6 TTL and DSCP flow action
++* net/nfp: fix allocation of switch domain
++* net/nfp: fix configuration BAR
++* net/nfp: fix dereference of null pointer
++* net/nfp: fix disabling 32-bit build
++* net/nfp: fix firmware abnormal cleanup
++* net/nfp: fix flow mask table entry
++* net/nfp: fix getting firmware version
++* net/nfp: fix repeat disable port
++* net/nfp: fix representor port queue release
++* net/nfp: fix resource leak in secondary process
++* net/nfp: fix xstats for multi PF firmware
++* net/nfp: forbid offload flow rules with empty action list
++* net/nfp: remove redundant function call
++* net/nfp: remove unneeded logic for VLAN layer
++* net/ngbe: add special config for YT8531SH-CA PHY
++* net/ngbe: fix MTU range
++* net/ngbe: fix hotplug remove
++* net/ngbe: fix memory leaks
++* net/ngbe: keep PHY power down while device probing
++* net/tap: fix file descriptor check in isolated flow
++* net/txgbe: fix MTU range
++* net/txgbe: fix Rx interrupt
++* net/txgbe: fix Tx hang on queue disable
++* net/txgbe: fix VF promiscuous and allmulticast
++* net/txgbe: fix flow filters in VT mode
++* net/txgbe: fix hotplug remove
++* net/txgbe: fix memory leaks
++* net/txgbe: fix tunnel packet parsing
++* net/txgbe: reconfigure more MAC Rx registers
++* net/txgbe: restrict configuration of VLAN strip offload
++* net/virtio-user: add memcpy check
++* net/virtio-user: fix control queue allocation
++* net/virtio-user: fix control queue allocation for non-vDPA
++* net/virtio-user: fix control queue destruction
++* net/virtio-user: fix shadow control queue notification init
++* net/virtio: fix MAC table update
++* net/vmxnet3: add missing register command
++* net/vmxnet3: fix init logs
++* net: fix outer UDP checksum in Intel prepare helper
++* pcapng: add memcpy check
++* power: fix number of uncore frequencies
++* telemetry: fix connection parameter parsing
++* telemetry: lower log level on socket error
++* test/crypto: fix RSA cases in QAT suite
++* test/crypto: fix allocation comment
++* test/crypto: fix asymmetric capability test
++* test/crypto: fix enqueue/dequeue callback case
++* test/crypto: fix modex comparison
++* test/crypto: remove unused stats in setup
++* test/crypto: validate modex from first non-zero
++* v23.11.2-rc1
++* vdpa/sfc: remove dead code
++* version: 23.11.2-rc2
++* vhost: cleanup resubmit info before inflight setup
++* vhost: fix build with GCC 13
++
++23.11.2 Validation
++~~~~~~~~~~~~~~~~~~
++
++* RedHat Testing:
++
++    * Test scenarios:
++
++        * VM with device assignment(PF) throughput testing(1G hugepage size)
++        * VM with device assignment(PF) throughput testing(2M hugepage size)
++        * VM with device assignment(VF) throughput testing
++        * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
++        * PVP vhost-user 2Q throughput testing
++        * PVP vhost-user 1Q - cross numa node throughput testing
++        * VM with vhost-user 2 queues throughput testing
++        * vhost-user reconnect with dpdk-client, qemu-server qemu reconnect
++        * vhost-user reconnect with dpdk-client, qemu-server ovs reconnect
++* PVP reconnect with dpdk-client, qemu-server
++        * PVP 1Q live migration testing
++        * PVP 1Q cross numa node live migration testing
++        * VM with ovs+dpdk+vhost-user 1Q live migration testing
++        * VM with ovs+dpdk+vhost-user 1Q live migration testing (2M)
++        * VM with ovs+dpdk+vhost-user 2Q live migration testing
++        * VM with ovs+dpdk+vhost-user 4Q live migration testing
++        * Host PF + DPDK testing
++        * Host VF + DPDK testing
++
++    * Test Versions and device:
++
++        * RHEL 9.4
++        * qemu-kvm-8.2.0
++        * kernel 5.14
++        * libvirt 10.0
++        * openvswitch 3.3
++        * X540-AT2 NIC(ixgbe, 10G)
++
++* Nvidia(R) Testing:
++
++    * Test scenarios:
++
++        * Send and receive multiple types of traffic.
++        * testpmd xstats counter test.
++        * testpmd timestamp test.
++        * Changing/checking link status through testpmd.
++        * rte_flow tests (https://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads)
++        * RSS tests.
++        * VLAN filtering, stripping, and insertion tests.
++        * Checksum and TSO tests.
++        * ptype tests.
++        * link_status_interrupt example application tests.
++        * l3fwd-power example application tests.
++        * Multi-process example applications tests.
++        * Hardware LRO tests.
++        * Buffer Split tests.
++        * Tx scheduling tests.
++
++    * Test platform:
++
++        * NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-24.07-0.6.1.0 / Firmware: 22.42.1000
++        * NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-24.07-0.6.1.0 / Firmware: 28.42.1000
++        * DPU: BlueField-2 / DOCA SW version: 2.8.0 / Firmware: 24.42.1000
++
++    * OS/driver combinations:
++
++        * Debian 12 with MLNX_OFED_LINUX-24.04-0.7.0.0.
++        * Ubuntu 20.04.6 with MLNX_OFED_LINUX-24.07-0.6.1.0.
++        * Ubuntu 20.04.6 with rdma-core master (dd9c687).
++        * Ubuntu 20.04.6 with rdma-core v28.0.
++        * Fedora 40 with rdma-core v48.0.
++        * Fedora 42 (Rawhide) with rdma-core v51.0.
++        * OpenSUSE Leap 15.6 with rdma-core v49.1.
++
++* Intel(R) Testing:
++
++    * Basic NIC testing
++
++        * Build & CFLAG compile: cover the build test combinations with the latest GCC/Clang versions and popular OS revisions such as Ubuntu23.10, Ubuntu22.04, Fedora39, RHEL8.9, RHEL9.2, FreeBSD14.0, SUSE15, CentOS7.9, openEuler22.03-SP2, OpenAnolis8.8, etc.
++        * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++        * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++        * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
++        * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc.
++        * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
++
++    * Basic cryptodev and virtio testing
++
++        * Virtio: both functional and performance tests are covered, such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 8.0, etc.
++        * Cryptodev Function test: Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
++        * Cryptodev Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc.
++
++23.11.2 Known Issues
++~~~~~~~~~~~~~~~~~~~~
++
++* Starting dpdk-pdump in a VM with the virtio-0.95 protocol fails
++
++    Fix available in upstream.
++
++* Failed to add a vdev when launching dpdk-pdump as a secondary process with a vdev
++
++    Fix available in upstream.
+diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst
+index ce49eab96f..7ff304d05c 100644
+--- a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst
++++ b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst
+@@ -30,7 +30,7 @@ Compiling the Application
+ 
+ To compile the sample application see :doc:`compiling`.
+ 
+-The application is located in the ``l2fwd-crypt`` sub-directory.
++The application is located in the ``l2fwd-crypto`` sub-directory.
+ 
+ Running the Application
+ -----------------------
+diff --git a/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst b/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst
+index 4a6f33bf4f..9c9684fea7 100644
+--- a/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst
++++ b/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst
+@@ -280,6 +280,9 @@ will use automatic PMD power management.
+ This mode is limited to one queue per core,
+ and has three available power management schemes:
+ 
++``baseline``
++  This mode will not enable any power saving features.
++
+ ``monitor``
+   This will use ``rte_power_monitor()`` function to enter
+   a power-optimized state (subject to platform support).
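For context on the schemes above: an application opts an Rx queue into one of them through the PMD power-management API, and "baseline" simply means not making the call. A minimal sketch, assuming illustrative lcore/port/queue values that are not from the guide:

.. code-block:: c

   #include <rte_power_pmd_mgmt.h>

   /* Sketch: request the "monitor" scheme for queue 0 of port 0,
    * polled by lcore 1. Omitting this call entirely is "baseline". */
   static int enable_monitor_scheme(void)
   {
       return rte_power_ethdev_pmgmt_queue_enable(1 /* lcore */,
               0 /* port */, 0 /* queue */,
               RTE_POWER_MGMT_TYPE_MONITOR);
   }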
+diff --git a/dpdk/doc/guides/testpmd_app_ug/run_app.rst b/dpdk/doc/guides/testpmd_app_ug/run_app.rst
+index 24a086401e..1a9b812a7f 100644
+--- a/dpdk/doc/guides/testpmd_app_ug/run_app.rst
++++ b/dpdk/doc/guides/testpmd_app_ug/run_app.rst
+@@ -422,6 +422,10 @@ The command line options are:
+ 
+     Set the logical core N to perform bitrate calculation.
+ 
++*   ``--latencystats=N``
++
++    Set the logical core N to perform latency and jitter calculations.
++
+ *   ``--print-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|dev_probed|dev_released|flow_aged|err_recovering|recovery_success|recovery_failed|all>``
+ 
+     Enable printing the occurrence of the designated event. Using all will
+diff --git a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+index 447e28e694..45d34d2632 100644
+--- a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst
++++ b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+@@ -2033,7 +2033,7 @@ port config - queue ring size
+ 
+ Configure a rx/tx queue ring size::
+ 
+-   testpmd> port (port_id) (rxq|txq) (queue_id) ring_size (value)
++   testpmd> port config (port_id) (rxq|txq) (queue_id) ring_size (value)
+ 
+ Only take effect after command that (re-)start the port or command that setup specific queue.
+ 
+@@ -2910,14 +2910,6 @@ for port 0 and queue 0::
+ 
+    testpmd> set port cman config 0 0 obj queue mode red 10 100 1
+ 
+-Filter Functions
+-----------------
+-
+-This section details the available filter functions that are available.
+-
+-Note these functions interface the deprecated legacy filtering framework,
+-superseded by *rte_flow*. See `Flow rules management`_.
+-
+ .. _testpmd_rte_flow:
+ 
+ Flow rules management
+@@ -2927,10 +2919,6 @@ Control of the generic flow API (*rte_flow*) is fully exposed through the
+ ``flow`` command (configuration, validation, creation, destruction, queries
+ and operation modes).
+ 
+-Considering *rte_flow* overlaps with all `Filter Functions`_, using both
+-features simultaneously may cause undefined side-effects and is therefore
+-not recommended.
+-
+ ``flow`` syntax
+ ~~~~~~~~~~~~~~~
+ 
+diff --git a/dpdk/doc/guides/tools/dmaperf.rst b/dpdk/doc/guides/tools/dmaperf.rst
+index 9e3e78a6b7..9fc77ca943 100644
+--- a/dpdk/doc/guides/tools/dmaperf.rst
++++ b/dpdk/doc/guides/tools/dmaperf.rst
+@@ -119,7 +119,7 @@ Typical command-line invocation to execute the application:
+ 
+ .. code-block:: console
+ 
+-   dpdk-test-dma-perf --config=./config_dma.ini --result=./res_dma.csv
++   dpdk-test-dma-perf --config ./config_dma.ini --result ./res_dma.csv
+ 
+ Where ``config_dma.ini`` is the configuration file,
+ and ``res_dma.csv`` will be the generated result file.
+diff --git a/dpdk/doc/guides/tools/testeventdev.rst b/dpdk/doc/guides/tools/testeventdev.rst
+index fc36bfb30c..3fcc2c9894 100644
+--- a/dpdk/doc/guides/tools/testeventdev.rst
++++ b/dpdk/doc/guides/tools/testeventdev.rst
+@@ -308,7 +308,7 @@ Example command to run order queue test:
+ 
+ .. code-block:: console
+ 
+-   sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_sw0 -- \
++   sudo <build_dir>/app/dpdk-test-eventdev -c 0x1f -s 0x10 --vdev=event_sw0 -- \
+                 --test=order_queue --plcores 1 --wlcores 2,3
+ 
+ 
+@@ -371,7 +371,7 @@ Example command to run order ``all types queue`` test:
+ 
+ .. code-block:: console
+ 
+-   sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_octeontx -- \
++   sudo <build_dir>/app/dpdk-test-eventdev -c 0x1f -- \
+                         --test=order_atq --plcores 1 --wlcores 2,3
+ 
+ 
+@@ -475,14 +475,14 @@ Example command to run perf queue test:
+ 
+ .. code-block:: console
+ 
+-   sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \
++   sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \
+         --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0
+ 
+ Example command to run perf queue test with producer enqueuing a burst of events:
+ 
+ .. code-block:: console
+ 
+-   sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \
++   sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \
+         --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 \
+         --prod_enq_burst_sz=32
+ 
+@@ -490,15 +490,15 @@ Example command to run perf queue test with ethernet ports:
+ 
+ .. code-block:: console
+ 
+-   sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
++   sudo build/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \
+         --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --prod_type_ethdev
+ 
+ Example command to run perf queue test with event timer adapter:
+ 
+ .. code-block:: console
+ 
+-   sudo  <build_dir>/app/dpdk-test-eventdev --vdev="event_octeontx" -- \
+-                --wlcores 4 --plcores 12 --test perf_queue --stlist=a \
++   sudo  <build_dir>/app/dpdk-test-eventdev -c 0xfff1 \
++                -- --wlcores 4 --plcores 12 --test perf_queue --stlist=a \
+                 --prod_type_timerdev --fwd_latency
+ 
+ PERF_ATQ Test
+@@ -585,15 +585,15 @@ Example command to run perf ``all types queue`` test:
+ 
+ .. code-block:: console
+ 
+-   sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_octeontx -- \
++   sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -- \
+                 --test=perf_atq --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0
+ 
+ Example command to run perf ``all types queue`` test with event timer adapter:
+ 
+ .. code-block:: console
+ 
+-   sudo  <build_dir>/app/dpdk-test-eventdev --vdev="event_octeontx" -- \
+-                --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \
++   sudo  <build_dir>/app/dpdk-test-eventdev -c 0xfff1 \
++                -- --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \
+                 --stlist=a --prod_type_timerdev --fwd_latency
+ 
+ 
+@@ -817,13 +817,13 @@ Example command to run pipeline atq test:
+ 
+ .. code-block:: console
+ 
+-    sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \
++    sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -- \
+         --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a
+ 
+ Example command to run pipeline atq test with vector events:
+ 
+ .. code-block:: console
+ 
+-    sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \
++    sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -- \
+         --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a \
+         --enable_vector  --vector_size 512
+diff --git a/dpdk/drivers/baseband/acc/acc_common.h b/dpdk/drivers/baseband/acc/acc_common.h
+index bda2ad2f7a..6752c256d2 100644
+--- a/dpdk/drivers/baseband/acc/acc_common.h
++++ b/dpdk/drivers/baseband/acc/acc_common.h
+@@ -1110,6 +1110,9 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n,
+ 				req_elem_addr,
+ 				(void *)q->mmio_reg_enqueue);
+ 
++		q->aq_enqueued++;
++		q->sw_ring_head += enq_batch_size;
++
+ 		rte_wmb();
+ 
+ 		/* Start time measurement for enqueue function offload. */
+@@ -1120,8 +1123,6 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n,
+ 
+ 		queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
+ 
+-		q->aq_enqueued++;
+-		q->sw_ring_head += enq_batch_size;
+ 		n -= enq_batch_size;
+ 
+ 	} while (n);
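The hunk above moves the queue bookkeeping ahead of rte_wmb(). A standalone sketch of the ordering rule it relies on follows; demo_q and the doorbell pointer are hypothetical, not the ACC driver's types:

.. code-block:: c

   #include <stdint.h>
   #include <rte_atomic.h>   /* rte_wmb() */

   struct demo_q { uint64_t sw_head; };

   /* Stores issued before rte_wmb() become globally visible before any
    * store issued after it, so the bookkeeping can never be observed
    * lagging behind the notification write. */
   static inline void
   producer_commit(struct demo_q *q, volatile uint64_t *doorbell,
                   uint32_t batch)
   {
       q->sw_head += batch;   /* bookkeeping store             */
       rte_wmb();             /* fence: order it first         */
       *doorbell = batch;     /* MMIO-style notification store */
   }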
+diff --git a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c
+index bb754a5395..1a56e73abd 100644
+--- a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c
++++ b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c
+@@ -1084,6 +1084,9 @@ la12xx_bbdev_remove(struct rte_vdev_device *vdev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++		return 0;
++
+ 	if (vdev == NULL)
+ 		return -EINVAL;
+ 
+diff --git a/dpdk/drivers/bus/dpaa/base/qbman/process.c b/dpdk/drivers/bus/dpaa/base/qbman/process.c
+index 3504ec97db..3e4622f606 100644
+--- a/dpdk/drivers/bus/dpaa/base/qbman/process.c
++++ b/dpdk/drivers/bus/dpaa/base/qbman/process.c
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+  *
+  * Copyright 2011-2016 Freescale Semiconductor Inc.
+- * Copyright 2017,2020 NXP
++ * Copyright 2017,2020,2022,2024 NXP
+  *
+  */
+ #include <assert.h>
+@@ -27,15 +27,16 @@ static int check_fd(void)
+ {
+ 	int ret;
+ 
+-	if (fd >= 0)
+-		return 0;
+ 	ret = pthread_mutex_lock(&fd_init_lock);
+ 	assert(!ret);
++
+ 	/* check again with the lock held */
+ 	if (fd < 0)
+ 		fd = open(PROCESS_PATH, O_RDWR);
++
+ 	ret = pthread_mutex_unlock(&fd_init_lock);
+ 	assert(!ret);
++
+ 	return (fd >= 0) ? 0 : -ENODEV;
+ }
+ 
+diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c
+index e57159f5d8..aaf2a5f43e 100644
+--- a/dpdk/drivers/bus/dpaa/dpaa_bus.c
++++ b/dpdk/drivers/bus/dpaa/dpaa_bus.c
+@@ -187,6 +187,7 @@ dpaa_create_device_list(void)
+ 		if (dev->intr_handle == NULL) {
+ 			DPAA_BUS_LOG(ERR, "Failed to allocate intr handle");
+ 			ret = -ENOMEM;
++			free(dev);
+ 			goto cleanup;
+ 		}
+ 
+@@ -220,7 +221,7 @@ dpaa_create_device_list(void)
+ 
+ 	if (dpaa_sec_available()) {
+ 		DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
+-		return 0;
++		goto qdma_dpaa;
+ 	}
+ 
+ 	/* Creating SEC Devices */
+@@ -238,6 +239,7 @@ dpaa_create_device_list(void)
+ 		if (dev->intr_handle == NULL) {
+ 			DPAA_BUS_LOG(ERR, "Failed to allocate intr handle");
+ 			ret = -ENOMEM;
++			free(dev);
+ 			goto cleanup;
+ 		}
+ 
+@@ -259,6 +261,7 @@ dpaa_create_device_list(void)
+ 
+ 	rte_dpaa_bus.device_count += i;
+ 
++qdma_dpaa:
+ 	/* Creating QDMA Device */
+ 	for (i = 0; i < RTE_DPAA_QDMA_DEVICES; i++) {
+ 		dev = calloc(1, sizeof(struct rte_dpaa_device));
+@@ -791,6 +794,10 @@ dpaa_bus_dev_iterate(const void *start, const char *str,
+ 
+ 	/* Now that name=device_name format is available, split */
+ 	dup = strdup(str);
++	if (dup == NULL) {
++		DPAA_BUS_DEBUG("Dup string (%s) failed!\n", str);
++		return NULL;
++	}
+ 	dev_name = dup + strlen("name=");
+ 
+ 	if (start != NULL) {
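This hunk is one instance of the recurring "verify strdup return" fixes listed in the 23.11.x changelogs above. The shape of the bug and the fix, as a generic sketch in which parse_name() is illustrative (and deliberately omits the later free() for brevity):

.. code-block:: c

   #include <stddef.h>
   #include <string.h>

   /* Pre-fix code computed 'dup + strlen("name=")' without checking
    * dup, crashing on allocation failure; the check must come first. */
   static const char *parse_name(const char *str)
   {
       char *dup = strdup(str);       /* may return NULL */
       if (dup == NULL)
           return NULL;               /* propagate the failure */
       return dup + strlen("name="); /* safe only after the check */
   }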
+diff --git a/dpdk/drivers/bus/fslmc/fslmc_bus.c b/dpdk/drivers/bus/fslmc/fslmc_bus.c
+index 57bfb5111a..89f0f329c0 100644
+--- a/dpdk/drivers/bus/fslmc/fslmc_bus.c
++++ b/dpdk/drivers/bus/fslmc/fslmc_bus.c
+@@ -634,6 +634,10 @@ fslmc_bus_dev_iterate(const void *start, const char *str,
+ 
+ 	/* Now that name=device_name format is available, split */
+ 	dup = strdup(str);
++	if (dup == NULL) {
++		DPAA2_BUS_DEBUG("Dup string (%s) failed!\n", str);
++		return NULL;
++	}
+ 	dev_name = dup + strlen("name=");
+ 
+ 	if (start != NULL) {
+diff --git a/dpdk/drivers/bus/pci/linux/pci_uio.c b/dpdk/drivers/bus/pci/linux/pci_uio.c
+index 97d740dfe5..4afda97858 100644
+--- a/dpdk/drivers/bus/pci/linux/pci_uio.c
++++ b/dpdk/drivers/bus/pci/linux/pci_uio.c
+@@ -237,7 +237,7 @@ pci_uio_alloc_resource(struct rte_pci_device *dev,
+ 	}
+ 	snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num);
+ 
+-	/* save fd if in primary process */
++	/* save fd */
+ 	fd = open(devname, O_RDWR);
+ 	if (fd < 0) {
+ 		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+@@ -275,6 +275,9 @@ pci_uio_alloc_resource(struct rte_pci_device *dev,
+ 		}
+ 	}
+ 
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++		return 0;
++
+ 	/* allocate the mapping details for secondary processes*/
+ 	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ 	if (*uio_res == NULL) {
+diff --git a/dpdk/drivers/bus/pci/linux/pci_vfio.c b/dpdk/drivers/bus/pci/linux/pci_vfio.c
+index 3f3201daf2..baa0b9d9b3 100644
+--- a/dpdk/drivers/bus/pci/linux/pci_vfio.c
++++ b/dpdk/drivers/bus/pci/linux/pci_vfio.c
+@@ -80,7 +80,7 @@ pci_vfio_read_config(const struct rte_pci_device *dev,
+ 	if ((uint64_t)len + offs > size)
+ 		return -1;
+ 
+-	return pread64(fd, buf, len, offset + offs);
++	return pread(fd, buf, len, offset + offs);
+ }
+ 
+ int
+@@ -101,7 +101,7 @@ pci_vfio_write_config(const struct rte_pci_device *dev,
+ 	if ((uint64_t)len + offs > size)
+ 		return -1;
+ 
+-	return pwrite64(fd, buf, len, offset + offs);
++	return pwrite(fd, buf, len, offset + offs);
+ }
+ 
+ /* get PCI BAR number where MSI-X interrupts are */
+@@ -155,7 +155,7 @@ pci_vfio_enable_bus_memory(struct rte_pci_device *dev, int dev_fd)
+ 		return -1;
+ 	}
+ 
+-	ret = pread64(dev_fd, &cmd, sizeof(cmd), offset + RTE_PCI_COMMAND);
++	ret = pread(dev_fd, &cmd, sizeof(cmd), offset + RTE_PCI_COMMAND);
+ 
+ 	if (ret != sizeof(cmd)) {
+ 		RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
+@@ -166,7 +166,7 @@ pci_vfio_enable_bus_memory(struct rte_pci_device *dev, int dev_fd)
+ 		return 0;
+ 
+ 	cmd |= RTE_PCI_COMMAND_MEMORY;
+-	ret = pwrite64(dev_fd, &cmd, sizeof(cmd), offset + RTE_PCI_COMMAND);
++	ret = pwrite(dev_fd, &cmd, sizeof(cmd), offset + RTE_PCI_COMMAND);
+ 
+ 	if (ret != sizeof(cmd)) {
+ 		RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
+@@ -425,7 +425,7 @@ pci_vfio_is_ioport_bar(const struct rte_pci_device *dev, int vfio_dev_fd,
+ 		return -1;
+ 	}
+ 
+-	ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
++	ret = pread(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
+ 			  offset + RTE_PCI_BASE_ADDRESS_0 + bar_index * 4);
+ 	if (ret != sizeof(ioport_bar)) {
+ 		RTE_LOG(ERR, EAL, "Cannot read command (%x) from config space!\n",
+@@ -1250,7 +1250,7 @@ pci_vfio_ioport_read(struct rte_pci_ioport *p,
+ 	if (vfio_dev_fd < 0)
+ 		return;
+ 
+-	if (pread64(vfio_dev_fd, data,
++	if (pread(vfio_dev_fd, data,
+ 		    len, p->base + offset) <= 0)
+ 		RTE_LOG(ERR, EAL,
+ 			"Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
+@@ -1267,7 +1267,7 @@ pci_vfio_ioport_write(struct rte_pci_ioport *p,
+ 	if (vfio_dev_fd < 0)
+ 		return;
+ 
+-	if (pwrite64(vfio_dev_fd, data,
++	if (pwrite(vfio_dev_fd, data,
+ 		     len, p->base + offset) <= 0)
+ 		RTE_LOG(ERR, EAL,
+ 			"Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
+@@ -1298,7 +1298,7 @@ pci_vfio_mmio_read(const struct rte_pci_device *dev, int bar,
+ 	if ((uint64_t)len + offs > size)
+ 		return -1;
+ 
+-	return pread64(fd, buf, len, offset + offs);
++	return pread(fd, buf, len, offset + offs);
+ }
+ 
+ int
+@@ -1318,7 +1318,7 @@ pci_vfio_mmio_write(const struct rte_pci_device *dev, int bar,
+ 	if ((uint64_t)len + offs > size)
+ 		return -1;
+ 
+-	return pwrite64(fd, buf, len, offset + offs);
++	return pwrite(fd, buf, len, offset + offs);
+ }
+ 
+ int
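The pread64()/pwrite64() replacements in this file match the changelog entry "bus/pci: fix build with musl 1.2.4 / Alpine 3.19": musl dropped the LFS64 aliases, and with _FILE_OFFSET_BITS=64 in effect (assumed here) the plain calls already take a 64-bit off_t. A sketch of the portable form:

.. code-block:: c

   #define _FILE_OFFSET_BITS 64   /* 64-bit off_t even on 32-bit ABIs */
   #include <unistd.h>

   /* Illustrative wrapper: pread() with off_t covers the full range
    * that the glibc-only pread64() alias used to provide. */
   static ssize_t cfg_read(int fd, void *buf, size_t len, off_t off)
   {
       return pread(fd, buf, len, off);
   }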
+diff --git a/dpdk/drivers/bus/pci/pci_common_uio.c b/dpdk/drivers/bus/pci/pci_common_uio.c
+index 76c661f054..a06378b239 100644
+--- a/dpdk/drivers/bus/pci/pci_common_uio.c
++++ b/dpdk/drivers/bus/pci/pci_common_uio.c
+@@ -26,7 +26,7 @@ EAL_REGISTER_TAILQ(rte_uio_tailq)
+ static int
+ pci_uio_map_secondary(struct rte_pci_device *dev)
+ {
+-	int fd, i, j;
++	int fd, i = 0, j, res_idx;
+ 	struct mapped_pci_resource *uio_res;
+ 	struct mapped_pci_res_list *uio_res_list =
+ 			RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+@@ -37,7 +37,15 @@ pci_uio_map_secondary(struct rte_pci_device *dev)
+ 		if (rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
+ 			continue;
+ 
+-		for (i = 0; i != uio_res->nb_maps; i++) {
++		/* Map all BARs */
++		for (res_idx = 0; res_idx != PCI_MAX_RESOURCE; res_idx++) {
++			/* skip empty BAR */
++			if (dev->mem_resource[res_idx].phys_addr == 0)
++				continue;
++
++			if (i >= uio_res->nb_maps)
++				return -1;
++
+ 			/*
+ 			 * open devname, to mmap it
+ 			 */
+@@ -71,7 +79,9 @@ pci_uio_map_secondary(struct rte_pci_device *dev)
+ 				}
+ 				return -1;
+ 			}
+-			dev->mem_resource[i].addr = mapaddr;
++			dev->mem_resource[res_idx].addr = mapaddr;
++
++			i++;
+ 		}
+ 		return 0;
+ 	}
+@@ -96,15 +106,15 @@ pci_uio_map_resource(struct rte_pci_device *dev)
+ 	if (rte_intr_dev_fd_set(dev->intr_handle, -1))
+ 		return -1;
+ 
+-	/* secondary processes - use already recorded details */
+-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+-		return pci_uio_map_secondary(dev);
+-
+ 	/* allocate uio resource */
+ 	ret = pci_uio_alloc_resource(dev, &uio_res);
+ 	if (ret)
+ 		return ret;
+ 
++	/* secondary processes - use already recorded details */
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++		return pci_uio_map_secondary(dev);
++
+ 	/* Map all BARs */
+ 	for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ 		/* skip empty BAR */
+@@ -220,6 +230,18 @@ pci_uio_unmap_resource(struct rte_pci_device *dev)
+ 	if (uio_res == NULL)
+ 		return;
+ 
++	/* close fd */
++	if (rte_intr_fd_get(dev->intr_handle) >= 0)
++		close(rte_intr_fd_get(dev->intr_handle));
++	uio_cfg_fd = rte_intr_dev_fd_get(dev->intr_handle);
++	if (uio_cfg_fd >= 0) {
++		close(uio_cfg_fd);
++		rte_intr_dev_fd_set(dev->intr_handle, -1);
++	}
++
++	rte_intr_fd_set(dev->intr_handle, -1);
++	rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN);
++
+ 	/* secondary processes - just free maps */
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return pci_uio_unmap(uio_res);
+@@ -231,16 +253,4 @@ pci_uio_unmap_resource(struct rte_pci_device *dev)
+ 
+ 	/* free uio resource */
+ 	rte_free(uio_res);
+-
+-	/* close fd if in primary process */
+-	if (rte_intr_fd_get(dev->intr_handle) >= 0)
+-		close(rte_intr_fd_get(dev->intr_handle));
+-	uio_cfg_fd = rte_intr_dev_fd_get(dev->intr_handle);
+-	if (uio_cfg_fd >= 0) {
+-		close(uio_cfg_fd);
+-		rte_intr_dev_fd_set(dev->intr_handle, -1);
+-	}
+-
+-	rte_intr_fd_set(dev->intr_handle, -1);
+-	rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN);
+ }
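The pci_uio_map_secondary() change above walks the sparse BAR table while consuming the dense map array recorded by the primary. The correspondence it establishes, as a simplified sketch; field names follow the surrounding driver, but the helper itself is hypothetical:

.. code-block:: c

   /* maps[] has one entry per populated BAR, in BAR order, while
    * mem_resource[] is indexed by BAR number and may contain holes. */
   static int remap_bars(struct rte_pci_device *dev,
                         struct mapped_pci_resource *uio_res)
   {
       int map_i = 0;

       for (int bar = 0; bar < PCI_MAX_RESOURCE; bar++) {
           if (dev->mem_resource[bar].phys_addr == 0)
               continue;                  /* hole: nothing recorded  */
           if (map_i >= uio_res->nb_maps)
               return -1;                 /* primary/secondary skew  */
           dev->mem_resource[bar].addr = uio_res->maps[map_i].addr;
           map_i++;                       /* advance the dense index */
       }
       return 0;
   }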
+diff --git a/dpdk/drivers/bus/vdev/vdev.c b/dpdk/drivers/bus/vdev/vdev.c
+index 7974b27295..dcedd0d4a0 100644
+--- a/dpdk/drivers/bus/vdev/vdev.c
++++ b/dpdk/drivers/bus/vdev/vdev.c
+@@ -247,6 +247,10 @@ alloc_devargs(const char *name, const char *args)
+ 		devargs->data = strdup(args);
+ 	else
+ 		devargs->data = strdup("");
++	if (devargs->data == NULL) {
++		free(devargs);
++		return NULL;
++	}
+ 	devargs->args = devargs->data;
+ 
+ 	ret = strlcpy(devargs->name, name, sizeof(devargs->name));
+@@ -259,6 +263,22 @@ alloc_devargs(const char *name, const char *args)
+ 	return devargs;
+ }
+ 
++static struct rte_devargs *
++vdev_devargs_lookup(const char *name)
++{
++	struct rte_devargs *devargs;
++	char dev_name[32];
++
++	RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {
++		devargs->bus->parse(devargs->name, &dev_name);
++		if (strcmp(dev_name, name) == 0) {
++			VDEV_LOG(INFO, "devargs matched %s", dev_name);
++			return devargs;
++		}
++	}
++	return NULL;
++}
++
+ static int
+ insert_vdev(const char *name, const char *args,
+ 		struct rte_vdev_device **p_dev,
+@@ -271,7 +291,11 @@ insert_vdev(const char *name, const char *args,
+ 	if (name == NULL)
+ 		return -EINVAL;
+ 
+-	devargs = alloc_devargs(name, args);
++	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
++		devargs = alloc_devargs(name, args);
++	else
++		devargs = vdev_devargs_lookup(name);
++
+ 	if (!devargs)
+ 		return -ENOMEM;
+ 
+@@ -283,7 +307,6 @@ insert_vdev(const char *name, const char *args,
+ 
+ 	dev->device.bus = &rte_vdev_bus;
+ 	dev->device.numa_node = SOCKET_ID_ANY;
+-	dev->device.name = devargs->name;
+ 
+ 	if (find_vdev(name)) {
+ 		/*
+@@ -298,6 +321,7 @@ insert_vdev(const char *name, const char *args,
+ 	if (init)
+ 		rte_devargs_insert(&devargs);
+ 	dev->device.devargs = devargs;
++	dev->device.name = devargs->name;
+ 	TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+ 
+ 	if (p_dev)
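Two fixes meet in this insert_vdev() hunk: secondary processes reuse the devargs recorded by the primary instead of allocating their own, and device.name is now assigned only after rte_devargs_insert(). Since rte_devargs_insert() takes a double pointer and may leave the caller's pointer referring to a different, stored object, taking the name earlier could leave it dangling. A sketch of the safe ordering, simplified from the hunk:

.. code-block:: c

   #include <rte_devargs.h>

   /* Assign the name from whatever object ends up stored, i.e. only
    * after the insert has settled ownership of 'devargs'. */
   rte_devargs_insert(&devargs);
   dev->device.devargs = devargs;
   dev->device.name = devargs->name;   /* aliases the stored entry */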
+diff --git a/dpdk/drivers/common/cnxk/cnxk_security.c b/dpdk/drivers/common/cnxk/cnxk_security.c
+index a8c3ba90cd..40685d0912 100644
+--- a/dpdk/drivers/common/cnxk/cnxk_security.c
++++ b/dpdk/drivers/common/cnxk/cnxk_security.c
+@@ -618,235 +618,6 @@ cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
+ 	return !!sa->w2.s.valid;
+ }
+ 
+-static inline int
+-ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm,
+-		  struct rte_crypto_sym_xform *crypto_xfrm)
+-{
+-	if (crypto_xfrm->next == NULL)
+-		return -EINVAL;
+-
+-	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
+-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+-			return -EINVAL;
+-	} else {
+-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
+-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+-			return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
+-			       uint8_t *cipher_key, uint8_t *hmac_opad_ipad,
+-			       struct rte_security_ipsec_xform *ipsec_xfrm,
+-			       struct rte_crypto_sym_xform *crypto_xfrm)
+-{
+-	struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
+-	int rc, length, auth_key_len;
+-	const uint8_t *key = NULL;
+-	uint8_t ccm_flag = 0;
+-
+-	/* Set direction */
+-	switch (ipsec_xfrm->direction) {
+-	case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
+-		ctl->direction = ROC_IE_SA_DIR_INBOUND;
+-		auth_xfrm = crypto_xfrm;
+-		cipher_xfrm = crypto_xfrm->next;
+-		break;
+-	case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
+-		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
+-		cipher_xfrm = crypto_xfrm;
+-		auth_xfrm = crypto_xfrm->next;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	/* Set protocol - ESP vs AH */
+-	switch (ipsec_xfrm->proto) {
+-	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
+-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
+-		break;
+-	case RTE_SECURITY_IPSEC_SA_PROTO_AH:
+-		return -ENOTSUP;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	/* Set mode - transport vs tunnel */
+-	switch (ipsec_xfrm->mode) {
+-	case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
+-		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
+-		break;
+-	case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
+-		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	/* Set encryption algorithm */
+-	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+-		length = crypto_xfrm->aead.key.length;
+-
+-		switch (crypto_xfrm->aead.algo) {
+-		case RTE_CRYPTO_AEAD_AES_GCM:
+-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
+-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
+-			memcpy(salt, &ipsec_xfrm->salt, 4);
+-			key = crypto_xfrm->aead.key.data;
+-			break;
+-		case RTE_CRYPTO_AEAD_AES_CCM:
+-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CCM;
+-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
+-			ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
+-			*salt = ccm_flag;
+-			memcpy(PLT_PTR_ADD(salt, 1), &ipsec_xfrm->salt, 3);
+-			key = crypto_xfrm->aead.key.data;
+-			break;
+-		default:
+-			return -ENOTSUP;
+-		}
+-
+-	} else {
+-		rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm);
+-		if (rc)
+-			return rc;
+-
+-		switch (cipher_xfrm->cipher.algo) {
+-		case RTE_CRYPTO_CIPHER_AES_CBC:
+-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
+-			break;
+-		case RTE_CRYPTO_CIPHER_AES_CTR:
+-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
+-			break;
+-		default:
+-			return -ENOTSUP;
+-		}
+-
+-		switch (auth_xfrm->auth.algo) {
+-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
+-			break;
+-		default:
+-			return -ENOTSUP;
+-		}
+-		auth_key_len = auth_xfrm->auth.key.length;
+-		if (auth_key_len < 20 || auth_key_len > 64)
+-			return -ENOTSUP;
+-
+-		key = cipher_xfrm->cipher.key.data;
+-		length = cipher_xfrm->cipher.key.length;
+-
+-		ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
+-	}
+-
+-	switch (length) {
+-	case ROC_CPT_AES128_KEY_LEN:
+-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
+-		break;
+-	case ROC_CPT_AES192_KEY_LEN:
+-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
+-		break;
+-	case ROC_CPT_AES256_KEY_LEN:
+-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	memcpy(cipher_key, key, length);
+-
+-	if (ipsec_xfrm->options.esn)
+-		ctl->esn_en = 1;
+-
+-	ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi);
+-	return 0;
+-}
+-
+-int
+-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
+-			   struct rte_security_ipsec_xform *ipsec_xfrm,
+-			   struct rte_crypto_sym_xform *crypto_xfrm)
+-{
+-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
+-	int rc;
+-
+-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
+-					    sa->hmac_key, ipsec_xfrm,
+-					    crypto_xfrm);
+-	if (rc)
+-		return rc;
+-
+-	rte_wmb();
+-
+-	/* Enable SA */
+-	ctl->valid = 1;
+-	return 0;
+-}
+-
+-int
+-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
+-			    struct rte_security_ipsec_xform *ipsec_xfrm,
+-			    struct rte_crypto_sym_xform *crypto_xfrm)
+-{
+-	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
+-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
+-	int rc;
+-
+-	/* Fill common params */
+-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
+-					    sa->hmac_key, ipsec_xfrm,
+-					    crypto_xfrm);
+-	if (rc)
+-		return rc;
+-
+-	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+-		goto skip_tunnel_info;
+-
+-	/* Tunnel header info */
+-	switch (tunnel->type) {
+-	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
+-		memcpy(&sa->ip_src, &tunnel->ipv4.src_ip,
+-		       sizeof(struct in_addr));
+-		memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip,
+-		       sizeof(struct in_addr));
+-		break;
+-	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
+-		return -ENOTSUP;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	/* Update udp encap ports */
+-	if (ipsec_xfrm->options.udp_encap == 1) {
+-		sa->udp_src = 4500;
+-		sa->udp_dst = 4500;
+-	}
+-
+-skip_tunnel_info:
+-	rte_wmb();
+-
+-	/* Enable SA */
+-	ctl->valid = 1;
+-	return 0;
+-}
+-
+-bool
+-cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa)
+-{
+-	return !!sa->ctl.valid;
+-}
+-
+-bool
+-cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa)
+-{
+-	return !!sa->ctl.valid;
+-}
+-
+ uint8_t
+ cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
+ 		     enum rte_crypto_auth_algorithm a_algo,
+diff --git a/dpdk/drivers/common/cnxk/cnxk_security.h b/dpdk/drivers/common/cnxk/cnxk_security.h
+index 2277ce9144..72628ef3b8 100644
+--- a/dpdk/drivers/common/cnxk/cnxk_security.h
++++ b/dpdk/drivers/common/cnxk/cnxk_security.h
+@@ -48,18 +48,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
+ bool __roc_api cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa);
+ bool __roc_api cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa);
+ 
+-/* [CN9K, CN10K) */
+-int __roc_api
+-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
+-			   struct rte_security_ipsec_xform *ipsec_xfrm,
+-			   struct rte_crypto_sym_xform *crypto_xfrm);
+-int __roc_api
+-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
+-			    struct rte_security_ipsec_xform *ipsec_xfrm,
+-			    struct rte_crypto_sym_xform *crypto_xfrm);
+-bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa);
+-bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa);
+-
+ /* [CN9K] */
+ int __roc_api
+ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
+diff --git a/dpdk/drivers/common/cnxk/roc_ae.h b/dpdk/drivers/common/cnxk/roc_ae.h
+index a9a08d9fb9..7886b9d107 100644
+--- a/dpdk/drivers/common/cnxk/roc_ae.h
++++ b/dpdk/drivers/common/cnxk/roc_ae.h
+@@ -53,29 +53,31 @@ typedef enum {
+ 	ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE = 0x11
+ } roc_ae_error_code;
+ 
++#define ROC_AE_EC_DATA_MAX 66
++
+ /* Prime and order fields of built-in elliptic curves */
+ struct roc_ae_ec_group {
+ 	struct {
+ 		/* P521 maximum length */
+-		uint8_t data[66];
++		uint8_t data[ROC_AE_EC_DATA_MAX];
+ 		unsigned int length;
+ 	} prime;
+ 
+ 	struct {
+ 		/* P521 maximum length */
+-		uint8_t data[66];
++		uint8_t data[ROC_AE_EC_DATA_MAX];
+ 		unsigned int length;
+ 	} order;
+ 
+ 	struct {
+ 		/* P521 maximum length */
+-		uint8_t data[66];
++		uint8_t data[ROC_AE_EC_DATA_MAX];
+ 		unsigned int length;
+ 	} consta;
+ 
+ 	struct {
+ 		/* P521 maximum length */
+-		uint8_t data[66];
++		uint8_t data[ROC_AE_EC_DATA_MAX];
+ 		unsigned int length;
+ 	} constb;
+ };
+@@ -86,18 +88,18 @@ struct roc_ae_ec_ctx {
+ 
+ 	/* Private key */
+ 	struct {
+-		uint8_t data[66];
++		uint8_t data[ROC_AE_EC_DATA_MAX];
+ 		unsigned int length;
+ 	} pkey;
+ 
+ 	/* Public key */
+ 	struct {
+ 		struct {
+-			uint8_t data[66];
++			uint8_t data[ROC_AE_EC_DATA_MAX];
+ 			unsigned int length;
+ 		} x;
+ 		struct {
+-			uint8_t data[66];
++			uint8_t data[ROC_AE_EC_DATA_MAX];
+ 			unsigned int length;
+ 		} y;
+ 	} q;
+diff --git a/dpdk/drivers/common/cnxk/roc_cpt.c b/dpdk/drivers/common/cnxk/roc_cpt.c
+index 981e85a204..4e23d8c135 100644
+--- a/dpdk/drivers/common/cnxk/roc_cpt.c
++++ b/dpdk/drivers/common/cnxk/roc_cpt.c
+@@ -756,7 +756,7 @@ roc_cpt_dev_init(struct roc_cpt *roc_cpt)
+ 	rc = dev_init(dev, pci_dev);
+ 	if (rc) {
+ 		plt_err("Failed to init roc device");
+-		goto fail;
++		return rc;
+ 	}
+ 
+ 	cpt->pci_dev = pci_dev;
+@@ -788,6 +788,7 @@ roc_cpt_dev_init(struct roc_cpt *roc_cpt)
+ 	return 0;
+ 
+ fail:
++	dev_fini(dev, pci_dev);
+ 	return rc;
+ }
+ 
+diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c
+index e7e89bf3d6..14aff233d5 100644
+--- a/dpdk/drivers/common/cnxk/roc_dev.c
++++ b/dpdk/drivers/common/cnxk/roc_dev.c
+@@ -198,9 +198,8 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
+ 			vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
+ 			if (vf_msg) {
+ 				mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
+-				memcpy((uint8_t *)vf_msg +
+-				       sizeof(struct mbox_msghdr), &linfo,
+-				       sizeof(struct cgx_link_user_info));
++				mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr), &linfo,
++					    sizeof(struct cgx_link_user_info));
+ 
+ 				vf_msg->rc = msg->rc;
+ 				vf_msg->pcifunc = msg->pcifunc;
+@@ -503,6 +502,8 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
+ 	size_t size;
+ 
+ 	size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
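++	/* Ignore messages too short to carry a complete mailbox header. */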
++	if (size < sizeof(struct mbox_msghdr))
++		return;
+ 	/* Send UP message to all VF's */
+ 	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
+ 		/* VF active */
+diff --git a/dpdk/drivers/common/cnxk/roc_ie_on.h b/dpdk/drivers/common/cnxk/roc_ie_on.h
+index 9933ffa148..11c995e9d1 100644
+--- a/dpdk/drivers/common/cnxk/roc_ie_on.h
++++ b/dpdk/drivers/common/cnxk/roc_ie_on.h
+@@ -269,66 +269,6 @@ struct roc_ie_on_inb_sa {
+ #define ROC_IE_ON_UCC_L2_HDR_INFO_ERR	  0xCF
+ #define ROC_IE_ON_UCC_L2_HDR_LEN_ERR	  0xE0
+ 
+-struct roc_ie_onf_sa_ctl {
+-	uint32_t spi;
+-	uint64_t exp_proto_inter_frag : 8;
+-	uint64_t rsvd_41_40 : 2;
+-	/* Disable SPI, SEQ data in RPTR for Inbound inline */
+-	uint64_t spi_seq_dis : 1;
+-	uint64_t esn_en : 1;
+-	uint64_t rsvd_44_45 : 2;
+-	uint64_t encap_type : 2;
+-	uint64_t enc_type : 3;
+-	uint64_t rsvd_48 : 1;
+-	uint64_t auth_type : 4;
+-	uint64_t valid : 1;
+-	uint64_t direction : 1;
+-	uint64_t outer_ip_ver : 1;
+-	uint64_t inner_ip_ver : 1;
+-	uint64_t ipsec_mode : 1;
+-	uint64_t ipsec_proto : 1;
+-	uint64_t aes_key_len : 2;
+-};
+-
+-struct roc_onf_ipsec_outb_sa {
+-	/* w0 */
+-	struct roc_ie_onf_sa_ctl ctl;
+-
+-	/* w1 */
+-	uint8_t nonce[4];
+-	uint16_t udp_src;
+-	uint16_t udp_dst;
+-
+-	/* w2 */
+-	uint32_t ip_src;
+-	uint32_t ip_dst;
+-
+-	/* w3-w6 */
+-	uint8_t cipher_key[32];
+-
+-	/* w7-w12 */
+-	uint8_t hmac_key[48];
+-};
+-
+-struct roc_onf_ipsec_inb_sa {
+-	/* w0 */
+-	struct roc_ie_onf_sa_ctl ctl;
+-
+-	/* w1 */
+-	uint8_t nonce[4]; /* Only for AES-GCM */
+-	uint32_t unused;
+-
+-	/* w2 */
+-	uint32_t esn_hi;
+-	uint32_t esn_low;
+-
+-	/* w3-w6 */
+-	uint8_t cipher_key[32];
+-
+-	/* w7-w12 */
+-	uint8_t hmac_key[48];
+-};
+-
+ #define ROC_ONF_IPSEC_INB_MAX_L2_SZ	  32UL
+ #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ	  30UL
+ #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2)
+diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h
+index 05434aec5a..7eaff0a0eb 100644
+--- a/dpdk/drivers/common/cnxk/roc_mbox.h
++++ b/dpdk/drivers/common/cnxk/roc_mbox.h
+@@ -1420,12 +1420,12 @@ struct nix_cn10k_aq_enq_req {
+ struct nix_cn10k_aq_enq_rsp {
+ 	struct mbox_msghdr hdr;
+ 	union {
+-		struct nix_cn10k_rq_ctx_s rq;
+-		struct nix_cn10k_sq_ctx_s sq;
+-		struct nix_cq_ctx_s cq;
+-		struct nix_rsse_s rss;
+-		struct nix_rx_mce_s mce;
+-		struct nix_band_prof_s prof;
++		__io struct nix_cn10k_rq_ctx_s rq;
++		__io struct nix_cn10k_sq_ctx_s sq;
++		__io struct nix_cq_ctx_s cq;
++		__io struct nix_rsse_s rss;
++		__io struct nix_rx_mce_s mce;
++		__io struct nix_band_prof_s prof;
+ 	};
+ };
+ 
+@@ -1661,11 +1661,11 @@ struct nix_rq_cpt_field_mask_cfg_req {
+ #define RQ_CTX_MASK_MAX 6
+ 	union {
+ 		uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
+-		struct nix_cn10k_rq_ctx_s rq_set;
++		__io struct nix_cn10k_rq_ctx_s rq_set;
+ 	};
+ 	union {
+ 		uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
+-		struct nix_cn10k_rq_ctx_s rq_mask;
++		__io struct nix_cn10k_rq_ctx_s rq_mask;
+ 	};
+ 	struct nix_lf_rx_ipec_cfg1_req {
+ 		uint32_t __io spb_cpt_aura;
+diff --git a/dpdk/drivers/common/cnxk/roc_nix.c b/dpdk/drivers/common/cnxk/roc_nix.c
+index f64933a1d9..afbc3eb901 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix.c
++++ b/dpdk/drivers/common/cnxk/roc_nix.c
+@@ -482,7 +482,7 @@ skip_dev_init:
+ 	sdp_lbk_id_update(pci_dev, nix);
+ 	nix->pci_dev = pci_dev;
+ 	nix->reta_sz = reta_sz;
+-	nix->mtu = ROC_NIX_DEFAULT_HW_FRS;
++	nix->mtu = roc_nix_max_pkt_len(roc_nix);
+ 	nix->dmac_flt_idx = -1;
+ 
+ 	/* Register error and ras interrupts */
+diff --git a/dpdk/drivers/common/cnxk/roc_nix.h b/dpdk/drivers/common/cnxk/roc_nix.h
+index acdd1c4cbc..250d710c07 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix.h
++++ b/dpdk/drivers/common/cnxk/roc_nix.h
+@@ -267,8 +267,6 @@ struct roc_nix_eeprom_info {
+ #define ROC_NIX_RSS_KEY_LEN	     48 /* 352 Bits */
+ #define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1)
+ 
+-#define ROC_NIX_DEFAULT_HW_FRS 1514
+-
+ #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11
+ #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2
+ 
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.c b/dpdk/drivers/common/cnxk/roc_nix_inl.c
+index 750fd08355..bc9cc2f429 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_inl.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_inl.c
+@@ -620,8 +620,7 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
+ 		return -EFAULT;
+ 
+ 	PLT_SET_USED(max_frags);
+-	if (idev == NULL)
+-		return -ENOTSUP;
++
+ 	roc_cpt = idev->cpt;
+ 	if (!roc_cpt) {
+ 		plt_err("Cannot support inline inbound, cryptodev not probed");
+@@ -876,7 +875,8 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
+ 	inl_dev = idev->nix_inl_dev;
+ 
+ 	roc_nix->custom_meta_aura_ena = (roc_nix->local_meta_aura_ena &&
+-					 (inl_dev->is_multi_channel || roc_nix->custom_sa_action));
++					 ((inl_dev && inl_dev->is_multi_channel) ||
++					  roc_nix->custom_sa_action));
+ 	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+ 		nix->need_meta_aura = true;
+ 		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.h b/dpdk/drivers/common/cnxk/roc_nix_inl.h
+index ab1e9c0f98..f5ce26f03f 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_inl.h
++++ b/dpdk/drivers/common/cnxk/roc_nix_inl.h
+@@ -4,24 +4,6 @@
+ #ifndef _ROC_NIX_INL_H_
+ #define _ROC_NIX_INL_H_
+ 
+-/* ONF INB HW area */
+-#define ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ                                        \
+-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_inb_sa), ROC_ALIGN)
+-/* ONF INB SW reserved area */
+-#define ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD 384
+-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ                                        \
+-	(ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD)
+-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2 9
+-
+-/* ONF OUTB HW area */
+-#define ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ                                       \
+-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_outb_sa), ROC_ALIGN)
+-/* ONF OUTB SW reserved area */
+-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD 128
+-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ                                       \
+-	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
+-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
+-
+ /* ON INB HW area */
+ #define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ                                         \
+ 	PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN)
+@@ -31,10 +13,10 @@
+ 	(ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD)
+ #define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10
+ 
+-/* ONF OUTB HW area */
++/* ON OUTB HW area */
+ #define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ                                        \
+ 	PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN)
+-/* ONF OUTB SW reserved area */
++/* ON OUTB SW reserved area */
+ #define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256
+ #define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ                                        \
+ 	(ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD)
+@@ -86,34 +68,6 @@ roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa)
+ 	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ);
+ }
+ 
+-static inline struct roc_onf_ipsec_inb_sa *
+-roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
+-{
+-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2;
+-
+-	return PLT_PTR_ADD(base, off);
+-}
+-
+-static inline struct roc_onf_ipsec_outb_sa *
+-roc_nix_inl_onf_ipsec_outb_sa(uintptr_t base, uint64_t idx)
+-{
+-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2;
+-
+-	return PLT_PTR_ADD(base, off);
+-}
+-
+-static inline void *
+-roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(void *sa)
+-{
+-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ);
+-}
+-
+-static inline void *
+-roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa)
+-{
+-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ);
+-}
+-
+ /* Inline device SSO Work callback */
+ typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
+ 					  uint32_t soft_exp_event);
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_rss.c b/dpdk/drivers/common/cnxk/roc_nix_rss.c
+index 3599eb9bae..2b88e1360d 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_rss.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_rss.c
+@@ -196,7 +196,7 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group,
+ 	if (rc)
+ 		return rc;
+ 
+-	memcpy(&nix->reta[group], reta, ROC_NIX_RSS_RETA_MAX);
++	memcpy(&nix->reta[group], reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
+ 	return 0;
+ }
+ 
+@@ -209,7 +209,7 @@ roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group,
+ 	if (group >= ROC_NIX_RSS_GRPS)
+ 		return NIX_ERR_PARAM;
+ 
+-	memcpy(reta, &nix->reta[group], ROC_NIX_RSS_RETA_MAX);
++	memcpy(reta, &nix->reta[group], sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
+ 	return 0;
+ }
+ 
+diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm.c b/dpdk/drivers/common/cnxk/roc_nix_tm.c
+index ece88b5e99..9e5e614b3b 100644
+--- a/dpdk/drivers/common/cnxk/roc_nix_tm.c
++++ b/dpdk/drivers/common/cnxk/roc_nix_tm.c
+@@ -328,6 +328,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ 	uint8_t k = 0;
+ 	int rc = 0, i;
+ 
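++	/* TM backpressure does not apply to SDP interfaces; nothing to do. */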
++	if (roc_nix_is_sdp(roc_nix))
++		return 0;
++
+ 	sq_s = nix->sqs[sq];
+ 	if (!sq_s)
+ 		return -ENOENT;
+diff --git a/dpdk/drivers/common/cnxk/roc_npc.c b/dpdk/drivers/common/cnxk/roc_npc.c
+index a0d88c0743..fcede1d0b7 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc.c
++++ b/dpdk/drivers/common/cnxk/roc_npc.c
+@@ -351,6 +351,8 @@ roc_npc_fini(struct roc_npc *roc_npc)
+ 	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+ 	int rc;
+ 
++	npc_aging_ctrl_thread_destroy(roc_npc);
++
+ 	rc = npc_flow_free_all_resources(npc);
+ 	if (rc) {
+ 		plt_err("Error when deleting NPC MCAM entries, counters");
+@@ -1626,8 +1628,7 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow)
+ 	if (flow->has_age_action)
+ 		npc_age_flow_list_entry_delete(roc_npc, flow);
+ 
+-	if (roc_npc->flow_age.age_flow_refcnt == 0 &&
+-		plt_thread_is_valid(roc_npc->flow_age.aged_flows_poll_thread))
++	if (roc_npc->flow_age.age_flow_refcnt == 0)
+ 		npc_aging_ctrl_thread_destroy(roc_npc);
+ 
+ done:
+diff --git a/dpdk/drivers/common/cnxk/roc_npc_aging.c b/dpdk/drivers/common/cnxk/roc_npc_aging.c
+index 254dd2139b..258c15e341 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc_aging.c
++++ b/dpdk/drivers/common/cnxk/roc_npc_aging.c
+@@ -133,6 +133,21 @@ exit:
+ 	return rc;
+ }
+ 
++static void
++npc_age_wait_until(struct roc_npc_flow_age *flow_age)
++{
++#define NPC_AGE_WAIT_TIMEOUT_MS 1000
++#define NPC_AGE_WAIT_TIMEOUT_US (NPC_AGE_WAIT_TIMEOUT_MS * NPC_AGE_WAIT_TIMEOUT_MS)
++	uint64_t timeout = 0;
++	uint64_t sleep = 10 * NPC_AGE_WAIT_TIMEOUT_MS;
++
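++	/* Sleep in 10 ms slices so a pending thread-exit request is noticed
++	 * without waiting out the full aging poll interval.
++	 */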
++	do {
++		plt_delay_us(sleep);
++		timeout += sleep;
++	} while (!flow_age->aged_flows_get_thread_exit &&
++		 (timeout < ((uint64_t)flow_age->aging_poll_freq * NPC_AGE_WAIT_TIMEOUT_US)));
++}
++
+ uint32_t
+ npc_aged_flows_get(void *args)
+ {
+@@ -197,7 +212,7 @@ npc_aged_flows_get(void *args)
+ 		plt_seqcount_write_end(&flow_age->seq_cnt);
+ 
+ lbl_sleep:
+-		sleep(flow_age->aging_poll_freq);
++		npc_age_wait_until(flow_age);
+ 	}
+ 
+ 	return 0;
+@@ -234,8 +249,11 @@ npc_age_flow_list_entry_delete(struct roc_npc *roc_npc,
+ {
+ 	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+ 	struct npc_age_flow_list_head *list;
++	struct roc_npc_flow_age *flow_age;
+ 	struct npc_age_flow_entry *curr;
+ 
++	flow_age = &roc_npc->flow_age;
++
+ 	list = &npc->age_flow_list;
+ 	curr = TAILQ_FIRST(list);
+ 
+@@ -244,6 +262,7 @@ npc_age_flow_list_entry_delete(struct roc_npc *roc_npc,
+ 
+ 	while (curr) {
+ 		if (flow->mcam_id == curr->flow->mcam_id) {
++			plt_bitmap_clear(flow_age->aged_flows, flow->mcam_id);
+ 			TAILQ_REMOVE(list, curr, next);
+ 			plt_free(curr);
+ 			break;
+@@ -299,9 +318,11 @@ npc_aging_ctrl_thread_destroy(struct roc_npc *roc_npc)
+ 	struct roc_npc_flow_age *flow_age;
+ 
+ 	flow_age = &roc_npc->flow_age;
+-	flow_age->aged_flows_get_thread_exit = true;
+-	plt_thread_join(flow_age->aged_flows_poll_thread, NULL);
+-	npc_aged_flows_bitmap_free(roc_npc);
++	if (plt_thread_is_valid(flow_age->aged_flows_poll_thread)) {
++		flow_age->aged_flows_get_thread_exit = true;
++		plt_thread_join(flow_age->aged_flows_poll_thread, NULL);
++		npc_aged_flows_bitmap_free(roc_npc);
++	}
+ }
+ 
+ void *
+diff --git a/dpdk/drivers/common/cnxk/roc_npc_parse.c b/dpdk/drivers/common/cnxk/roc_npc_parse.c
+index ecd1b3e13b..3c288070fb 100644
+--- a/dpdk/drivers/common/cnxk/roc_npc_parse.c
++++ b/dpdk/drivers/common/cnxk/roc_npc_parse.c
+@@ -1092,6 +1092,7 @@ npc_parse_lf(struct npc_parse_state *pst)
+ {
+ 	const struct roc_npc_item_info *pattern, *last_pattern;
+ 	char hw_mask[NPC_MAX_EXTRACT_HW_LEN];
++	const struct roc_npc_flow_item_eth *eth_item;
+ 	struct npc_parse_item_info info;
+ 	int lid, lt, lflags;
+ 	int nr_vlans = 0;
+@@ -1108,10 +1109,12 @@ npc_parse_lf(struct npc_parse_state *pst)
+ 	lt = NPC_LT_LF_TU_ETHER;
+ 	lflags = 0;
+ 
++	eth_item = pst->pattern->spec;
++
+ 	/* No match support for vlan tags */
+ 	info.def_mask = NULL;
+ 	info.hw_mask = NULL;
+-	info.len = pst->pattern->size;
++	info.len = sizeof(eth_item->hdr);
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+ 	info.hw_hdr_len = 0;
+@@ -1142,12 +1145,15 @@ npc_parse_lf(struct npc_parse_state *pst)
+ 	}
+ 
+ 	info.hw_mask = &hw_mask;
+-	info.len = pst->pattern->size;
++	info.len = sizeof(eth_item->hdr);
+ 	info.hw_hdr_len = 0;
+ 	npc_get_hw_supp_mask(pst, &info, lid, lt);
+ 	info.spec = NULL;
+ 	info.mask = NULL;
+ 
++	if (eth_item && eth_item->has_vlan)
++		pst->set_vlan_ltype_mask = true;
++
+ 	rc = npc_parse_item_basic(pst->pattern, &info);
+ 	if (rc != 0)
+ 		return rc;
+diff --git a/dpdk/drivers/common/cnxk/roc_platform.c b/dpdk/drivers/common/cnxk/roc_platform.c
+index 15cbb6d68f..80d81742a2 100644
+--- a/dpdk/drivers/common/cnxk/roc_platform.c
++++ b/dpdk/drivers/common/cnxk/roc_platform.c
+@@ -85,15 +85,15 @@ roc_plt_init(void)
+ 	return 0;
+ }
+ 
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_base, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_mbox, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_cpt, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ml, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_npa, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_nix, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_npc, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_sso, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_tim, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_tm, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_dpi, NOTICE);
+-RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ree, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_base, base, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_mbox, mbox, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_cpt, crypto, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_ml, ml, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_npa, mempool, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_nix, nix, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_npc, flow, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_sso, event, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_tim, timer, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_tm, tm, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_dpi, dpi, NOTICE);
++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_ree, ree, NOTICE);
+diff --git a/dpdk/drivers/common/cnxk/version.map b/dpdk/drivers/common/cnxk/version.map
+index aa884a8fe2..e718c13acb 100644
+--- a/dpdk/drivers/common/cnxk/version.map
++++ b/dpdk/drivers/common/cnxk/version.map
+@@ -17,10 +17,6 @@ INTERNAL {
+ 	cnxk_logtype_sso;
+ 	cnxk_logtype_tim;
+ 	cnxk_logtype_tm;
+-	cnxk_onf_ipsec_inb_sa_fill;
+-	cnxk_onf_ipsec_outb_sa_fill;
+-	cnxk_onf_ipsec_inb_sa_valid;
+-	cnxk_onf_ipsec_outb_sa_valid;
+ 	cnxk_ot_ipsec_inb_sa_fill;
+ 	cnxk_ot_ipsec_outb_sa_fill;
+ 	cnxk_ot_ipsec_inb_sa_valid;
+diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h b/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h
+index 95fc3ea5ba..54fca3bc67 100644
+--- a/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h
++++ b/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h
+@@ -731,6 +731,79 @@ static inline void __gen_auth_key(struct program *program,
+ 			     authdata->key, authdata->key_type);
+ }
+ 
++/**
++ * rta_inline_ipsec_query() - Provide indications on which data items can be inlined
++ *                      and which shall be referenced in an IPsec shared descriptor.
++ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
++ *               excluding the data items to be inlined (or corresponding
++ *               pointer if an item is not inlined). Each cnstr_* function that
++ *               generates descriptors should have a define mentioning
++ *               corresponding length.
++ * @jd_len: Maximum length of the job descriptor(s) that will be used
++ *          together with the shared descriptor.
++ * @data_len: Array of lengths of the data items trying to be inlined
++ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
++ *            otherwise.
++ * @count: Number of data items (size of @data_len array); must be <= 32
++ * @auth_algtype: Authentication algorithm type.
++ * @auth_index: Index value of data_len for authentication key length.
++ *		-1 if authentication key length is not present in data_len.
++ *
++ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
++ *         check @inl_mask for details.
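++ *
++ * Minimal usage sketch (identifiers illustrative; see the dpaa_sec caller
++ * for a real invocation): cipher key length at index 0, auth key length at
++ * index 1:
++ *
++ *   unsigned int lens[2] = { cipher_keylen, auth_keylen };
++ *   uint32_t inl_mask = 0;
++ *   err = rta_inline_ipsec_query(BASE_DESC_LEN, DESC_JOB_IO_LEN, lens,
++ *                                &inl_mask, 2, authdata.algtype, 1);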
++ */
++static inline int
++rta_inline_ipsec_query(unsigned int sd_base_len,
++		       unsigned int jd_len,
++		       unsigned int *data_len,
++		       uint32_t *inl_mask,
++		       unsigned int count,
++		       uint32_t auth_algtype,
++		       int32_t auth_index)
++{
++	uint32_t dkp_protid;
++
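++	/* Map the IPsec auth algorithm to the DKP protocol ID used to derive
++	 * the split key length.
++	 */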
++	switch (auth_algtype & OP_PCL_IPSEC_AUTH_MASK) {
++	case OP_PCL_IPSEC_HMAC_MD5_96:
++	case OP_PCL_IPSEC_HMAC_MD5_128:
++		dkp_protid = OP_PCLID_DKP_MD5;
++		break;
++	case OP_PCL_IPSEC_HMAC_SHA1_96:
++	case OP_PCL_IPSEC_HMAC_SHA1_160:
++		dkp_protid = OP_PCLID_DKP_SHA1;
++		break;
++	case OP_PCL_IPSEC_HMAC_SHA2_256_128:
++		dkp_protid = OP_PCLID_DKP_SHA256;
++		break;
++	case OP_PCL_IPSEC_HMAC_SHA2_384_192:
++		dkp_protid = OP_PCLID_DKP_SHA384;
++		break;
++	case OP_PCL_IPSEC_HMAC_SHA2_512_256:
++		dkp_protid = OP_PCLID_DKP_SHA512;
++		break;
++	case OP_PCL_IPSEC_HMAC_SHA2_224_96:
++	case OP_PCL_IPSEC_HMAC_SHA2_224_112:
++	case OP_PCL_IPSEC_HMAC_SHA2_224_224:
++		dkp_protid = OP_PCLID_DKP_SHA224;
++		break;
++	default:
++		return rta_inline_query(sd_base_len,
++				       jd_len,
++				       data_len,
++				       inl_mask, count);
++	}
++
++	/* Updating the maximum supported inline key length */
++	if (auth_index != -1) {
++		if (split_key_len(dkp_protid) > data_len[auth_index])
++			data_len[auth_index] = split_key_len(dkp_protid);
++	}
++	return rta_inline_query(sd_base_len,
++			       jd_len,
++			       data_len,
++			       inl_mask, count);
++}
++
+ /**
+  * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared
+  *                           descriptor.
+diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
+index 7d16c66d79..0ed9eec816 100644
+--- a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
++++ b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
+@@ -1023,6 +1023,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p,
+ 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ 		MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ 
++		/* Conditional jump with CALM added to ensure that the
++		 * previous processing has completed.
++		 */
++		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
++
+ 		LOAD(p, CLRW_RESET_CLS1_CHA |
+ 		     CLRW_CLR_C1KEY |
+ 		     CLRW_CLR_C1CTX |
+@@ -1070,6 +1075,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p,
+ 
+ 		MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+ 
++		/* Conditional jump with CALM added to ensure that the
++		 * previous processing has completed.
++		 */
++		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
++
+ 		LOAD(p, CLRW_RESET_CLS1_CHA |
+ 		     CLRW_CLR_C1KEY |
+ 		     CLRW_CLR_C1CTX |
+diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h
+index b38c15a24f..d41bacf8f9 100644
+--- a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h
++++ b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: BSD-3-Clause
+- * Copyright 2020-2022 NXP
++ * Copyright 2020-2023 NXP
+  */
+ 
+ #ifndef __DESC_SDAP_H__
+@@ -628,6 +628,10 @@ static inline int pdcp_sdap_insert_no_snoop_op(
+ 		/* Save the ICV generated */
+ 		MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ 
++		/* Conditional jump with CALM added to ensure that the
++		 * previous processing has completed.
++		 */
++		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ 		/* The CHA will be reused so we need to clear it */
+ 		LOAD(p, CLRW_RESET_CLS1_CHA |
+ 		     CLRW_CLR_C1KEY |
+@@ -718,6 +722,10 @@ static inline int pdcp_sdap_insert_no_snoop_op(
+ 		/* Save the ICV which is stalling in output FIFO to MATH3 */
+ 		MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+ 
++		/* Conditional jump with CALM added to ensure that the
++		 * previous processing has completed.
++		 */
++		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ 		/* Reset class 1 CHA */
+ 		LOAD(p, CLRW_RESET_CLS1_CHA |
+ 		     CLRW_CLR_C1KEY |
+diff --git a/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/dpdk/drivers/common/dpaax/dpaax_iova_table.c
+index 9daac4bc03..860e702333 100644
+--- a/dpdk/drivers/common/dpaax/dpaax_iova_table.c
++++ b/dpdk/drivers/common/dpaax/dpaax_iova_table.c
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: BSD-3-Clause
+- * Copyright 2018 NXP
++ * Copyright 2018-2023 NXP
+  */
+ 
+ #include <rte_memory.h>
+@@ -139,10 +139,12 @@ read_memory_node(unsigned int *count)
+ 	}
+ 
+ 	DPAAX_DEBUG("Device-tree memory node data:");
+-	do {
++
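++	/* Iterate the nodes in reverse; safe when none were found (j == 0). */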
++	while (j > 0) {
++		--j;
+ 		DPAAX_DEBUG("    %08" PRIx64 " %08zu",
+ 			    nodes[j].addr, nodes[j].len);
+-	} while (--j);
++	}
+ 
+ cleanup:
+ 	close(fd);
+@@ -255,10 +257,7 @@ dpaax_iova_table_populate(void)
+ void
+ dpaax_iova_table_depopulate(void)
+ {
+-	if (dpaax_iova_table_p == NULL)
+-		return;
+-
+-	rte_free(dpaax_iova_table_p->entries);
++	rte_free(dpaax_iova_table_p);
+ 	dpaax_iova_table_p = NULL;
+ 
+ 	DPAAX_DEBUG("IOVA Table cleaned");
+diff --git a/dpdk/drivers/common/idpf/base/virtchnl2.h b/dpdk/drivers/common/idpf/base/virtchnl2.h
+index 3900b784d0..21b2039aa2 100644
+--- a/dpdk/drivers/common/idpf/base/virtchnl2.h
++++ b/dpdk/drivers/common/idpf/base/virtchnl2.h
+@@ -1872,7 +1872,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
+ 	case VIRTCHNL2_OP_GET_PTP_CAPS:
+ 		valid_len = sizeof(struct virtchnl2_get_ptp_caps);
+ 
+-		if (msglen >= valid_len) {
++		if (msglen > valid_len) {
+ 			struct virtchnl2_get_ptp_caps *ptp_caps =
+ 			(struct virtchnl2_get_ptp_caps *)msg;
+ 
+@@ -1888,7 +1888,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
+ 	case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES:
+ 		valid_len = sizeof(struct virtchnl2_ptp_tx_tstamp_latches);
+ 
+-		if (msglen >= valid_len) {
++		if (msglen > valid_len) {
+ 			struct virtchnl2_ptp_tx_tstamp_latches *tx_tstamp_latches =
+ 			(struct virtchnl2_ptp_tx_tstamp_latches *)msg;
+ 
+diff --git a/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h b/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h
+index e6e782a219..a5e3f05014 100644
+--- a/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h
++++ b/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h
+@@ -110,7 +110,7 @@
+ 	BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)
+ #define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S		12
+ #define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M			\
+-	IDPF_M(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M)
++	IDPF_M(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S)
+ #define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S		15
+ #define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M		\
+ 	BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)
+diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.c b/dpdk/drivers/common/mlx5/mlx5_common_mr.c
+index 40ff9153bd..85ec10d2ee 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_common_mr.c
++++ b/dpdk/drivers/common/mlx5/mlx5_common_mr.c
+@@ -1381,7 +1381,7 @@ mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
+ 
+ 	DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name);
+ 	n = mp->nb_mem_chunks;
+-	*out = calloc(sizeof(**out), n);
++	*out = calloc(n, sizeof(**out));
+ 	if (*out == NULL)
+ 		return -1;
+ 	rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out);
+diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
+index 4d8818924a..630ab96a8f 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
+@@ -965,19 +965,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 			max_geneve_tlv_options);
+ 	attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr,
+ 			max_geneve_tlv_option_data_len);
++	attr->query_match_sample_info = MLX5_GET(cmd_hca_cap, hcattr,
++						 query_match_sample_info);
+ 	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
+-	attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+-					 general_obj_types) &
+-			      MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO);
+-	attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+-					 general_obj_types) &
+-			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
+-	attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+-							general_obj_types) &
+-				  MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
+-	attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+-					 general_obj_types) &
+-			      MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
+ 	attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr,
+ 					  wqe_index_ignore_cap);
+ 	attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd);
+@@ -1001,6 +991,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 	/* Read the general_obj_types bitmap and extract the relevant bits. */
+ 	general_obj_types_supported = MLX5_GET64(cmd_hca_cap, hcattr,
+ 						 general_obj_types);
++	attr->qos.flow_meter_aso_sup =
++			!!(general_obj_types_supported &
++			   MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO);
+ 	attr->vdpa.valid = !!(general_obj_types_supported &
+ 			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
+ 	attr->vdpa.queue_counters_valid =
+@@ -1074,8 +1067,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 		MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled);
+ 	attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time);
+ 	attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto);
+-	attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+-					 general_obj_types) &
++	attr->ct_offload = !!(general_obj_types_supported &
+ 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
+ 	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
+ 	attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table);
+@@ -1104,8 +1096,8 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 			(ctx, &attr->flex);
+ 		if (rc)
+ 			return -1;
+-		attr->flex.query_match_sample_info = MLX5_GET(cmd_hca_cap, hcattr,
+-							      query_match_sample_info);
++		attr->flex.query_match_sample_info =
++						attr->query_match_sample_info;
+ 	}
+ 	if (attr->crypto) {
+ 		attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts) ||
+@@ -1814,7 +1806,7 @@ mlx5_devx_cmd_create_rqt(void *ctx,
+ 	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
+ 	void *rqt_ctx;
+ 	struct mlx5_devx_obj *rqt = NULL;
+-	int i;
++	unsigned int i;
+ 
+ 	in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
+ 	if (!in) {
+@@ -1867,7 +1859,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
+ 	uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
+ 	uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
+ 	void *rqt_ctx;
+-	int i;
++	unsigned int i;
+ 	int ret;
+ 
+ 	if (!in) {
+@@ -1880,7 +1872,6 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
+ 	MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
+ 	rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
+ 	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
+-	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
+ 	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
+ 	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
+ 		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
+diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
+index 7f23e925a5..b814c8becc 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
+@@ -315,6 +315,7 @@ struct mlx5_hca_attr {
+ 	uint32_t flow_counter_bulk_log_granularity:5;
+ 	uint32_t alloc_flow_counter_pd:1;
+ 	uint32_t flow_counter_access_aso:1;
++	uint32_t query_match_sample_info:1;
+ 	uint32_t flow_access_aso_opc_mod:8;
+ 	uint32_t cross_vhca:1;
+ 	uint32_t lag_rx_port_affinity:1;
+diff --git a/dpdk/drivers/common/mlx5/mlx5_prm.h b/dpdk/drivers/common/mlx5/mlx5_prm.h
+index 9e22dce6da..3cbb1179c0 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_prm.h
++++ b/dpdk/drivers/common/mlx5/mlx5_prm.h
+@@ -2334,8 +2334,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
+ 	u8 reserved_at_d0[0x3];
+ 	u8 log_conn_track_max_alloc[0x5];
+ 	u8 reserved_at_d8[0x3];
+-	u8 log_max_conn_track_offload[0x5];
+-	u8 cross_vhca_object_to_object_supported[0x20]; /* End of DW7. */
++	u8 log_max_conn_track_offload[0x5]; /* End of DW7. */
++	u8 cross_vhca_object_to_object_supported[0x20];
+ 	u8 allowed_object_for_other_vhca_access_high[0x20];
+ 	u8 allowed_object_for_other_vhca_access[0x20];
+ 	u8 reserved_at_140[0x20];
+@@ -3606,7 +3606,7 @@ struct mlx5_ifc_stc_ste_param_vport_bits {
+ 	u8 eswitch_owner_vhca_id[0x10];
+ 	u8 vport_number[0x10];
+ 	u8 eswitch_owner_vhca_id_valid[0x1];
+-	u8 reserved_at_21[0x59];
++	u8 reserved_at_21[0x5f];
+ };
+ 
+ union mlx5_ifc_stc_param_bits {
+diff --git a/dpdk/drivers/common/qat/meson.build b/dpdk/drivers/common/qat/meson.build
+index 5c36fbb270..62abcb6fe3 100644
+--- a/dpdk/drivers/common/qat/meson.build
++++ b/dpdk/drivers/common/qat/meson.build
+@@ -17,13 +17,13 @@ qat_compress_relpath = '../../' + qat_compress_path
+ if disable_drivers.contains(qat_crypto_path)
+     qat_crypto = false
+     dpdk_drvs_disabled += qat_crypto_path
+-    set_variable(qat_crypto_path.underscorify() + '_disable_reason',
++    set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason',
+             'Explicitly disabled via build config')
+ endif
+ if disable_drivers.contains(qat_compress_path)
+     qat_compress = false
+     dpdk_drvs_disabled += qat_compress_path
+-    set_variable(qat_compress_path.underscorify() + '_disable_reason',
++    set_variable('drv_' + qat_compress_path.underscorify() + '_disable_reason',
+             'Explicitly disabled via build config')
+ endif
+ 
+@@ -36,7 +36,7 @@ if arch_subdir == 'arm'
+     else
+         qat_crypto = false
+         dpdk_drvs_disabled += qat_crypto_path
+-        set_variable(qat_crypto_path.underscorify() + '_disable_reason',
++        set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason',
+         'missing dependency for Arm, libcrypto')
+     endif
+ else
+@@ -57,7 +57,7 @@ else
+         else
+             qat_crypto = false
+             dpdk_drvs_disabled += qat_crypto_path
+-            set_variable(qat_crypto_path.underscorify() + '_disable_reason',
++            set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason',
+                 'missing dependency, libipsecmb or libcrypto')
+         endif
+     elif libcrypto.found()
+@@ -66,7 +66,7 @@ else
+     else
+         qat_crypto = false
+         dpdk_drvs_disabled += qat_crypto_path
+-        set_variable(qat_crypto_path.underscorify() + '_disable_reason',
++        set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason',
+             'missing dependency, libipsecmb or libcrypto')
+     endif
+ endif
+diff --git a/dpdk/drivers/common/qat/qat_device.c b/dpdk/drivers/common/qat/qat_device.c
+index f55dc3c6f0..eceb5c89c4 100644
+--- a/dpdk/drivers/common/qat/qat_device.c
++++ b/dpdk/drivers/common/qat/qat_device.c
+@@ -29,6 +29,7 @@ struct qat_dev_hw_spec_funcs *qat_dev_hw_spec[QAT_N_GENS];
+ /* per-process array of device data */
+ struct qat_device_info qat_pci_devs[RTE_PMD_QAT_MAX_PCI_DEVICES];
+ static int qat_nb_pci_devices;
++int qat_legacy_capa;
+ 
+ /*
+  * The set of PCI devices this driver supports
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx.h b/dpdk/drivers/common/sfc_efx/base/efx.h
+index 3312c2fa8f..5773cb00b3 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx.h
++++ b/dpdk/drivers/common/sfc_efx/base/efx.h
+@@ -7,6 +7,8 @@
+ #ifndef	_SYS_EFX_H
+ #define	_SYS_EFX_H
+ 
++#include <assert.h>
++
+ #include "efx_annote.h"
+ #include "efsys.h"
+ #include "efx_types.h"
+@@ -17,14 +19,20 @@
+ extern "C" {
+ #endif
+ 
+-#define	EFX_STATIC_ASSERT(_cond)		\
+-	((void)sizeof (char[(_cond) ? 1 : -1]))
++/*
++ * Triggers an error at compilation time if the condition is false.
++ *
++ * The  { } exists to workaround a bug in clang (#55821)
++ * where it would not handle _Static_assert in a switch case.
++ */
++#define	EFX_STATIC_ASSERT(_cond) \
++	{ static_assert((_cond), #_cond); }
+ 
+ #define	EFX_ARRAY_SIZE(_array)			\
+ 	(sizeof (_array) / sizeof ((_array)[0]))
+ 
+ #define	EFX_FIELD_OFFSET(_type, _field)		\
+-	((size_t)&(((_type *)0)->_field))
++	offsetof(_type, _field)
+ 
+ /* The macro expands divider twice */
+ #define	EFX_DIV_ROUND_UP(_n, _d)		(((_n) + (_d) - 1) / (_d))
+diff --git a/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+index 997110e3d3..c96cf2b3a1 100644
+--- a/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
++++ b/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+@@ -861,15 +861,20 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
+ 
+ 		return;
+ 	} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC &&
+-			   cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION &&
+-			   cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) {
+-		if (likely(compcode == CPT_COMP_GOOD)) {
+-			if (uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) {
+-				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+-				return;
+-			} else if (uc_compcode == ROC_AE_ERR_ECC_PAI) {
+-				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+-				return;
++		   cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
++		struct cnxk_ae_sess *sess;
++
++		sess = (struct cnxk_ae_sess *)cop->asym->session;
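++		/* For ECDH public-key verify ops, translate the ECC microcode
++		 * status codes checked below into crypto op statuses.
++		 */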
++		if (sess->xfrm_type == RTE_CRYPTO_ASYM_XFORM_ECDH &&
++		    cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) {
++			if (likely(compcode == CPT_COMP_GOOD)) {
++				if (uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) {
++					cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
++					return;
++				} else if (uc_compcode == ROC_AE_ERR_ECC_PAI) {
++					cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++					return;
++				}
+ 			}
+ 		}
+ 	}
+diff --git a/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+index 34d40b07d4..eb5575b7ec 100644
+--- a/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
++++ b/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+@@ -578,7 +578,22 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
+ 		if (unlikely(res->uc_compcode)) {
+ 			if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
+ 				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+-			else
++			else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC &&
++				 cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
++				struct cnxk_ae_sess *sess;
++
++				sess = (struct cnxk_ae_sess *)cop->asym->session;
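++				/* ECDH public-key verify: not-on-curve is an
++				 * error, point-at-infinity is success.
++				 */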
++				if (sess->xfrm_type == RTE_CRYPTO_ASYM_XFORM_ECDH &&
++				    cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) {
++					if (res->uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) {
++						cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
++						return;
++					} else if (res->uc_compcode == ROC_AE_ERR_ECC_PAI) {
++						cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++						return;
++					}
++				}
++			} else
+ 				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ 
+ 			plt_dp_info("Request failed with microcode error");
+diff --git a/dpdk/drivers/crypto/cnxk/cnxk_ae.h b/dpdk/drivers/crypto/cnxk/cnxk_ae.h
+index ea11e093bf..ef9cb5eb91 100644
+--- a/dpdk/drivers/crypto/cnxk/cnxk_ae.h
++++ b/dpdk/drivers/crypto/cnxk/cnxk_ae.h
+@@ -49,13 +49,22 @@ struct cnxk_ae_sess {
+ };
+ 
+ static __rte_always_inline void
+-cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len)
++cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len, size_t max)
+ {
++	uint8_t msw_len = *len % 8;
++	uint64_t msw_val = 0;
+ 	size_t i;
+ 
+-	/* Strip leading NUL bytes */
+-	for (i = 0; i < *len; i++) {
+-		if ((*data)[i] != 0)
++	if (*len <= 8)
++		return;
++
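++	/* Scan from the most-significant end, stripping leading all-zero
++	 * 64-bit words.
++	 */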
++	memcpy(&msw_val, *data, msw_len);
++	if (msw_val != 0)
++		return;
++
++	for (i = msw_len; i < *len && (*len - i) < max; i += 8) {
++		memcpy(&msw_val, &(*data)[i], 8);
++		if (msw_val != 0)
+ 			break;
+ 	}
+ 	*data += i;
+@@ -72,8 +81,8 @@ cnxk_ae_fill_modex_params(struct cnxk_ae_sess *sess,
+ 	uint8_t *exp = xform->modex.exponent.data;
+ 	uint8_t *mod = xform->modex.modulus.data;
+ 
+-	cnxk_ae_modex_param_normalize(&mod, &mod_len);
+-	cnxk_ae_modex_param_normalize(&exp, &exp_len);
++	cnxk_ae_modex_param_normalize(&mod, &mod_len, SIZE_MAX);
++	cnxk_ae_modex_param_normalize(&exp, &exp_len, mod_len);
+ 
+ 	if (unlikely(exp_len == 0 || mod_len == 0))
+ 		return -EINVAL;
+@@ -205,16 +214,22 @@ cnxk_ae_fill_ec_params(struct cnxk_ae_sess *sess,
+ 		return 0;
+ 
+ 	ec->pkey.length = xform->ec.pkey.length;
+-	if (xform->ec.pkey.length)
+-		rte_memcpy(ec->pkey.data, xform->ec.pkey.data, xform->ec.pkey.length);
++	if (ec->pkey.length > ROC_AE_EC_DATA_MAX)
++		ec->pkey.length = ROC_AE_EC_DATA_MAX;
++	if (ec->pkey.length)
++		rte_memcpy(ec->pkey.data, xform->ec.pkey.data, ec->pkey.length);
+ 
+ 	ec->q.x.length = xform->ec.q.x.length;
+-	if (xform->ec.q.x.length)
+-		rte_memcpy(ec->q.x.data, xform->ec.q.x.data, xform->ec.q.x.length);
++	if (ec->q.x.length > ROC_AE_EC_DATA_MAX)
++		ec->q.x.length = ROC_AE_EC_DATA_MAX;
++	if (ec->q.x.length)
++		rte_memcpy(ec->q.x.data, xform->ec.q.x.data, ec->q.x.length);
+ 
+ 	ec->q.y.length = xform->ec.q.y.length;
++	if (ec->q.y.length > ROC_AE_EC_DATA_MAX)
++		ec->q.y.length = ROC_AE_EC_DATA_MAX;
+ 	if (xform->ec.q.y.length)
+-		rte_memcpy(ec->q.y.data, xform->ec.q.y.data, xform->ec.q.y.length);
++		rte_memcpy(ec->q.y.data, xform->ec.q.y.data, ec->q.y.length);
+ 
+ 	return 0;
+ }
+@@ -282,7 +297,7 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+ 	struct rte_crypto_mod_op_param mod_op;
+ 	uint64_t total_key_len;
+ 	union cpt_inst_w4 w4;
+-	uint32_t base_len;
++	size_t base_len;
+ 	uint32_t dlen;
+ 	uint8_t *dptr;
+ 
+@@ -290,8 +305,11 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+ 
+ 	base_len = mod_op.base.length;
+ 	if (unlikely(base_len > mod_len)) {
+-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+-		return -ENOTSUP;
++		cnxk_ae_modex_param_normalize(&mod_op.base.data, &base_len, mod_len);
++		if (base_len > mod_len) {
++			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
++			return -ENOTSUP;
++		}
+ 	}
+ 
+ 	total_key_len = mod_len + exp_len;
+@@ -735,7 +753,11 @@ cnxk_ae_sm2_sign_prep(struct rte_crypto_sm2_op_param *sm2,
+ 	uint8_t *dptr;
+ 
+ 	prime_len = ec_grp->prime.length;
++	if (prime_len > ROC_AE_EC_DATA_MAX)
++		prime_len = ROC_AE_EC_DATA_MAX;
+ 	order_len = ec_grp->order.length;
++	if (order_len > ROC_AE_EC_DATA_MAX)
++		order_len = ROC_AE_EC_DATA_MAX;
+ 
+ 	/* Truncate input length to curve prime length */
+ 	if (message_len > prime_len)
+@@ -822,7 +844,11 @@ cnxk_ae_sm2_verify_prep(struct rte_crypto_sm2_op_param *sm2,
+ 	uint8_t *dptr;
+ 
+ 	prime_len = ec_grp->prime.length;
++	if (prime_len > ROC_AE_EC_DATA_MAX)
++		prime_len = ROC_AE_EC_DATA_MAX;
+ 	order_len = ec_grp->order.length;
++	if (order_len > ROC_AE_EC_DATA_MAX)
++		order_len = ROC_AE_EC_DATA_MAX;
+ 
+ 	/* Truncate input length to curve prime length */
+ 	if (message_len > prime_len)
+diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+index bb5a2c629e..6ae356ace0 100644
+--- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
++++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+@@ -4124,7 +4124,7 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
+ 	cfg.dest_cfg.priority = priority;
+ 
+ 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
+-	cfg.user_ctx = (size_t)(qp);
++	cfg.user_ctx = (size_t)(&qp->rx_vq);
+ 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
+ 		cfg.order_preservation_en = 1;
+diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
+index a301e8edb2..906ea39047 100644
+--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
+@@ -395,10 +395,10 @@ dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
+ 
+ 	cdb->sh_desc[0] = cipherdata.keylen;
+ 	cdb->sh_desc[1] = authdata.keylen;
+-	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
++	err = rta_inline_ipsec_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ 			       DESC_JOB_IO_LEN,
+ 			       (unsigned int *)cdb->sh_desc,
+-			       &cdb->sh_desc[2], 2);
++			       &cdb->sh_desc[2], 2, authdata.algtype, 1);
+ 
+ 	if (err < 0) {
+ 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
+index 30f919cd40..2a5599b7d8 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
+@@ -406,7 +406,7 @@ ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer)
+ 		resp_param->result = ipsec_mb_qp_release(dev, qp_id);
+ 		break;
+ 	default:
+-		CDEV_LOG_ERR("invalid mp request type\n");
++		CDEV_LOG_ERR("invalid mp request type");
+ 	}
+ 
+ out:
+diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+index 4de4866cf3..80de25c65b 100644
+--- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+@@ -1500,7 +1500,7 @@ aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job,
+  *
+  * @return
+  * - 0 on success, the IMB_JOB will be filled
+- * - -1 if invalid session or errors allocationg SGL linear buffer,
++ * - -1 if invalid session or errors allocating SGL linear buffer,
+  *   IMB_JOB will not be filled
+  */
+ static inline int
+diff --git a/dpdk/drivers/crypto/openssl/compat.h b/dpdk/drivers/crypto/openssl/compat.h
+index 9f9167c4f1..e1814fea8c 100644
+--- a/dpdk/drivers/crypto/openssl/compat.h
++++ b/dpdk/drivers/crypto/openssl/compat.h
+@@ -5,6 +5,32 @@
+ #ifndef __RTA_COMPAT_H__
+ #define __RTA_COMPAT_H__
+ 
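++/* Version-neutral MAC context destructors: OpenSSL 3.x represents both HMAC
++ * and CMAC state as EVP_MAC_CTX, while earlier releases use the distinct
++ * HMAC_CTX and CMAC_CTX types.
++ */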
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++static __rte_always_inline void
++free_hmac_ctx(EVP_MAC_CTX *ctx)
++{
++	EVP_MAC_CTX_free(ctx);
++}
++
++static __rte_always_inline void
++free_cmac_ctx(EVP_MAC_CTX *ctx)
++{
++	EVP_MAC_CTX_free(ctx);
++}
++#else
++static __rte_always_inline void
++free_hmac_ctx(HMAC_CTX *ctx)
++{
++	HMAC_CTX_free(ctx);
++}
++
++static __rte_always_inline void
++free_cmac_ctx(CMAC_CTX *ctx)
++{
++	CMAC_CTX_free(ctx);
++}
++#endif
++
+ #if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+ 
+ static __rte_always_inline int
+diff --git a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h
+index 334912d335..aa3f466e74 100644
+--- a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h
++++ b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h
+@@ -80,6 +80,20 @@ struct openssl_qp {
+ 	 */
+ } __rte_cache_aligned;
+ 
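++/* Per-queue-pair copies of a session's cipher/auth EVP contexts. */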
++struct evp_ctx_pair {
++	EVP_CIPHER_CTX *cipher;
++	union {
++		EVP_MD_CTX *auth;
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++		EVP_MAC_CTX *hmac;
++		EVP_MAC_CTX *cmac;
++#else
++		HMAC_CTX *hmac;
++		CMAC_CTX *cmac;
++#endif
++	};
++};
++
+ /** OPENSSL crypto private session structure */
+ struct openssl_session {
+ 	enum openssl_chain_order chain_order;
+@@ -166,6 +180,15 @@ struct openssl_session {
+ 		/**< digest length */
+ 	} auth;
+ 
++	uint16_t ctx_copies_len;
++	/**< Number of entries in qp_ctx. */
++	struct evp_ctx_pair qp_ctx[];
++	/**< Flexible array member of per-queue-pair structures, each containing
++	 * pointers to copies of the cipher and auth EVP contexts. Cipher
++	 * contexts are not safe to use from multiple cores simultaneously, so
++	 * maintaining these copies allows avoiding per-buffer copying into a
++	 * temporary context.
++	 */
+ } __rte_cache_aligned;
+ 
+ /** OPENSSL crypto private asymmetric session structure */
+@@ -217,7 +240,8 @@ struct openssl_asym_session {
+ /** Set and validate OPENSSL crypto session parameters */
+ extern int
+ openssl_set_session_parameters(struct openssl_session *sess,
+-		const struct rte_crypto_sym_xform *xform);
++		const struct rte_crypto_sym_xform *xform,
++		uint16_t nb_queue_pairs);
+ 
+ /** Reset OPENSSL crypto session parameters */
+ extern void
+diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
+index e8cb09defc..101111e85b 100644
+--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
+@@ -350,7 +350,8 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
+ static int
+ openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ 		enum rte_crypto_aead_algorithm algo,
+-		uint8_t tag_len, const uint8_t *key)
++		uint8_t tag_len, const uint8_t *key,
++		EVP_CIPHER_CTX **ctx)
+ {
+ 	int iv_type = 0;
+ 	unsigned int do_ccm;
+@@ -378,7 +379,7 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ 	}
+ 
+ 	sess->cipher.mode = OPENSSL_CIPHER_LIB;
+-	sess->cipher.ctx = EVP_CIPHER_CTX_new();
++	*ctx = EVP_CIPHER_CTX_new();
+ 
+ 	if (get_aead_algo(algo, sess->cipher.key.length,
+ 			&sess->cipher.evp_algo) != 0)
+@@ -388,19 +389,19 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ 
+ 	sess->chain_order = OPENSSL_CHAIN_COMBINED;
+ 
+-	if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
++	if (EVP_EncryptInit_ex(*ctx, sess->cipher.evp_algo,
+ 			NULL, NULL, NULL) <= 0)
+ 		return -EINVAL;
+ 
+-	if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length,
++	if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type, sess->iv.length,
+ 			NULL) <= 0)
+ 		return -EINVAL;
+ 
+ 	if (do_ccm)
+-		EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
++		EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG,
+ 				tag_len, NULL);
+ 
+-	if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
++	if (EVP_EncryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0)
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -410,7 +411,8 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ static int
+ openssl_set_sess_aead_dec_param(struct openssl_session *sess,
+ 		enum rte_crypto_aead_algorithm algo,
+-		uint8_t tag_len, const uint8_t *key)
++		uint8_t tag_len, const uint8_t *key,
++		EVP_CIPHER_CTX **ctx)
+ {
+ 	int iv_type = 0;
+ 	unsigned int do_ccm = 0;
+@@ -437,7 +439,7 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess,
+ 	}
+ 
+ 	sess->cipher.mode = OPENSSL_CIPHER_LIB;
+-	sess->cipher.ctx = EVP_CIPHER_CTX_new();
++	*ctx = EVP_CIPHER_CTX_new();
+ 
+ 	if (get_aead_algo(algo, sess->cipher.key.length,
+ 			&sess->cipher.evp_algo) != 0)
+@@ -447,24 +449,46 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess,
+ 
+ 	sess->chain_order = OPENSSL_CHAIN_COMBINED;
+ 
+-	if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
++	if (EVP_DecryptInit_ex(*ctx, sess->cipher.evp_algo,
+ 			NULL, NULL, NULL) <= 0)
+ 		return -EINVAL;
+ 
+-	if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type,
++	if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type,
+ 			sess->iv.length, NULL) <= 0)
+ 		return -EINVAL;
+ 
+ 	if (do_ccm)
+-		EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
++		EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG,
+ 				tag_len, NULL);
+ 
+-	if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
++	if (EVP_DecryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0)
+ 		return -EINVAL;
+ 
+ 	return 0;
+ }
+ 
++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30200000L)
++static int openssl_aesni_ctx_clone(EVP_CIPHER_CTX **dest,
++		struct openssl_session *sess)
++{
++	/* OpenSSL versions 3.0.0 <= V < 3.2.0 have no dupctx() implementation
++	 * for AES-GCM and AES-CCM. In this case, we have to create new empty
++	 * contexts and initialise them as we did the original context.
++	 */
++	if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
++		sess->aead_algo = RTE_CRYPTO_AEAD_AES_GCM;
++
++	if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
++		return openssl_set_sess_aead_enc_param(sess, sess->aead_algo,
++				sess->auth.digest_length, sess->cipher.key.data,
++				dest);
++	else
++		return openssl_set_sess_aead_dec_param(sess, sess->aead_algo,
++				sess->auth.digest_length, sess->cipher.key.data,
++				dest);
++}
++#endif
++
+ /** Set session cipher parameters */
+ static int
+ openssl_set_session_cipher_parameters(struct openssl_session *sess,
+@@ -521,6 +545,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
+ 				sess->cipher.key.length,
+ 				sess->cipher.key.data) != 0)
+ 			return -EINVAL;
++
++
++		/* We use 3DES encryption also for decryption.
++		 * IV is not important for 3DES ECB.
++		 */
++		if (EVP_EncryptInit_ex(sess->cipher.ctx, EVP_des_ede3_ecb(),
++				NULL, sess->cipher.key.data,  NULL) != 1)
++			return -EINVAL;
++
+ 		break;
+ 
+ 	case RTE_CRYPTO_CIPHER_DES_CBC:
+@@ -586,6 +619,8 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
+ 		return -ENOTSUP;
+ 	}
+ 
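++	/* Disable EVP padding once per session instead of on every operation. */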
++	EVP_CIPHER_CTX_set_padding(sess->cipher.ctx, 0);
++
+ 	return 0;
+ }
+ 
+@@ -623,12 +658,14 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
+ 			return openssl_set_sess_aead_enc_param(sess,
+ 						RTE_CRYPTO_AEAD_AES_GCM,
+ 						xform->auth.digest_length,
+-						xform->auth.key.data);
++						xform->auth.key.data,
++						&sess->cipher.ctx);
+ 		else
+ 			return openssl_set_sess_aead_dec_param(sess,
+ 						RTE_CRYPTO_AEAD_AES_GCM,
+ 						xform->auth.digest_length,
+-						xform->auth.key.data);
++						xform->auth.key.data,
++						&sess->cipher.ctx);
+ 		break;
+ 
+ 	case RTE_CRYPTO_AUTH_MD5:
+@@ -770,16 +807,19 @@ openssl_set_session_aead_parameters(struct openssl_session *sess,
+ 	/* Select cipher direction */
+ 	if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ 		return openssl_set_sess_aead_enc_param(sess, xform->aead.algo,
+-				xform->aead.digest_length, xform->aead.key.data);
++				xform->aead.digest_length, xform->aead.key.data,
++				&sess->cipher.ctx);
+ 	else
+ 		return openssl_set_sess_aead_dec_param(sess, xform->aead.algo,
+-				xform->aead.digest_length, xform->aead.key.data);
++				xform->aead.digest_length, xform->aead.key.data,
++				&sess->cipher.ctx);
+ }
+ 
+ /** Parse crypto xform chain and set private session parameters */
+ int
+ openssl_set_session_parameters(struct openssl_session *sess,
+-		const struct rte_crypto_sym_xform *xform)
++		const struct rte_crypto_sym_xform *xform,
++		uint16_t nb_queue_pairs)
+ {
+ 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ 	const struct rte_crypto_sym_xform *auth_xform = NULL;
+@@ -841,6 +881,12 @@ openssl_set_session_parameters(struct openssl_session *sess,
+ 		}
+ 	}
+ 
++	/*
++	 * With only one queue pair, the array of copies is not needed.
++	 * Otherwise, one entry per queue pair is required.
++	 */
++	sess->ctx_copies_len = nb_queue_pairs > 1 ? nb_queue_pairs : 0;
++
+ 	return 0;
+ }
+ 
+@@ -848,33 +894,45 @@ openssl_set_session_parameters(struct openssl_session *sess,
+ void
+ openssl_reset_session(struct openssl_session *sess)
+ {
+-	EVP_CIPHER_CTX_free(sess->cipher.ctx);
++	/* Free all the qp_ctx entries. */
++	for (uint16_t i = 0; i < sess->ctx_copies_len; i++) {
++		if (sess->qp_ctx[i].cipher != NULL) {
++			EVP_CIPHER_CTX_free(sess->qp_ctx[i].cipher);
++			sess->qp_ctx[i].cipher = NULL;
++		}
+ 
+-	if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI)
+-		EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx);
++		switch (sess->auth.mode) {
++		case OPENSSL_AUTH_AS_AUTH:
++			EVP_MD_CTX_destroy(sess->qp_ctx[i].auth);
++			sess->qp_ctx[i].auth = NULL;
++			break;
++		case OPENSSL_AUTH_AS_HMAC:
++			free_hmac_ctx(sess->qp_ctx[i].hmac);
++			sess->qp_ctx[i].hmac = NULL;
++			break;
++		case OPENSSL_AUTH_AS_CMAC:
++			free_cmac_ctx(sess->qp_ctx[i].cmac);
++			sess->qp_ctx[i].cmac = NULL;
++			break;
++		}
++	}
++
++	EVP_CIPHER_CTX_free(sess->cipher.ctx);
+ 
+ 	switch (sess->auth.mode) {
+ 	case OPENSSL_AUTH_AS_AUTH:
+ 		EVP_MD_CTX_destroy(sess->auth.auth.ctx);
+ 		break;
+ 	case OPENSSL_AUTH_AS_HMAC:
+-		EVP_PKEY_free(sess->auth.hmac.pkey);
+-# if OPENSSL_VERSION_NUMBER >= 0x30000000L
+-		EVP_MAC_CTX_free(sess->auth.hmac.ctx);
+-# else
+-		HMAC_CTX_free(sess->auth.hmac.ctx);
+-# endif
++		free_hmac_ctx(sess->auth.hmac.ctx);
+ 		break;
+ 	case OPENSSL_AUTH_AS_CMAC:
+-# if OPENSSL_VERSION_NUMBER >= 0x30000000L
+-		EVP_MAC_CTX_free(sess->auth.cmac.ctx);
+-# else
+-		CMAC_CTX_free(sess->auth.cmac.ctx);
+-# endif
+-		break;
+-	default:
++		free_cmac_ctx(sess->auth.cmac.ctx);
+ 		break;
+ 	}
++
++	if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI)
++		EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx);
+ }
+ 
+ /** Provide session for operation */
+@@ -914,7 +972,7 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
+ 		sess = (struct openssl_session *)_sess->driver_priv_data;
+ 
+ 		if (unlikely(openssl_set_session_parameters(sess,
+-				op->sym->xform) != 0)) {
++				op->sym->xform, 1) != 0)) {
+ 			rte_mempool_put(qp->sess_mp, _sess);
+ 			sess = NULL;
+ 		}
+@@ -1068,8 +1126,6 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ 	if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ 		goto process_cipher_encrypt_err;
+ 
+-	EVP_CIPHER_CTX_set_padding(ctx, 0);
+-
+ 	if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ 			srclen, ctx, inplace))
+ 		goto process_cipher_encrypt_err;
+@@ -1118,8 +1174,6 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ 	if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ 		goto process_cipher_decrypt_err;
+ 
+-	EVP_CIPHER_CTX_set_padding(ctx, 0);
+-
+ 	if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ 			srclen, ctx, inplace))
+ 		goto process_cipher_decrypt_err;
+@@ -1136,8 +1190,7 @@ process_cipher_decrypt_err:
+ /** Process cipher des 3 ctr encryption, decryption algorithm */
+ static int
+ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
+-		int offset, uint8_t *iv, uint8_t *key, int srclen,
+-		EVP_CIPHER_CTX *ctx)
++		int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
+ {
+ 	uint8_t ebuf[8], ctr[8];
+ 	int unused, n;
+@@ -1155,12 +1208,6 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ 	src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+ 	l = rte_pktmbuf_data_len(m) - offset;
+ 
+-	/* We use 3DES encryption also for decryption.
+-	 * IV is not important for 3DES ecb
+-	 */
+-	if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0)
+-		goto process_cipher_des3ctr_err;
+-
+ 	memcpy(ctr, iv, 8);
+ 
+ 	for (n = 0; n < srclen; n++) {
+@@ -1427,6 +1474,9 @@ process_openssl_auth_mac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
+ 	if (m == 0)
+ 		goto process_auth_err;
+ 
++	if (EVP_MAC_init(ctx, NULL, 0, NULL) <= 0)
++		goto process_auth_err;
++
+ 	src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+ 
+ 	l = rte_pktmbuf_data_len(m) - offset;
+@@ -1453,11 +1503,9 @@ process_auth_final:
+ 	if (EVP_MAC_final(ctx, dst, &dstlen, DIGEST_LENGTH_MAX) != 1)
+ 		goto process_auth_err;
+ 
+-	EVP_MAC_CTX_free(ctx);
+ 	return 0;
+ 
+ process_auth_err:
+-	EVP_MAC_CTX_free(ctx);
+ 	OPENSSL_LOG(ERR, "Process openssl auth failed");
+ 	return -EINVAL;
+ }
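With per-qp contexts now cached, the per-buffer EVP_MAC_CTX dup/free pair is dropped and the hunk above instead re-initialises the cached context with EVP_MAC_init(ctx, NULL, 0, NULL) before each buffer. A minimal sketch of that reuse pattern, assuming OpenSSL >= 3.0; mac_one_buffer() is an illustrative name:

```c
/* Sketch of per-buffer MAC context reuse: one cached EVP_MAC_CTX is
 * reset in place rather than duplicated and freed for every buffer.
 */
#include <openssl/evp.h>
#include <stddef.h>

static int
mac_one_buffer(EVP_MAC_CTX *ctx, const unsigned char *buf, size_t len,
	       unsigned char *out, size_t outsz)
{
	size_t outlen = 0;

	/* NULL key: reset the internal state, keep the configured key. */
	if (EVP_MAC_init(ctx, NULL, 0, NULL) <= 0)
		return -1;
	if (EVP_MAC_update(ctx, buf, len) <= 0)
		return -1;
	if (EVP_MAC_final(ctx, out, &outlen, outsz) <= 0)
		return -1;
	return (int)outlen;
}
```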
+@@ -1569,11 +1617,151 @@ process_auth_err:
+ # endif
+ /*----------------------------------------------------------------------------*/
+ 
++static inline EVP_CIPHER_CTX *
++get_local_cipher_ctx(struct openssl_session *sess, struct openssl_qp *qp)
++{
++	/* If the array is not being used, just return the main context. */
++	if (sess->ctx_copies_len == 0)
++		return sess->cipher.ctx;
++
++	EVP_CIPHER_CTX **lctx = &sess->qp_ctx[qp->id].cipher;
++
++	if (unlikely(*lctx == NULL)) {
++#if OPENSSL_VERSION_NUMBER >= 0x30200000L
++		/* EVP_CIPHER_CTX_dup() added in OSSL 3.2 */
++		*lctx = EVP_CIPHER_CTX_dup(sess->cipher.ctx);
++		return *lctx;
++#elif OPENSSL_VERSION_NUMBER >= 0x30000000L
++		if (sess->chain_order == OPENSSL_CHAIN_COMBINED) {
++			/* AESNI is special-cased via openssl_aesni_ctx_clone()
++			 * to work around the missing EVP_CIPHER_CTX_copy()
++			 * support for AEAD ciphers on
++			 * 3.0.0 <= OSSL version < 3.2.0.
++			 */
++			if (openssl_aesni_ctx_clone(lctx, sess) != 0)
++				*lctx = NULL;
++			return *lctx;
++		}
++#endif
++
++		*lctx = EVP_CIPHER_CTX_new();
++		EVP_CIPHER_CTX_copy(*lctx, sess->cipher.ctx);
++	}
++
++	return *lctx;
++}
++
++static inline EVP_MD_CTX *
++get_local_auth_ctx(struct openssl_session *sess, struct openssl_qp *qp)
++{
++	/* If the array is not being used, just return the main context. */
++	if (sess->ctx_copies_len == 0)
++		return sess->auth.auth.ctx;
++
++	EVP_MD_CTX **lctx = &sess->qp_ctx[qp->id].auth;
++
++	if (unlikely(*lctx == NULL)) {
++#if OPENSSL_VERSION_NUMBER >= 0x30100000L
++		/* EVP_MD_CTX_dup() added in OSSL 3.1 */
++		*lctx = EVP_MD_CTX_dup(sess->auth.auth.ctx);
++#else
++		*lctx = EVP_MD_CTX_new();
++		EVP_MD_CTX_copy(*lctx, sess->auth.auth.ctx);
++#endif
++	}
++
++	return *lctx;
++}
++
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++static inline EVP_MAC_CTX *
++#else
++static inline HMAC_CTX *
++#endif
++get_local_hmac_ctx(struct openssl_session *sess, struct openssl_qp *qp)
++{
++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L)
++	/* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of
++	 * EVP_MAC_CTXs is broken, and doesn't actually reset their
++	 * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid
++	 * undefined behavior of provided macs on EVP_MAC
++	 * reinitialization"). In cases where the fix is not present,
++	 * fall back to duplicating the context every buffer as a
++	 * workaround, at the cost of performance.
++	 */
++	RTE_SET_USED(qp);
++	return EVP_MAC_CTX_dup(sess->auth.hmac.ctx);
++#else
++	if (sess->ctx_copies_len == 0)
++		return sess->auth.hmac.ctx;
++
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++	EVP_MAC_CTX **lctx =
++#else
++	HMAC_CTX **lctx =
++#endif
++		&sess->qp_ctx[qp->id].hmac;
++
++	if (unlikely(*lctx == NULL)) {
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++		*lctx = EVP_MAC_CTX_dup(sess->auth.hmac.ctx);
++#else
++		*lctx = HMAC_CTX_new();
++		HMAC_CTX_copy(*lctx, sess->auth.hmac.ctx);
++#endif
++	}
++
++	return *lctx;
++#endif
++}
++
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++static inline EVP_MAC_CTX *
++#else
++static inline CMAC_CTX *
++#endif
++get_local_cmac_ctx(struct openssl_session *sess, struct openssl_qp *qp)
++{
++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L)
++	/* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of
++	 * EVP_MAC_CTXs is broken, and doesn't actually reset their
++	 * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid
++	 * undefined behavior of provided macs on EVP_MAC
++	 * reinitialization"). In cases where the fix is not present,
++	 * fall back to duplicating the context every buffer as a
++	 * workaround, at the cost of performance.
++	 */
++	RTE_SET_USED(qp);
++	return EVP_MAC_CTX_dup(sess->auth.cmac.ctx);
++#else
++	if (sess->ctx_copies_len == 0)
++		return sess->auth.cmac.ctx;
++
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++	EVP_MAC_CTX **lctx =
++#else
++	CMAC_CTX **lctx =
++#endif
++		&sess->qp_ctx[qp->id].cmac;
++
++	if (unlikely(*lctx == NULL)) {
++#if OPENSSL_VERSION_NUMBER >= 0x30000000L
++		*lctx = EVP_MAC_CTX_dup(sess->auth.cmac.ctx);
++#else
++		*lctx = CMAC_CTX_new();
++		CMAC_CTX_copy(*lctx, sess->auth.cmac.ctx);
++#endif
++	}
++
++	return *lctx;
++#endif
++}
++
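The get_local_*_ctx() helpers above all follow one pattern: each queue pair lazily receives its own copy of the session's context on first use, so lcores polling different queue pairs never share a (non-thread-safe) OpenSSL context and the per-packet copy/free of the old code disappears. A generic sketch of the pattern with hypothetical names:

```c
/* Generic sketch of the lazy per-queue-pair duplication pattern used by
 * the get_local_*_ctx() helpers; names are illustrative, not PMD API.
 */
#include <openssl/evp.h>

#define MAX_QPS 8

struct per_qp_ctx {
	EVP_CIPHER_CTX *tmpl;          /* initialised once per session */
	EVP_CIPHER_CTX *slot[MAX_QPS]; /* one private copy per queue pair */
};

static EVP_CIPHER_CTX *
get_local_ctx(struct per_qp_ctx *c, unsigned int qp_id)
{
	if (c->slot[qp_id] == NULL) {
		/* First packet on this qp: copy once and reuse afterwards,
		 * instead of copy+free on every packet.
		 */
		c->slot[qp_id] = EVP_CIPHER_CTX_new();
		if (c->slot[qp_id] == NULL)
			return NULL;
		if (EVP_CIPHER_CTX_copy(c->slot[qp_id], c->tmpl) <= 0) {
			EVP_CIPHER_CTX_free(c->slot[qp_id]);
			c->slot[qp_id] = NULL;
			return NULL;
		}
	}
	return c->slot[qp_id];
}
```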
+ /** Process auth/cipher combined operation */
+ static void
+-process_openssl_combined_op
+-		(struct rte_crypto_op *op, struct openssl_session *sess,
+-		struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
++process_openssl_combined_op(struct openssl_qp *qp, struct rte_crypto_op *op,
++		struct openssl_session *sess, struct rte_mbuf *mbuf_src,
++		struct rte_mbuf *mbuf_dst)
+ {
+ 	/* cipher */
+ 	uint8_t *dst = NULL, *iv, *tag, *aad;
+@@ -1590,6 +1778,8 @@ process_openssl_combined_op
+ 		return;
+ 	}
+ 
++	EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp);
++
+ 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ 			sess->iv.offset);
+ 	if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+@@ -1623,12 +1813,12 @@ process_openssl_combined_op
+ 			status = process_openssl_auth_encryption_gcm(
+ 					mbuf_src, offset, srclen,
+ 					aad, aadlen, iv,
+-					dst, tag, sess->cipher.ctx);
++					dst, tag, ctx);
+ 		else
+ 			status = process_openssl_auth_encryption_ccm(
+ 					mbuf_src, offset, srclen,
+ 					aad, aadlen, iv,
+-					dst, tag, taglen, sess->cipher.ctx);
++					dst, tag, taglen, ctx);
+ 
+ 	} else {
+ 		if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+@@ -1636,12 +1826,12 @@ process_openssl_combined_op
+ 			status = process_openssl_auth_decryption_gcm(
+ 					mbuf_src, offset, srclen,
+ 					aad, aadlen, iv,
+-					dst, tag, sess->cipher.ctx);
++					dst, tag, ctx);
+ 		else
+ 			status = process_openssl_auth_decryption_ccm(
+ 					mbuf_src, offset, srclen,
+ 					aad, aadlen, iv,
+-					dst, tag, taglen, sess->cipher.ctx);
++					dst, tag, taglen, ctx);
+ 	}
+ 
+ 	if (status != 0) {
+@@ -1656,14 +1846,13 @@ process_openssl_combined_op
+ 
+ /** Process cipher operation */
+ static void
+-process_openssl_cipher_op
+-		(struct rte_crypto_op *op, struct openssl_session *sess,
+-		struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
++process_openssl_cipher_op(struct openssl_qp *qp, struct rte_crypto_op *op,
++		struct openssl_session *sess, struct rte_mbuf *mbuf_src,
++		struct rte_mbuf *mbuf_dst)
+ {
+ 	uint8_t *dst, *iv;
+ 	int srclen, status;
+ 	uint8_t inplace = (mbuf_src == mbuf_dst) ? 1 : 0;
+-	EVP_CIPHER_CTX *ctx_copy;
+ 
+ 	/*
+ 	 * Segmented OOP destination buffer is not supported for encryption/
+@@ -1682,25 +1871,22 @@ process_openssl_cipher_op
+ 
+ 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ 			sess->iv.offset);
+-	ctx_copy = EVP_CIPHER_CTX_new();
+-	EVP_CIPHER_CTX_copy(ctx_copy, sess->cipher.ctx);
++
++	EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp);
+ 
+ 	if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
+ 		if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ 			status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ 					op->sym->cipher.data.offset, iv,
+-					srclen, ctx_copy, inplace);
++					srclen, ctx, inplace);
+ 		else
+ 			status = process_openssl_cipher_decrypt(mbuf_src, dst,
+ 					op->sym->cipher.data.offset, iv,
+-					srclen, ctx_copy, inplace);
++					srclen, ctx, inplace);
+ 	else
+ 		status = process_openssl_cipher_des3ctr(mbuf_src, dst,
+-				op->sym->cipher.data.offset, iv,
+-				sess->cipher.key.data, srclen,
+-				ctx_copy);
++				op->sym->cipher.data.offset, iv, srclen, ctx);
+ 
+-	EVP_CIPHER_CTX_free(ctx_copy);
+ 	if (status != 0)
+ 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+@@ -1819,42 +2005,40 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ 
+ 	switch (sess->auth.mode) {
+ 	case OPENSSL_AUTH_AS_AUTH:
+-		ctx_a = EVP_MD_CTX_create();
+-		EVP_MD_CTX_copy_ex(ctx_a, sess->auth.auth.ctx);
++		ctx_a = get_local_auth_ctx(sess, qp);
+ 		status = process_openssl_auth(mbuf_src, dst,
+ 				op->sym->auth.data.offset, NULL, NULL, srclen,
+ 				ctx_a, sess->auth.auth.evp_algo);
+-		EVP_MD_CTX_destroy(ctx_a);
+ 		break;
+ 	case OPENSSL_AUTH_AS_HMAC:
++		ctx_h = get_local_hmac_ctx(sess, qp);
+ # if OPENSSL_VERSION_NUMBER >= 0x30000000L
+-		ctx_h = EVP_MAC_CTX_dup(sess->auth.hmac.ctx);
+ 		status = process_openssl_auth_mac(mbuf_src, dst,
+ 				op->sym->auth.data.offset, srclen,
+ 				ctx_h);
+ # else
+-		ctx_h = HMAC_CTX_new();
+-		HMAC_CTX_copy(ctx_h, sess->auth.hmac.ctx);
+ 		status = process_openssl_auth_hmac(mbuf_src, dst,
+ 				op->sym->auth.data.offset, srclen,
+ 				ctx_h);
+-		HMAC_CTX_free(ctx_h);
+ # endif
++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L)
++		EVP_MAC_CTX_free(ctx_h);
++#endif
+ 		break;
+ 	case OPENSSL_AUTH_AS_CMAC:
++		ctx_c = get_local_cmac_ctx(sess, qp);
+ # if OPENSSL_VERSION_NUMBER >= 0x30000000L
+-		ctx_c = EVP_MAC_CTX_dup(sess->auth.cmac.ctx);
+ 		status = process_openssl_auth_mac(mbuf_src, dst,
+ 				op->sym->auth.data.offset, srclen,
+ 				ctx_c);
+ # else
+-		ctx_c = CMAC_CTX_new();
+-		CMAC_CTX_copy(ctx_c, sess->auth.cmac.ctx);
+ 		status = process_openssl_auth_cmac(mbuf_src, dst,
+ 				op->sym->auth.data.offset, srclen,
+ 				ctx_c);
+-		CMAC_CTX_free(ctx_c);
+ # endif
++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L)
++		EVP_MAC_CTX_free(ctx_c);
++#endif
+ 		break;
+ 	default:
+ 		status = -1;
+@@ -3105,13 +3289,13 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ 
+ 	switch (sess->chain_order) {
+ 	case OPENSSL_CHAIN_ONLY_CIPHER:
+-		process_openssl_cipher_op(op, sess, msrc, mdst);
++		process_openssl_cipher_op(qp, op, sess, msrc, mdst);
+ 		break;
+ 	case OPENSSL_CHAIN_ONLY_AUTH:
+ 		process_openssl_auth_op(qp, op, sess, msrc, mdst);
+ 		break;
+ 	case OPENSSL_CHAIN_CIPHER_AUTH:
+-		process_openssl_cipher_op(op, sess, msrc, mdst);
++		process_openssl_cipher_op(qp, op, sess, msrc, mdst);
+ 		/* OOP */
+ 		if (msrc != mdst)
+ 			copy_plaintext(msrc, mdst, op);
+@@ -3119,10 +3303,10 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ 		break;
+ 	case OPENSSL_CHAIN_AUTH_CIPHER:
+ 		process_openssl_auth_op(qp, op, sess, msrc, mdst);
+-		process_openssl_cipher_op(op, sess, msrc, mdst);
++		process_openssl_cipher_op(qp, op, sess, msrc, mdst);
+ 		break;
+ 	case OPENSSL_CHAIN_COMBINED:
+-		process_openssl_combined_op(op, sess, msrc, mdst);
++		process_openssl_combined_op(qp, op, sess, msrc, mdst);
+ 		break;
+ 	case OPENSSL_CHAIN_CIPHER_BPI:
+ 		process_openssl_docsis_bpi_op(op, sess, msrc, mdst);
+diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+index b16baaa08f..1bbb855a59 100644
+--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+@@ -794,9 +794,35 @@ qp_setup_cleanup:
+ 
+ /** Returns the size of the symmetric session structure */
+ static unsigned
+-openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
++openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev)
+ {
+-	return sizeof(struct openssl_session);
++	/*
++	 * For 0 qps, return the max size of the session - this is necessary if
++	 * the user calls into this function to create the session mempool,
++	 * without first configuring the number of qps for the cryptodev.
++	 */
++	if (dev->data->nb_queue_pairs == 0) {
++		unsigned int max_nb_qps = ((struct openssl_private *)
++				dev->data->dev_private)->max_nb_qpairs;
++		return sizeof(struct openssl_session) +
++				(sizeof(struct evp_ctx_pair) * max_nb_qps);
++	}
++
++	/*
++	 * With only one queue pair, the thread safety of multiple context
++	 * copies is not necessary, so don't allocate extra memory for the
++	 * array.
++	 */
++	if (dev->data->nb_queue_pairs == 1)
++		return sizeof(struct openssl_session);
++
++	/*
++	 * Otherwise, the size of the flexible array member should be enough to
++	 * fit pointers to per-qp contexts - two context pointers per queue
++	 * pair, to allow for both auth and cipher contexts.
++	 */
++	return sizeof(struct openssl_session) +
++		(sizeof(struct evp_ctx_pair) * dev->data->nb_queue_pairs);
+ }
+ 
+ /** Returns the size of the asymmetric session structure */
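The sizing rule above relies on the session struct ending in a flexible array member, with one context-pair entry per queue pair; zero configured queue pairs returns the worst-case size so that mempools created before dev_configure() are still large enough. A condensed sketch, with illustrative struct names rather than the exact PMD definitions:

```c
/* Sketch of the session sizing rule: base struct plus a flexible array
 * of per-queue-pair context pairs.
 */
#include <stddef.h>

struct ctx_pair { void *cipher; void *auth; };

struct session {
	int chain_order;
	unsigned int ctx_copies_len;
	struct ctx_pair qp_ctx[];  /* flexible array member */
};

static size_t
session_size(unsigned int nb_qps, unsigned int max_qps)
{
	if (nb_qps == 0)           /* not configured yet: assume the worst */
		nb_qps = max_qps;
	if (nb_qps == 1)           /* single qp: no per-qp copies needed */
		return sizeof(struct session);
	return sizeof(struct session) + nb_qps * sizeof(struct ctx_pair);
}
```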
+@@ -808,7 +834,7 @@ openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+ 
+ /** Configure the session from a crypto xform chain */
+ static int
+-openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
++openssl_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ 		struct rte_crypto_sym_xform *xform,
+ 		struct rte_cryptodev_sym_session *sess)
+ {
+@@ -820,7 +846,8 @@ openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = openssl_set_session_parameters(sess_private_data, xform);
++	ret = openssl_set_session_parameters(sess_private_data, xform,
++			dev->data->nb_queue_pairs);
+ 	if (ret != 0) {
+ 		OPENSSL_LOG(ERR, "failed configure session parameters");
+ 
+diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+index de72383d4b..b44acece7c 100644
+--- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
++++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+@@ -9,6 +9,7 @@
+ #include "qat_asym.h"
+ #include "qat_crypto.h"
+ #include "qat_crypto_pmd_gens.h"
++#include "adf_transport_access_macros_gen4vf.h"
+ 
+ 
+ static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = {
+@@ -233,6 +234,78 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+ 	return 0;
+ }
+ 
++int
++qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
++{
++	struct qat_qp *qp = qp_data;
++	struct qat_queue *tx_queue = &qp->tx_q;
++	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
++
++	if (unlikely(dp_ctx->cached_enqueue != n))
++		return -1;
++
++	qp->enqueued += n;
++	qp->stats.enqueued_count += n;
++
++	tx_queue->tail = dp_ctx->tail;
++
++	WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
++		tx_queue->hw_bundle_number,
++		tx_queue->hw_queue_number, tx_queue->tail);
++
++	tx_queue->csr_tail = tx_queue->tail;
++	dp_ctx->cached_enqueue = 0;
++
++	return 0;
++}
++
++int
++qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
++{
++	struct qat_qp *qp = qp_data;
++	struct qat_queue *rx_queue = &qp->rx_q;
++	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
++
++	if (unlikely(dp_ctx->cached_dequeue != n))
++		return -1;
++
++	rx_queue->head = dp_ctx->head;
++	rx_queue->nb_processed_responses += n;
++	qp->dequeued += n;
++	qp->stats.dequeued_count += n;
++	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
++		uint32_t old_head, new_head;
++		uint32_t max_head;
++
++		old_head = rx_queue->csr_head;
++		new_head = rx_queue->head;
++		max_head = qp->nb_descriptors * rx_queue->msg_size;
++
++		/* write out free descriptors */
++		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
++
++		if (new_head < old_head) {
++			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
++					max_head - old_head);
++			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
++					new_head);
++		} else {
++			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
++					old_head);
++		}
++		rx_queue->nb_processed_responses = 0;
++		rx_queue->csr_head = new_head;
++
++		/* write current head to CSR */
++		WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
++			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
++			new_head);
++	}
++
++	dp_ctx->cached_dequeue = 0;
++	return 0;
++}
++
+ static int
+ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+ {
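qat_sym_dp_dequeue_done_gen4() above amortises CSR writes: freed response descriptors are overwritten with the empty-signature byte, including the case where the head pointer wrapped past the end of the ring, and the head CSR is only written once QAT_CSR_HEAD_WRITE_THRESH responses have accumulated. A standalone sketch of just the wrap-aware clearing, with hypothetical names (EMPTY_SIG mirrors ADF_RING_EMPTY_SIG_BYTE):

```c
/* Standalone sketch of the wrap-aware descriptor clearing shown above. */
#include <stdint.h>
#include <string.h>

#define EMPTY_SIG 0x7F

static void
ring_clear(uint8_t *base, uint32_t old_head, uint32_t new_head,
	   uint32_t ring_size)
{
	if (new_head < old_head) {
		/* The head wrapped: clear to the end of the ring, then
		 * from the start of the ring up to the new head.
		 */
		memset(base + old_head, EMPTY_SIG, ring_size - old_head);
		memset(base, EMPTY_SIG, new_head);
	} else {
		memset(base + old_head, EMPTY_SIG, new_head - old_head);
	}
}
```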
+@@ -390,11 +463,51 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+ {
+ 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+ 	struct qat_sym_session *ctx = _ctx;
+-	int ret;
+ 
+-	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+-	if (ret < 0)
+-		return ret;
++	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4;
++	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
++	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
++	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4;
++
++	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
++			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
++			!ctx->is_gmac) {
++		/* AES-GCM or AES-CCM */
++		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
++			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
++			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
++			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
++			&& ctx->qat_hash_alg ==
++					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
++			raw_dp_ctx->enqueue_burst =
++					qat_sym_dp_enqueue_aead_jobs_gen1;
++			raw_dp_ctx->enqueue =
++					qat_sym_dp_enqueue_single_aead_gen1;
++		} else {
++			raw_dp_ctx->enqueue_burst =
++					qat_sym_dp_enqueue_chain_jobs_gen1;
++			raw_dp_ctx->enqueue =
++					qat_sym_dp_enqueue_single_chain_gen1;
++		}
++	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
++		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
++		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
++	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
++		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
++			ctx->qat_cipher_alg ==
++				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
++			raw_dp_ctx->enqueue_burst =
++					qat_sym_dp_enqueue_aead_jobs_gen1;
++			raw_dp_ctx->enqueue =
++					qat_sym_dp_enqueue_single_aead_gen1;
++		} else {
++			raw_dp_ctx->enqueue_burst =
++					qat_sym_dp_enqueue_cipher_jobs_gen1;
++			raw_dp_ctx->enqueue =
++					qat_sym_dp_enqueue_single_cipher_gen1;
++		}
++	} else
++		return -1;
+ 
+ 	if (ctx->is_single_pass && ctx->is_ucs) {
+ 		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+index b8ddf42d6f..64e892d022 100644
+--- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
++++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+@@ -394,7 +394,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+ 		struct qat_sym_op_cookie *cookie)
+ {
+ 	union rte_crypto_sym_ofs ofs;
+-	uint32_t max_len = 0;
++	uint32_t max_len = 0, oop_offset = 0;
+ 	uint32_t cipher_len = 0, cipher_ofs = 0;
+ 	uint32_t auth_len = 0, auth_ofs = 0;
+ 	int is_oop = (op->sym->m_dst != NULL) &&
+@@ -468,6 +468,16 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+ 
+ 	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+ 
++	/* For OOP, the offsets must be rebased to where the earlier of the
++	 * cipher/auth regions starts, i.e. the smaller offset becomes zero.
++	 */
++	if (is_oop) {
++		oop_offset = RTE_MIN(auth_ofs, cipher_ofs);
++		auth_ofs -= oop_offset;
++		cipher_ofs -= oop_offset;
++		max_len -= oop_offset;
++	}
++
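The is_oop branch above rebases both regions so that the gathered source and destination vectors start where the data actually begins. A small sketch with a worked example, using illustrative names:

```c
/* Worked sketch of the OOP offset rebasing above. Example: with
 * cipher_ofs = 32 and auth_ofs = 16, oop_offset = 16; afterwards
 * cipher_ofs = 16, auth_ofs = 0, max_len is reduced by 16, and both
 * mbufs are converted to vectors starting at byte 16.
 */
#include <stdint.h>

static inline uint32_t
rebase_oop(uint32_t *cipher_ofs, uint32_t *auth_ofs, uint32_t *max_len)
{
	uint32_t oop_offset =
	    *auth_ofs < *cipher_ofs ? *auth_ofs : *cipher_ofs;

	*auth_ofs -= oop_offset;
	*cipher_ofs -= oop_offset;
	*max_len -= oop_offset;
	return oop_offset;  /* start offset into both src and dst mbufs */
}
```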
+ 	/* digest in buffer check. Needed only for wireless algos
+ 	 * or combined cipher-crc operations
+ 	 */
+@@ -508,9 +518,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+ 			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+ 					ctx->digest_length);
+ 	}
+-
+-	/* Passing 0 as cipher & auth offsets are assigned into ofs later */
+-	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, 0, max_len,
++	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, oop_offset, max_len,
+ 			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+ 	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+ 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+@@ -520,7 +528,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+ 
+ 	if (unlikely((op->sym->m_dst != NULL) &&
+ 			(op->sym->m_dst != op->sym->m_src))) {
+-		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, 0,
++		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, oop_offset,
+ 				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+ 
+ 		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+@@ -894,10 +902,12 @@ enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+ 		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ 			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+ 
+-		rte_memcpy((uint8_t *)aad->va +
+-				ICP_QAT_HW_CCM_NONCE_OFFSET,
+-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+-			ctx->cipher_iv.length);
++		if (ctx->aad_len > 0) {
++			rte_memcpy((uint8_t *)aad->va +
++					ICP_QAT_HW_CCM_NONCE_OFFSET,
++				(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
++				ctx->cipher_iv.length);
++		}
+ 		break;
+ 	default:
+ 		break;
+@@ -1007,6 +1017,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+ int
+ qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+ 
++int
++qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);
++
++int
++qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);
++
+ int
+ qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+ 
+diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c
+index 6e03bde841..8235fc0a5a 100644
+--- a/dpdk/drivers/crypto/qat/qat_sym.c
++++ b/dpdk/drivers/crypto/qat/qat_sym.c
+@@ -18,7 +18,6 @@
+ #include "qat_qp.h"
+ 
+ uint8_t qat_sym_driver_id;
+-int qat_legacy_capa;
+ 
+ struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+ 
+@@ -266,7 +265,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+ 		}
+ 
+ 		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+-		QAT_LOG(INFO, "Device %s rte_security support ensabled", name);
++		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
+ 	} else {
+ 		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
+ 	}
+diff --git a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c
+index 8968bb853b..2c91ceec13 100644
+--- a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c
++++ b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c
+@@ -16,9 +16,6 @@
+ 
+ #define DPAA2_QDMA_PREFETCH "prefetch"
+ 
+-/* Dynamic log type identifier */
+-int dpaa2_qdma_logtype;
+-
+ uint32_t dpaa2_coherent_no_alloc_cache;
+ uint32_t dpaa2_coherent_alloc_cache;
+ 
+@@ -1699,4 +1696,4 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
+ RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
+ RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
+ 	"no_prefetch=<int> ");
+-RTE_LOG_REGISTER_DEFAULT(dpaa_qdma2_logtype, INFO);
++RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
+diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c
+index 0e11ca14cc..4db3b0554c 100644
+--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c
++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c
+@@ -39,8 +39,6 @@ hisi_dma_queue_base(struct hisi_dma_dev *hw)
+ {
+ 	if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ 		return HISI_DMA_HIP08_QUEUE_BASE;
+-	else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09)
+-		return HISI_DMA_HIP09_QUEUE_BASE;
+ 	else
+ 		return 0;
+ }
+@@ -216,25 +214,6 @@ hisi_dma_init_hw(struct hisi_dma_dev *hw)
+ 				HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
+ 		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG,
+ 				HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
+-	} else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) {
+-		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_CTRL0_REG,
+-				HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M, false);
+-		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG,
+-				HISI_DMA_HIP09_QUEUE_INT_MASK_M, true);
+-		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG,
+-				HISI_DMA_HIP09_QUEUE_INT_MASK_M, true);
+-		hisi_dma_update_queue_mbit(hw,
+-				HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG,
+-				HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true);
+-		hisi_dma_update_queue_mbit(hw,
+-				HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG,
+-				HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true);
+-		hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG,
+-				HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B, true);
+-		hisi_dma_update_bit(hw,
+-				HISI_DMA_HIP09_QUEUE_CFG_REG(hw->queue_id),
+-				HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B,
+-				true);
+ 	}
+ }
+ 
+@@ -256,8 +235,6 @@ hisi_dma_reg_layout(uint8_t revision)
+ {
+ 	if (revision == HISI_DMA_REVISION_HIP08B)
+ 		return HISI_DMA_REG_LAYOUT_HIP08;
+-	else if (revision >= HISI_DMA_REVISION_HIP09A)
+-		return HISI_DMA_REG_LAYOUT_HIP09;
+ 	else
+ 		return HISI_DMA_REG_LAYOUT_INVALID;
+ }
+@@ -328,14 +305,11 @@ hisi_dma_info_get(const struct rte_dma_dev *dev,
+ 		  struct rte_dma_info *dev_info,
+ 		  uint32_t info_sz)
+ {
+-	struct hisi_dma_dev *hw = dev->data->dev_private;
++	RTE_SET_USED(dev);
+ 	RTE_SET_USED(info_sz);
+ 
+ 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+ 			     RTE_DMA_CAPA_OPS_COPY;
+-	if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09)
+-		dev_info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS;
+-
+ 	dev_info->max_vchans = 1;
+ 	dev_info->max_desc = HISI_DMA_MAX_DESC_NUM;
+ 	dev_info->min_desc = HISI_DMA_MIN_DESC_NUM;
+@@ -514,18 +488,6 @@ hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f)
+ 		{ HISI_DMA_REG_LAYOUT_HIP08,
+ 		  HISI_DMA_HIP08_DUMP_START_REG,
+ 		  HISI_DMA_HIP08_DUMP_END_REG },
+-		{ HISI_DMA_REG_LAYOUT_HIP09,
+-		  HISI_DMA_HIP09_DUMP_REGION_A_START_REG,
+-		  HISI_DMA_HIP09_DUMP_REGION_A_END_REG },
+-		{ HISI_DMA_REG_LAYOUT_HIP09,
+-		  HISI_DMA_HIP09_DUMP_REGION_B_START_REG,
+-		  HISI_DMA_HIP09_DUMP_REGION_B_END_REG },
+-		{ HISI_DMA_REG_LAYOUT_HIP09,
+-		  HISI_DMA_HIP09_DUMP_REGION_C_START_REG,
+-		  HISI_DMA_HIP09_DUMP_REGION_C_END_REG },
+-		{ HISI_DMA_REG_LAYOUT_HIP09,
+-		  HISI_DMA_HIP09_DUMP_REGION_D_START_REG,
+-		  HISI_DMA_HIP09_DUMP_REGION_D_END_REG },
+ 	};
+ 	uint32_t i;
+ 
+diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h
+index 5a17f9f69e..a57b5c759a 100644
+--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h
++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h
+@@ -25,22 +25,14 @@
+ #define HISI_DMA_DEVICE_ID			0xA122
+ #define HISI_DMA_PCI_REVISION_ID_REG		0x08
+ #define HISI_DMA_REVISION_HIP08B		0x21
+-#define HISI_DMA_REVISION_HIP09A		0x30
+ 
+ #define HISI_DMA_MAX_HW_QUEUES			4
+ #define HISI_DMA_MAX_DESC_NUM			8192
+ #define HISI_DMA_MIN_DESC_NUM			32
+ 
+-/**
+- * The HIP08B(HiSilicon IP08) and HIP09B(HiSilicon IP09) are DMA iEPs, they
+- * have the same pci device id but different pci revision.
+- * Unfortunately, they have different register layouts, so two layout
+- * enumerations are defined.
+- */
+ enum {
+ 	HISI_DMA_REG_LAYOUT_INVALID = 0,
+-	HISI_DMA_REG_LAYOUT_HIP08,
+-	HISI_DMA_REG_LAYOUT_HIP09
++	HISI_DMA_REG_LAYOUT_HIP08
+ };
+ 
+ /**
+@@ -69,9 +61,6 @@ enum {
+  * length of queue-region. The global offset for a single queue register is
+  * calculated by:
+  *     offset = queue-base + (queue-id * queue-region) + reg-offset-in-region.
+- *
+- * The first part of queue region is basically the same for HIP08 and HIP09
+- * register layouts, therefore, HISI_QUEUE_* registers are defined for it.
+  */
+ #define HISI_DMA_QUEUE_SQ_BASE_L_REG		0x0
+ #define HISI_DMA_QUEUE_SQ_BASE_H_REG		0x4
+@@ -110,28 +99,6 @@ enum {
+ #define HISI_DMA_HIP08_DUMP_START_REG			0x2000
+ #define HISI_DMA_HIP08_DUMP_END_REG			0x2280
+ 
+-/**
+- * HiSilicon IP09 DMA register and field define:
+- */
+-#define HISI_DMA_HIP09_QUEUE_BASE			0x2000
+-#define HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M		GENMASK(31, 28)
+-#define HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B		2
+-#define HISI_DMA_HIP09_QUEUE_INT_MASK_M			0x1
+-#define HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG		0x48
+-#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG		0x4C
+-#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M		GENMASK(18, 1)
+-#define HISI_DMA_HIP09_QUEUE_CFG_REG(queue_id)		(0x800 + \
+-							 (queue_id) * 0x20)
+-#define HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B	16
+-#define HISI_DMA_HIP09_DUMP_REGION_A_START_REG		0x0
+-#define HISI_DMA_HIP09_DUMP_REGION_A_END_REG		0x368
+-#define HISI_DMA_HIP09_DUMP_REGION_B_START_REG		0x800
+-#define HISI_DMA_HIP09_DUMP_REGION_B_END_REG		0xA08
+-#define HISI_DMA_HIP09_DUMP_REGION_C_START_REG		0x1800
+-#define HISI_DMA_HIP09_DUMP_REGION_C_END_REG		0x1A4C
+-#define HISI_DMA_HIP09_DUMP_REGION_D_START_REG		0x1C00
+-#define HISI_DMA_HIP09_DUMP_REGION_D_END_REG		0x1CC4
+-
+ /**
+  * In fact, there are multiple states, but it need to pay attention to
+  * the following three states for the driver:
+diff --git a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py
+index c0c833ade9..5c9572b49d 100755
+--- a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py
++++ b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py
+@@ -104,8 +104,10 @@ def configure_dsa(dsa_id, args):
+                  "priority": 1,
+                  "max_batch_size": 1024,
+                  "size": int(max_work_queues_size / nb_queues)}
+-        wqcfg.update(parse_wq_opts(args.wq_option))
+         wq_dir = SysfsDir(os.path.join(dsa_dir.path, f"wq{dsa_id}.{q}"))
++        if os.path.exists(os.path.join(wq_dir.path, f"driver_name")):
++            wqcfg.update({"driver_name": "user"})
++        wqcfg.update(parse_wq_opts(args.wq_option))
+         wq_dir.write_values(wqcfg)
+ 
+     # enable device and then queues
+diff --git a/dpdk/drivers/dma/idxd/idxd_bus.c b/dpdk/drivers/dma/idxd/idxd_bus.c
+index 3b2d4c2b65..ba8076715d 100644
+--- a/dpdk/drivers/dma/idxd/idxd_bus.c
++++ b/dpdk/drivers/dma/idxd/idxd_bus.c
+@@ -261,9 +261,15 @@ static int
+ is_for_this_process_use(struct rte_dsa_device *dev, const char *name)
+ {
+ 	char *runtime_dir = strdup(rte_eal_get_runtime_dir());
+-	char *prefix = basename(runtime_dir);
+-	int prefixlen = strlen(prefix);
+ 	int retval = 0;
++	int prefixlen;
++	char *prefix;
++
++	if (runtime_dir == NULL)
++		return retval;
++
++	prefix = basename(runtime_dir);
++	prefixlen = strlen(prefix);
+ 
+ 	if (strncmp(name, "dpdk_", 5) == 0)
+ 		retval = 1;
+diff --git a/dpdk/drivers/event/cnxk/cn10k_tx_worker.h b/dpdk/drivers/event/cnxk/cn10k_tx_worker.h
+index 53e0dde20c..256237b895 100644
+--- a/dpdk/drivers/event/cnxk/cn10k_tx_worker.h
++++ b/dpdk/drivers/event/cnxk/cn10k_tx_worker.h
+@@ -70,6 +70,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
+ 		 const uint64_t *txq_data, const uint32_t flags)
+ {
+ 	uint8_t lnum = 0, loff = 0, shft = 0;
++	struct rte_mbuf *extm = NULL;
+ 	struct cn10k_eth_txq *txq;
+ 	uintptr_t laddr;
+ 	uint16_t segdw;
+@@ -90,7 +91,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
+ 	if (flags & NIX_TX_OFFLOAD_TSO_F)
+ 		cn10k_nix_xmit_prepare_tso(m, flags);
+ 
+-	cn10k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, &sec,
++	cn10k_nix_xmit_prepare(txq, m, &extm, cmd, flags, txq->lso_tun_fmt, &sec,
+ 			       txq->mark_flag, txq->mark_fmt);
+ 
+ 	laddr = lmt_addr;
+@@ -105,7 +106,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
+ 	cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+ 
+ 	if (flags & NIX_TX_MULTI_SEG_F)
+-		segdw = cn10k_nix_prepare_mseg(txq, m, (uint64_t *)laddr, flags);
++		segdw = cn10k_nix_prepare_mseg(txq, m, &extm, (uint64_t *)laddr, flags);
+ 	else
+ 		segdw = cn10k_nix_tx_ext_subs(flags) + 2;
+ 
+@@ -127,6 +128,9 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
+ 	/* Memory barrier to make sure lmtst store completes */
+ 	rte_io_wmb();
+ 
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena)
++		cn10k_nix_free_extmbuf(extm);
++
+ 	return 1;
+ }
+ 
+diff --git a/dpdk/drivers/event/cnxk/cn9k_worker.h b/dpdk/drivers/event/cnxk/cn9k_worker.h
+index 0451157812..107265d54b 100644
+--- a/dpdk/drivers/event/cnxk/cn9k_worker.h
++++ b/dpdk/drivers/event/cnxk/cn9k_worker.h
+@@ -746,7 +746,7 @@ static __rte_always_inline uint16_t
+ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
+ 		      uint64_t *txq_data, const uint32_t flags)
+ {
+-	struct rte_mbuf *m = ev->mbuf;
++	struct rte_mbuf *m = ev->mbuf, *extm = NULL;
+ 	struct cn9k_eth_txq *txq;
+ 
+ 	/* Perform header writes before barrier for TSO */
+@@ -767,7 +767,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
+ 	if (cn9k_sso_sq_depth(txq) <= 0)
+ 		return 0;
+ 	cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
+-	cn9k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
++	cn9k_nix_xmit_prepare(txq, m, &extm, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
+ 			      txq->mark_fmt);
+ 
+ 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+@@ -789,7 +789,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
+ 	}
+ 
+ 	if (flags & NIX_TX_MULTI_SEG_F) {
+-		const uint16_t segdw = cn9k_nix_prepare_mseg(txq, m, cmd, flags);
++		const uint16_t segdw = cn9k_nix_prepare_mseg(txq, m, &extm, cmd, flags);
+ 		cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, segdw,
+ 					     flags);
+ 		if (!CNXK_TT_FROM_EVENT(ev->event)) {
+@@ -819,6 +819,9 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
+ 	}
+ 
+ done:
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena)
++		cn9k_nix_free_extmbuf(extm);
++
+ 	return 1;
+ }
+ 
+diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c
+index 0c61f4c20e..20f7f0d6df 100644
+--- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c
++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c
+@@ -162,16 +162,17 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev, uint32_t deq_depth,
+ 
+ 	deq_tmo_ns = conf->dequeue_timeout_ns;
+ 
+-	if (deq_tmo_ns == 0)
+-		deq_tmo_ns = dev->min_dequeue_timeout_ns;
+-	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
+-	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
++	if (deq_tmo_ns && (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
++			   deq_tmo_ns > dev->max_dequeue_timeout_ns)) {
+ 		plt_err("Unsupported dequeue timeout requested");
+ 		return -EINVAL;
+ 	}
+ 
+-	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
++	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
++		if (deq_tmo_ns == 0)
++			deq_tmo_ns = dev->min_dequeue_timeout_ns;
+ 		dev->is_timeout_deq = 1;
++	}
+ 
+ 	dev->deq_tmo_ns = deq_tmo_ns;
+ 
+@@ -553,6 +554,9 @@ parse_list(const char *value, void *opaque, param_parse_t fn)
+ 	char *end = NULL;
+ 	char *f = s;
+ 
++	if (s == NULL)
++		return;
++
+ 	while (*s) {
+ 		if (*s == '[')
+ 			start = s;
+@@ -663,7 +667,7 @@ cnxk_sso_init(struct rte_eventdev *event_dev)
+ 	}
+ 
+ 	dev->is_timeout_deq = 0;
+-	dev->min_dequeue_timeout_ns = 0;
++	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
+ 	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
+ 	dev->max_num_events = -1;
+ 	dev->nb_event_queues = 0;
+diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c
+index 050ace0904..5044cb17ef 100644
+--- a/dpdk/drivers/event/dlb2/dlb2.c
++++ b/dpdk/drivers/event/dlb2/dlb2.c
+@@ -160,7 +160,6 @@ static int
+ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
+ {
+ 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
+-	struct dlb2_hw_resource_info *dlb2_info = &handle->info;
+ 	int num_ldb_ports;
+ 	int ret;
+ 
+@@ -222,8 +221,6 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
+ 	handle->info.hw_rsrc_max.reorder_window_size =
+ 		dlb2->hw_rsrc_query_results.num_hist_list_entries;
+ 
+-	rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
+-
+ 	return 0;
+ }
+ 
+diff --git a/dpdk/drivers/event/sw/iq_chunk.h b/dpdk/drivers/event/sw/iq_chunk.h
+index 31d013eab7..7820815c38 100644
+--- a/dpdk/drivers/event/sw/iq_chunk.h
++++ b/dpdk/drivers/event/sw/iq_chunk.h
+@@ -9,8 +9,6 @@
+ #include <stdbool.h>
+ #include <rte_eventdev.h>
+ 
+-#define IQ_ROB_NAMESIZE 12
+-
+ struct sw_queue_chunk {
+ 	struct rte_event events[SW_EVS_PER_Q_CHUNK];
+ 	struct sw_queue_chunk *next;
+diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c
+index 55e7735cb0..2096496917 100644
+--- a/dpdk/drivers/event/sw/sw_evdev.c
++++ b/dpdk/drivers/event/sw/sw_evdev.c
+@@ -228,9 +228,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
+ 		const struct rte_event_queue_conf *queue_conf)
+ {
+ 	unsigned int i;
+-	int dev_id = sw->data->dev_id;
+ 	int socket_id = sw->data->socket_id;
+-	char buf[IQ_ROB_NAMESIZE];
+ 	struct sw_qid *qid = &sw->qids[idx];
+ 
+ 	/* Initialize the FID structures to no pinning (-1), and zero packets */
+@@ -260,8 +258,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
+ 			goto cleanup;
+ 		}
+ 
+-		snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
+-		qid->reorder_buffer = rte_zmalloc_socket(buf,
++		qid->reorder_buffer = rte_zmalloc_socket(NULL,
+ 				window_size * sizeof(qid->reorder_buffer[0]),
+ 				0, socket_id);
+ 		if (!qid->reorder_buffer) {
+diff --git a/dpdk/drivers/meson.build b/dpdk/drivers/meson.build
+index 5ba534049a..f2be71bc05 100644
+--- a/dpdk/drivers/meson.build
++++ b/dpdk/drivers/meson.build
+@@ -93,7 +93,7 @@ foreach subpath:subdirs
+         if skip_class
+             drv_path = join_paths(class, '*')
+             dpdk_drvs_disabled += drv_path
+-            set_variable(drv_path.underscorify() + '_disable_reason', reason)
++            set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason)
+             continue
+         endif
+     endif
+@@ -199,7 +199,7 @@ foreach subpath:subdirs
+             # component disable printout in those cases
+             if reason != ''
+                 dpdk_drvs_disabled += drv_path
+-                set_variable(drv_path.underscorify() + '_disable_reason', reason)
++                set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason)
+             endif
+             continue
+         endif
+diff --git a/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c b/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c
+index 7f7e5efcea..5370038733 100644
+--- a/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c
++++ b/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c
+@@ -288,6 +288,7 @@ cn10k_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *l
+ static int
+ cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer)
+ {
++	struct cn10k_ml_layer_xstats *xstats;
+ 	char str[RTE_MEMZONE_NAMESIZE];
+ 	const struct plt_memzone *mz;
+ 	uint64_t isize = 0;
+@@ -309,6 +310,16 @@ cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *
+ 				      PLT_PTR_ADD(mz->addr, isize), 1);
+ 	plt_memzone_free(mz);
+ 
++	/* Reset sync xstats. */
++	xstats = layer->glow.sync_xstats;
++	xstats->hw_latency_tot = 0;
++	xstats->hw_latency_min = UINT64_MAX;
++	xstats->hw_latency_max = 0;
++	xstats->fw_latency_tot = 0;
++	xstats->fw_latency_min = UINT64_MAX;
++	xstats->fw_latency_max = 0;
++	xstats->dequeued_count = 0;
++
+ 	return ret;
+ }
+ 
+diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
+index 397a32db58..6b7b16f348 100644
+--- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
++++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
+@@ -6,6 +6,7 @@
+  * All rights reserved.
+  */
+ 
++#include <rte_common.h>
+ #include <rte_string_fns.h>
+ #include <rte_mbuf.h>
+ #include <ethdev_driver.h>
+@@ -39,7 +40,7 @@
+ #define DFLT_FRAME_SIZE		(1 << 11)
+ #define DFLT_FRAME_COUNT	(1 << 9)
+ 
+-struct pkt_rx_queue {
++struct __rte_cache_aligned pkt_rx_queue {
+ 	int sockfd;
+ 
+ 	struct iovec *rd;
+@@ -55,7 +56,7 @@ struct pkt_rx_queue {
+ 	volatile unsigned long rx_bytes;
+ };
+ 
+-struct pkt_tx_queue {
++struct __rte_cache_aligned pkt_tx_queue {
+ 	int sockfd;
+ 	unsigned int frame_data_size;
+ 
+diff --git a/dpdk/drivers/net/af_xdp/compat.h b/dpdk/drivers/net/af_xdp/compat.h
+index 28ea64aeaa..3b5a5c1ed5 100644
+--- a/dpdk/drivers/net/af_xdp/compat.h
++++ b/dpdk/drivers/net/af_xdp/compat.h
+@@ -46,6 +46,21 @@ create_shared_socket(struct xsk_socket **xsk_ptr __rte_unused,
+ }
+ #endif
+ 
++#ifdef ETH_AF_XDP_UPDATE_XSKMAP
++static __rte_always_inline int
++update_xskmap(struct xsk_socket *xsk, int map_fd, int xsk_queue_idx __rte_unused)
++{
++	return xsk_socket__update_xskmap(xsk, map_fd);
++}
++#else
++static __rte_always_inline int
++update_xskmap(struct xsk_socket *xsk, int map_fd, int xsk_queue_idx)
++{
++	int fd = xsk_socket__fd(xsk);
++	return bpf_map_update_elem(map_fd, &xsk_queue_idx, &fd, 0);
++}
++#endif
++
+ #ifdef XDP_USE_NEED_WAKEUP
+ static int
+ tx_syscall_needed(struct xsk_ring_prod *q)
+diff --git a/dpdk/drivers/net/af_xdp/meson.build b/dpdk/drivers/net/af_xdp/meson.build
+index 9f33e57fa2..69d109ff46 100644
+--- a/dpdk/drivers/net/af_xdp/meson.build
++++ b/dpdk/drivers/net/af_xdp/meson.build
+@@ -7,6 +7,12 @@ if is_windows
+     subdir_done()
+ endif
+ 
++if arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_32')
++    build = false
++    reason = 'not supported on 32-bit x86'
++    subdir_done()
++endif
++
+ sources = files('rte_eth_af_xdp.c')
+ 
+ libxdp_ver = '>=1.2.2'
+@@ -77,6 +83,10 @@ if build
+                      dependencies : bpf_dep, args: cflags)
+       cflags += ['-DRTE_NET_AF_XDP_LIBBPF_XDP_ATTACH']
+   endif
++  if cc.has_function('xsk_socket__update_xskmap', prefix : xsk_check_prefix,
++                     dependencies : ext_deps, args: cflags)
++      cflags += ['-DETH_AF_XDP_UPDATE_XSKMAP']
++  endif
+ endif
+ 
+ require_iova_in_mbuf = false
+diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c
+index 353c8688ec..74f750dbb3 100644
+--- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c
++++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c
+@@ -83,12 +83,13 @@ RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
+ 
+ #define ETH_AF_XDP_MP_KEY "afxdp_mp_send_fds"
+ 
++#define DP_BASE_PATH			"/tmp/afxdp_dp"
++#define DP_UDS_SOCK			"afxdp.sock"
+ #define MAX_LONG_OPT_SZ			64
+ #define UDS_MAX_FD_NUM			2
+ #define UDS_MAX_CMD_LEN			64
+ #define UDS_MAX_CMD_RESP		128
+ #define UDS_XSK_MAP_FD_MSG		"/xsk_map_fd"
+-#define UDS_SOCK			"/tmp/afxdp.sock"
+ #define UDS_CONNECT_MSG			"/connect"
+ #define UDS_HOST_OK_MSG			"/host_ok"
+ #define UDS_HOST_NAK_MSG		"/host_nak"
+@@ -123,7 +124,7 @@ struct xsk_umem_info {
+ struct rx_stats {
+ 	uint64_t rx_pkts;
+ 	uint64_t rx_bytes;
+-	uint64_t rx_dropped;
++	uint64_t imissed_offset;
+ };
+ 
+ struct pkt_rx_queue {
+@@ -131,6 +132,7 @@ struct pkt_rx_queue {
+ 	struct xsk_umem_info *umem;
+ 	struct xsk_socket *xsk;
+ 	struct rte_mempool *mb_pool;
++	uint16_t port;
+ 
+ 	struct rx_stats stats;
+ 
+@@ -171,6 +173,7 @@ struct pmd_internals {
+ 	bool custom_prog_configured;
+ 	bool force_copy;
+ 	bool use_cni;
++	char dp_path[PATH_MAX];
+ 	struct bpf_map *map;
+ 
+ 	struct rte_ether_addr eth_addr;
+@@ -191,6 +194,7 @@ struct pmd_process_private {
+ #define ETH_AF_XDP_BUDGET_ARG			"busy_budget"
+ #define ETH_AF_XDP_FORCE_COPY_ARG		"force_copy"
+ #define ETH_AF_XDP_USE_CNI_ARG			"use_cni"
++#define ETH_AF_XDP_DP_PATH_ARG			"dp_path"
+ 
+ static const char * const valid_arguments[] = {
+ 	ETH_AF_XDP_IFACE_ARG,
+@@ -201,6 +205,7 @@ static const char * const valid_arguments[] = {
+ 	ETH_AF_XDP_BUDGET_ARG,
+ 	ETH_AF_XDP_FORCE_COPY_ARG,
+ 	ETH_AF_XDP_USE_CNI_ARG,
++	ETH_AF_XDP_DP_PATH_ARG,
+ 	NULL
+ };
+ 
+@@ -311,6 +316,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 	unsigned long rx_bytes = 0;
+ 	int i;
+ 	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
++	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port];
+ 
+ 	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+ 
+@@ -338,6 +344,8 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 		 * xsk_ring_cons__peek
+ 		 */
+ 		rx->cached_cons -= nb_pkts;
++		dev->data->rx_mbuf_alloc_failed += nb_pkts;
++
+ 		return 0;
+ 	}
+ 
+@@ -360,6 +368,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 		bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
+ 			rte_pktmbuf_priv_size(umem->mb_pool) -
+ 			umem->mb_pool->header_size;
++		bufs[i]->port = rxq->port;
+ 
+ 		rte_pktmbuf_pkt_len(bufs[i]) = len;
+ 		rte_pktmbuf_data_len(bufs[i]) = len;
+@@ -388,6 +397,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 	int i;
+ 	uint32_t free_thresh = fq->size >> 1;
+ 	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
++	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port];
+ 
+ 	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
+ 		(void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
+@@ -406,6 +416,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 		 * xsk_ring_cons__peek
+ 		 */
+ 		rx->cached_cons -= nb_pkts;
++		dev->data->rx_mbuf_alloc_failed += nb_pkts;
+ 		return 0;
+ 	}
+ 
+@@ -426,6 +437,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 		rte_pktmbuf_data_len(mbufs[i]) = len;
+ 		rx_bytes += len;
+ 		bufs[i] = mbufs[i];
++		bufs[i]->port = rxq->port;
+ 	}
+ 
+ 	xsk_ring_cons__release(rx, nb_pkts);
+@@ -867,7 +879,6 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ 
+ 		stats->ipackets += stats->q_ipackets[i];
+ 		stats->ibytes += stats->q_ibytes[i];
+-		stats->imissed += rxq->stats.rx_dropped;
+ 		stats->oerrors += txq->stats.tx_dropped;
+ 		fd = process_private->rxq_xsk_fds[i];
+ 		ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS,
+@@ -876,7 +887,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ 			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
+ 			return -1;
+ 		}
+-		stats->imissed += xdp_stats.rx_dropped;
++		stats->imissed += xdp_stats.rx_dropped - rxq->stats.imissed_offset;
+ 
+ 		stats->opackets += stats->q_opackets[i];
+ 		stats->obytes += stats->q_obytes[i];
+@@ -889,13 +900,25 @@ static int
+ eth_stats_reset(struct rte_eth_dev *dev)
+ {
+ 	struct pmd_internals *internals = dev->data->dev_private;
+-	int i;
++	struct pmd_process_private *process_private = dev->process_private;
++	struct xdp_statistics xdp_stats;
++	socklen_t optlen;
++	int i, ret, fd;
+ 
+ 	for (i = 0; i < internals->queue_cnt; i++) {
+ 		memset(&internals->rx_queues[i].stats, 0,
+ 					sizeof(struct rx_stats));
+ 		memset(&internals->tx_queues[i].stats, 0,
+ 					sizeof(struct tx_stats));
++		fd = process_private->rxq_xsk_fds[i];
++		optlen = sizeof(struct xdp_statistics);
++		ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS,
++					   &xdp_stats, &optlen) : -1;
++		if (ret != 0) {
++			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
++			return -1;
++		}
++		internals->rx_queues[i].stats.imissed_offset = xdp_stats.rx_dropped;
+ 	}
+ 
+ 	return 0;
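Because the kernel's XDP_STATISTICS counters cannot be zeroed from userspace, the reset handler above snapshots rx_dropped into imissed_offset and eth_stats_get() subtracts that snapshot on every read. The general pattern, sketched with illustrative names:

```c
/* Generic sketch of the reset-by-offset pattern: a kernel counter that
 * cannot be cleared is "reset" by remembering its value and subtracting
 * the snapshot on every subsequent read.
 */
#include <stdint.h>

struct counter_view {
	uint64_t offset;	/* snapshot taken at reset time */
};

static void
counter_reset(struct counter_view *v, uint64_t kernel_value)
{
	v->offset = kernel_value;
}

static uint64_t
counter_read(const struct counter_view *v, uint64_t kernel_value)
{
	return kernel_value - v->offset;
}
```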
+@@ -960,6 +983,9 @@ remove_xdp_program(struct pmd_internals *internals)
+ static void
+ xdp_umem_destroy(struct xsk_umem_info *umem)
+ {
++	(void)xsk_umem__delete(umem->umem);
++	umem->umem = NULL;
++
+ #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
+ 	umem->mb_pool = NULL;
+ #else
+@@ -992,11 +1018,8 @@ eth_dev_close(struct rte_eth_dev *dev)
+ 			break;
+ 		xsk_socket__delete(rxq->xsk);
+ 
+-		if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1
+-				== 0) {
+-			(void)xsk_umem__delete(rxq->umem->umem);
++		if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ 			xdp_umem_destroy(rxq->umem);
+-		}
+ 
+ 		/* free pkt_tx_queue */
+ 		rte_free(rxq->pair);
+@@ -1234,6 +1257,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ 		AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
+ 		goto err;
+ 	}
++	umem->mz = mz;
+ 
+ 	ret = xsk_umem__create(&umem->umem, mz->addr,
+ 			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
+@@ -1244,7 +1268,6 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ 		AF_XDP_LOG(ERR, "Failed to create umem\n");
+ 		goto err;
+ 	}
+-	umem->mz = mz;
+ 
+ 	return umem;
+ 
+@@ -1351,7 +1374,7 @@ err_prefer:
+ }
+ 
+ static int
+-init_uds_sock(struct sockaddr_un *server)
++init_uds_sock(struct sockaddr_un *server, const char *dp_path)
+ {
+ 	int sock;
+ 
+@@ -1362,7 +1385,7 @@ init_uds_sock(struct sockaddr_un *server)
+ 	}
+ 
+ 	server->sun_family = AF_UNIX;
+-	strlcpy(server->sun_path, UDS_SOCK, sizeof(server->sun_path));
++	strlcpy(server->sun_path, dp_path, sizeof(server->sun_path));
+ 
+ 	if (connect(sock, (struct sockaddr *)server, sizeof(struct sockaddr_un)) < 0) {
+ 		close(sock);
+@@ -1382,7 +1405,7 @@ struct msg_internal {
+ };
+ 
+ static int
+-send_msg(int sock, char *request, int *fd)
++send_msg(int sock, char *request, int *fd, const char *dp_path)
+ {
+ 	int snd;
+ 	struct iovec iov;
+@@ -1393,7 +1416,7 @@ send_msg(int sock, char *request, int *fd)
+ 
+ 	memset(&dst, 0, sizeof(dst));
+ 	dst.sun_family = AF_UNIX;
+-	strlcpy(dst.sun_path, UDS_SOCK, sizeof(dst.sun_path));
++	strlcpy(dst.sun_path, dp_path, sizeof(dst.sun_path));
+ 
+ 	/* Initialize message header structure */
+ 	memset(&msgh, 0, sizeof(msgh));
+@@ -1470,8 +1493,8 @@ read_msg(int sock, char *response, struct sockaddr_un *s, int *fd)
+ }
+ 
+ static int
+-make_request_cni(int sock, struct sockaddr_un *server, char *request,
+-		 int *req_fd, char *response, int *out_fd)
++make_request_dp(int sock, struct sockaddr_un *server, char *request,
++		 int *req_fd, char *response, int *out_fd, const char *dp_path)
+ {
+ 	int rval;
+ 
+@@ -1483,7 +1506,7 @@ make_request_cni(int sock, struct sockaddr_un *server, char *request,
+ 	if (req_fd == NULL)
+ 		rval = write(sock, request, strlen(request));
+ 	else
+-		rval = send_msg(sock, request, req_fd);
++		rval = send_msg(sock, request, req_fd, dp_path);
+ 
+ 	if (rval < 0) {
+ 		AF_XDP_LOG(ERR, "Write error %s\n", strerror(errno));
+@@ -1507,7 +1530,7 @@ check_response(char *response, char *exp_resp, long size)
+ }
+ 
+ static int
+-get_cni_fd(char *if_name)
++uds_get_xskmap_fd(char *if_name, const char *dp_path)
+ {
+ 	char request[UDS_MAX_CMD_LEN], response[UDS_MAX_CMD_RESP];
+ 	char hostname[MAX_LONG_OPT_SZ], exp_resp[UDS_MAX_CMD_RESP];
+@@ -1520,14 +1543,14 @@ get_cni_fd(char *if_name)
+ 		return -1;
+ 
+ 	memset(&server, 0, sizeof(server));
+-	sock = init_uds_sock(&server);
++	sock = init_uds_sock(&server, dp_path);
+ 	if (sock < 0)
+ 		return -1;
+ 
+-	/* Initiates handshake to CNI send: /connect,hostname */
++	/* Initiate the handshake with the AF_XDP Device Plugin by sending: /connect,hostname */
+ 	snprintf(request, sizeof(request), "%s,%s", UDS_CONNECT_MSG, hostname);
+ 	memset(response, 0, sizeof(response));
+-	if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {
++	if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
+ 		AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ 		goto err_close;
+ 	}
+@@ -1541,7 +1564,7 @@ get_cni_fd(char *if_name)
+ 	/* Request for "/version" */
+ 	strlcpy(request, UDS_VERSION_MSG, UDS_MAX_CMD_LEN);
+ 	memset(response, 0, sizeof(response));
+-	if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {
++	if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
+ 		AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ 		goto err_close;
+ 	}
+@@ -1549,7 +1572,7 @@ get_cni_fd(char *if_name)
+ 	/* Request for file descriptor for netdev name*/
+ 	snprintf(request, sizeof(request), "%s,%s", UDS_XSK_MAP_FD_MSG, if_name);
+ 	memset(response, 0, sizeof(response));
+-	if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {
++	if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
+ 		AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ 		goto err_close;
+ 	}
+@@ -1571,7 +1594,7 @@ get_cni_fd(char *if_name)
+ 	/* Initiate close connection */
+ 	strlcpy(request, UDS_FIN_MSG, UDS_MAX_CMD_LEN);
+ 	memset(response, 0, sizeof(response));
+-	if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {
++	if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
+ 		AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ 		goto err_close;
+ 	}
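The handshake above walks through /connect,hostname, /version, /xsk_map_fd,&lt;ifname&gt; and /fin over the plugin's Unix socket; the xsk map fd itself arrives as ancillary data. A minimal sketch of receiving such a descriptor with SCM_RIGHTS, which is the mechanism read_msg() relies on (recv_fd() is a hypothetical helper, not PMD API):

```c
/* Minimal sketch of receiving a file descriptor over a Unix domain
 * socket via SCM_RIGHTS ancillary data.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
recv_fd(int sock, char *buf, size_t buflen)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
	struct msghdr msgh = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int fd = -1;

	if (recvmsg(sock, &msgh, 0) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
	     cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS) {
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
			break;
		}
	}
	return fd;	/* -1 if no descriptor was attached */
}
```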
+@@ -1695,21 +1718,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
+ 	}
+ 
+ 	if (internals->use_cni) {
+-		int err, fd, map_fd;
++		int err, map_fd;
+ 
+-		/* get socket fd from CNI plugin */
+-		map_fd = get_cni_fd(internals->if_name);
++		/* get socket fd from AF_XDP Device Plugin */
++		map_fd = uds_get_xskmap_fd(internals->if_name, internals->dp_path);
+ 		if (map_fd < 0) {
+-			AF_XDP_LOG(ERR, "Failed to receive CNI plugin fd\n");
++			AF_XDP_LOG(ERR, "Failed to receive xskmap fd from AF_XDP Device Plugin\n");
+ 			goto out_xsk;
+ 		}
+-		/* get socket fd */
+-		fd = xsk_socket__fd(rxq->xsk);
+-		err = bpf_map_update_elem(map_fd, &rxq->xsk_queue_idx, &fd, 0);
++
++		err = update_xskmap(rxq->xsk, map_fd, rxq->xsk_queue_idx);
+ 		if (err) {
+-			AF_XDP_LOG(ERR, "Failed to insert unprivileged xsk in map.\n");
++			AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
+ 			goto out_xsk;
+ 		}
++
+ 	} else if (rxq->busy_budget) {
+ 		ret = configure_preferred_busy_poll(rxq);
+ 		if (ret) {
+@@ -1779,6 +1802,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
+ 
+ 	process_private->rxq_xsk_fds[rx_queue_id] = rxq->fds[0].fd;
+ 
++	rxq->port = dev->data->port_id;
++
+ 	dev->data->rx_queues[rx_queue_id] = rxq;
+ 	return 0;
+ 
+@@ -1881,13 +1906,13 @@ static const struct eth_dev_ops ops = {
+ 	.get_monitor_addr = eth_get_monitor_addr,
+ };
+ 
+-/* CNI option works in unprivileged container environment
+- * and ethernet device functionality will be reduced. So
+- * additional customiszed eth_dev_ops struct is needed
+- * for cni. Promiscuous enable and disable functionality
+- * is removed.
++/* The AF_XDP Device Plugin option works in unprivileged
++ * container environments, where ethernet device functionality
++ * is reduced, so an additional customised eth_dev_ops
++ * struct is needed for the Device Plugin. Promiscuous
++ * enable and disable functionality is removed.
+  **/
+-static const struct eth_dev_ops ops_cni = {
++static const struct eth_dev_ops ops_afxdp_dp = {
+ 	.dev_start = eth_dev_start,
+ 	.dev_stop = eth_dev_stop,
+ 	.dev_close = eth_dev_close,
+@@ -2023,7 +2048,8 @@ xdp_get_channels_info(const char *if_name, int *max_queues,
+ static int
+ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
+ 		 int *queue_cnt, int *shared_umem, char *prog_path,
+-		 int *busy_budget, int *force_copy, int *use_cni)
++		 int *busy_budget, int *force_copy, int *use_cni,
++		 char *dp_path)
+ {
+ 	int ret;
+ 
+@@ -2069,6 +2095,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
+ 	if (ret < 0)
+ 		goto free_kvlist;
+ 
++	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_DP_PATH_ARG,
++				 &parse_prog_arg, dp_path);
++	if (ret < 0)
++		goto free_kvlist;
++
+ free_kvlist:
+ 	rte_kvargs_free(kvlist);
+ 	return ret;
+@@ -2108,7 +2139,7 @@ static struct rte_eth_dev *
+ init_internals(struct rte_vdev_device *dev, const char *if_name,
+ 	       int start_queue_idx, int queue_cnt, int shared_umem,
+ 	       const char *prog_path, int busy_budget, int force_copy,
+-	       int use_cni)
++	       int use_cni, const char *dp_path)
+ {
+ 	const char *name = rte_vdev_device_name(dev);
+ 	const unsigned int numa_node = dev->device.numa_node;
+@@ -2138,6 +2169,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
+ 	internals->shared_umem = shared_umem;
+ 	internals->force_copy = force_copy;
+ 	internals->use_cni = use_cni;
++	strlcpy(internals->dp_path, dp_path, PATH_MAX);
+ 
+ 	if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
+ 				  &internals->combined_queue_cnt)) {
+@@ -2199,7 +2231,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
+ 	if (!internals->use_cni)
+ 		eth_dev->dev_ops = &ops;
+ 	else
+-		eth_dev->dev_ops = &ops_cni;
++		eth_dev->dev_ops = &ops_afxdp_dp;
+ 
+ 	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
+ 	eth_dev->tx_pkt_burst = eth_af_xdp_tx;
+@@ -2328,6 +2360,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
+ 	int busy_budget = -1, ret;
+ 	int force_copy = 0;
+ 	int use_cni = 0;
++	char dp_path[PATH_MAX] = {'\0'};
+ 	struct rte_eth_dev *eth_dev = NULL;
+ 	const char *name = rte_vdev_device_name(dev);
+ 
+@@ -2370,7 +2403,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
+ 
+ 	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
+ 			     &xsk_queue_cnt, &shared_umem, prog_path,
+-			     &busy_budget, &force_copy, &use_cni) < 0) {
++			     &busy_budget, &force_copy, &use_cni, dp_path) < 0) {
+ 		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
+ 		return -EINVAL;
+ 	}
+@@ -2384,7 +2417,19 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
+ 	if (use_cni && strnlen(prog_path, PATH_MAX)) {
+ 		AF_XDP_LOG(ERR, "When '%s' parameter is used, '%s' parameter is not valid\n",
+ 			ETH_AF_XDP_USE_CNI_ARG, ETH_AF_XDP_PROG_ARG);
+-			return -EINVAL;
++		return -EINVAL;
++	}
++
++	if (use_cni && !strnlen(dp_path, PATH_MAX)) {
++		snprintf(dp_path, sizeof(dp_path), "%s/%s/%s", DP_BASE_PATH, if_name, DP_UDS_SOCK);
++		AF_XDP_LOG(INFO, "'%s' parameter not provided, setting value to '%s'\n",
++			ETH_AF_XDP_DP_PATH_ARG, dp_path);
++	}
++
++	if (!use_cni && strnlen(dp_path, PATH_MAX)) {
++		AF_XDP_LOG(ERR, "'%s' parameter is set, but '%s' was not enabled\n",
++			ETH_AF_XDP_DP_PATH_ARG, ETH_AF_XDP_USE_CNI_ARG);
++		return -EINVAL;
+ 	}
+ 
+ 	if (strlen(if_name) == 0) {
+@@ -2410,7 +2455,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
+ 
+ 	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
+ 				 xsk_queue_cnt, shared_umem, prog_path,
+-				 busy_budget, force_copy, use_cni);
++				 busy_budget, force_copy, use_cni, dp_path);
+ 	if (eth_dev == NULL) {
+ 		AF_XDP_LOG(ERR, "Failed to init internals\n");
+ 		return -1;
+@@ -2471,4 +2516,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
+ 			      "xdp_prog=<string> "
+ 			      "busy_budget=<int> "
+ 			      "force_copy=<int> "
+-			      "use_cni=<int> ");
++			      "use_cni=<int> "
++			      "dp_path=<string> ");
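A minimal usage sketch (not part of the patch itself) for the devarg pair
registered above. The interface name and UDS path are placeholders; when only
use_cni=1 is given, the PMD derives dp_path from DP_BASE_PATH and DP_UDS_SOCK,
as shown in rte_pmd_af_xdp_probe() above.

    #include <rte_eal.h>

    int main(void)
    {
        char *argv[] = {
            "app", "--no-pci",
            "--vdev=net_af_xdp0,iface=eth0,use_cni=1,"
            "dp_path=/tmp/afxdp_dp/eth0/afxdp.sock", /* placeholder path */
        };
        int argc = sizeof(argv) / sizeof(argv[0]);

        if (rte_eal_init(argc, argv) < 0)
            return -1;
        /* from here the port is configured and started like any other vdev */
        return 0;
    }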
+diff --git a/dpdk/drivers/net/ark/ark_ethdev_tx.c b/dpdk/drivers/net/ark/ark_ethdev_tx.c
+index 4792754f19..8f1f90b1a4 100644
+--- a/dpdk/drivers/net/ark/ark_ethdev_tx.c
++++ b/dpdk/drivers/net/ark/ark_ethdev_tx.c
+@@ -39,8 +39,8 @@ struct ark_tx_queue {
+ 	uint32_t queue_mask;
+ 
+ 	/* 3 indexes to the paired data rings. */
+-	int32_t prod_index;		/* where to put the next one */
+-	int32_t free_index;		/* mbuf has been freed */
++	uint32_t prod_index;		/* where to put the next one */
++	uint32_t free_index;		/* mbuf has been freed */
+ 
+ 	/* The queue Id is used to identify the HW Q */
+ 	uint16_t phys_qid;
+@@ -49,7 +49,7 @@ struct ark_tx_queue {
+ 
+ 	/* next cache line - fields written by device */
+ 	RTE_MARKER cacheline1 __rte_cache_min_aligned;
+-	volatile int32_t cons_index;		/* hw is done, can be freed */
++	volatile uint32_t cons_index;		/* hw is done, can be freed */
+ } __rte_cache_aligned;
+ 
+ /* Forward declarations */
+@@ -108,7 +108,7 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 	uint32_t user_meta[5];
+ 
+ 	int stat;
+-	int32_t prod_index_limit;
++	uint32_t prod_index_limit;
+ 	uint16_t nb;
+ 	uint8_t user_len = 0;
+ 	const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;
+@@ -123,8 +123,13 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 	/* leave 4 elements mpu data */
+ 	prod_index_limit = queue->queue_size + queue->free_index - 4;
+ 
++	/* Populate the buffer, bringing prod_index up to or slightly beyond
++	 * prod_index_limit. prod_index will increment by 2 or more each
++	 * iteration. Note: indexes are uint32_t, cast to (signed) int32_t
++	 * to catch the slight overage case, e.g. (200 - 201).
++	 */
+ 	for (nb = 0;
+-	     (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0;
++	     (nb < nb_pkts) && (int32_t)(prod_index_limit - queue->prod_index) > 0;
+ 	     ++nb) {
+ 		mbuf = tx_pkts[nb];
+ 
+@@ -194,13 +199,13 @@ eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf,
+ 		 uint32_t *user_meta, uint8_t meta_cnt)
+ {
+ 	struct rte_mbuf *next;
+-	int32_t free_queue_space;
++	uint32_t free_queue_space;
+ 	uint8_t flags = ARK_DDM_SOP;
+ 
+ 	free_queue_space = queue->queue_mask -
+ 		(queue->prod_index - queue->free_index);
+ 	/* We need up to 4 mbufs for first header and 2 for subsequent ones */
+-	if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs))))
++	if (unlikely(free_queue_space < (2U + (2U * mbuf->nb_segs))))
+ 		return -1;
+ 
+ 	while (mbuf != NULL) {
+@@ -392,10 +397,11 @@ free_completed_tx(struct ark_tx_queue *queue)
+ {
+ 	struct rte_mbuf *mbuf;
+ 	union ark_tx_meta *meta;
+-	int32_t top_index;
++	uint32_t top_index;
+ 
+ 	top_index = queue->cons_index;	/* read once */
+-	while ((top_index - queue->free_index) > 0) {
++
++	while ((int32_t)(top_index - queue->free_index) > 0) {
+ 		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
+ 		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+ 			mbuf = queue->bufs[queue->free_index &
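A standalone illustration (not from the patch) of the serial-number style
comparison the ark hunks above switch to: with free-running uint32_t indexes,
the difference is computed modulo 2^32 and then viewed as signed, so the test
stays correct across the 0xFFFFFFFF -> 0 wrap as well as in the slight-overage
case called out in the comment, e.g. (200 - 201).

    #include <assert.h>
    #include <stdint.h>

    static int index_ahead(uint32_t a, uint32_t b)
    {
        /* true when a is logically ahead of b, wrap-safe */
        return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
        assert(index_ahead(201, 200));          /* simple case */
        assert(!index_ahead(200, 201));         /* slight overage */
        assert(index_ahead(5, UINT32_MAX - 2)); /* across the wrap */
        return 0;
    }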
+diff --git a/dpdk/drivers/net/axgbe/axgbe_common.h b/dpdk/drivers/net/axgbe/axgbe_common.h
+index a5d11c5832..51532fb34a 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_common.h
++++ b/dpdk/drivers/net/axgbe/axgbe_common.h
+@@ -407,8 +407,6 @@
+ #define MAC_MDIOSCAR_PA_WIDTH		5
+ #define MAC_MDIOSCAR_RA_INDEX		0
+ #define MAC_MDIOSCAR_RA_WIDTH		16
+-#define MAC_MDIOSCAR_REG_INDEX		0
+-#define MAC_MDIOSCAR_REG_WIDTH		21
+ #define MAC_MDIOSCCDR_BUSY_INDEX	22
+ #define MAC_MDIOSCCDR_BUSY_WIDTH	1
+ #define MAC_MDIOSCCDR_CMD_INDEX		16
+diff --git a/dpdk/drivers/net/axgbe/axgbe_dev.c b/dpdk/drivers/net/axgbe/axgbe_dev.c
+index 6a7fddffca..5233633a53 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_dev.c
++++ b/dpdk/drivers/net/axgbe/axgbe_dev.c
+@@ -63,15 +63,27 @@ static int mdio_complete(struct axgbe_port *pdata)
+ 	return 0;
+ }
+ 
++static unsigned int axgbe_create_mdio_sca(int port, int reg)
++{
++	unsigned int mdio_sca, da;
++
++	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
++
++	mdio_sca = 0;
++	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
++	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
++	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
++
++	return mdio_sca;
++}
++
+ static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ 				    int reg, u16 val)
+ {
+ 	unsigned int mdio_sca, mdio_sccd;
+ 	uint64_t timeout;
+ 
+-	mdio_sca = 0;
+-	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+-	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++	mdio_sca = axgbe_create_mdio_sca(addr, reg);
+ 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+ 
+ 	mdio_sccd = 0;
+@@ -97,9 +109,7 @@ static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ 	unsigned int mdio_sca, mdio_sccd;
+ 	uint64_t timeout;
+ 
+-	mdio_sca = 0;
+-	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+-	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++	mdio_sca = axgbe_create_mdio_sca(addr, reg);
+ 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+ 
+ 	mdio_sccd = 0;
+@@ -259,20 +269,28 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
+ 	return 0;
+ }
+ 
++static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata)
++{
++	unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
++
++	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
++	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
++		return max_q_count;
++	else
++		return (RTE_MIN(pdata->tx_q_count, max_q_count));
++}
++
+ static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
+ {
+-	unsigned int max_q_count, q_count;
+ 	unsigned int reg, reg_val;
+-	unsigned int i;
++	unsigned int i, q_count;
+ 
+ 	/* Clear MTL flow control */
+ 	for (i = 0; i < pdata->rx_q_count; i++)
+ 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+ 
+ 	/* Clear MAC flow control */
+-	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+-	q_count = RTE_MIN(pdata->tx_q_count,
+-			max_q_count);
++	q_count = axgbe_get_fc_queue_count(pdata);
+ 	reg = MAC_Q0TFCR;
+ 	for (i = 0; i < q_count; i++) {
+ 		reg_val = AXGMAC_IOREAD(pdata, reg);
+@@ -287,9 +305,8 @@ static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
+ 
+ static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
+ {
+-	unsigned int max_q_count, q_count;
+ 	unsigned int reg, reg_val;
+-	unsigned int i;
++	unsigned int i, q_count;
+ 
+ 	/* Set MTL flow control */
+ 	for (i = 0; i < pdata->rx_q_count; i++) {
+@@ -306,9 +323,7 @@ static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
+ 	}
+ 
+ 	/* Set MAC flow control */
+-	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+-	q_count = RTE_MIN(pdata->tx_q_count,
+-			max_q_count);
++	q_count = axgbe_get_fc_queue_count(pdata);
+ 	reg = MAC_Q0TFCR;
+ 	for (i = 0; i < q_count; i++) {
+ 		reg_val = AXGMAC_IOREAD(pdata, reg);
+@@ -637,23 +652,21 @@ static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+ 	unsigned int arcache, awcache, arwcache;
+ 
+ 	arcache = 0;
+-	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
++	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf);
++	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf);
++	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf);
+ 	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+ 
+ 	awcache = 0;
+-	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+-	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+-	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+-	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+-	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+-	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+-	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
++	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf);
++	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf);
++	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf);
++	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf);
+ 	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+ 
+ 	arwcache = 0;
+-	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+-	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+-	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
++	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf);
++	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf);
+ 	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+ }
+ 
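A small sketch (not from the patch) of the clause-45 address split performed by
the new axgbe_create_mdio_sca() helper above: for C45 accesses the upper 16
bits of reg carry the MMD device address and the lower 16 bits the register
address. Defining MII_ADDR_C45 as bit 30 mirrors the Linux convention and is an
assumption here, as are the example MMD/register values.

    #include <stdio.h>

    #define MII_ADDR_C45 (1U << 30) /* assumed Linux-style C45 flag */

    int main(void)
    {
        /* hypothetical C45 access: MMD device 7, register 0x0096 */
        unsigned int reg = MII_ADDR_C45 | (7U << 16) | 0x0096;
        unsigned int da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;

        /* the MAC_MDIOSCAR DA/RA fields truncate these to field width */
        printf("device address 0x%x, register address 0x%x\n",
               da & 0x1f, reg & 0xffff);
        return 0;
    }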
+diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c
+index f174d46143..6ce87f83f4 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c
++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c
+@@ -207,6 +207,7 @@ static struct axgbe_version_data axgbe_v2a = {
+ 	.ecc_support			= 1,
+ 	.i2c_support			= 1,
+ 	.an_cdr_workaround		= 1,
++	.enable_rrc			= 1,
+ };
+ 
+ static struct axgbe_version_data axgbe_v2b = {
+@@ -219,6 +220,7 @@ static struct axgbe_version_data axgbe_v2b = {
+ 	.ecc_support			= 1,
+ 	.i2c_support			= 1,
+ 	.an_cdr_workaround		= 1,
++	.enable_rrc			= 1,
+ };
+ 
+ static const struct rte_eth_desc_lim rx_desc_lim = {
+@@ -2267,6 +2269,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ 
+ 			/* Yellow Carp devices do not need cdr workaround */
+ 			pdata->vdata->an_cdr_workaround = 0;
++
++			/* Yellow Carp devices do not need rrc */
++			pdata->vdata->enable_rrc = 0;
+ 		} else {
+ 			unknown_cpu = 1;
+ 		}
+@@ -2404,12 +2409,14 @@ static int
+ axgbe_dev_close(struct rte_eth_dev *eth_dev)
+ {
+ 	struct rte_pci_device *pci_dev;
++	struct axgbe_port *pdata;
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return 0;
+ 
++	pdata = eth_dev->data->dev_private;
+ 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ 	axgbe_dev_clear_queues(eth_dev);
+ 
+@@ -2419,6 +2426,9 @@ axgbe_dev_close(struct rte_eth_dev *eth_dev)
+ 				     axgbe_dev_interrupt_handler,
+ 				     (void *)eth_dev);
+ 
++	/* Disable all interrupts in the hardware */
++	XP_IOWRITE(pdata, XP_INT_EN, 0x0);
++
+ 	return 0;
+ }
+ 
+diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/dpdk/drivers/net/axgbe/axgbe_ethdev.h
+index 7f19321d88..b4bd56e239 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.h
++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.h
+@@ -111,6 +111,7 @@
+ /* Auto-negotiation */
+ #define AXGBE_AN_MS_TIMEOUT		500
+ #define AXGBE_LINK_TIMEOUT		5
++#define AXGBE_KR_TRAINING_WAIT_ITER	50
+ 
+ #define AXGBE_SGMII_AN_LINK_STATUS	BIT(1)
+ #define AXGBE_SGMII_AN_LINK_SPEED	(BIT(2) | BIT(3))
+@@ -463,6 +464,7 @@ struct axgbe_version_data {
+ 	unsigned int ecc_support;
+ 	unsigned int i2c_support;
+ 	unsigned int an_cdr_workaround;
++	unsigned int enable_rrc;
+ };
+ 
+ struct axgbe_mmc_stats {
+@@ -653,6 +655,7 @@ struct axgbe_port {
+ 	unsigned int parallel_detect;
+ 	unsigned int fec_ability;
+ 	unsigned long an_start;
++	unsigned long kr_start_time;
+ 	enum axgbe_an_mode an_mode;
+ 
+ 	/* I2C support */
+diff --git a/dpdk/drivers/net/axgbe/axgbe_mdio.c b/dpdk/drivers/net/axgbe/axgbe_mdio.c
+index 913ceada0d..d95a52659e 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_mdio.c
++++ b/dpdk/drivers/net/axgbe/axgbe_mdio.c
+@@ -200,13 +200,14 @@ static void axgbe_switch_mode(struct axgbe_port *pdata)
+ 	axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+ }
+ 
+-static void axgbe_set_mode(struct axgbe_port *pdata,
++static bool axgbe_set_mode(struct axgbe_port *pdata,
+ 			   enum axgbe_mode mode)
+ {
+ 	if (mode == axgbe_cur_mode(pdata))
+-		return;
++		return false;
+ 
+ 	axgbe_change_mode(pdata, mode);
++	return true;
+ }
+ 
+ static bool axgbe_use_mode(struct axgbe_port *pdata,
+@@ -357,6 +358,7 @@ static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata,
+ 	reg |= AXGBE_KR_TRAINING_ENABLE;
+ 	reg |= AXGBE_KR_TRAINING_START;
+ 	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++	pdata->kr_start_time = rte_get_timer_cycles();
+ 
+ 	PMD_DRV_LOG(DEBUG, "KR training initiated\n");
+ 	if (pdata->phy_if.phy_impl.kr_training_post)
+@@ -487,6 +489,7 @@ static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata)
+ 
+ 	axgbe_an_disable(pdata);
+ 	axgbe_switch_mode(pdata);
++	pdata->an_result = AXGBE_AN_READY;
+ 	axgbe_an_restart(pdata);
+ 
+ 	return AXGBE_AN_INCOMPAT_LINK;
+@@ -967,11 +970,34 @@ static void axgbe_check_link_timeout(struct axgbe_port *pdata)
+ {
+ 	unsigned long link_timeout;
+ 	unsigned long ticks;
++	unsigned long kr_time;
++	int wait;
+ 
+ 	link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
+ 					    2 *  rte_get_timer_hz());
+ 	ticks = rte_get_timer_cycles();
+ 	if (time_after(ticks, link_timeout)) {
++		if ((axgbe_cur_mode(pdata) == AXGBE_MODE_KR) &&
++		    pdata->phy.autoneg == AUTONEG_ENABLE) {
++			/* AN restart should not happen while KR training is in progress.
++			 * The while loop ensures no AN restart occurs during KR
++			 * training: it waits up to 500ms, and an AN restart is
++			 * triggered only if KR training has failed.
++			 */
++			wait = AXGBE_KR_TRAINING_WAIT_ITER;
++			while (wait--) {
++				kr_time = pdata->kr_start_time +
++					  msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT);
++				ticks = rte_get_timer_cycles();
++				if (time_after(ticks, kr_time))
++					break;
++				/* AN restart is not required, if AN result is COMPLETE */
++				if (pdata->an_result == AXGBE_AN_COMPLETE)
++					return;
++				rte_delay_us(10500);
++			}
++		}
++
+ 		PMD_DRV_LOG(NOTICE, "AN link timeout\n");
+ 		axgbe_phy_config_aneg(pdata);
+ 	}
+@@ -982,7 +1008,7 @@ static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata)
+ 	return pdata->phy_if.phy_impl.an_outcome(pdata);
+ }
+ 
+-static void axgbe_phy_status_result(struct axgbe_port *pdata)
++static bool axgbe_phy_status_result(struct axgbe_port *pdata)
+ {
+ 	enum axgbe_mode mode;
+ 
+@@ -1016,7 +1042,10 @@ static void axgbe_phy_status_result(struct axgbe_port *pdata)
+ 
+ 	pdata->phy.duplex = DUPLEX_FULL;
+ 
+-	axgbe_set_mode(pdata, mode);
++	if (axgbe_set_mode(pdata, mode))
++		return true;
++	else
++		return false;
+ }
+ 
+ static int autoneg_time_out(unsigned long autoneg_start_time)
+@@ -1051,7 +1080,7 @@ static void axgbe_phy_status(struct axgbe_port *pdata)
+ 							     &an_restart);
+ 	if (an_restart) {
+ 		axgbe_phy_config_aneg(pdata);
+-		return;
++		goto adjust_link;
+ 	}
+ 
+ 	if (pdata->phy.link) {
+@@ -1083,7 +1112,10 @@ static void axgbe_phy_status(struct axgbe_port *pdata)
+ 				return;
+ 			}
+ 		}
+-		axgbe_phy_status_result(pdata);
++
++		if (axgbe_phy_status_result(pdata))
++			return;
++
+ 		if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state))
+ 			rte_bit_relaxed_clear32(AXGBE_LINK_INIT,
+ 						&pdata->dev_state);
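The timeout handling above polls against a deadline expressed in TSC cycles;
msecs_to_timer_cycles() is driver-local. Below is a generic sketch of the same
bounded-wait pattern using only public EAL calls (the poll interval is an
arbitrary choice):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_cycles.h>

    static bool wait_until(bool (*done)(void), unsigned int timeout_ms)
    {
        uint64_t deadline = rte_get_timer_cycles() +
                            (uint64_t)timeout_ms * rte_get_timer_hz() / 1000;

        while (rte_get_timer_cycles() < deadline) {
            if (done())
                return true;
            rte_delay_us(100); /* back off between polls */
        }
        return false;
    }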
+diff --git a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
+index d97fbbfddd..12908d4e6f 100644
+--- a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
++++ b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
+@@ -69,6 +69,7 @@ enum axgbe_sfp_cable {
+ 	AXGBE_SFP_CABLE_UNKNOWN = 0,
+ 	AXGBE_SFP_CABLE_ACTIVE,
+ 	AXGBE_SFP_CABLE_PASSIVE,
++	AXGBE_SFP_CABLE_FIBER,
+ };
+ 
+ enum axgbe_sfp_base {
+@@ -116,9 +117,7 @@ enum axgbe_sfp_speed {
+ 
+ #define AXGBE_SFP_BASE_BR			12
+ #define AXGBE_SFP_BASE_BR_1GBE_MIN		0x0a
+-#define AXGBE_SFP_BASE_BR_1GBE_MAX		0x0d
+ #define AXGBE_SFP_BASE_BR_10GBE_MIN		0x64
+-#define AXGBE_SFP_BASE_BR_10GBE_MAX		0x68
+ 
+ #define AXGBE_SFP_BASE_CU_CABLE_LEN		18
+ 
+@@ -535,25 +534,22 @@ static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata)
+ static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom,
+ 				   enum axgbe_sfp_speed sfp_speed)
+ {
+-	u8 *sfp_base, min, max;
++	u8 *sfp_base, min;
+ 
+ 	sfp_base = sfp_eeprom->base;
+ 
+ 	switch (sfp_speed) {
+ 	case AXGBE_SFP_SPEED_1000:
+ 		min = AXGBE_SFP_BASE_BR_1GBE_MIN;
+-		max = AXGBE_SFP_BASE_BR_1GBE_MAX;
+ 		break;
+ 	case AXGBE_SFP_SPEED_10000:
+ 		min = AXGBE_SFP_BASE_BR_10GBE_MIN;
+-		max = AXGBE_SFP_BASE_BR_10GBE_MAX;
+ 		break;
+ 	default:
+ 		return false;
+ 	}
+ 
+-	return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) &&
+-		(sfp_base[AXGBE_SFP_BASE_BR] <= max));
++	return sfp_base[AXGBE_SFP_BASE_BR] >= min;
+ }
+ 
+ static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata)
+@@ -578,6 +574,9 @@ static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata)
+ 		   AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR)))
+ 		return false;
+ 
++	/* Reset PHY - wait for self-clearing reset bit to clear */
++	pdata->phy_if.phy_impl.reset(pdata);
++
+ 	if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN],
+ 		    AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) {
+ 		phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+@@ -613,16 +612,21 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata)
+ 
+ 	axgbe_phy_sfp_parse_quirks(pdata);
+ 
+-	/* Assume ACTIVE cable unless told it is PASSIVE */
++	/* Assume FIBER cable unless told otherwise */
+ 	if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) {
+ 		phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE;
+ 		phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN];
+-	} else {
++	} else if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_ACTIVE) {
+ 		phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
++	} else {
++		phy_data->sfp_cable = AXGBE_SFP_CABLE_FIBER;
+ 	}
+ 
+ 	/* Determine the type of SFP */
+-	if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR)
++	if (phy_data->sfp_cable != AXGBE_SFP_CABLE_FIBER &&
++		 axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000))
++		phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR;
++	else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR)
+ 		phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR;
+ 	else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR)
+ 		phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR;
+@@ -639,9 +643,6 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata)
+ 		phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX;
+ 	else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T)
+ 		phy_data->sfp_base = AXGBE_SFP_BASE_1000_T;
+-	else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) &&
+-		 axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000))
+-		phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR;
+ 
+ 	switch (phy_data->sfp_base) {
+ 	case AXGBE_SFP_BASE_1000_T:
+@@ -1225,6 +1226,10 @@ static void axgbe_phy_rx_reset(struct axgbe_port *pdata)
+ 
+ static void axgbe_phy_pll_ctrl(struct axgbe_port *pdata, bool enable)
+ {
++	/* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */
++	if (pdata->phy.autoneg != AUTONEG_DISABLE)
++		return;
++
+ 	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
+ 			XGBE_PMA_PLL_CTRL_MASK,
+ 			enable ? XGBE_PMA_PLL_CTRL_SET
+@@ -1269,8 +1274,9 @@ static void axgbe_phy_perform_ratechange(struct axgbe_port *pdata,
+ 	axgbe_phy_rx_reset(pdata);
+ 
+ reenable_pll:
+-	 /* Re-enable the PLL control */
+-	axgbe_phy_pll_ctrl(pdata, true);
++	/* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */
++	if (cmd != 0 && cmd != 5)
++		axgbe_phy_pll_ctrl(pdata, true);
+ 
+ 	PMD_DRV_LOG(NOTICE, "firmware mailbox command did not complete\n");
+ }
+@@ -1697,8 +1703,15 @@ static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart)
+ 	if (reg & MDIO_STAT1_LSTATUS)
+ 		return 1;
+ 
++	if (pdata->phy.autoneg == AUTONEG_ENABLE &&
++			phy_data->port_mode == AXGBE_PORT_MODE_BACKPLANE) {
++		if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) {
++			*an_restart = 1;
++		}
++	}
++
+ 	/* No link, attempt a receiver reset cycle */
+-	if (phy_data->rrc_count++) {
++	if (pdata->vdata->enable_rrc && phy_data->rrc_count++) {
+ 		phy_data->rrc_count = 0;
+ 		axgbe_phy_rrc(pdata);
+ 	}
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c
+index c3283c94f3..597ee43359 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x.c
++++ b/dpdk/drivers/net/bnx2x/bnx2x.c
+@@ -2389,7 +2389,7 @@ int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc)
+ static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc)
+ {
+ 	sc->ilt->lines = rte_calloc("",
+-				    sizeof(struct ilt_line), ILT_MAX_LINES,
++				    ILT_MAX_LINES, sizeof(struct ilt_line),
+ 				    RTE_CACHE_LINE_SIZE);
+ 	return sc->ilt->lines == NULL;
+ }
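The bnx2x fix above swaps the arguments into rte_calloc()'s documented
(type, num, size, align) order; the transposed form allocates the same number
of bytes, but it obscures intent and trips compiler diagnostics for transposed
calloc-style arguments. A short sketch with a stand-in element type:

    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_malloc.h>

    struct line { void *page; uint32_t size; }; /* stand-in for ilt_line */

    static struct line *alloc_lines(size_t n)
    {
        /* number of elements first, element size second */
        return rte_calloc("ilt_lines", n, sizeof(struct line),
                          RTE_CACHE_LINE_SIZE);
    }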
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c
+index c07b01510a..69132c7c80 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c
++++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.c
+@@ -114,7 +114,7 @@ bnx2x_hw_stats_post(struct bnx2x_softc *sc)
+ 
+ 	/* Update MCP's statistics if possible */
+ 	if (sc->func_stx) {
+-		rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
++		memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
+ 				sizeof(sc->func_stats));
+ 	}
+ 
+@@ -817,10 +817,10 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc)
+ 			  etherstatspktsover1522octets);
+     }
+ 
+-    rte_memcpy(old, new, sizeof(struct nig_stats));
++	memcpy(old, new, sizeof(struct nig_stats));
+ 
+-    rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+-	   sizeof(struct mac_stx));
++	memcpy(RTE_PTR_ADD(estats, offsetof(struct bnx2x_eth_stats, rx_stat_ifhcinbadoctets_hi)),
++			&pstats->mac_stx[1], sizeof(struct mac_stx));
+     estats->brb_drop_hi = pstats->brb_drop_hi;
+     estats->brb_drop_lo = pstats->brb_drop_lo;
+ 
+@@ -1492,9 +1492,11 @@ bnx2x_stats_init(struct bnx2x_softc *sc)
+ 		REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+ 	if (!CHIP_IS_E3(sc)) {
+ 		REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+-				&(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
++				RTE_PTR_ADD(&sc->port.old_nig_stats,
++				offsetof(struct nig_stats, egress_mac_pkt0_lo)), 2);
+ 		REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+-				&(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
++				RTE_PTR_ADD(&sc->port.old_nig_stats,
++				offsetof(struct nig_stats, egress_mac_pkt1_lo)), 2);
+ 	}
+ 
+ 	/* function stats */
+diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
+index 63953c2979..5411df3a38 100644
+--- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
++++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
+@@ -52,9 +52,9 @@ bnx2x_check_bull(struct bnx2x_softc *sc)
+ 
+ 	/* check the mac address and VLAN and allocate memory if valid */
+ 	if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN))
+-		rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
++		memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
+ 	if (valid_bitmap & (1 << VLAN_VALID))
+-		rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, RTE_VLAN_HLEN);
++		memcpy(&bull->vlan, &sc->old_bulletin.vlan, sizeof(bull->vlan));
+ 
+ 	sc->old_bulletin = *bull;
+ 
+@@ -569,7 +569,7 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
+ 
+ 	bnx2x_check_bull(sc);
+ 
+-	rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);
++	memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);
+ 
+ 	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ 		      BNX2X_VF_TLV_LIST_END,
+@@ -583,9 +583,9 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
+ 	while (BNX2X_VF_STATUS_FAILURE == reply->status &&
+ 			bnx2x_check_bull(sc)) {
+ 		/* A new mac was configured by PF for us */
+-		rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac,
++		memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac,
+ 				ETH_ALEN);
+-		rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
++		memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
+ 				ETH_ALEN);
+ 
+ 		rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+@@ -622,10 +622,10 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc,
+ 		      BNX2X_VF_TLV_LIST_END,
+ 		      sizeof(struct channel_list_end_tlv));
+ 
+-	rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
++	memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
+ 	query->rss_key_size = T_ETH_RSS_KEY;
+ 
+-	rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
++	memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ 	query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+ 
+ 	query->rss_result_mask = params->rss_result_mask;
+diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h
+index 0e01b1d4ba..be2fd689bb 100644
+--- a/dpdk/drivers/net/bnxt/bnxt.h
++++ b/dpdk/drivers/net/bnxt/bnxt.h
+@@ -449,8 +449,8 @@ struct bnxt_ring_mem_info {
+ 
+ struct bnxt_ctx_pg_info {
+ 	uint32_t	entries;
+-	void		*ctx_pg_arr[MAX_CTX_PAGES];
+-	rte_iova_t	ctx_dma_arr[MAX_CTX_PAGES];
++	void		**ctx_pg_arr;
++	rte_iova_t	*ctx_dma_arr;
+ 	struct bnxt_ring_mem_info ring_mem;
+ };
+ 
+@@ -550,7 +550,6 @@ struct bnxt_mark_info {
+ 
+ struct bnxt_rep_info {
+ 	struct rte_eth_dev	*vfr_eth_dev;
+-	pthread_mutex_t		vfr_lock;
+ 	pthread_mutex_t		vfr_start_lock;
+ 	bool			conduit_valid;
+ };
+@@ -896,6 +895,7 @@ struct bnxt {
+ 	struct rte_ether_addr	*mcast_addr_list;
+ 	rte_iova_t		mc_list_dma_addr;
+ 	uint32_t		nb_mc_addr;
++#define BNXT_DFLT_MAX_MC_ADDR	16 /* for compatibility with older firmware */
+ 	uint32_t		max_mcast_addr; /* maximum number of mcast filters supported */
+ 
+ 	struct rte_eth_rss_conf	rss_conf; /* RSS configuration. */
+diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c
+index acf7e6e46e..0fc561d258 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c
++++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c
+@@ -1673,10 +1673,8 @@ bnxt_uninit_locks(struct bnxt *bp)
+ 	pthread_mutex_destroy(&bp->def_cp_lock);
+ 	pthread_mutex_destroy(&bp->health_check_lock);
+ 	pthread_mutex_destroy(&bp->err_recovery_lock);
+-	if (bp->rep_info) {
+-		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
++	if (bp->rep_info)
+ 		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
+-	}
+ }
+ 
+ static void bnxt_drv_uninit(struct bnxt *bp)
+@@ -4750,7 +4748,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
+ {
+ 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ 	const struct rte_memzone *mz = NULL;
+-	char mz_name[RTE_MEMZONE_NAMESIZE];
++	char name[RTE_MEMZONE_NAMESIZE];
+ 	rte_iova_t mz_phys_addr;
+ 	uint64_t valid_bits = 0;
+ 	uint32_t sz;
+@@ -4762,6 +4760,19 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
+ 	rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
+ 			 BNXT_PAGE_SIZE;
+ 	rmem->page_size = BNXT_PAGE_SIZE;
++
++	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d",
++		 suffix, idx, bp->eth_dev->data->port_id);
++	ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0);
++	if (ctx_pg->ctx_pg_arr == NULL)
++		return -ENOMEM;
++
++	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d",
++		 suffix, idx, bp->eth_dev->data->port_id);
++	ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0);
++	if (ctx_pg->ctx_dma_arr == NULL)
++		return -ENOMEM;
++
+ 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+@@ -4769,13 +4780,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
+ 	valid_bits = PTU_PTE_VALID;
+ 
+ 	if (rmem->nr_pages > 1) {
+-		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
++		snprintf(name, RTE_MEMZONE_NAMESIZE,
+ 			 "bnxt_ctx_pg_tbl%s_%x_%d",
+ 			 suffix, idx, bp->eth_dev->data->port_id);
+-		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+-		mz = rte_memzone_lookup(mz_name);
++		name[RTE_MEMZONE_NAMESIZE - 1] = 0;
++		mz = rte_memzone_lookup(name);
+ 		if (!mz) {
+-			mz = rte_memzone_reserve_aligned(mz_name,
++			mz = rte_memzone_reserve_aligned(name,
+ 						rmem->nr_pages * 8,
+ 						bp->eth_dev->device->numa_node,
+ 						RTE_MEMZONE_2MB |
+@@ -4794,11 +4805,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
+ 		rmem->pg_tbl_mz = mz;
+ 	}
+ 
+-	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
++	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
+ 		 suffix, idx, bp->eth_dev->data->port_id);
+-	mz = rte_memzone_lookup(mz_name);
++	mz = rte_memzone_lookup(name);
+ 	if (!mz) {
+-		mz = rte_memzone_reserve_aligned(mz_name,
++		mz = rte_memzone_reserve_aligned(name,
+ 						 mem_size,
+ 						 bp->eth_dev->device->numa_node,
+ 						 RTE_MEMZONE_1GB |
+@@ -4844,6 +4855,17 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
+ 		return;
+ 
+ 	bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
++	rte_free(bp->ctx->qp_mem.ctx_pg_arr);
++	rte_free(bp->ctx->srq_mem.ctx_pg_arr);
++	rte_free(bp->ctx->cq_mem.ctx_pg_arr);
++	rte_free(bp->ctx->vnic_mem.ctx_pg_arr);
++	rte_free(bp->ctx->stat_mem.ctx_pg_arr);
++	rte_free(bp->ctx->qp_mem.ctx_dma_arr);
++	rte_free(bp->ctx->srq_mem.ctx_dma_arr);
++	rte_free(bp->ctx->cq_mem.ctx_dma_arr);
++	rte_free(bp->ctx->vnic_mem.ctx_dma_arr);
++	rte_free(bp->ctx->stat_mem.ctx_dma_arr);
++
+ 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
+ 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
+ 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
+@@ -4856,6 +4878,8 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
+ 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
+ 
+ 	for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
++		rte_free(bp->ctx->tqm_mem[i]->ctx_pg_arr);
++		rte_free(bp->ctx->tqm_mem[i]->ctx_dma_arr);
+ 		if (bp->ctx->tqm_mem[i])
+ 			rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
+ 	}
+@@ -6173,13 +6197,6 @@ static int bnxt_init_rep_info(struct bnxt *bp)
+ 	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
+ 		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
+ 
+-	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
+-	if (rc) {
+-		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
+-		bnxt_free_rep_info(bp);
+-		return rc;
+-	}
+-
+ 	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
+ 	if (rc) {
+ 		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
+diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c
+index 06f196760f..94c3249ae4 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c
++++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c
+@@ -863,6 +863,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
+ 		    bp->max_l2_ctx, bp->max_vnics);
+ 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ 	bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters);
++	if (!bp->max_mcast_addr)
++		bp->max_mcast_addr = BNXT_DFLT_MAX_MC_ADDR;
+ 	memcpy(bp->dsn, resp->device_serial_number, sizeof(bp->dsn));
+ 
+ 	if (BNXT_PF(bp))
+@@ -3039,6 +3041,8 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
+ 					  struct bnxt_link_info *link_info)
+ {
++	uint16_t support_pam4_speeds = link_info->support_pam4_speeds;
++	uint16_t support_speeds = link_info->support_speeds;
+ 	uint16_t eth_link_speed = 0;
+ 
+ 	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+@@ -3070,29 +3074,30 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
+ 	case RTE_ETH_LINK_SPEED_25G:
+ 		eth_link_speed =
+ 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
++		link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
+ 		break;
+ 	case RTE_ETH_LINK_SPEED_40G:
+ 		eth_link_speed =
+ 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
+ 		break;
+ 	case RTE_ETH_LINK_SPEED_50G:
+-		if (link_info->support_pam4_speeds &
+-		    HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
+-			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
+-			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
+-		} else {
++		if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) {
+ 			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
+ 			link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
++		} else if (support_pam4_speeds &
++			   HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
++			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
++			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
+ 		}
+ 		break;
+ 	case RTE_ETH_LINK_SPEED_100G:
+-		if (link_info->support_pam4_speeds &
+-		    HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
+-			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
+-			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
+-		} else {
++		if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) {
+ 			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
+ 			link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
++		} else if (support_pam4_speeds &
++			   HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
++			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
++			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
+ 		}
+ 		break;
+ 	case RTE_ETH_LINK_SPEED_200G:
+diff --git a/dpdk/drivers/net/bnxt/bnxt_reps.c b/dpdk/drivers/net/bnxt/bnxt_reps.c
+index 78337431af..6d6b8252e2 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_reps.c
++++ b/dpdk/drivers/net/bnxt/bnxt_reps.c
+@@ -32,6 +32,14 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = {
+ 	.flow_ops_get = bnxt_flow_ops_get_op
+ };
+ 
++static bool bnxt_rep_check_parent(struct bnxt_representor *rep)
++{
++	if (!rep->parent_dev->data->dev_private)
++		return false;
++
++	return true;
++}
++
+ uint16_t
+ bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
+ {
+@@ -124,8 +132,8 @@ bnxt_rep_tx_burst(void *tx_queue,
+ 	qid = vfr_txq->txq->queue_id;
+ 	vf_rep_bp = vfr_txq->bp;
+ 	parent = vf_rep_bp->parent_dev->data->dev_private;
+-	pthread_mutex_lock(&parent->rep_info->vfr_lock);
+ 	ptxq = parent->tx_queues[qid];
++	pthread_mutex_lock(&ptxq->txq_lock);
+ 
+ 	ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
+ 
+@@ -134,9 +142,9 @@ bnxt_rep_tx_burst(void *tx_queue,
+ 		vf_rep_bp->tx_pkts[qid]++;
+ 	}
+ 
+-	rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
++	rc = _bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
+ 	ptxq->vfr_tx_cfa_action = 0;
+-	pthread_mutex_unlock(&parent->rep_info->vfr_lock);
++	pthread_mutex_unlock(&ptxq->txq_lock);
+ 
+ 	return rc;
+ }
+@@ -266,12 +274,12 @@ int bnxt_representor_uninit(struct rte_eth_dev *eth_dev)
+ 	PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id);
+ 	eth_dev->data->mac_addrs = NULL;
+ 
+-	parent_bp = rep->parent_dev->data->dev_private;
+-	if (!parent_bp) {
++	if (!bnxt_rep_check_parent(rep)) {
+ 		PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n",
+ 			    eth_dev->data->port_id);
+ 		return 0;
+ 	}
++	parent_bp = rep->parent_dev->data->dev_private;
+ 
+ 	parent_bp->num_reps--;
+ 	vf_id = rep->vf_id;
+@@ -539,11 +547,12 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ 	int rc = 0;
+ 
+ 	/* MAC Specifics */
+-	parent_bp = rep_bp->parent_dev->data->dev_private;
+-	if (!parent_bp) {
+-		PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
++		/* Not necessarily an error, e.g. when the parent port is closed first */
++		/* Need not be an error scenario, if parent is closed first */
++		PMD_DRV_LOG(INFO, "Rep parent port does not exist.\n");
+ 		return rc;
+ 	}
++	parent_bp = rep_bp->parent_dev->data->dev_private;
+ 	PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
+ 	dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
+ 	dev_info->max_hash_mac_addrs = 0;
+@@ -730,10 +739,10 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ 	struct bnxt_tx_queue *parent_txq, *txq;
+ 	struct bnxt_vf_rep_tx_queue *vfr_txq;
+ 
+-	if (queue_idx >= rep_bp->rx_nr_rings) {
++	if (queue_idx >= rep_bp->tx_nr_rings) {
+ 		PMD_DRV_LOG(ERR,
+ 			    "Cannot create Tx rings %d. %d rings available\n",
+-			    queue_idx, rep_bp->rx_nr_rings);
++			    queue_idx, rep_bp->tx_nr_rings);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c
+index 4df4604975..696603757b 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_txq.c
++++ b/dpdk/drivers/net/bnxt/bnxt_txq.c
+@@ -112,6 +112,7 @@ void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
+ 		txq->mz = NULL;
+ 
+ 		rte_free(txq->free);
++		pthread_mutex_destroy(&txq->txq_lock);
+ 		rte_free(txq);
+ 		dev->data->tx_queues[queue_idx] = NULL;
+ 	}
+@@ -195,6 +196,11 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ 		goto err;
+ 	}
+ 
++	rc = pthread_mutex_init(&txq->txq_lock, NULL);
++	if (rc != 0) {
++		PMD_DRV_LOG(ERR, "TxQ mutex init failed!");
++		goto err;
++	}
+ 	return 0;
+ err:
+ 	bnxt_tx_queue_release_op(eth_dev, queue_idx);
+diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.h b/dpdk/drivers/net/bnxt/bnxt_txq.h
+index 3a483ad5c3..9e54985c4c 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_txq.h
++++ b/dpdk/drivers/net/bnxt/bnxt_txq.h
+@@ -26,6 +26,7 @@ struct bnxt_tx_queue {
+ 	int			index;
+ 	int			tx_wake_thresh;
+ 	uint32_t		vfr_tx_cfa_action;
++	pthread_mutex_t		txq_lock;
+ 	struct bnxt_tx_ring_info	*tx_ring;
+ 
+ 	unsigned int		cp_nr_rings;
+diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c
+index 899986764f..cef14427a8 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_txr.c
++++ b/dpdk/drivers/net/bnxt/bnxt_txr.c
+@@ -562,6 +562,19 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
+ 
+ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 			       uint16_t nb_pkts)
++{
++	struct bnxt_tx_queue *txq = tx_queue;
++	uint16_t rc;
++
++	pthread_mutex_lock(&txq->txq_lock);
++	rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
++	pthread_mutex_unlock(&txq->txq_lock);
++
++	return rc;
++}
++
++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
++			 uint16_t nb_pkts)
+ {
+ 	int rc;
+ 	uint16_t nb_tx_pkts = 0;
+diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.h b/dpdk/drivers/net/bnxt/bnxt_txr.h
+index e64ea2c7d1..09078d545d 100644
+--- a/dpdk/drivers/net/bnxt/bnxt_txr.h
++++ b/dpdk/drivers/net/bnxt/bnxt_txr.h
+@@ -47,7 +47,9 @@ void bnxt_free_tx_rings(struct bnxt *bp);
+ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
+ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
+ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+-			       uint16_t nb_pkts);
++			uint16_t nb_pkts);
++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
++			 uint16_t nb_pkts);
+ #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+ uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 			    uint16_t nb_pkts);
+diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
+index f3f5bda890..852deef3b4 100644
+--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
+@@ -253,6 +253,7 @@ ulp_ha_mgr_timer_cb(void *arg)
+ 
+ 	myclient_cnt = bnxt_ulp_cntxt_num_shared_clients_get(ulp_ctx);
+ 	if (myclient_cnt == 0) {
++		bnxt_ulp_cntxt_entry_release();
+ 		BNXT_TF_DBG(ERR,
+ 			    "PANIC Client Count is zero kill timer\n.");
+ 		return;
+diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
+index 79f1b3f1a0..06c21ebe6d 100644
+--- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
++++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
+@@ -865,7 +865,6 @@ bond_mode_8023ad_periodic_cb(void *arg)
+ 	struct bond_dev_private *internals = bond_dev->data->dev_private;
+ 	struct port *port;
+ 	struct rte_eth_link link_info;
+-	struct rte_ether_addr member_addr;
+ 	struct rte_mbuf *lacp_pkt = NULL;
+ 	uint16_t member_id;
+ 	uint16_t i;
+@@ -892,7 +891,6 @@ bond_mode_8023ad_periodic_cb(void *arg)
+ 			key = 0;
+ 		}
+ 
+-		rte_eth_macaddr_get(member_id, &member_addr);
+ 		port = &bond_mode_8023ad_ports[member_id];
+ 
+ 		key = rte_cpu_to_be_16(key);
+@@ -904,8 +902,8 @@ bond_mode_8023ad_periodic_cb(void *arg)
+ 			SM_FLAG_SET(port, NTT);
+ 		}
+ 
+-		if (!rte_is_same_ether_addr(&port->actor.system, &member_addr)) {
+-			rte_ether_addr_copy(&member_addr, &port->actor.system);
++		if (!rte_is_same_ether_addr(&internals->mode4.mac_addr, &port->actor.system)) {
++			rte_ether_addr_copy(&internals->mode4.mac_addr, &port->actor.system);
+ 			if (port->aggregator_port_id == member_id)
+ 				SM_FLAG_SET(port, NTT);
+ 		}
+@@ -1173,21 +1171,20 @@ void
+ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
+ {
+ 	struct bond_dev_private *internals = bond_dev->data->dev_private;
+-	struct rte_ether_addr member_addr;
+ 	struct port *member, *agg_member;
+ 	uint16_t member_id, i, j;
+ 
+ 	bond_mode_8023ad_stop(bond_dev);
+ 
++	rte_eth_macaddr_get(internals->port_id, &internals->mode4.mac_addr);
+ 	for (i = 0; i < internals->active_member_count; i++) {
+ 		member_id = internals->active_members[i];
+ 		member = &bond_mode_8023ad_ports[member_id];
+-		rte_eth_macaddr_get(member_id, &member_addr);
+ 
+-		if (rte_is_same_ether_addr(&member_addr, &member->actor.system))
++		if (rte_is_same_ether_addr(&internals->mode4.mac_addr, &member->actor.system))
+ 			continue;
+ 
+-		rte_ether_addr_copy(&member_addr, &member->actor.system);
++		rte_ether_addr_copy(&internals->mode4.mac_addr, &member->actor.system);
+ 		/* Do nothing if this port is not an aggregator. In other case
+ 		 * Set NTT flag on every port that use this aggregator. */
+ 		if (member->aggregator_port_id != member_id)
+diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c
+index 71a91675f7..5d0be5caf5 100644
+--- a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c
++++ b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c
+@@ -180,6 +180,8 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
+ 
+ 	count->bytes = 0;
+ 	count->hits = 0;
++	count->bytes_set = 0;
++	count->hits_set = 0;
+ 	rte_memcpy(&member_count, count, sizeof(member_count));
+ 	for (i = 0; i < internals->member_count; i++) {
+ 		ret = rte_flow_query(internals->members[i].port_id,
+@@ -192,8 +194,12 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
+ 		}
+ 		count->bytes += member_count.bytes;
+ 		count->hits += member_count.hits;
++		count->bytes_set |= member_count.bytes_set;
++		count->hits_set |= member_count.hits_set;
+ 		member_count.bytes = 0;
+ 		member_count.hits = 0;
++		member_count.bytes_set = 0;
++		member_count.hits_set = 0;
+ 	}
+ 	return 0;
+ }
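With the bonding fix above, the aggregated counter carries the OR of the
members' hits_set/bytes_set flags, so a caller can tell "zero" apart from "not
reported". A minimal consumer sketch (the port id and flow handle are assumed
to come from the caller):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>

    static void print_flow_count(uint16_t port_id, struct rte_flow *flow)
    {
        const struct rte_flow_action action = {
            .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count count = { .reset = 0 };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, &action, &count, &error) != 0)
            return;
        if (count.hits_set)
            printf("hits: %" PRIu64 "\n", count.hits);
        if (count.bytes_set)
            printf("bytes: %" PRIu64 "\n", count.bytes);
    }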
+diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev.c b/dpdk/drivers/net/cnxk/cn10k_ethdev.c
+index 4a4e97287c..29b7f2ba5e 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_ethdev.c
++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev.c
+@@ -389,7 +389,13 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+ 		struct roc_nix_sq *sq = &dev->sqs[qidx];
+ 		do {
+ 			handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
++			/* Check if SQ is empty */
+ 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
++			if (head != tail)
++				continue;
++
++			/* Check if completion CQ is empty */
++			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
+ 		} while (head != tail);
+ 	}
+ 
+diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c
+index 575d0fabd5..4719f6b863 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c
++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c
+@@ -1087,8 +1087,8 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
+ {
+ 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+-	struct roc_ot_ipsec_inb_sa *inb_sa_dptr;
+ 	struct rte_security_ipsec_xform *ipsec;
++	struct cn10k_sec_sess_priv sess_priv;
+ 	struct rte_crypto_sym_xform *crypto;
+ 	struct cnxk_eth_sec_sess *eth_sec;
+ 	bool inbound;
+@@ -1109,6 +1109,11 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
+ 	eth_sec->spi = conf->ipsec.spi;
+ 
+ 	if (inbound) {
++		struct roc_ot_ipsec_inb_sa *inb_sa_dptr, *inb_sa;
++		struct cn10k_inb_priv_data *inb_priv;
++
++		inb_sa = eth_sec->sa;
++		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
+ 		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
+ 		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
+ 
+@@ -1116,26 +1121,74 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
+ 					       true);
+ 		if (rc)
+ 			return -EINVAL;
++		/* Use cookie for original data */
++		inb_sa_dptr->w1.s.cookie = inb_sa->w1.s.cookie;
++
++		if (ipsec->options.stats == 1) {
++			/* Enable mib counters */
++			inb_sa_dptr->w0.s.count_mib_bytes = 1;
++			inb_sa_dptr->w0.s.count_mib_pkts = 1;
++		}
++
++		/* Enable out-of-place processing */
++		if (ipsec->options.ingress_oop)
++			inb_sa_dptr->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_FULL;
+ 
+ 		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
+ 					   eth_sec->inb,
+ 					   sizeof(struct roc_ot_ipsec_inb_sa));
+ 		if (rc)
+ 			return -EINVAL;
++
++		/* Save userdata in inb private area */
++		inb_priv->userdata = conf->userdata;
+ 	} else {
+-		struct roc_ot_ipsec_outb_sa *outb_sa_dptr;
++		struct roc_ot_ipsec_outb_sa *outb_sa_dptr, *outb_sa;
++		struct cn10k_outb_priv_data *outb_priv;
++		struct cnxk_ipsec_outb_rlens *rlens;
+ 
++		outb_sa = eth_sec->sa;
++		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
++		rlens = &outb_priv->rlens;
+ 		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
+ 		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
+ 
+ 		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
+ 		if (rc)
+ 			return -EINVAL;
++
++		/* Save rlen info */
++		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
++
++		if (ipsec->options.stats == 1) {
++			/* Enable mib counters */
++			outb_sa_dptr->w0.s.count_mib_bytes = 1;
++			outb_sa_dptr->w0.s.count_mib_pkts = 1;
++		}
++
++		sess_priv.u64 = 0;
++		sess_priv.sa_idx = outb_priv->sa_idx;
++		sess_priv.roundup_byte = rlens->roundup_byte;
++		sess_priv.roundup_len = rlens->roundup_len;
++		sess_priv.partial_len = rlens->partial_len;
++		sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
++		sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
++		/* Propagate inner checksum enable from SA to fast path */
++		sess_priv.chksum =
++			(!ipsec->options.ip_csum_enable << 1 | !ipsec->options.l4_csum_enable);
++		sess_priv.dec_ttl = ipsec->options.dec_ttl;
++		if (roc_feature_nix_has_inl_ipsec_mseg() && dev->outb.cpt_eng_caps & BIT_ULL(35))
++			sess_priv.nixtx_off = 1;
++
+ 		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
+ 					   eth_sec->inb,
+ 					   sizeof(struct roc_ot_ipsec_outb_sa));
+ 		if (rc)
+ 			return -EINVAL;
++
++		/* Save userdata */
++		outb_priv->userdata = conf->userdata;
++		sess->fast_mdata = sess_priv.u64;
+ 	}
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h
+index 7bb4c86d75..86e4233dc7 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_rx.h
++++ b/dpdk/drivers/net/cnxk/cn10k_rx.h
+@@ -705,7 +705,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
+ 	if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ 		const uint64_t *wqe = (const uint64_t *)(mbuf + 1);
+ 
+-		if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
++		if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
+ 			rx = (const union nix_rx_parse_u *)(wqe + 1);
+ 	}
+ 
+diff --git a/dpdk/drivers/net/cnxk/cn10k_rxtx.h b/dpdk/drivers/net/cnxk/cn10k_rxtx.h
+index aeffc4ac92..9f33d0192e 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_rxtx.h
++++ b/dpdk/drivers/net/cnxk/cn10k_rxtx.h
+@@ -177,6 +177,7 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
+ 			m = m_next;
+ 		}
+ 		rte_pktmbuf_free_seg(m);
++		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
+ 
+ 		head++;
+ 		head &= qmask;
+diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h
+index 467f0ccc65..c84154ee84 100644
+--- a/dpdk/drivers/net/cnxk/cn10k_tx.h
++++ b/dpdk/drivers/net/cnxk/cn10k_tx.h
+@@ -784,19 +784,35 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
+ }
+ #endif
+ 
++static inline void
++cn10k_nix_free_extmbuf(struct rte_mbuf *m)
++{
++	struct rte_mbuf *m_next;
++	while (m != NULL) {
++		m_next = m->next;
++		rte_pktmbuf_free_seg(m);
++		m = m_next;
++	}
++}
++
+ static __rte_always_inline uint64_t
+-cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
+-		struct nix_send_hdr_s *send_hdr)
++cn10k_nix_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **extm, struct cn10k_eth_txq *txq,
++		      struct nix_send_hdr_s *send_hdr, uint64_t *aura)
+ {
++	struct rte_mbuf *prev = NULL;
+ 	uint32_t sqe_id;
+ 
+ 	if (RTE_MBUF_HAS_EXTBUF(m)) {
+ 		if (unlikely(txq->tx_compl.ena == 0)) {
+-			rte_pktmbuf_free_seg(m);
++			m->next = *extm;
++			*extm = m;
+ 			return 1;
+ 		}
+ 		if (send_hdr->w0.pnc) {
+-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
++			sqe_id = send_hdr->w1.sqe_id;
++			prev = txq->tx_compl.ptr[sqe_id];
++			m->next = prev;
++			txq->tx_compl.ptr[sqe_id] = m;
+ 		} else {
+ 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+ 			send_hdr->w0.pnc = 1;
+@@ -806,10 +822,160 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
+ 		}
+ 		return 1;
+ 	} else {
+-		return cnxk_nix_prefree_seg(m);
++		return cnxk_nix_prefree_seg(m, aura);
+ 	}
+ }
+ 
++#if defined(RTE_ARCH_ARM64)
++/* Only called for first segments of single segmented mbufs */
++static __rte_always_inline void
++cn10k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct rte_mbuf **extm,
++			  struct cn10k_eth_txq *txq,
++			  uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
++			  uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
++{
++	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
++	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
++	bool tx_compl_ena = txq->tx_compl.ena;
++	struct rte_mbuf *m0, *m1, *m2, *m3;
++	struct rte_mbuf *cookie;
++	uint64_t w0, w1, aura;
++	uint64_t sqe_id;
++
++	m0 = mbufs[0];
++	m1 = mbufs[1];
++	m2 = mbufs[2];
++	m3 = mbufs[3];
++
++	/* mbuf 0 */
++	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
++	if (RTE_MBUF_HAS_EXTBUF(m0)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m0->next = *extm;
++			*extm = m0;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m0;
++			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
++
++	/* mbuf1 */
++	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
++	if (RTE_MBUF_HAS_EXTBUF(m1)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m1->next = *extm;
++			*extm = m1;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m1;
++			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
++
++	/* mbuf 2 */
++	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
++	if (RTE_MBUF_HAS_EXTBUF(m2)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m2->next = *extm;
++			*extm = m2;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m2;
++			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
++
++	/* mbuf3 */
++	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
++	if (RTE_MBUF_HAS_EXTBUF(m3)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m3->next = *extm;
++			*extm = m3;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m3;
++			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
++#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
++	RTE_SET_USED(cookie);
++#endif
++}
++#endif
++
+ static __rte_always_inline void
+ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
+ {
+@@ -864,9 +1030,9 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
+ 
+ static __rte_always_inline void
+ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
+-		       struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
+-		       const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag,
+-		       uint64_t mark_fmt)
++		       struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd,
++		       const uint16_t flags, const uint64_t lso_tun_fmt, bool *sec,
++		       uint8_t mark_flag, uint64_t mark_fmt)
+ {
+ 	uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
+ 	struct nix_send_ext_s *send_hdr_ext;
+@@ -889,6 +1055,9 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
+ 		sg = (union nix_send_sg_s *)(cmd + 2);
+ 	}
+ 
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
++		send_hdr->w0.pnc = 0;
++
+ 	if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) {
+ 		ol_flags = m->ol_flags;
+ 		w1.u = 0;
+@@ -1049,19 +1218,30 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
+ 		send_hdr->w1.u = w1.u;
+ 
+ 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
++		struct rte_mbuf *cookie;
++
+ 		sg->seg1_size = send_hdr->w0.total;
+ 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
++		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 
+ 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
++			uint64_t aura;
++
+ 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
+ 			 *		is greater than 1
+ 			 * DF bit = 0 otherwise
+ 			 */
+-			send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr);
++			aura = send_hdr->w0.aura;
++			send_hdr->w0.df = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura);
++			send_hdr->w0.aura = aura;
+ 		}
++#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 		/* Mark mempool object as "put" since it is freed by NIX */
+ 		if (!send_hdr->w0.df)
+-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++#else
++		RTE_SET_USED(cookie);
++#endif
+ 	} else {
+ 		sg->seg1_size = m->data_len;
+ 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+@@ -1113,7 +1293,7 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr,
+ 		struct nix_send_mem_s *send_mem;
+ 
+ 		send_mem = (struct nix_send_mem_s *)(lmt + off);
+-		/* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
++		/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp
+ 		 * should not be recorded, hence changing the alg type to
+ 		 * NIX_SENDMEMALG_SUB and also changing send mem addr field to
+ 		 * next 8 bytes as it corrupts the actual Tx tstamp registered
+@@ -1128,13 +1308,14 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr,
+ }
+ 
+ static __rte_always_inline uint16_t
+-cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
+-		       struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
++cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm,
++		       uint64_t *cmd, const uint16_t flags)
+ {
+ 	uint64_t prefree = 0, aura0, aura, nb_segs, segdw;
+ 	struct nix_send_hdr_s *send_hdr;
+ 	union nix_send_sg_s *sg, l_sg;
+ 	union nix_send_sg2_s l_sg2;
++	struct rte_mbuf *cookie;
+ 	struct rte_mbuf *m_next;
+ 	uint8_t off, is_sg2;
+ 	uint64_t len, dlen;
+@@ -1163,21 +1344,27 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
+ 	len -= dlen;
+ 	nb_segs = m->nb_segs - 1;
+ 	m_next = m->next;
++	m->next = NULL;
++	m->nb_segs = 1;
+ 	slist = &cmd[3 + off + 1];
+ 
++	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 	/* Set invert df if buffer is not to be freed by H/W */
+ 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+-		prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
++		aura = send_hdr->w0.aura;
++		prefree = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura);
++		send_hdr->w0.aura = aura;
+ 		l_sg.i1 = prefree;
+ 	}
+ 
+ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 	/* Mark mempool object as "put" since it is freed by NIX */
+ 	if (!prefree)
+-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+ 	rte_io_wmb();
++#else
++	RTE_SET_USED(cookie);
+ #endif
+-	m->next = NULL;
+ 
+ 	/* Quickly handle single segmented packets. With this if-condition
+ 	 * compiler will completely optimize out the below do-while loop
+@@ -1207,9 +1394,12 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
+ 		aura = aura0;
+ 		prefree = 0;
+ 
++		m->next = NULL;
++
++		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ 			aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
+-			prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
++			prefree = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura);
+ 			is_sg2 = aura != aura0 && !prefree;
+ 		}
+ 
+@@ -1259,13 +1449,14 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
+ 			l_sg.subdc = NIX_SUBDC_SG;
+ 			slist++;
+ 		}
+-		m->next = NULL;
+ 
+ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 		/* Mark mempool object as "put" since it is freed by NIX
+ 		 */
+ 		if (!prefree)
+-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++#else
++		RTE_SET_USED(cookie);
+ #endif
+ 		m = m_next;
+ 	} while (nb_segs);
+@@ -1302,6 +1493,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
+ 	uint8_t lnum, c_lnum, c_shft, c_loff;
+ 	uintptr_t pa, lbase = txq->lmt_base;
+ 	uint16_t lmt_id, burst, left, i;
++	struct rte_mbuf *extm = NULL;
+ 	uintptr_t c_lbase = lbase;
+ 	uint64_t lso_tun_fmt = 0;
+ 	uint64_t mark_fmt = 0;
+@@ -1356,7 +1548,7 @@ again:
+ 		if (flags & NIX_TX_OFFLOAD_TSO_F)
+ 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
+ 
+-		cn10k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt,
++		cn10k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt,
+ 				       &sec, mark_flag, mark_fmt);
+ 
+ 		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+@@ -1431,6 +1623,11 @@ again:
+ 	}
+ 
+ 	rte_io_wmb();
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) {
++		cn10k_nix_free_extmbuf(extm);
++		extm = NULL;
++	}
++
+ 	if (left)
+ 		goto again;
+ 
+@@ -1446,6 +1643,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
+ 	uintptr_t pa0, pa1, lbase = txq->lmt_base;
+ 	const rte_iova_t io_addr = txq->io_addr;
+ 	uint16_t segdw, lmt_id, burst, left, i;
++	struct rte_mbuf *extm = NULL;
+ 	uint8_t lnum, c_lnum, c_loff;
+ 	uintptr_t c_lbase = lbase;
+ 	uint64_t lso_tun_fmt = 0;
+@@ -1507,7 +1705,7 @@ again:
+ 		if (flags & NIX_TX_OFFLOAD_TSO_F)
+ 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
+ 
+-		cn10k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt,
++		cn10k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt,
+ 				       &sec, mark_flag, mark_fmt);
+ 
+ 		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+@@ -1521,7 +1719,7 @@ again:
+ 		/* Move NIX desc to LMT/NIXTX area */
+ 		cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+ 		/* Store sg list directly on lmt line */
+-		segdw = cn10k_nix_prepare_mseg(txq, tx_pkts[i], (uint64_t *)laddr,
++		segdw = cn10k_nix_prepare_mseg(txq, tx_pkts[i], &extm, (uint64_t *)laddr,
+ 					       flags);
+ 		cn10k_nix_xmit_prepare_tstamp(txq, laddr, tx_pkts[i]->ol_flags,
+ 					      segdw, flags);
+@@ -1594,6 +1792,11 @@ again:
+ 	}
+ 
+ 	rte_io_wmb();
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) {
++		cn10k_nix_free_extmbuf(extm);
++		extm = NULL;
++	}
++
+ 	if (left)
+ 		goto again;
+ 
+@@ -1644,7 +1847,7 @@ cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
+ 
+ static __rte_always_inline uint16_t
+ cn10k_nix_prepare_mseg_vec_noff(struct cn10k_eth_txq *txq,
+-				struct rte_mbuf *m, uint64_t *cmd,
++				struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd,
+ 				uint64x2_t *cmd0, uint64x2_t *cmd1,
+ 				uint64x2_t *cmd2, uint64x2_t *cmd3,
+ 				const uint32_t flags)
+@@ -1659,7 +1862,7 @@ cn10k_nix_prepare_mseg_vec_noff(struct cn10k_eth_txq *txq,
+ 		vst1q_u64(cmd + 2, *cmd1); /* sg */
+ 	}
+ 
+-	segdw = cn10k_nix_prepare_mseg(txq, m, cmd, flags);
++	segdw = cn10k_nix_prepare_mseg(txq, m, extm, cmd, flags);
+ 
+ 	if (flags & NIX_TX_OFFLOAD_TSTAMP_F)
+ 		vst1q_u64(cmd + segdw * 2 - 2, *cmd3);
+@@ -1694,9 +1897,13 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
+ 	len -= dlen;
+ 	sg_u = sg_u | ((uint64_t)dlen);
+ 
++	/* Mark mempool object as "put" since it is freed by NIX */
++	RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++
+ 	nb_segs = m->nb_segs - 1;
+ 	m_next = m->next;
+ 	m->next = NULL;
++	m->nb_segs = 1;
+ 	m = m_next;
+ 	/* Fill mbuf segments */
+ 	do {
+@@ -1719,6 +1926,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
+ 			slist++;
+ 		}
+ 		m->next = NULL;
++		/* Mark mempool object as "put" since it is freed by NIX */
++		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++
+ 		m = m_next;
+ 	} while (nb_segs);
+ 
+@@ -1742,8 +1952,11 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
+ 	union nix_send_hdr_w0_u sh;
+ 	union nix_send_sg_s sg;
+ 
+-	if (m->nb_segs == 1)
++	if (m->nb_segs == 1) {
++		/* Mark mempool object as "put" since it is freed by NIX */
++		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+ 		return;
++	}
+ 
+ 	sh.u = vgetq_lane_u64(cmd0[0], 0);
+ 	sg.u = vgetq_lane_u64(cmd1[0], 0);
+@@ -1759,7 +1972,7 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
+ 
+ static __rte_always_inline uint8_t
+ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
+-			       struct rte_mbuf **mbufs, uint64x2_t *cmd0,
++			       struct rte_mbuf **mbufs, struct rte_mbuf **extm, uint64x2_t *cmd0,
+ 			       uint64x2_t *cmd1, uint64x2_t *cmd2,
+ 			       uint64x2_t *cmd3, uint8_t *segdw,
+ 			       uint64_t *lmt_addr, __uint128_t *data128,
+@@ -1777,7 +1990,7 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
+ 				lmt_addr += 16;
+ 				off = 0;
+ 			}
+-			off += cn10k_nix_prepare_mseg_vec_noff(txq, mbufs[j],
++			off += cn10k_nix_prepare_mseg_vec_noff(txq, mbufs[j], extm,
+ 					lmt_addr + off * 2, &cmd0[j], &cmd1[j],
+ 					&cmd2[j], &cmd3[j], flags);
+ 		}
+@@ -1803,6 +2016,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
+ 			*data128 |= ((__uint128_t)7) << *shift;
+ 			*shift += 3;
+ 
++			/* Mark mempool object as "put" since it is freed by NIX */
++			RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0);
+ 			return 1;
+ 		}
+ 	}
+@@ -1821,6 +2039,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
+ 				vst1q_u64(lmt_addr + 10, cmd2[j + 1]);
+ 				vst1q_u64(lmt_addr + 12, cmd1[j + 1]);
+ 				vst1q_u64(lmt_addr + 14, cmd3[j + 1]);
++
++				/* Mark mempool object as "put" since it is freed by NIX */
++				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0);
++				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool,
++							  (void **)&mbufs[j + 1], 1, 0);
+ 			} else if (flags & NIX_TX_NEED_EXT_HDR) {
+ 				/* EXT header take 3 each, space for 2 segs.*/
+ 				cn10k_nix_prepare_mseg_vec(mbufs[j],
+@@ -1920,14 +2143,14 @@ cn10k_nix_lmt_next(uint8_t dw, uintptr_t laddr, uint8_t *lnum, uint8_t *loff,
+ 
+ static __rte_always_inline void
+ cn10k_nix_xmit_store(struct cn10k_eth_txq *txq,
+-		     struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr,
++		     struct rte_mbuf *mbuf, struct rte_mbuf **extm, uint8_t segdw, uintptr_t laddr,
+ 		     uint64x2_t cmd0, uint64x2_t cmd1, uint64x2_t cmd2,
+ 		     uint64x2_t cmd3, const uint16_t flags)
+ {
+ 	uint8_t off;
+ 
+ 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+-		cn10k_nix_prepare_mseg_vec_noff(txq, mbuf, LMT_OFF(laddr, 0, 0),
++		cn10k_nix_prepare_mseg_vec_noff(txq, mbuf, extm, LMT_OFF(laddr, 0, 0),
+ 						&cmd0, &cmd1, &cmd2, &cmd3,
+ 						flags);
+ 		return;
+@@ -1997,13 +2220,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
+ 	uint64x2_t sgdesc01_w0, sgdesc23_w0;
+ 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
+ 	struct cn10k_eth_txq *txq = tx_queue;
+-	uint64x2_t xmask01_w0, xmask23_w0;
+-	uint64x2_t xmask01_w1, xmask23_w1;
+ 	rte_iova_t io_addr = txq->io_addr;
+ 	uint8_t lnum, shift = 0, loff = 0;
+ 	uintptr_t laddr = txq->lmt_base;
+ 	uint8_t c_lnum, c_shft, c_loff;
+-	struct nix_send_hdr_s send_hdr;
+ 	uint64x2_t ltypes01, ltypes23;
+ 	uint64x2_t xtmp128, ytmp128;
+ 	uint64x2_t xmask01, xmask23;
+@@ -2014,6 +2234,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
+ 		__uint128_t data128;
+ 		uint64_t data[2];
+ 	} wd;
++	struct rte_mbuf *extm = NULL;
+ 
+ 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
+ 		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
+@@ -2098,7 +2319,8 @@ again:
+ 	}
+ 
+ 	for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) {
+-		if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) {
++		if (flags & NIX_TX_OFFLOAD_SECURITY_F &&
++		    (((int)((16 - c_lnum) << 1) - c_loff) < 4)) {
+ 			burst = i;
+ 			break;
+ 		}
+@@ -2153,7 +2375,7 @@ again:
+ 		}
+ 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
+ 		senddesc01_w0 =
+-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
++			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
+ 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+ 
+ 		senddesc23_w0 = senddesc01_w0;
+@@ -2859,73 +3081,8 @@ again:
+ 		    !(flags & NIX_TX_MULTI_SEG_F) &&
+ 		    !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
+ 			/* Set don't free bit if reference count > 1 */
+-			xmask01_w0 = vdupq_n_u64(0);
+-			xmask01_w1 = vdupq_n_u64(0);
+-			xmask23_w0 = xmask01_w0;
+-			xmask23_w1 = xmask01_w1;
+-
+-			/* Move mbufs to iova */
+-			mbuf0 = (uint64_t *)tx_pkts[0];
+-			mbuf1 = (uint64_t *)tx_pkts[1];
+-			mbuf2 = (uint64_t *)tx_pkts[2];
+-			mbuf3 = (uint64_t *)tx_pkts[3];
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
+-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf0)->pool,
+-					(void **)&mbuf0, 1, 0);
+-			}
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
+-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf1)->pool,
+-					(void **)&mbuf1, 1, 0);
+-			}
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
+-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf2)->pool,
+-					(void **)&mbuf2, 1, 0);
+-			}
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
+-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf3)->pool,
+-					(void **)&mbuf3, 1, 0);
+-			}
+-
+-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
+-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
+-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
+-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
++			cn10k_nix_prefree_seg_vec(tx_pkts, &extm, txq, &senddesc01_w0,
++						  &senddesc23_w0, &senddesc01_w1, &senddesc23_w1);
+ 		} else if (!(flags & NIX_TX_MULTI_SEG_F) &&
+ 			   !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
+ 			/* Move mbufs to iova */
+@@ -2997,7 +3154,7 @@ again:
+ 						   &shift, &wd.data128, &next);
+ 
+ 			/* Store mbuf0 to LMTLINE/CPT NIXTX area */
+-			cn10k_nix_xmit_store(txq, tx_pkts[0], segdw[0], next,
++			cn10k_nix_xmit_store(txq, tx_pkts[0], &extm, segdw[0], next,
+ 					     cmd0[0], cmd1[0], cmd2[0], cmd3[0],
+ 					     flags);
+ 
+@@ -3013,7 +3170,7 @@ again:
+ 						   &shift, &wd.data128, &next);
+ 
+ 			/* Store mbuf1 to LMTLINE/CPT NIXTX area */
+-			cn10k_nix_xmit_store(txq, tx_pkts[1], segdw[1], next,
++			cn10k_nix_xmit_store(txq, tx_pkts[1], &extm, segdw[1], next,
+ 					     cmd0[1], cmd1[1], cmd2[1], cmd3[1],
+ 					     flags);
+ 
+@@ -3029,7 +3186,7 @@ again:
+ 						   &shift, &wd.data128, &next);
+ 
+ 			/* Store mbuf2 to LMTLINE/CPT NIXTX area */
+-			cn10k_nix_xmit_store(txq, tx_pkts[2], segdw[2], next,
++			cn10k_nix_xmit_store(txq, tx_pkts[2], &extm, segdw[2], next,
+ 					     cmd0[2], cmd1[2], cmd2[2], cmd3[2],
+ 					     flags);
+ 
+@@ -3045,7 +3202,7 @@ again:
+ 						   &shift, &wd.data128, &next);
+ 
+ 			/* Store mbuf3 to LMTLINE/CPT NIXTX area */
+-			cn10k_nix_xmit_store(txq, tx_pkts[3], segdw[3], next,
++			cn10k_nix_xmit_store(txq, tx_pkts[3], &extm, segdw[3], next,
+ 					     cmd0[3], cmd1[3], cmd2[3], cmd3[3],
+ 					     flags);
+ 
+@@ -3053,7 +3210,7 @@ again:
+ 			uint8_t j;
+ 
+ 			segdw[4] = 8;
+-			j = cn10k_nix_prep_lmt_mseg_vector(txq, tx_pkts, cmd0, cmd1,
++			j = cn10k_nix_prep_lmt_mseg_vector(txq, tx_pkts, &extm, cmd0, cmd1,
+ 							  cmd2, cmd3, segdw,
+ 							  (uint64_t *)
+ 							  LMT_OFF(laddr, lnum,
+@@ -3203,6 +3360,11 @@ again:
+ 	}
+ 
+ 	rte_io_wmb();
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) {
++		cn10k_nix_free_extmbuf(extm);
++		extm = NULL;
++	}
++
+ 	if (left)
+ 		goto again;
+ 
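The cn10k hunks above replace the inline rte_pktmbuf_free_seg() of external-buffer
segments with a deferred, per-burst free: cn10k_nix_prefree_seg() pushes each
RTE_MBUF_HAS_EXTBUF segment onto the caller's extm list when Tx completions are
disabled, and the burst routines drain that list only at the end of the burst,
after rte_io_wmb(). A minimal sketch of the pattern, with descriptor and queue
handling elided:

    /* Sketch only: deferred free of external mbufs. */
    struct rte_mbuf *extm = NULL;

    /* Per segment, when txq->tx_compl.ena == 0: */
    m->next = extm;            /* push onto the per-burst list */
    extm = m;

    /* After the burst has been submitted: */
    rte_io_wmb();              /* order the LMT stores first */
    while (extm != NULL) {     /* cf. cn9k_nix_free_extmbuf() below */
        struct rte_mbuf *next = extm->next;
        rte_pktmbuf_free_seg(extm);
        extm = next;
    }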
+diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev.c b/dpdk/drivers/net/cnxk/cn9k_ethdev.c
+index bae4dda5e2..b92b978a27 100644
+--- a/dpdk/drivers/net/cnxk/cn9k_ethdev.c
++++ b/dpdk/drivers/net/cnxk/cn9k_ethdev.c
+@@ -347,7 +347,13 @@ cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+ 		struct roc_nix_sq *sq = &dev->sqs[qidx];
+ 		do {
+ 			handle_tx_completion_pkts(txq, 0);
++			/* Check if SQ is empty */
+ 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
++			if (head != tail)
++				continue;
++
++			/* Check if completion CQ is empty */
++			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
+ 		} while (head != tail);
+ 	}
+ 
+diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev.h b/dpdk/drivers/net/cnxk/cn9k_ethdev.h
+index 9e0a3c5bb2..6ae0db62ca 100644
+--- a/dpdk/drivers/net/cnxk/cn9k_ethdev.h
++++ b/dpdk/drivers/net/cnxk/cn9k_ethdev.h
+@@ -169,6 +169,7 @@ handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)
+ 			m = m_next;
+ 		}
+ 		rte_pktmbuf_free_seg(m);
++		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
+ 
+ 		head++;
+ 		head &= qmask;
+diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.h b/dpdk/drivers/net/cnxk/cn9k_tx.h
+index fba4bb4215..4715bf8a65 100644
+--- a/dpdk/drivers/net/cnxk/cn9k_tx.h
++++ b/dpdk/drivers/net/cnxk/cn9k_tx.h
+@@ -82,32 +82,198 @@ cn9k_nix_tx_skeleton(struct cn9k_eth_txq *txq, uint64_t *cmd,
+ 	}
+ }
+ 
++static __rte_always_inline void
++cn9k_nix_free_extmbuf(struct rte_mbuf *m)
++{
++	struct rte_mbuf *m_next;
++	while (m != NULL) {
++		m_next = m->next;
++		rte_pktmbuf_free_seg(m);
++		m = m_next;
++	}
++}
++
+ static __rte_always_inline uint64_t
+-cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
+-		struct nix_send_hdr_s *send_hdr)
++cn9k_nix_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **extm, struct cn9k_eth_txq *txq,
++		     struct nix_send_hdr_s *send_hdr, uint64_t *aura)
+ {
++	struct rte_mbuf *prev;
+ 	uint32_t sqe_id;
+ 
+ 	if (RTE_MBUF_HAS_EXTBUF(m)) {
+ 		if (unlikely(txq->tx_compl.ena == 0)) {
+-			rte_pktmbuf_free_seg(m);
++			m->next = *extm;
++			*extm = m;
+ 			return 1;
+ 		}
+ 		if (send_hdr->w0.pnc) {
+-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
++			sqe_id = send_hdr->w1.sqe_id;
++			prev = txq->tx_compl.ptr[sqe_id];
++			m->next = prev;
++			txq->tx_compl.ptr[sqe_id] = m;
+ 		} else {
+ 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+ 			send_hdr->w0.pnc = 1;
+ 			send_hdr->w1.sqe_id = sqe_id &
+ 				txq->tx_compl.nb_desc_mask;
+ 			txq->tx_compl.ptr[send_hdr->w1.sqe_id] = m;
++			m->next = NULL;
+ 		}
+ 		return 1;
+ 	} else {
+-		return cnxk_nix_prefree_seg(m);
++		return cnxk_nix_prefree_seg(m, aura);
+ 	}
+ }
+ 
++#if defined(RTE_ARCH_ARM64)
++/* Only called for the first segment of single-segment mbufs */
++static __rte_always_inline void
++cn9k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct rte_mbuf **extm, struct cn9k_eth_txq *txq,
++			 uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
++			 uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
++{
++	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
++	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
++	bool tx_compl_ena = txq->tx_compl.ena;
++	struct rte_mbuf *m0, *m1, *m2, *m3;
++	struct rte_mbuf *cookie;
++	uint64_t w0, w1, aura;
++	uint64_t sqe_id;
++
++	m0 = mbufs[0];
++	m1 = mbufs[1];
++	m2 = mbufs[2];
++	m3 = mbufs[3];
++
++	/* mbuf 0 */
++	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
++	if (RTE_MBUF_HAS_EXTBUF(m0)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m0->next = *extm;
++			*extm = m0;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m0;
++			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
++
++	/* mbuf1 */
++	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
++	if (RTE_MBUF_HAS_EXTBUF(m1)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m1->next = *extm;
++			*extm = m1;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m1;
++			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
++
++	/* mbuf 2 */
++	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
++	if (RTE_MBUF_HAS_EXTBUF(m2)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m2->next = *extm;
++			*extm = m2;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m2;
++			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
++
++	/* mbuf3 */
++	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
++	if (RTE_MBUF_HAS_EXTBUF(m3)) {
++		w0 |= BIT_ULL(19);
++		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
++		w1 &= ~0xFFFF000000000000UL;
++		if (unlikely(!tx_compl_ena)) {
++			m3->next = *extm;
++			*extm = m3;
++		} else {
++			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
++							       rte_memory_order_relaxed);
++			sqe_id = sqe_id & nb_desc_mask;
++			/* Set PNC */
++			w0 |= BIT_ULL(43);
++			w1 |= sqe_id << 48;
++			tx_compl_ptr[sqe_id] = m3;
++			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
++		}
++	} else {
++		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
++		aura = (w0 >> 20) & 0xFFFFF;
++		w0 &= ~0xFFFFF00000UL;
++		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
++		w0 |= aura << 20;
++
++		if ((w0 & BIT_ULL(19)) == 0)
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++	}
++	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
++#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
++	RTE_SET_USED(cookie);
++#endif
++}
++#endif
++
+ static __rte_always_inline void
+ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
+ {
+@@ -161,10 +327,9 @@ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
+ }
+ 
+ static __rte_always_inline void
+-cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
+-		      struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
+-		      const uint64_t lso_tun_fmt, uint8_t mark_flag,
+-		      uint64_t mark_fmt)
++cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm,
++		      uint64_t *cmd, const uint16_t flags, const uint64_t lso_tun_fmt,
++		      uint8_t mark_flag, uint64_t mark_fmt)
+ {
+ 	uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
+ 	struct nix_send_ext_s *send_hdr_ext;
+@@ -191,6 +356,8 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
+ 		ol_flags = m->ol_flags;
+ 		w1.u = 0;
+ 	}
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
++		send_hdr->w0.pnc = 0;
+ 
+ 	if (!(flags & NIX_TX_MULTI_SEG_F))
+ 		send_hdr->w0.total = m->data_len;
+@@ -345,23 +512,33 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
+ 		send_hdr->w1.u = w1.u;
+ 
+ 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
++		struct rte_mbuf *cookie;
++
+ 		sg->seg1_size = m->data_len;
+ 		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);
++		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 
+ 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
++			uint64_t aura;
+ 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
+ 			 *		is greater than 1
+ 			 * DF bit = 0 otherwise
+ 			 */
+-			send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr);
++			aura = send_hdr->w0.aura;
++			send_hdr->w0.df = cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura);
++			send_hdr->w0.aura = aura;
+ 			/* Ensuring mbuf fields which got updated in
+ 			 * cnxk_nix_prefree_seg are written before LMTST.
+ 			 */
+ 			rte_io_wmb();
+ 		}
++#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 		/* Mark mempool object as "put" since it is freed by NIX */
+ 		if (!send_hdr->w0.df)
+-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
++#else
++		RTE_SET_USED(cookie);
++#endif
+ 	} else {
+ 		sg->seg1_size = m->data_len;
+ 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+@@ -383,7 +560,7 @@ cn9k_nix_xmit_prepare_tstamp(struct cn9k_eth_txq *txq, uint64_t *cmd,
+ 
+ 		send_mem = (struct nix_send_mem_s *)(cmd + off);
+ 
+-		/* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
++		/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp
+ 		 * should not be recorded, hence changing the alg type to
+ 		 * NIX_SENDMEMALG_SUB and also changing send mem addr field to
+ 		 * next 8 bytes as it corrupts the actual Tx tstamp registered
+@@ -439,10 +616,12 @@ cn9k_nix_xmit_submit_lmt_release(const rte_iova_t io_addr)
+ }
+ 
+ static __rte_always_inline uint16_t
+-cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
+-		      struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
++cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm,
++		      uint64_t *cmd, const uint16_t flags)
+ {
+ 	struct nix_send_hdr_s *send_hdr;
++	uint64_t prefree = 0, aura;
++	struct rte_mbuf *cookie;
+ 	union nix_send_sg_s *sg;
+ 	struct rte_mbuf *m_next;
+ 	uint64_t *slist, sg_u;
+@@ -467,17 +646,27 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
+ 	m_next = m->next;
+ 	slist = &cmd[3 + off + 1];
+ 
++	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 	/* Set invert df if buffer is not to be freed by H/W */
+ 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
++		aura = send_hdr->w0.aura;
++		prefree = (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << 55);
++		send_hdr->w0.aura = aura;
++		sg_u |= prefree;
+ 		rte_io_wmb();
+ 	}
+ 
+ 	/* Mark mempool object as "put" since it is freed by NIX */
+ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 	if (!(sg_u & (1ULL << 55)))
+-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+ 	rte_io_wmb();
++#else
++	RTE_SET_USED(cookie);
++#endif
++#ifdef RTE_ENABLE_ASSERT
++	m->next = NULL;
++	m->nb_segs = 1;
+ #endif
+ 	m = m_next;
+ 	if (!m)
+@@ -488,16 +677,17 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
+ 		m_next = m->next;
+ 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
+ 		*slist = rte_mbuf_data_iova(m);
++		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 		/* Set invert df if buffer is not to be freed by H/W */
+ 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
++			sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, NULL) << (i + 55));
+ 			/* Commit changes to mbuf */
+ 			rte_io_wmb();
+ 		}
+ 		/* Mark mempool object as "put" since it is freed by NIX */
+ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 		if (!(sg_u & (1ULL << (i + 55))))
+-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+ 		rte_io_wmb();
+ #endif
+ 		slist++;
+@@ -513,6 +703,9 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
+ 			sg_u = sg->u;
+ 			slist++;
+ 		}
++#ifdef RTE_ENABLE_ASSERT
++		m->next = NULL;
++#endif
+ 		m = m_next;
+ 	} while (nb_segs);
+ 
+@@ -526,6 +719,9 @@ done:
+ 	segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+ 	send_hdr->w0.sizem1 = segdw - 1;
+ 
++#ifdef RTE_ENABLE_ASSERT
++	rte_io_wmb();
++#endif
+ 	return segdw;
+ }
+ 
+@@ -568,6 +764,7 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
+ 	const rte_iova_t io_addr = txq->io_addr;
+ 	uint64_t lso_tun_fmt = 0, mark_fmt = 0;
+ 	void *lmt_addr = txq->lmt_addr;
++	struct rte_mbuf *extm = NULL;
+ 	uint8_t mark_flag = 0;
+ 	uint16_t i;
+ 
+@@ -598,13 +795,16 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
+ 		rte_io_wmb();
+ 
+ 	for (i = 0; i < pkts; i++) {
+-		cn9k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt,
++		cn9k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt,
+ 				      mark_flag, mark_fmt);
+ 		cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, 4,
+ 					     flags);
+ 		cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
+ 	}
+ 
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena)
++		cn9k_nix_free_extmbuf(extm);
++
+ 	/* Reduce the cached count */
+ 	txq->fc_cache_pkts -= pkts;
+ 
+@@ -619,6 +819,7 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 	const rte_iova_t io_addr = txq->io_addr;
+ 	uint64_t lso_tun_fmt = 0, mark_fmt = 0;
+ 	void *lmt_addr = txq->lmt_addr;
++	struct rte_mbuf *extm = NULL;
+ 	uint8_t mark_flag = 0;
+ 	uint16_t segdw;
+ 	uint64_t i;
+@@ -650,14 +851,17 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 		rte_io_wmb();
+ 
+ 	for (i = 0; i < pkts; i++) {
+-		cn9k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt,
++		cn9k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt,
+ 				      mark_flag, mark_fmt);
+-		segdw = cn9k_nix_prepare_mseg(txq, tx_pkts[i], cmd, flags);
++		segdw = cn9k_nix_prepare_mseg(txq, tx_pkts[i], &extm, cmd, flags);
+ 		cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags,
+ 					     segdw, flags);
+ 		cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
+ 	}
+ 
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena)
++		cn9k_nix_free_extmbuf(extm);
++
+ 	/* Reduce the cached count */
+ 	txq->fc_cache_pkts -= pkts;
+ 
+@@ -705,12 +909,12 @@ cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
+ 
+ static __rte_always_inline uint8_t
+ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
+-			       struct rte_mbuf *m, uint64_t *cmd,
++			       struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd,
+ 			       struct nix_send_hdr_s *send_hdr,
+ 			       union nix_send_sg_s *sg, const uint32_t flags)
+ {
+-	struct rte_mbuf *m_next;
+-	uint64_t *slist, sg_u;
++	struct rte_mbuf *m_next, *cookie;
++	uint64_t *slist, sg_u, aura;
+ 	uint16_t nb_segs;
+ 	uint64_t segdw;
+ 	int i = 1;
+@@ -727,29 +931,40 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
+ 	m_next = m->next;
+ 
+ 	/* Set invert df if buffer is not to be freed by H/W */
+-	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
+-		/* Mark mempool object as "put" since it is freed by NIX */
++	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
++		aura = send_hdr->w0.aura;
++		sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << 55);
++		send_hdr->w0.aura = aura;
++	}
++	/* Mark mempool object as "put" since it is freed by NIX */
+ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 	if (!(sg_u & (1ULL << 55)))
+-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+ 	rte_io_wmb();
++#else
++	RTE_SET_USED(cookie);
+ #endif
+ 
++#ifdef RTE_ENABLE_ASSERT
++	m->next = NULL;
++	m->nb_segs = 1;
++#endif
+ 	m = m_next;
+ 	/* Fill mbuf segments */
+ 	do {
+ 		m_next = m->next;
+ 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
+ 		*slist = rte_mbuf_data_iova(m);
++		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 		/* Set invert df if buffer is not to be freed by H/W */
+ 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
++			sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << (i + 55));
+ 			/* Mark mempool object as "put" since it is freed by NIX
+ 			 */
+ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 		if (!(sg_u & (1ULL << (i + 55))))
+-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+ 		rte_io_wmb();
+ #endif
+ 		slist++;
+@@ -765,6 +980,9 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
+ 			sg_u = sg->u;
+ 			slist++;
+ 		}
++#ifdef RTE_ENABLE_ASSERT
++		m->next = NULL;
++#endif
+ 		m = m_next;
+ 	} while (nb_segs);
+ 
+@@ -780,24 +998,31 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
+ 		 !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+ 	send_hdr->w0.sizem1 = segdw - 1;
+ 
++#ifdef RTE_ENABLE_ASSERT
++	rte_io_wmb();
++#endif
+ 	return segdw;
+ }
+ 
+ static __rte_always_inline uint8_t
+-cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
+-			  struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
+-			  uint64x2_t *cmd1, const uint32_t flags)
++cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm,
++			  uint64_t *cmd, uint64x2_t *cmd0, uint64x2_t *cmd1, const uint32_t flags)
+ {
+ 	struct nix_send_hdr_s send_hdr;
++	struct rte_mbuf *cookie;
+ 	union nix_send_sg_s sg;
++	uint64_t aura;
+ 	uint8_t ret;
+ 
+ 	if (m->nb_segs == 1) {
++		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+ 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ 			send_hdr.w0.u = vgetq_lane_u64(cmd0[0], 0);
+ 			send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1);
+ 			sg.u = vgetq_lane_u64(cmd1[0], 0);
+-			sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr) << 55);
++			aura = send_hdr.w0.aura;
++			sg.u |= (cn9k_nix_prefree_seg(m, extm, txq, &send_hdr, &aura) << 55);
++			send_hdr.w0.aura = aura;
+ 			cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0);
+ 			cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0);
+ 			cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1);
+@@ -806,8 +1031,10 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
+ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ 		sg.u = vgetq_lane_u64(cmd1[0], 0);
+ 		if (!(sg.u & (1ULL << 55)))
+-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
++			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+ 		rte_io_wmb();
++#else
++		RTE_SET_USED(cookie);
+ #endif
+ 		return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) +
+ 		       !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+@@ -817,7 +1044,7 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
+ 	send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1);
+ 	sg.u = vgetq_lane_u64(cmd1[0], 0);
+ 
+-	ret = cn9k_nix_prepare_mseg_vec_list(txq, m, cmd, &send_hdr, &sg, flags);
++	ret = cn9k_nix_prepare_mseg_vec_list(txq, m, extm, cmd, &send_hdr, &sg, flags);
+ 
+ 	cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0);
+ 	cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1);
+@@ -962,11 +1189,9 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
+ 	struct cn9k_eth_txq *txq = tx_queue;
+ 	uint64_t *lmt_addr = txq->lmt_addr;
+-	uint64x2_t xmask01_w0, xmask23_w0;
+-	uint64x2_t xmask01_w1, xmask23_w1;
+ 	rte_iova_t io_addr = txq->io_addr;
+-	struct nix_send_hdr_s send_hdr;
+ 	uint64x2_t ltypes01, ltypes23;
++	struct rte_mbuf *extm = NULL;
+ 	uint64x2_t xtmp128, ytmp128;
+ 	uint64x2_t xmask01, xmask23;
+ 	uint64_t lmt_status, i;
+@@ -1028,7 +1253,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
+ 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
+ 		senddesc01_w0 =
+-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
++			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
+ 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+ 
+ 		senddesc23_w0 = senddesc01_w0;
+@@ -1732,74 +1957,8 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 		if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
+ 		    !(flags & NIX_TX_MULTI_SEG_F)) {
+ 			/* Set don't free bit if reference count > 1 */
+-			xmask01_w0 = vdupq_n_u64(0);
+-			xmask01_w1 = vdupq_n_u64(0);
+-			xmask23_w0 = xmask01_w0;
+-			xmask23_w1 = xmask01_w1;
+-
+-			/* Move mbufs to iova */
+-			mbuf0 = (uint64_t *)tx_pkts[0];
+-			mbuf1 = (uint64_t *)tx_pkts[1];
+-			mbuf2 = (uint64_t *)tx_pkts[2];
+-			mbuf3 = (uint64_t *)tx_pkts[3];
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
+-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf0)->pool,
+-					(void **)&mbuf0, 1, 0);
+-			}
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
+-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf1)->pool,
+-					(void **)&mbuf1, 1, 0);
+-			}
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
+-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf2)->pool,
+-					(void **)&mbuf2, 1, 0);
+-			}
+-
+-			send_hdr.w0.u = 0;
+-			send_hdr.w1.u = 0;
+-
+-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
+-				send_hdr.w0.df = 1;
+-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
+-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
+-			} else {
+-				RTE_MEMPOOL_CHECK_COOKIES(
+-					((struct rte_mbuf *)mbuf3)->pool,
+-					(void **)&mbuf3, 1, 0);
+-			}
+-
+-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
+-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
+-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
+-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
+-
++			cn9k_nix_prefree_seg_vec(tx_pkts, &extm, txq, &senddesc01_w0,
++						 &senddesc23_w0, &senddesc01_w1, &senddesc23_w1);
+ 			/* Ensuring mbuf fields which got updated in
+ 			 * cnxk_nix_prefree_seg are written before LMTST.
+ 			 */
+@@ -1860,7 +2019,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 			/* Build mseg list for each packet individually. */
+ 			for (j = 0; j < NIX_DESCS_PER_LOOP; j++)
+ 				segdw[j] = cn9k_nix_prepare_mseg_vec(txq,
+-							tx_pkts[j],
++							tx_pkts[j], &extm,
+ 							seg_list[j], &cmd0[j],
+ 							&cmd1[j], flags);
+ 			segdw[4] = 8;
+@@ -1935,6 +2094,9 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 		tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
+ 	}
+ 
++	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena)
++		cn9k_nix_free_extmbuf(extm);
++
+ 	if (unlikely(pkts_left)) {
+ 		if (flags & NIX_TX_MULTI_SEG_F)
+ 			pkts += cn9k_nix_xmit_pkts_mseg(tx_queue, tx_pkts,
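In both the cn9k and cn10k vector paths, the new *_prefree_seg_vec() helpers
work on the send-header words as scalars pulled out of the NEON lanes. Judging
from the masks in the code, DF sits in bit 19 of w0, the aura in bits 20..39,
PNC in bit 43, and the completion sqe_id in bits 48..63 of w1; the widened
vbicq_u64() mask (0xFFFFFFFF to 0x800FFFFFFFF) then clears bit 43 along with
the lower 32 bits, so a stale PNC cannot leak from the reused descriptor
template. A sketch of the per-lane update, assuming that field layout:

    /* Non-extbuf lane: recompute DF and the (possibly moved) aura. */
    uint64_t aura = (w0 >> 20) & 0xFFFFF;          /* 20-bit aura field */
    w0 &= ~0xFFFFF00000UL;                         /* clear aura        */
    w0 |= cnxk_nix_prefree_seg(m, &aura) << 19;    /* DF bit            */
    w0 |= aura << 20;

    /* Extbuf lane with completions enabled: post a completion instead. */
    w0 |= BIT_ULL(43);                                      /* PNC      */
    w1 = (w1 & ~0xFFFF000000000000UL) | (sqe_id << 48);     /* slot id  */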
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c
+index 5e11bbb017..c841b31051 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c
+@@ -1384,6 +1384,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
+ 		goto free_nix_lf;
+ 	}
+ 
++	/* Overwrite default RSS setup if requested by user */
++	rc = cnxk_nix_rss_hash_update(eth_dev, &conf->rx_adv_conf.rss_conf);
++	if (rc) {
++		plt_err("Failed to configure rss rc=%d", rc);
++		goto free_nix_lf;
++	}
++
+ 	/* Init the default TM scheduler hierarchy */
+ 	rc = roc_nix_tm_init(nix);
+ 	if (rc) {
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c
+index 8e862be933..a0e9300cff 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c
+@@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args)
+ 	if (errno)
+ 		val = 0;
+ 
+-	*(uint16_t *)extra_args = val;
++	*(uint32_t *)extra_args = val;
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h b/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h
+index c1f99a2616..67f40b8e25 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h
+@@ -84,7 +84,7 @@ struct cnxk_timesync_info {
+ 
+ /* Inlines */
+ static __rte_always_inline uint64_t
+-cnxk_pktmbuf_detach(struct rte_mbuf *m)
++cnxk_pktmbuf_detach(struct rte_mbuf *m, uint64_t *aura)
+ {
+ 	struct rte_mempool *mp = m->pool;
+ 	uint32_t mbuf_size, buf_len;
+@@ -94,6 +94,8 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
+ 
+ 	/* Update refcount of direct mbuf */
+ 	md = rte_mbuf_from_indirect(m);
++	if (aura)
++		*aura = roc_npa_aura_handle_to_aura(md->pool->pool_id);
+ 	refcount = rte_mbuf_refcnt_update(md, -1);
+ 
+ 	priv_size = rte_pktmbuf_priv_size(mp);
+@@ -126,18 +128,18 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
+ }
+ 
+ static __rte_always_inline uint64_t
+-cnxk_nix_prefree_seg(struct rte_mbuf *m)
++cnxk_nix_prefree_seg(struct rte_mbuf *m, uint64_t *aura)
+ {
+ 	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+ 		if (!RTE_MBUF_DIRECT(m))
+-			return cnxk_pktmbuf_detach(m);
++			return cnxk_pktmbuf_detach(m, aura);
+ 
+ 		m->next = NULL;
+ 		m->nb_segs = 1;
+ 		return 0;
+ 	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
+ 		if (!RTE_MBUF_DIRECT(m))
+-			return cnxk_pktmbuf_detach(m);
++			return cnxk_pktmbuf_detach(m, aura);
+ 
+ 		rte_mbuf_refcnt_set(m, 1);
+ 		m->next = NULL;
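cnxk_nix_prefree_seg() keeps its old return contract (1 means the driver
retains ownership and hardware must not free the buffer, 0 means hardware
frees it into the aura), but it can now also report through the aura
out-parameter which NPA pool the buffer really belongs to: when an indirect
mbuf is detached, cnxk_pktmbuf_detach() rewrites *aura to the parent (direct)
mbuf's pool. A sketch of the caller side, as used by the Tx prepare paths
above:

    /* Sketch: consuming cnxk_nix_prefree_seg() in the Tx path. */
    uint64_t aura = send_hdr->w0.aura;      /* aura from the template   */
    send_hdr->w0.df = cnxk_nix_prefree_seg(m, &aura);
    send_hdr->w0.aura = aura;               /* may now name the parent
                                             * mbuf's pool              */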
+diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c
+index 5de2919047..c8f4848f92 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c
+@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
+ 	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
+ 	devinfo->max_mac_addrs = dev->max_mac_entries;
+ 	devinfo->max_vfs = pci_dev->max_vfs;
+-	devinfo->max_mtu = devinfo->max_rx_pktlen -
+-				(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
++	devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD;
+ 	devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;
+ 
+ 	devinfo->rx_offload_capa = dev->rx_offload_capa;
+@@ -448,6 +447,13 @@ cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
+ 			roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
+ 			goto exit;
+ 		}
++
++		if (eth_dev->data->promiscuous) {
++			rc = roc_nix_mac_promisc_mode_enable(nix, true);
++			if (rc)
++				plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
++					roc_error_msg_get(rc));
++		}
+ 	}
+ 
+ 	/* Update mac address to cnxk ethernet device */
+@@ -544,8 +550,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+ 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ 	struct rte_eth_dev_data *data = eth_dev->data;
+ 	struct roc_nix *nix = &dev->nix;
++	struct cnxk_eth_rxq_sp *rxq_sp;
++	uint32_t buffsz = 0;
+ 	int rc = -EINVAL;
+-	uint32_t buffsz;
+ 
+ 	frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;
+ 
+@@ -561,8 +568,24 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+ 		goto exit;
+ 	}
+ 
+-	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+-	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
++	if (!eth_dev->data->nb_rx_queues)
++		goto skip_buffsz_check;
++
++	/* Perform buff size check */
++	if (data->min_rx_buf_size) {
++		buffsz = data->min_rx_buf_size;
++	} else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) {
++		rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]);
++
++		if (rxq_sp->qconf.mp)
++			buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp);
++	}
++
++	/* Skip validation if RQ's are not yet setup */
++	if (!buffsz)
++		goto skip_buffsz_check;
++
++	buffsz -= RTE_PKTMBUF_HEADROOM;
+ 
+ 	/* Refuse MTU that requires the support of scattered packets
+ 	 * when this feature has not been enabled before.
+@@ -580,6 +603,8 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+ 		goto exit;
+ 	}
+ 
++skip_buffsz_check:
++	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
+ 	/* if new MTU was smaller than old one, then flush all SQs before MTU change */
+ 	if (old_frame_size > frame_size) {
+ 		if (data->dev_started) {
+@@ -591,19 +616,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+ 
+ 	frame_size -= RTE_ETHER_CRC_LEN;
+ 
+-	/* Update mtu on Tx */
+-	rc = roc_nix_mac_mtu_set(nix, frame_size);
+-	if (rc) {
+-		plt_err("Failed to set MTU, rc=%d", rc);
+-		goto exit;
+-	}
+-
+-	/* Sync same frame size on Rx */
++	/* Set frame size on Rx */
+ 	rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
+ 	if (rc) {
+-		/* Rollback to older mtu */
+-		roc_nix_mac_mtu_set(nix,
+-				    old_frame_size - RTE_ETHER_CRC_LEN);
+ 		plt_err("Failed to max Rx frame length, rc=%d", rc);
+ 		goto exit;
+ 	}
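The reworked cnxk_nix_mtu_set() validates the MTU against an Rx buffer size
only when one can actually be determined: it prefers data->min_rx_buf_size,
falls back to the data-room size of the first Rx queue's mempool, and jumps
to skip_buffsz_check when no Rx queue is configured yet. The Tx-side
roc_nix_mac_mtu_set() call and its rollback are dropped, leaving only the Rx
maximum frame length programming. The selection logic, as a sketch:

    /* Sketch: picking the buffer size used for the scatter/MTU check. */
    uint32_t buffsz = 0;

    if (data->min_rx_buf_size)
        buffsz = data->min_rx_buf_size;
    else if (data->rx_queues && data->rx_queues[0])
        /* rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]) */
        buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp);

    if (!buffsz)
        goto skip_buffsz_check;    /* RQs not set up: nothing to check */
    buffsz -= RTE_PKTMBUF_HEADROOM;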
+diff --git a/dpdk/drivers/net/cnxk/cnxk_flow.c b/dpdk/drivers/net/cnxk/cnxk_flow.c
+index 08ab75e2bb..be0330fa04 100644
+--- a/dpdk/drivers/net/cnxk/cnxk_flow.c
++++ b/dpdk/drivers/net/cnxk/cnxk_flow.c
+@@ -102,15 +102,19 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
+ }
+ 
+ static void
+-npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev,
+-		    const struct roc_npc_action *rss_action,
+-		    uint32_t *flowkey_cfg)
++npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, const struct roc_npc_action *rss_action,
++		    uint32_t *flowkey_cfg, uint64_t default_rss_types)
+ {
+ 	const struct roc_npc_action_rss *rss;
++	uint64_t rss_types;
+ 
+ 	rss = (const struct roc_npc_action_rss *)rss_action->conf;
++	rss_types = rss->types;
++	/* If no RSS types are specified, use default one */
++	if (rss_types == 0)
++		rss_types = default_rss_types;
+ 
+-	*flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss->types, rss->level);
++	*flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss_types, rss->level);
+ }
+ 
+ static int
+@@ -204,7 +208,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
+ 				goto err_exit;
+ 			in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS;
+ 			in_actions[i].conf = actions->conf;
+-			npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg);
++			npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg,
++					    eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+ 			break;
+ 
+ 		case RTE_FLOW_ACTION_TYPE_SECURITY:
+@@ -503,6 +508,9 @@ cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context,
+ 
+ 	flow_age = &roc_npc->flow_age;
+ 
++	if (!flow_age->age_flow_refcnt)
++		return 0;
++
+ 	do {
+ 		sn = plt_seqcount_read_begin(&flow_age->seq_cnt);
+ 
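With npc_rss_flowkey_get() taking a default, an RSS flow action that leaves
types at 0 now inherits the hash fields the port was configured with
(rx_adv_conf.rss_conf.rss_hf) instead of selecting no flow key at all. A
hypothetical usage sketch; the queue list and count are illustrative and not
taken from the patch:

    /* types == 0 now falls back to the port's configured rss_hf. */
    uint16_t queues[2] = { 0, 1 };              /* hypothetical queues */
    struct rte_flow_action_rss rss = {
        .types = 0,
        .queue_num = 2,
        .queue = queues,
    };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };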
+diff --git a/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+index 8a4e1419b4..f6bd1f7599 100644
+--- a/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c
++++ b/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+@@ -95,7 +95,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
+ 
+ 	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+ 				rim->rules, rim->rule_num, true);
+-	if (ret < 0) {
++	if (ret != 0) {
+ 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ 				   "cpfl filter create flow fail");
+ 		rte_free(rim);
+diff --git a/dpdk/drivers/net/cpfl/cpfl_flow_parser.c b/dpdk/drivers/net/cpfl/cpfl_flow_parser.c
+index a8f0488f21..40569ddc6f 100644
+--- a/dpdk/drivers/net/cpfl/cpfl_flow_parser.c
++++ b/dpdk/drivers/net/cpfl/cpfl_flow_parser.c
+@@ -1696,7 +1696,7 @@ cpfl_parse_check_prog_action(struct cpfl_flow_js_mr_key_action *key_act,
+ 	bool check_name;
+ 
+ 	check_name = key_act->prog.has_name ? strcmp(prog->name, key_act->prog.name) == 0
+-					    : atol(prog->name) == key_act->prog.id;
++					    : (uint32_t)atol(prog->name) == key_act->prog.id;
+ 	if (!check_name) {
+ 		PMD_DRV_LOG(ERR, "Not support this prog type: %s.", prog->name);
+ 		return -EINVAL;
+diff --git a/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c b/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c
+index 0e710a007b..be34da9fa2 100644
+--- a/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c
++++ b/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c
+@@ -92,6 +92,14 @@ cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_m
+ 
+ 		/* TODO - process rx controlq message */
+ 		for (i = 0; i < num_q_msg; i++) {
++			ret = q_msg[i].status;
++			if (ret != CPFL_CFG_PKT_ERR_OK &&
++			    q_msg[i].opcode != cpfl_ctlq_sem_query_del_rule_hash_addr) {
++				PMD_INIT_LOG(ERR, "Failed to process rx_ctrlq msg: %s",
++					cpfl_cfg_pkt_errormsg[ret]);
++				return ret;
++			}
++
+ 			if (q_msg[i].data_len > 0)
+ 				dma = q_msg[i].ctx.indirect.payload;
+ 			else
+diff --git a/dpdk/drivers/net/cpfl/cpfl_rules.h b/dpdk/drivers/net/cpfl/cpfl_rules.h
+index d23eae8e91..10569b1fdc 100644
+--- a/dpdk/drivers/net/cpfl/cpfl_rules.h
++++ b/dpdk/drivers/net/cpfl/cpfl_rules.h
+@@ -62,6 +62,17 @@ enum cpfl_cfg_pkt_error_code {
+ 	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+ };
+ 
++static const char * const cpfl_cfg_pkt_errormsg[] = {
++	[CPFL_CFG_PKT_ERR_ESRCH] = "Bad opcode",
++	[CPFL_CFG_PKT_ERR_EEXIST] = "The rule conflicts with an already existing one",
++	[CPFL_CFG_PKT_ERR_ENOSPC] = "No space left in the table",
++	[CPFL_CFG_PKT_ERR_ERANGE] = "Parameter out of range",
++	[CPFL_CFG_PKT_ERR_ESBCOMP] = "Completion error",
++	[CPFL_CFG_PKT_ERR_ENOPIN] = "Entry cannot be pinned in cache",
++	[CPFL_CFG_PKT_ERR_ENOTFND] = "Entry does not exist",
++	[CPFL_CFG_PKT_ERR_EMAXCOL] = "Maximum Hash Collisions reached",
++};
++
+ /* macros for creating context for rule descriptor */
+ #define MEV_RULE_VSI_ID_S		0
+ #define MEV_RULE_VSI_ID_M		\
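cpfl_receive_ctlq_msg() above indexes this table directly with the returned
status, which is safe for the statuses it reports on but leaves index 0
(CPFL_CFG_PKT_ERR_OK) and any gap in the enum as NULL entries. A defensively
written caller might guard the lookup; this sketch is a variant, not what the
patch itself does:

    /* Sketch: range-guarded lookup into the sparse message table. */
    const char *msg = "unknown error";

    if (ret > CPFL_CFG_PKT_ERR_OK && ret <= CPFL_CFG_PKT_ERR_EMAXCOL &&
        cpfl_cfg_pkt_errormsg[ret] != NULL)
        msg = cpfl_cfg_pkt_errormsg[ret];
    PMD_INIT_LOG(ERR, "Failed to process rx_ctrlq msg: %s", msg);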
+diff --git a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
+index 8cc3d9f257..781f48cfac 100644
+--- a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
++++ b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
+@@ -211,9 +211,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ 	unsigned int i, work_done, budget = 32;
+ 	struct link_config *lc = &pi->link_cfg;
+ 	struct adapter *adapter = pi->adapter;
+-	struct rte_eth_link new_link = { 0 };
+ 	u8 old_link = pi->link_cfg.link_ok;
+ 	struct sge *s = &adapter->sge;
++	struct rte_eth_link new_link;
+ 
+ 	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+ 		if (!s->fw_evtq.desc)
+@@ -232,6 +232,7 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ 		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
+ 	}
+ 
++	memset(&new_link, 0, sizeof(new_link));
+ 	new_link.link_status = cxgbe_force_linkup(adapter) ?
+ 			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
+ 	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
+diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c
+index ef4c06db6a..bcb28f33ee 100644
+--- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c
++++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c
+@@ -14,6 +14,7 @@
+ #include <pthread.h>
+ #include <sys/types.h>
+ #include <sys/syscall.h>
++#include <sys/ioctl.h>
+ 
+ #include <rte_string_fns.h>
+ #include <rte_byteorder.h>
+@@ -165,9 +166,15 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ 				+ VLAN_TAG_SIZE;
+ 	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
++	struct fman_if *fif = dev->process_private;
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
++	if (fif->is_shared_mac) {
++		DPAA_PMD_ERR("Cannot configure MTU from DPDK in VSP mode.");
++		return -ENOTSUP;
++	}
++
+ 	/*
+ 	 * Refuse mtu that requires the support of scattered packets
+ 	 * when this feature has not been enabled before.
+@@ -206,7 +213,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
+ 	struct rte_intr_handle *intr_handle;
+ 	uint32_t max_rx_pktlen;
+ 	int speed, duplex;
+-	int ret, rx_status;
++	int ret, rx_status, socket_fd;
++	struct ifreq ifr;
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
+@@ -222,6 +230,26 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
+ 				     dpaa_intf->name);
+ 			return -EHOSTDOWN;
+ 		}
++
++		socket_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
++		if (socket_fd == -1) {
++			DPAA_PMD_ERR("Cannot open IF socket");
++			return -errno;
++		}
++
++		strncpy(ifr.ifr_name, dpaa_intf->name, IFNAMSIZ - 1);
++
++		if (ioctl(socket_fd, SIOCGIFMTU, &ifr) < 0) {
++			DPAA_PMD_ERR("Cannot get interface mtu");
++			close(socket_fd);
++			return -errno;
++		}
++
++		close(socket_fd);
++		DPAA_PMD_INFO("Using kernel-configured MTU size (%u)",
++			     ifr.ifr_mtu);
++
++		eth_conf->rxmode.mtu = ifr.ifr_mtu;
+ 	}
+ 
+ 	/* Rx offloads which are enabled by default */
+@@ -249,7 +277,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
+ 		max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
+ 	}
+ 
+-	fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
++	if (!fif->is_shared_mac)
++		fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
+ 
+ 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+ 		DPAA_PMD_DEBUG("enabling scatter mode");
+@@ -363,7 +392,8 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+ 		RTE_PTYPE_L4_TCP,
+ 		RTE_PTYPE_L4_UDP,
+ 		RTE_PTYPE_L4_SCTP,
+-		RTE_PTYPE_TUNNEL_ESP
++		RTE_PTYPE_TUNNEL_ESP,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 
+ 	PMD_INIT_FUNC_TRACE();
+diff --git a/dpdk/drivers/net/e1000/base/e1000_base.c b/dpdk/drivers/net/e1000/base/e1000_base.c
+index ab73e1e59e..3ec32e7240 100644
+--- a/dpdk/drivers/net/e1000/base/e1000_base.c
++++ b/dpdk/drivers/net/e1000/base/e1000_base.c
+@@ -107,7 +107,7 @@ void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
+ 		return;
+ 
+ 	/* If the management interface is not enabled, then power down */
+-	if (phy->ops.check_reset_block(hw))
++	if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
+ 		e1000_power_down_phy_copper(hw);
+ }
+ 
+diff --git a/dpdk/drivers/net/ena/base/ena_com.c b/dpdk/drivers/net/ena/base/ena_com.c
+index 6953a1fa33..2f438597e6 100644
+--- a/dpdk/drivers/net/ena/base/ena_com.c
++++ b/dpdk/drivers/net/ena/base/ena_com.c
+@@ -34,6 +34,8 @@
+ 
+ #define ENA_REGS_ADMIN_INTR_MASK 1
+ 
++#define ENA_MAX_BACKOFF_DELAY_EXP 16U
++
+ #define ENA_MIN_ADMIN_POLL_US 100
+ 
+ #define ENA_MAX_ADMIN_POLL_US 5000
+@@ -177,6 +179,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ 				     struct ena_comp_ctx *comp_ctx)
+ {
++	comp_ctx->user_cqe = NULL;
+ 	comp_ctx->occupied = false;
+ 	ATOMIC32_DEC(&queue->outstanding_cmds);
+ }
+@@ -470,6 +473,9 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
+ 		return;
+ 	}
+ 
++	if (!comp_ctx->occupied)
++		return;
++
+ 	comp_ctx->status = ENA_CMD_COMPLETED;
+ 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+ 
+@@ -545,8 +551,9 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ 
+ static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+ {
++	exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp);
+ 	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
+-	delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
++	delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp));
+ 	ENA_USLEEP(delay_us);
+ }
+ 
+@@ -3134,16 +3141,18 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+ {
+ 	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
++	customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
++	customer_metrics->buffer_virt_addr = NULL;
+ 
+ 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ 			       customer_metrics->buffer_len,
+ 			       customer_metrics->buffer_virt_addr,
+ 			       customer_metrics->buffer_dma_addr,
+ 			       customer_metrics->buffer_dma_handle);
+-	if (unlikely(customer_metrics->buffer_virt_addr == NULL))
++	if (unlikely(customer_metrics->buffer_virt_addr == NULL)) {
++		customer_metrics->buffer_len = 0;
+ 		return ENA_COM_NO_MEM;
+-
+-	customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c
+index dc846d2e84..f3962aa76e 100644
+--- a/dpdk/drivers/net/ena/ena_ethdev.c
++++ b/dpdk/drivers/net/ena/ena_ethdev.c
+@@ -37,10 +37,10 @@
+ #define ENA_MIN_RING_DESC	128
+ 
+ /*
+- * We should try to keep ENA_CLEANUP_BUF_SIZE lower than
++ * We should try to keep ENA_CLEANUP_BUF_THRESH lower than
+  * RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in mempool local cache.
+  */
+-#define ENA_CLEANUP_BUF_SIZE	256
++#define ENA_CLEANUP_BUF_THRESH	256
+ 
+ #define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)
+ 
+@@ -648,18 +648,13 @@ static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
+ 		packet_type |= RTE_PTYPE_L3_IPV6;
+ 	}
+ 
+-	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
++	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag ||
++		!(packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP))) {
+ 		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
+ 	} else {
+ 		if (unlikely(ena_rx_ctx->l4_csum_err)) {
+ 			++rx_stats->l4_csum_bad;
+-			/*
+-			 * For the L4 Rx checksum offload the HW may indicate
+-			 * bad checksum although it's valid. Because of that,
+-			 * we're setting the UNKNOWN flag to let the app
+-			 * re-verify the checksum.
+-			 */
+-			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
++			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+ 		} else {
+ 			++rx_stats->l4_csum_good;
+ 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+@@ -797,7 +792,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
+ 
+ 	rc = ena_com_set_host_attributes(ena_dev);
+ 	if (rc) {
+-		if (rc == -ENA_COM_UNSUPPORTED)
++		if (rc == ENA_COM_UNSUPPORTED)
+ 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
+ 		else
+ 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
+@@ -841,7 +836,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
+ 
+ 	rc = ena_com_set_host_attributes(&adapter->ena_dev);
+ 	if (rc) {
+-		if (rc == -ENA_COM_UNSUPPORTED)
++		if (rc == ENA_COM_UNSUPPORTED)
+ 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
+ 		else
+ 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
+@@ -3105,33 +3100,12 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
+ 	return 0;
+ }
+ 
+-static __rte_always_inline size_t
+-ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean,
+-			 struct rte_mbuf *mbuf,
+-			 size_t mbuf_cnt,
+-			 size_t buf_size)
+-{
+-	struct rte_mbuf *m_next;
+-
+-	while (mbuf != NULL) {
+-		m_next = mbuf->next;
+-		mbufs_to_clean[mbuf_cnt++] = mbuf;
+-		if (mbuf_cnt == buf_size) {
+-			rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean,
+-				(unsigned int)mbuf_cnt);
+-			mbuf_cnt = 0;
+-		}
+-		mbuf = m_next;
+-	}
+-
+-	return mbuf_cnt;
+-}
+-
+ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
+ {
+-	struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE];
++	struct rte_mbuf *pkts_to_clean[ENA_CLEANUP_BUF_THRESH];
+ 	struct ena_ring *tx_ring = (struct ena_ring *)txp;
+ 	size_t mbuf_cnt = 0;
++	size_t pkt_cnt = 0;
+ 	unsigned int total_tx_descs = 0;
+ 	unsigned int total_tx_pkts = 0;
+ 	uint16_t cleanup_budget;
+@@ -3162,8 +3136,13 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
+ 
+ 		mbuf = tx_info->mbuf;
+ 		if (fast_free) {
+-			mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt,
+-				ENA_CLEANUP_BUF_SIZE);
++			pkts_to_clean[pkt_cnt++] = mbuf;
++			mbuf_cnt += mbuf->nb_segs;
++			if (mbuf_cnt >= ENA_CLEANUP_BUF_THRESH) {
++				rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt);
++				mbuf_cnt = 0;
++				pkt_cnt = 0;
++			}
+ 		} else {
+ 			rte_pktmbuf_free(mbuf);
+ 		}
+@@ -3186,8 +3165,7 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
+ 	}
+ 
+ 	if (mbuf_cnt != 0)
+-		rte_mempool_put_bulk(mbufs_to_clean[0]->pool,
+-			(void **)mbufs_to_clean, mbuf_cnt);
++		rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt);
+ 
+ 	/* Notify completion handler that full cleanup was performed */
+ 	if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget)
+diff --git a/dpdk/drivers/net/failsafe/failsafe_args.c b/dpdk/drivers/net/failsafe/failsafe_args.c
+index b203e02d9a..3b867437d7 100644
+--- a/dpdk/drivers/net/failsafe/failsafe_args.c
++++ b/dpdk/drivers/net/failsafe/failsafe_args.c
+@@ -248,7 +248,7 @@ fs_parse_device_param(struct rte_eth_dev *dev, const char *param,
+ 			goto free_args;
+ 	} else {
+ 		ERROR("Unrecognized device type: %.*s", (int)b, param);
+-		return -EINVAL;
++		ret = -EINVAL;
+ 	}
+ free_args:
+ 	free(args);
+diff --git a/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/dpdk/drivers/net/fm10k/fm10k_ethdev.c
+index 4d3c4c10cf..cc2012786d 100644
+--- a/dpdk/drivers/net/fm10k/fm10k_ethdev.c
++++ b/dpdk/drivers/net/fm10k/fm10k_ethdev.c
+@@ -3057,7 +3057,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
+ 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ 	struct rte_intr_handle *intr_handle = pdev->intr_handle;
+-	int diag, i;
++	int diag, i, ret;
+ 	struct fm10k_macvlan_filter_info *macvlan;
+ 
+ 	PMD_INIT_FUNC_TRACE();
+@@ -3146,21 +3146,24 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
+ 	diag = fm10k_stats_reset(dev);
+ 	if (diag != 0) {
+ 		PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag);
+-		return diag;
++		ret = diag;
++		goto err_stat;
+ 	}
+ 
+ 	/* Reset the hw */
+ 	diag = fm10k_reset_hw(hw);
+ 	if (diag != FM10K_SUCCESS) {
+ 		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
+-		return -EIO;
++		ret = -EIO;
++		goto err_reset_hw;
+ 	}
+ 
+ 	/* Setup mailbox service */
+ 	diag = fm10k_setup_mbx_service(hw);
+ 	if (diag != FM10K_SUCCESS) {
+ 		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
+-		return -EIO;
++		ret = -EIO;
++		goto err_mbx;
+ 	}
+ 
+ 	/*PF/VF has different interrupt handling mechanism */
+@@ -3199,7 +3202,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
+ 
+ 		if (switch_ready == false) {
+ 			PMD_INIT_LOG(ERR, "switch is not ready");
+-			return -1;
++			ret = -1;
++			goto err_switch_ready;
+ 		}
+ 	}
+ 
+@@ -3234,7 +3238,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
+ 
+ 		if (!hw->mac.default_vid) {
+ 			PMD_INIT_LOG(ERR, "default VID is not ready");
+-			return -1;
++			ret = -1;
++			goto err_vid;
+ 		}
+ 	}
+ 
+@@ -3243,6 +3248,28 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
+ 		MAIN_VSI_POOL_NUMBER);
+ 
+ 	return 0;
++
++err_vid:
++err_switch_ready:
++	rte_intr_disable(intr_handle);
++
++	if (hw->mac.type == fm10k_mac_pf) {
++		fm10k_dev_disable_intr_pf(dev);
++		rte_intr_callback_unregister(intr_handle,
++			fm10k_dev_interrupt_handler_pf, (void *)dev);
++	} else {
++		fm10k_dev_disable_intr_vf(dev);
++		rte_intr_callback_unregister(intr_handle,
++			fm10k_dev_interrupt_handler_vf, (void *)dev);
++	}
++
++err_mbx:
++err_reset_hw:
++err_stat:
++	rte_free(dev->data->mac_addrs);
++	dev->data->mac_addrs = NULL;
++
++	return ret;
+ }
+ 
+ static int
+diff --git a/dpdk/drivers/net/gve/gve_rx_dqo.c b/dpdk/drivers/net/gve/gve_rx_dqo.c
+index 7c7a8c48d0..a56cdbf11b 100644
+--- a/dpdk/drivers/net/gve/gve_rx_dqo.c
++++ b/dpdk/drivers/net/gve/gve_rx_dqo.c
+@@ -127,7 +127,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 		rxm->ol_flags = 0;
+ 
+ 		rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+-		rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
++		rxm->hash.rss = rte_le_to_cpu_32(rx_desc->hash);
+ 
+ 		rx_pkts[nb_rx++] = rxm;
+ 		bytes += pkt_len;
+diff --git a/dpdk/drivers/net/gve/gve_tx.c b/dpdk/drivers/net/gve/gve_tx.c
+index 2e0d001109..bb21b90635 100644
+--- a/dpdk/drivers/net/gve/gve_tx.c
++++ b/dpdk/drivers/net/gve/gve_tx.c
+@@ -681,7 +681,7 @@ gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+ 
+ 	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
+ 
+-	dev->data->rx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
++	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/net/gve/gve_tx_dqo.c b/dpdk/drivers/net/gve/gve_tx_dqo.c
+index 16101de84f..97d9c6549b 100644
+--- a/dpdk/drivers/net/gve/gve_tx_dqo.c
++++ b/dpdk/drivers/net/gve/gve_tx_dqo.c
+@@ -13,7 +13,7 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq)
+ 	struct gve_tx_compl_desc *compl_desc;
+ 	struct gve_tx_queue *aim_txq;
+ 	uint16_t nb_desc_clean;
+-	struct rte_mbuf *txe;
++	struct rte_mbuf *txe, *txe_next;
+ 	uint16_t compl_tag;
+ 	uint16_t next;
+ 
+@@ -43,10 +43,15 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq)
+ 		PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_REINJECTION !!!");
+ 		/* FALLTHROUGH */
+ 	case GVE_COMPL_TYPE_DQO_PKT:
++		/* free all segments. */
+ 		txe = aim_txq->sw_ring[compl_tag];
+-		if (txe != NULL) {
++		while (txe != NULL) {
++			txe_next = txe->next;
+ 			rte_pktmbuf_free_seg(txe);
+-			txe = NULL;
++			if (aim_txq->sw_ring[compl_tag] == txe)
++				aim_txq->sw_ring[compl_tag] = NULL;
++			txe = txe_next;
++			compl_tag = (compl_tag + 1) & (aim_txq->sw_size - 1);
+ 		}
+ 		break;
+ 	case GVE_COMPL_TYPE_DQO_MISS:
+@@ -83,6 +88,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 	uint16_t tx_id;
+ 	uint16_t sw_id;
+ 	uint64_t bytes;
++	uint16_t first_sw_id;
+ 
+ 	sw_ring = txq->sw_ring;
+ 	txr = txq->tx_ring;
+@@ -107,23 +113,25 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 
+ 		ol_flags = tx_pkt->ol_flags;
+ 		nb_used = tx_pkt->nb_segs;
+-
++		first_sw_id = sw_id;
+ 		do {
+-			txd = &txr[tx_id];
++			if (sw_ring[sw_id] != NULL)
++				PMD_DRV_LOG(DEBUG, "Overwriting an entry in sw_ring");
+ 
++			txd = &txr[tx_id];
+ 			sw_ring[sw_id] = tx_pkt;
+ 
+ 			/* fill Tx descriptor */
+ 			txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
+ 			txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
+-			txd->pkt.compl_tag = rte_cpu_to_le_16(sw_id);
++			txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id);
+ 			txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO);
+ 
+ 			/* size of desc_ring and sw_ring could be different */
+ 			tx_id = (tx_id + 1) & mask;
+ 			sw_id = (sw_id + 1) & sw_mask;
+ 
+-			bytes += tx_pkt->pkt_len;
++			bytes += tx_pkt->data_len;
+ 			tx_pkt = tx_pkt->next;
+ 		} while (tx_pkt);
+ 
+@@ -384,7 +392,7 @@ gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+ 
+ 	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
+ 
+-	dev->data->rx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
++	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c
+index 2c1664485b..001ff49b36 100644
+--- a/dpdk/drivers/net/hns3/hns3_cmd.c
++++ b/dpdk/drivers/net/hns3/hns3_cmd.c
+@@ -545,7 +545,9 @@ hns3_set_dcb_capability(struct hns3_hw *hw)
+ 	if (device_id == HNS3_DEV_ID_25GE_RDMA ||
+ 	    device_id == HNS3_DEV_ID_50GE_RDMA ||
+ 	    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
+-	    device_id == HNS3_DEV_ID_200G_RDMA)
++	    device_id == HNS3_DEV_ID_200G_RDMA ||
++	    device_id == HNS3_DEV_ID_100G_ROH ||
++	    device_id == HNS3_DEV_ID_200G_ROH)
+ 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
+ }
+ 
+diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c
+index 8f224aa00c..5e6cdfdaa0 100644
+--- a/dpdk/drivers/net/hns3/hns3_common.c
++++ b/dpdk/drivers/net/hns3/hns3_common.c
+@@ -85,7 +85,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
+ 				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+ 				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
+ 
+-	if (!hw->port_base_vlan_cfg.state)
++	if (!hns->is_vf && !hw->port_base_vlan_cfg.state)
+ 		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
+ 
+ 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
+@@ -224,7 +224,7 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
+ static int
+ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
+ {
+-	uint32_t val;
++	uint64_t val;
+ 
+ 	RTE_SET_USED(key);
+ 
+diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c
+index 2831d3dc62..915e4eb768 100644
+--- a/dpdk/drivers/net/hns3/hns3_dcb.c
++++ b/dpdk/drivers/net/hns3/hns3_dcb.c
+@@ -1499,7 +1499,6 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
+ static int
+ hns3_dcb_hw_configure(struct hns3_adapter *hns)
+ {
+-	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+ 	struct hns3_pf *pf = &hns->pf;
+ 	struct hns3_hw *hw = &hns->hw;
+ 	enum hns3_fc_status fc_status = hw->current_fc_status;
+@@ -1519,12 +1518,8 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
+ 	}
+ 
+ 	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
+-		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+-		if (dcb_rx_conf->nb_tcs == 0)
+-			hw->dcb_info.pfc_en = 1; /* tc0 only */
+-		else
+-			hw->dcb_info.pfc_en =
+-			RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
++		hw->dcb_info.pfc_en =
++			RTE_LEN2MASK((uint8_t)HNS3_MAX_USER_PRIO, uint8_t);
+ 
+ 		hw->dcb_info.hw_pfc_map =
+ 				hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
+diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c
+index ae81368f68..6e72730d75 100644
+--- a/dpdk/drivers/net/hns3/hns3_ethdev.c
++++ b/dpdk/drivers/net/hns3/hns3_ethdev.c
+@@ -380,7 +380,7 @@ hns3_interrupt_handler(void *param)
+ 		hns3_warn(hw, "received reset interrupt");
+ 		hns3_schedule_reset(hns);
+ 	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
+-		hns3_dev_handle_mbx_msg(hw);
++		hns3pf_handle_mbx_msg(hw);
+ 	} else if (event_cause != HNS3_VECTOR0_EVENT_PTP) {
+ 		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
+ 			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
+@@ -2738,6 +2738,7 @@ hns3_get_capability(struct hns3_hw *hw)
+ 		hw->rss_info.ipv6_sctp_offload_supported = false;
+ 		hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
+ 		pf->support_multi_tc_pause = false;
++		hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64;
+ 		return 0;
+ 	}
+ 
+@@ -2758,6 +2759,7 @@ hns3_get_capability(struct hns3_hw *hw)
+ 	hw->rss_info.ipv6_sctp_offload_supported = true;
+ 	hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
+ 	pf->support_multi_tc_pause = true;
++	hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128;
+ 
+ 	return 0;
+ }
+@@ -5545,28 +5547,14 @@ is_pf_reset_done(struct hns3_hw *hw)
+ static enum hns3_reset_level
+ hns3_detect_reset_event(struct hns3_hw *hw)
+ {
+-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ 	enum hns3_reset_level new_req = HNS3_NONE_RESET;
+-	enum hns3_reset_level last_req;
+ 	uint32_t vector0_intr_state;
+ 
+-	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
+ 	vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
+-	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) {
+-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
++	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state)
+ 		new_req = HNS3_IMP_RESET;
+-	} else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) {
+-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
++	else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state)
+ 		new_req = HNS3_GLOBAL_RESET;
+-	}
+-
+-	if (new_req == HNS3_NONE_RESET)
+-		return HNS3_NONE_RESET;
+-
+-	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
+-		hns3_schedule_delayed_reset(hns);
+-		hns3_warn(hw, "High level reset detected, delay do reset");
+-	}
+ 
+ 	return new_req;
+ }
+@@ -5586,10 +5574,14 @@ hns3_is_reset_pending(struct hns3_adapter *hns)
+ 		return false;
+ 
+ 	new_req = hns3_detect_reset_event(hw);
++	if (new_req == HNS3_NONE_RESET)
++		return false;
++
+ 	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
+-	if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET &&
+-	    new_req < last_req) {
+-		hns3_warn(hw, "High level reset %d is pending", last_req);
++	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
++		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
++		hns3_schedule_delayed_reset(hns);
++		hns3_warn(hw, "High level reset detected, delaying the reset");
+ 		return true;
+ 	}
+ 	last_req = hns3_get_reset_level(hns, &hw->reset.request);
+@@ -6054,7 +6046,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
+ {
+ 	struct hns3_sfp_info_cmd *resp;
+ 	uint32_t tmp_fec_capa;
+-	uint8_t auto_state;
++	uint8_t auto_state = 0;
+ 	struct hns3_cmd_desc desc;
+ 	int ret;
+ 
+@@ -6658,6 +6650,8 @@ static const struct rte_pci_id pci_id_hns3_map[] = {
+ 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
+ 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
+ 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
++	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_ROH) },
++	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_ROH) },
+ 	{ .vendor_id = 0, }, /* sentinel */
+ };
+ 
+diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h
+index 12d8299def..c190d5109b 100644
+--- a/dpdk/drivers/net/hns3/hns3_ethdev.h
++++ b/dpdk/drivers/net/hns3/hns3_ethdev.h
+@@ -28,7 +28,9 @@
+ #define HNS3_DEV_ID_25GE_RDMA			0xA222
+ #define HNS3_DEV_ID_50GE_RDMA			0xA224
+ #define HNS3_DEV_ID_100G_RDMA_MACSEC		0xA226
++#define HNS3_DEV_ID_100G_ROH			0xA227
+ #define HNS3_DEV_ID_200G_RDMA			0xA228
++#define HNS3_DEV_ID_200G_ROH			0xA22C
+ #define HNS3_DEV_ID_100G_VF			0xA22E
+ #define HNS3_DEV_ID_100G_RDMA_PFC_VF		0xA22F
+ 
+@@ -485,6 +487,9 @@ struct hns3_queue_intr {
+ #define HNS3_PKTS_DROP_STATS_MODE1		0
+ #define HNS3_PKTS_DROP_STATS_MODE2		1
+ 
++#define HNS3_RX_DMA_ADDR_ALIGN_128	128
++#define HNS3_RX_DMA_ADDR_ALIGN_64	64
++
+ struct hns3_hw {
+ 	struct rte_eth_dev_data *data;
+ 	void *io_base;
+@@ -552,6 +557,11 @@ struct hns3_hw {
+ 	 * direction.
+ 	 */
+ 	uint8_t min_tx_pkt_len;
++	/*
++	 * The required alignment of the DMA address of the RX buffer.
++	 * See HNS3_RX_DMA_ADDR_ALIGN_XXX for available values.
++	 */
++	uint16_t rx_dma_addr_align;
+ 
+ 	struct hns3_queue_intr intr;
+ 	/*
+diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c
+index 916cc0fb1b..d4d691ad86 100644
+--- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c
++++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c
+@@ -91,11 +91,13 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+ {
+ 	/* mac address was checked by upper level interface */
+ 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
+-				HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
+-				RTE_ETHER_ADDR_LEN, false, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
++			 HNS3_MBX_MAC_VLAN_UC_ADD);
++	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret) {
+ 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ 				      mac_addr);
+@@ -110,12 +112,13 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+ {
+ 	/* mac address was checked by upper level interface */
+ 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
+-				HNS3_MBX_MAC_VLAN_UC_REMOVE,
+-				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
+-				false, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
++			 HNS3_MBX_MAC_VLAN_UC_REMOVE);
++	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret) {
+ 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ 				       mac_addr);
+@@ -134,6 +137,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
+ 	struct rte_ether_addr *old_addr;
+ 	uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
+ 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+ 	/*
+@@ -146,9 +150,10 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
+ 	memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
+ 	       RTE_ETHER_ADDR_LEN);
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
+-				HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
+-				HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
++			 HNS3_MBX_MAC_VLAN_UC_MODIFY);
++	memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN);
++	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
+ 	if (ret) {
+ 		/*
+ 		 * The hns3 VF PMD depends on the hns3 PF kernel ethdev
+@@ -185,12 +190,13 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
+ 		       struct rte_ether_addr *mac_addr)
+ {
+ 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
+-				HNS3_MBX_MAC_VLAN_MC_ADD,
+-				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
+-				NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST,
++			 HNS3_MBX_MAC_VLAN_MC_ADD);
++	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret) {
+ 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ 				      mac_addr);
+@@ -206,12 +212,13 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
+ 			  struct rte_ether_addr *mac_addr)
+ {
+ 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
+-				HNS3_MBX_MAC_VLAN_MC_REMOVE,
+-				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
+-				NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST,
++			 HNS3_MBX_MAC_VLAN_MC_REMOVE);
++	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret) {
+ 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ 				       mac_addr);
+@@ -254,11 +261,12 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
+ 	 *    the packets with vlan tag in promiscuous mode.
+ 	 */
+ 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
+-	req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
+-	req->msg[1] = en_bc_pmc ? 1 : 0;
+-	req->msg[2] = en_uc_pmc ? 1 : 0;
+-	req->msg[3] = en_mc_pmc ? 1 : 0;
+-	req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
++	req->msg.code = HNS3_MBX_SET_PROMISC_MODE;
++	req->msg.en_bc = en_bc_pmc ? 1 : 0;
++	req->msg.en_uc = en_uc_pmc ? 1 : 0;
++	req->msg.en_mc = en_mc_pmc ? 1 : 0;
++	req->msg.en_limit_promisc =
++		hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
+ 
+ 	ret = hns3_cmd_send(hw, &desc, 1);
+ 	if (ret)
+@@ -347,30 +355,26 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
+ 			     bool mmap, enum hns3_ring_type queue_type,
+ 			     uint16_t queue_id)
+ {
+-	struct hns3_vf_bind_vector_msg bind_msg;
++	struct hns3_vf_to_pf_msg req = {0};
+ 	const char *op_str;
+-	uint16_t code;
+ 	int ret;
+ 
+-	memset(&bind_msg, 0, sizeof(bind_msg));
+-	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
++	req.code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+ 		HNS3_MBX_UNMAP_RING_TO_VECTOR;
+-	bind_msg.vector_id = (uint8_t)vector_id;
++	req.vector_id = (uint8_t)vector_id;
++	req.ring_num = 1;
+ 
+ 	if (queue_type == HNS3_RING_TYPE_RX)
+-		bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
++		req.ring_param[0].int_gl_index = HNS3_RING_GL_RX;
+ 	else
+-		bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
+-
+-	bind_msg.param[0].ring_type = queue_type;
+-	bind_msg.ring_num = 1;
+-	bind_msg.param[0].tqp_index = queue_id;
++		req.ring_param[0].int_gl_index = HNS3_RING_GL_TX;
++	req.ring_param[0].ring_type = queue_type;
++	req.ring_param[0].tqp_index = queue_id;
+ 	op_str = mmap ? "Map" : "Unmap";
+-	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+-				sizeof(bind_msg), false, NULL, 0);
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret)
+-		hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
+-			 op_str, queue_id, bind_msg.vector_id, ret);
++		hns3_err(hw, "%s TQP %u failed, vector_id is %u, ret = %d.",
++			 op_str, queue_id, req.vector_id, ret);
+ 
+ 	return ret;
+ }
+@@ -453,10 +457,12 @@ cfg_err:
+ static int
+ hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
+ {
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
+-				sizeof(mtu), true, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0);
++	memcpy(req.data, &mtu, sizeof(mtu));
++	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
+ 	if (ret)
+ 		hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
+ 
+@@ -563,13 +569,8 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
+ 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
+ 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
+ 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
+-		if (clearval) {
+-			hw->reset.stats.global_cnt++;
+-			hns3_warn(hw, "Global reset detected, clear reset status");
+-		} else {
+-			hns3_schedule_delayed_reset(hns);
+-			hns3_warn(hw, "Global reset detected, don't clear reset status");
+-		}
++		hw->reset.stats.global_cnt++;
++		hns3_warn(hw, "Global reset detected, clear reset status");
+ 
+ 		ret = HNS3VF_VECTOR0_EVENT_RST;
+ 		goto out;
+@@ -584,9 +585,9 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
+ 
+ 	val = 0;
+ 	ret = HNS3VF_VECTOR0_EVENT_OTHER;
++
+ out:
+-	if (clearval)
+-		*clearval = val;
++	*clearval = val;
+ 	return ret;
+ }
+ 
+@@ -612,7 +613,7 @@ hns3vf_interrupt_handler(void *param)
+ 		hns3_schedule_reset(hns);
+ 		break;
+ 	case HNS3VF_VECTOR0_EVENT_MBX:
+-		hns3_dev_handle_mbx_msg(hw);
++		hns3vf_handle_mbx_msg(hw);
+ 		break;
+ 	default:
+ 		break;
+@@ -647,12 +648,13 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
+ 	uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
+ 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
+ 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
++	struct hns3_vf_to_pf_msg req;
+ 
+ 	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ 			 __ATOMIC_RELEASE);
+ 
+-	(void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
+-				NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
++	(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 
+ 	while (remain_ms > 0) {
+ 		rte_delay_ms(HNS3_POLL_RESPONE_MS);
+@@ -663,7 +665,7 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
+ 		 * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE
+ 		 * mailbox from PF driver to get this capability.
+ 		 */
+-		hns3_dev_handle_mbx_msg(hw);
++		hns3vf_handle_mbx_msg(hw);
+ 		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
+ 			break;
+@@ -705,6 +707,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
+ 		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
+ 		hw->rss_info.ipv6_sctp_offload_supported = false;
+ 		hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
++		hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64;
+ 		return 0;
+ 	}
+ 
+@@ -722,6 +725,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
+ 	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
+ 	hw->rss_info.ipv6_sctp_offload_supported = true;
+ 	hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
++	hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128;
+ 
+ 	return 0;
+ }
+@@ -747,12 +751,13 @@ hns3vf_check_tqp_info(struct hns3_hw *hw)
+ static int
+ hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
+ {
++	struct hns3_vf_to_pf_msg req;
+ 	uint8_t resp_msg;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
+-				HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
+-				true, &resp_msg, sizeof(resp_msg));
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
++			 HNS3_MBX_GET_PORT_BASE_VLAN_STATE);
++	ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg));
+ 	if (ret) {
+ 		if (ret == -ETIME) {
+ 			/*
+@@ -793,10 +798,12 @@ hns3vf_get_queue_info(struct hns3_hw *hw)
+ {
+ #define HNS3VF_TQPS_RSS_INFO_LEN	6
+ 	uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
+-				resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
++	hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0);
++	ret = hns3vf_mbx_send(hw, &req, true,
++			      resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
+ 	if (ret) {
+ 		PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
+ 		return ret;
+@@ -834,10 +841,11 @@ hns3vf_get_basic_info(struct hns3_hw *hw)
+ {
+ 	uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
+ 	struct hns3_basic_info *basic_info;
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
+-				true, resp_msg, sizeof(resp_msg));
++	hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0);
++	ret = hns3vf_mbx_send(hw, &req, true, resp_msg, sizeof(resp_msg));
+ 	if (ret) {
+ 		hns3_err(hw, "failed to get basic info from PF, ret = %d.",
+ 				ret);
+@@ -857,10 +865,11 @@ static int
+ hns3vf_get_host_mac_addr(struct hns3_hw *hw)
+ {
+ 	uint8_t host_mac[RTE_ETHER_ADDR_LEN];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
+-				true, host_mac, RTE_ETHER_ADDR_LEN);
++	hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0);
++	ret = hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN);
+ 	if (ret) {
+ 		hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
+ 		return ret;
+@@ -909,6 +918,7 @@ static void
+ hns3vf_request_link_info(struct hns3_hw *hw)
+ {
+ 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
++	struct hns3_vf_to_pf_msg req;
+ 	bool send_req;
+ 	int ret;
+ 
+@@ -920,8 +930,8 @@ hns3vf_request_link_info(struct hns3_hw *hw)
+ 	if (!send_req)
+ 		return;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
+-				NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret) {
+ 		hns3_err(hw, "failed to fetch link status, ret = %d", ret);
+ 		return;
+@@ -965,19 +975,18 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
+ static int
+ hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
+ {
+-#define HNS3VF_VLAN_MBX_MSG_LEN 5
++	struct hns3_mbx_vlan_filter *vlan_filter;
++	struct hns3_vf_to_pf_msg req = {0};
+ 	struct hns3_hw *hw = &hns->hw;
+-	uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
+-	uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
+-	uint8_t is_kill = on ? 0 : 1;
+ 
+-	msg_data[0] = is_kill;
+-	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
+-	memcpy(&msg_data[3], &proto, sizeof(proto));
++	req.code = HNS3_MBX_SET_VLAN;
++	req.subcode = HNS3_MBX_VLAN_FILTER;
++	vlan_filter = (struct hns3_mbx_vlan_filter *)req.data;
++	vlan_filter->is_kill = on ? 0 : 1;
++	vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN);
++	vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id);
+ 
+-	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
+-				 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
+-				 0);
++	return hns3vf_mbx_send(hw, &req, true, NULL, 0);
+ }
+ 
+ static int
+@@ -1006,6 +1015,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+ static int
+ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
+ {
++	struct hns3_vf_to_pf_msg req;
+ 	uint8_t msg_data;
+ 	int ret;
+ 
+@@ -1013,9 +1023,10 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
+ 		return 0;
+ 
+ 	msg_data = enable ? 1 : 0;
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
+-			HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
+-			sizeof(msg_data), true, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
++			 HNS3_MBX_ENABLE_VLAN_FILTER);
++	memcpy(req.data, &msg_data, sizeof(msg_data));
++	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
+ 	if (ret)
+ 		hns3_err(hw, "%s vlan filter failed, ret = %d.",
+ 				enable ? "enable" : "disable", ret);
+@@ -1026,12 +1037,15 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
+ static int
+ hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
+ {
++	struct hns3_vf_to_pf_msg req;
+ 	uint8_t msg_data;
+ 	int ret;
+ 
+ 	msg_data = enable ? 1 : 0;
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
+-				&msg_data, sizeof(msg_data), false, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
++			 HNS3_MBX_VLAN_RX_OFF_CFG);
++	memcpy(req.data, &msg_data, sizeof(msg_data));
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret)
+ 		hns3_err(hw, "vf %s strip failed, ret = %d.",
+ 				enable ? "enable" : "disable", ret);
+@@ -1175,11 +1189,13 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
+ static int
+ hns3vf_set_alive(struct hns3_hw *hw, bool alive)
+ {
++	struct hns3_vf_to_pf_msg req;
+ 	uint8_t msg_data;
+ 
+ 	msg_data = alive ? 1 : 0;
+-	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
+-				 sizeof(msg_data), false, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0);
++	memcpy(req.data, &msg_data, sizeof(msg_data));
++	return hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ }
+ 
+ static void
+@@ -1187,11 +1203,12 @@ hns3vf_keep_alive_handler(void *param)
+ {
+ 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ 	struct hns3_adapter *hns = eth_dev->data->dev_private;
++	struct hns3_vf_to_pf_msg req;
+ 	struct hns3_hw *hw = &hns->hw;
+ 	int ret;
+ 
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
+-				false, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0);
++	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ 	if (ret)
+ 		hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
+ 			 ret);
+@@ -1330,9 +1347,11 @@ err_init_hardware:
+ static int
+ hns3vf_clear_vport_list(struct hns3_hw *hw)
+ {
+-	return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
+-				 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
+-				 NULL, 0);
++	struct hns3_vf_to_pf_msg req;
++
++	hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL,
++			 HNS3_MBX_VPORT_LIST_CLEAR);
++	return hns3vf_mbx_send(hw, &req, false, NULL, 0);
+ }
+ 
+ static int
+@@ -1709,11 +1728,25 @@ is_vf_reset_done(struct hns3_hw *hw)
+ 	return true;
+ }
+ 
++static enum hns3_reset_level
++hns3vf_detect_reset_event(struct hns3_hw *hw)
++{
++	enum hns3_reset_level reset = HNS3_NONE_RESET;
++	uint32_t cmdq_stat_reg;
++
++	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
++	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg)
++		reset = HNS3_VF_RESET;
++
++	return reset;
++}
++
+ bool
+ hns3vf_is_reset_pending(struct hns3_adapter *hns)
+ {
++	enum hns3_reset_level last_req;
+ 	struct hns3_hw *hw = &hns->hw;
+-	enum hns3_reset_level reset;
++	enum hns3_reset_level new_req;
+ 
+ 	/*
+ 	 * According to the protocol of PCIe, FLR to a PF device resets the PF
+@@ -1736,13 +1769,18 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns)
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return false;
+ 
+-	hns3vf_check_event_cause(hns, NULL);
+-	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
+-	if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
+-	    hw->reset.level < reset) {
+-		hns3_warn(hw, "High level reset %d is pending", reset);
++	new_req = hns3vf_detect_reset_event(hw);
++	if (new_req == HNS3_NONE_RESET)
++		return false;
++
++	last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
++	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
++		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
++		hns3_schedule_delayed_reset(hns);
++		hns3_warn(hw, "High level reset detected, delaying the reset");
+ 		return true;
+ 	}
++
+ 	return false;
+ }
+ 
+@@ -1801,12 +1839,13 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
+ static int
+ hns3vf_prepare_reset(struct hns3_adapter *hns)
+ {
++	struct hns3_vf_to_pf_msg req;
+ 	struct hns3_hw *hw = &hns->hw;
+ 	int ret;
+ 
+ 	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
+-		ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
+-					0, true, NULL, 0);
++		hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0);
++		ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c
+index f1743c195e..9cdbc1668a 100644
+--- a/dpdk/drivers/net/hns3/hns3_mbx.c
++++ b/dpdk/drivers/net/hns3/hns3_mbx.c
+@@ -11,8 +11,6 @@
+ #include "hns3_intr.h"
+ #include "hns3_rxtx.h"
+ 
+-#define HNS3_CMD_CODE_OFFSET		2
+-
+ static const struct errno_respcode_map err_code_map[] = {
+ 	{0, 0},
+ 	{1, -EPERM},
+@@ -26,6 +24,14 @@ static const struct errno_respcode_map err_code_map[] = {
+ 	{95, -EOPNOTSUPP},
+ };
+ 
++void
++hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, uint8_t code, uint8_t subcode)
++{
++	memset(req, 0, sizeof(struct hns3_vf_to_pf_msg));
++	req->code = code;
++	req->subcode = subcode;
++}
++
+ static int
+ hns3_resp_to_errno(uint16_t resp_code)
+ {
+@@ -72,7 +78,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
+ 			return -EIO;
+ 		}
+ 
+-		hns3_dev_handle_mbx_msg(hw);
++		hns3vf_handle_mbx_msg(hw);
+ 		rte_delay_us(HNS3_WAIT_RESP_US);
+ 
+ 		if (hw->mbx_resp.received_match_resp)
+@@ -120,44 +126,24 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
+ }
+ 
+ int
+-hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
+-		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
+-		  uint8_t *resp_data, uint16_t resp_len)
++hns3vf_mbx_send(struct hns3_hw *hw,
++		struct hns3_vf_to_pf_msg *req, bool need_resp,
++		uint8_t *resp_data, uint16_t resp_len)
+ {
+-	struct hns3_mbx_vf_to_pf_cmd *req;
++	struct hns3_mbx_vf_to_pf_cmd *cmd;
+ 	struct hns3_cmd_desc desc;
+-	bool is_ring_vector_msg;
+-	int offset;
+ 	int ret;
+ 
+-	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
+-
+-	/* first two bytes are reserved for code & subcode */
+-	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
+-		hns3_err(hw,
+-			 "VF send mbx msg fail, msg len %u exceeds max payload len %d",
+-			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
+-		return -EINVAL;
+-	}
+-
+ 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
+-	req->msg[0] = code;
+-	is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
+-			     (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
+-			     (code == HNS3_MBX_GET_RING_VECTOR_MAP);
+-	if (!is_ring_vector_msg)
+-		req->msg[1] = subcode;
+-	if (msg_data) {
+-		offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
+-		memcpy(&req->msg[offset], msg_data, msg_len);
+-	}
++	cmd = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
++	cmd->msg = *req;
+ 
+ 	/* synchronous send */
+ 	if (need_resp) {
+-		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
++		cmd->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
+ 		rte_spinlock_lock(&hw->mbx_resp.lock);
+-		hns3_mbx_prepare_resp(hw, code, subcode);
+-		req->match_id = hw->mbx_resp.match_id;
++		hns3_mbx_prepare_resp(hw, req->code, req->subcode);
++		cmd->match_id = hw->mbx_resp.match_id;
+ 		ret = hns3_cmd_send(hw, &desc, 1);
+ 		if (ret) {
+ 			rte_spinlock_unlock(&hw->mbx_resp.lock);
+@@ -166,7 +152,8 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
+ 			return ret;
+ 		}
+ 
+-		ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
++		ret = hns3_get_mbx_resp(hw, req->code, req->subcode,
++					resp_data, resp_len);
+ 		rte_spinlock_unlock(&hw->mbx_resp.lock);
+ 	} else {
+ 		/* asynchronous send */
+@@ -193,17 +180,17 @@ static void
+ hns3vf_handle_link_change_event(struct hns3_hw *hw,
+ 				struct hns3_mbx_pf_to_vf_cmd *req)
+ {
++	struct hns3_mbx_link_status *link_info =
++		(struct hns3_mbx_link_status *)req->msg.msg_data;
+ 	uint8_t link_status, link_duplex;
+-	uint16_t *msg_q = req->msg;
+ 	uint8_t support_push_lsc;
+ 	uint32_t link_speed;
+ 
+-	memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
+-	link_status = rte_le_to_cpu_16(msg_q[1]);
+-	link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
+-	hns3vf_update_link_status(hw, link_status, link_speed,
+-				  link_duplex);
+-	support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u;
++	link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status);
++	link_speed = rte_le_to_cpu_32(link_info->speed);
++	link_duplex = (uint8_t)rte_le_to_cpu_16(link_info->duplex);
++	hns3vf_update_link_status(hw, link_status, link_speed, link_duplex);
++	support_push_lsc = (link_info->flag) & 1u;
+ 	hns3vf_update_push_lsc_cap(hw, support_push_lsc);
+ }
+ 
+@@ -212,7 +199,6 @@ hns3_handle_asserting_reset(struct hns3_hw *hw,
+ 			    struct hns3_mbx_pf_to_vf_cmd *req)
+ {
+ 	enum hns3_reset_level reset_level;
+-	uint16_t *msg_q = req->msg;
+ 
+ 	/*
+ 	 * PF has asserted reset hence VF should go in pending
+@@ -220,7 +206,7 @@ hns3_handle_asserting_reset(struct hns3_hw *hw,
+ 	 * has been completely reset. After this stack should
+ 	 * eventually be re-initialized.
+ 	 */
+-	reset_level = rte_le_to_cpu_16(msg_q[1]);
++	reset_level = rte_le_to_cpu_16(req->msg.reset_level);
+ 	hns3_atomic_set_bit(reset_level, &hw->reset.pending);
+ 
+ 	hns3_warn(hw, "PF inform reset level %d", reset_level);
+@@ -242,8 +228,9 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
+ 		 * to match the request.
+ 		 */
+ 		if (req->match_id == resp->match_id) {
+-			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
+-			memcpy(resp->additional_info, &req->msg[4],
++			resp->resp_status =
++				hns3_resp_to_errno(req->msg.resp_status);
++			memcpy(resp->additional_info, &req->msg.resp_data,
+ 			       HNS3_MBX_MAX_RESP_DATA_SIZE);
+ 			rte_io_wmb();
+ 			resp->received_match_resp = true;
+@@ -256,7 +243,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
+ 	 * support copy request's match_id to its response. So VF follows the
+ 	 * original scheme to process.
+ 	 */
+-	msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2];
++	msg_data = (uint32_t)req->msg.vf_mbx_msg_code <<
++			HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode;
+ 	if (resp->req_msg_data != msg_data) {
+ 		hns3_warn(hw,
+ 			"received response tag (%u) is mismatched with requested tag (%u)",
+@@ -264,8 +252,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
+ 		return;
+ 	}
+ 
+-	resp->resp_status = hns3_resp_to_errno(req->msg[3]);
+-	memcpy(resp->additional_info, &req->msg[4],
++	resp->resp_status = hns3_resp_to_errno(req->msg.resp_status);
++	memcpy(resp->additional_info, &req->msg.resp_data,
+ 	       HNS3_MBX_MAX_RESP_DATA_SIZE);
+ 	rte_io_wmb();
+ 	resp->received_match_resp = true;
+@@ -296,11 +284,8 @@ static void
+ hns3pf_handle_link_change_event(struct hns3_hw *hw,
+ 				struct hns3_mbx_vf_to_pf_cmd *req)
+ {
+-#define LINK_STATUS_OFFSET     1
+-#define LINK_FAIL_CODE_OFFSET  2
+-
+-	if (!req->msg[LINK_STATUS_OFFSET])
+-		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);
++	if (!req->msg.link_status)
++		hns3_link_fail_parse(hw, req->msg.link_fail_code);
+ 
+ 	hns3_update_linkstatus_and_event(hw, true);
+ }
+@@ -309,8 +294,7 @@ static void
+ hns3_update_port_base_vlan_info(struct hns3_hw *hw,
+ 				struct hns3_mbx_pf_to_vf_cmd *req)
+ {
+-#define PVID_STATE_OFFSET	1
+-	uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
++	uint16_t new_pvid_state = req->msg.pvid_state ?
+ 		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
+ 	/*
+ 	 * Currently, hardware doesn't support more than two layers VLAN offload
+@@ -359,7 +343,7 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
+ 	while (next_to_use != tail) {
+ 		desc = &crq->desc[next_to_use];
+ 		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
+-		opcode = req->msg[0] & 0xff;
++		opcode = req->msg.code & 0xff;
+ 
+ 		flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
+ 		if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
+@@ -388,9 +372,57 @@ scan_next:
+ }
+ 
+ void
+-hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
++hns3pf_handle_mbx_msg(struct hns3_hw *hw)
++{
++	struct hns3_cmq_ring *crq = &hw->cmq.crq;
++	struct hns3_mbx_vf_to_pf_cmd *req;
++	struct hns3_cmd_desc *desc;
++	uint16_t flag;
++
++	rte_spinlock_lock(&hw->cmq.crq.lock);
++
++	while (!hns3_cmd_crq_empty(hw)) {
++		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
++			rte_spinlock_unlock(&hw->cmq.crq.lock);
++			return;
++		}
++		desc = &crq->desc[crq->next_to_use];
++		req = (struct hns3_mbx_vf_to_pf_cmd *)desc->data;
++
++		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
++		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
++			hns3_warn(hw,
++				  "dropped invalid mailbox message, code = %u",
++				  req->msg.code);
++
++			/* dropping/not processing this invalid message */
++			crq->desc[crq->next_to_use].flag = 0;
++			hns3_mbx_ring_ptr_move_crq(crq);
++			continue;
++		}
++
++		switch (req->msg.code) {
++		case HNS3_MBX_PUSH_LINK_STATUS:
++			hns3pf_handle_link_change_event(hw, req);
++			break;
++		default:
++			hns3_err(hw, "received unsupported(%u) mbx msg",
++				 req->msg.code);
++			break;
++		}
++		crq->desc[crq->next_to_use].flag = 0;
++		hns3_mbx_ring_ptr_move_crq(crq);
++	}
++
++	/* Write back CMDQ_RQ header pointer, IMP needs this pointer */
++	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
++
++	rte_spinlock_unlock(&hw->cmq.crq.lock);
++}
++
++void
++hns3vf_handle_mbx_msg(struct hns3_hw *hw)
+ {
+-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ 	struct hns3_cmq_ring *crq = &hw->cmq.crq;
+ 	struct hns3_mbx_pf_to_vf_cmd *req;
+ 	struct hns3_cmd_desc *desc;
+@@ -401,7 +433,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
+ 	rte_spinlock_lock(&hw->cmq.crq.lock);
+ 
+ 	handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+-		      !rte_thread_is_intr()) && hns->is_vf;
++		      !rte_thread_is_intr());
+ 	if (handle_out) {
+ 		/*
+ 		 * Currently, any threads in the primary and secondary processes
+@@ -432,7 +464,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
+ 
+ 		desc = &crq->desc[crq->next_to_use];
+ 		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
+-		opcode = req->msg[0] & 0xff;
++		opcode = req->msg.code & 0xff;
+ 
+ 		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
+ 		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
+@@ -446,8 +478,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
+ 			continue;
+ 		}
+ 
+-		handle_out = hns->is_vf && desc->opcode == 0;
+-		if (handle_out) {
++		if (desc->opcode == 0) {
+ 			/* Message already processed by other thread */
+ 			crq->desc[crq->next_to_use].flag = 0;
+ 			hns3_mbx_ring_ptr_move_crq(crq);
+@@ -464,16 +495,6 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
+ 		case HNS3_MBX_ASSERTING_RESET:
+ 			hns3_handle_asserting_reset(hw, req);
+ 			break;
+-		case HNS3_MBX_PUSH_LINK_STATUS:
+-			/*
+-			 * This message is reported by the firmware and is
+-			 * reported in 'struct hns3_mbx_vf_to_pf_cmd' format.
+-			 * Therefore, we should cast the req variable to
+-			 * 'struct hns3_mbx_vf_to_pf_cmd' and then process it.
+-			 */
+-			hns3pf_handle_link_change_event(hw,
+-				(struct hns3_mbx_vf_to_pf_cmd *)req);
+-			break;
+ 		case HNS3_MBX_PUSH_VLAN_INFO:
+ 			/*
+ 			 * When the PVID configuration status of VF device is
+@@ -488,7 +509,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
+ 			 * hns3 PF kernel driver, VF driver will receive this
+ 			 * mailbox message from PF driver.
+ 			 */
+-			hns3_handle_promisc_info(hw, req->msg[1]);
++			hns3_handle_promisc_info(hw, req->msg.promisc_en);
+ 			break;
+ 		default:
+ 			hns3_err(hw, "received unsupported(%u) mbx msg",
+diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h
+index 4a328802b9..2b6cb8f513 100644
+--- a/dpdk/drivers/net/hns3/hns3_mbx.h
++++ b/dpdk/drivers/net/hns3/hns3_mbx.h
+@@ -89,7 +89,6 @@ enum hns3_mbx_link_fail_subcode {
+ 	HNS3_MBX_LF_XSFP_ABSENT,
+ };
+ 
+-#define HNS3_MBX_MAX_MSG_SIZE	16
+ #define HNS3_MBX_MAX_RESP_DATA_SIZE	8
+ #define HNS3_MBX_DEF_TIME_LIMIT_MS	500
+ 
+@@ -107,6 +106,69 @@ struct hns3_mbx_resp_status {
+ 	uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE];
+ };
+ 
++struct hns3_ring_chain_param {
++	uint8_t ring_type;
++	uint8_t tqp_index;
++	uint8_t int_gl_index;
++};
++
++struct hns3_mbx_vlan_filter {
++	uint8_t is_kill;
++	uint16_t vlan_id;
++	uint16_t proto;
++} __rte_packed;
++
++struct hns3_mbx_link_status {
++	uint16_t link_status;
++	uint32_t speed;
++	uint16_t duplex;
++	uint8_t flag;
++} __rte_packed;
++
++#define HNS3_MBX_MSG_MAX_DATA_SIZE	14
++#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM	4
++struct hns3_vf_to_pf_msg {
++	uint8_t code;
++	union {
++		struct {
++			uint8_t subcode;
++			uint8_t data[HNS3_MBX_MSG_MAX_DATA_SIZE];
++		};
++		struct {
++			uint8_t en_bc;
++			uint8_t en_uc;
++			uint8_t en_mc;
++			uint8_t en_limit_promisc;
++		};
++		struct {
++			uint8_t vector_id;
++			uint8_t ring_num;
++			struct hns3_ring_chain_param
++				ring_param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM];
++		};
++		struct {
++			uint8_t link_status;
++			uint8_t link_fail_code;
++		};
++	};
++};
++
++struct hns3_pf_to_vf_msg {
++	uint16_t code;
++	union {
++		struct {
++			uint16_t vf_mbx_msg_code;
++			uint16_t vf_mbx_msg_subcode;
++			uint16_t resp_status;
++			uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE];
++		};
++		uint16_t promisc_en;
++		uint16_t reset_level;
++		uint16_t pvid_state;
++		uint8_t msg_data[HNS3_MBX_MSG_MAX_DATA_SIZE];
++	};
++};
++
+ struct errno_respcode_map {
+ 	uint16_t resp_code;
+ 	int err_no;
+@@ -122,7 +184,7 @@ struct hns3_mbx_vf_to_pf_cmd {
+ 	uint8_t msg_len;
+ 	uint8_t rsv2;
+ 	uint16_t match_id;
+-	uint8_t msg[HNS3_MBX_MAX_MSG_SIZE];
++	struct hns3_vf_to_pf_msg msg;
+ };
+ 
+ struct hns3_mbx_pf_to_vf_cmd {
+@@ -131,20 +193,7 @@ struct hns3_mbx_pf_to_vf_cmd {
+ 	uint8_t msg_len;
+ 	uint8_t rsv1;
+ 	uint16_t match_id;
+-	uint16_t msg[8];
+-};
+-
+-struct hns3_ring_chain_param {
+-	uint8_t ring_type;
+-	uint8_t tqp_index;
+-	uint8_t int_gl_index;
+-};
+-
+-#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM	4
+-struct hns3_vf_bind_vector_msg {
+-	uint8_t vector_id;
+-	uint8_t ring_num;
+-	struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM];
++	struct hns3_pf_to_vf_msg msg;
+ };
+ 
+ struct hns3_pf_rst_done_cmd {
+@@ -158,8 +207,11 @@ struct hns3_pf_rst_done_cmd {
+ 	((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num)
+ 
+ struct hns3_hw;
+-void hns3_dev_handle_mbx_msg(struct hns3_hw *hw);
+-int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
+-		      const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
+-		      uint8_t *resp_data, uint16_t resp_len);
++void hns3pf_handle_mbx_msg(struct hns3_hw *hw);
++void hns3vf_handle_mbx_msg(struct hns3_hw *hw);
++void hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req,
++		      uint8_t code, uint8_t subcode);
++int hns3vf_mbx_send(struct hns3_hw *hw,
++		    struct hns3_vf_to_pf_msg *req_msg, bool need_resp,
++		    uint8_t *resp_data, uint16_t resp_len);
+ #endif /* HNS3_MBX_H */
+diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c
+index 15feb26043..3eae4caf52 100644
+--- a/dpdk/drivers/net/hns3/hns3_rss.c
++++ b/dpdk/drivers/net/hns3/hns3_rss.c
+@@ -153,8 +153,7 @@ static const struct {
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
+-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
+-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER),
++	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D),
+ 	  HNS3_RSS_TUPLE_IPV4_SCTP_M },
+ 
+ 	/* IPV6-FRAG */
+@@ -274,8 +273,7 @@ static const struct {
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
+ 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) |
+-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) |
+-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER),
++	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S),
+ 	  HNS3_RSS_TUPLE_IPV6_SCTP_M },
+ };
+ 
+diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h
+index 9d182a8025..0755760b45 100644
+--- a/dpdk/drivers/net/hns3/hns3_rss.h
++++ b/dpdk/drivers/net/hns3/hns3_rss.h
+@@ -49,7 +49,6 @@ enum hns3_tuple_field {
+ 	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S,
+ 	HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D,
+ 	HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S,
+-	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER,
+ 
+ 	/* IPV4 ENABLE FIELD */
+ 	HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24,
+@@ -74,7 +73,6 @@ enum hns3_tuple_field {
+ 	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S,
+ 	HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D,
+ 	HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S,
+-	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER,
+ 
+ 	/* IPV6 ENABLE FIELD */
+ 	HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56,
+@@ -96,12 +94,12 @@ enum hns3_tuple_field {
+ 
+ #define HNS3_RSS_TUPLE_IPV4_TCP_M	GENMASK(3, 0)
+ #define HNS3_RSS_TUPLE_IPV4_UDP_M	GENMASK(11, 8)
+-#define HNS3_RSS_TUPLE_IPV4_SCTP_M	GENMASK(20, 16)
++#define HNS3_RSS_TUPLE_IPV4_SCTP_M	GENMASK(19, 16)
+ #define HNS3_RSS_TUPLE_IPV4_NONF_M	GENMASK(25, 24)
+ #define HNS3_RSS_TUPLE_IPV4_FLAG_M	GENMASK(27, 26)
+ #define HNS3_RSS_TUPLE_IPV6_TCP_M	GENMASK(35, 32)
+ #define HNS3_RSS_TUPLE_IPV6_UDP_M	GENMASK(43, 40)
+-#define HNS3_RSS_TUPLE_IPV6_SCTP_M	GENMASK(52, 48)
++#define HNS3_RSS_TUPLE_IPV6_SCTP_M	GENMASK(51, 48)
+ #define HNS3_RSS_TUPLE_IPV6_NONF_M	GENMASK(57, 56)
+ #define HNS3_RSS_TUPLE_IPV6_FLAG_M	GENMASK(59, 58)
+ 
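The RSS hunks above drop the SCTP_VER tuple bit, which shrinks each SCTP field mask by one bit (GENMASK(20, 16) down to GENMASK(19, 16), and likewise for IPv6). A small sketch of how such bit-range masks expand, using a local stand-in for the GENMASK helper that mirrors the usual kernel/DPDK definition:

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for GENMASK(h, l): set bits h..l inclusive in a
 * 64-bit word (mirrors the common kernel/DPDK definition). */
#define BITS_PER_LONG_LONG 64
#define GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
    /* Old 5-bit IPv4 SCTP tuple mask vs. the corrected 4-bit one. */
    uint64_t old_mask = GENMASK_ULL(20, 16);
    uint64_t new_mask = GENMASK_ULL(19, 16);

    printf("old: 0x%llx (bits 16-20)\n", (unsigned long long)old_mask);
    printf("new: 0x%llx (bits 16-19)\n", (unsigned long long)new_mask);
    return 0;
}
```
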
+diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c
+index 09b7e90c70..8d0db134d2 100644
+--- a/dpdk/drivers/net/hns3/hns3_rxtx.c
++++ b/dpdk/drivers/net/hns3/hns3_rxtx.c
+@@ -86,9 +86,14 @@ hns3_rx_queue_release(void *queue)
+ 	struct hns3_rx_queue *rxq = queue;
+ 	if (rxq) {
+ 		hns3_rx_queue_release_mbufs(rxq);
+-		if (rxq->mz)
++		if (rxq->mz) {
+ 			rte_memzone_free(rxq->mz);
+-		rte_free(rxq->sw_ring);
++			rxq->mz = NULL;
++		}
++		if (rxq->sw_ring) {
++			rte_free(rxq->sw_ring);
++			rxq->sw_ring = NULL;
++		}
+ 		rte_free(rxq);
+ 	}
+ }
+@@ -99,10 +104,18 @@ hns3_tx_queue_release(void *queue)
+ 	struct hns3_tx_queue *txq = queue;
+ 	if (txq) {
+ 		hns3_tx_queue_release_mbufs(txq);
+-		if (txq->mz)
++		if (txq->mz) {
+ 			rte_memzone_free(txq->mz);
+-		rte_free(txq->sw_ring);
+-		rte_free(txq->free);
++			txq->mz = NULL;
++		}
++		if (txq->sw_ring) {
++			rte_free(txq->sw_ring);
++			txq->sw_ring = NULL;
++		}
++		if (txq->free) {
++			rte_free(txq->free);
++			txq->free = NULL;
++		}
+ 		rte_free(txq);
+ 	}
+ }
+@@ -260,12 +273,27 @@ hns3_free_all_queues(struct rte_eth_dev *dev)
+ 	hns3_free_tx_queues(dev);
+ }
+ 
++static int
++hns3_check_rx_dma_addr(struct hns3_hw *hw, uint64_t dma_addr)
++{
++	uint64_t rem;
++
++	rem = dma_addr & (hw->rx_dma_addr_align - 1);
++	if (rem > 0) {
++		hns3_err(hw, "The IO address of the beginning of the mbuf data "
++			 "must be %u-byte aligned", hw->rx_dma_addr_align);
++		return -EINVAL;
++	}
++	return 0;
++}
++
+ static int
+ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
+ {
+ 	struct rte_mbuf *mbuf;
+ 	uint64_t dma_addr;
+ 	uint16_t i;
++	int ret;
+ 
+ 	for (i = 0; i < rxq->nb_rx_desc; i++) {
+ 		mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+@@ -286,6 +314,12 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
+ 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ 		rxq->rx_ring[i].addr = dma_addr;
+ 		rxq->rx_ring[i].rx.bd_base_info = 0;
++
++		ret = hns3_check_rx_dma_addr(hw, dma_addr);
++		if (ret != 0) {
++			hns3_rx_queue_release_mbufs(rxq);
++			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -686,13 +720,12 @@ tqp_reset_fail:
+ static int
+ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
+ {
+-	uint8_t msg_data[2];
++	struct hns3_vf_to_pf_msg req;
+ 	int ret;
+ 
+-	memcpy(msg_data, &queue_id, sizeof(uint16_t));
+-
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+-				 sizeof(msg_data), true, NULL, 0);
++	hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0);
++	memcpy(req.data, &queue_id, sizeof(uint16_t));
++	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
+ 	if (ret)
+ 		hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
+ 			 queue_id, ret);
+@@ -769,15 +802,14 @@ static int
+ hns3vf_reset_all_tqps(struct hns3_hw *hw)
+ {
+ #define HNS3VF_RESET_ALL_TQP_DONE	1U
++	struct hns3_vf_to_pf_msg req;
+ 	uint8_t reset_status;
+-	uint8_t msg_data[2];
+ 	int ret;
+ 	uint16_t i;
+ 
+-	memset(msg_data, 0, sizeof(msg_data));
+-	ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+-				sizeof(msg_data), true, &reset_status,
+-				sizeof(reset_status));
++	hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0);
++	ret = hns3vf_mbx_send(hw, &req, true,
++			      &reset_status, sizeof(reset_status));
+ 	if (ret) {
+ 		hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
+ 		return ret;
+@@ -2390,8 +2422,7 @@ hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
+ {
+ 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
+ 
+-	mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
+-			  RTE_MBUF_F_RX_IEEE1588_TMST;
++	mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
+ 	if (hns3_timestamp_rx_dynflag > 0) {
+ 		*RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
+ 			rte_mbuf_timestamp_t *) = timestamp;
+@@ -2670,6 +2701,7 @@ hns3_recv_scattered_pkts(void *rx_queue,
+ 			continue;
+ 		}
+ 
++		first_seg->ol_flags = 0;
+ 		if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ 			hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp);
+ 
+@@ -2699,7 +2731,7 @@ hns3_recv_scattered_pkts(void *rx_queue,
+ 
+ 		first_seg->port = rxq->port_id;
+ 		first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
+-		first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
++		first_seg->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+ 		if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
+ 			first_seg->hash.fdir.hi =
+ 				rte_le_to_cpu_16(rxd.rx.fd_id);
+@@ -3617,58 +3649,6 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
+ 	return false;
+ }
+ 
+-static bool
+-hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+-				uint32_t *l4_proto)
+-{
+-	struct rte_ipv4_hdr *ipv4_hdr;
+-	ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+-					   m->outer_l2_len);
+-	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+-		ipv4_hdr->hdr_checksum = 0;
+-	if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+-		struct rte_udp_hdr *udp_hdr;
+-		/*
+-		 * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo
+-		 * header for TSO packets
+-		 */
+-		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+-			return true;
+-		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+-				m->outer_l2_len + m->outer_l3_len);
+-		udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+-
+-		return true;
+-	}
+-	*l4_proto = ipv4_hdr->next_proto_id;
+-	return false;
+-}
+-
+-static bool
+-hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+-				uint32_t *l4_proto)
+-{
+-	struct rte_ipv6_hdr *ipv6_hdr;
+-	ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+-					   m->outer_l2_len);
+-	if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+-		struct rte_udp_hdr *udp_hdr;
+-		/*
+-		 * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo
+-		 * header for TSO packets
+-		 */
+-		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+-			return true;
+-		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+-				m->outer_l2_len + m->outer_l3_len);
+-		udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+-
+-		return true;
+-	}
+-	*l4_proto = ipv6_hdr->proto;
+-	return false;
+-}
+-
+ static void
+ hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
+ {
+@@ -3676,29 +3656,38 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
+ 	uint32_t paylen, hdr_len, l4_proto;
+ 	struct rte_udp_hdr *udp_hdr;
+ 
+-	if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)))
++	if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) &&
++			((ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
++			!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
+ 		return;
+ 
+ 	if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+-		if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
+-			return;
++		struct rte_ipv4_hdr *ipv4_hdr;
++
++		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
++			m->outer_l2_len);
++		l4_proto = ipv4_hdr->next_proto_id;
+ 	} else {
+-		if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
+-			return;
++		struct rte_ipv6_hdr *ipv6_hdr;
++
++		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
++					   m->outer_l2_len);
++		l4_proto = ipv6_hdr->proto;
+ 	}
+ 
++	if (l4_proto != IPPROTO_UDP)
++		return;
++
+ 	/* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
+-	if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+-		hdr_len = m->l2_len + m->l3_len + m->l4_len;
+-		hdr_len += m->outer_l2_len + m->outer_l3_len;
+-		paylen = m->pkt_len - hdr_len;
+-		if (paylen <= m->tso_segsz)
+-			return;
+-		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+-						  m->outer_l2_len +
+-						  m->outer_l3_len);
+-		udp_hdr->dgram_cksum = 0;
+-	}
++	hdr_len = m->l2_len + m->l3_len + m->l4_len;
++	hdr_len += m->outer_l2_len + m->outer_l3_len;
++	paylen = m->pkt_len - hdr_len;
++	if (paylen <= m->tso_segsz)
++		return;
++	udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
++					  m->outer_l2_len +
++					  m->outer_l3_len);
++	udp_hdr->dgram_cksum = 0;
+ }
+ 
+ static int
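
The new hns3_check_rx_dma_addr() above rejects mbuf IO addresses via `dma_addr & (hw->rx_dma_addr_align - 1)`, the standard remainder trick that is only valid when the alignment is a power of two. A minimal sketch of the same check:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two alignment test as used by hns3_check_rx_dma_addr():
 * for align == 2^k, (addr & (align - 1)) equals addr % align, so a
 * non-zero result means the address is misaligned. */
static bool is_aligned(uint64_t addr, uint32_t align)
{
    return (addr & ((uint64_t)align - 1)) == 0;
}

int main(void)
{
    printf("%d\n", is_aligned(0x1000, 64));  /* 1: 0x1000 % 64 == 0 */
    printf("%d\n", is_aligned(0x1004, 64));  /* 0: misaligned by 4  */
    return 0;
}
```
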
+diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c
+index 3ca226156b..ffc1f6d874 100644
+--- a/dpdk/drivers/net/i40e/i40e_ethdev.c
++++ b/dpdk/drivers/net/i40e/i40e_ethdev.c
+@@ -3724,8 +3724,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+-		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+ 		dev_info->tx_queue_offload_capa;
++	if (hw->mac.type == I40E_MAC_X722) {
++		dev_info->tx_offload_capa |=
++			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
++	}
++
+ 	dev_info->dev_capa =
+ 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c
+index 877e49151e..92165c8422 100644
+--- a/dpdk/drivers/net/i40e/i40e_flow.c
++++ b/dpdk/drivers/net/i40e/i40e_flow.c
+@@ -1708,8 +1708,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ 
+ 				ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
+ 
+-				if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
+-				    ether_type == RTE_ETHER_TYPE_IPV4 ||
++				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+ 				    ether_type == RTE_ETHER_TYPE_IPV6 ||
+ 				    ether_type == i40e_get_outer_vlan(dev)) {
+ 					rte_flow_error_set(error, EINVAL,
+diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c
+index 9aa5facb53..5e693cb1ea 100644
+--- a/dpdk/drivers/net/i40e/i40e_rxtx.c
++++ b/dpdk/drivers/net/i40e/i40e_rxtx.c
+@@ -295,6 +295,15 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
+ 	 */
+ 	*cd_tunneling |= (tx_offload.l2_len >> 1) <<
+ 		I40E_TXD_CTX_QW0_NATLEN_SHIFT;
++
++	/**
++	 * Calculate the tunneling UDP checksum (only supported with X722).
++	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
++	 */
++	if ((*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK) &&
++			(*cd_tunneling & I40E_TXD_CTX_UDP_TUNNELING) &&
++			(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
++		*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ }
+ 
+ static inline void
+diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+index f468c1fd90..19cf0ac718 100644
+--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c
++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+@@ -276,46 +276,30 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 				_mm256_loadu_si256((void *)&sw_ring[i + 4]));
+ #endif
+ 
+-		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+-		/* for AVX we need alignment otherwise loads are not atomic */
+-		if (avx_aligned) {
+-			/* load in descriptors, 2 at a time, in reverse order */
+-			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
+-			rte_compiler_barrier();
+-			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
+-			rte_compiler_barrier();
+-			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
+-			rte_compiler_barrier();
+-			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
+-		} else
+-#endif
+-		do {
+-			const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));
+-
+-			raw_desc6_7 = _mm256_inserti128_si256(
+-					_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
+-			raw_desc4_5 = _mm256_inserti128_si256(
+-					_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
+-			raw_desc2_3 = _mm256_inserti128_si256(
+-					_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
+-			raw_desc0_1 = _mm256_inserti128_si256(
+-					_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
+-		} while (0);
++		const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
++		rte_compiler_barrier();
++		const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
++		rte_compiler_barrier();
++		const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
++		rte_compiler_barrier();
++		const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
++		rte_compiler_barrier();
++		const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
++		rte_compiler_barrier();
++		const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
++		rte_compiler_barrier();
++		const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
++		rte_compiler_barrier();
++		const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));
++
++		const __m256i raw_desc6_7 = _mm256_inserti128_si256(
++				_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
++		const __m256i raw_desc4_5 = _mm256_inserti128_si256(
++				_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
++		const __m256i raw_desc2_3 = _mm256_inserti128_si256(
++				_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
++		const __m256i raw_desc0_1 = _mm256_inserti128_si256(
++				_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
+ 
+ 		if (split_packet) {
+ 			int j;
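
The AVX2 RX hunks in this patch (here and in the iavf and ice drivers below) delete the 32-byte aligned-load fast path and always read each 16-byte descriptor with its own `_mm_load_si128`, ordered by compiler barriers, before pairing the halves into 256-bit registers. The apparent motivation is that a single 32-byte load spans two descriptors and can observe one of them half-written, while a 16-byte aligned load covers exactly one descriptor. A sketch of the load-and-pair pattern on dummy data (build with `gcc -mavx2`; assumes x86-64 with AVX2):

```c
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

#define compiler_barrier() __asm__ __volatile__("" : : : "memory")

int main(void)
{
    /* Two fake 16-byte "descriptors", 16-byte aligned. */
    __attribute__((aligned(16))) uint64_t ring[4] = {1, 2, 3, 4};

    /* Load the higher-index descriptor first, as the driver does,
     * with a barrier so the compiler cannot merge or reorder the loads. */
    const __m128i d1 = _mm_load_si128((const __m128i *)&ring[2]);
    compiler_barrier();
    const __m128i d0 = _mm_load_si128((const __m128i *)&ring[0]);

    /* Pair them: d0 in lane 0, d1 in lane 1. */
    __m256i d0_1 = _mm256_inserti128_si256(_mm256_castsi128_si256(d0), d1, 1);

    uint64_t out[4];
    _mm256_storeu_si256((__m256i *)out, d0_1);
    printf("%llu %llu %llu %llu\n",
           (unsigned long long)out[0], (unsigned long long)out[1],
           (unsigned long long)out[2], (unsigned long long)out[3]);
    return 0;
}
```
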
+diff --git a/dpdk/drivers/net/iavf/iavf.h b/dpdk/drivers/net/iavf/iavf.h
+index 10868f2c30..d273d884f5 100644
+--- a/dpdk/drivers/net/iavf/iavf.h
++++ b/dpdk/drivers/net/iavf/iavf.h
+@@ -18,7 +18,8 @@
+ 
+ #define IAVF_AQ_LEN               32
+ #define IAVF_AQ_BUF_SZ            4096
+-#define IAVF_RESET_WAIT_CNT       500
++#define IAVF_RESET_WAIT_CNT       2000
++#define IAVF_RESET_DETECTED_CNT   500
+ #define IAVF_BUF_SIZE_MIN         1024
+ #define IAVF_FRAME_SIZE_MAX       9728
+ #define IAVF_QUEUE_BASE_ADDR_UNIT 128
+@@ -511,5 +512,6 @@ int iavf_flow_sub_check(struct iavf_adapter *adapter,
+ 			struct iavf_fsub_conf *filter);
+ void iavf_dev_watchdog_enable(struct iavf_adapter *adapter);
+ void iavf_dev_watchdog_disable(struct iavf_adapter *adapter);
+-int iavf_handle_hw_reset(struct rte_eth_dev *dev);
++void iavf_handle_hw_reset(struct rte_eth_dev *dev);
++void iavf_set_no_poll(struct iavf_adapter *adapter, bool link_change);
+ #endif /* _IAVF_ETHDEV_H_ */
+diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c
+index d1edb0dd5c..54bff05675 100644
+--- a/dpdk/drivers/net/iavf/iavf_ethdev.c
++++ b/dpdk/drivers/net/iavf/iavf_ethdev.c
+@@ -296,6 +296,7 @@ iavf_dev_watchdog(void *cb_arg)
+ 			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+ 				adapter->vf.eth_dev->data->name);
+ 			adapter->vf.vf_reset = false;
++			iavf_set_no_poll(adapter, false);
+ 		}
+ 	/* If not in reset then poll vfr_inprogress register for VFLR event */
+ 	} else {
+@@ -308,6 +309,7 @@ iavf_dev_watchdog(void *cb_arg)
+ 
+ 			/* enter reset state with VFLR event */
+ 			adapter->vf.vf_reset = true;
++			iavf_set_no_poll(adapter, false);
+ 			adapter->vf.link_up = false;
+ 
+ 			iavf_dev_event_post(adapter->vf.eth_dev, RTE_ETH_EVENT_INTR_RESET,
+@@ -628,7 +630,8 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
+ 					RTE_ETH_VLAN_FILTER_MASK |
+ 					RTE_ETH_VLAN_EXTEND_MASK);
+ 	if (err) {
+-		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
++		PMD_DRV_LOG(INFO,
++			"VLAN offloading is not supported, or offloading was refused by the PF");
+ 		return err;
+ 	}
+ 
+@@ -704,9 +707,7 @@ iavf_dev_configure(struct rte_eth_dev *dev)
+ 		vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
+ 	}
+ 
+-	ret = iavf_dev_init_vlan(dev);
+-	if (ret)
+-		PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
++	iavf_dev_init_vlan(dev);
+ 
+ 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ 		if (iavf_init_rss(ad) != 0) {
+@@ -1086,9 +1087,6 @@ iavf_dev_stop(struct rte_eth_dev *dev)
+ 
+ 	PMD_INIT_FUNC_TRACE();
+ 
+-	if (vf->vf_reset)
+-		return 0;
+-
+ 	if (adapter->closed)
+ 		return -1;
+ 
+@@ -1165,7 +1163,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+-		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+ 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+@@ -1174,6 +1171,10 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ 		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+ 
++	/* X710 does not support outer udp checksum */
++	if (adapter->hw.mac.type != IAVF_MAC_XL710)
++		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
++
+ 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
+ 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+ 
+@@ -2916,8 +2917,10 @@ iavf_dev_close(struct rte_eth_dev *dev)
+ 	 * effect.
+ 	 */
+ out:
+-	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
++	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true)) {
+ 		vf->vf_reset = false;
++		iavf_set_no_poll(adapter, false);
++	}
+ 
+ 	/* disable watchdog */
+ 	iavf_dev_watchdog_disable(adapter);
+@@ -2948,9 +2951,20 @@ static int
+ iavf_dev_reset(struct rte_eth_dev *dev)
+ {
+ 	int ret;
++	struct iavf_adapter *adapter =
++		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ 
++	if (!vf->in_reset_recovery) {
++		ret = iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
++						IAVF_SUCCESS, NULL, 0, NULL);
++		if (ret) {
++			PMD_DRV_LOG(ERR, "fail to send cmd VIRTCHNL_OP_RESET_VF");
++			return ret;
++		}
++	}
++
+ 	/*
+ 	 * Check whether the VF reset has been done and inform application,
+ 	 * to avoid calling the virtual channel command, which may cause
+@@ -2961,7 +2975,7 @@ iavf_dev_reset(struct rte_eth_dev *dev)
+ 		PMD_DRV_LOG(ERR, "Wait too long for reset done!\n");
+ 		return ret;
+ 	}
+-	vf->vf_reset = false;
++	iavf_set_no_poll(adapter, false);
+ 
+ 	PMD_DRV_LOG(DEBUG, "Start dev_reset ...\n");
+ 	ret = iavf_dev_uninit(dev);
+@@ -2971,16 +2985,49 @@ iavf_dev_reset(struct rte_eth_dev *dev)
+ 	return iavf_dev_init(dev);
+ }
+ 
++static inline bool
++iavf_is_reset(struct iavf_hw *hw)
++{
++	return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
++		IAVF_VF_ARQLEN1_ARQENABLE_MASK);
++}
++
++static bool
++iavf_is_reset_detected(struct iavf_adapter *adapter)
++{
++	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
++	int i;
++
++	/* poll until we see the reset actually happen */
++	for (i = 0; i < IAVF_RESET_DETECTED_CNT; i++) {
++		if (iavf_is_reset(hw))
++			return true;
++		rte_delay_ms(20);
++	}
++
++	return false;
++}
++
+ /*
+  * Handle hardware reset
+  */
+-int
++void
+ iavf_handle_hw_reset(struct rte_eth_dev *dev)
+ {
+ 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
++	struct iavf_adapter *adapter = dev->data->dev_private;
+ 	int ret;
+ 
++	if (!dev->data->dev_started)
++		return;
++
++	if (!iavf_is_reset_detected(adapter)) {
++		PMD_DRV_LOG(DEBUG, "reset not started\n");
++		return;
++	}
++
+ 	vf->in_reset_recovery = true;
++	iavf_set_no_poll(adapter, false);
+ 
+ 	ret = iavf_dev_reset(dev);
+ 	if (ret)
+@@ -2997,15 +3044,26 @@ iavf_handle_hw_reset(struct rte_eth_dev *dev)
+ 	ret = iavf_dev_start(dev);
+ 	if (ret)
+ 		goto error;
+-	dev->data->dev_started = 1;
+ 
+-	vf->in_reset_recovery = false;
+-	return 0;
++	dev->data->dev_started = 1;
++	goto exit;
+ 
+ error:
+ 	PMD_DRV_LOG(DEBUG, "RESET recover with error code=%d\n", ret);
++exit:
+ 	vf->in_reset_recovery = false;
+-	return ret;
++	iavf_set_no_poll(adapter, false);
++
++	return;
++}
++
++void
++iavf_set_no_poll(struct iavf_adapter *adapter, bool link_change)
++{
++	struct iavf_info *vf = &adapter->vf;
++
++	adapter->no_poll = (link_change && !vf->link_up) ||
++		vf->vf_reset || vf->in_reset_recovery;
+ }
+ 
+ static int
+diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c
+index 07a69db540..d6c0180ffd 100644
+--- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c
++++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c
+@@ -1518,8 +1518,11 @@ iavf_security_ctx_create(struct iavf_adapter *adapter)
+ 	if (adapter->security_ctx == NULL) {
+ 		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+ 				sizeof(struct iavf_security_ctx), 0);
+-		if (adapter->security_ctx == NULL)
++		if (adapter->security_ctx == NULL) {
++			rte_free(adapter->vf.eth_dev->security_ctx);
++			adapter->vf.eth_dev->security_ctx = NULL;
+ 			return -ENOMEM;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+index 510b4d8f1c..49d41af953 100644
+--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c
++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+@@ -193,62 +193,30 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
+ 			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+ #endif
+ 
+-		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+-#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+-		/* for AVX we need alignment otherwise loads are not atomic */
+-		if (avx_aligned) {
+-			/* load in descriptors, 2 at a time, in reverse order */
+-			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
+-			rte_compiler_barrier();
+-			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
+-			rte_compiler_barrier();
+-			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
+-			rte_compiler_barrier();
+-			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
+-		} else
+-#endif
+-		{
+-			const __m128i raw_desc7 =
+-				_mm_load_si128((void *)(rxdp + 7));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc6 =
+-				_mm_load_si128((void *)(rxdp + 6));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc5 =
+-				_mm_load_si128((void *)(rxdp + 5));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc4 =
+-				_mm_load_si128((void *)(rxdp + 4));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc3 =
+-				_mm_load_si128((void *)(rxdp + 3));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc2 =
+-				_mm_load_si128((void *)(rxdp + 2));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc1 =
+-				_mm_load_si128((void *)(rxdp + 1));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc0 =
+-				_mm_load_si128((void *)(rxdp + 0));
+-
+-			raw_desc6_7 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc6),
+-					 raw_desc7, 1);
+-			raw_desc4_5 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc4),
+-					 raw_desc5, 1);
+-			raw_desc2_3 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc2),
+-					 raw_desc3, 1);
+-			raw_desc0_1 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc0),
+-					 raw_desc1, 1);
+-		}
++		const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
++		rte_compiler_barrier();
++		const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
++		rte_compiler_barrier();
++		const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
++		rte_compiler_barrier();
++		const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
++		rte_compiler_barrier();
++		const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
++		rte_compiler_barrier();
++		const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
++		rte_compiler_barrier();
++		const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
++		rte_compiler_barrier();
++		const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));
++
++		const __m256i raw_desc6_7 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
++		const __m256i raw_desc4_5 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
++		const __m256i raw_desc2_3 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
++		const __m256i raw_desc0_1 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
+ 
+ 		if (split_packet) {
+ 			int j;
+diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c
+index 0a3e1d082c..1111d30f57 100644
+--- a/dpdk/drivers/net/iavf/iavf_vchnl.c
++++ b/dpdk/drivers/net/iavf/iavf_vchnl.c
+@@ -273,20 +273,18 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
+ 					iavf_dev_watchdog_enable(adapter);
+ 			}
+ 			if (adapter->devargs.no_poll_on_link_down) {
+-				if (vf->link_up && adapter->no_poll) {
+-					adapter->no_poll = false;
+-					PMD_DRV_LOG(DEBUG, "VF no poll turned off");
+-				}
+-				if (!vf->link_up) {
+-					adapter->no_poll = true;
++				iavf_set_no_poll(adapter, true);
++				if (adapter->no_poll)
+ 					PMD_DRV_LOG(DEBUG, "VF no poll turned on");
+-				}
++				else
++					PMD_DRV_LOG(DEBUG, "VF no poll turned off");
+ 			}
+ 			PMD_DRV_LOG(INFO, "Link status update:%s",
+ 					vf->link_up ? "up" : "down");
+ 			break;
+ 		case VIRTCHNL_EVENT_RESET_IMPENDING:
+ 			vf->vf_reset = true;
++			iavf_set_no_poll(adapter, false);
+ 			PMD_DRV_LOG(INFO, "VF is resetting");
+ 			break;
+ 		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+@@ -462,6 +460,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
+ 		vf->link_up = false;
+ 		if (!vf->vf_reset) {
+ 			vf->vf_reset = true;
++			iavf_set_no_poll(adapter, false);
+ 			iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET,
+ 				NULL, 0);
+ 		}
+@@ -485,14 +484,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
+ 				iavf_dev_watchdog_enable(adapter);
+ 		}
+ 		if (adapter->devargs.no_poll_on_link_down) {
+-			if (vf->link_up && adapter->no_poll) {
+-				adapter->no_poll = false;
+-				PMD_DRV_LOG(DEBUG, "VF no poll turned off");
+-			}
+-			if (!vf->link_up) {
+-				adapter->no_poll = true;
++			iavf_set_no_poll(adapter, true);
++			if (adapter->no_poll)
+ 				PMD_DRV_LOG(DEBUG, "VF no poll turned on");
+-			}
++			else
++				PMD_DRV_LOG(DEBUG, "VF no poll turned off");
+ 		}
+ 		iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
+ 		break;
+diff --git a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h
+index 844e90bbce..1131379d63 100644
+--- a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h
++++ b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h
+@@ -1728,8 +1728,8 @@ struct ice_aqc_link_topo_addr {
+ #define ICE_AQC_LINK_TOPO_HANDLE_M	(0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
+ /* Used to decode the handle field */
+ #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M	BIT(9)
+-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM	BIT(9)
+-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ	0
++#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM	0
++#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ	BIT(9)
+ #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S		0
+ /* In case of a Mezzanine type */
+ #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M	\
+diff --git a/dpdk/drivers/net/ice/base/ice_bitops.h b/dpdk/drivers/net/ice/base/ice_bitops.h
+index 3b71c1b7f5..5c17bcb674 100644
+--- a/dpdk/drivers/net/ice/base/ice_bitops.h
++++ b/dpdk/drivers/net/ice/base/ice_bitops.h
+@@ -418,10 +418,10 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits)
+  * Note that this function assumes it is operating on a bitmap declared using
+  * ice_declare_bitmap.
+  */
+-static inline int
++static inline u16
+ ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
+ {
+-	int count = 0;
++	u16 count = 0;
+ 	u16 bit = 0;
+ 
+ 	while (size > (bit = ice_find_next_bit(bm, size, bit))) {
+diff --git a/dpdk/drivers/net/ice/base/ice_common.c b/dpdk/drivers/net/ice/base/ice_common.c
+index 8867279c28..7a50a0f9f0 100644
+--- a/dpdk/drivers/net/ice/base/ice_common.c
++++ b/dpdk/drivers/net/ice/base/ice_common.c
+@@ -3890,8 +3890,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ 		break;
+ 	case ICE_FEC_DIS_AUTO:
+ 		/* Set No FEC and auto FEC */
+-		if (!ice_fw_supports_fec_dis_auto(hw))
+-			return ICE_ERR_NOT_SUPPORTED;
++		if (!ice_fw_supports_fec_dis_auto(hw)) {
++			status = ICE_ERR_NOT_SUPPORTED;
++			goto out;
++		}
+ 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
+ 		/* fall-through */
+ 	case ICE_FEC_AUTO:
+@@ -4904,7 +4906,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ 
+ 	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+ 
+-	dest_byte &= ~(mask);
++	dest_byte &= mask;
+ 
+ 	dest_byte >>= shift_width;
+ 
+@@ -4944,7 +4946,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ 	/* the data in the memory is stored as little endian so mask it
+ 	 * correctly
+ 	 */
+-	src_word &= ~(CPU_TO_LE16(mask));
++	src_word &= CPU_TO_LE16(mask);
+ 
+ 	/* get the data back into host order before shifting */
+ 	dest_word = LE16_TO_CPU(src_word);
+@@ -4995,7 +4997,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ 	/* the data in the memory is stored as little endian so mask it
+ 	 * correctly
+ 	 */
+-	src_dword &= ~(CPU_TO_LE32(mask));
++	src_dword &= CPU_TO_LE32(mask);
+ 
+ 	/* get the data back into host order before shifting */
+ 	dest_dword = LE32_TO_CPU(src_dword);
+@@ -5046,7 +5048,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ 	/* the data in the memory is stored as little endian so mask it
+ 	 * correctly
+ 	 */
+-	src_qword &= ~(CPU_TO_LE64(mask));
++	src_qword &= CPU_TO_LE64(mask);
+ 
+ 	/* get the data back into host order before shifting */
+ 	dest_qword = LE64_TO_CPU(src_qword);
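
The ice_common.c hunks above fix the byte/word/dword/qword context readers: to extract a field you AND with the mask (keeping the field's bits) and then shift down, whereas the old `&= ~(mask)` cleared exactly the bits being read. A tiny sketch of the corrected extraction:

```c
#include <stdint.h>
#include <stdio.h>

/* Field extraction as corrected above: keep the bits selected by the
 * mask, then shift them down to bit 0. */
static uint16_t read_field(uint16_t word, uint16_t mask, unsigned int shift)
{
    word &= mask;          /* keep only the field's bits */
    return word >> shift;  /* move them to bit 0 */
}

int main(void)
{
    /* A 3-bit field at bits 4..6, holding the value 5 (0b101). */
    uint16_t word = 5u << 4;
    uint16_t mask = 0x7u << 4;

    printf("fixed = %u\n", read_field(word, mask, 4));       /* prints 5 */
    printf("buggy = %u\n", (uint16_t)((word & ~mask) >> 4)); /* prints 0 */
    return 0;
}
```
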
+diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c
+index f9266447d9..a0e4f5fa27 100644
+--- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c
++++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.c
+@@ -1534,16 +1534,14 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
+ 	switch (blk) {
+ 	case ICE_BLK_RSS:
+ 		offset = GLQF_HMASK(mask_idx);
+-		val = (idx << GLQF_HMASK_MSK_INDEX_S) &
+-			GLQF_HMASK_MSK_INDEX_M;
+-		val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
++		val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
++		val |= ((u32)mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
+ 		break;
+ 	case ICE_BLK_FD:
+ 		offset = GLQF_FDMASK(mask_idx);
+ 		val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
+ 			GLQF_FDMASK_MSK_INDEX_M;
+-		val |= (mask << GLQF_FDMASK_MASK_S) &
+-			GLQF_FDMASK_MASK_M;
++		val |= ((u32)mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
+ 		break;
+ 	default:
+ 		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
+diff --git a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h
+index d816df0ff6..39673e36f7 100644
+--- a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h
++++ b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h
+@@ -1074,10 +1074,9 @@ struct ice_tx_ctx_desc {
+ 	__le64 qw1;
+ };
+ 
+-#define ICE_TX_GSC_DESC_START	0  /* 7 BITS */
+-#define ICE_TX_GSC_DESC_OFFSET	7  /* 4 BITS */
+-#define ICE_TX_GSC_DESC_TYPE	11 /* 2 BITS */
+-#define ICE_TX_GSC_DESC_ENA	13 /* 1 BIT */
++#define ICE_TX_GCS_DESC_START	0  /* 8 BITS */
++#define ICE_TX_GCS_DESC_OFFSET	8  /* 4 BITS */
++#define ICE_TX_GCS_DESC_TYPE	12 /* 3 BITS */
+ 
+ #define ICE_TXD_CTX_QW1_DTYPE_S	0
+ #define ICE_TXD_CTX_QW1_DTYPE_M	(0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)
+diff --git a/dpdk/drivers/net/ice/base/ice_nvm.c b/dpdk/drivers/net/ice/base/ice_nvm.c
+index e46aded12a..6b0794f562 100644
+--- a/dpdk/drivers/net/ice/base/ice_nvm.c
++++ b/dpdk/drivers/net/ice/base/ice_nvm.c
+@@ -72,6 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ 	enum ice_status status;
+ 	u32 inlen = *length;
+ 	u32 bytes_read = 0;
++	int retry_cnt = 0;
+ 	bool last_cmd;
+ 
+ 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+@@ -106,11 +107,24 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ 					 offset, (u16)read_size,
+ 					 data + bytes_read, last_cmd,
+ 					 read_shadow_ram, NULL);
+-		if (status)
+-			break;
+-
+-		bytes_read += read_size;
+-		offset += read_size;
++		if (status) {
++			if (hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY ||
++			    retry_cnt > ICE_SQ_SEND_MAX_EXECUTE)
++				break;
++			ice_debug(hw, ICE_DBG_NVM,
++				  "NVM read EBUSY error, retry %d\n",
++				  retry_cnt + 1);
++			ice_release_nvm(hw);
++			msleep(ICE_SQ_SEND_DELAY_TIME_MS);
++			status = ice_acquire_nvm(hw, ICE_RES_READ);
++			if (status)
++				break;
++			retry_cnt++;
++		} else {
++			bytes_read += read_size;
++			offset += read_size;
++			retry_cnt = 0;
++		}
+ 	} while (!last_cmd);
+ 
+ 	*length = bytes_read;
+@@ -474,7 +488,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ {
+ 	enum ice_status status;
+ 	u16 pfa_len, pfa_ptr;
+-	u16 next_tlv;
++	u32 next_tlv;
+ 
+ 	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+ 	if (status != ICE_SUCCESS) {
+@@ -490,25 +504,30 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ 	 * of TLVs to find the requested one.
+ 	 */
+ 	next_tlv = pfa_ptr + 1;
+-	while (next_tlv < pfa_ptr + pfa_len) {
++	while (next_tlv < ((u32)pfa_ptr + pfa_len)) {
+ 		u16 tlv_sub_module_type;
+ 		u16 tlv_len;
+ 
+ 		/* Read TLV type */
+-		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
+-		if (status != ICE_SUCCESS) {
++		status = ice_read_sr_word(hw, (u16)next_tlv,
++					  &tlv_sub_module_type);
++		if (status) {
+ 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
+ 			break;
+ 		}
+ 		/* Read TLV length */
+-		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
++		status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len);
+ 		if (status != ICE_SUCCESS) {
+ 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
+ 			break;
+ 		}
++		if (tlv_len > pfa_len) {
++			ice_debug(hw, ICE_DBG_INIT, "Invalid TLV length.\n");
++			return ICE_ERR_INVAL_SIZE;
++		}
+ 		if (tlv_sub_module_type == module_type) {
+ 			if (tlv_len) {
+-				*module_tlv = next_tlv;
++				*module_tlv = (u16)next_tlv;
+ 				*module_tlv_len = tlv_len;
+ 				return ICE_SUCCESS;
+ 			}
+@@ -749,7 +768,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
+ 				       orom_data, hw->flash.banks.orom_size);
+ 	if (status) {
+ 		ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
+-		return status;
++		goto exit_error;
+ 	}
+ 
+ 	/* Scan the memory buffer to locate the CIVD data section */
+@@ -773,7 +792,8 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
+ 		if (sum) {
+ 			ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
+ 				  sum);
+-			goto err_invalid_checksum;
++			status = ICE_ERR_NVM;
++			goto exit_error;
+ 		}
+ 
+ 		*civd = *tmp;
+@@ -781,11 +801,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
+ 		return ICE_SUCCESS;
+ 	}
+ 
++	status = ICE_ERR_NVM;
+ 	ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
+ 
+-err_invalid_checksum:
++exit_error:
+ 	ice_free(hw, orom_data);
+-	return ICE_ERR_NVM;
++	return status;
+ }
+ 
+ /**
+diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.c b/dpdk/drivers/net/ice/base/ice_ptp_hw.c
+index 548ef5e820..c507f211df 100644
+--- a/dpdk/drivers/net/ice/base/ice_ptp_hw.c
++++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.c
+@@ -2817,8 +2817,8 @@ ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
+ 	val &= ~TS_CMD_MASK;
+ 	val |= cmd_val;
+ 
+-	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD, val,
+-					   lock_sbq);
++	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD,
++					val | TS_CMD_RX_TYPE, lock_sbq);
+ 	if (status) {
+ 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, status %d\n",
+ 			  status);
+diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.h b/dpdk/drivers/net/ice/base/ice_ptp_hw.h
+index 3667c9882d..f53b9e3ecc 100644
+--- a/dpdk/drivers/net/ice/base/ice_ptp_hw.h
++++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.h
+@@ -295,6 +295,8 @@ enum ice_status ice_ptp_init_phy_cfg(struct ice_hw *hw);
+ #define TS_CMD_MASK_E810		0xFF
+ #define TS_CMD_MASK			0xF
+ #define SYNC_EXEC_CMD			0x3
++#define TS_CMD_RX_TYPE_S		0x4
++#define TS_CMD_RX_TYPE			MAKEMASK(0x18, TS_CMD_RX_TYPE_S)
+ 
+ /* Macros to derive port low and high addresses on both quads */
+ #define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
+diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c
+index a4d31647fe..21cfe53a6d 100644
+--- a/dpdk/drivers/net/ice/base/ice_sched.c
++++ b/dpdk/drivers/net/ice/base/ice_sched.c
+@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
+ 	if (!root)
+ 		return ICE_ERR_NO_MEMORY;
+ 
+-	/* coverity[suspicious_sizeof] */
+ 	root->children = (struct ice_sched_node **)
+-		ice_calloc(hw, hw->max_children[0], sizeof(*root));
++		ice_calloc(hw, hw->max_children[0], sizeof(*root->children));
+ 	if (!root->children) {
+ 		ice_free(hw, root);
+ 		return ICE_ERR_NO_MEMORY;
+@@ -186,9 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
+ 	if (!node)
+ 		return ICE_ERR_NO_MEMORY;
+ 	if (hw->max_children[layer]) {
+-		/* coverity[suspicious_sizeof] */
+ 		node->children = (struct ice_sched_node **)
+-			ice_calloc(hw, hw->max_children[layer], sizeof(*node));
++			ice_calloc(hw, hw->max_children[layer],
++				   sizeof(*node->children));
+ 		if (!node->children) {
+ 			ice_free(hw, node);
+ 			return ICE_ERR_NO_MEMORY;
+@@ -1069,11 +1068,11 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
+ 	u32 *first_teid_ptr = first_node_teid;
+ 	u16 new_num_nodes = num_nodes;
+ 	enum ice_status status = ICE_SUCCESS;
++	u32 temp;
+ 
+ 	*num_nodes_added = 0;
+ 	while (*num_nodes_added < num_nodes) {
+ 		u16 max_child_nodes, num_added = 0;
+-		u32 temp;
+ 
+ 		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
+ 							 layer,	new_num_nodes,
+diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c
+index f7fcc3a8d4..c4fd07199e 100644
+--- a/dpdk/drivers/net/ice/base/ice_switch.c
++++ b/dpdk/drivers/net/ice/base/ice_switch.c
+@@ -4603,7 +4603,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+ 		u16 vsi_handle_arr[2];
+ 
+ 		/* A rule already exists with the new VSI being added */
+-		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
++		if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
+ 			return ICE_ERR_ALREADY_EXISTS;
+ 
+ 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
+@@ -4651,7 +4651,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+ 
+ 		/* A rule already exists with the new VSI being added */
+ 		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
+-			return ICE_SUCCESS;
++			return ICE_ERR_ALREADY_EXISTS;
+ 
+ 		/* Update the previously created VSI list set with
+ 		 * the new VSI ID passed in
+@@ -7390,7 +7390,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
+ 	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
+ 
+ 	/* return number of free indexes */
+-	return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
++	return ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
+ }
+ 
+ static void ice_set_recipe_index(unsigned long idx, u8 *bitmap)
+@@ -8101,6 +8101,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ 	enum ice_status status = ICE_SUCCESS;
+ 	struct ice_sw_recipe *rm;
+ 	u8 i;
++	u16 cnt;
+ 
+ 	if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
+ 		return ICE_ERR_PARAM;
+diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c
+index 3ccba4db80..c1d2b91ad7 100644
+--- a/dpdk/drivers/net/ice/ice_ethdev.c
++++ b/dpdk/drivers/net/ice/ice_ethdev.c
+@@ -1804,6 +1804,7 @@ ice_pf_setup(struct ice_pf *pf)
+ 	}
+ 
+ 	pf->main_vsi = vsi;
++	rte_spinlock_init(&pf->link_lock);
+ 
+ 	return 0;
+ }
+@@ -3621,17 +3622,31 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
+ 	return 0;
+ }
+ 
++static enum ice_status
++ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse,
++		       struct ice_link_status *link)
++{
++	struct ice_hw *hw = ICE_PF_TO_HW(pf);
++	int ret;
++
++	rte_spinlock_lock(&pf->link_lock);
++
++	ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL);
++
++	rte_spinlock_unlock(&pf->link_lock);
++
++	return ret;
++}
++
+ static void
+ ice_get_init_link_status(struct rte_eth_dev *dev)
+ {
+-	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ 	struct ice_link_status link_status;
+ 	int ret;
+ 
+-	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
+-				   &link_status, NULL);
++	ret = ice_get_link_info_safe(pf, enable_lse, &link_status);
+ 	if (ret != ICE_SUCCESS) {
+ 		PMD_DRV_LOG(ERR, "Failed to get link info");
+ 		pf->init_link_up = false;
+@@ -3735,7 +3750,10 @@ ice_dev_start(struct rte_eth_dev *dev)
+ 	ice_set_tx_function(dev);
+ 
+ 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+-			RTE_ETH_VLAN_EXTEND_MASK | RTE_ETH_QINQ_STRIP_MASK;
++			RTE_ETH_VLAN_EXTEND_MASK;
++	if (ice_is_dvm_ena(hw))
++		mask |= RTE_ETH_QINQ_STRIP_MASK;
++
+ 	ret = ice_vlan_offload_set(dev, mask);
+ 	if (ret) {
+ 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+@@ -3876,7 +3894,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ 			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ 			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+-			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
++			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
++			RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
++			RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
++			RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
++			RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
+ 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
+ 	}
+ 
+@@ -3996,7 +4018,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ {
+ #define CHECK_INTERVAL 50  /* 50ms */
+ #define MAX_REPEAT_TIME 40  /* 2s (40 * 50ms) in total */
+-	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ 	struct ice_link_status link_status;
+ 	struct rte_eth_link link, old;
+ 	int status;
+@@ -4010,8 +4032,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ 
+ 	do {
+ 		/* Get link status information from hardware */
+-		status = ice_aq_get_link_info(hw->port_info, enable_lse,
+-					      &link_status, NULL);
++		status = ice_get_link_info_safe(pf, enable_lse, &link_status);
+ 		if (status != ICE_SUCCESS) {
+ 			link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ 			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+@@ -4802,19 +4823,35 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ 			ice_vsi_config_vlan_filter(vsi, false);
+ 	}
+ 
+-	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+-		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+-			ice_vsi_config_vlan_stripping(vsi, true);
+-		else
+-			ice_vsi_config_vlan_stripping(vsi, false);
+-	}
++	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
++	if (!ice_is_dvm_ena(hw)) {
++		if (mask & RTE_ETH_VLAN_STRIP_MASK) {
++			if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
++				ice_vsi_config_vlan_stripping(vsi, true);
++			else
++				ice_vsi_config_vlan_stripping(vsi, false);
++		}
+ 
+-	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+-		/* Enable or disable outer VLAN stripping */
+-		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+-			ice_vsi_config_outer_vlan_stripping(vsi, true);
+-		else
+-			ice_vsi_config_outer_vlan_stripping(vsi, false);
++		if (mask & RTE_ETH_QINQ_STRIP_MASK) {
++			PMD_DRV_LOG(ERR, "Single VLAN mode (SVM) does not support qinq");
++			return -ENOTSUP;
++		}
++	} else {
++		if ((mask & RTE_ETH_VLAN_STRIP_MASK) |
++				(mask & RTE_ETH_QINQ_STRIP_MASK)) {
++			if (rxmode->offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
++						RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
++				ice_vsi_config_outer_vlan_stripping(vsi, true);
++			else
++				ice_vsi_config_outer_vlan_stripping(vsi, false);
++		}
++
++		if (mask & RTE_ETH_QINQ_STRIP_MASK) {
++			if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
++				ice_vsi_config_vlan_stripping(vsi, true);
++			else
++				ice_vsi_config_vlan_stripping(vsi, false);
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/ice/ice_ethdev.h b/dpdk/drivers/net/ice/ice_ethdev.h
+index abe6dcdc23..d607f028e0 100644
+--- a/dpdk/drivers/net/ice/ice_ethdev.h
++++ b/dpdk/drivers/net/ice/ice_ethdev.h
+@@ -548,6 +548,10 @@ struct ice_pf {
+ 	uint64_t rss_hf;
+ 	struct ice_tm_conf tm_conf;
+ 	uint16_t outer_ethertype;
++	/* Lock to prevent a race between the LSC interrupt handler
++	 * and link status updates during dev_start.
++	 */
++	rte_spinlock_t link_lock;
+ };
+ 
+ #define ICE_MAX_QUEUE_NUM  2048
+diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c
+index f923641533..d8c46347d2 100644
+--- a/dpdk/drivers/net/ice/ice_hash.c
++++ b/dpdk/drivers/net/ice/ice_hash.c
+@@ -650,10 +650,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad,
+ 	uint8_t *pkt_buf, *msk_buf;
+ 	uint8_t tmp_val = 0;
+ 	uint8_t tmp_c = 0;
+-	int i, j;
++	int i, j, ret = 0;
+ 
+ 	if (ad->psr == NULL)
+-		return -rte_errno;
++		return -ENOTSUP;
+ 
+ 	raw_spec = item->spec;
+ 	raw_mask = item->mask;
+@@ -670,8 +670,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad,
+ 		return -ENOMEM;
+ 
+ 	msk_buf = rte_zmalloc(NULL, pkt_len, 0);
+-	if (!msk_buf)
++	if (!msk_buf) {
++		rte_free(pkt_buf);
+ 		return -ENOMEM;
++	}
+ 
+ 	/* convert string to int array */
+ 	for (i = 0, j = 0; i < spec_len; i += 2, j++) {
+@@ -708,18 +710,22 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad,
+ 			msk_buf[j] = tmp_val * 16 + tmp_c - '0';
+ 	}
+ 
+-	if (ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt))
+-		return -rte_errno;
++	ret = ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt);
++	if (ret)
++		goto free_mem;
+ 
+-	if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+-		pkt_len, ICE_BLK_RSS, true, &prof))
+-		return -rte_errno;
++	ret = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
++			pkt_len, ICE_BLK_RSS, true, &prof);
++	if (ret)
++		goto free_mem;
+ 
+ 	rte_memcpy(&meta->raw.prof, &prof, sizeof(prof));
+ 
++free_mem:
+ 	rte_free(pkt_buf);
+ 	rte_free(msk_buf);
+-	return 0;
++
++	return ret;
+ }
+ 
+ static void
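
The ice_hash.c hunk above reworks the raw-pattern parser so that both `pkt_buf` and `msk_buf` are released on every path through a single `free_mem` label, instead of returning early and leaking whichever buffer was already allocated. A compact sketch of that single-exit cleanup style, with hypothetical names:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Once both buffers exist, every failure jumps to one label that frees
 * both; only the second allocation failure frees the first by hand. */
static int parse_raw(const char *spec, size_t len)
{
    int ret = 0;
    char *pkt = malloc(len);
    if (pkt == NULL)
        return -1;

    char *msk = malloc(len);
    if (msk == NULL) {
        free(pkt);         /* only pkt exists at this point */
        return -1;
    }

    if (memchr(spec, '?', len) != NULL) {
        ret = -2;          /* simulated parser failure */
        goto free_mem;
    }
    /* ... use pkt/msk ... */

free_mem:
    free(pkt);
    free(msk);
    return ret;
}

int main(void)
{
    printf("%d\n", parse_raw("aabb", 4));   /* 0  */
    printf("%d\n", parse_raw("aa?b", 4));   /* -2 */
    return 0;
}
```
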
+diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c
+index 73e47ae92d..dea6a5b535 100644
+--- a/dpdk/drivers/net/ice/ice_rxtx.c
++++ b/dpdk/drivers/net/ice/ice_rxtx.c
+@@ -2734,9 +2734,9 @@ ice_parse_tunneling_params(uint64_t ol_flags,
+ 	 * Calculate the tunneling UDP checksum.
+ 	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
+ 	 */
+-	if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
+-		(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) &&
+-		(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
++	if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) &&
++			(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) &&
++			(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
+ 		*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
+ }
+ 
+diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c
+index 6f6d790967..d6e88dbb29 100644
+--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c
++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c
+@@ -254,62 +254,30 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ 			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+ #endif
+ 
+-		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+-#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+-		/* for AVX we need alignment otherwise loads are not atomic */
+-		if (avx_aligned) {
+-			/* load in descriptors, 2 at a time, in reverse order */
+-			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
+-			rte_compiler_barrier();
+-			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
+-			rte_compiler_barrier();
+-			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
+-			rte_compiler_barrier();
+-			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
+-		} else
+-#endif
+-		{
+-			const __m128i raw_desc7 =
+-				_mm_load_si128((void *)(rxdp + 7));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc6 =
+-				_mm_load_si128((void *)(rxdp + 6));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc5 =
+-				_mm_load_si128((void *)(rxdp + 5));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc4 =
+-				_mm_load_si128((void *)(rxdp + 4));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc3 =
+-				_mm_load_si128((void *)(rxdp + 3));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc2 =
+-				_mm_load_si128((void *)(rxdp + 2));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc1 =
+-				_mm_load_si128((void *)(rxdp + 1));
+-			rte_compiler_barrier();
+-			const __m128i raw_desc0 =
+-				_mm_load_si128((void *)(rxdp + 0));
+-
+-			raw_desc6_7 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc6),
+-					 raw_desc7, 1);
+-			raw_desc4_5 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc4),
+-					 raw_desc5, 1);
+-			raw_desc2_3 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc2),
+-					 raw_desc3, 1);
+-			raw_desc0_1 =
+-				_mm256_inserti128_si256
+-					(_mm256_castsi128_si256(raw_desc0),
+-					 raw_desc1, 1);
+-		}
++		const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
++		rte_compiler_barrier();
++		const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
++		rte_compiler_barrier();
++		const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
++		rte_compiler_barrier();
++		const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
++		rte_compiler_barrier();
++		const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
++		rte_compiler_barrier();
++		const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
++		rte_compiler_barrier();
++		const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
++		rte_compiler_barrier();
++		const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));
++
++		const __m256i raw_desc6_7 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
++		const __m256i raw_desc4_5 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
++		const __m256i raw_desc2_3 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
++		const __m256i raw_desc0_1 =
++			_mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
+ 
+ 		if (split_packet) {
+ 			int j;
+diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h
+index 55840cf170..4b73465af5 100644
+--- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h
++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h
+@@ -251,6 +251,10 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
+ 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+ 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+ 		RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
++		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |    \
++		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |    \
++		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |    \
++		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |    \
+ 		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ 
+ #define ICE_TX_VECTOR_OFFLOAD (				\
+diff --git a/dpdk/drivers/net/ice/ice_tm.c b/dpdk/drivers/net/ice/ice_tm.c
+index f5ea47ae83..65b9fdf320 100644
+--- a/dpdk/drivers/net/ice/ice_tm.c
++++ b/dpdk/drivers/net/ice/ice_tm.c
+@@ -58,8 +58,15 @@ void
+ ice_tm_conf_uninit(struct rte_eth_dev *dev)
+ {
+ 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
++	struct ice_tm_shaper_profile *shaper_profile;
+ 	struct ice_tm_node *tm_node;
+ 
++	/* clear profile */
++	while ((shaper_profile = TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
++		TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
++		rte_free(shaper_profile);
++	}
++
+ 	/* clear node configuration */
+ 	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+ 		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+@@ -648,6 +655,8 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev,
+ 	uint16_t buf_size = ice_struct_size(buf, txqs, 1);
+ 
+ 	buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf));
++	if (buf == NULL)
++		return -ENOMEM;
+ 
+ 	queue_parent_node = queue_sched_node->parent;
+ 	buf->src_teid = queue_parent_node->info.node_teid;
+@@ -659,6 +668,7 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev,
+ 					NULL, buf, buf_size, &txqs_moved, NULL);
+ 	if (ret || txqs_moved == 0) {
+ 		PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id);
++		rte_free(buf);
+ 		return ICE_ERR_PARAM;
+ 	}
+ 
+@@ -668,12 +678,14 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev,
+ 	} else {
+ 		PMD_DRV_LOG(ERR, "invalid children number %d for queue %u",
+ 			    queue_parent_node->num_children, queue_id);
++		rte_free(buf);
+ 		return ICE_ERR_PARAM;
+ 	}
+ 	dst_node->children[dst_node->num_children++] = queue_sched_node;
+ 	queue_sched_node->parent = dst_node;
+ 	ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info);
+ 
++	rte_free(buf);
+ 	return ret;
+ }
+ 
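The new cleanup in ice_tm_conf_uninit() above drains the shaper-profile list with the take-head/unlink/free idiom; a plain TAILQ_FOREACH with free() in the body would touch freed memory when advancing. A self-contained sketch using <sys/queue.h> (names are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct profile {
    int id;
    TAILQ_ENTRY(profile) node;
};
TAILQ_HEAD(profile_list, profile);

int main(void)
{
    struct profile_list list = TAILQ_HEAD_INITIALIZER(list);
    struct profile *p;

    for (int i = 0; i < 3; i++) {
        p = malloc(sizeof(*p));
        if (p == NULL)
            return 1;
        p->id = i;
        TAILQ_INSERT_TAIL(&list, p, node);
    }

    /* Drain: repeatedly take the head, unlink it, then free it. */
    while ((p = TAILQ_FIRST(&list)) != NULL) {
        TAILQ_REMOVE(&list, p, node);
        printf("freeing profile %d\n", p->id);
        free(p);
    }
    return 0;
}
```
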
+diff --git a/dpdk/drivers/net/igc/igc_ethdev.c b/dpdk/drivers/net/igc/igc_ethdev.c
+index 58c4f80927..690736b6d1 100644
+--- a/dpdk/drivers/net/igc/igc_ethdev.c
++++ b/dpdk/drivers/net/igc/igc_ethdev.c
+@@ -2853,7 +2853,7 @@ eth_igc_timesync_disable(struct rte_eth_dev *dev)
+ 	IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, 0);
+ 
+ 	val = IGC_READ_REG(hw, IGC_RXPBS);
+-	val &= IGC_RXPBS_CFG_TS_EN;
++	val &= ~IGC_RXPBS_CFG_TS_EN;
+ 	IGC_WRITE_REG(hw, IGC_RXPBS, val);
+ 
+ 	val = IGC_READ_REG(hw, IGC_SRRCTL(0));
+diff --git a/dpdk/drivers/net/ionic/ionic_ethdev.c b/dpdk/drivers/net/ionic/ionic_ethdev.c
+index 340fd0cd59..4ec9598b8e 100644
+--- a/dpdk/drivers/net/ionic/ionic_ethdev.c
++++ b/dpdk/drivers/net/ionic/ionic_ethdev.c
+@@ -561,7 +561,7 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
+ 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+ 	struct ionic_adapter *adapter = lif->adapter;
+ 	struct ionic_identity *ident = &adapter->ident;
+-	int i, num;
++	int i, j, num;
+ 	uint16_t tbl_sz = rte_le_to_cpu_16(ident->lif.eth.rss_ind_tbl_sz);
+ 
+ 	IONIC_PRINT_CALL();
+@@ -582,9 +582,10 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
+ 	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
+ 
+ 	for (i = 0; i < num; i++) {
+-		memcpy(reta_conf->reta,
+-			&lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+-			RTE_ETH_RETA_GROUP_SIZE);
++		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
++			reta_conf->reta[j] =
++				lif->rss_ind_tbl[(i * RTE_ETH_RETA_GROUP_SIZE) + j];
++		}
+ 		reta_conf++;
+ 	}
+ 
+@@ -969,19 +970,21 @@ ionic_dev_close(struct rte_eth_dev *eth_dev)
+ 
+ 	ionic_lif_stop(lif);
+ 
+-	ionic_lif_free_queues(lif);
+-
+ 	IONIC_PRINT(NOTICE, "Removing device %s", eth_dev->device->name);
+ 	if (adapter->intf->unconfigure_intr)
+ 		(*adapter->intf->unconfigure_intr)(adapter);
+ 
+-	rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit);
+-
+ 	ionic_port_reset(adapter);
+ 	ionic_reset(adapter);
++
++	ionic_lif_free_queues(lif);
++	ionic_lif_deinit(lif);
++	ionic_lif_free(lif); /* Does not free LIF object */
++
+ 	if (adapter->intf->unmap_bars)
+ 		(*adapter->intf->unmap_bars)(adapter);
+ 
++	lif->adapter = NULL;
+ 	rte_free(adapter);
+ 
+ 	return 0;
+@@ -1058,21 +1061,18 @@ err:
+ static int
+ eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev)
+ {
+-	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+-	struct ionic_adapter *adapter = lif->adapter;
+-
+ 	IONIC_PRINT_CALL();
+ 
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return 0;
+ 
+-	adapter->lif = NULL;
+-
+-	ionic_lif_deinit(lif);
+-	ionic_lif_free(lif);
++	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
++		ionic_dev_close(eth_dev);
+ 
+-	if (!(lif->state & IONIC_LIF_F_FW_RESET))
+-		ionic_lif_reset(lif);
++	eth_dev->dev_ops = NULL;
++	eth_dev->rx_pkt_burst = NULL;
++	eth_dev->tx_pkt_burst = NULL;
++	eth_dev->tx_pkt_prepare = NULL;
+ 
+ 	return 0;
+ }
+@@ -1227,17 +1227,18 @@ eth_ionic_dev_remove(struct rte_device *rte_dev)
+ {
+ 	char name[RTE_ETH_NAME_MAX_LEN];
+ 	struct rte_eth_dev *eth_dev;
++	int ret = 0;
+ 
+ 	/* Adapter lookup is using the eth_dev name */
+ 	snprintf(name, sizeof(name), "%s_lif", rte_dev->name);
+ 
+ 	eth_dev = rte_eth_dev_allocated(name);
+ 	if (eth_dev)
+-		ionic_dev_close(eth_dev);
++		ret = rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit);
+ 	else
+ 		IONIC_PRINT(DEBUG, "Cannot find device %s", rte_dev->name);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ RTE_LOG_REGISTER_DEFAULT(ionic_logtype, NOTICE);
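[Annotation] The RETA query fix replaces a memcpy with an element-wise copy because, by the look of the hunk, the device indirection table and the reta_conf->reta array use different element widths, so a byte copy both truncates and scrambles the entries. A sketch under that assumption:

#include <stdint.h>
#include <stdio.h>

#define GROUP_SIZE 4	/* illustrative group size */

int main(void)
{
	uint8_t  tbl[GROUP_SIZE]  = {1, 2, 3, 4};	/* device table, 8-bit entries */
	uint16_t reta[GROUP_SIZE] = {0};		/* API table, 16-bit entries */

	/* memcpy(reta, tbl, GROUP_SIZE) would fill only half of reta and
	 * pack two 8-bit entries into each 16-bit slot; widening has to
	 * be element-wise: */
	for (int j = 0; j < GROUP_SIZE; j++)
		reta[j] = tbl[j];

	for (int j = 0; j < GROUP_SIZE; j++)
		printf("%u ", reta[j]);			/* prints: 1 2 3 4 */
	printf("\n");
	return 0;
}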
+diff --git a/dpdk/drivers/net/ionic/ionic_rxtx.c b/dpdk/drivers/net/ionic/ionic_rxtx.c
+index b9e73b4871..170d3b0802 100644
+--- a/dpdk/drivers/net/ionic/ionic_rxtx.c
++++ b/dpdk/drivers/net/ionic/ionic_rxtx.c
+@@ -26,38 +26,40 @@
+ #include "ionic_logs.h"
+ 
+ static void
+-ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
++ionic_empty_array(void **array, uint32_t free_idx, uint32_t zero_idx)
+ {
+ 	uint32_t i;
+ 
+-	for (i = idx; i < cnt; i++)
++	for (i = 0; i < free_idx; i++)
+ 		if (array[i])
+ 			rte_pktmbuf_free_seg(array[i]);
+ 
+-	memset(array, 0, sizeof(void *) * cnt);
++	memset(array, 0, sizeof(void *) * zero_idx);
+ }
+ 
+ static void __rte_cold
+ ionic_tx_empty(struct ionic_tx_qcq *txq)
+ {
+ 	struct ionic_queue *q = &txq->qcq.q;
++	uint32_t info_len = q->num_descs * q->num_segs;
+ 
+-	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
++	ionic_empty_array(q->info, info_len, info_len);
+ }
+ 
+ static void __rte_cold
+ ionic_rx_empty(struct ionic_rx_qcq *rxq)
+ {
+ 	struct ionic_queue *q = &rxq->qcq.q;
++	uint32_t info_len = q->num_descs * q->num_segs;
+ 
+ 	/*
+ 	 * Walk the full info array so that the clean up includes any
+ 	 * fragments that were left dangling for later reuse
+ 	 */
+-	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
++	ionic_empty_array(q->info, info_len, info_len);
+ 
+-	ionic_empty_array((void **)rxq->mbs,
+-			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
++	ionic_empty_array((void **)rxq->mbs, rxq->mb_idx,
++			IONIC_MBUF_BULK_ALLOC);
+ 	rxq->mb_idx = 0;
+ }
+ 
+@@ -752,7 +754,7 @@ ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+ {
+ 	struct ionic_rx_qcq *rxq = rx_queue;
+ 	struct ionic_qcq *qcq = &rxq->qcq;
+-	struct ionic_rxq_comp *cq_desc;
++	volatile struct ionic_rxq_comp *cq_desc;
+ 	uint16_t mask, head, tail, pos;
+ 	bool done_color;
+ 
+@@ -791,7 +793,7 @@ ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+ {
+ 	struct ionic_tx_qcq *txq = tx_queue;
+ 	struct ionic_qcq *qcq = &txq->qcq;
+-	struct ionic_txq_comp *cq_desc;
++	volatile struct ionic_txq_comp *cq_desc;
+ 	uint16_t mask, head, tail, pos, cq_pos;
+ 	bool done_color;
+ 
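[Annotation] The descriptor-status fixes above (and the matching rxtx_sg/rxtx_simple hunks below) qualify the completion-descriptor pointers as volatile because the device writes these ring entries while the CPU polls them. A minimal illustration of why the qualifier matters; the struct layout here is invented:

#include <stdint.h>

struct cq_desc {
	uint8_t color;	/* toggled by the device on each ring wrap */
	uint8_t status;
};

/* Without "volatile" the compiler may hoist this load out of a polling
 * loop and spin on a stale value; the qualifier forces a fresh read on
 * every iteration. (Real drivers also need ordering/barriers.) */
static int completion_done(volatile struct cq_desc *cq_desc,
			   uint8_t done_color)
{
	return (cq_desc->color & 0x1) == done_color;
}

int main(void)
{
	struct cq_desc d = { .color = 1, .status = 0 };

	while (!completion_done(&d, 1))
		;	/* a real driver would poll the device ring here */
	return 0;
}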
+diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c
+index ab8e56e91c..241b6f8587 100644
+--- a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c
++++ b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c
+@@ -27,7 +27,8 @@ ionic_tx_flush_sg(struct ionic_tx_qcq *txq)
+ 	struct ionic_cq *cq = &txq->qcq.cq;
+ 	struct ionic_queue *q = &txq->qcq.q;
+ 	struct rte_mbuf *txm;
+-	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
++	struct ionic_txq_comp *cq_desc_base = cq->base;
++	volatile struct ionic_txq_comp *cq_desc;
+ 	void **info;
+ 	uint32_t i;
+ 
+@@ -252,7 +253,7 @@ ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
+  */
+ static __rte_always_inline void
+ ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq,
+-		struct ionic_rxq_comp *cq_desc,
++		volatile struct ionic_rxq_comp *cq_desc,
+ 		struct ionic_rx_service *rx_svc)
+ {
+ 	struct ionic_queue *q = &rxq->qcq.q;
+@@ -438,7 +439,8 @@ ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
+ 	struct ionic_cq *cq = &rxq->qcq.cq;
+ 	struct ionic_queue *q = &rxq->qcq.q;
+ 	struct ionic_rxq_desc *q_desc_base = q->base;
+-	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
++	struct ionic_rxq_comp *cq_desc_base = cq->base;
++	volatile struct ionic_rxq_comp *cq_desc;
+ 	uint32_t work_done = 0;
+ 	uint64_t then, now, hz, delta;
+ 
+diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c
+index 5f81856256..0992177afc 100644
+--- a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c
++++ b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c
+@@ -27,7 +27,8 @@ ionic_tx_flush(struct ionic_tx_qcq *txq)
+ 	struct ionic_cq *cq = &txq->qcq.cq;
+ 	struct ionic_queue *q = &txq->qcq.q;
+ 	struct rte_mbuf *txm;
+-	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
++	struct ionic_txq_comp *cq_desc_base = cq->base;
++	volatile struct ionic_txq_comp *cq_desc;
+ 	void **info;
+ 
+ 	cq_desc = &cq_desc_base[cq->tail_idx];
+@@ -225,7 +226,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+  */
+ static __rte_always_inline void
+ ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
+-		struct ionic_rxq_comp *cq_desc,
++		volatile struct ionic_rxq_comp *cq_desc,
+ 		struct ionic_rx_service *rx_svc)
+ {
+ 	struct ionic_queue *q = &rxq->qcq.q;
+@@ -359,7 +360,8 @@ ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
+ 	struct ionic_cq *cq = &rxq->qcq.cq;
+ 	struct ionic_queue *q = &rxq->qcq.q;
+ 	struct ionic_rxq_desc *q_desc_base = q->base;
+-	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
++	struct ionic_rxq_comp *cq_desc_base = cq->base;
++	volatile struct ionic_rxq_comp *cq_desc;
+ 	uint32_t work_done = 0;
+ 	uint64_t then, now, hz, delta;
+ 
+diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c
+index 74c5db16fa..56267bb00d 100644
+--- a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c
++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c
+@@ -432,8 +432,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+ 	case TN1010_PHY_ID:
+ 		phy_type = ixgbe_phy_tn;
+ 		break;
+-	case X550_PHY_ID2:
+-	case X550_PHY_ID3:
++	case X550_PHY_ID:
+ 	case X540_PHY_ID:
+ 		phy_type = ixgbe_phy_aq;
+ 		break;
+@@ -915,6 +914,10 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+ 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ 
+ 	switch (hw->mac.type) {
++	case ixgbe_mac_X550:
++		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
++		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
++		break;
+ 	case ixgbe_mac_X550EM_x:
+ 	case ixgbe_mac_X550EM_a:
+ 		hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
+diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h
+index 1094df5891..f709681df2 100644
+--- a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h
++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h
+@@ -1664,6 +1664,7 @@ struct ixgbe_dmac_config {
+ #define TN1010_PHY_ID	0x00A19410
+ #define TNX_FW_REV	0xB
+ #define X540_PHY_ID	0x01540200
++#define X550_PHY_ID	0x01540220
+ #define X550_PHY_ID2	0x01540223
+ #define X550_PHY_ID3	0x01540221
+ #define X557_PHY_ID	0x01540240
+@@ -1800,7 +1801,7 @@ enum {
+ /* VFRE bitmask */
+ #define IXGBE_VFRE_ENABLE_ALL	0xFFFFFFFF
+ 
+-#define IXGBE_VF_INIT_TIMEOUT	200 /* Number of retries to clear RSTI */
++#define IXGBE_VF_INIT_TIMEOUT	10000 /* Number of retries to clear RSTI */
+ 
+ /* RDHMPN and TDHMPN bitmasks */
+ #define IXGBE_RDHMPN_RDICADDR		0x007FF800
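[Annotation] Among the ixgbe base-code changes, IXGBE_VF_INIT_TIMEOUT grows from 200 to 10000 retries so a slow VF reset is not misreported as a failure. A toy version of the poll-until-clear loop, with a fake register read standing in for the RSTI check:

#include <stdbool.h>

#define VF_INIT_TIMEOUT 10000	/* polls, matching the new constant */

static bool rsti_cleared(void)	/* stand-in for reading the RSTI bit */
{
	static int polls;
	return ++polls > 350;	/* pretend the reset takes 350 polls */
}

static int wait_for_reset(void)
{
	for (int i = 0; i < VF_INIT_TIMEOUT; i++) {
		if (rsti_cleared())
			return 0;
		/* a short per-iteration delay would go here */
	}
	return -1;		/* timed out */
}

int main(void)
{
	/* With the old budget of 200 polls this reset would be reported
	 * as a failure; with 10000 it completes normally. */
	return wait_for_reset();
}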
+diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c
+index 5e3ae1b519..11dbbe2a86 100644
+--- a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c
++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c
+@@ -585,7 +585,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ 	case IXGBE_LINKS_SPEED_10G_82599:
+ 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+-		if (hw->mac.type >= ixgbe_mac_X550) {
++		if (hw->mac.type >= ixgbe_mac_X550_vf) {
+ 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ 		}
+@@ -595,7 +595,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ 		break;
+ 	case IXGBE_LINKS_SPEED_100_82599:
+ 		*speed = IXGBE_LINK_SPEED_100_FULL;
+-		if (hw->mac.type == ixgbe_mac_X550) {
++		if (hw->mac.type == ixgbe_mac_X550_vf) {
+ 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
+ 		}
+@@ -603,7 +603,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ 	case IXGBE_LINKS_SPEED_10_X550EM_A:
+ 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+ 		/* Since Reserved in older MAC's */
+-		if (hw->mac.type >= ixgbe_mac_X550)
++		if (hw->mac.type >= ixgbe_mac_X550_vf)
+ 			*speed = IXGBE_LINK_SPEED_10_FULL;
+ 		break;
+ 	default:
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
+index d6cf00317e..a44497ce51 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
+@@ -1190,7 +1190,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
+ 	if (diag != IXGBE_SUCCESS) {
+ 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
+-		return -EIO;
++		ret = -EIO;
++		goto err_exit;
+ 	}
+ 
+ #ifdef RTE_LIBRTE_IXGBE_BYPASS
+@@ -1228,7 +1229,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
+ 	if (diag) {
+ 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
+-		return -EIO;
++		ret = -EIO;
++		goto err_exit;
+ 	}
+ 
+ 	/* Reset the hw statistics */
+@@ -1248,7 +1250,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 			     "Failed to allocate %u bytes needed to store "
+ 			     "MAC addresses",
+ 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto err_exit;
+ 	}
+ 	/* Copy the permanent MAC address */
+ 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
+@@ -1263,7 +1266,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 			     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ 		rte_free(eth_dev->data->mac_addrs);
+ 		eth_dev->data->mac_addrs = NULL;
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto err_exit;
+ 	}
+ 
+ 	/* initialize the vfta */
+@@ -1347,6 +1351,11 @@ err_pf_host_init:
+ 	eth_dev->data->mac_addrs = NULL;
+ 	rte_free(eth_dev->data->hash_mac_addrs);
+ 	eth_dev->data->hash_mac_addrs = NULL;
++err_exit:
++#ifdef RTE_LIB_SECURITY
++	rte_free(eth_dev->security_ctx);
++	eth_dev->security_ctx = NULL;
++#endif
+ 	return ret;
+ }
+ 
+@@ -4280,6 +4289,9 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ 	int wait = 1;
+ 	u32 esdp_reg;
+ 
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++		return -1;
++
+ 	memset(&link, 0, sizeof(link));
+ 	link.link_status = RTE_ETH_LINK_DOWN;
+ 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+@@ -4654,14 +4666,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ 			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+ 
+ 		ixgbe_dev_link_status_print(dev);
+-		if (rte_eal_alarm_set(timeout * 1000,
+-				      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
+-			PMD_DRV_LOG(ERR, "Error setting alarm");
+-		else {
+-			/* remember original mask */
+-			intr->mask_original = intr->mask;
+-			/* only disable lsc interrupt */
+-			intr->mask &= ~IXGBE_EIMS_LSC;
++
++		/* Don't program delayed handler if LSC interrupt is disabled.
++		 * It means one is already programmed.
++		 */
++		if (intr->mask & IXGBE_EIMS_LSC) {
++			if (rte_eal_alarm_set(timeout * 1000,
++					      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
++				PMD_DRV_LOG(ERR, "Error setting alarm");
++			else {
++				/* remember original mask */
++				intr->mask_original = intr->mask;
++				/* only disable lsc interrupt */
++				intr->mask &= ~IXGBE_EIMS_LSC;
++			}
+ 		}
+ 	}
+ 
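[Annotation] The interrupt-action hunk arms the delayed LSC handler only while the LSC bit is still enabled, since a cleared bit indicates an alarm is already pending. A compact model of that re-arm guard; types and names are illustrative:

#include <assert.h>
#include <stdbool.h>

#define EIMS_LSC 0x1u

struct intr_state { unsigned int mask; };

/* The cleared LSC bit doubles as "a delayed handler is pending", so a
 * second interrupt must not stack another alarm on top of it. */
static bool arm_delayed_handler(struct intr_state *intr)
{
	if (!(intr->mask & EIMS_LSC))
		return false;		/* already pending: skip re-arming */
	intr->mask &= ~EIMS_LSC;	/* mark pending by masking LSC */
	/* ... the rte_eal_alarm_set() equivalent would go here ... */
	return true;
}

int main(void)
{
	struct intr_state intr = { .mask = EIMS_LSC };

	assert(arm_delayed_handler(&intr));	/* first LSC arms the alarm */
	assert(!arm_delayed_handler(&intr));	/* second one is a no-op */
	return 0;
}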
+diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
+index 90b0a7004f..f6c17d4efb 100644
+--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
+@@ -5844,6 +5844,25 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
+ 		IXGBE_PSRTYPE_RQPL_SHIFT;
+ 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+ 
++	/* Initialize RSS for x550_vf cards if enabled */
++	switch (hw->mac.type) {
++	case ixgbe_mac_X550_vf:
++	case ixgbe_mac_X550EM_x_vf:
++	case ixgbe_mac_X550EM_a_vf:
++		switch (dev->data->dev_conf.rxmode.mq_mode) {
++		case RTE_ETH_MQ_RX_RSS:
++		case RTE_ETH_MQ_RX_DCB_RSS:
++		case RTE_ETH_MQ_RX_VMDQ_RSS:
++			ixgbe_rss_configure(dev);
++			break;
++		default:
++			break;
++		}
++		break;
++	default:
++		break;
++	}
++
+ 	ixgbe_set_rx_function(dev);
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/mana/mana.c b/dpdk/drivers/net/mana/mana.c
+index 781ed76139..65ca139be5 100644
+--- a/dpdk/drivers/net/mana/mana.c
++++ b/dpdk/drivers/net/mana/mana.c
+@@ -296,8 +296,8 @@ mana_dev_info_get(struct rte_eth_dev *dev,
+ 	dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE;
+ 	dev_info->max_rx_pktlen = MANA_MAX_MTU + RTE_ETHER_HDR_LEN;
+ 
+-	dev_info->max_rx_queues = priv->max_rx_queues;
+-	dev_info->max_tx_queues = priv->max_tx_queues;
++	dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, UINT16_MAX);
++	dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, UINT16_MAX);
+ 
+ 	dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR;
+ 	dev_info->max_hash_mac_addrs = 0;
+@@ -338,16 +338,20 @@ mana_dev_info_get(struct rte_eth_dev *dev,
+ 
+ 	/* Buffer limits */
+ 	dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
+-	dev_info->rx_desc_lim.nb_max = priv->max_rx_desc;
++	dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, UINT16_MAX);
+ 	dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
+-	dev_info->rx_desc_lim.nb_seg_max = priv->max_recv_sge;
+-	dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge;
++	dev_info->rx_desc_lim.nb_seg_max =
++		RTE_MIN(priv->max_recv_sge, UINT16_MAX);
++	dev_info->rx_desc_lim.nb_mtu_seg_max =
++		RTE_MIN(priv->max_recv_sge, UINT16_MAX);
+ 
+ 	dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
+-	dev_info->tx_desc_lim.nb_max = priv->max_tx_desc;
++	dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, UINT16_MAX);
+ 	dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
+-	dev_info->tx_desc_lim.nb_seg_max = priv->max_send_sge;
+-	dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge;
++	dev_info->tx_desc_lim.nb_seg_max =
++		RTE_MIN(priv->max_send_sge, UINT16_MAX);
++	dev_info->tx_desc_lim.nb_mtu_seg_max =
++		RTE_MIN(priv->max_send_sge, UINT16_MAX);
+ 
+ 	/* Speed */
+ 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
+@@ -707,7 +711,7 @@ mana_dev_stats_reset(struct rte_eth_dev *dev __rte_unused)
+ static int
+ mana_get_ifname(const struct mana_priv *priv, char (*ifname)[IF_NAMESIZE])
+ {
+-	int ret;
++	int ret = -ENODEV;
+ 	DIR *dir;
+ 	struct dirent *dent;
+ 
+@@ -1385,9 +1389,9 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr,
+ 	priv->max_mr = dev_attr->orig_attr.max_mr;
+ 	priv->max_mr_size = dev_attr->orig_attr.max_mr_size;
+ 
+-	DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d",
++	DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d mr %" PRIu64,
+ 		name, priv->max_rx_queues, priv->max_rx_desc,
+-		priv->max_send_sge);
++		priv->max_send_sge, priv->max_mr_size);
+ 
+ 	rte_eth_copy_pci_info(eth_dev, pci_dev);
+ 
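[Annotation] The mana hunks clamp 32-bit device capability values with RTE_MIN(..., UINT16_MAX) before storing them in 16-bit dev_info fields, where a plain assignment would silently truncate. A tiny demonstration of the failure mode and the clamp; the macro name is made up:

#include <stdint.h>
#include <assert.h>

/* The device reports 32-bit capability values, but fields such as
 * max_rx_queues are 16-bit, so assignment alone wraps the value. */
#define CLAMP_U16(v) ((uint16_t)((v) < UINT16_MAX ? (v) : UINT16_MAX))

int main(void)
{
	uint32_t reported = 70000;	/* hypothetical device value */

	assert((uint16_t)reported != reported);	/* the silent truncation */
	assert(CLAMP_U16(reported) == UINT16_MAX);
	assert(CLAMP_U16(512) == 512);		/* small values unchanged */
	return 0;
}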
+diff --git a/dpdk/drivers/net/mana/mana.h b/dpdk/drivers/net/mana/mana.h
+index 6836872dc2..822b8a1f15 100644
+--- a/dpdk/drivers/net/mana/mana.h
++++ b/dpdk/drivers/net/mana/mana.h
+@@ -522,9 +522,9 @@ void mana_del_pmd_mr(struct mana_mr_cache *mr);
+ void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque,
+ 			   struct rte_mempool_memhdr *memhdr, unsigned int idx);
+ 
+-struct mana_mr_cache *mana_mr_btree_lookup(struct mana_mr_btree *bt,
+-					   uint16_t *idx,
+-					   uintptr_t addr, size_t len);
++int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
++			 uintptr_t addr, size_t len,
++			 struct mana_mr_cache **cache);
+ int mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry);
+ int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket);
+ void mana_mr_btree_free(struct mana_mr_btree *bt);
+diff --git a/dpdk/drivers/net/mana/mr.c b/dpdk/drivers/net/mana/mr.c
+index b8e6ea0bbf..eb6d073a95 100644
+--- a/dpdk/drivers/net/mana/mr.c
++++ b/dpdk/drivers/net/mana/mr.c
+@@ -40,7 +40,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
+ 	struct ibv_mr *ibv_mr;
+ 	struct mana_range ranges[pool->nb_mem_chunks];
+ 	uint32_t i;
+-	struct mana_mr_cache *mr;
++	struct mana_mr_cache mr;
+ 	int ret;
+ 
+ 	rte_mempool_mem_iter(pool, mana_mempool_chunk_cb, ranges);
+@@ -75,14 +75,13 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
+ 			DP_LOG(DEBUG, "MR lkey %u addr %p len %zu",
+ 			       ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
+ 
+-			mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0);
+-			mr->lkey = ibv_mr->lkey;
+-			mr->addr = (uintptr_t)ibv_mr->addr;
+-			mr->len = ibv_mr->length;
+-			mr->verb_obj = ibv_mr;
++			mr.lkey = ibv_mr->lkey;
++			mr.addr = (uintptr_t)ibv_mr->addr;
++			mr.len = ibv_mr->length;
++			mr.verb_obj = ibv_mr;
+ 
+ 			rte_spinlock_lock(&priv->mr_btree_lock);
+-			ret = mana_mr_btree_insert(&priv->mr_btree, mr);
++			ret = mana_mr_btree_insert(&priv->mr_btree, &mr);
+ 			rte_spinlock_unlock(&priv->mr_btree_lock);
+ 			if (ret) {
+ 				ibv_dereg_mr(ibv_mr);
+@@ -90,7 +89,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
+ 				return ret;
+ 			}
+ 
+-			ret = mana_mr_btree_insert(local_tree, mr);
++			ret = mana_mr_btree_insert(local_tree, &mr);
+ 			if (ret) {
+ 				/* Don't need to clean up MR as it's already
+ 				 * in the global tree
+@@ -138,8 +137,12 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv,
+ 
+ try_again:
+ 	/* First try to find the MR in local queue tree */
+-	mr = mana_mr_btree_lookup(local_mr_btree, &idx,
+-				  (uintptr_t)mbuf->buf_addr, mbuf->buf_len);
++	ret = mana_mr_btree_lookup(local_mr_btree, &idx,
++				   (uintptr_t)mbuf->buf_addr, mbuf->buf_len,
++				   &mr);
++	if (ret)
++		return NULL;
++
+ 	if (mr) {
+ 		DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIxPTR " len %zu",
+ 		       mr->lkey, mr->addr, mr->len);
+@@ -148,11 +151,14 @@ try_again:
+ 
+ 	/* If not found, try to find the MR in global tree */
+ 	rte_spinlock_lock(&priv->mr_btree_lock);
+-	mr = mana_mr_btree_lookup(&priv->mr_btree, &idx,
+-				  (uintptr_t)mbuf->buf_addr,
+-				  mbuf->buf_len);
++	ret = mana_mr_btree_lookup(&priv->mr_btree, &idx,
++				   (uintptr_t)mbuf->buf_addr,
++				   mbuf->buf_len, &mr);
+ 	rte_spinlock_unlock(&priv->mr_btree_lock);
+ 
++	if (ret)
++		return NULL;
++
+ 	/* If found in the global tree, add it to the local tree */
+ 	if (mr) {
+ 		ret = mana_mr_btree_insert(local_mr_btree, mr);
+@@ -228,22 +234,23 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n)
+ /*
+  * Look for a region of memory in MR cache.
+  */
+-struct mana_mr_cache *
+-mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
+-		     uintptr_t addr, size_t len)
++int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
++			 uintptr_t addr, size_t len,
++			 struct mana_mr_cache **cache)
+ {
+ 	struct mana_mr_cache *table;
+ 	uint16_t n;
+ 	uint16_t base = 0;
+ 	int ret;
+ 
+-	n = bt->len;
++	*cache = NULL;
+ 
++	n = bt->len;
+ 	/* Try to double the cache if it's full */
+ 	if (n == bt->size) {
+ 		ret = mana_mr_btree_expand(bt, bt->size << 1);
+ 		if (ret)
+-			return NULL;
++			return ret;
+ 	}
+ 
+ 	table = bt->table;
+@@ -262,14 +269,16 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
+ 
+ 	*idx = base;
+ 
+-	if (addr + len <= table[base].addr + table[base].len)
+-		return &table[base];
++	if (addr + len <= table[base].addr + table[base].len) {
++		*cache = &table[base];
++		return 0;
++	}
+ 
+ 	DP_LOG(DEBUG,
+ 	       "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found",
+ 	       addr, len, *idx, addr + len);
+ 
+-	return NULL;
++	return 0;
+ }
+ 
+ int
+@@ -314,14 +323,21 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry)
+ 	struct mana_mr_cache *table;
+ 	uint16_t idx = 0;
+ 	uint16_t shift;
++	int ret;
++
++	ret = mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len, &table);
++	if (ret)
++		return ret;
+ 
+-	if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) {
++	if (table) {
+ 		DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree",
+ 		       entry->addr, entry->len);
+ 		return 0;
+ 	}
+ 
+ 	if (bt->len >= bt->size) {
++		DP_LOG(ERR, "Btree overflow detected len %u size %u",
++		       bt->len, bt->size);
+ 		bt->overflow = 1;
+ 		return -1;
+ 	}
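[Annotation] The mr.c rework changes mana_mr_btree_lookup() from returning a bare pointer to returning a status code with an out-parameter, so callers can tell "not found" apart from "expansion failed". A generic sketch of that API shape:

#include <assert.h>
#include <errno.h>
#include <stddef.h>

struct entry { int key; };

/* A pointer return conflates "not found" with a hard failure (e.g.
 * the cache could not be expanded); a status code plus out-parameter
 * keeps the two distinct. */
static int lookup(struct entry *table, size_t n, int key,
		  struct entry **out)
{
	*out = NULL;
	if (table == NULL)
		return -ENOMEM;		/* hard failure: caller must bail */
	for (size_t i = 0; i < n; i++)
		if (table[i].key == key) {
			*out = &table[i];
			break;
		}
	return 0;			/* success; *out may still be NULL */
}

int main(void)
{
	struct entry t[] = { { 1 }, { 2 } };
	struct entry *hit;

	assert(lookup(t, 2, 2, &hit) == 0 && hit == &t[1]);	/* found */
	assert(lookup(t, 2, 9, &hit) == 0 && hit == NULL);	/* not found */
	assert(lookup(NULL, 0, 1, &hit) == -ENOMEM);		/* failure */
	return 0;
}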
+diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c
+index 7cc8c0da91..18377d9caf 100644
+--- a/dpdk/drivers/net/memif/rte_eth_memif.c
++++ b/dpdk/drivers/net/memif/rte_eth_memif.c
+@@ -265,8 +265,6 @@ memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_q
+ 	cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ 	while (mq->last_tail != cur_tail) {
+ 		RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
+-		/* Decrement refcnt and free mbuf. (current segment) */
+-		rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1);
+ 		rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
+ 		mq->last_tail++;
+ 	}
+@@ -684,7 +682,7 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 		n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+ 	}
+ 
+-	uint8_t i;
++	uint16_t i;
+ 	struct rte_mbuf **buf_tmp = bufs;
+ 	mbuf_head = *buf_tmp++;
+ 	struct rte_mempool *mp = mbuf_head->pool;
+@@ -825,10 +823,6 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq
+ next_in_chain:
+ 	/* store pointer to mbuf to free it later */
+ 	mq->buffers[slot & mask] = mbuf;
+-	/* Increment refcnt to make sure the buffer is not freed before server
+-	 * receives it. (current segment)
+-	 */
+-	rte_mbuf_refcnt_update(mbuf, 1);
+ 	/* populate descriptor */
+ 	d0 = &ring->desc[slot & mask];
+ 	d0->length = rte_pktmbuf_data_len(mbuf);
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr.h b/dpdk/drivers/net/mlx5/hws/mlx5dr.h
+index d88f73ab57..f003d9f446 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr.h
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr.h
+@@ -80,6 +80,7 @@ enum mlx5dr_action_aso_ct_flags {
+ };
+ 
+ enum mlx5dr_match_template_flags {
++	MLX5DR_MATCH_TEMPLATE_FLAG_NONE = 0,
+ 	/* Allow relaxed matching by skipping derived dependent match fields. */
+ 	MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH = 1,
+ };
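[Annotation] The hunk above adds an explicit zero member to the flags enum so "no flags" can be spelled as a named enumerator rather than a bare 0. In miniature:

#include <assert.h>

enum tmpl_flags {
	TMPL_FLAG_NONE    = 0,		/* explicit "no flags" member */
	TMPL_FLAG_RELAXED = 1 << 0,
};

int main(void)
{
	/* Callers passing "no flags" can now name the value, which also
	 * keeps strict enum-assignment diagnostics quiet. */
	enum tmpl_flags f = TMPL_FLAG_NONE;

	assert((f & TMPL_FLAG_RELAXED) == 0);
	return 0;
}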
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c
+index 862ee3e332..a068f100c5 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c
+@@ -1465,7 +1465,9 @@ mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action,
+ 
+ 	/* Create a full modify header action list in case shared */
+ 	mlx5dr_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
+-	mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
++
++	if (action->flags & MLX5DR_ACTION_FLAG_SHARED)
++		mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
+ 
+ 	/* All DecapL3 cases require the same max arg size */
+ 	arg_obj = mlx5dr_arg_create_modify_header_arg(ctx,
+@@ -1489,6 +1491,7 @@ mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action,
+ 
+ 		action[i].modify_header.max_num_of_actions = num_of_actions;
+ 		action[i].modify_header.num_of_actions = num_of_actions;
++		action[i].modify_header.num_of_patterns = num_of_hdrs;
+ 		action[i].modify_header.arg_obj = arg_obj;
+ 		action[i].modify_header.pat_obj = pat_obj;
+ 		action[i].modify_header.require_reparse =
+@@ -2547,6 +2550,7 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
+ 	case MLX5DR_ACTION_TYP_ASO_CT:
+ 	case MLX5DR_ACTION_TYP_PUSH_VLAN:
+ 	case MLX5DR_ACTION_TYP_REMOVE_HEADER:
++	case MLX5DR_ACTION_TYP_VPORT:
+ 		mlx5dr_action_destroy_stcs(action);
+ 		break;
+ 	case MLX5DR_ACTION_TYP_DEST_ROOT:
+@@ -2600,6 +2604,9 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
+ 			if (action->ipv6_route_ext.action[i])
+ 				mlx5dr_action_destroy(action->ipv6_route_ext.action[i]);
+ 		break;
++	default:
++		DR_LOG(ERR, "Not supported action type: %d", action->type);
++		assert(false);
+ 	}
+ }
+ 
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c
+index 876a47147d..0fb764df32 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c
+@@ -1027,7 +1027,8 @@ int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
+ 
+ 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
+ 	if (ret) {
+-		DR_LOG(ERR, "Failed to write GTA WQE using FW");
++		DR_LOG(ERR, "Failed to write GTA WQE using FW (syndrome: %#x)",
++		       mlx5dr_cmd_get_syndrome(out));
+ 		rte_errno = errno;
+ 		return rte_errno;
+ 	}
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c
+index 15d53c578a..7f120b3b1b 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c
+@@ -263,6 +263,7 @@ struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
+ free_caps:
+ 	simple_free(ctx->caps);
+ free_ctx:
++	pthread_spin_destroy(&ctx->ctrl_lock);
+ 	simple_free(ctx);
+ 	return NULL;
+ }
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c
+index 11557bcab8..f11c81ffee 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c
+@@ -150,7 +150,7 @@ mlx5dr_debug_dump_matcher_action_template(FILE *f, struct mlx5dr_matcher *matche
+ 			      MLX5DR_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
+ 			      (uint64_t)(uintptr_t)at,
+ 			      (uint64_t)(uintptr_t)matcher,
+-			      at->only_term ? 0 : 1,
++			      at->only_term,
+ 			      is_root ? 0 : at->num_of_action_stes,
+ 			      at->num_actions);
+ 		if (ret < 0) {
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c
+index 0b60479406..031e87bc0c 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c
+@@ -8,8 +8,9 @@
+ #define BAD_PORT	0xBAD
+ #define ETH_TYPE_IPV4_VXLAN	0x0800
+ #define ETH_TYPE_IPV6_VXLAN	0x86DD
+-#define ETH_VXLAN_DEFAULT_PORT	4789
+-#define IP_UDP_PORT_MPLS	6635
++#define UDP_GTPU_PORT   2152
++#define UDP_VXLAN_PORT  4789
++#define UDP_PORT_MPLS   6635
+ #define UDP_ROCEV2_PORT	4791
+ #define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+ 
+@@ -41,6 +42,10 @@
+ 				  (bit_off))); \
+ 	} while (0)
+ 
++/* Getter function based on bit offset and mask, for a 32-bit DW. */
++#define DR_GET_32(p, byte_off, bit_off, mask) \
++	((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
++
+ /* Setter function based on bit offset and mask */
+ #define DR_SET(p, v, byte_off, bit_off, mask) \
+ 	do { \
+@@ -158,7 +163,7 @@ struct mlx5dr_definer_conv_data {
+ 	X(SET,		tcp_protocol,		STE_TCP,		rte_flow_item_tcp) \
+ 	X(SET_BE16,	tcp_src_port,		v->hdr.src_port,	rte_flow_item_tcp) \
+ 	X(SET_BE16,	tcp_dst_port,		v->hdr.dst_port,	rte_flow_item_tcp) \
+-	X(SET,		gtp_udp_port,		RTE_GTPU_UDP_PORT,	rte_flow_item_gtp) \
++	X(SET,		gtp_udp_port,		UDP_GTPU_PORT,		rte_flow_item_gtp) \
+ 	X(SET_BE32,	gtp_teid,		v->hdr.teid,		rte_flow_item_gtp) \
+ 	X(SET,		gtp_msg_type,		v->hdr.msg_type,	rte_flow_item_gtp) \
+ 	X(SET,		gtp_ext_flag,		!!v->hdr.gtp_hdr_info,	rte_flow_item_gtp) \
+@@ -166,8 +171,8 @@ struct mlx5dr_definer_conv_data {
+ 	X(SET,		gtp_ext_hdr_pdu,	v->hdr.type,		rte_flow_item_gtp_psc) \
+ 	X(SET,		gtp_ext_hdr_qfi,	v->hdr.qfi,		rte_flow_item_gtp_psc) \
+ 	X(SET,		vxlan_flags,		v->flags,		rte_flow_item_vxlan) \
+-	X(SET,		vxlan_udp_port,		ETH_VXLAN_DEFAULT_PORT,	rte_flow_item_vxlan) \
+-	X(SET,		mpls_udp_port,		IP_UDP_PORT_MPLS,	rte_flow_item_mpls) \
++	X(SET,		vxlan_udp_port,		UDP_VXLAN_PORT,		rte_flow_item_vxlan) \
++	X(SET,		mpls_udp_port,		UDP_PORT_MPLS,		rte_flow_item_mpls) \
+ 	X(SET,		source_qp,		v->queue,		mlx5_rte_flow_item_sq) \
+ 	X(SET,		tag,			v->data,		rte_flow_item_tag) \
+ 	X(SET,		metadata,		v->data,		rte_flow_item_meta) \
+@@ -183,6 +188,8 @@ struct mlx5dr_definer_conv_data {
+ 	X(SET,		ib_l4_udp_port,		UDP_ROCEV2_PORT,	rte_flow_item_ib_bth) \
+ 	X(SET,		ib_l4_opcode,		v->hdr.opcode,		rte_flow_item_ib_bth) \
+ 	X(SET,		ib_l4_bth_a,		v->hdr.a,		rte_flow_item_ib_bth) \
++	X(SET,		cvlan,			STE_CVLAN,		rte_flow_item_vlan) \
++	X(SET_BE16,	inner_type,		v->inner_type,		rte_flow_item_vlan) \
+ 
+ /* Item set function format */
+ #define X(set_type, func_name, value, item_type) \
+@@ -377,7 +384,7 @@ mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
+ {
+ 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I);
+ 	const struct rte_flow_item_integrity *v = item_spec;
+-	uint32_t ok1_bits = 0;
++	uint32_t ok1_bits = DR_GET_32(tag, fc->byte_off, fc->bit_off, fc->bit_mask);
+ 
+ 	if (v->l3_ok)
+ 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) :
+@@ -769,6 +776,15 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd,
+ 	struct mlx5dr_definer_fc *fc;
+ 	bool inner = cd->tunnel;
+ 
++	if (!cd->relaxed) {
++		/* Mark packet as tagged (CVLAN) */
++		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
++		fc->item_idx = item_idx;
++		fc->tag_mask_set = &mlx5dr_definer_ones_set;
++		fc->tag_set = &mlx5dr_definer_cvlan_set;
++		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
++	}
++
+ 	if (!m)
+ 		return 0;
+ 
+@@ -777,8 +793,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd,
+ 		return rte_errno;
+ 	}
+ 
+-	if (!cd->relaxed || m->has_more_vlan) {
+-		/* Mark packet as tagged (CVLAN or SVLAN) even if TCI is not specified.*/
++	if (m->has_more_vlan) {
+ 		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
+ 		fc->item_idx = item_idx;
+ 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+@@ -796,7 +811,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd,
+ 	if (m->hdr.eth_proto) {
+ 		fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];
+ 		fc->item_idx = item_idx;
+-		fc->tag_set = &mlx5dr_definer_eth_type_set;
++		fc->tag_set = &mlx5dr_definer_inner_type_set;
+ 		DR_CALC_SET(fc, eth_l2, l3_ethertype, inner);
+ 	}
+ 
+@@ -1170,6 +1185,12 @@ mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd,
+ 	const struct rte_flow_item_gtp *m = item->mask;
+ 	struct mlx5dr_definer_fc *fc;
+ 
++	if (cd->tunnel) {
++		DR_LOG(ERR, "Inner GTPU item not supported");
++		rte_errno = ENOTSUP;
++		return rte_errno;
++	}
++
+ 	/* Overwrite GTPU dest port if not present */
+ 	fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
+ 	if (!fc->tag_set && !cd->relaxed) {
+@@ -1344,9 +1365,20 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,
+ 	struct mlx5dr_definer_fc *fc;
+ 	bool inner = cd->tunnel;
+ 
+-	/* In order to match on VXLAN we must match on ether_type, ip_protocol
+-	 * and l4_dport.
+-	 */
++	if (inner) {
++		DR_LOG(ERR, "Inner VXLAN item not supported");
++		rte_errno = ENOTSUP;
++		return rte_errno;
++	}
++
++	/* In order to match on VXLAN we must match on ip_protocol and l4_dport */
++	if (m && (m->rsvd0[0] != 0 || m->rsvd0[1] != 0 || m->rsvd0[2] != 0 ||
++	    m->rsvd1 != 0)) {
++		DR_LOG(ERR, "Reserved fields are not supported");
++		rte_errno = ENOTSUP;
++		return rte_errno;
++	}
++
+ 	if (!cd->relaxed) {
+ 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+ 		if (!fc->tag_set) {
+@@ -1369,12 +1401,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,
+ 		return 0;
+ 
+ 	if (m->flags) {
+-		if (inner) {
+-			DR_LOG(ERR, "Inner VXLAN flags item not supported");
+-			rte_errno = ENOTSUP;
+-			return rte_errno;
+-		}
+-
+ 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_FLAGS];
+ 		fc->item_idx = item_idx;
+ 		fc->tag_set = &mlx5dr_definer_vxlan_flags_set;
+@@ -1384,12 +1410,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,
+ 	}
+ 
+ 	if (!is_mem_zero(m->vni, 3)) {
+-		if (inner) {
+-			DR_LOG(ERR, "Inner VXLAN vni item not supported");
+-			rte_errno = ENOTSUP;
+-			return rte_errno;
+-		}
+-
+ 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_VNI];
+ 		fc->item_idx = item_idx;
+ 		fc->tag_set = &mlx5dr_definer_vxlan_vni_set;
+@@ -2240,11 +2260,6 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
+ 	const struct rte_flow_item_esp *m = item->mask;
+ 	struct mlx5dr_definer_fc *fc;
+ 
+-	if (!cd->ctx->caps->ipsec_offload) {
+-		rte_errno = ENOTSUP;
+-		return rte_errno;
+-	}
+-
+ 	if (!m)
+ 		return 0;
+ 	if (m->hdr.spi) {
+@@ -2842,7 +2857,7 @@ mlx5dr_definer_find_best_match_fit(struct mlx5dr_context *ctx,
+ 		return 0;
+ 	}
+ 
+-	DR_LOG(ERR, "Unable to find supporting match/jumbo definer combination");
++	DR_LOG(DEBUG, "Unable to find supporting match/jumbo definer combination");
+ 	rte_errno = ENOTSUP;
+ 	return rte_errno;
+ }
+@@ -2975,7 +2990,7 @@ mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
+ 	/* Find the match definer layout for header layout match union */
+ 	ret = mlx5dr_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ 	if (ret) {
+-		DR_LOG(ERR, "Failed to create match definer from header layout");
++		DR_LOG(DEBUG, "Failed to create match definer from header layout");
+ 		goto free_fc;
+ 	}
+ 
+@@ -3191,15 +3206,18 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
+ 
+ 	/* Create optional range definers */
+ 	for (i = 0; i < matcher->num_of_mt; i++) {
+-		if (!mt[i].fcr_sz)
+-			continue;
+-
+ 		/* All must use range if requested */
+-		if (i && !mt[i - 1].range_definer) {
++		bool is_range = !!mt[i].fcr_sz;
++		bool has_range = matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
++
++		if (i && ((is_range && !has_range) || (!is_range && has_range))) {
+ 			DR_LOG(ERR, "Using range and non range templates is not allowed");
+ 			goto free_definers;
+ 		}
+ 
++		if (!mt[i].fcr_sz)
++			continue;
++
+ 		matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
+ 		/* Create definer without fcr binding, already binded */
+ 		mt[i].range_definer = mlx5dr_definer_alloc(ctx,
+@@ -3320,7 +3338,7 @@ int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
+ 
+ 	ret = mlx5dr_definer_calc_layout(matcher, &match_layout, &range_layout);
+ 	if (ret) {
+-		DR_LOG(ERR, "Failed to calculate matcher definer layout");
++		DR_LOG(DEBUG, "Failed to calculate matcher definer layout");
+ 		return ret;
+ 	}
+ 
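[Annotation] Within the definer changes, the integrity setter now seeds ok1_bits from the bits already present in the tag (via the new DR_GET_32) instead of starting from zero, so outer and inner integrity items sharing a dword no longer overwrite each other. A stand-alone read-modify-write sketch:

#include <stdint.h>
#include <assert.h>

static uint32_t get_bits(uint32_t word, int off, uint32_t mask)
{
	return (word >> off) & mask;
}

static void set_bits(uint32_t *word, uint32_t v, int off, uint32_t mask)
{
	*word = (*word & ~(mask << off)) | ((v & mask) << off);
}

int main(void)
{
	uint32_t tag = 0;

	set_bits(&tag, 0x3, 0, 0xf);		/* first writer */
	uint32_t bits = get_bits(tag, 0, 0xf);	/* read-modify-write */
	set_bits(&tag, bits | 0x8, 0, 0xf);	/* second writer keeps 0x3 */
	assert(get_bits(tag, 0, 0xf) == 0xb);	/* both items survive */
	return 0;
}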
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c
+index 4ea161eae6..36be96c668 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c
+@@ -340,7 +340,7 @@ static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher)
+ 	return 0;
+ 
+ matcher_reconnect:
+-	if (LIST_EMPTY(&tbl->head))
++	if (LIST_EMPTY(&tbl->head) || prev_matcher == matcher)
+ 		LIST_INSERT_HEAD(&matcher->tbl->head, matcher, next);
+ 	else
+ 		LIST_INSERT_AFTER(prev_matcher, matcher, next);
+@@ -807,7 +807,7 @@ static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher)
+ 	/* Calculate match, range and hash definers */
+ 	ret = mlx5dr_definer_matcher_init(ctx, matcher);
+ 	if (ret) {
+-		DR_LOG(ERR, "Failed to set matcher templates with match definers");
++		DR_LOG(DEBUG, "Failed to set matcher templates with match definers");
+ 		return ret;
+ 	}
+ 
+@@ -1171,6 +1171,13 @@ static int mlx5dr_matcher_init_root(struct mlx5dr_matcher *matcher)
+ 		return rte_errno;
+ 	}
+ 
++	ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id);
++	if (ret) {
++		DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name);
++		rte_errno = EINVAL;
++		return rte_errno;
++	}
++
+ 	mask = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +
+ 			     offsetof(struct mlx5dv_flow_match_parameters, match_buf));
+ 	if (!mask) {
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h
+index bbe313102f..c4e0cbc843 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h
+@@ -30,7 +30,6 @@ struct mlx5dr_pattern_cache {
+ struct mlx5dr_pattern_cache_item {
+ 	struct {
+ 		struct mlx5dr_devx_obj *pattern_obj;
+-		struct dr_icm_chunk *chunk;
+ 		uint8_t *data;
+ 		uint16_t num_of_actions;
+ 	} mh_data;
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c
+index fa19303b91..cc7a30d6d0 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c
+@@ -23,6 +23,9 @@ static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,
+ 	*skip_rx = false;
+ 	*skip_tx = false;
+ 
++	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
++		return;
++
+ 	if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {
+ 		v = items[mt->vport_item_id].spec;
+ 		vport = flow_hw_conv_port_id(v->port_id);
+@@ -55,14 +58,16 @@ static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,
+ 				     struct mlx5dr_rule *rule,
+ 				     const struct rte_flow_item *items,
+ 				     struct mlx5dr_match_template *mt,
+-				     void *user_data)
++				     struct mlx5dr_rule_attr *attr)
+ {
+ 	struct mlx5dr_matcher *matcher = rule->matcher;
+ 	struct mlx5dr_table *tbl = matcher->tbl;
+ 	bool skip_rx, skip_tx;
+ 
+ 	dep_wqe->rule = rule;
+-	dep_wqe->user_data = user_data;
++	dep_wqe->user_data = attr->user_data;
++	dep_wqe->direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
++		attr->rule_idx : 0;
+ 
+ 	if (!items) { /* rule update */
+ 		dep_wqe->rtc_0 = rule->rtc_0;
+@@ -145,8 +150,13 @@ mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule,
+ 		rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr));
+ 		assert(rule->tag_ptr);
+ 
+-		src_tag = (uint8_t *)ste_attr->wqe_data->tag;
+-		memcpy(rule->tag_ptr[0].match, src_tag, MLX5DR_MATCH_TAG_SZ);
++		if (is_jumbo)
++			memcpy(rule->tag_ptr[0].jumbo, ste_attr->wqe_data->action,
++			       MLX5DR_JUMBO_TAG_SZ);
++		else
++			memcpy(rule->tag_ptr[0].match, ste_attr->wqe_data->tag,
++			       MLX5DR_MATCH_TAG_SZ);
++
+ 		rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id;
+ 
+ 		/* Save range definer id and tag for delete */
+@@ -289,8 +299,8 @@ static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule,
+ 	}
+ 
+ 	mlx5dr_rule_create_init(rule, &ste_attr, &apply, false);
+-	mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr->user_data);
+-	mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr->user_data);
++	mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr);
++	mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr);
+ 
+ 	ste_attr.direct_index = 0;
+ 	ste_attr.rtc_0 = match_wqe.rtc_0;
+@@ -395,7 +405,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
+ 	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+ 	 */
+ 	dep_wqe = mlx5dr_send_add_new_dep_wqe(queue);
+-	mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr->user_data);
++	mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr);
+ 
+ 	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ 	ste_attr.wqe_data = &dep_wqe->wqe_data;
+@@ -457,8 +467,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
+ 			ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ 			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ 			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+-			ste_attr.direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
+-						attr->rule_idx : 0;
++			ste_attr.direct_index = dep_wqe->direct_index;
+ 		} else {
+ 			apply.next_direct_idx = --ste_attr.direct_index;
+ 		}
+@@ -594,6 +603,13 @@ static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,
+ 	uint8_t match_criteria;
+ 	int ret;
+ 
++	ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id);
++	if (ret) {
++		DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name);
++		rte_errno = EINVAL;
++		return rte_errno;
++	}
++
+ 	attr = simple_calloc(num_actions, sizeof(*attr));
+ 	if (!attr) {
+ 		rte_errno = ENOMEM;
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c
+index 622d574bfa..4c279ba42a 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c
+@@ -50,6 +50,7 @@ void mlx5dr_send_all_dep_wqe(struct mlx5dr_send_engine *queue)
+ 		ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
+ 		ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ 		ste_attr.wqe_data = &dep_wqe->wqe_data;
++		ste_attr.direct_index = dep_wqe->direct_index;
+ 
+ 		mlx5dr_send_ste(queue, &ste_attr);
+ 
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h
+index c1e8616f7e..0c89faa8a7 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h
+@@ -106,6 +106,7 @@ struct mlx5dr_send_ring_dep_wqe {
+ 	uint32_t rtc_1;
+ 	uint32_t retry_rtc_0;
+ 	uint32_t retry_rtc_1;
++	uint32_t direct_index;
+ 	void *user_data;
+ };
+ 
+@@ -202,8 +203,6 @@ struct mlx5dr_send_ste_attr {
+  *   value to write in CPU endian format.
+  * @param addr
+  *   Address to write to.
+- * @param lock
+- *   Address of the lock to use for that UAR access.
+  */
+ static __rte_always_inline void
+ mlx5dr_uar_write64_relaxed(uint64_t val, void *addr)
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c
+index 55b9b20150..ab73017ade 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c
+@@ -611,8 +611,7 @@ static int mlx5dr_table_set_default_miss_not_valid(struct mlx5dr_table *tbl,
+ 
+ 	if (mlx5dr_table_is_root(tbl) ||
+ 	    (miss_tbl && mlx5dr_table_is_root(miss_tbl)) ||
+-	    (miss_tbl && miss_tbl->type != tbl->type) ||
+-	    (miss_tbl && tbl->default_miss.miss_tbl)) {
++	    (miss_tbl && miss_tbl->type != tbl->type)) {
+ 		DR_LOG(ERR, "Invalid arguments");
+ 		rte_errno = EINVAL;
+ 		return -rte_errno;
+@@ -625,6 +624,7 @@ int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl,
+ 				  struct mlx5dr_table *miss_tbl)
+ {
+ 	struct mlx5dr_context *ctx = tbl->ctx;
++	struct mlx5dr_table *old_miss_tbl;
+ 	int ret;
+ 
+ 	ret = mlx5dr_table_set_default_miss_not_valid(tbl, miss_tbl);
+@@ -632,15 +632,16 @@ int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl,
+ 		return ret;
+ 
+ 	pthread_spin_lock(&ctx->ctrl_lock);
+-
++	old_miss_tbl = tbl->default_miss.miss_tbl;
+ 	ret = mlx5dr_table_connect_to_miss_table(tbl, miss_tbl);
+ 	if (ret)
+ 		goto out;
+ 
++	if (old_miss_tbl)
++		LIST_REMOVE(tbl, default_miss.next);
++
+ 	if (miss_tbl)
+ 		LIST_INSERT_HEAD(&miss_tbl->default_miss.head, tbl, default_miss.next);
+-	else
+-		LIST_REMOVE(tbl, default_miss.next);
+ 
+ 	pthread_spin_unlock(&ctx->ctrl_lock);
+ 	return 0;
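[Annotation] The default-miss fix allows re-pointing a table that already has a miss table: the old list linkage is removed only after the new connect succeeds, then the new linkage is inserted. A minimal sys/queue.h model of that relink:

#include <sys/queue.h>
#include <assert.h>

struct tbl {
	LIST_ENTRY(tbl) next;
};
LIST_HEAD(tbl_head, tbl);

int main(void)
{
	struct tbl_head old_miss = LIST_HEAD_INITIALIZER(old_miss);
	struct tbl_head new_miss = LIST_HEAD_INITIALIZER(new_miss);
	struct tbl t;

	LIST_INSERT_HEAD(&old_miss, &t, next);
	/* Re-pointing: unlink from the old owner, then link into the
	 * new one; the fixed code defers this until after the hardware
	 * connect succeeded, so failure leaves the old state intact. */
	LIST_REMOVE(&t, next);
	LIST_INSERT_HEAD(&new_miss, &t, next);
	assert(LIST_EMPTY(&old_miss));
	assert(LIST_FIRST(&new_miss) == &t);
	return 0;
}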
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+index dd5a0c546d..1d999ef66b 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+@@ -671,7 +671,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+ 	ifr.ifr_data = (void *)&ethpause;
+ 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ 	if (ret) {
+-		DRV_LOG(WARNING,
++		DRV_LOG(DEBUG,
+ 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+ 			" %s",
+ 			dev->data->port_id, strerror(rte_errno));
+@@ -1286,13 +1286,17 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats)
+ 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ 	unsigned int i;
+ 	struct ifreq ifr;
+-	unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
++	unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd);
++	unsigned int stats_sz = max_stats_n * sizeof(uint64_t);
+ 	unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
+ 	struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
+ 	int ret;
++	uint16_t i_idx, o_idx;
++	uint32_t total_stats = xstats_n;
+ 
+ 	et_stats->cmd = ETHTOOL_GSTATS;
+-	et_stats->n_stats = xstats_ctrl->stats_n;
++	/* Pass the maximum value; the driver may ignore this. */
++	et_stats->n_stats = max_stats_n;
+ 	ifr.ifr_data = (caddr_t)et_stats;
+ 	if (pf >= 0)
+ 		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname,
+@@ -1305,21 +1309,34 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats)
+ 			dev->data->port_id);
+ 		return ret;
+ 	}
+-	for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) {
+-		if (xstats_ctrl->info[i].dev)
+-			continue;
+-		stats[i] += (uint64_t)
+-			    et_stats->data[xstats_ctrl->dev_table_idx[i]];
++	if (pf <= 0) {
++		for (i = 0; i != total_stats; i++) {
++			i_idx = xstats_ctrl->dev_table_idx[i];
++			o_idx = xstats_ctrl->xstats_o_idx[i];
++			if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev)
++				continue;
++			stats[o_idx] += (uint64_t)et_stats->data[i_idx];
++		}
++	} else {
++		for (i = 0; i != total_stats; i++) {
++			i_idx = xstats_ctrl->dev_table_idx_2nd[i];
++			o_idx = xstats_ctrl->xstats_o_idx_2nd[i];
++			if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev)
++				continue;
++			stats[o_idx] += (uint64_t)et_stats->data[i_idx];
++		}
+ 	}
+ 	return 0;
+ }
+ 
+-/**
++/*
+  * Read device counters.
+  *
+  * @param dev
+  *   Pointer to Ethernet device.
+- * @param[out] stats
++ * @param bond_master
++ *   Indicate if the device is a bond master.
++ * @param stats
+  *   Counters table output buffer.
+  *
+  * @return
+@@ -1327,7 +1344,7 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats)
+  *   rte_errno is set.
+  */
+ int
+-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+@@ -1335,7 +1352,7 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
+ 
+ 	memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n);
+ 	/* Read ifreq counters. */
+-	if (priv->master && priv->pf_bond >= 0) {
++	if (bond_master) {
+ 		/* Sum xstats from bonding device member ports. */
+ 		for (i = 0; i < priv->sh->bond.n_port; i++) {
+ 			ret = _mlx5_os_read_dev_counters(dev, i, stats);
+@@ -1347,13 +1364,17 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
+ 		if (ret)
+ 			return ret;
+ 	}
+-	/* Read IB counters. */
+-	for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) {
++	/*
++	 * Read IB dev counters.
++	 * The counters are unique per IB device but not per netdev IF.
++	 * In bonding mode, reading the stats from a single port is enough.
++	 */
++	for (i = xstats_ctrl->dev_cnt_start; i < xstats_ctrl->mlx5_stats_n; i++) {
+ 		if (!xstats_ctrl->info[i].dev)
+ 			continue;
+ 		/* return last xstats counter if fail to read. */
+ 		if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name,
+-			    &stats[i]) == 0)
++					  &stats[i]) == 0)
+ 			xstats_ctrl->xstats[i] = stats[i];
+ 		else
+ 			stats[i] = xstats_ctrl->xstats[i];
+@@ -1361,18 +1382,24 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
+ 	return ret;
+ }
+ 
+-/**
++/*
+  * Query the number of statistics provided by ETHTOOL.
+  *
+  * @param dev
+  *   Pointer to Ethernet device.
++ * @param bond_master
++ *   Indicate if the device is a bond master.
++ * @param n_stats
++ *   Pointer to number of stats to store.
++ * @param n_stats_sec
++ *   Pointer to number of stats to store for the 2nd port of the bond.
+  *
+  * @return
+- *   Number of statistics on success, negative errno value otherwise and
+- *   rte_errno is set.
++ *   0 on success, negative errno value otherwise and rte_errno is set.
+  */
+ int
+-mlx5_os_get_stats_n(struct rte_eth_dev *dev)
++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master,
++		    uint16_t *n_stats, uint16_t *n_stats_sec)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct ethtool_drvinfo drvinfo;
+@@ -1381,18 +1408,34 @@ mlx5_os_get_stats_n(struct rte_eth_dev *dev)
+ 
+ 	drvinfo.cmd = ETHTOOL_GDRVINFO;
+ 	ifr.ifr_data = (caddr_t)&drvinfo;
+-	if (priv->master && priv->pf_bond >= 0)
+-		/* Bonding PF. */
++	/* Bonding PFs. */
++	if (bond_master) {
+ 		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
+ 					   SIOCETHTOOL, &ifr);
+-	else
++		if (ret) {
++			DRV_LOG(WARNING, "bonding port %u unable to query number of"
++				" statistics for the 1st slave, %d", PORT_ID(priv), ret);
++			return ret;
++		}
++		*n_stats = drvinfo.n_stats;
++		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname,
++					   SIOCETHTOOL, &ifr);
++		if (ret) {
++			DRV_LOG(WARNING, "bonding port %u unable to query number of"
++				" statistics for the 2nd slave, %d", PORT_ID(priv), ret);
++			return ret;
++		}
++		*n_stats_sec = drvinfo.n_stats;
++	} else {
+ 		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+-	if (ret) {
+-		DRV_LOG(WARNING, "port %u unable to query number of statistics",
+-			dev->data->port_id);
+-		return ret;
++		if (ret) {
++			DRV_LOG(WARNING, "port %u unable to query number of statistics",
++				PORT_ID(priv));
++			return ret;
++		}
++		*n_stats = drvinfo.n_stats;
+ 	}
+-	return drvinfo.n_stats;
++	return 0;
+ }
+ 
+ static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
+@@ -1576,7 +1619,104 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
+ 	},
+ };
+ 
+-static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
++const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
++
++static int
++mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master,
++			  struct ethtool_gstrings *strings,
++			  uint32_t stats_n, uint32_t stats_n_2nd)
++{
++	struct mlx5_priv *priv = dev->data->dev_private;
++	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
++	struct ifreq ifr;
++	int ret;
++	uint32_t i, j, idx;
++
++	/* Ensure there is no out-of-bounds access below. */
++	MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS);
++	strings->cmd = ETHTOOL_GSTRINGS;
++	strings->string_set = ETH_SS_STATS;
++	strings->len = stats_n;
++	ifr.ifr_data = (caddr_t)strings;
++	if (bond_master)
++		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
++					   SIOCETHTOOL, &ifr);
++	else
++		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
++	if (ret) {
++		DRV_LOG(WARNING, "port %u unable to get statistic names with %d",
++			PORT_ID(priv), ret);
++		return ret;
++	}
++	/* Reorganize the loop order to reduce the number of iterations. */
++	for (j = 0; j < xstats_n; j++) {
++		xstats_ctrl->dev_table_idx[j] = UINT16_MAX;
++		for (i = 0; i < stats_n; i++) {
++			const char *curr_string =
++				(const char *)&strings->data[i * ETH_GSTRING_LEN];
++
++			if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) {
++				idx = xstats_ctrl->mlx5_stats_n++;
++				xstats_ctrl->dev_table_idx[j] = i;
++				xstats_ctrl->xstats_o_idx[j] = idx;
++				xstats_ctrl->info[idx] = mlx5_counters_init[j];
++			}
++		}
++	}
++	if (!bond_master) {
++		/* Add dev counters, unique per IB device. */
++		xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n;
++		for (j = 0; j != xstats_n; j++) {
++			if (mlx5_counters_init[j].dev) {
++				idx = xstats_ctrl->mlx5_stats_n++;
++				xstats_ctrl->info[idx] = mlx5_counters_init[j];
++				xstats_ctrl->hw_stats[idx] = 0;
++			}
++		}
++		return 0;
++	}
++
++	strings->len = stats_n_2nd;
++	ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname,
++				   SIOCETHTOOL, &ifr);
++	if (ret) {
++		DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d",
++			PORT_ID(priv), ret);
++		return ret;
++	}
++	/* The 2nd slave port may expose a different set of strings, depending on the configuration. */
++	for (j = 0; j != xstats_n; j++) {
++		xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX;
++		for (i = 0; i != stats_n_2nd; i++) {
++			const char *curr_string =
++				(const char *)&strings->data[i * ETH_GSTRING_LEN];
++
++			if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) {
++				xstats_ctrl->dev_table_idx_2nd[j] = i;
++				if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) {
++					/* Already mapped in the 1st slave port. */
++					idx = xstats_ctrl->xstats_o_idx[j];
++					xstats_ctrl->xstats_o_idx_2nd[j] = idx;
++				} else {
++					/* Append the new items to the end of the map. */
++					idx = xstats_ctrl->mlx5_stats_n++;
++					xstats_ctrl->xstats_o_idx_2nd[j] = idx;
++					xstats_ctrl->info[idx] = mlx5_counters_init[j];
++				}
++			}
++		}
++	}
++	/* Dev counters are always placed last now. */
++	xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n;
++	for (j = 0; j != xstats_n; j++) {
++		if (mlx5_counters_init[j].dev) {
++			idx = xstats_ctrl->mlx5_stats_n++;
++			xstats_ctrl->info[idx] = mlx5_counters_init[j];
++			xstats_ctrl->hw_stats[idx] = 0;
++		}
++	}
++	return 0;
++}
+ 
+ /**
+  * Init the structures to read device counters.
+@@ -1590,76 +1730,44 @@ mlx5_os_stats_init(struct rte_eth_dev *dev)
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ 	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
+-	unsigned int i;
+-	unsigned int j;
+-	struct ifreq ifr;
+ 	struct ethtool_gstrings *strings = NULL;
+-	unsigned int dev_stats_n;
++	uint16_t dev_stats_n = 0;
++	uint16_t dev_stats_n_2nd = 0;
++	unsigned int max_stats_n;
+ 	unsigned int str_sz;
+ 	int ret;
++	bool bond_master = (priv->master && priv->pf_bond >= 0);
+ 
+ 	/* So that it won't aggregate for each init. */
+ 	xstats_ctrl->mlx5_stats_n = 0;
+-	ret = mlx5_os_get_stats_n(dev);
++	ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd);
+ 	if (ret < 0) {
+ 		DRV_LOG(WARNING, "port %u no extended statistics available",
+ 			dev->data->port_id);
+ 		return;
+ 	}
+-	dev_stats_n = ret;
++	max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd);
+ 	/* Allocate memory to grab stat names and values. */
+-	str_sz = dev_stats_n * ETH_GSTRING_LEN;
++	str_sz = max_stats_n * ETH_GSTRING_LEN;
+ 	strings = (struct ethtool_gstrings *)
+ 		  mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0,
+ 			      SOCKET_ID_ANY);
+ 	if (!strings) {
+ 		DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
+-		     dev->data->port_id);
++			dev->data->port_id);
+ 		return;
+ 	}
+-	strings->cmd = ETHTOOL_GSTRINGS;
+-	strings->string_set = ETH_SS_STATS;
+-	strings->len = dev_stats_n;
+-	ifr.ifr_data = (caddr_t)strings;
+-	if (priv->master && priv->pf_bond >= 0)
+-		/* Bonding master. */
+-		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
+-					   SIOCETHTOOL, &ifr);
+-	else
+-		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+-	if (ret) {
+-		DRV_LOG(WARNING, "port %u unable to get statistic names",
++	ret = mlx5_os_get_stats_strings(dev, bond_master, strings,
++					dev_stats_n, dev_stats_n_2nd);
++	if (ret < 0) {
++		DRV_LOG(WARNING, "port %u failed to get the stats strings",
+ 			dev->data->port_id);
+ 		goto free;
+ 	}
+-	for (i = 0; i != dev_stats_n; ++i) {
+-		const char *curr_string = (const char *)
+-			&strings->data[i * ETH_GSTRING_LEN];
+-
+-		for (j = 0; j != xstats_n; ++j) {
+-			if (!strcmp(mlx5_counters_init[j].ctr_name,
+-				    curr_string)) {
+-				unsigned int idx = xstats_ctrl->mlx5_stats_n++;
+-
+-				xstats_ctrl->dev_table_idx[idx] = i;
+-				xstats_ctrl->info[idx] = mlx5_counters_init[j];
+-				break;
+-			}
+-		}
+-	}
+-	/* Add dev counters. */
+-	MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
+-	for (i = 0; i != xstats_n; ++i) {
+-		if (mlx5_counters_init[i].dev) {
+-			unsigned int idx = xstats_ctrl->mlx5_stats_n++;
+-
+-			xstats_ctrl->info[idx] = mlx5_counters_init[i];
+-			xstats_ctrl->hw_stats[idx] = 0;
+-		}
+-	}
+ 	xstats_ctrl->stats_n = dev_stats_n;
++	xstats_ctrl->stats_n_2nd = dev_stats_n_2nd;
+ 	/* Copy to base at first time. */
+-	ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base);
++	ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base);
+ 	if (ret)
+ 		DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ 			dev->data->port_id, strerror(rte_errno));
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c
+index ae82e1e5d8..2241e84341 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c
+@@ -455,15 +455,16 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
+  * Routine checks the reference counter and does actual
+  * resources creation/initialization only if counter is zero.
+  *
+- * @param[in] priv
+- *   Pointer to the private device data structure.
++ * @param[in] eth_dev
++ *   Pointer to the device.
+  *
+  * @return
+  *   Zero on success, positive error code otherwise.
+  */
+ static int
+-mlx5_alloc_shared_dr(struct mlx5_priv *priv)
++mlx5_alloc_shared_dr(struct rte_eth_dev *eth_dev)
+ {
++	struct mlx5_priv *priv = eth_dev->data->dev_private;
+ 	struct mlx5_dev_ctx_shared *sh = priv->sh;
+ 	char s[MLX5_NAME_SIZE] __rte_unused;
+ 	int err;
+@@ -578,6 +579,44 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
+ 		err = errno;
+ 		goto error;
+ 	}
++
++	if (sh->config.dv_flow_en == 1) {
++		/* Query availability of metadata reg_c's. */
++		if (!priv->sh->metadata_regc_check_flag) {
++			err = mlx5_flow_discover_mreg_c(eth_dev);
++			if (err < 0) {
++				err = -err;
++				goto error;
++			}
++		}
++		if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
++			DRV_LOG(DEBUG,
++				"port %u extensive metadata register is not supported",
++				eth_dev->data->port_id);
++			if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
++				DRV_LOG(ERR, "metadata mode %u is not supported "
++					     "(no metadata registers available)",
++					     sh->config.dv_xmeta_en);
++				err = ENOTSUP;
++				goto error;
++			}
++		}
++		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
++		    mlx5_flow_ext_mreg_supported(eth_dev) && sh->dv_regc0_mask) {
++			sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
++							    MLX5_FLOW_MREG_HTABLE_SZ,
++							    false, true, eth_dev,
++							    flow_dv_mreg_create_cb,
++							    flow_dv_mreg_match_cb,
++							    flow_dv_mreg_remove_cb,
++							    flow_dv_mreg_clone_cb,
++							    flow_dv_mreg_clone_free_cb);
++			if (!sh->mreg_cp_tbl) {
++				err = ENOMEM;
++				goto error;
++			}
++		}
++	}
+ #endif
+ 	if (!sh->tunnel_hub && sh->config.dv_miss_info)
+ 		err = mlx5_alloc_tunnel_hub(sh);
+@@ -662,6 +701,10 @@ error:
+ 		mlx5_list_destroy(sh->dest_array_list);
+ 		sh->dest_array_list = NULL;
+ 	}
++	if (sh->mreg_cp_tbl) {
++		mlx5_hlist_destroy(sh->mreg_cp_tbl);
++		sh->mreg_cp_tbl = NULL;
++	}
+ 	return err;
+ }
+ 
+@@ -759,6 +802,10 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
+ 		mlx5_list_destroy(sh->dest_array_list);
+ 		sh->dest_array_list = NULL;
+ 	}
++	if (sh->mreg_cp_tbl) {
++		mlx5_hlist_destroy(sh->mreg_cp_tbl);
++		sh->mreg_cp_tbl = NULL;
++	}
+ }
+ 
+ /**
+@@ -1545,13 +1592,6 @@ err_secondary:
+ 	}
+ 	/* Create context for virtual machine VLAN workaround. */
+ 	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
+-	if (sh->config.dv_flow_en) {
+-		err = mlx5_alloc_shared_dr(priv);
+-		if (err)
+-			goto error;
+-		if (mlx5_flex_item_port_init(eth_dev) < 0)
+-			goto error;
+-	}
+ 	if (mlx5_devx_obj_ops_en(sh)) {
+ 		priv->obj_ops = devx_obj_ops;
+ 		mlx5_queue_counter_id_prepare(eth_dev);
+@@ -1602,6 +1642,13 @@ err_secondary:
+ 			goto error;
+ 	}
+ 	rte_rwlock_init(&priv->ind_tbls_lock);
++	if (sh->config.dv_flow_en) {
++		err = mlx5_alloc_shared_dr(eth_dev);
++		if (err)
++			goto error;
++		if (mlx5_flex_item_port_init(eth_dev) < 0)
++			goto error;
++	}
+ 	if (priv->sh->config.dv_flow_en == 2) {
+ #ifdef HAVE_MLX5_HWS_SUPPORT
+ 		if (priv->sh->config.dv_esw_en) {
+@@ -1682,43 +1729,6 @@ err_secondary:
+ 		err = -err;
+ 		goto error;
+ 	}
+-	/* Query availability of metadata reg_c's. */
+-	if (!priv->sh->metadata_regc_check_flag) {
+-		err = mlx5_flow_discover_mreg_c(eth_dev);
+-		if (err < 0) {
+-			err = -err;
+-			goto error;
+-		}
+-	}
+-	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
+-		DRV_LOG(DEBUG,
+-			"port %u extensive metadata register is not supported",
+-			eth_dev->data->port_id);
+-		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+-			DRV_LOG(ERR, "metadata mode %u is not supported "
+-				     "(no metadata registers available)",
+-				     sh->config.dv_xmeta_en);
+-			err = ENOTSUP;
+-			goto error;
+-		}
+-	}
+-	if (sh->config.dv_flow_en &&
+-	    sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+-	    mlx5_flow_ext_mreg_supported(eth_dev) &&
+-	    priv->sh->dv_regc0_mask) {
+-		priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
+-						      MLX5_FLOW_MREG_HTABLE_SZ,
+-						      false, true, eth_dev,
+-						      flow_dv_mreg_create_cb,
+-						      flow_dv_mreg_match_cb,
+-						      flow_dv_mreg_remove_cb,
+-						      flow_dv_mreg_clone_cb,
+-						    flow_dv_mreg_clone_free_cb);
+-		if (!priv->mreg_cp_tbl) {
+-			err = ENOMEM;
+-			goto error;
+-		}
+-	}
+ 	rte_spinlock_init(&priv->shared_act_sl);
+ 	mlx5_flow_counter_mode_config(eth_dev);
+ 	mlx5_flow_drop_action_config(eth_dev);
+@@ -1737,8 +1747,6 @@ error:
+ 		    priv->sh->config.dv_esw_en)
+ 			flow_hw_destroy_vport_action(eth_dev);
+ #endif
+-		if (priv->mreg_cp_tbl)
+-			mlx5_hlist_destroy(priv->mreg_cp_tbl);
+ 		if (priv->sh)
+ 			mlx5_os_free_shared_dr(priv);
+ 		if (priv->nl_socket_route >= 0)
+@@ -2429,8 +2437,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
+ 						list[ns].info.master = 0;
+ 						list[ns].info.representor = 0;
+ 					}
+-					if (list[ns].info.port_name == bd)
+-						ns++;
++					ns++;
+ 					break;
+ 				case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
+ 					/* Fallthrough */
+@@ -2993,9 +3000,15 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
+ 
+ 	if (priv->sh) {
+ 		if (priv->q_counters != NULL &&
+-		    strcmp(ctr_name, "out_of_buffer") == 0)
++		    strcmp(ctr_name, "out_of_buffer") == 0) {
++			if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
++				DRV_LOG(WARNING, "Devx out_of_buffer counter is not supported in the secondary process");
++				rte_errno = ENOTSUP;
++				return 1;
++			}
+ 			return mlx5_devx_cmd_queue_counter_query
+ 					(priv->q_counters, 0, (uint32_t *)stat);
++		}
+ 		MKSTR(path, "%s/ports/%d/hw_counters/%s",
+ 		      priv->sh->ibdev_path,
+ 		      priv->dev_port,
+diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c
+index 3a182de248..8d4a0a3dda 100644
+--- a/dpdk/drivers/net/mlx5/mlx5.c
++++ b/dpdk/drivers/net/mlx5/mlx5.c
+@@ -1689,7 +1689,8 @@ mlx5_init_shared_dev_registers(struct mlx5_dev_ctx_shared *sh)
+ 	} else {
+ 		DRV_LOG(DEBUG, "ASO register: NONE");
+ 	}
+-	mlx5_init_hws_flow_tags_registers(sh);
++	if (sh->config.dv_flow_en == 2)
++		mlx5_init_hws_flow_tags_registers(sh);
+ }
+ 
+ /**
+@@ -2267,6 +2268,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
+ 	mlx5_indirect_list_handles_release(dev);
+ #ifdef HAVE_MLX5_HWS_SUPPORT
+ 	flow_hw_destroy_vport_action(dev);
++	/* dr context will be closed after mlx5_os_free_shared_dr. */
+ 	flow_hw_resource_release(dev);
+ 	flow_hw_clear_port_info(dev);
+ #endif
+@@ -2279,7 +2281,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
+ 		mlx5_free(priv->rxq_privs);
+ 		priv->rxq_privs = NULL;
+ 	}
+-	if (priv->txqs != NULL) {
++	if (priv->txqs != NULL && dev->data->tx_queues != NULL) {
+ 		/* XXX race condition if mlx5_tx_burst() is still running. */
+ 		rte_delay_us_sleep(1000);
+ 		for (i = 0; (i != priv->txqs_n); ++i)
+@@ -2288,16 +2290,20 @@ mlx5_dev_close(struct rte_eth_dev *dev)
+ 		priv->txqs = NULL;
+ 	}
+ 	mlx5_proc_priv_uninit(dev);
++	if (priv->drop_queue.hrxq)
++		mlx5_drop_action_destroy(dev);
+ 	if (priv->q_counters) {
+ 		mlx5_devx_cmd_destroy(priv->q_counters);
+ 		priv->q_counters = NULL;
+ 	}
+-	if (priv->drop_queue.hrxq)
+-		mlx5_drop_action_destroy(dev);
+-	if (priv->mreg_cp_tbl)
+-		mlx5_hlist_destroy(priv->mreg_cp_tbl);
+ 	mlx5_mprq_free_mp(dev);
+ 	mlx5_os_free_shared_dr(priv);
++#ifdef HAVE_MLX5_HWS_SUPPORT
++	if (priv->dr_ctx) {
++		claim_zero(mlx5dr_context_close(priv->dr_ctx));
++		priv->dr_ctx = NULL;
++	}
++#endif
+ 	if (priv->rss_conf.rss_key != NULL)
+ 		mlx5_free(priv->rss_conf.rss_key);
+ 	if (priv->reta_idx != NULL)
+diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h
+index 263ebead7f..0c81bcab9f 100644
+--- a/dpdk/drivers/net/mlx5/mlx5.h
++++ b/dpdk/drivers/net/mlx5/mlx5.h
+@@ -263,16 +263,29 @@ struct mlx5_counter_ctrl {
+ struct mlx5_xstats_ctrl {
+ 	/* Number of device stats. */
+ 	uint16_t stats_n;
++	/* Number of device stats, for the 2nd port in bond. */
++	uint16_t stats_n_2nd;
+ 	/* Number of device stats identified by PMD. */
+-	uint16_t  mlx5_stats_n;
++	uint16_t mlx5_stats_n;
++	/* First device counters index. */
++	uint16_t dev_cnt_start;
+ 	/* Index in the device counters table. */
+ 	uint16_t dev_table_idx[MLX5_MAX_XSTATS];
++	/* Index in the output table. */
++	uint16_t xstats_o_idx[MLX5_MAX_XSTATS];
+ 	uint64_t base[MLX5_MAX_XSTATS];
+ 	uint64_t xstats[MLX5_MAX_XSTATS];
+ 	uint64_t hw_stats[MLX5_MAX_XSTATS];
+ 	struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
++	/* Index in the device counters table, for the 2nd port in bond. */
++	uint16_t dev_table_idx_2nd[MLX5_MAX_XSTATS];
++	/* Index in the output table, for the 2nd port in bond. */
++	uint16_t xstats_o_idx_2nd[MLX5_MAX_XSTATS];
+ };
+ 
++/* xstats array size. */
++extern const unsigned int xstats_n;
++
+ struct mlx5_stats_ctrl {
+ 	/* Base for imissed counter. */
+ 	uint64_t imissed_base;
+@@ -1473,6 +1486,8 @@ struct mlx5_dev_ctx_shared {
+ 		struct mlx5_hlist *flow_tbls; /* SWS flow table. */
+ 		struct mlx5_hlist *groups; /* HWS flow group. */
+ 	};
++	struct mlx5_hlist *mreg_cp_tbl;
++	/* Hash table of Rx metadata register copy table. */
+ 	struct mlx5_flow_tunnel_hub *tunnel_hub;
+ 	/* Direct Rules tables for FDB, NIC TX+RX */
+ 	void *dr_drop_action; /* Pointer to DR drop action, any domain. */
+@@ -1862,11 +1877,7 @@ struct mlx5_priv {
+ 	rte_spinlock_t hw_ctrl_lock;
+ 	LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
+ 	LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows;
+-	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
+-	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
+-	struct rte_flow_template_table *hw_esw_zero_tbl;
+-	struct rte_flow_template_table *hw_tx_meta_cpy_tbl;
+-	struct rte_flow_template_table *hw_lacp_rx_tbl;
++	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
+ 	struct rte_flow_pattern_template *hw_tx_repr_tagging_pt;
+ 	struct rte_flow_actions_template *hw_tx_repr_tagging_at;
+ 	struct rte_flow_template_table *hw_tx_repr_tagging_tbl;
+@@ -1900,8 +1911,6 @@ struct mlx5_priv {
+ 	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
+ 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
+ 	struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
+-	struct mlx5_hlist *mreg_cp_tbl;
+-	/* Hash table of Rx metadata register copy table. */
+ 	struct mlx5_mtr_config mtr_config; /* Meter configuration */
+ 	uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
+ 	struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
+@@ -1989,6 +1998,30 @@ enum dr_dump_rec_type {
+ 	DR_DUMP_REC_TYPE_PMD_COUNTER = 4430,
+ };
+ 
++#if defined(HAVE_MLX5_HWS_SUPPORT)
++static __rte_always_inline struct mlx5_hw_q_job *
++flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)
++{
++	MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);
++	return priv->hw_q[queue].job_idx ?
++	       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;
++}
++
++static __rte_always_inline void
++flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)
++{
++	MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);
++	priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
++}
++
++struct mlx5_hw_q_job *
++mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
++			  const struct rte_flow_action_handle *handle,
++			  void *user_data, void *query_data,
++			  enum mlx5_hw_job_type type,
++			  struct rte_flow_error *error);
++#endif
++
+ /**
+  * Indicates whether HW objects operations can be created by DevX.
+  *
+@@ -2131,8 +2164,9 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
+ 			   struct rte_dev_eeprom_info *info);
+ int mlx5_os_read_dev_stat(struct mlx5_priv *priv,
+ 			  const char *ctr_name, uint64_t *stat);
+-int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats);
+-int mlx5_os_get_stats_n(struct rte_eth_dev *dev);
++int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats);
++int mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master,
++			uint16_t *n_stats, uint16_t *n_stats_sec);
+ void mlx5_os_stats_init(struct rte_eth_dev *dev);
+ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev);
+ 
+@@ -2394,11 +2428,12 @@ int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh);
+ int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);
+ void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
+ 			   enum mlx5_access_aso_opc_mod aso_opc_mod);
+-int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
+-		struct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk,
+-		void *user_data, bool push);
+-int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
+-		struct mlx5_aso_mtr *mtr);
++int mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue,
++				 struct mlx5_aso_mtr *mtr,
++				 struct mlx5_mtr_bulk *bulk,
++				 struct mlx5_hw_q_job *job, bool push);
++int mlx5_aso_mtr_wait(struct mlx5_priv *priv,
++		      struct mlx5_aso_mtr *mtr, bool is_tmpl_api);
+ int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
+ 			      struct mlx5_aso_ct_action *ct,
+ 			      const struct rte_flow_action_conntrack *profile,
+diff --git a/dpdk/drivers/net/mlx5/mlx5_devx.c b/dpdk/drivers/net/mlx5/mlx5_devx.c
+index 9fa400fc48..4f08ddf899 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_devx.c
++++ b/dpdk/drivers/net/mlx5/mlx5_devx.c
+@@ -592,7 +592,8 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
+ 		DRV_LOG(ERR, "Failed to create CQ.");
+ 		goto error;
+ 	}
+-	rxq_data->delay_drop = priv->config.std_delay_drop;
++	if (!rxq_data->shared || !rxq_ctrl->started)
++		rxq_data->delay_drop = priv->config.std_delay_drop;
+ 	/* Create RQ using DevX API. */
+ 	ret = mlx5_rxq_create_devx_rq_resources(rxq);
+ 	if (ret) {
+diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c
+index ab30e2c215..ec4bdd8af1 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c
++++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c
+@@ -146,6 +146,12 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
+ 	ret = mlx5_proc_priv_init(dev);
+ 	if (ret)
+ 		return ret;
++	ret = mlx5_dev_set_mtu(dev, dev->data->mtu);
++	if (ret) {
++		DRV_LOG(ERR, "port %u failed to set MTU to %u", dev->data->port_id,
++			dev->data->mtu);
++		return ret;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c
+index 85e8c77c81..fdc7c3ea54 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow.c
+@@ -1953,18 +1953,20 @@ mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev)
+ 		if (rxq == NULL || rxq->ctrl == NULL)
+ 			continue;
+ 		data = &rxq->ctrl->rxq;
+-		if (!rte_flow_dynf_metadata_avail()) {
+-			data->dynf_meta = 0;
+-			data->flow_meta_mask = 0;
+-			data->flow_meta_offset = -1;
+-			data->flow_meta_port_mask = 0;
+-		} else {
+-			data->dynf_meta = 1;
+-			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
+-			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
+-			data->flow_meta_port_mask = priv->sh->dv_meta_mask;
++		if (!data->shared || !rxq->ctrl->started) {
++			if (!rte_flow_dynf_metadata_avail()) {
++				data->dynf_meta = 0;
++				data->flow_meta_mask = 0;
++				data->flow_meta_offset = -1;
++				data->flow_meta_port_mask = 0;
++			} else {
++				data->dynf_meta = 1;
++				data->flow_meta_mask = rte_flow_dynf_metadata_mask;
++				data->flow_meta_offset = rte_flow_dynf_metadata_offs;
++				data->flow_meta_port_mask = priv->sh->dv_meta_mask;
++			}
++			data->mark_flag = mark_flag;
+ 		}
+-		data->mark_flag = mark_flag;
+ 	}
+ }
+ 
+@@ -2504,7 +2506,7 @@ int
+ flow_validate_modify_field_level(const struct rte_flow_action_modify_data *data,
+ 				 struct rte_flow_error *error)
+ {
+-	if (data->level == 0)
++	if (data->level == 0 || data->field == RTE_FLOW_FIELD_FLEX_ITEM)
+ 		return 0;
+ 	if (data->field != RTE_FLOW_FIELD_TAG &&
+ 	    data->field != (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG)
+@@ -5228,8 +5230,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
+ 	};
+ 
+ 	/* Check if already registered. */
+-	MLX5_ASSERT(priv->mreg_cp_tbl);
+-	entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
++	MLX5_ASSERT(priv->sh->mreg_cp_tbl);
++	entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx);
+ 	if (!entry)
+ 		return NULL;
+ 	return container_of(entry, struct mlx5_flow_mreg_copy_resource,
+@@ -5268,10 +5270,10 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
+ 		return;
+ 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+ 				 flow->rix_mreg_copy);
+-	if (!mcp_res || !priv->mreg_cp_tbl)
++	if (!mcp_res || !priv->sh->mreg_cp_tbl)
+ 		return;
+ 	MLX5_ASSERT(mcp_res->rix_flow);
+-	mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
++	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent);
+ 	flow->rix_mreg_copy = 0;
+ }
+ 
+@@ -5293,14 +5295,14 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
+ 	uint32_t mark_id;
+ 
+ 	/* Check if default flow is registered. */
+-	if (!priv->mreg_cp_tbl)
++	if (!priv->sh->mreg_cp_tbl)
+ 		return;
+ 	mark_id = MLX5_DEFAULT_COPY_ID;
+ 	ctx.data = &mark_id;
+-	entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
++	entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx);
+ 	if (!entry)
+ 		return;
+-	mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
++	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry);
+ }
+ 
+ /**
+@@ -5338,7 +5340,7 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
+ 	 */
+ 	mark_id = MLX5_DEFAULT_COPY_ID;
+ 	ctx.data = &mark_id;
+-	if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
++	if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx))
+ 		return 0;
+ 	mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
+ 	if (!mcp_res)
+@@ -5492,6 +5494,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
+ 			}
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_COUNT:
++		case RTE_FLOW_ACTION_TYPE_AGE:
+ 			if (encap) {
+ 				rte_memcpy(actions_tx, actions,
+ 					   sizeof(struct rte_flow_action));
+@@ -5817,8 +5820,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 	struct mlx5_rte_flow_item_tag *tag_item_spec;
+ 	struct mlx5_rte_flow_item_tag *tag_item_mask;
+ 	uint32_t tag_id = 0;
+-	struct rte_flow_item *vlan_item_dst = NULL;
+-	const struct rte_flow_item *vlan_item_src = NULL;
++	bool vlan_actions;
++	struct rte_flow_item *orig_sfx_items = sfx_items;
+ 	const struct rte_flow_item *orig_items = items;
+ 	struct rte_flow_action *hw_mtr_action;
+ 	struct rte_flow_action *action_pre_head = NULL;
+@@ -5835,6 +5838,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 
+ 	/* Prepare the suffix subflow items. */
+ 	tag_item = sfx_items++;
++	tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ 		int item_type = items->type;
+ 
+@@ -5857,10 +5861,13 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 			sfx_items++;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_VLAN:
+-			/* Determine if copy vlan item below. */
+-			vlan_item_src = items;
+-			vlan_item_dst = sfx_items++;
+-			vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID;
++			/*
++			 * Copy VLAN items in case VLAN actions are performed.
++			 * If there are no VLAN actions, these items will be VOID.
++			 */
++			memcpy(sfx_items, items, sizeof(*sfx_items));
++			sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
++			sfx_items++;
+ 			break;
+ 		default:
+ 			break;
+@@ -5877,6 +5884,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 		tag_action = actions_pre++;
+ 	}
+ 	/* Prepare the actions for prefix and suffix flow. */
++	vlan_actions = false;
+ 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ 		struct rte_flow_action *action_cur = NULL;
+ 
+@@ -5907,16 +5915,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+-			if (vlan_item_dst && vlan_item_src) {
+-				memcpy(vlan_item_dst, vlan_item_src,
+-					sizeof(*vlan_item_dst));
+-				/*
+-				 * Convert to internal match item, it is used
+-				 * for vlan push and set vid.
+-				 */
+-				vlan_item_dst->type = (enum rte_flow_item_type)
+-						MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+-			}
++			vlan_actions = true;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_COUNT:
+ 			if (fm->def_policy)
+@@ -5931,6 +5930,14 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 					actions_sfx++ : actions_pre++;
+ 		memcpy(action_cur, actions, sizeof(struct rte_flow_action));
+ 	}
++	/* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */
++	if (!vlan_actions) {
++		struct rte_flow_item *it = orig_sfx_items;
++
++		for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++)
++			if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
++				it->type = RTE_FLOW_ITEM_TYPE_VOID;
++	}
+ 	/* Add end action to the actions. */
+ 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
+ 	if (priv->sh->meter_aso_en) {
+@@ -6020,8 +6027,6 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
+ 	tag_action->type = (enum rte_flow_action_type)
+ 				MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+ 	tag_action->conf = set_tag;
+-	tag_item->type = (enum rte_flow_item_type)
+-				MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ 	tag_item->spec = tag_item_spec;
+ 	tag_item->last = NULL;
+ 	tag_item->mask = tag_item_mask;
+@@ -6849,6 +6854,19 @@ flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev,
+ 				&drop_split_info, error);
+ }
+ 
++static int
++flow_count_vlan_items(const struct rte_flow_item items[])
++{
++	int items_n = 0;
++
++	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
++		if (items->type == RTE_FLOW_ITEM_TYPE_VLAN ||
++		    items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
++			items_n++;
++	}
++	return items_n;
++}
++
+ /**
+  * The splitting for meter feature.
+  *
+@@ -6904,6 +6922,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
+ 	size_t act_size;
+ 	size_t item_size;
+ 	int actions_n = 0;
++	int vlan_items_n = 0;
+ 	int ret = 0;
+ 
+ 	if (priv->mtr_en)
+@@ -6963,9 +6982,11 @@ flow_create_split_meter(struct rte_eth_dev *dev,
+ 		act_size = (sizeof(struct rte_flow_action) *
+ 			    (actions_n + METER_PREFIX_ACTION)) +
+ 			   sizeof(struct mlx5_rte_flow_action_set_tag);
+-		/* Suffix items: tag, vlan, port id, end. */
+-#define METER_SUFFIX_ITEM 4
+-		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
++		/* Flow can have multiple VLAN items. Account for them in suffix items. */
++		vlan_items_n = flow_count_vlan_items(items);
++		/* Suffix items: tag, [vlans], port id, end. */
++#define METER_SUFFIX_ITEM 3
++		item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) +
+ 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
+ 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
+ 					  0, SOCKET_ID_ANY);
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h
+index 6dde9de688..bde7dc43a8 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow.h
++++ b/dpdk/drivers/net/mlx5/mlx5_flow.h
+@@ -77,7 +77,7 @@ enum mlx5_indirect_type {
+ /* Now, the maximal ports will be supported is 16, action number is 32M. */
+ #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10
+ 
+-#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22
++#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25
+ #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)
+ 
+ /* 29-31: type, 25-28: owner port, 0-24: index */
+@@ -1759,6 +1759,28 @@ flow_hw_get_reg_id_from_ctx(void *dr_ctx,
+ 	return REG_NON;
+ }
+ 
++static __rte_always_inline int
++flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val)
++{
++#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
++	uint32_t port;
++
++	MLX5_ETH_FOREACH_DEV(port, NULL) {
++		struct mlx5_priv *priv;
++		priv = rte_eth_devices[port].data->dev_private;
++
++		if (priv->dr_ctx == dr_ctx) {
++			*port_val = port;
++			return 0;
++		}
++	}
++#else
++	RTE_SET_USED(dr_ctx);
++	RTE_SET_USED(port_val);
++#endif
++	return -EINVAL;
++}
++
+ void flow_hw_set_port_info(struct rte_eth_dev *dev);
+ void flow_hw_clear_port_info(struct rte_eth_dev *dev);
+ int flow_hw_create_vport_action(struct rte_eth_dev *dev);
+@@ -2446,6 +2468,25 @@ struct mlx5_flow_hw_ctrl_rx {
+ 						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
+ };
+ 
++/* Contains all templates required for control flow rules in FDB with HWS. */
++struct mlx5_flow_hw_ctrl_fdb {
++	struct rte_flow_pattern_template *esw_mgr_items_tmpl;
++	struct rte_flow_actions_template *regc_jump_actions_tmpl;
++	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
++	struct rte_flow_pattern_template *regc_sq_items_tmpl;
++	struct rte_flow_actions_template *port_actions_tmpl;
++	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
++	struct rte_flow_pattern_template *port_items_tmpl;
++	struct rte_flow_actions_template *jump_one_actions_tmpl;
++	struct rte_flow_template_table *hw_esw_zero_tbl;
++	struct rte_flow_pattern_template *tx_meta_items_tmpl;
++	struct rte_flow_actions_template *tx_meta_actions_tmpl;
++	struct rte_flow_template_table *hw_tx_meta_cpy_tbl;
++	struct rte_flow_pattern_template *lacp_rx_items_tmpl;
++	struct rte_flow_actions_template *lacp_rx_actions_tmpl;
++	struct rte_flow_template_table *hw_lacp_rx_tbl;
++};
++
+ #define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
+ #define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
+ #define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c
+index f311443472..ab9eb21e01 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c
+@@ -792,7 +792,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
+ 			       struct mlx5_aso_mtr *aso_mtr,
+ 			       struct mlx5_mtr_bulk *bulk,
+ 			       bool need_lock,
+-			       void *user_data,
++			       struct mlx5_hw_q_job *job,
+ 			       bool push)
+ {
+ 	volatile struct mlx5_aso_wqe *wqe = NULL;
+@@ -819,7 +819,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
+ 	rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
+ 	/* Fill next WQE. */
+ 	fm = &aso_mtr->fm;
+-	sq->elts[sq->head & mask].mtr = user_data ? user_data : aso_mtr;
++	sq->elts[sq->head & mask].user_data = job ? job : (void *)aso_mtr;
+ 	if (aso_mtr->type == ASO_METER_INDIRECT) {
+ 		if (likely(sh->config.dv_flow_en == 2))
+ 			pool = aso_mtr->pool;
+@@ -897,24 +897,6 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
+ 	return 1;
+ }
+ 
+-static void
+-mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums)
+-{
+-	uint16_t size = 1 << sq->log_desc_n;
+-	uint16_t mask = size - 1;
+-	uint16_t i;
+-	struct mlx5_aso_mtr *aso_mtr = NULL;
+-	uint8_t exp_state = ASO_METER_WAIT;
+-
+-	for (i = 0; i < aso_mtrs_nums; ++i) {
+-		aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
+-		MLX5_ASSERT(aso_mtr);
+-		(void)__atomic_compare_exchange_n(&aso_mtr->state,
+-				&exp_state, ASO_METER_READY,
+-				false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+-	}
+-}
+-
+ static void
+ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)
+ {
+@@ -925,7 +907,7 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)
+ 	uint32_t idx;
+ 	uint32_t next_idx = cq->cq_ci & mask;
+ 	uint16_t max;
+-	uint16_t n = 0;
++	uint16_t i, n = 0;
+ 	int ret;
+ 
+ 	if (need_lock)
+@@ -957,7 +939,19 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)
+ 		cq->cq_ci++;
+ 	} while (1);
+ 	if (likely(n)) {
+-		mlx5_aso_mtrs_status_update(sq, n);
++		uint8_t exp_state = ASO_METER_WAIT;
++		struct mlx5_aso_mtr *aso_mtr;
++		__rte_unused bool verdict;
++
++		for (i = 0; i < n; ++i) {
++			aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
++			MLX5_ASSERT(aso_mtr);
++			verdict = __atomic_compare_exchange_n(&aso_mtr->state,
++						    &exp_state, ASO_METER_READY,
++						    false, __ATOMIC_RELAXED,
++						    __ATOMIC_RELAXED);
++			MLX5_ASSERT(verdict);
++		}
+ 		sq->tail += n;
+ 		rte_io_wmb();
+ 		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
+@@ -966,6 +960,82 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)
+ 		rte_spinlock_unlock(&sq->sqsl);
+ }
+ 
++static __rte_always_inline struct mlx5_aso_sq *
++mlx5_aso_mtr_select_sq(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
++		       struct mlx5_aso_mtr *mtr, bool *need_lock)
++{
++	struct mlx5_aso_sq *sq;
++
++	if (likely(sh->config.dv_flow_en == 2) &&
++	    mtr->type == ASO_METER_INDIRECT) {
++		if (queue == MLX5_HW_INV_QUEUE) {
++			sq = &mtr->pool->sq[mtr->pool->nb_sq - 1];
++			*need_lock = true;
++		} else {
++			sq = &mtr->pool->sq[queue];
++			*need_lock = false;
++		}
++	} else {
++		sq = &sh->mtrmng->pools_mng.sq;
++		*need_lock = true;
++	}
++	return sq;
++}
++
++#if defined(HAVE_MLX5_HWS_SUPPORT)
++static void
++mlx5_aso_poll_cq_mtr_hws(struct mlx5_priv *priv, struct mlx5_aso_sq *sq)
++{
++#define MLX5_HWS_MTR_CMPL_NUM 4
++
++	int i, ret;
++	struct mlx5_aso_mtr *mtr;
++	uint8_t exp_state = ASO_METER_WAIT;
++	struct rte_flow_op_result res[MLX5_HWS_MTR_CMPL_NUM];
++	__rte_unused bool verdict;
++
++	rte_spinlock_lock(&sq->sqsl);
++repeat:
++	ret = mlx5_aso_pull_completion(sq, res, MLX5_HWS_MTR_CMPL_NUM);
++	if (ret) {
++		for (i = 0; i < ret; i++) {
++			struct mlx5_hw_q_job *job = res[i].user_data;
++
++			MLX5_ASSERT(job);
++			mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
++					     MLX5_INDIRECT_ACTION_IDX_GET(job->action));
++			MLX5_ASSERT(mtr);
++			verdict = __atomic_compare_exchange_n(&mtr->state,
++						    &exp_state, ASO_METER_READY,
++						    false, __ATOMIC_RELAXED,
++						    __ATOMIC_RELAXED);
++			MLX5_ASSERT(verdict);
++			flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
++		}
++		if (ret == MLX5_HWS_MTR_CMPL_NUM)
++			goto repeat;
++	}
++	rte_spinlock_unlock(&sq->sqsl);
++
++#undef MLX5_HWS_MTR_CMPL_NUM
++}
++#else
++static void
++mlx5_aso_poll_cq_mtr_hws(__rte_unused struct mlx5_priv *priv, __rte_unused struct mlx5_aso_sq *sq)
++{
++	MLX5_ASSERT(false);
++}
++#endif
++
++static void
++mlx5_aso_poll_cq_mtr_sws(__rte_unused struct mlx5_priv *priv,
++			 struct mlx5_aso_sq *sq)
++{
++	mlx5_aso_mtr_completion_handle(sq, true);
++}
++
++typedef void (*poll_cq_t)(struct mlx5_priv *, struct mlx5_aso_sq *);
++
+ /**
+  * Update meter parameter by send WQE.
+  *
+@@ -980,39 +1050,29 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)
+  *   0 on success, a negative errno value otherwise and rte_errno is set.
+  */
+ int
+-mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
+-			struct mlx5_aso_mtr *mtr,
+-			struct mlx5_mtr_bulk *bulk,
+-			void *user_data,
+-			bool push)
++mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue,
++			     struct mlx5_aso_mtr *mtr,
++			     struct mlx5_mtr_bulk *bulk,
++			     struct mlx5_hw_q_job *job, bool push)
+ {
+-	struct mlx5_aso_sq *sq;
+-	uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
+ 	bool need_lock;
++	struct mlx5_dev_ctx_shared *sh = priv->sh;
++	struct mlx5_aso_sq *sq =
++		mlx5_aso_mtr_select_sq(sh, queue, mtr, &need_lock);
++	uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
++	poll_cq_t poll_mtr_cq =
++		job ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
+ 	int ret;
+ 
+-	if (likely(sh->config.dv_flow_en == 2) &&
+-	    mtr->type == ASO_METER_INDIRECT) {
+-		if (queue == MLX5_HW_INV_QUEUE) {
+-			sq = &mtr->pool->sq[mtr->pool->nb_sq - 1];
+-			need_lock = true;
+-		} else {
+-			sq = &mtr->pool->sq[queue];
+-			need_lock = false;
+-		}
+-	} else {
+-		sq = &sh->mtrmng->pools_mng.sq;
+-		need_lock = true;
+-	}
+ 	if (queue != MLX5_HW_INV_QUEUE) {
+ 		ret = mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,
+-						     need_lock, user_data, push);
++						     need_lock, job, push);
+ 		return ret > 0 ? 0 : -1;
+ 	}
+ 	do {
+-		mlx5_aso_mtr_completion_handle(sq, need_lock);
++		poll_mtr_cq(priv, sq);
+ 		if (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,
+-						   need_lock, NULL, true))
++						   need_lock, job, true))
+ 			return 0;
+ 		/* Waiting for wqe resource. */
+ 		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
+@@ -1036,32 +1096,22 @@ mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
+  *   0 on success, a negative errno value otherwise and rte_errno is set.
+  */
+ int
+-mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
+-			struct mlx5_aso_mtr *mtr)
++mlx5_aso_mtr_wait(struct mlx5_priv *priv,
++		  struct mlx5_aso_mtr *mtr, bool is_tmpl_api)
+ {
++	bool need_lock;
+ 	struct mlx5_aso_sq *sq;
++	struct mlx5_dev_ctx_shared *sh = priv->sh;
+ 	uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
+-	uint8_t state;
+-	bool need_lock;
++	uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
++	poll_cq_t poll_mtr_cq =
++		is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
+ 
+-	if (likely(sh->config.dv_flow_en == 2) &&
+-	    mtr->type == ASO_METER_INDIRECT) {
+-		if (queue == MLX5_HW_INV_QUEUE) {
+-			sq = &mtr->pool->sq[mtr->pool->nb_sq - 1];
+-			need_lock = true;
+-		} else {
+-			sq = &mtr->pool->sq[queue];
+-			need_lock = false;
+-		}
+-	} else {
+-		sq = &sh->mtrmng->pools_mng.sq;
+-		need_lock = true;
+-	}
+-	state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+ 	if (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC)
+ 		return 0;
++	sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
+ 	do {
+-		mlx5_aso_mtr_completion_handle(sq, need_lock);
++		poll_mtr_cq(priv, sq);
+ 		if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+ 					    ASO_METER_READY)
+ 			return 0;
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c
+index 115d730317..863737ceba 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c
+@@ -267,21 +267,41 @@ struct field_modify_info modify_tcp[] = {
+ 	{0, 0, 0},
+ };
+ 
+-static void
++enum mlx5_l3_tunnel_detection {
++	l3_tunnel_none,
++	l3_tunnel_outer,
++	l3_tunnel_inner
++};
++
++static enum mlx5_l3_tunnel_detection
+ mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
+-			  uint8_t next_protocol, uint64_t *item_flags,
+-			  int *tunnel)
++			  uint8_t next_protocol, uint64_t item_flags,
++			  uint64_t *l3_tunnel_flag)
+ {
++	enum mlx5_l3_tunnel_detection td = l3_tunnel_none;
++
+ 	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ 		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+-	if (next_protocol == IPPROTO_IPIP) {
+-		*item_flags |= MLX5_FLOW_LAYER_IPIP;
+-		*tunnel = 1;
+-	}
+-	if (next_protocol == IPPROTO_IPV6) {
+-		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+-		*tunnel = 1;
++	if ((item_flags & MLX5_FLOW_LAYER_OUTER_L3) == 0) {
++		switch (next_protocol) {
++		case IPPROTO_IPIP:
++			td = l3_tunnel_outer;
++			*l3_tunnel_flag = MLX5_FLOW_LAYER_IPIP;
++			break;
++		case IPPROTO_IPV6:
++			td = l3_tunnel_outer;
++			*l3_tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP;
++			break;
++		default:
++			break;
++		}
++	} else {
++		td = l3_tunnel_inner;
++		*l3_tunnel_flag = item->type == RTE_FLOW_ITEM_TYPE_IPV4 ?
++				  MLX5_FLOW_LAYER_IPIP :
++				  MLX5_FLOW_LAYER_IPV6_ENCAP;
+ 	}
++	return td;
+ }
+ 
+ static inline struct mlx5_hlist *
+@@ -1925,7 +1945,7 @@ mlx5_flow_field_id_to_modify_info
+ 			if (priv->sh->config.dv_flow_en == 2)
+ 				reg = flow_hw_get_reg_id(dev,
+ 							 RTE_FLOW_ITEM_TYPE_TAG,
+-							 data->level);
++							 tag_index);
+ 			else
+ 				reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
+ 							   tag_index, error);
+@@ -5484,13 +5504,6 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
+ 				       &grp_info, error);
+ 	if (ret)
+ 		return ret;
+-	if (attributes->group == target_group &&
+-	    !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
+-			      MLX5_FLOW_ACTION_TUNNEL_MATCH)))
+-		return rte_flow_error_set(error, EINVAL,
+-					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+-					  "target group must be other than"
+-					  " the current flow group");
+ 	if (table == 0)
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+@@ -5952,7 +5965,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
+ 				   "cannot allocate resource memory");
+ 		return NULL;
+ 	}
+-	rte_memcpy(&entry->ft_type,
++	rte_memcpy(RTE_PTR_ADD(entry, offsetof(typeof(*entry), ft_type)),
+ 		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
+ 		   key_len + data_len);
+ 	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+@@ -7062,11 +7075,13 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
+ }
+ 
+ static int
+-validate_integrity_bits(const struct rte_flow_item_integrity *mask,
++validate_integrity_bits(const void *arg,
+ 			int64_t pattern_flags, uint64_t l3_flags,
+ 			uint64_t l4_flags, uint64_t ip4_flag,
+ 			struct rte_flow_error *error)
+ {
++	const struct rte_flow_item_integrity *mask = arg;
++
+ 	if (mask->l3_ok && !(pattern_flags & l3_flags))
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_ITEM,
+@@ -7255,6 +7270,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+ 	return 0;
+ }
+ 
++static __rte_always_inline uint8_t
++mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item,
++			   enum MLX5_SET_MATCHER key_type)
++{
++#define MLX5_L3_NEXT_PROTOCOL(i, ms)                                            \
++	((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ?                                  \
++	((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id :       \
++	(i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ?                                  \
++	((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto :               \
++	(i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ?                         \
++	((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\
++	0xff)
++
++	uint8_t next_protocol;
++
++	if (l3_item->mask != NULL && l3_item->spec != NULL) {
++		next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
++		if (next_protocol)
++			next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
++		else
++			next_protocol = 0xff;
++	} else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) {
++		next_protocol =  MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
++	} else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) {
++		next_protocol =  MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
++	} else {
++		/* Reset for inner layer. */
++		next_protocol = 0xff;
++	}
++	return next_protocol;
++
++#undef MLX5_L3_NEXT_PROTOCOL
++}
++
+ /**
+  * Validate IB BTH item.
+  *
+@@ -7451,6 +7500,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 		return ret;
+ 	is_root = (uint64_t)ret;
+ 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
++		enum mlx5_l3_tunnel_detection l3_tunnel_detection;
++		uint64_t l3_tunnel_flag;
+ 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ 		int type = items->type;
+ 
+@@ -7528,8 +7579,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 				vlan_m = items->mask;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_IPV4:
+-			mlx5_flow_tunnel_ip_check(items, next_protocol,
+-						  &item_flags, &tunnel);
++			next_protocol = mlx5_flow_l3_next_protocol
++				(items, (enum MLX5_SET_MATCHER)-1);
++			l3_tunnel_detection =
++				mlx5_flow_tunnel_ip_check(items, next_protocol,
++							  item_flags,
++							  &l3_tunnel_flag);
++			if (l3_tunnel_detection == l3_tunnel_inner) {
++				item_flags |= l3_tunnel_flag;
++				tunnel = 1;
++			}
+ 			ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
+ 							 last_item, ether_type,
+ 							 error);
+@@ -7537,23 +7596,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 				return ret;
+ 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+-			if (items->mask != NULL &&
+-			    ((const struct rte_flow_item_ipv4 *)
+-			     items->mask)->hdr.next_proto_id) {
+-				next_protocol =
+-					((const struct rte_flow_item_ipv4 *)
+-					 (items->spec))->hdr.next_proto_id;
+-				next_protocol &=
+-					((const struct rte_flow_item_ipv4 *)
+-					 (items->mask))->hdr.next_proto_id;
+-			} else {
+-				/* Reset for inner layer. */
+-				next_protocol = 0xff;
+-			}
++			if (l3_tunnel_detection == l3_tunnel_outer)
++				item_flags |= l3_tunnel_flag;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_IPV6:
+-			mlx5_flow_tunnel_ip_check(items, next_protocol,
+-						  &item_flags, &tunnel);
++			next_protocol = mlx5_flow_l3_next_protocol
++				(items, (enum MLX5_SET_MATCHER)-1);
++			l3_tunnel_detection =
++				mlx5_flow_tunnel_ip_check(items, next_protocol,
++							  item_flags,
++							  &l3_tunnel_flag);
++			if (l3_tunnel_detection == l3_tunnel_inner) {
++				item_flags |= l3_tunnel_flag;
++				tunnel = 1;
++			}
+ 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ 							   last_item,
+ 							   ether_type,
+@@ -7563,22 +7619,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 				return ret;
+ 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+-			if (items->mask != NULL &&
+-			    ((const struct rte_flow_item_ipv6 *)
+-			     items->mask)->hdr.proto) {
+-				item_ipv6_proto =
+-					((const struct rte_flow_item_ipv6 *)
+-					 items->spec)->hdr.proto;
+-				next_protocol =
+-					((const struct rte_flow_item_ipv6 *)
+-					 items->spec)->hdr.proto;
+-				next_protocol &=
+-					((const struct rte_flow_item_ipv6 *)
+-					 items->mask)->hdr.proto;
+-			} else {
+-				/* Reset for inner layer. */
+-				next_protocol = 0xff;
+-			}
++			if (l3_tunnel_detection == l3_tunnel_outer)
++				item_flags |= l3_tunnel_flag;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ 			ret = flow_dv_validate_item_ipv6_frag_ext(items,
+@@ -7589,19 +7631,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ 			last_item = tunnel ?
+ 					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ 					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+-			if (items->mask != NULL &&
+-			    ((const struct rte_flow_item_ipv6_frag_ext *)
+-			     items->mask)->hdr.next_header) {
+-				next_protocol =
+-				((const struct rte_flow_item_ipv6_frag_ext *)
+-				 items->spec)->hdr.next_header;
+-				next_protocol &=
+-				((const struct rte_flow_item_ipv6_frag_ext *)
+-				 items->mask)->hdr.next_header;
+-			} else {
+-				/* Reset for inner layer. */
+-				next_protocol = 0xff;
+-			}
++			next_protocol = mlx5_flow_l3_next_protocol
++				(items, (enum MLX5_SET_MATCHER)-1);
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_TCP:
+ 			ret = mlx5_flow_validate_item_tcp
+@@ -9985,14 +10016,13 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key,
+ {
+ 	const struct rte_flow_item_geneve_opt *geneve_opt_m;
+ 	const struct rte_flow_item_geneve_opt *geneve_opt_v;
+-	const struct rte_flow_item_geneve_opt *geneve_opt_vv = item->spec;
+-	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
++	const struct rte_flow_item_geneve_opt *orig_spec = item->spec;
+ 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ 	rte_be32_t opt_data_key = 0, opt_data_mask = 0;
+-	uint32_t *data;
++	size_t option_byte_len;
+ 	int ret = 0;
+ 
+-	if (MLX5_ITEM_VALID(item, key_type))
++	if (MLX5_ITEM_VALID(item, key_type) || !orig_spec)
+ 		return -1;
+ 	MLX5_ITEM_UPDATE(item, key_type, geneve_opt_v, geneve_opt_m,
+ 			 &rte_flow_item_geneve_opt_mask);
+@@ -10005,36 +10035,15 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key,
+ 			return ret;
+ 		}
+ 	}
+-	/*
+-	 * Set the option length in GENEVE header if not requested.
+-	 * The GENEVE TLV option length is expressed by the option length field
+-	 * in the GENEVE header.
+-	 * If the option length was not requested but the GENEVE TLV option item
+-	 * is present we set the option length field implicitly.
+-	 */
+-	if (!MLX5_GET16(fte_match_set_misc, misc_v, geneve_opt_len)) {
+-		if (key_type & MLX5_SET_MATCHER_M)
+-			MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+-				 MLX5_GENEVE_OPTLEN_MASK);
+-		else
+-			MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+-				 geneve_opt_v->option_len + 1);
+-	}
+-	/* Set the data. */
+-	if (key_type == MLX5_SET_MATCHER_SW_V)
+-		data = geneve_opt_vv->data;
+-	else
+-		data = geneve_opt_v->data;
+-	if (data) {
+-		memcpy(&opt_data_key, data,
+-			RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
+-				sizeof(opt_data_key)));
+-		memcpy(&opt_data_mask, geneve_opt_m->data,
+-			RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
+-				sizeof(opt_data_mask)));
++	/* Convert the option length from DW to bytes for using memcpy. */
++	option_byte_len = RTE_MIN((size_t)(orig_spec->option_len * 4),
++				  sizeof(rte_be32_t));
++	if (geneve_opt_v->data) {
++		memcpy(&opt_data_key, geneve_opt_v->data, option_byte_len);
++		memcpy(&opt_data_mask, geneve_opt_m->data, option_byte_len);
+ 		MLX5_SET(fte_match_set_misc3, misc3_v,
+-				geneve_tlv_option_0_data,
+-			rte_be_to_cpu_32(opt_data_key & opt_data_mask));
++			 geneve_tlv_option_0_data,
++			 rte_be_to_cpu_32(opt_data_key & opt_data_mask));
+ 	}
+ 	return ret;
+ }
+@@ -13658,6 +13667,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
+ 		return rte_flow_error_set(error, ENOTSUP,
+ 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ 					  "Connection is not supported");
++	if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) {
++		rte_flow_error_set(error, EINVAL,
++				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++				   "CT supports port indexes up to "
++				   RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT));
++		return 0;
++	}
+ 	idx = flow_dv_aso_ct_alloc(dev, error);
+ 	if (!idx)
+ 		return rte_flow_error_set(error, rte_errno,
+@@ -13707,6 +13723,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
+ 	int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ 	int item_type = items->type;
+ 	uint64_t last_item = wks->last_item;
++	enum mlx5_l3_tunnel_detection l3_tunnel_detection;
++	uint64_t l3_tunnel_flag;
+ 	int ret;
+ 
+ 	switch (item_type) {
+@@ -13750,94 +13768,47 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
+ 					  MLX5_FLOW_LAYER_OUTER_VLAN);
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_IPV4:
+-		mlx5_flow_tunnel_ip_check(items, next_protocol,
+-					  &wks->item_flags, &tunnel);
++		next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
++		l3_tunnel_detection =
++			mlx5_flow_tunnel_ip_check(items, next_protocol,
++						  wks->item_flags,
++						  &l3_tunnel_flag);
++		if (l3_tunnel_detection == l3_tunnel_inner) {
++			wks->item_flags |= l3_tunnel_flag;
++			tunnel = 1;
++		}
+ 		flow_dv_translate_item_ipv4(key, items, tunnel,
+ 					    wks->group, key_type);
+ 		wks->priority = MLX5_PRIORITY_MAP_L3;
+ 		last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ 				     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+-		if (items->mask != NULL &&
+-		    items->spec != NULL &&
+-			((const struct rte_flow_item_ipv4 *)
+-			 items->mask)->hdr.next_proto_id) {
+-			next_protocol =
+-				((const struct rte_flow_item_ipv4 *)
+-				 (items->spec))->hdr.next_proto_id;
+-			next_protocol &=
+-				((const struct rte_flow_item_ipv4 *)
+-				 (items->mask))->hdr.next_proto_id;
+-		} else if (key_type == MLX5_SET_MATCHER_HS_M &&
+-			   items->mask != NULL) {
+-			next_protocol =  ((const struct rte_flow_item_ipv4 *)
+-					(items->mask))->hdr.next_proto_id;
+-		} else if (key_type == MLX5_SET_MATCHER_HS_V &&
+-			   items->spec != NULL) {
+-			next_protocol =  ((const struct rte_flow_item_ipv4 *)
+-					(items->spec))->hdr.next_proto_id;
+-		} else {
+-			/* Reset for inner layer. */
+-			next_protocol = 0xff;
+-		}
++		if (l3_tunnel_detection == l3_tunnel_outer)
++			wks->item_flags |= l3_tunnel_flag;
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_IPV6:
+-		mlx5_flow_tunnel_ip_check(items, next_protocol,
+-					  &wks->item_flags, &tunnel);
++		next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
++		l3_tunnel_detection =
++			mlx5_flow_tunnel_ip_check(items, next_protocol,
++						  wks->item_flags,
++						  &l3_tunnel_flag);
++		if (l3_tunnel_detection == l3_tunnel_inner) {
++			wks->item_flags |= l3_tunnel_flag;
++			tunnel = 1;
++		}
+ 		flow_dv_translate_item_ipv6(key, items, tunnel,
+ 					    wks->group, key_type);
+ 		wks->priority = MLX5_PRIORITY_MAP_L3;
+ 		last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ 				     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+-		if (items->mask != NULL &&
+-		    items->spec != NULL &&
+-			((const struct rte_flow_item_ipv6 *)
+-			 items->mask)->hdr.proto) {
+-			next_protocol =
+-				((const struct rte_flow_item_ipv6 *)
+-				 items->spec)->hdr.proto;
+-			next_protocol &=
+-				((const struct rte_flow_item_ipv6 *)
+-				 items->mask)->hdr.proto;
+-		} else if (key_type == MLX5_SET_MATCHER_HS_M &&
+-			   items->mask != NULL) {
+-			next_protocol =  ((const struct rte_flow_item_ipv6 *)
+-					(items->mask))->hdr.proto;
+-		} else if (key_type == MLX5_SET_MATCHER_HS_V &&
+-			   items->spec != NULL) {
+-			next_protocol =  ((const struct rte_flow_item_ipv6 *)
+-					(items->spec))->hdr.proto;
+-		} else {
+-			/* Reset for inner layer. */
+-			next_protocol = 0xff;
+-		}
++		if (l3_tunnel_detection == l3_tunnel_outer)
++			wks->item_flags |= l3_tunnel_flag;
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ 		flow_dv_translate_item_ipv6_frag_ext
+ 					(key, items, tunnel, key_type);
+ 		last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ 				     MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+-		if (items->mask != NULL &&
+-		    items->spec != NULL &&
+-			((const struct rte_flow_item_ipv6_frag_ext *)
+-			 items->mask)->hdr.next_header) {
+-			next_protocol =
+-			((const struct rte_flow_item_ipv6_frag_ext *)
+-			 items->spec)->hdr.next_header;
+-			next_protocol &=
+-			((const struct rte_flow_item_ipv6_frag_ext *)
+-			 items->mask)->hdr.next_header;
+-		} else if (key_type == MLX5_SET_MATCHER_HS_M &&
+-			   items->mask != NULL) {
+-			next_protocol =  ((const struct rte_flow_item_ipv6_frag_ext *)
+-					(items->mask))->hdr.next_header;
+-		} else if (key_type == MLX5_SET_MATCHER_HS_V &&
+-			   items->spec != NULL) {
+-			next_protocol =  ((const struct rte_flow_item_ipv6_frag_ext *)
+-					(items->spec))->hdr.next_header;
+-		} else {
+-			/* Reset for inner layer. */
+-			next_protocol = 0xff;
+-		}
++		next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ 		break;
+ 	case RTE_FLOW_ITEM_TYPE_TCP:
+ 		flow_dv_translate_item_tcp(key, items, tunnel, key_type);
+@@ -14280,7 +14251,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
+ 	 * Avoid be overwritten by other sub mlx5_flows.
+ 	 */
+ 	if (wks.geneve_tlv_option)
+-		dev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option;
++		dev_flow->flow->geneve_tlv_option += wks.geneve_tlv_option;
+ 	return 0;
+ }
+ 
+@@ -15420,7 +15391,8 @@ error:
+ 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ 		       handle_idx, dh, next) {
+ 		/* hrxq is union, don't clear it if the flag is not set. */
+-		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
++		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq &&
++		    !dh->dvh.rix_sample && !dh->dvh.rix_dest_array) {
+ 			mlx5_hrxq_release(dev, dh->rix_hrxq);
+ 			dh->rix_hrxq = 0;
+ 		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+@@ -15884,9 +15856,9 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+ 		flow_dv_aso_ct_release(dev, flow->ct, NULL);
+ 	else if (flow->age)
+ 		flow_dv_aso_age_release(dev, flow->age);
+-	if (flow->geneve_tlv_option) {
++	while (flow->geneve_tlv_option) {
+ 		flow_dev_geneve_tlv_option_resource_release(priv->sh);
+-		flow->geneve_tlv_option = 0;
++		flow->geneve_tlv_option--;
+ 	}
+ 	while (flow->dev_handles) {
+ 		uint32_t tmp_idx = flow->dev_handles;
+@@ -16350,6 +16322,8 @@ flow_dv_action_create(struct rte_eth_dev *dev,
+ 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ 		ret = flow_dv_translate_create_conntrack(dev, action->conf,
+ 							 err);
++		if (!ret)
++			break;
+ 		idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
+ 		break;
+ 	default:
+@@ -17675,9 +17649,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
+ 		}
+ 	}
+ 	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
+-	if (priority < RTE_COLOR_RED)
+-		flow_dv_match_meta_reg(matcher.mask.buf,
+-			(enum modify_reg)color_reg_c_idx, color_mask, color_mask);
++	flow_dv_match_meta_reg(matcher.mask.buf,
++		(enum modify_reg)color_reg_c_idx, color_mask, color_mask);
+ 	matcher.priority = priority;
+ 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ 				    matcher.mask.size);
+@@ -17711,7 +17684,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
+ static int
+ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
+ 		struct mlx5_flow_meter_sub_policy *sub_policy,
+-		uint8_t egress, uint8_t transfer, bool match_src_port,
++		uint8_t egress, uint8_t transfer, bool *match_src_port,
+ 		struct mlx5_meter_policy_acts acts[RTE_COLORS])
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+@@ -17726,9 +17699,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
+ 		.reserved = 0,
+ 	};
+ 	int i;
++	uint16_t priority;
+ 	int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
+ 	struct mlx5_sub_policy_color_rule *color_rule;
+-	bool svport_match;
+ 	struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
+ 
+ 	if (ret < 0)
+@@ -17761,13 +17734,12 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
+ 		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
+ 				  color_rule, next_port);
+ 		color_rule->src_port = priv->representor_id;
+-		/* No use. */
+-		attr.priority = i;
++		priority = (match_src_port[i] == match_src_port[RTE_COLOR_GREEN]) ?
++			   MLX5_MTR_POLICY_MATCHER_PRIO : (MLX5_MTR_POLICY_MATCHER_PRIO + 1);
+ 		/* Create matchers for colors. */
+-		svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
+ 		if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
+-				MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
+-				&attr, svport_match, NULL,
++				priority, sub_policy,
++				&attr, match_src_port[i], NULL,
+ 				&color_rule->matcher, &flow_err)) {
+ 			DRV_LOG(ERR, "Failed to create color%u matcher.", i);
+ 			goto err_exit;
+@@ -17777,7 +17749,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
+ 				color_reg_c_idx, (enum rte_color)i,
+ 				color_rule->matcher,
+ 				acts[i].actions_n, acts[i].dv_actions,
+-				svport_match, NULL, &color_rule->rule,
++				match_src_port[i], NULL, &color_rule->rule,
+ 				&attr)) {
+ 			DRV_LOG(ERR, "Failed to create color%u rule.", i);
+ 			goto err_exit;
+@@ -17825,7 +17797,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
+ 	uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
+ 	uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
+ 	bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
+-	bool match_src_port = false;
++	bool match_src_port[RTE_COLORS] = {false};
+ 	int i;
+ 
+ 	/* If RSS or Queue, no previous actions / rules is created. */
+@@ -17896,7 +17868,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
+ 				acts[i].dv_actions[acts[i].actions_n] =
+ 					port_action->action;
+ 				acts[i].actions_n++;
+-				match_src_port = true;
++				match_src_port[i] = true;
+ 				break;
+ 			case MLX5_FLOW_FATE_DROP:
+ 			case MLX5_FLOW_FATE_JUMP:
+@@ -17948,7 +17920,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
+ 				acts[i].dv_actions[acts[i].actions_n++] =
+ 							tbl_data->jump.action;
+ 				if (mtr_policy->act_cnt[i].modify_hdr)
+-					match_src_port = !!transfer;
++					match_src_port[i] = !!transfer;
+ 				break;
+ 			default:
+ 				/*Queue action do nothing*/
+@@ -17962,9 +17934,9 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
+ 			"Failed to create policy rules per domain.");
+ 		goto err_exit;
+ 	}
+-	if (match_src_port) {
+-		mtr_policy->match_port = match_src_port;
+-		mtr_policy->hierarchy_match_port = match_src_port;
++	if (match_src_port[RTE_COLOR_GREEN] || match_src_port[RTE_COLOR_YELLOW]) {
++		mtr_policy->match_port = 1;
++		mtr_policy->hierarchy_match_port = 1;
+ 	}
+ 	return 0;
+ err_exit:
+@@ -18026,6 +17998,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
+ 	uint8_t egress, transfer;
+ 	struct rte_flow_error error;
+ 	struct mlx5_meter_policy_acts acts[RTE_COLORS];
++	bool match_src_port[RTE_COLORS] = {false};
+ 	int ret;
+ 
+ 	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
+@@ -18101,7 +18074,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
+ 		/* Create default policy rules. */
+ 		ret = __flow_dv_create_domain_policy_rules(dev,
+ 					&def_policy->sub_policy,
+-					egress, transfer, false, acts);
++					egress, transfer, match_src_port, acts);
+ 		if (ret) {
+ 			DRV_LOG(ERR, "Failed to create default policy rules.");
+ 			goto def_policy_error;
+@@ -18660,7 +18633,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
+ 	struct {
+ 		struct mlx5_flow_meter_policy *fm_policy;
+ 		struct mlx5_flow_meter_info *next_fm;
+-		struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS];
++		struct mlx5_sub_policy_color_rule *tag_rule[RTE_COLORS];
+ 	} fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} };
+ 	uint32_t fm_cnt = 0;
+ 	uint32_t i, j;
+@@ -18694,14 +18667,22 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
+ 		mtr_policy = fm_info[i].fm_policy;
+ 		rte_spinlock_lock(&mtr_policy->sl);
+ 		sub_policy = mtr_policy->sub_policys[domain][0];
+-		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
++		for (j = 0; j < RTE_COLORS; j++) {
+ 			uint8_t act_n = 0;
+-			struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
++			struct mlx5_flow_dv_modify_hdr_resource *modify_hdr = NULL;
+ 			struct mlx5_flow_dv_port_id_action_resource *port_action;
++			uint8_t fate_action;
+ 
+-			if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR &&
+-			    mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_PORT_ID)
+-				continue;
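++			/* The red color rule always uses the drop fate. */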
++			if (j == RTE_COLOR_RED) {
++				fate_action = MLX5_FLOW_FATE_DROP;
++			} else {
++				fate_action = mtr_policy->act_cnt[j].fate_action;
++				modify_hdr = mtr_policy->act_cnt[j].modify_hdr;
++				if (fate_action != MLX5_FLOW_FATE_MTR &&
++				    fate_action != MLX5_FLOW_FATE_PORT_ID &&
++				    fate_action != MLX5_FLOW_FATE_DROP)
++					continue;
++			}
+ 			color_rule = mlx5_malloc(MLX5_MEM_ZERO,
+ 						 sizeof(struct mlx5_sub_policy_color_rule),
+ 						 0, SOCKET_ID_ANY);
+@@ -18713,9 +18694,8 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
+ 				goto err_exit;
+ 			}
+ 			color_rule->src_port = src_port;
+-			modify_hdr = mtr_policy->act_cnt[j].modify_hdr;
+ 			/* Prepare to create color rule. */
+-			if (mtr_policy->act_cnt[j].fate_action == MLX5_FLOW_FATE_MTR) {
++			if (fate_action == MLX5_FLOW_FATE_MTR) {
+ 				next_fm = fm_info[i].next_fm;
+ 				if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
+ 					mlx5_free(color_rule);
+@@ -18742,7 +18722,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
+ 				}
+ 				acts.dv_actions[act_n++] = tbl_data->jump.action;
+ 				acts.actions_n = act_n;
+-			} else {
++			} else if (fate_action == MLX5_FLOW_FATE_PORT_ID) {
+ 				port_action =
+ 					mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
+ 						       mtr_policy->act_cnt[j].rix_port_id_action);
+@@ -18755,6 +18735,9 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
+ 					acts.dv_actions[act_n++] = modify_hdr->action;
+ 				acts.dv_actions[act_n++] = port_action->action;
+ 				acts.actions_n = act_n;
++			} else {
++				acts.dv_actions[act_n++] = mtr_policy->dr_drop_action[domain];
++				acts.actions_n = act_n;
+ 			}
+ 			fm_info[i].tag_rule[j] = color_rule;
+ 			TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port);
+@@ -18786,7 +18769,7 @@ err_exit:
+ 		mtr_policy = fm_info[i].fm_policy;
+ 		rte_spinlock_lock(&mtr_policy->sl);
+ 		sub_policy = mtr_policy->sub_policys[domain][0];
+-		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
++		for (j = 0; j < RTE_COLORS; j++) {
+ 			color_rule = fm_info[i].tag_rule[j];
+ 			if (!color_rule)
+ 				continue;
+@@ -19116,8 +19099,7 @@ flow_dv_get_aged_flows(struct rte_eth_dev *dev,
+ 	LIST_FOREACH(act, &age_info->aged_aso, next) {
+ 		nb_flows++;
+ 		if (nb_contexts) {
+-			context[nb_flows - 1] =
+-						act->age_params.context;
++			context[nb_flows - 1] = act->age_params.context;
+ 			if (!(--nb_contexts))
+ 				break;
+ 		}
+@@ -19675,11 +19657,13 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
+ 		}
+ 	}
+ 	if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) {
+-		if (!(action_flags[RTE_COLOR_GREEN] & action_flags[RTE_COLOR_YELLOW] &
+-		      MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY))
++		uint64_t hierarchy_type_flag =
++			MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | MLX5_FLOW_ACTION_JUMP;
++		if (!(action_flags[RTE_COLOR_GREEN] & hierarchy_type_flag) ||
++		    !(action_flags[RTE_COLOR_YELLOW] & hierarchy_type_flag))
+ 			return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY,
+ 						  NULL,
+-						  "Meter hierarchy supports meter action only.");
++						  "Unsupported action in meter hierarchy.");
+ 	}
+ 	/* If both colors have RSS, the attributes should be the same. */
+ 	if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c
+index da873ae2e2..af4df13b2f 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c
+@@ -104,12 +104,40 @@ struct mlx5_tbl_multi_pattern_ctx {
+ 
+ #define MLX5_EMPTY_MULTI_PATTERN_CTX {{{0,}},}
+ 
++static __rte_always_inline struct mlx5_hw_q_job *
++flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
++			const struct rte_flow_action_handle *handle,
++			void *user_data, void *query_data,
++			enum mlx5_hw_job_type type,
++			enum mlx5_hw_indirect_type indirect_type,
++			struct rte_flow_error *error);
++static void
++flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow,
++			  struct rte_flow_error *error);
++
+ static int
+ mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
+ 			       struct rte_flow_template_table *tbl,
+ 			       struct mlx5_tbl_multi_pattern_ctx *mpat,
+ 			       struct rte_flow_error *error);
+ 
++static __rte_always_inline enum mlx5_indirect_list_type
++flow_hw_inlist_type_get(const struct rte_flow_action *actions);
++
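++/*
++ * Check that the HWS flow engine was configured on this port, i.e. that
++ * rte_flow_configure() has created the mlx5dr context.
++ */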
++static bool
++mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error)
++{
++	const struct mlx5_priv *priv = dev->data->dev_private;
++
++	if (!priv->dr_ctx) {
++		rte_flow_error_set(error, EINVAL,
++				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++				   "non-template flow engine was not configured");
++		return false;
++	}
++	return true;
++}
++
+ static __rte_always_inline int
+ mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
+ {
+@@ -274,21 +302,6 @@ static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
+ 	.hdr.ether_type = 0,
+ };
+ 
+-static __rte_always_inline struct mlx5_hw_q_job *
+-flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)
+-{
+-	MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);
+-	return priv->hw_q[queue].job_idx ?
+-	       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;
+-}
+-
+-static __rte_always_inline void
+-flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)
+-{
+-	MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);
+-	priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
+-}
+-
+ static inline enum mlx5dr_matcher_insert_mode
+ flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
+ {
+@@ -1010,15 +1023,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,
+ 		if (!shared_rss || __flow_hw_act_data_shared_rss_append
+ 		    (priv, acts,
+ 		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
+-		    action_src, action_dst, idx, shared_rss))
++		    action_src, action_dst, idx, shared_rss)) {
++			DRV_LOG(WARNING, "Indirect RSS action index %d translation failed", act_idx);
+ 			return -1;
++		}
+ 		break;
+ 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+ 		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
+ 			(enum rte_flow_action_type)
+ 			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
+-			action_src, action_dst, act_idx))
++			action_src, action_dst, act_idx)) {
++			DRV_LOG(WARNING, "Indirect count action translation failed");
+ 			return -1;
++		}
+ 		break;
+ 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
+ 		/* Not supported, prevent by validate function. */
+@@ -1026,15 +1043,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,
+ 		break;
+ 	case MLX5_INDIRECT_ACTION_TYPE_CT:
+ 		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
+-				       idx, &acts->rule_acts[action_dst]))
++				       idx, &acts->rule_acts[action_dst])) {
++			DRV_LOG(WARNING, "Indirect CT action translation failed");
+ 			return -1;
++		}
+ 		break;
+ 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
+ 		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
+ 			(enum rte_flow_action_type)
+ 			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
+-			action_src, action_dst, idx))
++			action_src, action_dst, idx)) {
++			DRV_LOG(WARNING, "Indirect meter mark action translation failed");
+ 			return -1;
++		}
+ 		break;
+ 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
+ 		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
+@@ -1455,7 +1476,7 @@ flow_hw_meter_compile(struct rte_eth_dev *dev,
+ 	acts->rule_acts[jump_pos].action = (!!group) ?
+ 				    acts->jump->hws_action :
+ 				    acts->jump->root_action;
+-	if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
++	if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
+ 		return -ENOMEM;
+ 	return 0;
+ }
+@@ -1532,7 +1553,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
+ static __rte_always_inline struct mlx5_aso_mtr *
+ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
+ 			 const struct rte_flow_action *action,
+-			 void *user_data, bool push)
++			 struct mlx5_hw_q_job *job, bool push)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
+@@ -1540,6 +1561,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
+ 	struct mlx5_aso_mtr *aso_mtr;
+ 	struct mlx5_flow_meter_info *fm;
+ 	uint32_t mtr_id;
++	uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
++					MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ 
+ 	if (meter_mark->profile == NULL)
+ 		return NULL;
+@@ -1558,15 +1581,16 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
+ 			  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
+ 	aso_mtr->offset = mtr_id - 1;
+ 	aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
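++	/* Encode the action type and meter index into the indirect handle. */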
++	job->action = (void *)(handle | mtr_id);
+ 	/* Update ASO flow meter by wqe. */
+-	if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
+-					 &priv->mtr_bulk, user_data, push)) {
++	if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
++					 &priv->mtr_bulk, job, push)) {
+ 		mlx5_ipool_free(pool->idx_pool, mtr_id);
+ 		return NULL;
+ 	}
+ 	/* Wait for ASO object completion. */
+ 	if (queue == MLX5_HW_INV_QUEUE &&
+-	    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
++	    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
+ 		mlx5_ipool_free(pool->idx_pool, mtr_id);
+ 		return NULL;
+ 	}
+@@ -1584,10 +1608,18 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
+ 	struct mlx5_aso_mtr *aso_mtr;
++	struct mlx5_hw_q_job *job =
++		flow_hw_action_job_init(priv, queue, NULL, NULL, NULL,
++					MLX5_HW_Q_JOB_TYPE_CREATE,
++					MLX5_HW_INDIRECT_TYPE_LEGACY, NULL);
+ 
+-	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true);
+-	if (!aso_mtr)
++	if (!job)
+ 		return -1;
++	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, true);
++	if (!aso_mtr) {
++		flow_hw_job_put(priv, job, queue);
++		return -1;
++	}
+ 
+ 	/* Compile METER_MARK action */
+ 	acts[aso_mtr_pos].action = pool->action;
+@@ -1722,15 +1754,9 @@ flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
+ 	const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ 		(typeof(flow_conf))action_conf->conf;
+ 
+-	/*
+-	 * Masked indirect handle set dr5 action during template table
+-	 * translation.
+-	 */
+-	if (!dr_rule->action) {
+-		ret = flow_dr_set_meter(priv, dr_rule, action_conf);
+-		if (ret)
+-			return ret;
+-	}
++	ret = flow_dr_set_meter(priv, dr_rule, action_conf);
++	if (ret)
++		return ret;
+ 	if (!act_data->shared_meter.conf_masked) {
+ 		if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
+ 			flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
+@@ -2512,6 +2538,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
+ 	}
+ 	return 0;
+ err:
++	/* Set rte_errno if it was not set when the error path was reached. */
++	if (!rte_errno)
++		rte_errno = EINVAL;
+ 	err = rte_errno;
+ 	__flow_hw_action_template_destroy(dev, acts);
+ 	return rte_flow_error_set(error, err,
+@@ -2865,6 +2894,30 @@ flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
+ 	return 0;
+ }
+ 
++/**
++ * Release any actions allocated for the flow rule during actions construction.
++ *
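++ * @param[in] dev
++ *   Pointer to the Ethernet device structure.
++ * @param[in] queue
++ *   Index of the flow queue.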
++ * @param[in] flow
++ *   Pointer to flow structure.
++ */
++static void
++flow_hw_release_actions(struct rte_eth_dev *dev,
++			uint32_t queue,
++			struct rte_flow_hw *flow)
++{
++	struct mlx5_priv *priv = dev->data->dev_private;
++	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
++
++	if (flow->fate_type == MLX5_FLOW_FATE_JUMP)
++		flow_hw_jump_release(dev, flow->jump);
++	else if (flow->fate_type == MLX5_FLOW_FATE_QUEUE)
++		mlx5_hrxq_obj_release(dev, flow->hrxq);
++	if (mlx5_hws_cnt_id_valid(flow->cnt_id))
++		flow_hw_age_count_release(priv, queue, flow, NULL);
++	if (flow->mtr_id)
++		mlx5_ipool_free(pool->idx_pool, flow->mtr_id);
++}
++
+ /**
+  * Construct flow action array.
+  *
+@@ -2980,7 +3033,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 					(dev, queue, action, table, it_idx,
+ 					 at->action_flags, job->flow,
+ 					 &rule_acts[act_data->action_dst]))
+-				return -1;
++				goto error;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_VOID:
+ 			break;
+@@ -3000,7 +3053,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 			jump = flow_hw_jump_action_register
+ 				(dev, &table->cfg, jump_group, NULL);
+ 			if (!jump)
+-				return -1;
++				goto error;
+ 			rule_acts[act_data->action_dst].action =
+ 			(!!attr.group) ? jump->hws_action : jump->root_action;
+ 			job->flow->jump = jump;
+@@ -3012,7 +3065,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 					ft_flag,
+ 					action);
+ 			if (!hrxq)
+-				return -1;
++				goto error;
+ 			rule_acts[act_data->action_dst].action = hrxq->action;
+ 			job->flow->hrxq = hrxq;
+ 			job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
+@@ -3022,19 +3075,19 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 			if (flow_hw_shared_action_get
+ 				(dev, act_data, item_flags,
+ 				 &rule_acts[act_data->action_dst]))
+-				return -1;
++				goto error;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ 			enc_item = ((const struct rte_flow_action_vxlan_encap *)
+ 				   action->conf)->definition;
+ 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
+-				return -1;
++				goto error;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ 			enc_item = ((const struct rte_flow_action_nvgre_encap *)
+ 				   action->conf)->definition;
+ 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
+-				return -1;
++				goto error;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ 			raw_encap_data =
+@@ -3063,12 +3116,12 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 								     hw_acts,
+ 								     action);
+ 			if (ret)
+-				return -1;
++				goto error;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ 			port_action = action->conf;
+ 			if (!priv->hw_vport[port_action->port_id])
+-				return -1;
++				goto error;
+ 			rule_acts[act_data->action_dst].action =
+ 					priv->hw_vport[port_action->port_id];
+ 			break;
+@@ -3088,7 +3141,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 			jump = flow_hw_jump_action_register
+ 				(dev, &table->cfg, aso_mtr->fm.group, NULL);
+ 			if (!jump)
+-				return -1;
++				goto error;
+ 			MLX5_ASSERT
+ 				(!rule_acts[act_data->action_dst + 1].action);
+ 			rule_acts[act_data->action_dst + 1].action =
+@@ -3096,8 +3149,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 							 jump->root_action;
+ 			job->flow->jump = jump;
+ 			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
+-			if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
+-				return -1;
++			if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
++				goto error;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_AGE:
+ 			age = action->conf;
+@@ -3112,7 +3165,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 							     job->flow->res_idx,
+ 							     error);
+ 			if (age_idx == 0)
+-				return -rte_errno;
++				goto error;
+ 			job->flow->age_idx = age_idx;
+ 			if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
+ 				/*
+@@ -3123,11 +3176,10 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 				break;
+ 			/* Fall-through. */
+ 		case RTE_FLOW_ACTION_TYPE_COUNT:
+-			/* If the port is engaged in resource sharing, do not use queue cache. */
+-			cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;
++			cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
+ 			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
+ 			if (ret != 0)
+-				return ret;
++				goto error;
+ 			ret = mlx5_hws_cnt_pool_get_action_offset
+ 				(priv->hws_cpool,
+ 				 cnt_id,
+@@ -3135,7 +3187,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 				 &rule_acts[act_data->action_dst].counter.offset
+ 				 );
+ 			if (ret != 0)
+-				return ret;
++				goto error;
+ 			job->flow->cnt_id = cnt_id;
+ 			break;
+ 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
+@@ -3146,7 +3198,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 				 &rule_acts[act_data->action_dst].counter.offset
+ 				 );
+ 			if (ret != 0)
+-				return ret;
++				goto error;
+ 			job->flow->cnt_id = act_data->shared_counter.id;
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+@@ -3154,7 +3206,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 				 ((uint32_t)(uintptr_t)action->conf);
+ 			if (flow_hw_ct_compile(dev, queue, ct_idx,
+ 					       &rule_acts[act_data->action_dst]))
+-				return -1;
++				goto error;
+ 			break;
+ 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
+ 			mtr_id = act_data->shared_meter.id &
+@@ -3162,7 +3214,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 			/* Find ASO object. */
+ 			aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
+ 			if (!aso_mtr)
+-				return -1;
++				goto error;
+ 			rule_acts[act_data->action_dst].action =
+ 							pool->action;
+ 			rule_acts[act_data->action_dst].aso_meter.offset =
+@@ -3177,7 +3229,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 				act_data->action_dst, action,
+ 				rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE);
+ 			if (ret != 0)
+-				return ret;
++				goto error;
+ 			break;
+ 		default:
+ 			break;
+@@ -3215,6 +3267,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+ 	if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id))
+ 		job->flow->cnt_id = hw_acts->cnt_id;
+ 	return 0;
++
++error:
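++	/* Release the resources allocated for the flow so far to avoid leaks. */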
++	flow_hw_release_actions(dev, queue, job->flow);
++	rte_errno = EINVAL;
++	return -rte_errno;
+ }
+ 
+ static const struct rte_flow_item *
+@@ -3320,10 +3377,6 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
+ 	uint32_t res_idx = 0;
+ 	int ret;
+ 
+-	if (unlikely((!dev->data->dev_started))) {
+-		rte_errno = EINVAL;
+-		goto error;
+-	}
+ 	job = flow_hw_job_get(priv, queue);
+ 	if (!job) {
+ 		rte_errno = ENOMEM;
+@@ -3368,10 +3421,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
+ 	if (flow_hw_actions_construct(dev, job,
+ 				      &table->ats[action_template_index],
+ 				      pattern_template_index, actions,
+-				      rule_acts, queue, error)) {
+-		rte_errno = EINVAL;
++				      rule_acts, queue, error))
+ 		goto error;
+-	}
+ 	rule_items = flow_hw_get_rule_items(dev, table, items,
+ 					    pattern_template_index, job);
+ 	if (!rule_items)
+@@ -3722,8 +3773,7 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
+ 		}
+ 		return;
+ 	}
+-	/* If the port is engaged in resource sharing, do not use queue cache. */
+-	cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;
++	cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
+ 	/* Put the counter first to reduce the race risk in BG thread. */
+ 	mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
+ 	flow->cnt_id = 0;
+@@ -3780,13 +3830,6 @@ flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job
+ 						job->query.hw);
+ 			aso_ct->state = ASO_CONNTRACK_READY;
+ 		}
+-	} else {
+-		/*
+-		 * rte_flow_op_result::user data can point to
+-		 * struct mlx5_aso_mtr object as well
+-		 */
+-		if (queue != CTRL_QUEUE_ID(priv))
+-			MLX5_ASSERT(false);
+ 	}
+ }
+ 
+@@ -4368,12 +4411,23 @@ flow_hw_table_create(struct rte_eth_dev *dev,
+ 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
+ 	/* Parse hints information. */
+ 	if (attr->specialize) {
+-		if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
+-			matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_WIRE;
+-		else if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
+-			matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_VPORT;
+-		else
+-			DRV_LOG(INFO, "Unsupported hint value %x", attr->specialize);
++		uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
++			       RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
++
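++		/* The WIRE_ORIG and VPORT_ORIG hints are mutually exclusive. */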
++		if ((attr->specialize & val) == val) {
++			DRV_LOG(INFO, "Invalid hint value %x",
++				attr->specialize);
++			rte_errno = EINVAL;
++			goto it_error;
++		}
++		if (attr->specialize &
++		    RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
++			matcher_attr.optimize_flow_src =
++				MLX5DR_MATCHER_FLOW_SRC_WIRE;
++		else if (attr->specialize &
++			 RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
++			matcher_attr.optimize_flow_src =
++				MLX5DR_MATCHER_FLOW_SRC_VPORT;
+ 	}
+ 	/* Build the item template. */
+ 	for (i = 0; i < nb_item_templates; i++) {
+@@ -4623,7 +4677,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
+ 		return rte_flow_error_set(error, EBUSY,
+ 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 				   NULL,
+-				   "table in use");
++				   "table is in use");
+ 	}
+ 	LIST_REMOVE(table, next);
+ 	for (i = 0; i < table->nb_item_templates; i++)
+@@ -4975,15 +5029,17 @@ flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
+ 	ret = flow_validate_modify_field_level(&action_conf->dst, error);
+ 	if (ret)
+ 		return ret;
+-	if (action_conf->dst.tag_index &&
+-	    !flow_modify_field_support_tag_array(action_conf->dst.field))
+-		return rte_flow_error_set(error, EINVAL,
+-				RTE_FLOW_ERROR_TYPE_ACTION, action,
+-				"destination tag index is not supported");
+-	if (action_conf->dst.class_id)
+-		return rte_flow_error_set(error, EINVAL,
+-				RTE_FLOW_ERROR_TYPE_ACTION, action,
+-				"destination class id is not supported");
++	if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM) {
++		if (action_conf->dst.tag_index &&
++		    !flow_modify_field_support_tag_array(action_conf->dst.field))
++			return rte_flow_error_set(error, EINVAL,
++					RTE_FLOW_ERROR_TYPE_ACTION, action,
++					"destination tag index is not supported");
++		if (action_conf->dst.class_id)
++			return rte_flow_error_set(error, EINVAL,
++					RTE_FLOW_ERROR_TYPE_ACTION, action,
++					"destination class id is not supported");
++	}
+ 	if (mask_conf->dst.level != UINT8_MAX)
+ 		return rte_flow_error_set(error, EINVAL,
+ 			RTE_FLOW_ERROR_TYPE_ACTION, action,
+@@ -4998,15 +5054,17 @@ flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
+ 				"destination field mask and template are not equal");
+ 	if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
+ 	    action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
+-		if (action_conf->src.tag_index &&
+-		    !flow_modify_field_support_tag_array(action_conf->src.field))
+-			return rte_flow_error_set(error, EINVAL,
+-				RTE_FLOW_ERROR_TYPE_ACTION, action,
+-				"source tag index is not supported");
+-		if (action_conf->src.class_id)
+-			return rte_flow_error_set(error, EINVAL,
+-				RTE_FLOW_ERROR_TYPE_ACTION, action,
+-				"source class id is not supported");
++		if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM) {
++			if (action_conf->src.tag_index &&
++			    !flow_modify_field_support_tag_array(action_conf->src.field))
++				return rte_flow_error_set(error, EINVAL,
++					RTE_FLOW_ERROR_TYPE_ACTION, action,
++					"source tag index is not supported");
++			if (action_conf->src.class_id)
++				return rte_flow_error_set(error, EINVAL,
++					RTE_FLOW_ERROR_TYPE_ACTION, action,
++					"source class id is not supported");
++		}
+ 		if (mask_conf->src.level != UINT8_MAX)
+ 			return rte_flow_error_set(error, EINVAL,
+ 				RTE_FLOW_ERROR_TYPE_ACTION, action,
+@@ -5443,6 +5501,69 @@ mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
+ 	       MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
+ }
+ 
++enum mlx5_hw_indirect_list_relative_position {
++	MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1,
++	MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0,
++	MLX5_INDIRECT_LIST_POSITION_AFTER_MH,
++};
++
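++/*
++ * Return whether an indirect list action must be inserted before or after
++ * a modify header action when expanding the actions template.
++ */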
++static enum mlx5_hw_indirect_list_relative_position
++mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action)
++{
++	const struct rte_flow_action_indirect_list *conf = action->conf;
++	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle);
++	enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
++	const union {
++		struct mlx5_indlst_legacy *legacy;
++		struct mlx5_hw_encap_decap_action *reformat;
++		struct rte_flow_action_list_handle *handle;
++	} h = { .handle = conf->handle};
++
++	switch (list_type) {
++	case  MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
++		switch (h.legacy->legacy_type) {
++		case RTE_FLOW_ACTION_TYPE_AGE:
++		case RTE_FLOW_ACTION_TYPE_COUNT:
++		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
++		case RTE_FLOW_ACTION_TYPE_METER_MARK:
++		case RTE_FLOW_ACTION_TYPE_QUOTA:
++			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
++			break;
++		case RTE_FLOW_ACTION_TYPE_RSS:
++			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
++			break;
++		default:
++			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
++			break;
++		}
++		break;
++	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
++		pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
++		break;
++	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
++		switch (h.reformat->action_type) {
++		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
++		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
++			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
++			break;
++		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
++		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
++			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
++			break;
++		default:
++			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
++			break;
++		}
++		break;
++	default:
++		pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
++		break;
++	}
++	return pos;
++}
++
++#define MLX5_HW_EXPAND_MH_FAILED 0xffff
++
+ static inline uint16_t
+ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
+ 				     struct rte_flow_action masks[],
+@@ -5479,6 +5600,7 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
+ 	 * @see action_order_arr[]
+ 	 */
+ 	for (i = act_num - 2; (int)i >= 0; i--) {
++		enum mlx5_hw_indirect_list_relative_position pos;
+ 		enum rte_flow_action_type type = actions[i].type;
+ 		uint64_t reformat_type;
+ 
+@@ -5509,6 +5631,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
+ 			if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ 				i--;
+ 			break;
++		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
++			pos = mlx5_hw_indirect_list_mh_position(&actions[i]);
++			if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN)
++				return MLX5_HW_EXPAND_MH_FAILED;
++			if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH)
++				goto insert;
++			break;
+ 		default:
+ 			i++; /* new MF inserted AFTER actions[i] */
+ 			goto insert;
+@@ -5639,6 +5768,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
+ 	int ret;
+ 	const struct rte_flow_action_ipv6_ext_remove *remove_data;
+ 
++	if (!mlx5_hw_ctx_validate(dev, error))
++		return -rte_errno;
+ 	/* FDB actions are only valid to proxy port. */
+ 	if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
+ 		return rte_flow_error_set(error, EINVAL,
+@@ -6151,7 +6282,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
+ 			rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
+ 	const struct rte_flow_action_of_set_vlan_vid *conf =
+ 		ra[set_vlan_vid_ix].conf;
+-	rte_be16_t vid = masked ? conf->vlan_vid : 0;
+ 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
+ 					       NULL, &error);
+ 	*spec = (typeof(*spec)) {
+@@ -6162,8 +6292,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
+ 		},
+ 		.src = {
+ 			.field = RTE_FLOW_FIELD_VALUE,
+-			.level = vid,
+-			.offset = 0,
+ 		},
+ 		.width = width,
+ 	};
+@@ -6175,11 +6303,15 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
+ 		},
+ 		.src = {
+ 			.field = RTE_FLOW_FIELD_VALUE,
+-			.level = masked ? (1U << width) - 1 : 0,
+-			.offset = 0,
+ 		},
+ 		.width = 0xffffffff,
+ 	};
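++	/* For a masked action, copy the VID immediate and a full mask into the value buffers. */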
++	if (masked) {
++		uint32_t mask_val = 0xffffffff;
++
++		rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid));
++		rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val));
++	}
+ 	ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
+ 	ra[set_vlan_vid_ix].conf = spec;
+ 	rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
+@@ -6206,8 +6338,6 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
+ 		},
+ 		.src = {
+ 			.field = RTE_FLOW_FIELD_VALUE,
+-			.level = vid,
+-			.offset = 0,
+ 		},
+ 		.width = width,
+ 	};
+@@ -6216,6 +6346,7 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
+ 		.conf = &conf
+ 	};
+ 
++	rte_memcpy(conf.src.value, &vid, sizeof(vid));
+ 	return flow_hw_modify_field_construct(job, act_data, hw_acts,
+ 					      &modify_action);
+ }
+@@ -6463,6 +6594,12 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
+ 							   action_flags,
+ 							   act_num,
+ 							   expand_mf_num);
++		if (pos == MLX5_HW_EXPAND_MH_FAILED) {
++			rte_flow_error_set(error, ENOMEM,
++					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
++					   NULL, "modify header expansion failed");
++			return NULL;
++		}
+ 		act_num += expand_mf_num;
+ 		for (i = pos + expand_mf_num; i < act_num; i++)
+ 			src_off[i] += expand_mf_num;
+@@ -6585,7 +6722,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
+ 		return rte_flow_error_set(error, EBUSY,
+ 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 				   NULL,
+-				   "action template in using");
++				   "action template is in use");
+ 	}
+ 	if (template->action_flags & flag)
+ 		mlx5_free_srh_flex_parser(dev);
+@@ -6645,6 +6782,8 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
+ 	bool items_end = false;
+ 	uint32_t tag_bitmap = 0;
+ 
++	if (!mlx5_hw_ctx_validate(dev, error))
++		return -rte_errno;
+ 	if (!attr->ingress && !attr->egress && !attr->transfer)
+ 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ 					  "at least one of the direction attributes"
+@@ -7003,7 +7142,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
+ 		return rte_flow_error_set(error, EBUSY,
+ 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 				   NULL,
+-				   "item template in using");
++				   "item template is in use");
+ 	}
+ 	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
+ 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
+@@ -8366,6 +8505,72 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
+ 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
+ }
+ 
++/**
++ * Cleans up all template tables and the pattern and actions templates used for
++ * FDB control flow rules.
++ *
++ * @param dev
++ *   Pointer to Ethernet device.
++ */
++static void
++flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev)
++{
++	struct mlx5_priv *priv = dev->data->dev_private;
++	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
++
++	if (!priv->hw_ctrl_fdb)
++		return;
++	hw_ctrl_fdb = priv->hw_ctrl_fdb;
++	/* Clean up templates used for LACP default miss table. */
++	if (hw_ctrl_fdb->hw_lacp_rx_tbl)
++		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL));
++	if (hw_ctrl_fdb->lacp_rx_actions_tmpl)
++		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl,
++			   NULL));
++	if (hw_ctrl_fdb->lacp_rx_items_tmpl)
++		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
++			   NULL));
++	/* Clean up templates used for default Tx metadata copy. */
++	if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
++		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL));
++	if (hw_ctrl_fdb->tx_meta_actions_tmpl)
++		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl,
++			   NULL));
++	if (hw_ctrl_fdb->tx_meta_items_tmpl)
++		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
++			   NULL));
++	/* Clean up templates used for default FDB jump rule. */
++	if (hw_ctrl_fdb->hw_esw_zero_tbl)
++		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL));
++	if (hw_ctrl_fdb->jump_one_actions_tmpl)
++		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl,
++			   NULL));
++	if (hw_ctrl_fdb->port_items_tmpl)
++		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl,
++			   NULL));
++	/* Clean up templates used for default SQ miss flow rules - non-root table. */
++	if (hw_ctrl_fdb->hw_esw_sq_miss_tbl)
++		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL));
++	if (hw_ctrl_fdb->regc_sq_items_tmpl)
++		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
++			   NULL));
++	if (hw_ctrl_fdb->port_actions_tmpl)
++		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl,
++			   NULL));
++	/* Clean up templates used for default SQ miss flow rules - root table. */
++	if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl)
++		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL));
++	if (hw_ctrl_fdb->regc_jump_actions_tmpl)
++		claim_zero(flow_hw_actions_template_destroy(dev,
++			   hw_ctrl_fdb->regc_jump_actions_tmpl, NULL));
++	if (hw_ctrl_fdb->esw_mgr_items_tmpl)
++		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
++			   NULL));
++	/* Clean up templates structure for FDB control flow rules. */
++	mlx5_free(hw_ctrl_fdb);
++	priv->hw_ctrl_fdb = NULL;
++}
++
+ /*
+  * Create a table on the root group to for the LACP traffic redirecting.
+  *
+@@ -8415,182 +8620,154 @@ flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
+  * @return
+  *   0 on success, negative values otherwise
+  */
+-static __rte_unused int
++static int
+ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+-	struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL;
+-	struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL;
+-	struct rte_flow_pattern_template *port_items_tmpl = NULL;
+-	struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL;
+-	struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL;
+-	struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL;
+-	struct rte_flow_actions_template *port_actions_tmpl = NULL;
+-	struct rte_flow_actions_template *jump_one_actions_tmpl = NULL;
+-	struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL;
+-	struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL;
++	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
+ 	uint32_t xmeta = priv->sh->config.dv_xmeta_en;
+ 	uint32_t repr_matching = priv->sh->config.repr_matching;
+-	int ret;
++	uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule;
+ 
+-	/* Create templates and table for default SQ miss flow rules - root table. */
+-	esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
+-	if (!esw_mgr_items_tmpl) {
+-		DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
+-			" template for control flows", dev->data->port_id);
+-		goto err;
+-	}
+-	regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
+-	if (!regc_jump_actions_tmpl) {
+-		DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
+-			" for control flows", dev->data->port_id);
+-		goto err;
+-	}
+-	MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL);
+-	priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table
+-			(dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error);
+-	if (!priv->hw_esw_sq_miss_root_tbl) {
+-		DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
+-			" for control flows", dev->data->port_id);
+-		goto err;
+-	}
+-	/* Create templates and table for default SQ miss flow rules - non-root table. */
+-	regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
+-	if (!regc_sq_items_tmpl) {
+-		DRV_LOG(ERR, "port %u failed to create SQ item template for"
+-			" control flows", dev->data->port_id);
+-		goto err;
+-	}
+-	port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error);
+-	if (!port_actions_tmpl) {
+-		DRV_LOG(ERR, "port %u failed to create port action template"
+-			" for control flows", dev->data->port_id);
+-		goto err;
+-	}
+-	MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL);
+-	priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl,
+-								     port_actions_tmpl, error);
+-	if (!priv->hw_esw_sq_miss_tbl) {
+-		DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
+-			" for control flows", dev->data->port_id);
+-		goto err;
+-	}
+-	/* Create templates and table for default FDB jump flow rules. */
+-	port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error);
+-	if (!port_items_tmpl) {
+-		DRV_LOG(ERR, "port %u failed to create SQ item template for"
+-			" control flows", dev->data->port_id);
+-		goto err;
+-	}
+-	jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template
+-			(dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
+-	if (!jump_one_actions_tmpl) {
+-		DRV_LOG(ERR, "port %u failed to create jump action template"
+-			" for control flows", dev->data->port_id);
++	MLX5_ASSERT(priv->hw_ctrl_fdb == NULL);
++	hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY);
++	if (!hw_ctrl_fdb) {
++		DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates",
++			dev->data->port_id);
++		rte_errno = ENOMEM;
+ 		goto err;
+ 	}
+-	MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL);
+-	priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl,
+-							       jump_one_actions_tmpl,
+-							       error);
+-	if (!priv->hw_esw_zero_tbl) {
+-		DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
+-			" for control flows", dev->data->port_id);
+-		goto err;
++	priv->hw_ctrl_fdb = hw_ctrl_fdb;
++	if (fdb_def_rule) {
++		/* Create templates and table for default SQ miss flow rules - root table. */
++		hw_ctrl_fdb->esw_mgr_items_tmpl =
++				flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
++		if (!hw_ctrl_fdb->esw_mgr_items_tmpl) {
++			DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
++				" template for control flows", dev->data->port_id);
++			goto err;
++		}
++		hw_ctrl_fdb->regc_jump_actions_tmpl =
++				flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
++		if (!hw_ctrl_fdb->regc_jump_actions_tmpl) {
++			DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
++				" for control flows", dev->data->port_id);
++			goto err;
++		}
++		hw_ctrl_fdb->hw_esw_sq_miss_root_tbl =
++				flow_hw_create_ctrl_sq_miss_root_table
++					(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
++					 hw_ctrl_fdb->regc_jump_actions_tmpl, error);
++		if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) {
++			DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
++				" for control flows", dev->data->port_id);
++			goto err;
++		}
++		/* Create templates and table for default SQ miss flow rules - non-root table. */
++		hw_ctrl_fdb->regc_sq_items_tmpl =
++				flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
++		if (!hw_ctrl_fdb->regc_sq_items_tmpl) {
++			DRV_LOG(ERR, "port %u failed to create SQ item template for"
++				" control flows", dev->data->port_id);
++			goto err;
++		}
++		hw_ctrl_fdb->port_actions_tmpl =
++				flow_hw_create_ctrl_port_actions_template(dev, error);
++		if (!hw_ctrl_fdb->port_actions_tmpl) {
++			DRV_LOG(ERR, "port %u failed to create port action template"
++				" for control flows", dev->data->port_id);
++			goto err;
++		}
++		hw_ctrl_fdb->hw_esw_sq_miss_tbl =
++				flow_hw_create_ctrl_sq_miss_table
++					(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
++					 hw_ctrl_fdb->port_actions_tmpl, error);
++		if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
++			DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
++				" for control flows", dev->data->port_id);
++			goto err;
++		}
++		/* Create templates and table for default FDB jump flow rules. */
++		hw_ctrl_fdb->port_items_tmpl =
++				flow_hw_create_ctrl_port_pattern_template(dev, error);
++		if (!hw_ctrl_fdb->port_items_tmpl) {
++			DRV_LOG(ERR, "port %u failed to create SQ item template for"
++				" control flows", dev->data->port_id);
++			goto err;
++		}
++		hw_ctrl_fdb->jump_one_actions_tmpl =
++				flow_hw_create_ctrl_jump_actions_template
++					(dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
++		if (!hw_ctrl_fdb->jump_one_actions_tmpl) {
++			DRV_LOG(ERR, "port %u failed to create jump action template"
++				" for control flows", dev->data->port_id);
++			goto err;
++		}
++		hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table
++				(dev, hw_ctrl_fdb->port_items_tmpl,
++				 hw_ctrl_fdb->jump_one_actions_tmpl, error);
++		if (!hw_ctrl_fdb->hw_esw_zero_tbl) {
++			DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
++				" for control flows", dev->data->port_id);
++			goto err;
++		}
+ 	}
+ 	/* Create templates and table for default Tx metadata copy flow rule. */
+ 	if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
+-		tx_meta_items_tmpl =
++		hw_ctrl_fdb->tx_meta_items_tmpl =
+ 			flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error);
+-		if (!tx_meta_items_tmpl) {
++		if (!hw_ctrl_fdb->tx_meta_items_tmpl) {
+ 			DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern"
+ 				" template for control flows", dev->data->port_id);
+ 			goto err;
+ 		}
+-		tx_meta_actions_tmpl =
++		hw_ctrl_fdb->tx_meta_actions_tmpl =
+ 			flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
+-		if (!tx_meta_actions_tmpl) {
++		if (!hw_ctrl_fdb->tx_meta_actions_tmpl) {
+ 			DRV_LOG(ERR, "port %u failed to Tx metadata copy actions"
+ 				" template for control flows", dev->data->port_id);
+ 			goto err;
+ 		}
+-		MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL);
+-		priv->hw_tx_meta_cpy_tbl =
+-			flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl,
+-								  tx_meta_actions_tmpl, error);
+-		if (!priv->hw_tx_meta_cpy_tbl) {
++		hw_ctrl_fdb->hw_tx_meta_cpy_tbl =
++			flow_hw_create_tx_default_mreg_copy_table
++				(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
++				 hw_ctrl_fdb->tx_meta_actions_tmpl, error);
++		if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) {
+ 			DRV_LOG(ERR, "port %u failed to create table for default"
+ 				" Tx metadata copy flow rule", dev->data->port_id);
+ 			goto err;
+ 		}
+ 	}
+ 	/* Create LACP default miss table. */
+-	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
+-		lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error);
+-		if (!lacp_rx_items_tmpl) {
++	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
++		hw_ctrl_fdb->lacp_rx_items_tmpl =
++				flow_hw_create_lacp_rx_pattern_template(dev, error);
++		if (!hw_ctrl_fdb->lacp_rx_items_tmpl) {
+ 			DRV_LOG(ERR, "port %u failed to create pattern template"
+ 				" for LACP Rx traffic", dev->data->port_id);
+ 			goto err;
+ 		}
+-		lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error);
+-		if (!lacp_rx_actions_tmpl) {
++		hw_ctrl_fdb->lacp_rx_actions_tmpl =
++				flow_hw_create_lacp_rx_actions_template(dev, error);
++		if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) {
+ 			DRV_LOG(ERR, "port %u failed to create actions template"
+ 				" for LACP Rx traffic", dev->data->port_id);
+ 			goto err;
+ 		}
+-		priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl,
+-								    lacp_rx_actions_tmpl, error);
+-		if (!priv->hw_lacp_rx_tbl) {
++		hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table
++				(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
++				 hw_ctrl_fdb->lacp_rx_actions_tmpl, error);
++		if (!hw_ctrl_fdb->hw_lacp_rx_tbl) {
+ 			DRV_LOG(ERR, "port %u failed to create template table for"
+ 				" for LACP Rx traffic", dev->data->port_id);
+ 			goto err;
+ 		}
+ 	}
+ 	return 0;
++
+ err:
+-	/* Do not overwrite the rte_errno. */
+-	ret = -rte_errno;
+-	if (ret == 0)
+-		ret = rte_flow_error_set(error, EINVAL,
+-					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+-					 "Failed to create control tables.");
+-	if (priv->hw_tx_meta_cpy_tbl) {
+-		flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL);
+-		priv->hw_tx_meta_cpy_tbl = NULL;
+-	}
+-	if (priv->hw_esw_zero_tbl) {
+-		flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL);
+-		priv->hw_esw_zero_tbl = NULL;
+-	}
+-	if (priv->hw_esw_sq_miss_tbl) {
+-		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL);
+-		priv->hw_esw_sq_miss_tbl = NULL;
+-	}
+-	if (priv->hw_esw_sq_miss_root_tbl) {
+-		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL);
+-		priv->hw_esw_sq_miss_root_tbl = NULL;
+-	}
+-	if (lacp_rx_actions_tmpl)
+-		flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL);
+-	if (tx_meta_actions_tmpl)
+-		flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL);
+-	if (jump_one_actions_tmpl)
+-		flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL);
+-	if (port_actions_tmpl)
+-		flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL);
+-	if (regc_jump_actions_tmpl)
+-		flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL);
+-	if (lacp_rx_items_tmpl)
+-		flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL);
+-	if (tx_meta_items_tmpl)
+-		flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL);
+-	if (port_items_tmpl)
+-		flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL);
+-	if (regc_sq_items_tmpl)
+-		flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL);
+-	if (esw_mgr_items_tmpl)
+-		flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL);
+-	return ret;
++	flow_hw_cleanup_ctrl_fdb_tables(dev);
++	return -EINVAL;
+ }
+ 
+ static void
+@@ -9184,6 +9361,38 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
+ 	return true;
+ }
+ 
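++/*
++ * Validate the port and queue attributes provided to rte_flow_configure().
++ * All flow queues must have the same size, which is a limitation of the
++ * HWS layer.
++ */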
++static int
++flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
++			    uint16_t nb_queue,
++			    const struct rte_flow_queue_attr *queue_attr[],
++			    struct rte_flow_error *error)
++{
++	uint32_t size;
++	unsigned int i;
++
++	if (port_attr == NULL)
++		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++					  "Port attributes must be non-NULL");
++
++	if (nb_queue == 0)
++		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++					  "At least one flow queue is required");
++
++	if (queue_attr == NULL)
++		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++					  "Queue attributes must be non-NULL");
++
++	size = queue_attr[0]->size;
++	for (i = 1; i < nb_queue; ++i) {
++		if (queue_attr[i]->size != size)
++			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
++						  NULL,
++						  "All flow queues must have the same size");
++	}
++
++	return 0;
++}
++
+ /**
+  * Configure port HWS resources.
+  *
+@@ -9235,10 +9444,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
+ 	int ret = 0;
+ 	uint32_t action_flags;
+ 
+-	if (!port_attr || !nb_queue || !queue_attr) {
+-		rte_errno = EINVAL;
+-		goto err;
+-	}
++	if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error))
++		return -rte_errno;
+ 	/*
+ 	 * Calling rte_flow_configure() again is allowed if and only if
+ 	 * provided configuration matches the initially provided one.
+@@ -9285,14 +9492,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
+ 	/* Allocate the queue job descriptor LIFO. */
+ 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
+ 	for (i = 0; i < nb_q_updated; i++) {
+-		/*
+-		 * Check if the queues' size are all the same as the
+-		 * limitation from HWS layer.
+-		 */
+-		if (_queue_attr[i]->size != _queue_attr[0]->size) {
+-			rte_errno = EINVAL;
+-			goto err;
+-		}
+ 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
+ 			    sizeof(struct mlx5_hw_q_job) +
+ 			    sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
+@@ -9545,6 +9744,14 @@ flow_hw_configure(struct rte_eth_dev *dev,
+ 		priv->hws_strict_queue = 1;
+ 	return 0;
+ err:
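++	/* Roll back all resources configured so far. */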
++	priv->hws_strict_queue = 0;
++	flow_hw_destroy_vlan(dev);
++	if (priv->hws_age_req)
++		mlx5_hws_age_pool_destroy(priv);
++	if (priv->hws_cpool) {
++		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
++		priv->hws_cpool = NULL;
++	}
+ 	if (priv->hws_ctpool) {
+ 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
+ 		priv->hws_ctpool = NULL;
+@@ -9553,44 +9760,54 @@ err:
+ 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
+ 		priv->ct_mng = NULL;
+ 	}
+-	if (priv->hws_age_req)
+-		mlx5_hws_age_pool_destroy(priv);
+-	if (priv->hws_cpool) {
+-		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
+-		priv->hws_cpool = NULL;
+-	}
+-	mlx5_flow_quota_destroy(dev);
+ 	flow_hw_destroy_send_to_kernel_action(priv);
++	flow_hw_cleanup_ctrl_fdb_tables(dev);
+ 	flow_hw_free_vport_actions(priv);
++	if (priv->hw_def_miss) {
++		mlx5dr_action_destroy(priv->hw_def_miss);
++		priv->hw_def_miss = NULL;
++	}
++	flow_hw_cleanup_tx_repr_tagging(dev);
+ 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
+-		if (priv->hw_drop[i])
++		if (priv->hw_drop[i]) {
+ 			mlx5dr_action_destroy(priv->hw_drop[i]);
+-		if (priv->hw_tag[i])
++			priv->hw_drop[i] = NULL;
++		}
++		if (priv->hw_tag[i]) {
+ 			mlx5dr_action_destroy(priv->hw_tag[i]);
++			priv->hw_tag[i] = NULL;
++		}
+ 	}
+-	if (priv->hw_def_miss)
+-		mlx5dr_action_destroy(priv->hw_def_miss);
+-	flow_hw_destroy_vlan(dev);
+-	if (dr_ctx)
++	mlx5_flow_meter_uninit(dev);
++	mlx5_flow_quota_destroy(dev);
++	flow_hw_cleanup_ctrl_rx_tables(dev);
++	if (dr_ctx) {
+ 		claim_zero(mlx5dr_context_close(dr_ctx));
+-	for (i = 0; i < nb_q_updated; i++) {
+-		rte_ring_free(priv->hw_q[i].indir_iq);
+-		rte_ring_free(priv->hw_q[i].indir_cq);
++		priv->dr_ctx = NULL;
+ 	}
+-	mlx5_free(priv->hw_q);
+-	priv->hw_q = NULL;
+-	if (priv->acts_ipool) {
+-		mlx5_ipool_destroy(priv->acts_ipool);
+-		priv->acts_ipool = NULL;
+-	}
+-	if (_queue_attr)
+-		mlx5_free(_queue_attr);
+ 	if (priv->shared_host) {
++		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
++
+ 		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ 		priv->shared_host = NULL;
+ 	}
++	if (priv->hw_q) {
++		for (i = 0; i < nb_q_updated; i++) {
++			rte_ring_free(priv->hw_q[i].indir_iq);
++			rte_ring_free(priv->hw_q[i].indir_cq);
++		}
++		mlx5_free(priv->hw_q);
++		priv->hw_q = NULL;
++	}
++	if (priv->acts_ipool) {
++		mlx5_ipool_destroy(priv->acts_ipool);
++		priv->acts_ipool = NULL;
++	}
+ 	mlx5_free(priv->hw_attr);
+ 	priv->hw_attr = NULL;
++	priv->nb_queue = 0;
++	if (_queue_attr)
++		mlx5_free(_queue_attr);
+ 	/* Do not overwrite the internal errno information. */
+ 	if (ret)
+ 		return ret;
+@@ -9609,37 +9826,48 @@ void
+ flow_hw_resource_release(struct rte_eth_dev *dev)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+-	struct rte_flow_template_table *tbl;
+-	struct rte_flow_pattern_template *it;
+-	struct rte_flow_actions_template *at;
+-	struct mlx5_flow_group *grp;
++	struct rte_flow_template_table *tbl, *temp_tbl;
++	struct rte_flow_pattern_template *it, *temp_it;
++	struct rte_flow_actions_template *at, *temp_at;
++	struct mlx5_flow_group *grp, *temp_grp;
+ 	uint32_t i;
+ 
+ 	if (!priv->dr_ctx)
+ 		return;
+ 	flow_hw_rxq_flag_set(dev, false);
+ 	flow_hw_flush_all_ctrl_flows(dev);
++	flow_hw_cleanup_ctrl_fdb_tables(dev);
+ 	flow_hw_cleanup_tx_repr_tagging(dev);
+ 	flow_hw_cleanup_ctrl_rx_tables(dev);
+-	while (!LIST_EMPTY(&priv->flow_hw_grp)) {
+-		grp = LIST_FIRST(&priv->flow_hw_grp);
+-		flow_hw_group_unset_miss_group(dev, grp, NULL);
+-	}
+-	while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {
+-		tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
+-		flow_hw_table_destroy(dev, tbl, NULL);
+-	}
+-	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
+-		tbl = LIST_FIRST(&priv->flow_hw_tbl);
+-		flow_hw_table_destroy(dev, tbl, NULL);
+-	}
+-	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
+-		it = LIST_FIRST(&priv->flow_hw_itt);
+-		flow_hw_pattern_template_destroy(dev, it, NULL);
+-	}
+-	while (!LIST_EMPTY(&priv->flow_hw_at)) {
+-		at = LIST_FIRST(&priv->flow_hw_at);
+-		flow_hw_actions_template_destroy(dev, at, NULL);
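++	/*
++	 * Each destroy callback unlinks its element from the list, so fetch
++	 * the next element before destroying the current one.
++	 */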
++	grp = LIST_FIRST(&priv->flow_hw_grp);
++	while (grp) {
++		temp_grp = LIST_NEXT(grp, next);
++		claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL));
++		grp = temp_grp;
++	}
++	tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
++	while (tbl) {
++		temp_tbl = LIST_NEXT(tbl, next);
++		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
++		tbl = temp_tbl;
++	}
++	tbl = LIST_FIRST(&priv->flow_hw_tbl);
++	while (tbl) {
++		temp_tbl = LIST_NEXT(tbl, next);
++		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
++		tbl = temp_tbl;
++	}
++	it = LIST_FIRST(&priv->flow_hw_itt);
++	while (it) {
++		temp_it = LIST_NEXT(it, next);
++		claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL));
++		it = temp_it;
++	}
++	at = LIST_FIRST(&priv->flow_hw_at);
++	while (at) {
++		temp_at = LIST_NEXT(at, next);
++		claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
++		at = temp_at;
+ 	}
+ 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
+ 		if (priv->hw_drop[i])
+@@ -9677,13 +9905,11 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
+ 	}
+ 	mlx5_free(priv->hw_q);
+ 	priv->hw_q = NULL;
+-	claim_zero(mlx5dr_context_close(priv->dr_ctx));
+ 	if (priv->shared_host) {
+ 		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
+ 		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ 		priv->shared_host = NULL;
+ 	}
+-	priv->dr_ctx = NULL;
+ 	mlx5_free(priv->hw_attr);
+ 	priv->hw_attr = NULL;
+ 	priv->nb_queue = 0;
+@@ -9853,6 +10079,13 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
+ 				   "CT is not enabled");
+ 		return 0;
+ 	}
++	if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) {
++		rte_flow_error_set(error, EINVAL,
++				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
++				   "CT supports port indexes up to "
++				   RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT));
++		return 0;
++	}
+ 	ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
+ 	if (!ct) {
+ 		rte_flow_error_set(error, rte_errno,
+@@ -9967,11 +10200,13 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+ 			const struct rte_flow_action_handle *handle,
+ 			void *user_data, void *query_data,
+ 			enum mlx5_hw_job_type type,
++			enum mlx5_hw_indirect_type indirect_type,
+ 			struct rte_flow_error *error)
+ {
+ 	struct mlx5_hw_q_job *job;
+ 
+-	MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
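++	/* Fall back to the control queue when no queue was specified. */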
++	if (queue == MLX5_HW_INV_QUEUE)
++		queue = CTRL_QUEUE_ID(priv);
+ 	job = flow_hw_job_get(priv, queue);
+ 	if (!job) {
+ 		rte_flow_error_set(error, ENOMEM,
+@@ -9983,9 +10218,21 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+ 	job->action = handle;
+ 	job->user_data = user_data;
+ 	job->query.user = query_data;
++	job->indirect_type = indirect_type;
+ 	return job;
+ }
+ 
++struct mlx5_hw_q_job *
++mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
++			  const struct rte_flow_action_handle *handle,
++			  void *user_data, void *query_data,
++			  enum mlx5_hw_job_type type,
++			  struct rte_flow_error *error)
++{
++	return flow_hw_action_job_init(priv, queue, handle, user_data, query_data,
++				       type, MLX5_HW_INDIRECT_TYPE_LEGACY, error);
++}
++
+ static __rte_always_inline void
+ flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
+ 			struct mlx5_hw_q_job *job,
+@@ -10045,15 +10292,17 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ 	const struct rte_flow_action_age *age;
+ 	struct mlx5_aso_mtr *aso_mtr;
+ 	cnt_id_t cnt_id;
+-	uint32_t mtr_id;
+ 	uint32_t age_idx;
+ 	bool push = flow_hw_action_push(attr);
+ 	bool aso = false;
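++	/* Meter mark needs a job context even for synchronous (attr == NULL) calls. */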
++	bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK;
+ 
+-	if (attr) {
++	if (!mlx5_hw_ctx_validate(dev, error))
++		return NULL;
++	if (attr || force_job) {
+ 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+-					      error);
++					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
+ 		if (!job)
+ 			return NULL;
+ 	}
+@@ -10105,9 +10354,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ 		aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push);
+ 		if (!aso_mtr)
+ 			break;
+-		mtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
+-			MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id);
+-		handle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id;
++		handle = (void *)(uintptr_t)job->action;
+ 		break;
+ 	case RTE_FLOW_ACTION_TYPE_RSS:
+ 		handle = flow_dv_action_create(dev, conf, action, error);
+@@ -10122,9 +10369,8 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ 				   NULL, "action type not supported");
+ 		break;
+ 	}
+-	if (job) {
++	if (job && !force_job) {
+ 		job->action = handle;
+-		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY;
+ 		flow_hw_action_finalize(dev, queue, job, push, aso,
+ 					handle != NULL);
+ 	}
+@@ -10155,15 +10401,17 @@ mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
+ 		fm->color_aware = meter_mark->color_mode;
+ 	if (upd_meter_mark->state_valid)
+ 		fm->is_enable = meter_mark->state;
++	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
++			 ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
+ 	/* Update ASO flow meter by wqe. */
+-	if (mlx5_aso_meter_update_by_wqe(priv->sh, queue,
++	if (mlx5_aso_meter_update_by_wqe(priv, queue,
+ 					 aso_mtr, &priv->mtr_bulk, job, push))
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 					  NULL, "Unable to update ASO meter WQE");
+ 	/* Wait for ASO object completion. */
+ 	if (queue == MLX5_HW_INV_QUEUE &&
+-	    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
++	    mlx5_aso_mtr_wait(priv, aso_mtr, true))
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 					  NULL, "Unable to wait for ASO meter CQE");
+@@ -10209,11 +10457,12 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
+ 	int ret = 0;
+ 	bool push = flow_hw_action_push(attr);
+ 	bool aso = false;
++	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
+ 
+-	if (attr) {
++	if (attr || force_job) {
+ 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+ 					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
+-					      error);
++					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
+ 		if (!job)
+ 			return -rte_errno;
+ 	}
+@@ -10247,7 +10496,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
+ 					  "action type not supported");
+ 		break;
+ 	}
+-	if (job)
++	if (job && !force_job)
+ 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
+ 	return ret;
+ }
+@@ -10290,11 +10539,12 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+ 	bool push = flow_hw_action_push(attr);
+ 	bool aso = false;
+ 	int ret = 0;
++	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
+ 
+-	if (attr) {
++	if (attr || force_job) {
+ 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+ 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+-					      error);
++					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
+ 		if (!job)
+ 			return -rte_errno;
+ 	}
+@@ -10327,7 +10577,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+ 		fm = &aso_mtr->fm;
+ 		fm->is_enable = 0;
+ 		/* Update ASO flow meter by wqe. */
+-		if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
++		if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
+ 						 &priv->mtr_bulk, job, push)) {
+ 			ret = -EINVAL;
+ 			rte_flow_error_set(error, EINVAL,
+@@ -10337,17 +10587,14 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+ 		}
+ 		/* Wait for ASO object completion. */
+ 		if (queue == MLX5_HW_INV_QUEUE &&
+-		    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
++		    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
+ 			ret = -EINVAL;
+ 			rte_flow_error_set(error, EINVAL,
+ 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 				NULL, "Unable to wait for ASO meter CQE");
+ 			break;
+ 		}
+-		if (!job)
+-			mlx5_ipool_free(pool->idx_pool, idx);
+-		else
+-			aso = true;
++		aso = true;
+ 		break;
+ 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
+ 		ret = flow_dv_action_destroy(dev, handle, error);
+@@ -10361,7 +10608,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+ 					  "action type not supported");
+ 		break;
+ 	}
+-	if (job)
++	if (job && !force_job)
+ 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
+ 	return ret;
+ }
+@@ -10607,7 +10854,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
+ 	if (attr) {
+ 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+ 					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
+-					      error);
++					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
+ 		if (!job)
+ 			return -rte_errno;
+ 	}
+@@ -10661,7 +10908,7 @@ flow_hw_async_action_handle_query_update
+ 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+ 					      query,
+ 					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
+-					      error);
++					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
+ 		if (!job)
+ 			return -rte_errno;
+ 	}
+@@ -10742,6 +10989,10 @@ flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
+ 		return rte_flow_error_set(error, EINVAL,
+ 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 					  NULL, "empty context");
++	if (!priv->hws_age_req)
++		return rte_flow_error_set(error, ENOENT,
++					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
++					  NULL, "No aging initialized");
+ 	if (priv->hws_strict_queue) {
+ 		if (queue_id >= age_info->hw_q_age->nb_rings)
+ 			return rte_flow_error_set(error, EINVAL,
+@@ -11319,6 +11570,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ 		}
+ 	};
+ 
++	if (!mlx5_hw_ctx_validate(dev, error))
++		return NULL;
+ 	if (!actions) {
+ 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ 				   NULL, "No action list");
+@@ -11337,7 +11590,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ 	if (attr) {
+ 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+-					      error);
++					      MLX5_HW_INDIRECT_TYPE_LIST, error);
+ 		if (!job)
+ 			return NULL;
+ 	}
+@@ -11357,7 +11610,6 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ 	}
+ 	if (job) {
+ 		job->action = handle;
+-		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST;
+ 		flow_hw_action_finalize(dev, queue, job, push, false,
+ 					handle != NULL);
+ 	}
+@@ -11402,7 +11654,7 @@ flow_hw_async_action_list_handle_destroy
+ 	if (attr) {
+ 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+-					      error);
++					      MLX5_HW_INDIRECT_TYPE_LIST, error);
+ 		if (!job)
+ 			return rte_errno;
+ 	}
+@@ -11881,8 +12133,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
+ 			       proxy_port_id, port_id);
+ 		return 0;
+ 	}
+-	if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
+-	    !proxy_priv->hw_esw_sq_miss_tbl) {
++	if (!proxy_priv->hw_ctrl_fdb ||
++	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
++	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
+ 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
+ 			     "default flow tables were not created.",
+ 			     proxy_port_id, port_id);
+@@ -11914,7 +12167,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
+ 	actions[2] = (struct rte_flow_action) {
+ 		.type = RTE_FLOW_ACTION_TYPE_END,
+ 	};
+-	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl,
++	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
++				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl,
+ 				       items, 0, actions, 0, &flow_info, external);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
+@@ -11945,7 +12199,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
+ 		.type = RTE_FLOW_ACTION_TYPE_END,
+ 	};
+ 	flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
+-	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl,
++	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
++				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
+ 				       items, 0, actions, 0, &flow_info, external);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
+@@ -11989,10 +12244,13 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
+ 	}
+ 	proxy_dev = &rte_eth_devices[proxy_port_id];
+ 	proxy_priv = proxy_dev->data->dev_private;
++	/* FDB default flow rules must be enabled. */
++	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
+ 	if (!proxy_priv->dr_ctx)
+ 		return 0;
+-	if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
+-	    !proxy_priv->hw_esw_sq_miss_tbl)
++	if (!proxy_priv->hw_ctrl_fdb ||
++	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
++	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl)
+ 		return 0;
+ 	cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
+ 	while (cf != NULL) {
+@@ -12052,6 +12310,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
+ 	}
+ 	proxy_dev = &rte_eth_devices[proxy_port_id];
+ 	proxy_priv = proxy_dev->data->dev_private;
++	/* FDB default flow rules must be enabled. */
++	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
+ 	if (!proxy_priv->dr_ctx) {
+ 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
+ 			       "for HWS to create default FDB jump rule. Default rule will "
+@@ -12059,7 +12319,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
+ 			       proxy_port_id, port_id);
+ 		return 0;
+ 	}
+-	if (!proxy_priv->hw_esw_zero_tbl) {
++	if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) {
+ 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
+ 			     "default flow tables were not created.",
+ 			     proxy_port_id, port_id);
+@@ -12067,7 +12327,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
+ 		return -rte_errno;
+ 	}
+ 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
+-					proxy_priv->hw_esw_zero_tbl,
++					proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl,
+ 					items, 0, actions, 0, &flow_info, false);
+ }
+ 
+@@ -12119,10 +12379,12 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
+ 	};
+ 
+ 	MLX5_ASSERT(priv->master);
+-	if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl)
++	if (!priv->dr_ctx ||
++	    !priv->hw_ctrl_fdb ||
++	    !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
+ 		return 0;
+ 	return flow_hw_create_ctrl_flow(dev, dev,
+-					priv->hw_tx_meta_cpy_tbl,
++					priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl,
+ 					eth_all, 0, copy_reg_action, 0, &flow_info, false);
+ }
+ 
+@@ -12214,11 +12476,11 @@ mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
+ 		.type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
+ 	};
+ 
+-	MLX5_ASSERT(priv->master);
+-	if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl)
++	if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
+ 		return 0;
+-	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0,
+-					miss_action, 0, &flow_info, false);
++	return flow_hw_create_ctrl_flow(dev, dev,
++					priv->hw_ctrl_fdb->hw_lacp_rx_tbl,
++					eth_lacp, 0, miss_action, 0, &flow_info, false);
+ }
+ 
+ static uint32_t
+diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c
+index 7cbf772ea4..7bf5018c70 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c
++++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c
+@@ -618,6 +618,7 @@ mlx5_flow_meter_profile_get(struct rte_eth_dev *dev,
+ 							meter_profile_id);
+ }
+ 
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ /**
+  * Callback to add MTR profile with HWS.
+  *
+@@ -697,6 +698,7 @@ mlx5_flow_meter_profile_hws_delete(struct rte_eth_dev *dev,
+ 	memset(fmp, 0, sizeof(struct mlx5_flow_meter_profile));
+ 	return 0;
+ }
++#endif
+ 
+ /**
+  * Find policy by id.
+@@ -839,6 +841,7 @@ mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev,
+ 	return 0;
+ }
+ 
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ /**
+  * Callback to check MTR policy action validate for HWS
+  *
+@@ -875,6 +878,7 @@ mlx5_flow_meter_policy_hws_validate(struct rte_eth_dev *dev,
+ 	}
+ 	return 0;
+ }
++#endif
+ 
+ static int
+ __mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev,
+@@ -1201,6 +1205,7 @@ mlx5_flow_meter_policy_get(struct rte_eth_dev *dev,
+ 							      &policy_idx);
+ }
+ 
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ /**
+  * Callback to delete MTR policy for HWS.
+  *
+@@ -1523,7 +1528,7 @@ policy_add_err:
+ 				  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ 				  NULL, "Failed to create meter policy.");
+ }
+-
++#endif
+ /**
+  * Check meter validation.
+  *
+@@ -1608,12 +1613,12 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,
+ 	if (sh->meter_aso_en) {
+ 		fm->is_enable = !!is_enable;
+ 		aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
+-		ret = mlx5_aso_meter_update_by_wqe(sh, MLX5_HW_INV_QUEUE,
++		ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE,
+ 						   aso_mtr, &priv->mtr_bulk,
+ 						   NULL, true);
+ 		if (ret)
+ 			return ret;
+-		ret = mlx5_aso_mtr_wait(sh, MLX5_HW_INV_QUEUE, aso_mtr);
++		ret = mlx5_aso_mtr_wait(priv, aso_mtr, false);
+ 		if (ret)
+ 			return ret;
+ 	} else {
+@@ -1859,7 +1864,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ 	/* If ASO meter supported, update ASO flow meter by wqe. */
+ 	if (priv->sh->meter_aso_en) {
+ 		aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
+-		ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,
++		ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE,
+ 						   aso_mtr, &priv->mtr_bulk, NULL, true);
+ 		if (ret)
+ 			goto error;
+@@ -1893,6 +1898,7 @@ error:
+ 		NULL, "Failed to create devx meter.");
+ }
+ 
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ /**
+  * Create meter rules.
+  *
+@@ -1920,6 +1926,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ 	struct mlx5_flow_meter_info *fm;
+ 	struct mlx5_flow_meter_policy *policy = NULL;
+ 	struct mlx5_aso_mtr *aso_mtr;
++	struct mlx5_hw_q_job *job;
+ 	int ret;
+ 
+ 	if (!priv->mtr_profile_arr ||
+@@ -1965,17 +1972,26 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ 	fm->shared = !!shared;
+ 	fm->initialized = 1;
+ 	/* Update ASO flow meter by wqe. */
+-	ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr,
+-					   &priv->mtr_bulk, NULL, true);
+-	if (ret)
++	job = mlx5_flow_action_job_init(priv, MLX5_HW_INV_QUEUE, NULL, NULL,
++					NULL, MLX5_HW_Q_JOB_TYPE_CREATE, NULL);
++	if (!job)
++		return -rte_mtr_error_set(error, ENOMEM,
++					  RTE_MTR_ERROR_TYPE_MTR_ID,
++					  NULL, "No job context.");
++	ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, aso_mtr,
++					   &priv->mtr_bulk, job, true);
++	if (ret) {
++		flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
+ 		return -rte_mtr_error_set(error, ENOTSUP,
+-			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+-			NULL, "Failed to create devx meter.");
++					  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
++					  NULL, "Failed to create devx meter.");
++	}
+ 	fm->active_state = params->meter_enable;
+ 	__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ 	__atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ 	return 0;
+ }
++#endif
+ 
+ static int
+ mlx5_flow_meter_params_flush(struct rte_eth_dev *dev,
+@@ -2460,6 +2476,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = {
+ 	.stats_read = mlx5_flow_meter_stats_read,
+ };
+ 
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = {
+ 	.capabilities_get = mlx5_flow_mtr_cap_get,
+ 	.meter_profile_add = mlx5_flow_meter_profile_hws_add,
+@@ -2478,6 +2495,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = {
+ 	.stats_update = NULL,
+ 	.stats_read = NULL,
+ };
++#endif
+ 
+ /**
+  * Get meter operations.
+@@ -2493,12 +2511,16 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = {
+ int
+ mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+ {
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 
+ 	if (priv->sh->config.dv_flow_en == 2)
+ 		*(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_hws_ops;
+ 	else
+ 		*(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops;
++#else
++	*(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops;
++#endif
+ 	return 0;
+ }
+ 
+@@ -2614,7 +2636,7 @@ mlx5_flow_meter_attach(struct mlx5_priv *priv,
+ 		struct mlx5_aso_mtr *aso_mtr;
+ 
+ 		aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
+-		if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
++		if (mlx5_aso_mtr_wait(priv, aso_mtr, false)) {
+ 			return rte_flow_error_set(error, ENOENT,
+ 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ 					NULL,
+@@ -2877,7 +2899,6 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
+ 	struct mlx5_flow_meter_profile *fmp;
+ 	struct mlx5_legacy_flow_meter *legacy_fm;
+ 	struct mlx5_flow_meter_info *fm;
+-	struct mlx5_flow_meter_policy *policy;
+ 	struct mlx5_flow_meter_sub_policy *sub_policy;
+ 	void *tmp;
+ 	uint32_t i, mtr_idx, policy_idx;
+@@ -2945,15 +2966,20 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
+ 		mlx5_l3t_destroy(priv->policy_idx_tbl);
+ 		priv->policy_idx_tbl = NULL;
+ 	}
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ 	if (priv->mtr_policy_arr) {
++		struct mlx5_flow_meter_policy *policy;
++
+ 		for (i = 0; i < priv->mtr_config.nb_meter_policies; i++) {
+ 			policy = mlx5_flow_meter_policy_find(dev, i,
+ 							     &policy_idx);
+-			if (policy->initialized)
++			if (policy->initialized) {
+ 				mlx5_flow_meter_policy_hws_delete(dev, i,
+ 								  error);
++			}
+ 		}
+ 	}
++#endif
+ 	if (priv->mtr_profile_tbl) {
+ 		MLX5_L3T_FOREACH(priv->mtr_profile_tbl, i, entry) {
+ 			fmp = entry;
+@@ -2967,14 +2993,17 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
+ 		mlx5_l3t_destroy(priv->mtr_profile_tbl);
+ 		priv->mtr_profile_tbl = NULL;
+ 	}
++#if defined(HAVE_MLX5_HWS_SUPPORT)
+ 	if (priv->mtr_profile_arr) {
+ 		for (i = 0; i < priv->mtr_config.nb_meter_profiles; i++) {
+ 			fmp = mlx5_flow_meter_profile_find(priv, i);
+-			if (fmp->initialized)
++			if (fmp->initialized) {
+ 				mlx5_flow_meter_profile_hws_delete(dev, i,
+ 								   error);
++			}
+ 		}
+ 	}
++#endif
+ 	/* Delete default policy table. */
+ 	mlx5_flow_destroy_def_policy(dev);
+ 	if (priv->sh->refcnt == 1)
+diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c
+index a3bea94811..41edd19bb8 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c
++++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c
+@@ -340,6 +340,55 @@ mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool * const cntp)
+ 	mlx5_free(cntp);
+ }
+ 
++static bool
++mlx5_hws_cnt_should_enable_cache(const struct mlx5_hws_cnt_pool_cfg *pcfg,
++				 const struct mlx5_hws_cache_param *ccfg)
++{
++	/*
++	 * Enable cache if and only if there are enough counters requested
++	 * to populate all of the caches.
++	 */
++	return pcfg->request_num >= ccfg->q_num * ccfg->size;
++}
++
++static struct mlx5_hws_cnt_pool_caches *
++mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
++			const struct mlx5_hws_cache_param *ccfg)
++{
++	struct mlx5_hws_cnt_pool_caches *cache;
++	char mz_name[RTE_MEMZONE_NAMESIZE];
++	uint32_t qidx;
++
++	/* If the counter pool is big enough, set up the counter pool cache. */
++	cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
++			sizeof(*cache) +
++			sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0])
++				* ccfg->q_num, 0, SOCKET_ID_ANY);
++	if (cache == NULL)
++		return NULL;
++	/* Store the necessary cache parameters. */
++	cache->fetch_sz = ccfg->fetch_sz;
++	cache->preload_sz = ccfg->preload_sz;
++	cache->threshold = ccfg->threshold;
++	cache->q_num = ccfg->q_num;
++	for (qidx = 0; qidx < ccfg->q_num; qidx++) {
++		snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx);
++		cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size,
++				SOCKET_ID_ANY,
++				RING_F_SP_ENQ | RING_F_SC_DEQ |
++				RING_F_EXACT_SZ);
++		if (cache->qcache[qidx] == NULL)
++			goto error;
++	}
++	return cache;
++
++error:
++	while (qidx--)
++		rte_ring_free(cache->qcache[qidx]);
++	mlx5_free(cache);
++	return NULL;
++}
++
+ static struct mlx5_hws_cnt_pool *
+ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
+ 		       const struct mlx5_hws_cnt_pool_cfg *pcfg,
+@@ -348,7 +397,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
+ 	char mz_name[RTE_MEMZONE_NAMESIZE];
+ 	struct mlx5_hws_cnt_pool *cntp;
+ 	uint64_t cnt_num = 0;
+-	uint32_t qidx;
+ 
+ 	MLX5_ASSERT(pcfg);
+ 	MLX5_ASSERT(ccfg);
+@@ -360,17 +408,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
+ 	cntp->cfg = *pcfg;
+ 	if (cntp->cfg.host_cpool)
+ 		return cntp;
+-	cntp->cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
+-			sizeof(*cntp->cache) +
+-			sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0])
+-				* ccfg->q_num, 0, SOCKET_ID_ANY);
+-	if (cntp->cache == NULL)
+-		goto error;
+-	 /* store the necessary cache parameters. */
+-	cntp->cache->fetch_sz = ccfg->fetch_sz;
+-	cntp->cache->preload_sz = ccfg->preload_sz;
+-	cntp->cache->threshold = ccfg->threshold;
+-	cntp->cache->q_num = ccfg->q_num;
+ 	if (pcfg->request_num > sh->hws_max_nb_counters) {
+ 		DRV_LOG(ERR, "Counter number %u "
+ 			"is greater than the maximum supported (%u).",
+@@ -418,13 +455,10 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
+ 		DRV_LOG(ERR, "failed to create reuse list ring");
+ 		goto error;
+ 	}
+-	for (qidx = 0; qidx < ccfg->q_num; qidx++) {
+-		snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx);
+-		cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size,
+-				SOCKET_ID_ANY,
+-				RING_F_SP_ENQ | RING_F_SC_DEQ |
+-				RING_F_EXACT_SZ);
+-		if (cntp->cache->qcache[qidx] == NULL)
++	/* Allocate counter cache only if needed. */
++	if (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) {
++		cntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg);
++		if (cntp->cache == NULL)
+ 			goto error;
+ 	}
+ 	/* Initialize the time for aging-out calculation. */
+@@ -685,7 +719,9 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
+ 	 * Maybe blocked for at most 200ms here.
+ 	 */
+ 	rte_spinlock_lock(&sh->cpool_lock);
+-	LIST_REMOVE(cpool, next);
++	/* Removing a cpool that was never added to the list caused a segfault; check first. */
++	if (!LIST_EMPTY(&sh->hws_cpool_list) && cpool->next.le_prev)
++		LIST_REMOVE(cpool, next);
+ 	rte_spinlock_unlock(&sh->cpool_lock);
+ 	if (cpool->cfg.host_cpool == NULL) {
+ 		if (--sh->cnt_svc->refcnt == 0)
+diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h
+index 585b5a83ad..e00596088f 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h
++++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h
+@@ -557,19 +557,32 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
+ }
+ 
+ /**
+- * Check if counter pool allocated for HWS is shared between ports.
++ * Decide if the given queue can be used to perform counter allocation/deallocation
++ * based on the counter configuration.
+  *
+  * @param[in] priv
+  *   Pointer to the port private data structure.
++ * @param[in] queue
++ *   Pointer to the queue index.
+  *
+  * @return
+- *   True if counter pools is shared between ports. False otherwise.
++ *   @p queue if the cache related to the queue can be used, NULL otherwise.
+  */
+-static __rte_always_inline bool
+-mlx5_hws_cnt_is_pool_shared(struct mlx5_priv *priv)
++static __rte_always_inline uint32_t *
++mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue)
+ {
+-	return priv && priv->hws_cpool &&
+-	    (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL);
++	if (priv && priv->hws_cpool) {
++		/* Do not use queue cache if counter pool is shared. */
++		if (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL)
++			return NULL;
++		/* Do not use queue cache if counter cache is disabled. */
++		if (priv->hws_cpool->cache == NULL)
++			return NULL;
++		return queue;
++	}
++	/* This case should not be reached if counter pool was successfully configured. */
++	MLX5_ASSERT(false);
++	return NULL;
+ }
+ 
+ static __rte_always_inline unsigned int
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c
+index 5bf1a679b2..cc087348a4 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rx.c
++++ b/dpdk/drivers/net/mlx5/mlx5_rx.c
+@@ -613,7 +613,8 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
+  * @param mprq
+  *   Indication if it is called from MPRQ.
+  * @return
+- *   0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE,
++ *   0 in case of empty CQE,
++ *   MLX5_REGULAR_ERROR_CQE_RET in case of error CQE,
+  *   MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset,
+  *   otherwise the packet size in regular RxQ,
+  *   and striding byte count format in mprq case.
+@@ -697,6 +698,11 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+ 					if (ret == MLX5_RECOVERY_ERROR_RET ||
+ 						ret == MLX5_RECOVERY_COMPLETED_RET)
+ 						return MLX5_CRITICAL_ERROR_CQE_RET;
++					if (!mprq && ret == MLX5_RECOVERY_IGNORE_RET) {
++						*skip_cnt = 1;
++						++rxq->cq_ci;
++						return MLX5_ERROR_CQE_MASK;
++					}
+ 				} else {
+ 					return 0;
+ 				}
+@@ -971,19 +977,18 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+ 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
+ 			len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask, &mcqe, &skip_cnt, false);
+ 			if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
++				/* We drop packets with non-critical errors */
++				rte_mbuf_raw_free(rep);
+ 				if (len == MLX5_CRITICAL_ERROR_CQE_RET) {
+-					rte_mbuf_raw_free(rep);
+ 					rq_ci = rxq->rq_ci << sges_n;
+ 					break;
+ 				}
++				/* Skip the specified number of error CQE packets. */
+ 				rq_ci >>= sges_n;
+ 				rq_ci += skip_cnt;
+ 				rq_ci <<= sges_n;
+-				idx = rq_ci & wqe_mask;
+-				wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
+-				seg = (*rxq->elts)[idx];
+-				cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
+-				len = len & ~MLX5_ERROR_CQE_MASK;
++				MLX5_ASSERT(!pkt);
++				continue;
+ 			}
+ 			if (len == 0) {
+ 				rte_mbuf_raw_free(rep);
+diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.h b/dpdk/drivers/net/mlx5/mlx5_rx.h
+index 2fce908499..d0ceae72ea 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_rx.h
++++ b/dpdk/drivers/net/mlx5/mlx5_rx.h
+@@ -101,14 +101,14 @@ struct mlx5_rxq_data {
+ 	unsigned int shared:1; /* Shared RXQ. */
+ 	unsigned int delay_drop:1; /* Enable delay drop. */
+ 	unsigned int cqe_comp_layout:1; /* CQE Compression Layout*/
+-	unsigned int cq_ci:24;
++	uint16_t port_id;
+ 	volatile uint32_t *rq_db;
+ 	volatile uint32_t *cq_db;
+-	uint16_t port_id;
+ 	uint32_t elts_ci;
+ 	uint32_t rq_ci;
+ 	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
+ 	uint32_t rq_pi;
++	uint32_t cq_ci:24;
+ 	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
+ 	uint32_t byte_mask;
+ 	union {
+diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c
+index 615e1d073d..f4ac58e2f9 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_stats.c
++++ b/dpdk/drivers/net/mlx5/mlx5_stats.c
+@@ -39,24 +39,36 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ 		unsigned int n)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+-	unsigned int i;
+-	uint64_t counters[n];
++	uint64_t counters[MLX5_MAX_XSTATS];
+ 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
++	unsigned int i;
++	uint16_t stats_n = 0;
++	uint16_t stats_n_2nd = 0;
+ 	uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n;
++	bool bond_master = (priv->master && priv->pf_bond >= 0);
+ 
+ 	if (n >= mlx5_stats_n && stats) {
+-		int stats_n;
+ 		int ret;
+ 
+-		stats_n = mlx5_os_get_stats_n(dev);
+-		if (stats_n < 0)
+-			return stats_n;
+-		if (xstats_ctrl->stats_n != stats_n)
++		ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd);
++		if (ret < 0)
++			return ret;
++		/*
++		 * The number of statistics fetched via "ETH_SS_STATS" may vary
++		 * with the port configuration at each call, and may also differ
++		 * between two ports; the numbers may even coincide although the
++		 * configurations differ.
++		 * Changing the configuration without using the RTE API is not
++		 * recommended. A port (traffic) restart may trigger another
++		 * initialization to make sure the mapping is correct.
++		 */
++		if (xstats_ctrl->stats_n != stats_n ||
++		    (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd))
+ 			mlx5_os_stats_init(dev);
+-		ret = mlx5_os_read_dev_counters(dev, counters);
+-		if (ret)
++		ret = mlx5_os_read_dev_counters(dev, bond_master, counters);
++		if (ret < 0)
+ 			return ret;
+-		for (i = 0; i != mlx5_stats_n; ++i) {
++		for (i = 0; i != mlx5_stats_n; i++) {
+ 			stats[i].id = i;
+ 			if (xstats_ctrl->info[i].dev) {
+ 				uint64_t wrap_n;
+@@ -225,30 +237,32 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
+ {
+ 	struct mlx5_priv *priv = dev->data->dev_private;
+ 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+-	int stats_n;
+ 	unsigned int i;
+ 	uint64_t *counters;
+ 	int ret;
++	uint16_t stats_n = 0;
++	uint16_t stats_n_2nd = 0;
++	bool bond_master = (priv->master && priv->pf_bond >= 0);
+ 
+-	stats_n = mlx5_os_get_stats_n(dev);
+-	if (stats_n < 0) {
++	ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd);
++	if (ret < 0) {
+ 		DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id,
+-			strerror(-stats_n));
+-		return stats_n;
++			strerror(-ret));
++		return ret;
+ 	}
+-	if (xstats_ctrl->stats_n != stats_n)
++	if (xstats_ctrl->stats_n != stats_n ||
++	    (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd))
+ 		mlx5_os_stats_init(dev);
+-	counters =  mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) *
+-			xstats_ctrl->mlx5_stats_n, 0,
+-			SOCKET_ID_ANY);
++	/* Consider using the stack directly instead of allocating. */
++	counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * xstats_ctrl->mlx5_stats_n,
++			       0, SOCKET_ID_ANY);
+ 	if (!counters) {
+-		DRV_LOG(WARNING, "port %u unable to allocate memory for xstats "
+-				"counters",
++		DRV_LOG(WARNING, "port %u unable to allocate memory for xstats counters",
+ 		     dev->data->port_id);
+ 		rte_errno = ENOMEM;
+ 		return -rte_errno;
+ 	}
+-	ret = mlx5_os_read_dev_counters(dev, counters);
++	ret = mlx5_os_read_dev_counters(dev, bond_master, counters);
+ 	if (ret) {
+ 		DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ 			dev->data->port_id, strerror(rte_errno));
+diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c
+index 5ac25d7e2d..fe2c512c5c 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_trigger.c
++++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c
+@@ -1498,7 +1498,9 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
+ 		if (!txq)
+ 			continue;
+ 		queue = mlx5_txq_get_sqn(txq);
+-		if ((priv->representor || priv->master) && config->dv_esw_en) {
++		if ((priv->representor || priv->master) &&
++		    config->dv_esw_en &&
++		    config->fdb_def_rule) {
+ 			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) {
+ 				mlx5_txq_release(dev, i);
+ 				goto error;
+@@ -1524,7 +1526,7 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
+ 	}
+ 	if (priv->isolated)
+ 		return 0;
+-	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0)
++	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master)
+ 		if (mlx5_flow_hw_lacp_rx_flow(dev))
+ 			goto error;
+ 	if (dev->data->promiscuous)
+@@ -1632,14 +1634,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
+ 		DRV_LOG(INFO, "port %u FDB default rule is disabled",
+ 			dev->data->port_id);
+ 	}
+-	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
++	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
+ 		ret = mlx5_flow_lacp_miss(dev);
+ 		if (ret)
+ 			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
+ 				"forward LACP to kernel.", dev->data->port_id);
+ 		else
+-			DRV_LOG(INFO, "LACP traffic will be missed in port %u."
+-				, dev->data->port_id);
++			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
++				dev->data->port_id);
+ 	}
+ 	if (priv->isolated)
+ 		return 0;
+diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c
+index 1ac43548b2..aac078a6ed 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_txq.c
++++ b/dpdk/drivers/net/mlx5/mlx5_txq.c
+@@ -1311,11 +1311,18 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
+ 	}
+ #ifdef HAVE_MLX5_HWS_SUPPORT
+ 	if (priv->sh->config.dv_flow_en == 2) {
+-		if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true))
+-			return -rte_errno;
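++		/* Track creation so the SQ miss rule can be rolled back on a later failure. */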
++		bool sq_miss_created = false;
++
++		if (priv->sh->config.fdb_def_rule) {
++			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true))
++				return -rte_errno;
++			sq_miss_created = true;
++		}
++
+ 		if (priv->sh->config.repr_matching &&
+ 		    mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) {
+-			mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
++			if (sq_miss_created)
++				mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
+ 			return -rte_errno;
+ 		}
+ 		return 0;
+diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c
+index 4db738785f..b5b6c7c728 100644
+--- a/dpdk/drivers/net/mlx5/mlx5_utils.c
++++ b/dpdk/drivers/net/mlx5/mlx5_utils.c
+@@ -379,7 +379,8 @@ _mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
+ 	idx -= 1;
+ 	trunk_idx = mlx5_trunk_idx_get(pool, idx);
+ 	trunk = lc->trunks[trunk_idx];
+-	MLX5_ASSERT(trunk);
++	if (!trunk)
++		return NULL;
+ 	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
+ 	return &trunk->data[entry_idx * pool->cfg.size];
+ }
+diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c
+index a31e1b5494..49f750be68 100644
+--- a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c
++++ b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c
+@@ -178,20 +178,29 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+ 	return -ENOTSUP;
+ }
+ 
+-/**
++/*
+  * Query the number of statistics provided by ETHTOOL.
+  *
+  * @param dev
+  *   Pointer to Ethernet device.
++ * @param bond_master
++ *   Indicate if the device is a bond master.
++ * @param n_stats
++ *   Pointer to number of stats to store.
++ * @param n_stats_sec
++ *   Pointer to number of stats to store for the 2nd port of the bond.
+  *
+  * @return
+- *   Number of statistics on success, negative errno value otherwise and
+- *   rte_errno is set.
++ *   0 on success, negative errno value otherwise and rte_errno is set.
+  */
+ int
+-mlx5_os_get_stats_n(struct rte_eth_dev *dev)
++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master,
++		    uint16_t *n_stats, uint16_t *n_stats_sec)
+ {
+ 	RTE_SET_USED(dev);
++	RTE_SET_USED(bond_master);
++	RTE_SET_USED(n_stats);
++	RTE_SET_USED(n_stats_sec);
+ 	return -ENOTSUP;
+ }
+ 
+@@ -221,6 +230,8 @@ mlx5_os_stats_init(struct rte_eth_dev *dev)
+  *
+  * @param dev
+  *   Pointer to Ethernet device.
++ * @param bond_master
++ *   Indicate if the device is a bond master.
+  * @param[out] stats
+  *   Counters table output buffer.
+  *
+@@ -229,9 +240,10 @@ mlx5_os_stats_init(struct rte_eth_dev *dev)
+  *   rte_errno is set.
+  */
+ int
+-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats)
+ {
+ 	RTE_SET_USED(dev);
++	RTE_SET_USED(bond_master);
+ 	RTE_SET_USED(stats);
+ 	return -ENOTSUP;
+ }
+diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c
+index daa69e533a..212c300c14 100644
+--- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c
++++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c
+@@ -198,7 +198,8 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+ 		RTE_PTYPE_L3_IPV4,
+ 		RTE_PTYPE_L3_IPV6,
+ 		RTE_PTYPE_L4_TCP,
+-		RTE_PTYPE_L4_UDP
++		RTE_PTYPE_L4_UDP,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 
+ 	return ptypes;
+diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
+index c12364941d..4cc64c7cad 100644
+--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
+@@ -1777,7 +1777,8 @@ mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+ 		RTE_PTYPE_L3_IPV6_EXT,
+ 		RTE_PTYPE_L2_ETHER_ARP,
+ 		RTE_PTYPE_L4_TCP,
+-		RTE_PTYPE_L4_UDP
++		RTE_PTYPE_L4_UDP,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 
+ 	return ptypes;
+diff --git a/dpdk/drivers/net/netvsc/hn_ethdev.c b/dpdk/drivers/net/netvsc/hn_ethdev.c
+index b8a32832d7..f8cb05a118 100644
+--- a/dpdk/drivers/net/netvsc/hn_ethdev.c
++++ b/dpdk/drivers/net/netvsc/hn_ethdev.c
+@@ -1127,8 +1127,10 @@ hn_reinit(struct rte_eth_dev *dev, uint16_t mtu)
+ 	int i, ret = 0;
+ 
+ 	/* Point primary queues at new primary channel */
+-	rxqs[0]->chan = hv->channels[0];
+-	txqs[0]->chan = hv->channels[0];
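++	/* The queues may not have been allocated yet; skip repointing them if so. */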
++	if (rxqs[0]) {
++		rxqs[0]->chan = hv->channels[0];
++		txqs[0]->chan = hv->channels[0];
++	}
+ 
+ 	ret = hn_attach(hv, mtu);
+ 	if (ret)
+@@ -1140,10 +1142,12 @@ hn_reinit(struct rte_eth_dev *dev, uint16_t mtu)
+ 		return ret;
+ 
+ 	/* Point any additional queues at new subchannels */
+-	for (i = 1; i < dev->data->nb_rx_queues; i++)
+-		rxqs[i]->chan = hv->channels[i];
+-	for (i = 1; i < dev->data->nb_tx_queues; i++)
+-		txqs[i]->chan = hv->channels[i];
++	if (rxqs[0]) {
++		for (i = 1; i < dev->data->nb_rx_queues; i++)
++			rxqs[i]->chan = hv->channels[i];
++		for (i = 1; i < dev->data->nb_tx_queues; i++)
++			txqs[i]->chan = hv->channels[i];
++	}
+ 
+ 	return ret;
+ }
+diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c
+index e4f5015aa3..9bf1ec5509 100644
+--- a/dpdk/drivers/net/netvsc/hn_rxtx.c
++++ b/dpdk/drivers/net/netvsc/hn_rxtx.c
+@@ -612,7 +612,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
+ 					   RTE_PTYPE_L4_MASK);
+ 
+ 	if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
+-		m->vlan_tci = info->vlan_info;
++		m->vlan_tci = RTE_VLAN_TCI_MAKE(NDIS_VLAN_INFO_ID(info->vlan_info),
++						NDIS_VLAN_INFO_PRI(info->vlan_info),
++						NDIS_VLAN_INFO_CFI(info->vlan_info));
+ 		m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
+ 
+ 		/* NDIS always strips tag, put it back if necessary */
+@@ -1332,7 +1334,9 @@ static void hn_encap(struct rndis_packet_msg *pkt,
+ 	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
+ 		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
+ 						  NDIS_PKTINFO_TYPE_VLAN);
+-		*pi_data = m->vlan_tci;
++		*pi_data = NDIS_VLAN_INFO_MAKE(RTE_VLAN_TCI_ID(m->vlan_tci),
++					       RTE_VLAN_TCI_PRI(m->vlan_tci),
++					       RTE_VLAN_TCI_DEI(m->vlan_tci));
+ 	}
+ 
+ 	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+diff --git a/dpdk/drivers/net/netvsc/hn_vf.c b/dpdk/drivers/net/netvsc/hn_vf.c
+index 90cb6f6923..a4e958419d 100644
+--- a/dpdk/drivers/net/netvsc/hn_vf.c
++++ b/dpdk/drivers/net/netvsc/hn_vf.c
+@@ -264,7 +264,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
+ 			goto exit;
+ 		}
+ 
+-		ret = hn_vf_mtu_set(dev, dev->data->mtu);
++		ret = rte_eth_dev_set_mtu(port, dev->data->mtu);
+ 		if (ret) {
+ 			PMD_DRV_LOG(ERR, "Failed to set VF MTU");
+ 			goto exit;
+@@ -794,7 +794,7 @@ int hn_vf_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ 	rte_rwlock_read_lock(&hv->vf_lock);
+ 	vf_dev = hn_get_vf_dev(hv);
+ 	if (hv->vf_ctx.vf_vsc_switched && vf_dev)
+-		ret = vf_dev->dev_ops->mtu_set(vf_dev, mtu);
++		ret = rte_eth_dev_set_mtu(vf_dev->data->port_id, mtu);
+ 	rte_rwlock_read_unlock(&hv->vf_lock);
+ 
+ 	return ret;
+diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.c b/dpdk/drivers/net/nfp/flower/nfp_flower.c
+index 6b523d98b0..9ecd5f49c7 100644
+--- a/dpdk/drivers/net/nfp/flower/nfp_flower.c
++++ b/dpdk/drivers/net/nfp/flower/nfp_flower.c
+@@ -82,63 +82,6 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
+ 	return 0;
+ }
+ 
+-/* Reset and stop device. The device can not be restarted. */
+-static int
+-nfp_flower_pf_close(struct rte_eth_dev *dev)
+-{
+-	uint16_t i;
+-	struct nfp_net_hw *hw;
+-	struct nfp_pf_dev *pf_dev;
+-	struct nfp_net_txq *this_tx_q;
+-	struct nfp_net_rxq *this_rx_q;
+-	struct nfp_flower_representor *repr;
+-	struct nfp_app_fw_flower *app_fw_flower;
+-
+-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+-		return 0;
+-
+-	repr = dev->data->dev_private;
+-	hw = repr->app_fw_flower->pf_hw;
+-	pf_dev = hw->pf_dev;
+-	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
+-
+-	nfp_mtr_priv_uninit(pf_dev);
+-
+-	/*
+-	 * We assume that the DPDK application is stopping all the
+-	 * threads/queues before calling the device close function.
+-	 */
+-	nfp_net_disable_queues(dev);
+-
+-	/* Clear queues */
+-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+-		this_tx_q = dev->data->tx_queues[i];
+-		nfp_net_reset_tx_queue(this_tx_q);
+-	}
+-
+-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+-		this_rx_q = dev->data->rx_queues[i];
+-		nfp_net_reset_rx_queue(this_rx_q);
+-	}
+-
+-	/* Cancel possible impending LSC work here before releasing the port */
+-	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
+-
+-	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
+-
+-	/* Now it is safe to free all PF resources */
+-	PMD_DRV_LOG(INFO, "Freeing PF resources");
+-	nfp_cpp_area_free(pf_dev->ctrl_area);
+-	nfp_cpp_area_free(pf_dev->qc_area);
+-	free(pf_dev->hwinfo);
+-	free(pf_dev->sym_tbl);
+-	nfp_cpp_free(pf_dev->cpp);
+-	rte_free(app_fw_flower);
+-	rte_free(pf_dev);
+-
+-	return 0;
+-}
+-
+ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
+ 	.dev_infos_get          = nfp_net_infos_get,
+ 	.link_update            = nfp_net_link_update,
+@@ -146,7 +89,6 @@ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
+ 
+ 	.dev_start              = nfp_flower_pf_start,
+ 	.dev_stop               = nfp_net_stop,
+-	.dev_close              = nfp_flower_pf_close,
+ };
+ 
+ static inline struct nfp_flower_representor *
+@@ -191,7 +133,9 @@ nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
+ 		return false;
+ 	}
+ 
+-	rte_ring_enqueue(repr->ring, (void *)mbuf);
++	if (rte_ring_enqueue(repr->ring, (void *)mbuf) != 0)
++		return false;
++
+ 	return true;
+ }
+ 
+@@ -567,6 +511,8 @@ nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw)
+ 
+ 	pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1;
+ 
++	nfp_net_disable_queues(eth_dev);
++
+ 	snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name);
+ 	for (i = 0; i < hw->max_tx_queues; i++) {
+ 		txq = eth_dev->data->tx_queues[i];
+@@ -858,6 +804,23 @@ app_cleanup:
+ 	return ret;
+ }
+ 
++void
++nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev)
++{
++	struct nfp_app_fw_flower *app_fw_flower;
++
++	app_fw_flower = pf_dev->app_fw_priv;
++	nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw);
++	nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area);
++	nfp_cpp_area_free(pf_dev->ctrl_area);
++	rte_free(app_fw_flower->pf_hw);
++	nfp_mtr_priv_uninit(pf_dev);
++	nfp_flow_priv_uninit(pf_dev);
++	if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0)
++		PMD_DRV_LOG(WARNING, "Failed to free switch domain for device");
++	rte_free(app_fw_flower);
++}
++
+ int
+ nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
+ {
+diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.h b/dpdk/drivers/net/nfp/flower/nfp_flower.h
+index 6f27c06acc..8393de66c5 100644
+--- a/dpdk/drivers/net/nfp/flower/nfp_flower.h
++++ b/dpdk/drivers/net/nfp/flower/nfp_flower.h
+@@ -106,6 +106,7 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower)
+ 
+ int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
+ 		const struct nfp_dev_info *dev_info);
++void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev);
+ int nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
+ bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
+ 		struct rte_mbuf *mbuf,
+diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c
+index c25487c277..102daa3d70 100644
+--- a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c
++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c
+@@ -441,6 +441,11 @@ nfp_flower_cmsg_port_mod_rx(struct nfp_app_fw_flower *app_fw_flower,
+ 		return -EINVAL;
+ 	}
+ 
++	if (repr == NULL) {
++		PMD_DRV_LOG(ERR, "Can not get 'repr' for port %#x", port);
++		return -EINVAL;
++	}
++
+ 	repr->link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ 	if ((msg->info & NFP_FLOWER_CMSG_PORT_MOD_INFO_LINK) != 0)
+ 		repr->link.link_status = RTE_ETH_LINK_UP;
+diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c
+index 0f0e63aae0..88fb6975af 100644
+--- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c
++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c
+@@ -291,12 +291,156 @@ nfp_flower_repr_tx_burst(void *tx_queue,
+ 	return sent;
+ }
+ 
++static void
++nfp_flower_repr_free_queue(struct nfp_flower_representor *repr)
++{
++	uint16_t i;
++	struct rte_eth_dev *eth_dev = repr->eth_dev;
++
++	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
++		rte_free(eth_dev->data->tx_queues[i]);
++
++	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
++		rte_free(eth_dev->data->rx_queues[i]);
++}
++
++static void
++nfp_flower_pf_repr_close_queue(struct nfp_flower_representor *repr)
++{
++	struct rte_eth_dev *eth_dev = repr->eth_dev;
++
++	/*
++	 * We assume that the DPDK application is stopping all the
++	 * threads/queues before calling the device close function.
++	 */
++	nfp_net_disable_queues(eth_dev);
++
++	/* Clear queues */
++	nfp_net_close_tx_queue(eth_dev);
++	nfp_net_close_rx_queue(eth_dev);
++}
++
++static void
++nfp_flower_repr_close_queue(struct nfp_flower_representor *repr)
++{
++	switch (repr->repr_type) {
++	case NFP_REPR_TYPE_PHYS_PORT:
++		nfp_flower_repr_free_queue(repr);
++		break;
++	case NFP_REPR_TYPE_PF:
++		nfp_flower_pf_repr_close_queue(repr);
++		break;
++	case NFP_REPR_TYPE_VF:
++		nfp_flower_repr_free_queue(repr);
++		break;
++	default:
++		PMD_DRV_LOG(ERR, "Unsupported repr port type.");
++		break;
++	}
++}
++
++static int
++nfp_flower_repr_uninit(struct rte_eth_dev *eth_dev)
++{
++	uint16_t index;
++	struct nfp_flower_representor *repr;
++
++	repr = eth_dev->data->dev_private;
++	rte_ring_free(repr->ring);
++
++	if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) {
++		index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id);
++		repr->app_fw_flower->phy_reprs[index] = NULL;
++	} else {
++		index = repr->vf_id;
++		repr->app_fw_flower->vf_reprs[index] = NULL;
++	}
++
++	return 0;
++}
++
++static int
++nfp_flower_pf_repr_uninit(struct rte_eth_dev *eth_dev)
++{
++	struct nfp_flower_representor *repr = eth_dev->data->dev_private;
++
++	repr->app_fw_flower->pf_repr = NULL;
++
++	return 0;
++}
++
++static void
++nfp_flower_repr_free(struct nfp_flower_representor *repr,
++		enum nfp_repr_type repr_type)
++{
++	switch (repr_type) {
++	case NFP_REPR_TYPE_PHYS_PORT:
++		nfp_flower_repr_uninit(repr->eth_dev);
++		break;
++	case NFP_REPR_TYPE_PF:
++		nfp_flower_pf_repr_uninit(repr->eth_dev);
++		break;
++	case NFP_REPR_TYPE_VF:
++		nfp_flower_repr_uninit(repr->eth_dev);
++		break;
++	default:
++		PMD_DRV_LOG(ERR, "Unsupported repr port type.");
++		break;
++	}
++}
++
++/* Reset and stop device. The device cannot be restarted. */
++static int
++nfp_flower_repr_dev_close(struct rte_eth_dev *dev)
++{
++	uint16_t i;
++	struct nfp_net_hw *hw;
++	struct nfp_pf_dev *pf_dev;
++	struct nfp_flower_representor *repr;
++	struct nfp_app_fw_flower *app_fw_flower;
++
++	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++		return 0;
++
++	repr = dev->data->dev_private;
++	app_fw_flower = repr->app_fw_flower;
++	hw = app_fw_flower->pf_hw;
++	pf_dev = hw->pf_dev;
++
++	if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC)
++		return -EINVAL;
++
++	nfp_flower_repr_close_queue(repr);
++
++	nfp_flower_repr_free(repr, repr->repr_type);
++
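++	/* Free shared PF resources only after the last representor is closed. */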
++	for (i = 0; i < MAX_FLOWER_VFS; i++) {
++		if (app_fw_flower->vf_reprs[i] != NULL)
++			return 0;
++	}
++
++	for (i = 0; i < NFP_MAX_PHYPORTS; i++) {
++		if (app_fw_flower->phy_reprs[i] != NULL)
++			return 0;
++	}
++
++	if (app_fw_flower->pf_repr != NULL)
++		return 0;
++
++	/* Now it is safe to free all PF resources */
++	nfp_uninit_app_fw_flower(pf_dev);
++	nfp_pf_uninit(pf_dev);
++
++	return 0;
++}
++
+ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = {
+ 	.dev_infos_get        = nfp_flower_repr_dev_infos_get,
+ 
+ 	.dev_start            = nfp_flower_pf_start,
+ 	.dev_configure        = nfp_net_configure,
+ 	.dev_stop             = nfp_net_stop,
++	.dev_close            = nfp_flower_repr_dev_close,
+ 
+ 	.rx_queue_setup       = nfp_net_rx_queue_setup,
+ 	.tx_queue_setup       = nfp_net_tx_queue_setup,
+@@ -319,6 +463,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
+ 	.dev_start            = nfp_flower_repr_dev_start,
+ 	.dev_configure        = nfp_net_configure,
+ 	.dev_stop             = nfp_flower_repr_dev_stop,
++	.dev_close            = nfp_flower_repr_dev_close,
+ 
+ 	.rx_queue_setup       = nfp_flower_repr_rx_queue_setup,
+ 	.tx_queue_setup       = nfp_flower_repr_tx_queue_setup,
+@@ -410,6 +555,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev,
+ 
+ 	repr->app_fw_flower->pf_repr = repr;
+ 	repr->app_fw_flower->pf_hw->eth_dev = eth_dev;
++	repr->eth_dev = eth_dev;
+ 
+ 	return 0;
+ }
+@@ -501,6 +647,8 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
+ 		app_fw_flower->vf_reprs[index] = repr;
+ 	}
+ 
++	repr->eth_dev = eth_dev;
++
+ 	return 0;
+ 
+ mac_cleanup:
+@@ -511,6 +659,35 @@ ring_cleanup:
+ 	return ret;
+ }
+ 
++static void
++nfp_flower_repr_free_all(struct nfp_app_fw_flower *app_fw_flower)
++{
++	uint32_t i;
++	struct nfp_flower_representor *repr;
++
++	for (i = 0; i < MAX_FLOWER_VFS; i++) {
++		repr = app_fw_flower->vf_reprs[i];
++		if (repr != NULL) {
++			nfp_flower_repr_free(repr, NFP_REPR_TYPE_VF);
++			app_fw_flower->vf_reprs[i] = NULL;
++		}
++	}
++
++	for (i = 0; i < NFP_MAX_PHYPORTS; i++) {
++		repr = app_fw_flower->phy_reprs[i];
++		if (repr != NULL) {
++			nfp_flower_repr_free(repr, NFP_REPR_TYPE_PHYS_PORT);
++			app_fw_flower->phy_reprs[i] = NULL;
++		}
++	}
++
++	repr = app_fw_flower->pf_repr;
++	if (repr != NULL) {
++		nfp_flower_repr_free(repr, NFP_REPR_TYPE_PF);
++		app_fw_flower->pf_repr = NULL;
++	}
++}
++
+ static int
+ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
+ {
+@@ -563,7 +740,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
+ 		eth_port = &nfp_eth_table->ports[i];
+ 		flower_repr.repr_type = NFP_REPR_TYPE_PHYS_PORT;
+ 		flower_repr.port_id = nfp_flower_get_phys_port_id(eth_port->index);
+-		flower_repr.nfp_idx = eth_port->eth_index;
++		flower_repr.nfp_idx = eth_port->index;
+ 		flower_repr.vf_id = i + 1;
+ 
+ 		/* Copy the real mac of the interface to the representor struct */
+@@ -585,7 +762,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
+ 	}
+ 
+ 	if (i < app_fw_flower->num_phyport_reprs)
+-		return ret;
++		goto repr_free;
+ 
+ 	/*
+ 	 * Now allocate eth_dev's for VF representors.
+@@ -614,9 +791,14 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
+ 	}
+ 
+ 	if (i < app_fw_flower->num_vf_reprs)
+-		return ret;
++		goto repr_free;
+ 
+ 	return 0;
++
++repr_free:
++	nfp_flower_repr_free_all(app_fw_flower);
++
++	return ret;
+ }
+ 
+ int
+@@ -634,10 +816,9 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower)
+ 	pci_dev = pf_dev->pci_dev;
+ 
+ 	/* Allocate a switch domain for the flower app */
+-	if (app_fw_flower->switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID &&
+-			rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id)) {
++	ret = rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id);
++	if (ret != 0)
+ 		PMD_INIT_LOG(WARNING, "failed to allocate switch domain for device");
+-	}
+ 
+ 	/* Now parse PCI device args passed for representor info */
+ 	if (pci_dev->device.devargs != NULL) {
+@@ -677,8 +858,15 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower)
+ 	ret = nfp_flower_repr_alloc(app_fw_flower);
+ 	if (ret != 0) {
+ 		PMD_INIT_LOG(ERR, "representors allocation failed");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto domain_free;
+ 	}
+ 
+ 	return 0;
++
++domain_free:
++	if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0)
++		PMD_INIT_LOG(WARNING, "failed to free switch domain for device");
++
++	return ret;
+ }
+diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h
+index bcb4c3cdb5..8053617562 100644
+--- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h
++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h
+@@ -20,6 +20,7 @@ struct nfp_flower_representor {
+ 	struct rte_ring *ring;
+ 	struct rte_eth_link link;
+ 	struct rte_eth_stats repr_stats;
++	struct rte_eth_dev *eth_dev;
+ };
+ 
+ int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower);
+diff --git a/dpdk/drivers/net/nfp/meson.build b/dpdk/drivers/net/nfp/meson.build
+index cf9c16266d..7bf94710f1 100644
+--- a/dpdk/drivers/net/nfp/meson.build
++++ b/dpdk/drivers/net/nfp/meson.build
+@@ -4,6 +4,7 @@
+ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
+     build = false
+     reason = 'only supported on 64-bit Linux'
++    subdir_done()
+ endif
+ 
+ sources = files(
+diff --git a/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+index ff9b10f046..b9da74bc99 100644
+--- a/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
++++ b/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+@@ -137,7 +137,7 @@ nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,
+ 	}
+ }
+ 
+-static inline void
++static inline int
+ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
+ 		struct nfp_net_txq *txq,
+ 		struct rte_mbuf *pkt)
+@@ -174,7 +174,7 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
+ 	}
+ 
+ 	if (meta_data->length == 0)
+-		return;
++		return 0;
+ 
+ 	meta_info = meta_data->header;
+ 	meta_data->header = rte_cpu_to_be_32(meta_data->header);
+@@ -188,15 +188,16 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
+ 		case NFP_NET_META_VLAN:
+ 			if (vlan_layer > 0) {
+ 				PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported");
+-				return;
++				return -EINVAL;
+ 			}
++
+ 			nfp_net_set_meta_vlan(meta_data, pkt, layer);
+ 			vlan_layer++;
+ 			break;
+ 		case NFP_NET_META_IPSEC:
+ 			if (ipsec_layer > 2) {
+ 				PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now.");
+-				return;
++				return -EINVAL;
+ 			}
+ 
+ 			nfp_net_set_meta_ipsec(meta_data, txq, pkt, layer, ipsec_layer);
+@@ -204,11 +205,13 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
+ 			break;
+ 		default:
+ 			PMD_DRV_LOG(ERR, "The metadata type not supported");
+-			return;
++			return -ENOTSUP;
+ 		}
+ 
+ 		memcpy(meta, &meta_data->data[layer], sizeof(meta_data->data[layer]));
+ 	}
++
++	return 0;
+ }
+ 
+ uint16_t
+@@ -225,6 +228,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
+ 		uint16_t nb_pkts,
+ 		bool repr_flag)
+ {
++	int ret;
+ 	uint16_t i;
+ 	uint8_t offset;
+ 	uint32_t pkt_size;
+@@ -271,7 +275,10 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
+ 		if (!repr_flag) {
+ 			struct nfp_net_meta_raw meta_data;
+ 			memset(&meta_data, 0, sizeof(meta_data));
+-			nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt);
++			ret = nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt);
++			if (unlikely(ret != 0))
++				goto xmit_end;
++
+ 			offset = meta_data.length;
+ 		} else {
+ 			offset = FLOWER_PKT_DATA_OFFSET;
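
Note on the nfd3 hunks above: nfp_net_nfd3_set_meta_data() is converted from void to int so that a bad metadata layout stops the transmit loop instead of being logged and silently ignored. A minimal sketch of that error-propagation pattern, with hypothetical names (build_metadata, xmit_loop) standing in for the driver's:

#include <errno.h>

static int
build_metadata(unsigned int layers)
{
	if (layers > 1)
		return -EINVAL;		/* reject unsupported layouts */

	return 0;			/* metadata written successfully */
}

static int
xmit_loop(unsigned int nb_pkts)
{
	unsigned int i;

	for (i = 0; i < nb_pkts; i++) {
		if (build_metadata(i) != 0)
			break;		/* stop early; caller sees partial count */
	}

	return (int)i;			/* packets actually queued */
}
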
+diff --git a/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+index 0141fbcc8f..772c847b9d 100644
+--- a/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
++++ b/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+@@ -167,7 +167,7 @@ close_block:
+ 	return nop_slots;
+ }
+ 
+-static void
++static int
+ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
+ 		struct nfp_net_txq *txq,
+ 		uint64_t *metadata)
+@@ -178,7 +178,6 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
+ 	uint32_t cap_extend;
+ 	struct nfp_net_hw *hw;
+ 	uint32_t header_offset;
+-	uint8_t vlan_layer = 0;
+ 	uint8_t ipsec_layer = 0;
+ 	struct nfp_net_meta_raw meta_data;
+ 
+@@ -206,8 +205,10 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
+ 		meta_data.length += 3 * NFP_NET_META_FIELD_SIZE;
+ 	}
+ 
+-	if (meta_data.length == 0)
+-		return;
++	if (meta_data.length == 0) {
++		*metadata = 0;
++		return 0;
++	}
+ 
+ 	meta_type = meta_data.header;
+ 	header_offset = meta_type << NFP_NET_META_NFDK_LENGTH;
+@@ -221,17 +222,13 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
+ 			meta += NFP_NET_META_FIELD_SIZE) {
+ 		switch (meta_type & NFP_NET_META_FIELD_MASK) {
+ 		case NFP_NET_META_VLAN:
+-			if (vlan_layer > 0) {
+-				PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported");
+-				return;
+-			}
++
+ 			nfp_net_set_meta_vlan(&meta_data, pkt, layer);
+-			vlan_layer++;
+ 			break;
+ 		case NFP_NET_META_IPSEC:
+ 			if (ipsec_layer > 2) {
+ 				PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now.");
+-				return;
++				return -EINVAL;
+ 			}
+ 
+ 			nfp_net_set_meta_ipsec(&meta_data, txq, pkt, layer, ipsec_layer);
+@@ -239,13 +236,15 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
+ 			break;
+ 		default:
+ 			PMD_DRV_LOG(ERR, "The metadata type not supported");
+-			return;
++			return -ENOTSUP;
+ 		}
+ 
+ 		memcpy(meta, &meta_data.data[layer], sizeof(meta_data.data[layer]));
+ 	}
+ 
+ 	*metadata = NFDK_DESC_TX_CHAIN_META;
++
++	return 0;
+ }
+ 
+ uint16_t
+@@ -292,6 +291,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue,
+ 
+ 	/* Sending packets */
+ 	while (npkts < nb_pkts && free_descs > 0) {
++		int ret;
+ 		int nop_descs;
+ 		uint32_t type;
+ 		uint32_t dma_len;
+@@ -319,10 +319,13 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue,
+ 
+ 		temp_pkt = pkt;
+ 
+-		if (repr_flag)
++		if (repr_flag) {
+ 			metadata = NFDK_DESC_TX_CHAIN_META;
+-		else
+-			nfp_net_nfdk_set_meta_data(pkt, txq, &metadata);
++		} else {
++			ret = nfp_net_nfdk_set_meta_data(pkt, txq, &metadata);
++			if (unlikely(ret != 0))
++				goto xmit_end;
++		}
+ 
+ 		if (unlikely(pkt->nb_segs > 1 &&
+ 				(hw->super.cap & NFP_NET_CFG_CTRL_GATHER) == 0)) {
+diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c
+index f02caf8056..7495b01f16 100644
+--- a/dpdk/drivers/net/nfp/nfp_ethdev.c
++++ b/dpdk/drivers/net/nfp/nfp_ethdev.c
+@@ -310,6 +310,66 @@ nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf)
+ 	rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf);
+ }
+ 
++static void
++nfp_net_uninit(struct rte_eth_dev *eth_dev)
++{
++	struct nfp_net_hw *net_hw;
++
++	net_hw = eth_dev->data->dev_private;
++	rte_free(net_hw->eth_xstats_base);
++	nfp_ipsec_uninit(eth_dev);
++}
++
++static void
++nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev,
++		uint8_t id)
++{
++	struct rte_eth_dev *eth_dev;
++	struct nfp_app_fw_nic *app_fw_nic;
++
++	app_fw_nic = pf_dev->app_fw_priv;
++	if (app_fw_nic->ports[id] != NULL) {
++		eth_dev = app_fw_nic->ports[id]->eth_dev;
++		if (eth_dev != NULL)
++			nfp_net_uninit(eth_dev);
++
++		app_fw_nic->ports[id] = NULL;
++	}
++}
++
++static void
++nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
++{
++	nfp_cpp_area_release_free(pf_dev->ctrl_area);
++	rte_free(pf_dev->app_fw_priv);
++}
++
++void
++nfp_pf_uninit(struct nfp_pf_dev *pf_dev)
++{
++	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
++	nfp_cpp_area_release_free(pf_dev->qc_area);
++	free(pf_dev->sym_tbl);
++	if (pf_dev->multi_pf.enabled) {
++		nfp_net_keepalive_stop(&pf_dev->multi_pf);
++		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
++	}
++	free(pf_dev->nfp_eth_table);
++	free(pf_dev->hwinfo);
++	nfp_cpp_free(pf_dev->cpp);
++	rte_free(pf_dev);
++}
++
++static int
++nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev)
++{
++	free(pf_dev->sym_tbl);
++	nfp_cpp_free(pf_dev->cpp);
++	rte_free(pf_dev);
++
++	return 0;
++}
++
+ /* Reset and stop device. The device can not be restarted. */
+ static int
+ nfp_net_close(struct rte_eth_dev *dev)
+@@ -321,8 +381,19 @@ nfp_net_close(struct rte_eth_dev *dev)
+ 	struct rte_pci_device *pci_dev;
+ 	struct nfp_app_fw_nic *app_fw_nic;
+ 
+-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++	/*
++	 * In a secondary process, a released eth device can be found by its name
++	 * in shared memory.
++	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
++	 * eth device has been released.
++	 */
++	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
++		if (dev->state == RTE_ETH_DEV_UNUSED)
++			return 0;
++
++		nfp_pf_secondary_uninit(dev->process_private);
+ 		return 0;
++	}
+ 
+ 	hw = dev->data->dev_private;
+ 	pf_dev = hw->pf_dev;
+@@ -339,16 +410,17 @@ nfp_net_close(struct rte_eth_dev *dev)
+ 	nfp_net_close_tx_queue(dev);
+ 	nfp_net_close_rx_queue(dev);
+ 
+-	/* Clear ipsec */
+-	nfp_ipsec_uninit(dev);
+-
+ 	/* Cancel possible impending LSC work here before releasing the port */
+ 	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
+ 
+ 	/* Only free PF resources after all physical ports have been closed */
+ 	/* Mark this port as unused and free device priv resources */
+ 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
+-	app_fw_nic->ports[hw->idx] = NULL;
++
++	if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC)
++		return -EINVAL;
++
++	nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx);
+ 
+ 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
+ 		id = nfp_function_id_get(pf_dev, i);
+@@ -358,26 +430,16 @@ nfp_net_close(struct rte_eth_dev *dev)
+ 			return 0;
+ 	}
+ 
+-	/* Now it is safe to free all PF resources */
+-	PMD_INIT_LOG(INFO, "Freeing PF resources");
+-	if (pf_dev->multi_pf.enabled) {
+-		nfp_net_keepalive_stop(&pf_dev->multi_pf);
+-		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
+-	}
+-	nfp_cpp_area_free(pf_dev->ctrl_area);
+-	nfp_cpp_area_free(pf_dev->qc_area);
+-	free(pf_dev->hwinfo);
+-	free(pf_dev->sym_tbl);
+-	nfp_cpp_free(pf_dev->cpp);
+-	rte_free(app_fw_nic);
+-	rte_free(pf_dev);
+-
++	/* Enabled in nfp_net_start() */
+ 	rte_intr_disable(pci_dev->intr_handle);
+ 
+-	/* Unregister callback func from eal lib */
++	/* Registered in nfp_net_init() */
+ 	rte_intr_callback_unregister(pci_dev->intr_handle,
+ 			nfp_net_dev_interrupt_handler, (void *)dev);
+ 
++	nfp_uninit_app_fw_nic(pf_dev);
++	nfp_pf_uninit(pf_dev);
++
+ 	return 0;
+ }
+ 
+@@ -576,28 +638,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
+ 
+ 	rte_eth_copy_pci_info(eth_dev, pci_dev);
+ 
+-	if (port == 0 || pf_dev->multi_pf.enabled) {
+-		uint32_t min_size;
+-
++	if (pf_dev->multi_pf.enabled)
+ 		hw->ctrl_bar = pf_dev->ctrl_bar;
+-		min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index;
+-		net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats",
+-				min_size, &net_hw->mac_stats_area);
+-		if (net_hw->mac_stats_bar == NULL) {
+-			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
+-			return -EIO;
+-		}
+-
+-		net_hw->mac_stats = net_hw->mac_stats_bar;
+-	} else {
+-		if (pf_dev->ctrl_bar == NULL)
+-			return -ENODEV;
+-
+-		/* Use port offset in pf ctrl_bar for this ports control bar */
++	else
+ 		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
+-		net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar +
++
++	net_hw->mac_stats = pf_dev->mac_stats_bar +
+ 				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
+-	}
+ 
+ 	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
+ 	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
+@@ -625,7 +672,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
+ 	if (net_hw->eth_xstats_base == NULL) {
+ 		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
+ 				pci_dev->device.name);
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto ipsec_exit;
+ 	}
+ 
+ 	/* Work out where in the BAR the queues start. */
+@@ -655,7 +703,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
+ 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+ 	if (eth_dev->data->mac_addrs == NULL) {
+ 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto xstats_free;
+ 	}
+ 
+ 	nfp_net_pf_read_mac(app_fw_nic, port);
+@@ -693,6 +742,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
+ 	nfp_net_stats_reset(eth_dev);
+ 
+ 	return 0;
++
++xstats_free:
++	rte_free(net_hw->eth_xstats_base);
++ipsec_exit:
++	nfp_ipsec_uninit(eth_dev);
++
++	return err;
+ }
+ 
+ #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
+@@ -1120,26 +1176,46 @@ port_cleanup:
+ 				app_fw_nic->ports[id]->eth_dev != NULL) {
+ 			struct rte_eth_dev *tmp_dev;
+ 			tmp_dev = app_fw_nic->ports[id]->eth_dev;
+-			nfp_ipsec_uninit(tmp_dev);
++			nfp_net_uninit(tmp_dev);
+ 			rte_eth_dev_release_port(tmp_dev);
+-			app_fw_nic->ports[id] = NULL;
+ 		}
+ 	}
+-	nfp_cpp_area_free(pf_dev->ctrl_area);
++	nfp_cpp_area_release_free(pf_dev->ctrl_area);
+ app_cleanup:
+ 	rte_free(app_fw_nic);
+ 
+ 	return ret;
+ }
+ 
++/* Force the physical port down to clear the possible DMA error */
+ static int
+-nfp_pf_init(struct rte_pci_device *pci_dev)
++nfp_net_force_port_down(struct nfp_pf_dev *pf_dev,
++		struct nfp_eth_table *nfp_eth_table,
++		struct nfp_cpp *cpp)
+ {
++	int ret;
+ 	uint32_t i;
+ 	uint32_t id;
++	uint32_t index;
++	uint32_t count;
++
++	count = nfp_net_get_port_num(pf_dev, nfp_eth_table);
++	for (i = 0; i < count; i++) {
++		id = nfp_function_id_get(pf_dev, i);
++		index = nfp_eth_table->ports[id].index;
++		ret = nfp_eth_set_configured(cpp, index, 0);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++
++static int
++nfp_pf_init(struct rte_pci_device *pci_dev)
++{
+ 	int ret = 0;
+ 	uint64_t addr;
+-	uint32_t index;
+ 	uint32_t cpp_id;
+ 	uint8_t function_id;
+ 	struct nfp_cpp *cpp;
+@@ -1211,11 +1287,11 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
+ 	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp);
+ 	pf_dev->multi_pf.function_id = function_id;
+ 
+-	/* Force the physical port down to clear the possible DMA error */
+-	for (i = 0; i < nfp_eth_table->count; i++) {
+-		id = nfp_function_id_get(pf_dev, i);
+-		index = nfp_eth_table->ports[id].index;
+-		nfp_eth_set_configured(cpp, index, 0);
++	ret = nfp_net_force_port_down(pf_dev, nfp_eth_table, cpp);
++	if (ret != 0) {
++		PMD_INIT_LOG(ERR, "Failed to force port down");
++		ret = -EIO;
++		goto eth_table_cleanup;
+ 	}
+ 
+ 	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo,
+@@ -1264,6 +1340,14 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
+ 
+ 	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);
+ 
++	pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats",
++			NFP_MAC_STATS_SIZE * nfp_eth_table->max_index,
++			&pf_dev->mac_stats_area);
++	if (pf_dev->mac_stats_bar == NULL) {
++		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats");
++		goto hwqueues_cleanup;
++	}
++
+ 	/*
+ 	 * PF initialization has been done at this point. Call app specific
+ 	 * init code now.
+@@ -1273,14 +1357,14 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
+ 		if (pf_dev->multi_pf.enabled) {
+ 			ret = nfp_enable_multi_pf(pf_dev);
+ 			if (ret != 0)
+-				goto hwqueues_cleanup;
++				goto mac_stats_cleanup;
+ 		}
+ 
+ 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
+ 		ret = nfp_init_app_fw_nic(pf_dev, dev_info);
+ 		if (ret != 0) {
+ 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
+-			goto hwqueues_cleanup;
++			goto mac_stats_cleanup;
+ 		}
+ 		break;
+ 	case NFP_APP_FW_FLOWER_NIC:
+@@ -1288,13 +1372,13 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
+ 		ret = nfp_init_app_fw_flower(pf_dev, dev_info);
+ 		if (ret != 0) {
+ 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
+-			goto hwqueues_cleanup;
++			goto mac_stats_cleanup;
+ 		}
+ 		break;
+ 	default:
+ 		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
+ 		ret = -EINVAL;
+-		goto hwqueues_cleanup;
++		goto mac_stats_cleanup;
+ 	}
+ 
+ 	/* Register the CPP bridge service here for primary use */
+@@ -1304,13 +1388,18 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
+ 
+ 	return 0;
+ 
++mac_stats_cleanup:
++	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
+ hwqueues_cleanup:
+-	nfp_cpp_area_free(pf_dev->qc_area);
++	nfp_cpp_area_release_free(pf_dev->qc_area);
+ sym_tbl_cleanup:
+ 	free(sym_tbl);
+ fw_cleanup:
+ 	nfp_fw_unload(cpp);
+-	nfp_net_keepalive_stop(&pf_dev->multi_pf);
++	if (pf_dev->multi_pf.enabled) {
++		nfp_net_keepalive_stop(&pf_dev->multi_pf);
++		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
++	}
+ eth_table_cleanup:
+ 	free(nfp_eth_table);
+ hwinfo_cleanup:
+@@ -1437,7 +1526,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
+ 	if (sym_tbl == NULL) {
+ 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
+ 		ret = -EIO;
+-		goto pf_cleanup;
++		goto cpp_cleanup;
+ 	}
+ 
+ 	/* Read the app ID of the firmware loaded */
+@@ -1484,6 +1573,8 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
+ 
+ sym_tbl_cleanup:
+ 	free(sym_tbl);
++cpp_cleanup:
++	nfp_cpp_free(cpp);
+ pf_cleanup:
+ 	rte_free(pf_dev);
+ 
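
Note on the init/uninit hunks above: they extend the usual goto-ladder, where every acquired resource gains a cleanup label and error paths unwind in strict reverse order of acquisition. A minimal sketch of the idiom, with hypothetical resources in place of the driver's BARs and tables:

#include <stdlib.h>

struct dev {
	void *bar_a;
	void *bar_b;
};

static int
dev_init(struct dev *d)
{
	d->bar_a = malloc(64);
	if (d->bar_a == NULL)
		return -1;

	d->bar_b = malloc(64);
	if (d->bar_b == NULL)
		goto bar_a_cleanup;	/* undo only what succeeded */

	return 0;

bar_a_cleanup:
	free(d->bar_a);
	d->bar_a = NULL;

	return -1;
}
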
+diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c
+index 7927f53403..cfe7225ca5 100644
+--- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c
++++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c
+@@ -160,13 +160,17 @@ nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
+ static int
+ nfp_netvf_close(struct rte_eth_dev *dev)
+ {
++	struct nfp_net_hw *net_hw;
+ 	struct rte_pci_device *pci_dev;
+ 
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return 0;
+ 
++	net_hw = dev->data->dev_private;
+ 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ 
++	rte_free(net_hw->eth_xstats_base);
++
+ 	/*
+ 	 * We assume that the DPDK application is stopping all the
+ 	 * threads/queues before calling the device close function.
+@@ -284,8 +288,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
+ 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ 		return 0;
+ 
+-	rte_eth_copy_pci_info(eth_dev, pci_dev);
+-
+ 	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat",
+ 			sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0);
+ 	if (net_hw->eth_xstats_base == NULL) {
+@@ -323,7 +325,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
+ 	if (eth_dev->data->mac_addrs == NULL) {
+ 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
+ 		err = -ENOMEM;
+-		goto dev_err_ctrl_map;
++		goto free_xstats;
+ 	}
+ 
+ 	nfp_read_mac(hw);
+@@ -360,8 +362,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
+ 
+ 	return 0;
+ 
+-dev_err_ctrl_map:
+-		nfp_cpp_area_free(net_hw->ctrl_area);
++free_xstats:
++	rte_free(net_hw->eth_xstats_base);
+ 
+ 	return err;
+ }
+diff --git a/dpdk/drivers/net/nfp/nfp_flow.c b/dpdk/drivers/net/nfp/nfp_flow.c
+index f832b52d89..91ebee5db4 100644
+--- a/dpdk/drivers/net/nfp/nfp_flow.c
++++ b/dpdk/drivers/net/nfp/nfp_flow.c
+@@ -312,14 +312,14 @@ nfp_check_mask_add(struct nfp_flow_priv *priv,
+ 		ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id);
+ 		if (ret != 0)
+ 			return false;
++
++		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
+ 	} else {
+ 		/* Mask entry already exist */
+ 		mask_entry->ref_cnt++;
+ 		*mask_id = mask_entry->mask_id;
+ 	}
+ 
+-	*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
+-
+ 	return true;
+ }
+ 
+@@ -3658,7 +3658,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
+ 					ttl_tos_flag = true;
+ 				}
+ 			} else {
+-				nfp_flow_action_set_hl(position, action, ttl_tos_flag);
++				nfp_flow_action_set_hl(position, action, tc_hl_flag);
+ 				if (!tc_hl_flag) {
+ 					position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl);
+ 					tc_hl_flag = true;
+@@ -3675,7 +3675,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
+ 			break;
+ 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ 			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP");
+-			nfp_flow_action_set_tc(position, action, ttl_tos_flag);
++			nfp_flow_action_set_tc(position, action, tc_hl_flag);
+ 			if (!tc_hl_flag) {
+ 				position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl);
+ 				tc_hl_flag = true;
+@@ -3741,6 +3741,11 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
+ 		total_actions++;
+ 	}
+ 
++	if (nfp_flow->install_flag && total_actions == 0) {
++		PMD_DRV_LOG(ERR, "The action list is empty");
++		return -ENOTSUP;
++	}
++
+ 	if (drop_flag)
+ 		nfp_flow_meta->shortcut = rte_cpu_to_be_32(NFP_FL_SC_ACT_DROP);
+ 	else if (total_actions > 1)
+diff --git a/dpdk/drivers/net/nfp/nfp_ipsec.c b/dpdk/drivers/net/nfp/nfp_ipsec.c
+index 452947380e..b10cda570b 100644
+--- a/dpdk/drivers/net/nfp/nfp_ipsec.c
++++ b/dpdk/drivers/net/nfp/nfp_ipsec.c
+@@ -18,6 +18,7 @@
+ #include "nfp_rxtx.h"
+ 
+ #define NFP_UDP_ESP_PORT            4500
++#define NFP_ESP_IV_LENGTH           8
+ 
+ static const struct rte_cryptodev_capabilities nfp_crypto_caps[] = {
+ 	{
+@@ -521,10 +522,14 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg,
+ 	char *save;
+ 	char *iv_b;
+ 	char *iv_str;
+-	uint8_t *cfg_iv;
++	const rte_be32_t *iv_value;
++	uint8_t cfg_iv[NFP_ESP_IV_LENGTH] = {};
+ 
+ 	iv_str = strdup(iv_string);
+-	cfg_iv = (uint8_t *)cfg->aesgcm_fields.iv;
++	if (iv_str == NULL) {
++		PMD_DRV_LOG(ERR, "Failed to strdup iv_string");
++		return;
++	}
+ 
+ 	for (i = 0; i < iv_len; i++) {
+ 		iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
+@@ -534,8 +539,9 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg,
+ 		cfg_iv[i] = strtoul(iv_b, NULL, 0);
+ 	}
+ 
+-	*(uint32_t *)cfg_iv = rte_be_to_cpu_32(*(uint32_t *)cfg_iv);
+-	*(uint32_t *)&cfg_iv[4] = rte_be_to_cpu_32(*(uint32_t *)&cfg_iv[4]);
++	iv_value = (const rte_be32_t *)(cfg_iv);
++	cfg->aesgcm_fields.iv[0] = rte_be_to_cpu_32(iv_value[0]);
++	cfg->aesgcm_fields.iv[1] = rte_be_to_cpu_32(iv_value[1]);
+ 
+ 	free(iv_str);
+ }
+@@ -576,7 +582,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev,
+ 	uint32_t offset;
+ 	uint32_t device_id;
+ 	const char *iv_str;
+-	const uint32_t *key;
++	const rte_be32_t *key;
+ 	struct nfp_net_hw *net_hw;
+ 
+ 	net_hw = eth_dev->data->dev_private;
+@@ -626,7 +632,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	key = (const uint32_t *)(aead->key.data);
++	key = (const rte_be32_t *)(aead->key.data);
+ 
+ 	/*
+ 	 * The CHACHA20's key order needs to be adjusted based on hardware design.
+@@ -638,16 +644,22 @@ nfp_aead_map(struct rte_eth_dev *eth_dev,
+ 
+ 	for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) {
+ 		index = (i + offset) % (key_length / sizeof(cfg->cipher_key[0]));
+-		cfg->cipher_key[index] = rte_cpu_to_be_32(*key++);
++		cfg->cipher_key[index] = rte_be_to_cpu_32(key[i]);
+ 	}
+ 
+ 	/*
+-	 * The iv of the FW is equal to ESN by default. Reading the
+-	 * iv of the configuration information is not supported.
++	 * The IV of the FW is equal to the ESN by default. Only the
++	 * AEAD algorithm can offload the IV from the configuration, and
++	 * the IV length cannot be greater than NFP_ESP_IV_LENGTH.
+ 	 */
+ 	iv_str = getenv("ETH_SEC_IV_OVR");
+ 	if (iv_str != NULL) {
+ 		iv_len = aead->iv.length;
++		if (iv_len > NFP_ESP_IV_LENGTH) {
++			PMD_DRV_LOG(ERR, "Unsupported length of iv data");
++			return -EINVAL;
++		}
++
+ 		nfp_aesgcm_iv_update(cfg, iv_len, iv_str);
+ 	}
+ 
+@@ -664,7 +676,7 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev,
+ 	int ret;
+ 	uint32_t i;
+ 	uint32_t device_id;
+-	const uint32_t *key;
++	const rte_be32_t *key;
+ 	struct nfp_net_hw *net_hw;
+ 
+ 	net_hw = eth_dev->data->dev_private;
+@@ -698,14 +710,14 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	key = (const uint32_t  *)(cipher->key.data);
++	key = (const rte_be32_t *)(cipher->key.data);
+ 	if (key_length > sizeof(cfg->cipher_key)) {
+ 		PMD_DRV_LOG(ERR, "Insufficient space for offloaded key");
+ 		return -EINVAL;
+ 	}
+ 
+ 	for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++)
+-		cfg->cipher_key[i] = rte_cpu_to_be_32(*key++);
++		cfg->cipher_key[i] = rte_be_to_cpu_32(key[i]);
+ 
+ 	return 0;
+ }
+@@ -800,7 +812,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev,
+ 	uint32_t i;
+ 	uint8_t key_length;
+ 	uint32_t device_id;
+-	const uint32_t *key;
++	const rte_be32_t *key;
+ 	struct nfp_net_hw *net_hw;
+ 
+ 	if (digest_length == 0) {
+@@ -847,7 +859,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	key = (const uint32_t *)(auth->key.data);
++	key = (const rte_be32_t *)(auth->key.data);
+ 	key_length = auth->key.length;
+ 	if (key_length > sizeof(cfg->auth_key)) {
+ 		PMD_DRV_LOG(ERR, "Insufficient space for offloaded auth key!");
+@@ -855,7 +867,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev,
+ 	}
+ 
+ 	for (i = 0; i < key_length / sizeof(cfg->auth_key[0]); i++)
+-		cfg->auth_key[i] = rte_cpu_to_be_32(*key++);
++		cfg->auth_key[i] = rte_be_to_cpu_32(key[i]);
+ 
+ 	return 0;
+ }
+@@ -895,7 +907,7 @@ nfp_crypto_msg_build(struct rte_eth_dev *eth_dev,
+ 			return ret;
+ 		}
+ 
+-		cfg->aesgcm_fields.salt = rte_cpu_to_be_32(conf->ipsec.salt);
++		cfg->aesgcm_fields.salt = conf->ipsec.salt;
+ 		break;
+ 	case RTE_CRYPTO_SYM_XFORM_AUTH:
+ 		/* Only support Auth + Cipher for inbound */
+@@ -960,7 +972,10 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev,
+ 		struct rte_security_session_conf *conf,
+ 		struct nfp_ipsec_msg *msg)
+ {
++	int i;
+ 	int ret;
++	rte_be32_t *src_ip;
++	rte_be32_t *dst_ip;
+ 	struct ipsec_add_sa *cfg;
+ 	enum rte_security_ipsec_tunnel_type type;
+ 
+@@ -1018,12 +1033,18 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev,
+ 		type = conf->ipsec.tunnel.type;
+ 		cfg->ctrl_word.mode = NFP_IPSEC_MODE_TUNNEL;
+ 		if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+-			cfg->src_ip.v4 = conf->ipsec.tunnel.ipv4.src_ip;
+-			cfg->dst_ip.v4 = conf->ipsec.tunnel.ipv4.dst_ip;
++			src_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.src_ip.s_addr;
++			dst_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.dst_ip.s_addr;
++			cfg->src_ip[0] = rte_be_to_cpu_32(src_ip[0]);
++			cfg->dst_ip[0] = rte_be_to_cpu_32(dst_ip[0]);
+ 			cfg->ipv6 = 0;
+ 		} else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
+-			cfg->src_ip.v6 = conf->ipsec.tunnel.ipv6.src_addr;
+-			cfg->dst_ip.v6 = conf->ipsec.tunnel.ipv6.dst_addr;
++			src_ip = (rte_be32_t *)conf->ipsec.tunnel.ipv6.src_addr.s6_addr;
++			dst_ip = (rte_be32_t *)conf->ipsec.tunnel.ipv6.dst_addr.s6_addr;
++			for (i = 0; i < 4; i++) {
++				cfg->src_ip[i] = rte_be_to_cpu_32(src_ip[i]);
++				cfg->dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
++			}
+ 			cfg->ipv6 = 1;
+ 		} else {
+ 			PMD_DRV_LOG(ERR, "Unsupported address family!");
+@@ -1036,9 +1057,11 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev,
+ 		cfg->ctrl_word.mode = NFP_IPSEC_MODE_TRANSPORT;
+ 		if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+ 			memset(&cfg->src_ip, 0, sizeof(cfg->src_ip));
++			memset(&cfg->dst_ip, 0, sizeof(cfg->dst_ip));
+ 			cfg->ipv6 = 0;
+ 		} else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
+ 			memset(&cfg->src_ip, 0, sizeof(cfg->src_ip));
++			memset(&cfg->dst_ip, 0, sizeof(cfg->dst_ip));
+ 			cfg->ipv6 = 1;
+ 		} else {
+ 			PMD_DRV_LOG(ERR, "Unsupported address family!");
+@@ -1172,18 +1195,18 @@ nfp_security_set_pkt_metadata(void *device,
+ 		desc_md = RTE_MBUF_DYNFIELD(m, offset, struct nfp_tx_ipsec_desc_msg *);
+ 
+ 		if (priv_session->msg.ctrl_word.ext_seq != 0 && sqn != NULL) {
+-			desc_md->esn.low = rte_cpu_to_be_32(*sqn);
+-			desc_md->esn.hi = rte_cpu_to_be_32(*sqn >> 32);
++			desc_md->esn.low = (uint32_t)*sqn;
++			desc_md->esn.hi = (uint32_t)(*sqn >> 32);
+ 		} else if (priv_session->msg.ctrl_word.ext_seq != 0) {
+-			desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.low);
+-			desc_md->esn.hi = rte_cpu_to_be_32(priv_session->ipsec.esn.hi);
++			desc_md->esn.low = priv_session->ipsec.esn.low;
++			desc_md->esn.hi = priv_session->ipsec.esn.hi;
+ 		} else {
+-			desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.value);
++			desc_md->esn.low = priv_session->ipsec.esn.low;
+ 			desc_md->esn.hi = 0;
+ 		}
+ 
+ 		desc_md->enc = 1;
+-		desc_md->sa_idx = rte_cpu_to_be_32(priv_session->sa_index);
++		desc_md->sa_idx = priv_session->sa_index;
+ 	}
+ 
+ 	return 0;
+diff --git a/dpdk/drivers/net/nfp/nfp_ipsec.h b/dpdk/drivers/net/nfp/nfp_ipsec.h
+index d7a729398a..f7c4f3f225 100644
+--- a/dpdk/drivers/net/nfp/nfp_ipsec.h
++++ b/dpdk/drivers/net/nfp/nfp_ipsec.h
+@@ -36,11 +36,6 @@ struct sa_ctrl_word {
+ 	uint32_t spare2 :1;      /**< Must be set to 0 */
+ };
+ 
+-union nfp_ip_addr {
+-	struct in6_addr v6;
+-	struct in_addr v4;
+-};
+-
+ struct ipsec_add_sa {
+ 	uint32_t cipher_key[8];           /**< Cipher Key */
+ 	union {
+@@ -60,8 +55,8 @@ struct ipsec_add_sa {
+ 	uint8_t spare1;
+ 	uint32_t soft_byte_cnt;           /**< Soft lifetime byte count */
+ 	uint32_t hard_byte_cnt;           /**< Hard lifetime byte count */
+-	union nfp_ip_addr src_ip;         /**< Src IP addr */
+-	union nfp_ip_addr dst_ip;         /**< Dst IP addr */
++	uint32_t src_ip[4];               /**< Src IP addr */
++	uint32_t dst_ip[4];               /**< Dst IP addr */
+ 	uint16_t natt_dst_port;           /**< NAT-T UDP Header dst port */
+ 	uint16_t natt_src_port;           /**< NAT-T UDP Header src port */
+ 	uint32_t soft_lifetime_limit;     /**< Soft lifetime time limit */
+diff --git a/dpdk/drivers/net/nfp/nfp_net_common.c b/dpdk/drivers/net/nfp/nfp_net_common.c
+index e969b840d6..0491912bd3 100644
+--- a/dpdk/drivers/net/nfp/nfp_net_common.c
++++ b/dpdk/drivers/net/nfp/nfp_net_common.c
+@@ -189,9 +189,6 @@ nfp_net_notify_port_speed(struct nfp_net_hw *hw,
+ 			nfp_net_link_speed_rte2nfp(link->link_speed));
+ }
+ 
+-/* The length of firmware version string */
+-#define FW_VER_LEN        32
+-
+ /**
+  * Reconfigure the firmware via the mailbox
+  *
+@@ -1299,6 +1296,7 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
+ 		RTE_PTYPE_INNER_L4_NONFRAG,
+ 		RTE_PTYPE_INNER_L4_ICMP,
+ 		RTE_PTYPE_INNER_L4_SCTP,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 
+ 	if (dev->rx_pkt_burst != nfp_net_recv_pkts)
+@@ -2062,17 +2060,22 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev,
+ 		size_t fw_size)
+ {
+ 	struct nfp_net_hw *hw;
+-	char mip_name[FW_VER_LEN];
+-	char app_name[FW_VER_LEN];
+-	char nsp_version[FW_VER_LEN];
+-	char vnic_version[FW_VER_LEN];
++	char app_name[FW_VER_LEN] = {0};
++	char mip_name[FW_VER_LEN] = {0};
++	char nsp_version[FW_VER_LEN] = {0};
++	char vnic_version[FW_VER_LEN] = {0};
+ 
+ 	if (fw_size < FW_VER_LEN)
+ 		return FW_VER_LEN;
+ 
+ 	hw = nfp_net_get_hw(dev);
+ 
+-	if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) {
++	if (hw->fw_version[0] != 0) {
++		snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
++		return 0;
++	}
++
++	if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) {
+ 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
+ 			hw->ver.extend, hw->ver.class,
+ 			hw->ver.major, hw->ver.minor);
+@@ -2084,8 +2087,16 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev,
+ 	nfp_net_get_mip_name(hw, mip_name);
+ 	nfp_net_get_app_name(hw, app_name);
+ 
+-	snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
++	if (nsp_version[0] == 0 || mip_name[0] == 0) {
++		snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
+ 			vnic_version, nsp_version, mip_name, app_name);
++		return 0;
++	}
++
++	snprintf(hw->fw_version, FW_VER_LEN, "%s %s %s %s",
++			vnic_version, nsp_version, mip_name, app_name);
++
++	snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
+ 
+ 	return 0;
+ }
+@@ -2249,3 +2260,13 @@ nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
+ 
+ 	return 0;
+ }
++
++uint32_t
++nfp_net_get_port_num(struct nfp_pf_dev *pf_dev,
++		struct nfp_eth_table *nfp_eth_table)
++{
++	if (pf_dev->multi_pf.enabled)
++		return 1;
++	else
++		return nfp_eth_table->count;
++}
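
Note on the RTE_PTYPE_UNKNOWN additions (here and in the pfe, tap, and thunderx hunks below): dev_supported_ptypes_get() returns a bare pointer, and ethdev walks the array until it finds that terminator, so an unterminated array is read out of bounds. A sketch of the sentinel contract, with stand-in constants instead of the rte_mbuf ptype macros:

#include <stddef.h>
#include <stdint.h>

#define PTYPE_UNKNOWN	0u	/* plays the role of RTE_PTYPE_UNKNOWN */

static const uint32_t supported_ptypes[] = {
	0x1, 0x2, 0x3,		/* driver-specific packet types */
	PTYPE_UNKNOWN		/* mandatory terminator */
};

static size_t
count_ptypes(const uint32_t *ptypes)
{
	size_t n = 0;

	while (ptypes[n] != PTYPE_UNKNOWN)
		n++;		/* safe only because of the terminator */

	return n;
}
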
+diff --git a/dpdk/drivers/net/nfp/nfp_net_common.h b/dpdk/drivers/net/nfp/nfp_net_common.h
+index 30fea7ae02..41d59bfa99 100644
+--- a/dpdk/drivers/net/nfp/nfp_net_common.h
++++ b/dpdk/drivers/net/nfp/nfp_net_common.h
+@@ -38,6 +38,9 @@
+ 
+ #define NFP_BEAT_LENGTH         8
+ 
++/* The length of firmware version string */
++#define FW_VER_LEN        32
++
+ /*
+  * Each PF has corresponding word to beat:
+  * Offset | Usage
+@@ -98,6 +101,9 @@ struct nfp_pf_dev {
+ 
+ 	uint8_t *qc_bar;
+ 
++	struct nfp_cpp_area *mac_stats_area;
++	uint8_t *mac_stats_bar;
++
+ 	struct nfp_hwinfo *hwinfo;
+ 	struct nfp_rtsym_table *sym_tbl;
+ 
+@@ -165,8 +171,6 @@ struct nfp_net_hw {
+ 
+ 	struct nfp_cpp *cpp;
+ 	struct nfp_cpp_area *ctrl_area;
+-	struct nfp_cpp_area *mac_stats_area;
+-	uint8_t *mac_stats_bar;
+ 	uint8_t *mac_stats;
+ 
+ 	/** Sequential physical port number, only valid for CoreNIC firmware */
+@@ -177,6 +181,9 @@ struct nfp_net_hw {
+ 	struct nfp_net_tlv_caps tlv_caps;
+ 
+ 	struct nfp_net_ipsec_data *ipsec_data;
++
++	/** Used for firmware version */
++	char fw_version[FW_VER_LEN];
+ };
+ 
+ static inline uint32_t
+@@ -272,6 +279,9 @@ int nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
+ 		struct rte_eth_fc_conf *fc_conf);
+ int nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
+ 		struct rte_eth_fc_conf *fc_conf);
++void nfp_pf_uninit(struct nfp_pf_dev *pf_dev);
++uint32_t nfp_net_get_port_num(struct nfp_pf_dev *pf_dev,
++		struct nfp_eth_table *nfp_eth_table);
+ 
+ #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
+ 	((struct nfp_app_fw_nic *)app_fw_priv)
+diff --git a/dpdk/drivers/net/nfp/nfp_rxtx.c b/dpdk/drivers/net/nfp/nfp_rxtx.c
+index f21e120a43..8ca651ba55 100644
+--- a/dpdk/drivers/net/nfp/nfp_rxtx.c
++++ b/dpdk/drivers/net/nfp/nfp_rxtx.c
+@@ -747,15 +747,6 @@ nfp_net_recv_pkts(void *rx_queue,
+ 		/* Checking the checksum flag */
+ 		nfp_net_rx_cksum(rxq, rxds, mb);
+ 
+-		if (meta.port_id == 0) {
+-			rx_pkts[avail++] = mb;
+-		} else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) {
+-			avail_multiplexed++;
+-		} else {
+-			rte_pktmbuf_free(mb);
+-			break;
+-		}
+-
+ 		/* Now resetting and updating the descriptor */
+ 		rxds->vals[0] = 0;
+ 		rxds->vals[1] = 0;
+@@ -768,6 +759,15 @@ nfp_net_recv_pkts(void *rx_queue,
+ 		rxq->rd_p++;
+ 		if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */
+ 			rxq->rd_p = 0;
++
++		if (meta.port_id == 0) {
++			rx_pkts[avail++] = mb;
++		} else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) {
++			avail_multiplexed++;
++		} else {
++			rte_pktmbuf_free(mb);
++			break;
++		}
+ 	}
+ 
+ 	if (nb_hold == 0)
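
Note on the RX hunk above: descriptor recycling is moved ahead of the dispatch decision so a mid-loop break cannot strand a consumed slot. A simplified sketch of that ordering, with illustrative names:

#include <stdbool.h>

#define RING_SIZE 4

static int rd_p;		/* read pointer into the descriptor ring */

static bool
dispatch(int pkt)
{
	return pkt != 3;	/* pretend packet 3 cannot be delivered */
}

static int
recv_loop(void)
{
	int got = 0;

	for (int pkt = 0; pkt < RING_SIZE; pkt++) {
		/* recycle the ring slot first ... */
		rd_p = (rd_p + 1) % RING_SIZE;

		/* ... then decide the packet's fate */
		if (!dispatch(pkt))
			break;

		got++;
	}

	return got;
}
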
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c b/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c
+index a6fd89b6c8..ef1ffd6d01 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c
+@@ -263,19 +263,6 @@ nfp_bitsize_calc(uint64_t mask)
+ 	return bit_size;
+ }
+ 
+-static int
+-nfp_cmp_bars(const void *ptr_a,
+-		const void *ptr_b)
+-{
+-	const struct nfp_bar *a = ptr_a;
+-	const struct nfp_bar *b = ptr_b;
+-
+-	if (a->bitsize == b->bitsize)
+-		return a->index - b->index;
+-	else
+-		return a->bitsize - b->bitsize;
+-}
+-
+ static bool
+ nfp_bars_for_secondary(uint32_t index)
+ {
+@@ -383,9 +370,6 @@ nfp_enable_bars(struct nfp_pcie_user *nfp)
+ 	if (nfp_bar_write(nfp, bar, barcfg_msix_general) < 0)
+ 		return -EIO;
+ 
+-	/* Sort bars by bit size - use the smallest possible first. */
+-	qsort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]), nfp_cmp_bars);
+-
+ 	return 0;
+ }
+ 
+@@ -466,16 +450,18 @@ find_matching_bar(struct nfp_pcie_user *nfp,
+ 		int width)
+ {
+ 	uint32_t n;
++	uint32_t index;
+ 
+-	for (n = 0; n < nfp->bars; n++) {
+-		struct nfp_bar *bar = &nfp->bar[n];
++	for (n = RTE_DIM(nfp->bar); n > 0; n--) {
++		index = n - 1;
++		struct nfp_bar *bar = &nfp->bar[index];
+ 
+ 		if (bar->lock)
+ 			continue;
+ 
+ 		if (matching_bar_exist(bar, target, action, token,
+ 				offset, size, width))
+-			return n;
++			return index;
+ 	}
+ 
+ 	return -1;
+@@ -493,10 +479,12 @@ find_unused_bar_noblock(struct nfp_pcie_user *nfp,
+ {
+ 	int ret;
+ 	uint32_t n;
++	uint32_t index;
+ 	const struct nfp_bar *bar;
+ 
+-	for (n = 0; n < nfp->bars; n++) {
+-		bar = &nfp->bar[n];
++	for (n = RTE_DIM(nfp->bar); n > 0; n--) {
++		index = n - 1;
++		bar = &nfp->bar[index];
+ 
+ 		if (bar->bitsize == 0)
+ 			continue;
+@@ -508,7 +496,7 @@ find_unused_bar_noblock(struct nfp_pcie_user *nfp,
+ 			continue;
+ 
+ 		if (!bar->lock)
+-			return n;
++			return index;
+ 	}
+ 
+ 	return -EAGAIN;
+@@ -561,7 +549,7 @@ nfp_disable_bars(struct nfp_pcie_user *nfp)
+ 	uint32_t i;
+ 	struct nfp_bar *bar;
+ 
+-	for (i = 0; i < nfp->bars; i++) {
++	for (i = 0; i < RTE_DIM(nfp->bar); i++) {
+ 		bar = &nfp->bar[i];
+ 		if (bar->iomem != NULL) {
+ 			bar->iomem = NULL;
+diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c
+index 3c10c7a090..edb78dfdc9 100644
+--- a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c
++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c
+@@ -168,7 +168,7 @@ nfp_cpp_mutex_alloc(struct nfp_cpp *cpp,
+ 	if (tmp != key)
+ 		return NULL;
+ 
+-	mutex = calloc(sizeof(*mutex), 1);
++	mutex = calloc(1, sizeof(*mutex));
+ 	if (mutex == NULL)
+ 		return NULL;
+ 
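
Note on the one-line calloc() hunk above: it swaps the arguments into their documented order, calloc(nmemb, size). Both orders yield the same allocation, but recent GCC warns (via -Wcalloc-transposed-args, presumably the motivating toolchain) when a sizeof expression appears as the first argument. Minimal illustration:

#include <stdlib.h>

struct mutex_like {
	int owner;
	int depth;
};

int
main(void)
{
	/* one element of sizeof(struct mutex_like) bytes, zeroed */
	struct mutex_like *m = calloc(1, sizeof(*m));

	if (m == NULL)
		return 1;

	free(m);
	return 0;
}
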
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_devids.h b/dpdk/drivers/net/ngbe/base/ngbe_devids.h
+index 83eedf423e..e1efa62015 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_devids.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_devids.h
+@@ -83,6 +83,7 @@
+ #define NGBE_YT8521S_SFP_GPIO			0x0062
+ #define NGBE_INTERNAL_YT8521S_SFP_GPIO		0x0064
+ #define NGBE_LY_YT8521S_SFP			0x0070
++#define NGBE_RGMII_FPGA				0x0080
+ #define NGBE_WOL_SUP				0x4000
+ #define NGBE_NCSI_SUP				0x8000
+ 
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.c b/dpdk/drivers/net/ngbe/base/ngbe_hw.c
+index 22ccdb0b7d..4dced0d328 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_hw.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.c
+@@ -173,6 +173,9 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
+ 	ngbe_reset_misc_em(hw);
+ 	hw->mac.clear_hw_cntrs(hw);
+ 
++	if ((hw->sub_device_id & NGBE_OEM_MASK) != NGBE_RGMII_FPGA)
++		hw->phy.set_phy_power(hw, false);
++
+ 	msec_delay(50);
+ 
+ 	/* Store the permanent mac address */
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c
+index ea313cd9a5..a374b015fd 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c
++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c
+@@ -320,6 +320,10 @@ skip_an_fiber:
+ 			value |= value_r4;
+ 			ngbe_write_phy_reg_mdi(hw, YT_ANA, 0, value);
+ 
++			/* config for yt8531sh-ca */
++			ngbe_write_phy_reg_ext_yt(hw, YT_SPEC_CONF, 0,
++						YT_SPEC_CONF_8531SH_CA);
++
+ 			/* software reset to make the above configuration
+ 			 * take effect
+ 			 */
+diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h
+index ddf992e79a..c45bec7ce7 100644
+--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h
++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h
+@@ -32,6 +32,8 @@
+ #define YT_MISC				0xA006
+ #define   YT_MISC_FIBER_PRIO		MS16(8, 0x1) /* 0 for UTP */
+ #define   YT_MISC_RESV			MS16(0, 0x1)
++#define YT_SPEC_CONF			0xA023
++#define   YT_SPEC_CONF_8531SH_CA	0x4031
+ 
+ /* SDS EXT */
+ #define YT_AUTO				0xA5
+diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c
+index 478da014b2..fb86e7b10d 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c
++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c
+@@ -546,7 +546,7 @@ static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
+ 	if (ethdev == NULL)
+ 		return 0;
+ 
+-	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
++	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit);
+ }
+ 
+ static struct rte_pci_driver rte_ngbe_pmd = {
+@@ -1811,7 +1811,9 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ 	dev_info->min_rx_bufsize = 1024;
+-	dev_info->max_rx_pktlen = 15872;
++	dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD;
++	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
++	dev_info->max_mtu = NGBE_MAX_MTU;
+ 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
+ 	dev_info->max_vfs = pci_dev->max_vfs;
+diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h
+index 3cde7c8750..9b43d5f20e 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h
++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.h
+@@ -32,6 +32,7 @@
+ 
+ #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */
+ 
++#define NGBE_MAX_MTU		9414
+ /* The overhead from MTU to max frame size. */
+ #define NGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+ 
+diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c
+index 8a873b858e..4680ff91f1 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c
++++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.c
+@@ -1791,6 +1791,7 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
+ 		if (txq->ops != NULL) {
+ 			txq->ops->release_mbufs(txq);
+ 			txq->ops->free_swring(txq);
++			rte_memzone_free(txq->mz);
+ 		}
+ 		rte_free(txq);
+ 	}
+@@ -1995,6 +1996,7 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 		return -ENOMEM;
+ 	}
+ 
++	txq->mz = tz;
+ 	txq->nb_tx_desc = nb_desc;
+ 	txq->tx_free_thresh = tx_free_thresh;
+ 	txq->pthresh = tx_conf->tx_thresh.pthresh;
+@@ -2097,6 +2099,7 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
+ 		ngbe_rx_queue_release_mbufs(rxq);
+ 		rte_free(rxq->sw_ring);
+ 		rte_free(rxq->sw_sc_ring);
++		rte_memzone_free(rxq->mz);
+ 		rte_free(rxq);
+ 	}
+ }
+@@ -2187,6 +2190,7 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
+ 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+ 	rxq->rx_tail = 0;
+ 	rxq->nb_rx_hold = 0;
++	rte_pktmbuf_free(rxq->pkt_first_seg);
+ 	rxq->pkt_first_seg = NULL;
+ 	rxq->pkt_last_seg = NULL;
+ }
+@@ -2277,6 +2281,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ 		return -ENOMEM;
+ 	}
+ 
++	rxq->mz = rz;
+ 	/*
+ 	 * Zero init all the descriptors in the ring.
+ 	 */
+diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.h b/dpdk/drivers/net/ngbe/ngbe_rxtx.h
+index 9130f9d0df..2914b9a756 100644
+--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.h
++++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.h
+@@ -276,6 +276,7 @@ struct ngbe_rx_queue {
+ 	struct rte_mbuf fake_mbuf;
+ 	/** hold packets to return to application */
+ 	struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
++	const struct rte_memzone *mz;
+ };
+ 
+ /**
+@@ -353,6 +354,7 @@ struct ngbe_tx_queue {
+ 	uint8_t              tx_deferred_start; /**< not in global dev start */
+ 
+ 	const struct ngbe_txq_ops *ops;       /**< txq ops */
++	const struct rte_memzone *mz;
+ };
+ 
+ struct ngbe_txq_ops {
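
Note on the ngbe memzone hunks above: the leak is fixed by recording the descriptor-ring memzone handle in the queue structure at setup time and returning it in the release path alongside the software rings. A simplified sketch with stand-in types; free() stands in for rte_memzone_free():

#include <stdlib.h>

struct memzone {
	void *addr;
};

struct rx_queue {
	void *sw_ring;
	struct memzone *mz;	/* descriptor ring zone, remembered for release */
};

static void
rx_queue_release(struct rx_queue *rxq)
{
	if (rxq == NULL)
		return;

	free(rxq->sw_ring);
	free(rxq->mz);		/* previously leaked on queue release */
	free(rxq);
}
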
+diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c
+index 551f3cf193..0073dd7405 100644
+--- a/dpdk/drivers/net/pfe/pfe_ethdev.c
++++ b/dpdk/drivers/net/pfe/pfe_ethdev.c
+@@ -520,7 +520,8 @@ pfe_supported_ptypes_get(struct rte_eth_dev *dev)
+ 		RTE_PTYPE_L3_IPV6_EXT,
+ 		RTE_PTYPE_L4_TCP,
+ 		RTE_PTYPE_L4_UDP,
+-		RTE_PTYPE_L4_SCTP
++		RTE_PTYPE_L4_SCTP,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 
+ 	if (dev->rx_pkt_burst == pfe_recv_pkts ||
+diff --git a/dpdk/drivers/net/sfc/sfc_mae.c b/dpdk/drivers/net/sfc/sfc_mae.c
+index e5ec0ae49d..60ff6d2181 100644
+--- a/dpdk/drivers/net/sfc/sfc_mae.c
++++ b/dpdk/drivers/net/sfc/sfc_mae.c
+@@ -1350,8 +1350,8 @@ sfc_mae_action_set_list_add(struct sfc_adapter *sa,
+ 
+ 	action_set_list->action_sets =
+ 		rte_calloc("sfc_mae_action_set_list_action_sets",
+-			   sizeof(struct sfc_mae_action_set *),
+-			   action_set_list->nb_action_sets, 0);
++			   action_set_list->nb_action_sets,
++			   sizeof(struct sfc_mae_action_set *), 0);
+ 	if (action_set_list->action_sets == NULL) {
+ 		sfc_err(sa, "failed to allocate action set list");
+ 		rte_free(action_set_list);
+diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c b/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c
+index 085523fe03..95e705c553 100644
+--- a/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c
++++ b/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c
+@@ -337,6 +337,7 @@ cmd_softnic_pipeline_libbuild(struct pmd_internals *softnic __rte_unused,
+ 		 "-I %s/lib/eal/include "
+ 		 "-I %s/lib/eal/x86/include "
+ 		 "-I %s/lib/eal/include/generic "
++		 "-I %s/lib/log "
+ 		 "-I %s/lib/meter "
+ 		 "-I %s/lib/port "
+ 		 "-I %s/lib/table "
+@@ -361,6 +362,7 @@ cmd_softnic_pipeline_libbuild(struct pmd_internals *softnic __rte_unused,
+ 		 install_dir,
+ 		 install_dir,
+ 		 install_dir,
++		 install_dir,
+ 		 log_file,
+ 		 obj_file,
+ 		 lib_file,
+diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c
+index b41fa971cb..3fa03cdbee 100644
+--- a/dpdk/drivers/net/tap/rte_eth_tap.c
++++ b/dpdk/drivers/net/tap/rte_eth_tap.c
+@@ -1803,6 +1803,7 @@ tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+ 		RTE_PTYPE_L4_UDP,
+ 		RTE_PTYPE_L4_TCP,
+ 		RTE_PTYPE_L4_SCTP,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 
+ 	return ptypes;
+diff --git a/dpdk/drivers/net/tap/tap_flow.c b/dpdk/drivers/net/tap/tap_flow.c
+index ed4d42f92f..79cd6a12ca 100644
+--- a/dpdk/drivers/net/tap/tap_flow.c
++++ b/dpdk/drivers/net/tap/tap_flow.c
+@@ -11,6 +11,7 @@
+ 
+ #include <rte_byteorder.h>
+ #include <rte_jhash.h>
++#include <rte_random.h>
+ #include <rte_malloc.h>
+ #include <rte_eth_tap.h>
+ #include <tap_flow.h>
+@@ -1082,8 +1083,11 @@ priv_flow_process(struct pmd_internals *pmd,
+ 		}
+ 		/* use flower filter type */
+ 		tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
+-		if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
+-			goto exit_item_not_supported;
++		if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0) {
++			rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
++					   actions, "could not allocate netlink msg");
++			goto exit_return_error;
++		}
+ 	}
+ 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ 		const struct tap_flow_items *token = NULL;
+@@ -1199,9 +1203,12 @@ actions:
+ 			if (action)
+ 				goto exit_action_not_supported;
+ 			action = 1;
+-			if (!queue ||
+-			    (queue->index > pmd->dev->data->nb_rx_queues - 1))
+-				goto exit_action_not_supported;
++			if (queue->index >= pmd->dev->data->nb_rx_queues) {
++				rte_flow_error_set(error, ERANGE,
++						   RTE_FLOW_ERROR_TYPE_ACTION, actions,
++						   "queue index out of range");
++				goto exit_return_error;
++			}
+ 			if (flow) {
+ 				struct action_data adata = {
+ 					.id = "skbedit",
+@@ -1227,7 +1234,7 @@ actions:
+ 			if (!pmd->rss_enabled) {
+ 				err = rss_enable(pmd, attr, error);
+ 				if (err)
+-					goto exit_action_not_supported;
++					goto exit_return_error;
+ 			}
+ 			if (flow)
+ 				err = rss_add_actions(flow, pmd, rss, error);
+@@ -1235,7 +1242,7 @@ actions:
+ 			goto exit_action_not_supported;
+ 		}
+ 		if (err)
+-			goto exit_action_not_supported;
++			goto exit_return_error;
+ 	}
+ 	/* When fate is unknown, drop traffic. */
+ 	if (!action) {
+@@ -1258,6 +1265,7 @@ exit_item_not_supported:
+ exit_action_not_supported:
+ 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ 			   actions, "action not supported");
++exit_return_error:
+ 	return -rte_errno;
+ }
+ 
+@@ -1290,9 +1298,7 @@ tap_flow_validate(struct rte_eth_dev *dev,
+  * In those rules, the handle (uint32_t) is the part that would identify
+  * specifically each rule.
+  *
+- * On 32-bit architectures, the handle can simply be the flow's pointer address.
+- * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
+- * unique handle.
++ * Use jhash of the flow pointer to make a unique handle.
+  *
+  * @param[in, out] flow
+  *   The flow that needs its handle set.
+@@ -1302,16 +1308,18 @@ tap_flow_set_handle(struct rte_flow *flow)
+ {
+ 	union {
+ 		struct rte_flow *flow;
+-		const void *key;
+-	} tmp;
+-	uint32_t handle = 0;
++		uint32_t words[sizeof(flow) / sizeof(uint32_t)];
++	} tmp = {
++		.flow = flow,
++	};
++	uint32_t handle;
++	static uint64_t hash_seed;
+ 
+-	tmp.flow = flow;
++	if (hash_seed == 0)
++		hash_seed = rte_rand();
++
++	handle = rte_jhash_32b(tmp.words, sizeof(flow) / sizeof(uint32_t), hash_seed);
+ 
+-	if (sizeof(flow) > 4)
+-		handle = rte_jhash(tmp.key, sizeof(flow), 1);
+-	else
+-		handle = (uintptr_t)flow;
+ 	/* must be at least 1 to avoid letting the kernel choose one for us */
+ 	if (!handle)
+ 		handle = 1;
+@@ -1587,7 +1595,7 @@ tap_flow_isolate(struct rte_eth_dev *dev,
+ 	 * If netdevice is there, setup appropriate flow rules immediately.
+ 	 * Otherwise it will be set when bringing up the netdevice (tun_alloc).
+ 	 */
+-	if (!process_private->rxq_fds[0])
++	if (process_private->rxq_fds[0] == -1)
+ 		return 0;
+ 	if (set) {
+ 		struct rte_flow *remote_flow;
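
Note on tap_flow_set_handle() above: it now hashes the flow pointer with a per-process random seed and reserves handle 0 so the kernel never auto-assigns one. A portable sketch of the derivation; the simple mixer and random(3) seeding stand in for rte_jhash_32b() and rte_rand():

#include <stdint.h>
#include <stdlib.h>

static uint32_t handle_seed;	/* would come from rte_rand() */

static uint32_t
flow_handle(const void *flow)
{
	uint64_t p = (uintptr_t)flow;
	uint32_t h;

	if (handle_seed == 0)
		handle_seed = (uint32_t)random() | 1;	/* lazy, non-zero seed */

	h = (uint32_t)p ^ (uint32_t)(p >> 32) ^ handle_seed;

	/* must be at least 1 to avoid letting the kernel choose one */
	if (h == 0)
		h = 1;

	return h;
}
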
+diff --git a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c b/dpdk/drivers/net/thunderx/base/nicvf_mbox.c
+index 5993eec4e6..0e0176974d 100644
+--- a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c
++++ b/dpdk/drivers/net/thunderx/base/nicvf_mbox.c
+@@ -485,3 +485,15 @@ nicvf_mbox_reset_xcast(struct nicvf *nic)
+ 	mbx.msg.msg = NIC_MBOX_MSG_RESET_XCAST;
+ 	nicvf_mbox_send_msg_to_pf(nic, &mbx);
+ }
++
++int
++nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t  mode, uint64_t mac)
++{
++	struct nic_mbx mbx = { .msg = { 0 } };
++
++	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
++	mbx.xcast.mode = mode;
++	mbx.xcast.mac = mac;
++
++	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
++}
+diff --git a/dpdk/drivers/net/thunderx/base/nicvf_mbox.h b/dpdk/drivers/net/thunderx/base/nicvf_mbox.h
+index 322c8159cb..47f3d13755 100644
+--- a/dpdk/drivers/net/thunderx/base/nicvf_mbox.h
++++ b/dpdk/drivers/net/thunderx/base/nicvf_mbox.h
+@@ -45,6 +45,8 @@
+ #define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
+ #define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
+ #define NIC_MBOX_MSG_RESET_XCAST	0xF2    /* Reset DCAM filtering mode */
++#define	NIC_MBOX_MSG_ADD_MCAST		0xF3	/* ADD MAC to DCAM filters */
++#define	NIC_MBOX_MSG_SET_XCAST		0xF4	/* Set MCAST/BCAST Rx mode */
+ #define	NIC_MBOX_MSG_MAX		0x100	/* Maximum number of messages */
+ 
+ /* Get vNIC VF configuration */
+@@ -190,6 +192,12 @@ struct change_link_mode_msg {
+ 
+ };
+ 
++struct xcast {
++	uint8_t    msg;
++	uint8_t    mode;
++	uint64_t   mac:48;
++};
++
+ struct nic_mbx {
+ /* 128 bit shared memory between PF and each VF */
+ union {
+@@ -209,6 +217,7 @@ union {
+ 	struct reset_stat_cfg	reset_stat;
+ 	struct set_link_state	set_link;
+ 	struct change_link_mode_msg mode;
++	struct xcast xcast;
+ };
+ };
+ 
+@@ -239,5 +248,6 @@ void nicvf_mbox_cfg_done(struct nicvf *nic);
+ void nicvf_mbox_link_change(struct nicvf *nic);
+ void nicvf_mbox_reset_xcast(struct nicvf *nic);
+ int nicvf_mbox_change_mode(struct nicvf *nic, struct change_link_mode *cfg);
++int nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac);
+ 
+ #endif /* __THUNDERX_NICVF_MBOX__ */
+diff --git a/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/dpdk/drivers/net/thunderx/nicvf_ethdev.c
+index a504d41dfe..ba2ef4058e 100644
+--- a/dpdk/drivers/net/thunderx/nicvf_ethdev.c
++++ b/dpdk/drivers/net/thunderx/nicvf_ethdev.c
+@@ -58,6 +58,10 @@ RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE);
+ #define NICVF_QLM_MODE_SGMII  7
+ #define NICVF_QLM_MODE_XFI   12
+ 
++#define BCAST_ACCEPT      0x01
++#define CAM_ACCEPT        (1 << 3)
++#define BGX_MCAST_MODE(x) ((x) << 1)
++
+ enum nicvf_link_speed {
+ 	NICVF_LINK_SPEED_SGMII,
+ 	NICVF_LINK_SPEED_XAUI,
+@@ -392,12 +396,14 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+ 		RTE_PTYPE_L4_TCP,
+ 		RTE_PTYPE_L4_UDP,
+ 		RTE_PTYPE_L4_FRAG,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 	static const uint32_t ptypes_tunnel[] = {
+ 		RTE_PTYPE_TUNNEL_GRE,
+ 		RTE_PTYPE_TUNNEL_GENEVE,
+ 		RTE_PTYPE_TUNNEL_VXLAN,
+ 		RTE_PTYPE_TUNNEL_NVGRE,
++		RTE_PTYPE_UNKNOWN
+ 	};
+ 	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
+ 
+@@ -2183,9 +2189,22 @@ nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
+ 	nicvf_dev_close(dev);
+ 	return 0;
+ }
++
++static inline uint64_t ether_addr_to_u64(uint8_t *addr)
++{
++	uint64_t u = 0;
++	int i;
++
++	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
++		u = u << 8 | addr[i];
++
++	return u;
++}
++
+ static int
+ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
+ {
++	uint8_t dmac_ctrl_reg = 0;
+ 	int ret;
+ 	struct rte_pci_device *pci_dev;
+ 	struct nicvf *nic = nicvf_pmd_priv(eth_dev);
+@@ -2309,6 +2328,15 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
+ 		goto malloc_fail;
+ 	}
+ 
++	/* set DMAC CTRL reg to allow MAC */
++	dmac_ctrl_reg = BCAST_ACCEPT | BGX_MCAST_MODE(2) | CAM_ACCEPT;
++	ret = nicvf_mbox_set_xcast(nic, dmac_ctrl_reg,
++			ether_addr_to_u64(nic->mac_addr));
++	if (ret) {
++		PMD_INIT_LOG(ERR, "Failed to set mac addr");
++		goto malloc_fail;
++	}
++
+ 	ret = nicvf_set_first_skip(eth_dev);
+ 	if (ret) {
+ 		PMD_INIT_LOG(ERR, "Failed to configure first skip");
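
Note on ether_addr_to_u64() above: it packs the MAC address most-significant byte first, matching the mailbox's 48-bit mac field. Worked example: 01:02:03:04:05:06 packs to 0x010203040506.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t addr[6] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
	uint64_t u = 0;

	for (int i = 0; i < 6; i++)
		u = u << 8 | addr[i];	/* first wire byte ends up highest */

	printf("0x%012llx\n", (unsigned long long)u);	/* 0x010203040506 */
	return 0;
}
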
+diff --git a/dpdk/drivers/net/txgbe/base/meson.build b/dpdk/drivers/net/txgbe/base/meson.build
+index a81d6890fe..4cf90a394a 100644
+--- a/dpdk/drivers/net/txgbe/base/meson.build
++++ b/dpdk/drivers/net/txgbe/base/meson.build
+@@ -22,6 +22,6 @@ foreach flag: error_cflags
+ endforeach
+ 
+ base_lib = static_library('txgbe_base', sources,
+-    dependencies: [static_rte_eal, static_rte_net],
++    dependencies: [static_rte_eal, static_rte_net, static_rte_bus_pci],
+     c_args: c_args)
+ base_objs = base_lib.extract_all_objects(recursive: true)
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.c b/dpdk/drivers/net/txgbe/base/txgbe_hw.c
+index d19fd0065d..7094551fee 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_hw.c
++++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.c
+@@ -462,7 +462,7 @@ void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw)
+  **/
+ s32 txgbe_stop_hw(struct txgbe_hw *hw)
+ {
+-	u32 reg_val;
++	s32 status = 0;
+ 	u16 i;
+ 
+ 	/*
+@@ -484,16 +484,26 @@ s32 txgbe_stop_hw(struct txgbe_hw *hw)
+ 	wr32(hw, TXGBE_ICR(0), TXGBE_ICR_MASK);
+ 	wr32(hw, TXGBE_ICR(1), TXGBE_ICR_MASK);
+ 
+-	/* Disable the transmit unit.  Each queue must be disabled. */
+-	for (i = 0; i < hw->mac.max_tx_queues; i++)
+-		wr32(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_FLUSH);
++	wr32(hw, TXGBE_BMECTL, 0x3);
+ 
+ 	/* Disable the receive unit by stopping each queue */
+-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+-		reg_val = rd32(hw, TXGBE_RXCFG(i));
+-		reg_val &= ~TXGBE_RXCFG_ENA;
+-		wr32(hw, TXGBE_RXCFG(i), reg_val);
+-	}
++	for (i = 0; i < hw->mac.max_rx_queues; i++)
++		wr32(hw, TXGBE_RXCFG(i), 0);
++
++	/* flush all queues disables */
++	txgbe_flush(hw);
++	msec_delay(2);
++
++	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
++	 * access and verify no pending requests
++	 */
++	status = txgbe_set_pcie_master(hw, false);
++	if (status)
++		return status;
++
++	/* Disable the transmit unit.  Each queue must be disabled. */
++	for (i = 0; i < hw->mac.max_tx_queues; i++)
++		wr32(hw, TXGBE_TXCFG(i), 0);
+ 
+ 	/* flush all queues disables */
+ 	txgbe_flush(hw);
+@@ -1174,6 +1184,38 @@ out:
+ 	}
+ }
+ 
++s32 txgbe_set_pcie_master(struct txgbe_hw *hw, bool enable)
++{
++	struct rte_pci_device *pci_dev = (struct rte_pci_device *)hw->back;
++	s32 status = 0;
++	u32 i;
++
++	if (rte_pci_set_bus_master(pci_dev, enable) < 0) {
++		DEBUGOUT("Cannot configure PCI bus master.");
++		return -1;
++	}
++
++	if (enable)
++		goto out;
++
++	/* Exit if master requests are blocked */
++	if (!(rd32(hw, TXGBE_BMEPEND)))
++		goto out;
++
++	/* Poll for master request bit to clear */
++	for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
++		usec_delay(100);
++		if (!(rd32(hw, TXGBE_BMEPEND)))
++			goto out;
++	}
++
++	DEBUGOUT("PCIe transaction pending bit did not clear.");
++	status = TXGBE_ERR_MASTER_REQUESTS_PENDING;
++
++out:
++	return status;
++}
++
+ /**
+  *  txgbe_acquire_swfw_sync - Acquire SWFW semaphore
+  *  @hw: pointer to hardware structure
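
Note on txgbe_set_pcie_master() above: after disabling bus mastering it polls TXGBE_BMEPEND for up to TXGBE_PCI_MASTER_DISABLE_TIMEOUT iterations before reporting TXGBE_ERR_MASTER_REQUESTS_PENDING. A self-contained sketch of that bounded poll; the register read is stubbed so the example runs standalone:

#include <stdbool.h>
#include <unistd.h>

#define POLL_BUDGET	800		/* iterations, ~100us each */

static int pending_polls = 5;		/* stub: pretend hw drains after 5 reads */

static bool
reg_pending(void)
{
	return pending_polls-- > 0;	/* stands in for rd32(hw, TXGBE_BMEPEND) */
}

static int
wait_master_idle(void)
{
	for (int i = 0; i < POLL_BUDGET; i++) {
		if (!reg_pending())
			return 0;	/* bus quiesced */
		usleep(100);
	}

	return -1;			/* pending bit never cleared */
}
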
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.h b/dpdk/drivers/net/txgbe/base/txgbe_hw.h
+index 7031589f7c..4bf9da2d4c 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_hw.h
++++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.h
+@@ -40,6 +40,7 @@ s32 txgbe_setup_fc(struct txgbe_hw *hw);
+ s32 txgbe_validate_mac_addr(u8 *mac_addr);
+ s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask);
+ void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask);
++s32 txgbe_set_pcie_master(struct txgbe_hw *hw, bool enable);
+ 
+ s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
+ s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h
+index b62c0b0824..0d9492c3cb 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h
++++ b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h
+@@ -19,6 +19,7 @@
+ #include <rte_config.h>
+ #include <rte_io.h>
+ #include <rte_ether.h>
++#include <bus_pci_driver.h>
+ 
+ #include "../txgbe_logs.h"
+ 
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_regs.h b/dpdk/drivers/net/txgbe/base/txgbe_regs.h
+index 79290a7afe..a2984f1106 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_regs.h
++++ b/dpdk/drivers/net/txgbe/base/txgbe_regs.h
+@@ -1022,6 +1022,8 @@ enum txgbe_5tuple_protocol {
+ #define   TXGBE_MACRXFLT_CTL_PASS       LS(3, 6, 0x3)
+ #define   TXGBE_MACRXFLT_RXALL          MS(31, 0x1)
+ 
++#define TXGBE_MAC_WDG_TIMEOUT           0x01100C
++
+ /******************************************************************************
+  * Statistic Registers
+  ******************************************************************************/
+@@ -1236,6 +1238,9 @@ enum txgbe_5tuple_protocol {
+ #define TXGBE_TCPTMR                    0x000170
+ #define TXGBE_ITRSEL                    0x000180
+ 
++#define TXGBE_BMECTL                    0x012020
++#define TXGBE_BMEPEND                   0x000168
++
+ /* P2V Mailbox */
+ #define TXGBE_MBMEM(i)           (0x005000 + 0x40 * (i)) /* 0-63 */
+ #define TXGBE_MBCTL(i)           (0x000600 + 4 * (i)) /* 0-63 */
+diff --git a/dpdk/drivers/net/txgbe/base/txgbe_type.h b/dpdk/drivers/net/txgbe/base/txgbe_type.h
+index 75e839b7de..f52736cae9 100644
+--- a/dpdk/drivers/net/txgbe/base/txgbe_type.h
++++ b/dpdk/drivers/net/txgbe/base/txgbe_type.h
+@@ -29,6 +29,7 @@
+ #define TXGBE_FDIRCMD_CMD_POLL			10
+ #define TXGBE_VF_INIT_TIMEOUT	200 /* Number of retries to clear RSTI */
+ #define TXGBE_SPI_TIMEOUT	10000
++#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT	800
+ 
+ #define TXGBE_ALIGN		128 /* as intel did */
+ 
+diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c
+index 6bc231a130..ad29c3cfec 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c
++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c
+@@ -601,6 +601,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ 
+ 	/* Vendor and Device ID need to be set before init of shared code */
++	hw->back = pci_dev;
+ 	hw->device_id = pci_dev->id.device_id;
+ 	hw->vendor_id = pci_dev->id.vendor_id;
+ 	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
+@@ -734,6 +735,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+ 		PMD_INIT_LOG(ERR,
+ 			     "Failed to allocate %d bytes needed to store MAC addresses",
+ 			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
++		rte_free(eth_dev->data->mac_addrs);
++		eth_dev->data->mac_addrs = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -901,6 +904,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+ 	if (!fdir_info->hash_map) {
+ 		PMD_INIT_LOG(ERR,
+ 			     "Failed to allocate memory for fdir hash map!");
++		rte_hash_free(fdir_info->hash_handle);
+ 		return -ENOMEM;
+ 	}
+ 	fdir_info->mask_added = FALSE;
+@@ -936,6 +940,7 @@ static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+ 	if (!l2_tn_info->hash_map) {
+ 		PMD_INIT_LOG(ERR,
+ 			"Failed to allocate memory for L2 TN hash map!");
++		rte_hash_free(l2_tn_info->hash_handle);
+ 		return -ENOMEM;
+ 	}
+ 	l2_tn_info->e_tag_en = FALSE;
+@@ -963,7 +968,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
+ 	if (!ethdev)
+ 		return 0;
+ 
+-	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
++	return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbe_dev_uninit);
+ }
+ 
+ static struct rte_pci_driver rte_txgbe_pmd = {
+@@ -999,41 +1004,25 @@ txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+ }
+ 
+ static void
+-txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
++txgbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+ {
+-	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+-	struct txgbe_rx_queue *rxq;
+-	bool restart;
+-	uint32_t rxcfg, rxbal, rxbah;
+-
+ 	if (on)
+ 		txgbe_vlan_hw_strip_enable(dev, queue);
+ 	else
+ 		txgbe_vlan_hw_strip_disable(dev, queue);
++}
+ 
+-	rxq = dev->data->rx_queues[queue];
+-	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
+-	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
+-	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+-		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
+-			!(rxcfg & TXGBE_RXCFG_VLAN);
+-		rxcfg |= TXGBE_RXCFG_VLAN;
+-	} else {
+-		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
+-			(rxcfg & TXGBE_RXCFG_VLAN);
+-		rxcfg &= ~TXGBE_RXCFG_VLAN;
+-	}
+-	rxcfg &= ~TXGBE_RXCFG_ENA;
++static void
++txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
++{
++	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ 
+-	if (restart) {
+-		/* set vlan strip for ring */
+-		txgbe_dev_rx_queue_stop(dev, queue);
+-		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
+-		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
+-		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
+-		txgbe_dev_rx_queue_start(dev, queue);
++	if (!hw->adapter_stopped) {
++		PMD_DRV_LOG(ERR, "Please stop port first");
++		return;
+ 	}
++
++	txgbe_vlan_strip_q_set(dev, queue, on);
+ }
+ 
+ static int
+@@ -1258,9 +1247,9 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+ 		rxq = dev->data->rx_queues[i];
+ 
+ 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+-			txgbe_vlan_strip_queue_set(dev, i, 1);
++			txgbe_vlan_strip_q_set(dev, i, 1);
+ 		else
+-			txgbe_vlan_strip_queue_set(dev, i, 0);
++			txgbe_vlan_strip_q_set(dev, i, 0);
+ 	}
+ }
+ 
+@@ -1322,6 +1311,13 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+ static int
+ txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ {
++	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
++
++	if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) {
++		PMD_DRV_LOG(ERR, "Please stop port first");
++		return -EPERM;
++	}
++
+ 	txgbe_config_vlan_strip_on_all_queues(dev, mask);
+ 
+ 	txgbe_vlan_offload_config(dev, mask);
+@@ -1716,6 +1712,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
+ 	hw->mac.get_link_status = true;
+ 	hw->dev_start = true;
+ 
++	txgbe_set_pcie_master(hw, true);
++
+ 	/* workaround for GPIO intr lost when mng_veto bit is set */
+ 	if (txgbe_check_reset_blocked(hw))
+ 		txgbe_reinit_gpio_intr(hw);
+@@ -1979,6 +1977,8 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
+ 	adapter->rss_reta_updated = 0;
+ 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
+ 
++	txgbe_set_pcie_master(hw, true);
++
+ 	hw->adapter_stopped = true;
+ 	dev->data->dev_started = 0;
+ 	hw->dev_start = false;
+@@ -2061,6 +2061,8 @@ txgbe_dev_close(struct rte_eth_dev *dev)
+ 
+ 	txgbe_dev_free_queues(dev);
+ 
++	txgbe_set_pcie_master(hw, false);
++
+ 	/* reprogram the RAR[0] in case user changed it. */
+ 	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
+ 
+@@ -2671,7 +2673,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+ 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ 	dev_info->min_rx_bufsize = 1024;
+-	dev_info->max_rx_pktlen = 15872;
++	dev_info->max_rx_pktlen = TXGBE_MAX_MTU + TXGBE_ETH_OVERHEAD;
++	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
++	dev_info->max_mtu = TXGBE_MAX_MTU;
+ 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
+ 	dev_info->max_vfs = pci_dev->max_vfs;
+@@ -2876,6 +2880,7 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ 	bool link_up;
+ 	int err;
+ 	int wait = 1;
++	u32 reg;
+ 
+ 	memset(&link, 0, sizeof(link));
+ 	link.link_status = RTE_ETH_LINK_DOWN;
+@@ -2963,9 +2968,14 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ 	}
+ 
+ 	/* Re configure MAC RX */
+-	if (hw->mac.type == txgbe_mac_raptor)
++	if (hw->mac.type == txgbe_mac_raptor) {
++		reg = rd32(hw, TXGBE_MACRXCFG);
++		wr32(hw, TXGBE_MACRXCFG, reg);
+ 		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_PROMISC,
+ 			TXGBE_MACRXFLT_PROMISC);
++		reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT);
++		wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg);
++	}
+ 
+ 	return rte_eth_linkstatus_set(dev, &link);
+ }
+@@ -3683,12 +3693,8 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (hw->mode)
+-		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+-			TXGBE_FRAME_SIZE_MAX);
+-	else
+-		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+-			TXGBE_FRMSZ_MAX(frame_size));
++	wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
++		TXGBE_FRMSZ_MAX(frame_size));
+ 
+ 	return 0;
+ }
+@@ -3839,13 +3845,13 @@ txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+ 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ 
+ 	if (queue_id < 32) {
+-		mask = rd32(hw, TXGBE_IMS(0));
+-		mask &= (1 << queue_id);
+-		wr32(hw, TXGBE_IMS(0), mask);
++		mask = rd32(hw, TXGBE_IMC(0));
++		mask |= (1 << queue_id);
++		wr32(hw, TXGBE_IMC(0), mask);
+ 	} else if (queue_id < 64) {
+-		mask = rd32(hw, TXGBE_IMS(1));
+-		mask &= (1 << (queue_id - 32));
+-		wr32(hw, TXGBE_IMS(1), mask);
++		mask = rd32(hw, TXGBE_IMC(1));
++		mask |= (1 << (queue_id - 32));
++		wr32(hw, TXGBE_IMC(1), mask);
+ 	}
+ 	rte_intr_enable(intr_handle);
+ 
+@@ -3860,11 +3866,11 @@ txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+ 
+ 	if (queue_id < 32) {
+ 		mask = rd32(hw, TXGBE_IMS(0));
+-		mask &= ~(1 << queue_id);
++		mask |= (1 << queue_id);
+ 		wr32(hw, TXGBE_IMS(0), mask);
+ 	} else if (queue_id < 64) {
+ 		mask = rd32(hw, TXGBE_IMS(1));
+-		mask &= ~(1 << (queue_id - 32));
++		mask |= (1 << (queue_id - 32));
+ 		wr32(hw, TXGBE_IMS(1), mask);
+ 	}
+ 
+@@ -3898,7 +3904,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ 		wr32(hw, TXGBE_IVARMISC, tmp);
+ 	} else {
+ 		/* rx or tx causes */
+-		/* Workaround for ICR lost */
++		msix_vector |= TXGBE_IVAR_VLD; /* Workaround for ICR lost */
+ 		idx = ((16 * (queue & 1)) + (8 * direction));
+ 		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
+ 		tmp &= ~(0xFF << idx);
+@@ -4004,6 +4010,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev,
+ 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ 	uint32_t syn_info;
+ 	uint32_t synqf;
++	uint16_t queue;
+ 
+ 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ 		return -EINVAL;
+@@ -4013,7 +4020,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev,
+ 	if (add) {
+ 		if (syn_info & TXGBE_SYNCLS_ENA)
+ 			return -EINVAL;
+-		synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
++		if (RTE_ETH_DEV_SRIOV(dev).active)
++			queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue;
++		else
++			queue = filter->queue;
++		synqf = (uint32_t)TXGBE_SYNCLS_QPID(queue);
+ 		synqf |= TXGBE_SYNCLS_ENA;
+ 
+ 		if (filter->hig_pri)
+@@ -4082,7 +4093,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ 	wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+ 	wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+ 
+-	l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
++	if (RTE_ETH_DEV_SRIOV(dev).active)
++		l34timir |= TXGBE_5TFCTL1_QP(RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue);
++	else
++		l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+ 	wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+ }
+ 
+@@ -4366,7 +4380,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ 	if (add) {
+ 		etqf = TXGBE_ETFLT_ENA;
+ 		etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+-		etqs |= TXGBE_ETCLS_QPID(filter->queue);
++		if (RTE_ETH_DEV_SRIOV(dev).active) {
++			int pool, queue;
++
++			pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
++			queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue;
++			etqf |= TXGBE_ETFLT_POOLENA;
++			etqf |= TXGBE_ETFLT_POOL(pool);
++			etqs |= TXGBE_ETCLS_QPID(queue);
++		} else {
++			etqs |= TXGBE_ETCLS_QPID(filter->queue);
++		}
+ 		etqs |= TXGBE_ETCLS_QENA;
+ 
+ 		ethertype_filter.ethertype = filter->ether_type;
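[Editor's note] The interrupt hunks in this file flip the queue-interrupt helpers: enable now ORs the bit into the mask-clear register (IMC) instead of and-ing it into IMS, and disable ORs the bit into the mask-set register (IMS) instead of clearing it. A minimal standalone C sketch of the set/clear mask idiom follows; the register names and the plain array standing in for the BAR are illustrative only, not the txgbe layout.

    #include <stdint.h>
    #include <stdio.h>

    #define REG_IMS 0 /* interrupt mask set: write 1 to mask a vector */
    #define REG_IMC 1 /* interrupt mask clear: write 1 to unmask it   */

    static uint32_t bar[2]; /* fake device registers for the sketch */

    static void rxq_intr_enable(uint16_t q)
    {
        bar[REG_IMC] |= UINT32_C(1) << q; /* unmask: set the bit in IMC */
    }

    static void rxq_intr_disable(uint16_t q)
    {
        bar[REG_IMS] |= UINT32_C(1) << q; /* mask: set the bit in IMS */
    }

    int main(void)
    {
        rxq_intr_enable(3);
        rxq_intr_disable(5);
        printf("IMC=%#x IMS=%#x\n", bar[REG_IMC], bar[REG_IMS]);
        return 0;
    }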
+diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.h b/dpdk/drivers/net/txgbe/txgbe_ethdev.h
+index 7feb45d0cf..d5b58018a4 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_ethdev.h
++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.h
+@@ -56,7 +56,7 @@
+ #define TXGBE_5TUPLE_MAX_PRI            7
+ #define TXGBE_5TUPLE_MIN_PRI            1
+ 
+-
++#define TXGBE_MAX_MTU			9414
+ /* The overhead from MTU to max frame size. */
+ #define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+ 
+diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c
+index f1341fbf7e..92603fccc2 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c
++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c
+@@ -295,6 +295,8 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
+ 	err = hw->mac.start_hw(hw);
+ 	if (err) {
+ 		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
++		rte_free(eth_dev->data->mac_addrs);
++		eth_dev->data->mac_addrs = NULL;
+ 		return -EIO;
+ 	}
+ 
+@@ -670,8 +672,10 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
+ 		 * now only one vector is used for Rx queue
+ 		 */
+ 		intr_vector = 1;
+-		if (rte_intr_efd_enable(intr_handle, intr_vector))
++		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
++			txgbe_dev_clear_queues(dev);
+ 			return -1;
++		}
+ 	}
+ 
+ 	if (rte_intr_dp_is_en(intr_handle)) {
+@@ -679,6 +683,7 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
+ 						   dev->data->nb_rx_queues)) {
+ 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ 				     " intr_vec", dev->data->nb_rx_queues);
++			txgbe_dev_clear_queues(dev);
+ 			return -ENOMEM;
+ 		}
+ 	}
+@@ -965,7 +970,7 @@ txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ 		wr32(hw, TXGBE_VFIVARMISC, tmp);
+ 	} else {
+ 		/* rx or tx cause */
+-		/* Workaround for ICR lost */
++		msix_vector |= TXGBE_VFIVAR_VLD; /* Workaround for ICR lost */
+ 		idx = ((16 * (queue & 1)) + (8 * direction));
+ 		tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
+ 		tmp &= ~(0xFF << idx);
+@@ -1201,9 +1206,13 @@ static int
+ txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+ {
+ 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
++	int mode = TXGBEVF_XCAST_MODE_NONE;
+ 	int ret;
+ 
+-	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_NONE)) {
++	if (dev->data->all_multicast)
++		mode = TXGBEVF_XCAST_MODE_ALLMULTI;
++
++	switch (hw->mac.update_xcast_mode(hw, mode)) {
+ 	case 0:
+ 		ret = 0;
+ 		break;
+@@ -1224,6 +1233,9 @@ txgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+ 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ 	int ret;
+ 
++	if (dev->data->promiscuous)
++		return 0;
++
+ 	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_ALLMULTI)) {
+ 	case 0:
+ 		ret = 0;
+@@ -1245,6 +1257,9 @@ txgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+ 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ 	int ret;
+ 
++	if (dev->data->promiscuous)
++		return 0;
++
+ 	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_MULTI)) {
+ 	case 0:
+ 		ret = 0;
+diff --git a/dpdk/drivers/net/txgbe/txgbe_fdir.c b/dpdk/drivers/net/txgbe/txgbe_fdir.c
+index a198b6781b..f627ab681d 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_fdir.c
++++ b/dpdk/drivers/net/txgbe/txgbe_fdir.c
+@@ -844,6 +844,9 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ 		return -EINVAL;
+ 	}
+ 
++	if (RTE_ETH_DEV_SRIOV(dev).active)
++		queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue;
++
+ 	node = txgbe_fdir_filter_lookup(info, &rule->input);
+ 	if (node) {
+ 		if (!update) {
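[Editor's note] Several hunks above (SYN, 5-tuple, ethertype and flow-director filters) apply the same SR-IOV fix: when virtual functions are active, the PF's receive queues start after the VF pools, so a user-visible queue index must be offset by def_pool_q_idx before it is written into a redirection register. A minimal sketch of that remap, with a hypothetical struct mirroring the RTE_ETH_DEV_SRIOV fields used here:

    #include <stdbool.h>
    #include <stdint.h>

    /* hypothetical mirror of the RTE_ETH_DEV_SRIOV(dev) fields */
    struct sriov_info {
        bool     active;
        uint16_t def_pool_q_idx; /* first queue of the PF's default pool */
    };

    static uint16_t abs_rx_queue(const struct sriov_info *s, uint16_t user_q)
    {
        /* offset only when SR-IOV is on; otherwise queue ids are absolute */
        return s->active ? (uint16_t)(s->def_pool_q_idx + user_q) : user_q;
    }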
+diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c
+index 1cd4b25965..2efc2bcf29 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c
++++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c
+@@ -564,26 +564,17 @@ tx_desc_ol_flags_to_ptype(uint64_t oflags)
+ 	switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ 	case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
+-		ptype |= RTE_PTYPE_L2_ETHER |
+-			 RTE_PTYPE_L3_IPV4 |
+-			 RTE_PTYPE_TUNNEL_GRENAT;
++		ptype |= RTE_PTYPE_TUNNEL_GRENAT;
+ 		break;
+ 	case RTE_MBUF_F_TX_TUNNEL_GRE:
+-		ptype |= RTE_PTYPE_L2_ETHER |
+-			 RTE_PTYPE_L3_IPV4 |
+-			 RTE_PTYPE_TUNNEL_GRE;
++		ptype |= RTE_PTYPE_TUNNEL_GRE;
+ 		break;
+ 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+-		ptype |= RTE_PTYPE_L2_ETHER |
+-			 RTE_PTYPE_L3_IPV4 |
+-			 RTE_PTYPE_TUNNEL_GENEVE;
+-		ptype |= RTE_PTYPE_INNER_L2_ETHER;
++		ptype |= RTE_PTYPE_TUNNEL_GENEVE;
+ 		break;
+ 	case RTE_MBUF_F_TX_TUNNEL_IPIP:
+ 	case RTE_MBUF_F_TX_TUNNEL_IP:
+-		ptype |= RTE_PTYPE_L2_ETHER |
+-			 RTE_PTYPE_L3_IPV4 |
+-			 RTE_PTYPE_TUNNEL_IP;
++		ptype |= RTE_PTYPE_TUNNEL_IP;
+ 		break;
+ 	}
+ 
+@@ -667,11 +658,20 @@ txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
+ 	return 0;
+ }
+ 
++#define GRE_CHECKSUM_PRESENT	0x8000
++#define GRE_KEY_PRESENT		0x2000
++#define GRE_SEQUENCE_PRESENT	0x1000
++#define GRE_EXT_LEN		4
++#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
++				 GRE_SEQUENCE_PRESENT)
++
+ static inline uint8_t
+ txgbe_get_tun_len(struct rte_mbuf *mbuf)
+ {
+ 	struct txgbe_genevehdr genevehdr;
+ 	const struct txgbe_genevehdr *gh;
++	const struct txgbe_grehdr *grh;
++	struct txgbe_grehdr grehdr;
+ 	uint8_t tun_len;
+ 
+ 	switch (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+@@ -684,11 +684,16 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf)
+ 			+ sizeof(struct txgbe_vxlanhdr);
+ 		break;
+ 	case RTE_MBUF_F_TX_TUNNEL_GRE:
+-		tun_len = sizeof(struct txgbe_nvgrehdr);
++		tun_len = sizeof(struct txgbe_grehdr);
++		grh = rte_pktmbuf_read(mbuf,
++			mbuf->outer_l2_len + mbuf->outer_l3_len,
++			sizeof(grehdr), &grehdr);
++		if (grh->flags & rte_cpu_to_be_16(GRE_SUPPORTED_FIELDS))
++			tun_len += GRE_EXT_LEN;
+ 		break;
+ 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+-		gh = rte_pktmbuf_read(mbuf,
+-			mbuf->outer_l2_len + mbuf->outer_l3_len,
++		gh = rte_pktmbuf_read(mbuf, mbuf->outer_l2_len +
++			mbuf->outer_l3_len + sizeof(struct txgbe_udphdr),
+ 			sizeof(genevehdr), &genevehdr);
+ 		tun_len = sizeof(struct txgbe_udphdr)
+ 			+ sizeof(struct txgbe_genevehdr)
+@@ -702,27 +707,26 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf)
+ }
+ 
+ static inline uint8_t
+-txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt)
++txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt, uint8_t tun_len)
+ {
+-	uint64_t l2_vxlan, l2_vxlan_mac, l2_vxlan_mac_vlan;
+-	uint64_t l2_gre, l2_gre_mac, l2_gre_mac_vlan;
++	uint64_t inner_l2_len;
+ 	uint8_t ptid = 0;
+ 
+-	l2_vxlan = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr);
+-	l2_vxlan_mac = l2_vxlan + sizeof(struct rte_ether_hdr);
+-	l2_vxlan_mac_vlan = l2_vxlan_mac + sizeof(struct rte_vlan_hdr);
++	inner_l2_len = tx_pkt->l2_len - tun_len;
+ 
+-	l2_gre = sizeof(struct txgbe_grehdr);
+-	l2_gre_mac = l2_gre + sizeof(struct rte_ether_hdr);
+-	l2_gre_mac_vlan = l2_gre_mac + sizeof(struct rte_vlan_hdr);
+-
+-	if (tx_pkt->l2_len == l2_vxlan || tx_pkt->l2_len == l2_gre)
++	switch (inner_l2_len) {
++	case 0:
+ 		ptid = TXGBE_PTID_TUN_EIG;
+-	else if (tx_pkt->l2_len == l2_vxlan_mac || tx_pkt->l2_len == l2_gre_mac)
++		break;
++	case sizeof(struct rte_ether_hdr):
+ 		ptid = TXGBE_PTID_TUN_EIGM;
+-	else if (tx_pkt->l2_len == l2_vxlan_mac_vlan ||
+-			tx_pkt->l2_len == l2_gre_mac_vlan)
++		break;
++	case sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr):
+ 		ptid = TXGBE_PTID_TUN_EIGMV;
++		break;
++	default:
++		ptid = TXGBE_PTID_TUN_EI;
++	}
+ 
+ 	return ptid;
+ }
+@@ -789,8 +793,6 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 		tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
+ 		if (tx_ol_req) {
+ 			tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req);
+-			if (tx_offload.ptid & TXGBE_PTID_PKT_TUN)
+-				tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt);
+ 			tx_offload.l2_len = tx_pkt->l2_len;
+ 			tx_offload.l3_len = tx_pkt->l3_len;
+ 			tx_offload.l4_len = tx_pkt->l4_len;
+@@ -799,6 +801,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ 			tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
++			if (tx_offload.ptid & TXGBE_PTID_PKT_TUN)
++				tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt,
++							tx_offload.outer_tun_len);
+ 
+ #ifdef RTE_LIB_SECURITY
+ 			if (use_ipsec) {
+@@ -2130,6 +2135,7 @@ txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
+ 	if (txq != NULL && txq->ops != NULL) {
+ 		txq->ops->release_mbufs(txq);
+ 		txq->ops->free_swring(txq);
++		rte_memzone_free(txq->mz);
+ 		rte_free(txq);
+ 	}
+ }
+@@ -2341,6 +2347,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ 		return -ENOMEM;
+ 	}
+ 
++	txq->mz = tz;
+ 	txq->nb_tx_desc = nb_desc;
+ 	txq->tx_free_thresh = tx_free_thresh;
+ 	txq->pthresh = tx_conf->tx_thresh.pthresh;
+@@ -2458,6 +2465,7 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
+ 		txgbe_rx_queue_release_mbufs(rxq);
+ 		rte_free(rxq->sw_ring);
+ 		rte_free(rxq->sw_sc_ring);
++		rte_memzone_free(rxq->mz);
+ 		rte_free(rxq);
+ 	}
+ }
+@@ -2551,6 +2559,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
+ 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+ 	rxq->rx_tail = 0;
+ 	rxq->nb_rx_hold = 0;
++	rte_pktmbuf_free(rxq->pkt_first_seg);
+ 	rxq->pkt_first_seg = NULL;
+ 	rxq->pkt_last_seg = NULL;
+ }
+@@ -2631,6 +2640,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ 		return -ENOMEM;
+ 	}
+ 
++	rxq->mz = rz;
+ 	/*
+ 	 * Zero init all the descriptors in the ring.
+ 	 */
+@@ -5069,6 +5079,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
+ 	uint32_t reta;
+ 	uint16_t i;
+ 	uint16_t j;
++	uint16_t queue;
+ 	struct rte_eth_rss_conf rss_conf = {
+ 		.rss_key = conf->conf.key_len ?
+ 			(void *)(uintptr_t)conf->conf.key : NULL,
+@@ -5101,7 +5112,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
+ 	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
+ 		if (j == conf->conf.queue_num)
+ 			j = 0;
+-		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
++		if (RTE_ETH_DEV_SRIOV(dev).active)
++			queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx +
++				conf->conf.queue[j];
++		else
++			queue = conf->conf.queue[j];
++		reta = (reta >> 8) | LS32(queue, 24, 0xFF);
+ 		if ((i & 3) == 3)
+ 			wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ 	}
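[Editor's note] The txgbe_get_tun_len() hunk starts reading the GRE header to size the tunnel headers: the base GRE header is 4 bytes, and the checksum/key/sequence flag bits add optional 4-byte words (the patch adds a single 4-byte extension when any of them is set). A standalone sketch of a fully general variant that counts each option separately; the struct and macro names are local to the sketch:

    #include <arpa/inet.h> /* ntohs/htons */
    #include <stdint.h>
    #include <stdio.h>

    #define GRE_CSUM 0x8000
    #define GRE_KEY  0x2000
    #define GRE_SEQ  0x1000

    struct grehdr {
        uint16_t flags; /* big-endian on the wire */
        uint16_t proto;
    };

    static uint8_t gre_hdr_len(const struct grehdr *gh)
    {
        uint16_t f = ntohs(gh->flags);
        uint8_t len = 4; /* base header */

        if (f & GRE_CSUM) len += 4; /* checksum + reserved word */
        if (f & GRE_KEY)  len += 4; /* key */
        if (f & GRE_SEQ)  len += 4; /* sequence number */
        return len;
    }

    int main(void)
    {
        struct grehdr gh = { htons(GRE_KEY), htons(0x6558) };

        printf("gre header length: %u\n", gre_hdr_len(&gh)); /* 8 */
        return 0;
    }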
+diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.h b/dpdk/drivers/net/txgbe/txgbe_rxtx.h
+index 27d4c842c0..c579e1a9f2 100644
+--- a/dpdk/drivers/net/txgbe/txgbe_rxtx.h
++++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.h
+@@ -314,6 +314,7 @@ struct txgbe_rx_queue {
+ 	struct rte_mbuf fake_mbuf;
+ 	/** hold packets to return to application */
+ 	struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];
++	const struct rte_memzone *mz;
+ };
+ 
+ /**
+@@ -402,6 +403,7 @@ struct txgbe_tx_queue {
+ 	uint8_t		    using_ipsec;
+ 	/**< indicates that IPsec TX feature is in use */
+ #endif
++	const struct rte_memzone *mz;
+ };
+ 
+ struct txgbe_txq_ops {
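[Editor's note] The new mz fields in the rx/tx queue structs back the leak fix above: each queue now remembers the memzone holding its descriptor ring so the release path can hand it back. A sketch of the ownership pattern with plain malloc standing in for rte_memzone_reserve/rte_memzone_free:

    #include <stdlib.h>

    struct txq {
        void *ring; /* descriptor ring inside the allocation */
        void *mz;   /* owning allocation, freed on release */
    };

    static int txq_setup(struct txq *q, size_t ring_bytes)
    {
        q->mz = malloc(ring_bytes);
        if (q->mz == NULL)
            return -1;
        q->ring = q->mz; /* the driver stores tz/rz the same way */
        return 0;
    }

    static void txq_release(struct txq *q)
    {
        free(q->mz); /* before the fix, the zone was never freed */
        q->mz = NULL;
        q->ring = NULL;
    }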
+diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c
+index c2c0a1a111..70d4839def 100644
+--- a/dpdk/drivers/net/virtio/virtio_ethdev.c
++++ b/dpdk/drivers/net/virtio/virtio_ethdev.c
+@@ -913,6 +913,8 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ 		struct virtio_net_ctrl_mac *tbl
+ 			= rte_is_multicast_ether_addr(addr) ? mc : uc;
+ 
++		if (rte_is_zero_ether_addr(addr))
++			break;
+ 		memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
+ 	}
+ 
+@@ -1793,8 +1795,6 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
+ 	else
+ 		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ 
+-	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+-
+ 	/* Setting up rx_header size for the device */
+ 	if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
+ 	    virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
+diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
+index 3c05ac9cc0..c10252506b 100644
+--- a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
++++ b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
+@@ -128,7 +128,8 @@ vhost_user_write(int fd, struct vhost_user_msg *msg, int *fds, int fd_num)
+ 	cmsg->cmsg_len = CMSG_LEN(fd_size);
+ 	cmsg->cmsg_level = SOL_SOCKET;
+ 	cmsg->cmsg_type = SCM_RIGHTS;
+-	memcpy(CMSG_DATA(cmsg), fds, fd_size);
++	if (fd_size > 0)
++		memcpy(CMSG_DATA(cmsg), fds, fd_size);
+ 
+ 	do {
+ 		r = sendmsg(fd, &msgh, 0);
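[Editor's note] The vhost_user_write() hunk guards the ancillary-data memcpy: with zero descriptors, memcpy with a possibly-NULL source is undefined behavior even for length 0. A self-contained sketch of passing descriptors over a Unix socket with SCM_RIGHTS, skipping control-message setup entirely when there is nothing to send; the helper name and the cap of 8 fds are choices local to the sketch:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static int send_with_fds(int sock, const void *buf, size_t len,
                             const int *fds, int fd_num)
    {
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        char control[CMSG_SPACE(sizeof(int) * 8)];
        struct msghdr msgh = { .msg_iov = &iov, .msg_iovlen = 1 };

        if (fd_num > 8)
            return -1;
        if (fd_num > 0) {
            size_t fd_size = sizeof(int) * (size_t)fd_num;
            struct cmsghdr *cmsg;

            memset(control, 0, sizeof(control));
            msgh.msg_control = control;
            msgh.msg_controllen = CMSG_SPACE(fd_size);
            cmsg = CMSG_FIRSTHDR(&msgh);
            cmsg->cmsg_len = CMSG_LEN(fd_size);
            cmsg->cmsg_level = SOL_SOCKET;
            cmsg->cmsg_type = SCM_RIGHTS;
            memcpy(CMSG_DATA(cmsg), fds, fd_size);
        }
        return (int)sendmsg(sock, &msgh, 0);
    }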
+diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
+index af1f8c8237..1bfd6aba80 100644
+--- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
++++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
+@@ -20,6 +20,7 @@
+ #include <rte_malloc.h>
+ 
+ #include "vhost.h"
++#include "virtio.h"
+ #include "virtio_user_dev.h"
+ #include "../virtio_ethdev.h"
+ 
+@@ -32,6 +33,61 @@ const char * const virtio_user_backend_strings[] = {
+ 	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
+ };
+ 
++static int
++virtio_user_uninit_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
++{
++	if (dev->kickfds[queue_sel] >= 0) {
++		close(dev->kickfds[queue_sel]);
++		dev->kickfds[queue_sel] = -1;
++	}
++
++	if (dev->callfds[queue_sel] >= 0) {
++		close(dev->callfds[queue_sel]);
++		dev->callfds[queue_sel] = -1;
++	}
++
++	return 0;
++}
++
++static int
++virtio_user_init_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
++{
++	/* May use an invalid flag, but some backends use kickfd and
++	 * callfd as criteria to judge if the dev is alive, so finally we
++	 * use a real event_fd.
++	 */
++	dev->callfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
++	if (dev->callfds[queue_sel] < 0) {
++		PMD_DRV_LOG(ERR, "(%s) Failed to setup callfd for queue %u: %s",
++				dev->path, queue_sel, strerror(errno));
++		return -1;
++	}
++	dev->kickfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
++	if (dev->kickfds[queue_sel] < 0) {
++		PMD_DRV_LOG(ERR, "(%s) Failed to setup kickfd for queue %u: %s",
++				dev->path, queue_sel, strerror(errno));
++		return -1;
++	}
++
++	return 0;
++}
++
++static int
++virtio_user_destroy_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
++{
++	struct vhost_vring_state state;
++	int ret;
++
++	state.index = queue_sel;
++	ret = dev->ops->get_vring_base(dev, &state);
++	if (ret < 0) {
++		PMD_DRV_LOG(ERR, "(%s) Failed to destroy queue %u", dev->path, queue_sel);
++		return -1;
++	}
++
++	return 0;
++}
++
+ static int
+ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+ {
+@@ -127,6 +183,22 @@ err:
+ 	return -1;
+ }
+ 
++static int
++virtio_user_foreach_queue(struct virtio_user_dev *dev, int (*fn)(struct virtio_user_dev *, uint32_t))
++{
++	uint32_t i, nr_vq;
++
++	nr_vq = dev->max_queue_pairs * 2;
++	if (dev->hw_cvq)
++		nr_vq++;
++
++	for (i = 0; i < nr_vq; i++)
++		if (fn(dev, i) < 0)
++			return -1;
++
++	return 0;
++}
++
+ static int
+ virtio_user_queue_setup(struct virtio_user_dev *dev,
+ 			int (*fn)(struct virtio_user_dev *, uint32_t))
+@@ -215,6 +287,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
+ 	if (ret < 0)
+ 		goto error;
+ 
++	if (dev->scvq) {
++		ret = dev->ops->cvq_enable(dev, 1);
++		if (ret < 0)
++			goto error;
++	}
++
+ 	dev->started = true;
+ 
+ 	pthread_mutex_unlock(&dev->mutex);
+@@ -233,7 +311,6 @@ error:
+ 
+ int virtio_user_stop_device(struct virtio_user_dev *dev)
+ {
+-	struct vhost_vring_state state;
+ 	uint32_t i;
+ 	int ret;
+ 
+@@ -247,16 +324,16 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
+ 			goto err;
+ 	}
+ 
+-	/* Stop the backend. */
+-	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+-		state.index = i;
+-		ret = dev->ops->get_vring_base(dev, &state);
+-		if (ret < 0) {
+-			PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
++	if (dev->scvq) {
++		ret = dev->ops->cvq_enable(dev, 0);
++		if (ret < 0)
+ 			goto err;
+-		}
+ 	}
+ 
++	/* Stop the backend. */
++	if (virtio_user_foreach_queue(dev, virtio_user_destroy_queue) < 0)
++		goto err;
++
+ 	dev->started = false;
+ 
+ out:
+@@ -386,46 +463,13 @@ out:
+ static int
+ virtio_user_dev_init_notify(struct virtio_user_dev *dev)
+ {
+-	uint32_t i, j, nr_vq;
+-	int callfd;
+-	int kickfd;
+-
+-	nr_vq = dev->max_queue_pairs * 2;
+-	if (dev->hw_cvq)
+-		nr_vq++;
+ 
+-	for (i = 0; i < nr_vq; i++) {
+-		/* May use invalid flag, but some backend uses kickfd and
+-		 * callfd as criteria to judge if dev is alive. so finally we
+-		 * use real event_fd.
+-		 */
+-		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+-		if (callfd < 0) {
+-			PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
+-			goto err;
+-		}
+-		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+-		if (kickfd < 0) {
+-			close(callfd);
+-			PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
+-			goto err;
+-		}
+-		dev->callfds[i] = callfd;
+-		dev->kickfds[i] = kickfd;
+-	}
++	if (virtio_user_foreach_queue(dev, virtio_user_init_notify_queue) < 0)
++		goto err;
+ 
+ 	return 0;
+ err:
+-	for (j = 0; j < i; j++) {
+-		if (dev->kickfds[j] >= 0) {
+-			close(dev->kickfds[j]);
+-			dev->kickfds[j] = -1;
+-		}
+-		if (dev->callfds[j] >= 0) {
+-			close(dev->callfds[j]);
+-			dev->callfds[j] = -1;
+-		}
+-	}
++	virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
+ 
+ 	return -1;
+ }
+@@ -433,18 +477,8 @@ err:
+ static void
+ virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
+ {
+-	uint32_t i;
++	virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
+ 
+-	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+-		if (dev->kickfds[i] >= 0) {
+-			close(dev->kickfds[i]);
+-			dev->kickfds[i] = -1;
+-		}
+-		if (dev->callfds[i] >= 0) {
+-			close(dev->callfds[i]);
+-			dev->callfds[i] = -1;
+-		}
+-	}
+ }
+ 
+ static int
+@@ -575,7 +609,7 @@ virtio_user_alloc_vrings(struct virtio_user_dev *dev)
+ 	bool packed_ring = !!(dev->device_features & (1ull << VIRTIO_F_RING_PACKED));
+ 
+ 	nr_vrings = dev->max_queue_pairs * 2;
+-	if (dev->device_features & (1ull << VIRTIO_NET_F_MQ))
++	if (dev->frontend_features & (1ull << VIRTIO_NET_F_CTRL_VQ))
+ 		nr_vrings++;
+ 
+ 	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
+@@ -725,7 +759,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
+ 	if (virtio_user_dev_init_max_queue_pairs(dev, queues))
+ 		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
+ 
+-	if (dev->max_queue_pairs > 1)
++	if (dev->max_queue_pairs > 1 || dev->hw_cvq)
+ 		cq = 1;
+ 
+ 	if (!mrg_rxbuf)
+@@ -743,8 +777,9 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
+ 		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ 
+ 	if (cq) {
+-		/* device does not really need to know anything about CQ,
+-		 * so if necessary, we just claim to support CQ
++		/* Except for vDPA, the device does not really need to know
++		 * anything about CQ, so if necessary, we just claim to support
++		 * control queue.
+ 		 */
+ 		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ 	} else {
+@@ -844,9 +879,6 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
+ 	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+ 		ret |= dev->ops->enable_qp(dev, i, 0);
+ 
+-	if (dev->scvq)
+-		ret |= dev->ops->cvq_enable(dev, 1);
+-
+ 	dev->queue_pairs = q_pairs;
+ 
+ 	return ret;
+diff --git a/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h b/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h
+index 24c235876e..a6bb281d8d 100644
+--- a/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h
++++ b/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h
+@@ -126,6 +126,7 @@ typedef enum {
+    VMXNET3_CMD_RESERVED7,
+    VMXNET3_CMD_RESERVED8,
+    VMXNET3_CMD_GET_MAX_QUEUES_CONF,
++   VMXNET3_CMD_RESERVED11,
+    VMXNET3_CMD_GET_MAX_CAPABILITIES,
+    VMXNET3_CMD_GET_DCR0_REG,
+ } Vmxnet3_Cmd;
+diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
+index e49191718a..70ae9c6035 100644
+--- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
+@@ -257,6 +257,7 @@ vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
+ 		vmxnet3_disable_intr(hw, i);
+ }
+ 
++#ifndef RTE_EXEC_ENV_FREEBSD
+ /*
+  * Enable all intrs used by the device
+  */
+@@ -280,6 +281,7 @@ vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
+ 			vmxnet3_enable_intr(hw, i);
+ 	}
+ }
++#endif
+ 
+ /*
+  * Gets tx data ring descriptor size.
+@@ -1129,6 +1131,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
+ 	/* Setting proper Rx Mode and issue Rx Mode Update command */
+ 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
+ 
++#ifndef RTE_EXEC_ENV_FREEBSD
+ 	/* Setup interrupt callback  */
+ 	rte_intr_callback_register(dev->intr_handle,
+ 				   vmxnet3_interrupt_handler, dev);
+@@ -1140,6 +1143,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
+ 
+ 	/* enable all intrs */
+ 	vmxnet3_enable_all_intrs(hw);
++#endif
+ 
+ 	vmxnet3_process_events(dev);
+ 
+@@ -1928,11 +1932,13 @@ done:
+ static int
+ vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+ {
++#ifndef RTE_EXEC_ENV_FREEBSD
+ 	struct vmxnet3_hw *hw = dev->data->dev_private;
+ 
+ 	vmxnet3_enable_intr(hw,
+ 			    rte_intr_vec_list_index_get(dev->intr_handle,
+ 							       queue_id));
++#endif
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h
+index 74154e3a1a..ae8542811a 100644
+--- a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h
++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h
+@@ -7,7 +7,7 @@
+ 
+ extern int vmxnet3_logtype_init;
+ #define PMD_INIT_LOG(level, fmt, args...) \
+-	rte_log(RTE_LOG_ ## level, vmxnet3_logtype_driver, \
++	rte_log(RTE_LOG_ ## level, vmxnet3_logtype_init, \
+ 		"%s(): " fmt "\n", __func__, ## args)
+ #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+ 
+diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+index 9557c1042e..32430614d5 100644
+--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c
++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+@@ -244,22 +244,30 @@ mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
+ 	return max;
+ }
+ 
++static void
++mlx5_vdpa_drain_cq_one(struct mlx5_vdpa_priv *priv,
++	struct mlx5_vdpa_virtq *virtq)
++{
++	struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
++
++	mlx5_vdpa_queue_complete(cq);
++	if (cq->cq_obj.cq) {
++		cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
++		virtq->eqp.qp_pi = 0;
++		if (!cq->armed)
++			mlx5_vdpa_cq_arm(priv, cq);
++	}
++}
++
+ void
+ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
+ {
++	struct mlx5_vdpa_virtq *virtq;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
+-		struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
+-
+-		mlx5_vdpa_queue_complete(cq);
+-		if (cq->cq_obj.cq) {
+-			cq->cq_obj.cqes[0].wqe_counter =
+-				rte_cpu_to_be_16(UINT16_MAX);
+-			priv->virtqs[i].eqp.qp_pi = 0;
+-			if (!cq->armed)
+-				mlx5_vdpa_cq_arm(priv, cq);
+-		}
++		virtq = &priv->virtqs[i];
++		mlx5_vdpa_drain_cq_one(priv, virtq);
+ 	}
+ }
+ 
+@@ -632,6 +640,7 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
+ 	if (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) {
+ 		/* Reuse existing resources. */
+ 		eqp->cq.callfd = callfd;
++		mlx5_vdpa_drain_cq_one(priv, virtq);
+ 		/* FW will set event qp to error state in q destroy. */
+ 		if (reset && !mlx5_vdpa_qps2rst2rts(eqp))
+ 			rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
+diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c
+index edb7e35c2c..7e43719f53 100644
+--- a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c
++++ b/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c
+@@ -13,8 +13,6 @@
+ #include "sfc_vdpa.h"
+ #include "sfc_vdpa_ops.h"
+ 
+-extern uint32_t sfc_logtype_driver;
+-
+ #ifndef PAGE_SIZE
+ #define PAGE_SIZE   (sysconf(_SC_PAGESIZE))
+ #endif
+diff --git a/dpdk/dts/framework/remote_session/remote/remote_session.py b/dpdk/dts/framework/remote_session/remote/remote_session.py
+index 719f7d1ef7..68894a9686 100644
+--- a/dpdk/dts/framework/remote_session/remote/remote_session.py
++++ b/dpdk/dts/framework/remote_session/remote/remote_session.py
+@@ -3,8 +3,8 @@
+ # Copyright(c) 2022-2023 PANTHEON.tech s.r.o.
+ # Copyright(c) 2022-2023 University of New Hampshire
+ 
+-import dataclasses
+ from abc import ABC, abstractmethod
++from dataclasses import InitVar, dataclass, field
+ from pathlib import PurePath
+ 
+ from framework.config import NodeConfiguration
+@@ -13,7 +13,7 @@ from framework.logger import DTSLOG
+ from framework.settings import SETTINGS
+ 
+ 
+-@dataclasses.dataclass(slots=True, frozen=True)
++@dataclass(slots=True, frozen=True)
+ class CommandResult:
+     """
+     The result of remote execution of a command.
+@@ -21,9 +21,25 @@ class CommandResult:
+ 
+     name: str
+     command: str
+-    stdout: str
+-    stderr: str
++    init_stdout: InitVar[str]
++    init_stderr: InitVar[str]
+     return_code: int
++    stdout: str = field(init=False)
++    stderr: str = field(init=False)
++
++    def __post_init__(self, init_stdout: str, init_stderr: str) -> None:
++        """Strip the whitespaces from stdout and stderr.
++
++        The generated __init__ method uses object.__setattr__() when the dataclass is frozen,
++        so that's what we use here as well.
++
++        In order to get access to dataclass fields in the __post_init__ method,
++        we have to type them as InitVars. These InitVars are included in the __init__
++        method's signature, so the actual stdout and stderr fields must be excluded
++        from it (with init=False) so that we have the proper number of arguments.
++        """
++        object.__setattr__(self, "stdout", init_stdout.strip())
++        object.__setattr__(self, "stderr", init_stderr.strip())
+ 
+     def __str__(self) -> str:
+         return (
+diff --git a/dpdk/dts/tests/TestSuite_smoke_tests.py b/dpdk/dts/tests/TestSuite_smoke_tests.py
+index 8958f58dac..5e897cf5d2 100644
+--- a/dpdk/dts/tests/TestSuite_smoke_tests.py
++++ b/dpdk/dts/tests/TestSuite_smoke_tests.py
+@@ -91,7 +91,7 @@ class SmokeTests(TestSuite):
+             # with the address for the nic we are on in the loop and then captures the
+             # name of the driver in a group
+             devbind_info_for_nic = re.search(
+-                f"{nic.pci}[^\\n]*drv=([\\d\\w]*) [^\\n]*",
++                f"{nic.pci}[^\\n]*drv=([\\d\\w-]*) [^\\n]*",
+                 all_nics_in_dpdk_devbind,
+             )
+             self.verify(
+diff --git a/dpdk/examples/fips_validation/fips_validation_rsa.c b/dpdk/examples/fips_validation/fips_validation_rsa.c
+index f675b51051..55f81860a0 100644
+--- a/dpdk/examples/fips_validation/fips_validation_rsa.c
++++ b/dpdk/examples/fips_validation/fips_validation_rsa.c
+@@ -328,6 +328,9 @@ parse_test_rsa_json_interim_writeback(struct fips_val *val)
+ 		if (prepare_vec_rsa() < 0)
+ 			return -1;
+ 
++		if (!vec.rsa.e.val)
++			return -1;
++
+ 		writeback_hex_str("", info.one_line_text, &vec.rsa.n);
+ 		obj = json_string(info.one_line_text);
+ 		json_object_set_new(json_info.json_write_group, "n", obj);
+@@ -474,7 +477,7 @@ fips_test_randomize_message(struct fips_val *msg, struct fips_val *rand)
+ 	uint16_t rv_len;
+ 
+ 	if (!msg->val || !rand->val || rand->len > RV_BUF_LEN
+-		|| msg->len > FIPS_TEST_JSON_BUF_LEN)
++		|| msg->len > (FIPS_TEST_JSON_BUF_LEN - 1))
+ 		return -EINVAL;
+ 
+ 	memset(rv, 0, sizeof(rv));
+@@ -503,7 +506,7 @@ fips_test_randomize_message(struct fips_val *msg, struct fips_val *rand)
+ 		m[i + j] ^= rv[j];
+ 
+ 	m[i + j] = ((uint8_t *)&rv_bitlen)[0];
+-	m[i + j + 1] = (((uint8_t *)&rv_bitlen)[1] >> 8) & 0xFF;
++	m[i + j + 1] = ((uint8_t *)&rv_bitlen)[1];
+ 
+ 	rte_free(msg->val);
+ 	msg->len = (rv_bitlen + m_bitlen + 16) / 8;
+diff --git a/dpdk/examples/ipsec-secgw/event_helper.h b/dpdk/examples/ipsec-secgw/event_helper.h
+index dfb81bfcf1..be635685b4 100644
+--- a/dpdk/examples/ipsec-secgw/event_helper.h
++++ b/dpdk/examples/ipsec-secgw/event_helper.h
+@@ -102,7 +102,7 @@ struct eh_event_link_info {
+ 		/**< Event port ID */
+ 	uint8_t eventq_id;
+ 		/**< Event queue to be linked to the port */
+-	uint8_t lcore_id;
++	uint32_t lcore_id;
+ 		/**< Lcore to be polling on this port */
+ };
+ 
+diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c
+index bf98d2618b..761b9cf396 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c
++++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.c
+@@ -220,8 +220,8 @@ static const char *cfgfile;
+ 
+ struct lcore_params {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
+-	uint8_t lcore_id;
++	uint16_t queue_id;
++	uint32_t lcore_id;
+ } __rte_cache_aligned;
+ 
+ static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
+@@ -568,7 +568,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
+ 
+ static inline void
+ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
+-	     uint8_t nb_pkts, uint16_t portid, void *ctx)
++	     uint16_t nb_pkts, uint16_t portid, void *ctx)
+ {
+ 	struct ipsec_traffic traffic;
+ 
+@@ -695,9 +695,7 @@ ipsec_poll_mode_worker(void)
+ 	struct rte_mbuf *pkts[MAX_PKT_BURST];
+ 	uint32_t lcore_id;
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc;
+-	int32_t i, nb_rx;
+-	uint16_t portid;
+-	uint8_t queueid;
++	uint16_t i, nb_rx, portid, queueid;
+ 	struct lcore_conf *qconf;
+ 	int32_t rc, socket_id;
+ 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+@@ -744,7 +742,7 @@ ipsec_poll_mode_worker(void)
+ 		portid = rxql[i].port_id;
+ 		queueid = rxql[i].queue_id;
+ 		RTE_LOG(INFO, IPSEC,
+-			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 			lcore_id, portid, queueid);
+ 	}
+ 
+@@ -789,8 +787,7 @@ int
+ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
+ {
+ 	uint16_t i;
+-	uint16_t portid;
+-	uint8_t queueid;
++	uint16_t portid, queueid;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		portid = lcore_params_array[i].port_id;
+@@ -810,7 +807,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
+ static int32_t
+ check_poll_mode_params(struct eh_conf *eh_conf)
+ {
+-	uint8_t lcore;
++	uint32_t lcore;
+ 	uint16_t portid;
+ 	uint16_t i;
+ 	int32_t socket_id;
+@@ -829,13 +826,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		lcore = lcore_params[i].lcore_id;
+ 		if (!rte_lcore_is_enabled(lcore)) {
+-			printf("error: lcore %hhu is not enabled in "
++			printf("error: lcore %u is not enabled in "
+ 				"lcore mask\n", lcore);
+ 			return -1;
+ 		}
+ 		socket_id = rte_lcore_to_socket_id(lcore);
+ 		if (socket_id != 0 && numa_on == 0) {
+-			printf("warning: lcore %hhu is on socket %d "
++			printf("warning: lcore %u is on socket %d "
+ 				"with numa off\n",
+ 				lcore, socket_id);
+ 		}
+@@ -852,7 +849,7 @@ check_poll_mode_params(struct eh_conf *eh_conf)
+ 	return 0;
+ }
+ 
+-static uint8_t
++static uint16_t
+ get_port_nb_rx_queues(const uint16_t port)
+ {
+ 	int32_t queue = -1;
+@@ -863,14 +860,14 @@ get_port_nb_rx_queues(const uint16_t port)
+ 				lcore_params[i].queue_id > queue)
+ 			queue = lcore_params[i].queue_id;
+ 	}
+-	return (uint8_t)(++queue);
++	return (uint16_t)(++queue);
+ }
+ 
+ static int32_t
+ init_lcore_rx_queues(void)
+ {
+ 	uint16_t i, nb_rx_queue;
+-	uint8_t lcore;
++	uint32_t lcore;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		lcore = lcore_params[i].lcore_id;
+@@ -1051,6 +1048,11 @@ parse_config(const char *q_arg)
+ 	char *str_fld[_NUM_FLD];
+ 	int32_t i;
+ 	uint32_t size;
++	uint32_t max_fld[_NUM_FLD] = {
++		RTE_MAX_ETHPORTS,
++		RTE_MAX_QUEUES_PER_PORT,
++		RTE_MAX_LCORE
++	};
+ 
+ 	nb_lcore_params = 0;
+ 
+@@ -1071,7 +1073,7 @@ parse_config(const char *q_arg)
+ 		for (i = 0; i < _NUM_FLD; i++) {
+ 			errno = 0;
+ 			int_fld[i] = strtoul(str_fld[i], &end, 0);
+-			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
++			if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
+ 				return -1;
+ 		}
+ 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
+@@ -1080,11 +1082,11 @@ parse_config(const char *q_arg)
+ 			return -1;
+ 		}
+ 		lcore_params_array[nb_lcore_params].port_id =
+-			(uint8_t)int_fld[FLD_PORT];
++			(uint16_t)int_fld[FLD_PORT];
+ 		lcore_params_array[nb_lcore_params].queue_id =
+-			(uint8_t)int_fld[FLD_QUEUE];
++			(uint16_t)int_fld[FLD_QUEUE];
+ 		lcore_params_array[nb_lcore_params].lcore_id =
+-			(uint8_t)int_fld[FLD_LCORE];
++			(uint32_t)int_fld[FLD_LCORE];
+ 		++nb_lcore_params;
+ 	}
+ 	lcore_params = lcore_params_array;
+@@ -1920,7 +1922,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
+ 	struct rte_eth_dev_info dev_info;
+ 	struct rte_eth_txconf *txconf;
+ 	uint16_t nb_tx_queue, nb_rx_queue;
+-	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
++	uint16_t tx_queueid, rx_queueid, queue;
++	uint32_t lcore_id;
+ 	int32_t ret, socket_id;
+ 	struct lcore_conf *qconf;
+ 	struct rte_ether_addr ethaddr;
+@@ -2094,10 +2097,10 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
+ 
+ 			/* Register Rx callback if ptypes are not supported */
+ 			if (!ptype_supported &&
+-			    !rte_eth_add_rx_callback(portid, queue,
++			    !rte_eth_add_rx_callback(portid, rx_queueid,
+ 						     parse_ptype_cb, NULL)) {
+ 				printf("Failed to add rx callback: port=%d, "
+-				       "queue=%d\n", portid, queue);
++				       "rx_queueid=%d\n", portid, rx_queueid);
+ 			}
+ 
+ 
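[Editor's note] The parse_config() change above replaces the blanket "> 255" check with a per-field ceiling (RTE_MAX_ETHPORTS, RTE_MAX_QUEUES_PER_PORT, RTE_MAX_LCORE), which is what allows queue and lcore ids above 255. A standalone sketch of the per-field bounds check; the numeric limits are placeholders, not the DPDK values:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum { FLD_PORT, FLD_QUEUE, FLD_LCORE, _NUM_FLD };

    /* placeholders for RTE_MAX_ETHPORTS, RTE_MAX_QUEUES_PER_PORT,
     * RTE_MAX_LCORE */
    static const unsigned long max_fld[_NUM_FLD] = { 32, 1024, 128 };

    static int parse_triplet(char *const str_fld[_NUM_FLD],
                             unsigned long out[_NUM_FLD])
    {
        for (int i = 0; i < _NUM_FLD; i++) {
            char *end;

            errno = 0;
            out[i] = strtoul(str_fld[i], &end, 0);
            /* each field gets its own ceiling instead of one cap of 255 */
            if (errno != 0 || end == str_fld[i] || *end != '\0' ||
                out[i] > max_fld[i])
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        char *fld[_NUM_FLD] = { "0", "3", "64" };
        unsigned long v[_NUM_FLD];

        if (parse_triplet(fld, v) == 0)
            printf("port=%lu queue=%lu lcore=%lu\n",
                   v[FLD_PORT], v[FLD_QUEUE], v[FLD_LCORE]);
        return 0;
    }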
+diff --git a/dpdk/examples/ipsec-secgw/ipsec.c b/dpdk/examples/ipsec-secgw/ipsec.c
+index f5cec4a928..b52b0ffc3d 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec.c
++++ b/dpdk/examples/ipsec-secgw/ipsec.c
+@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
+ 			continue;
+ 
+ 		/* Looking for cryptodev, which can handle this SA */
+-		key.lcore_id = (uint8_t)lcore_id;
++		key.lcore_id = lcore_id;
+ 		key.cipher_algo = (uint8_t)sa->cipher_algo;
+ 		key.auth_algo = (uint8_t)sa->auth_algo;
+ 		key.aead_algo = (uint8_t)sa->aead_algo;
+@@ -288,10 +288,21 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
+ 		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
+ 			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
+ 		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
+-			RTE_LOG(ERR, IPSEC,
+-					"SA mapping to multiple cryptodevs is "
+-					"not supported!");
+-			return -EINVAL;
++			struct rte_cryptodev_info dev_info_1, dev_info_2;
++			rte_cryptodev_info_get(cdev_id, &dev_info_1);
++			rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
++					&dev_info_2);
++			if (dev_info_1.driver_id == dev_info_2.driver_id) {
++				RTE_LOG(WARNING, IPSEC,
++					"SA mapped to multiple cryptodevs for SPI %d\n",
++					sa->spi);
++
++			} else {
++				RTE_LOG(WARNING, IPSEC,
++					"SA mapped to multiple cryptodevs of different types for SPI %d\n",
++					sa->spi);
++
++			}
+ 		}
+ 
+ 		/* Store per core queue pair information */
+@@ -908,6 +919,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ 			continue;
+ 		}
+ 
++		RTE_ASSERT(sa->cqp[ipsec_ctx->lcore_id] != NULL);
+ 		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
+ 	}
+ }
+diff --git a/dpdk/examples/ipsec-secgw/ipsec.h b/dpdk/examples/ipsec-secgw/ipsec.h
+index bdcada1c40..d4ecfdf08d 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec.h
++++ b/dpdk/examples/ipsec-secgw/ipsec.h
+@@ -117,7 +117,7 @@ struct ipsec_sa {
+ 	uint32_t spi;
+ 	struct cdev_qp *cqp[RTE_MAX_LCORE];
+ 	uint64_t seq;
+-	uint32_t salt;
++	rte_be32_t salt;
+ 	uint32_t fallback_sessions;
+ 	enum rte_crypto_cipher_algorithm cipher_algo;
+ 	enum rte_crypto_auth_algorithm auth_algo;
+@@ -256,11 +256,11 @@ extern struct offloads tx_offloads;
+  * (hash key calculation reads 8 bytes if this struct is size 5 bytes).
+  */
+ struct cdev_key {
+-	uint16_t lcore_id;
++	uint32_t lcore_id;
+ 	uint8_t cipher_algo;
+ 	uint8_t auth_algo;
+ 	uint8_t aead_algo;
+-	uint8_t padding[3]; /* padding to 8-byte size should be zeroed */
++	uint8_t padding; /* padding to 8-byte size should be zeroed */
+ };
+ 
+ struct socket_ctx {
+@@ -285,7 +285,7 @@ struct cnt_blk {
+ 
+ struct lcore_rx_queue {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
++	uint16_t queue_id;
+ 	void *sec_ctx;
+ } __rte_cache_aligned;
+ 
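[Editor's note] Widening cdev_key.lcore_id to 32 bits shrinks the explicit padding from three bytes to one so the struct stays exactly 8 bytes; the surrounding comment explains why the padding must be zeroed (the hash reads the full 8 bytes). A compile-time check of that invariant, sketched with C11 static_assert:

    #include <assert.h>
    #include <stdint.h>

    struct cdev_key {
        uint32_t lcore_id;
        uint8_t  cipher_algo;
        uint8_t  auth_algo;
        uint8_t  aead_algo;
        uint8_t  padding; /* must be zeroed: the hash reads all 8 bytes */
    };

    static_assert(sizeof(struct cdev_key) == 8, "hash key must stay 8 bytes");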
+diff --git a/dpdk/examples/ipsec-secgw/ipsec_worker.c b/dpdk/examples/ipsec-secgw/ipsec_worker.c
+index 8d122e8519..c9c43ebd2b 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec_worker.c
++++ b/dpdk/examples/ipsec-secgw/ipsec_worker.c
+@@ -1598,8 +1598,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
+ 	int32_t socket_id;
+ 	uint32_t lcore_id;
+ 	int32_t i, nb_rx;
+-	uint16_t portid;
+-	uint8_t queueid;
++	uint16_t portid, queueid;
+ 
+ 	prev_tsc = 0;
+ 	lcore_id = rte_lcore_id();
+@@ -1633,7 +1632,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
+ 		portid = rxql[i].port_id;
+ 		queueid = rxql[i].queue_id;
+ 		RTE_LOG(INFO, IPSEC,
+-			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 			lcore_id, portid, queueid);
+ 	}
+ 
+@@ -1729,8 +1728,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
+ 	uint32_t i, nb_rx, j;
+ 	int32_t socket_id;
+ 	uint32_t lcore_id;
+-	uint16_t portid;
+-	uint8_t queueid;
++	uint16_t portid, queueid;
+ 
+ 	prev_tsc = 0;
+ 	lcore_id = rte_lcore_id();
+@@ -1764,7 +1762,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
+ 		portid = rxql[i].port_id;
+ 		queueid = rxql[i].queue_id;
+ 		RTE_LOG(INFO, IPSEC,
+-			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 			lcore_id, portid, queueid);
+ 	}
+ 
+diff --git a/dpdk/examples/ipsec-secgw/ipsec_worker.h b/dpdk/examples/ipsec-secgw/ipsec_worker.h
+index ac980b8bcf..8e937fda3e 100644
+--- a/dpdk/examples/ipsec-secgw/ipsec_worker.h
++++ b/dpdk/examples/ipsec-secgw/ipsec_worker.h
+@@ -469,7 +469,7 @@ fail:
+ 
+ static __rte_always_inline void
+ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
+-	    uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
++	    uint32_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
+ {
+ 	uint32_t hop[MAX_PKT_BURST * 2];
+ 	uint32_t dst_ip[MAX_PKT_BURST * 2];
+@@ -557,7 +557,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
+ }
+ 
+ static __rte_always_inline void
+-route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
++route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts)
+ {
+ 	int32_t hop[MAX_PKT_BURST * 2];
+ 	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+diff --git a/dpdk/examples/ipsec-secgw/parser.c b/dpdk/examples/ipsec-secgw/parser.c
+index 98f8176651..2bd6df335b 100644
+--- a/dpdk/examples/ipsec-secgw/parser.c
++++ b/dpdk/examples/ipsec-secgw/parser.c
+@@ -388,7 +388,7 @@ cfg_parse_neigh(void *parsed_result, __rte_unused struct cmdline *cl,
+ 	rc = parse_mac(res->mac, &mac);
+ 	APP_CHECK(rc == 0, st, "invalid ether addr:%s", res->mac);
+ 	rc = add_dst_ethaddr(res->port, &mac);
+-	APP_CHECK(rc == 0, st, "invalid port numer:%hu", res->port);
++	APP_CHECK(rc == 0, st, "invalid port number:%hu", res->port);
+ 	if (st->status < 0)
+ 		return;
+ }
+diff --git a/dpdk/examples/l3fwd-graph/main.c b/dpdk/examples/l3fwd-graph/main.c
+index 96cb1c81ff..4ded69b4a0 100644
+--- a/dpdk/examples/l3fwd-graph/main.c
++++ b/dpdk/examples/l3fwd-graph/main.c
+@@ -90,7 +90,7 @@ static int pcap_trace_enable;
+ 
+ struct lcore_rx_queue {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
++	uint16_t queue_id;
+ 	char node_name[RTE_NODE_NAMESIZE];
+ };
+ 
+@@ -110,8 +110,8 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+ 
+ struct lcore_params {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
+-	uint8_t lcore_id;
++	uint16_t queue_id;
++	uint32_t lcore_id;
+ } __rte_cache_aligned;
+ 
+ static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
+@@ -205,19 +205,19 @@ check_worker_model_params(void)
+ static int
+ check_lcore_params(void)
+ {
+-	uint8_t queue, lcore;
++	uint16_t queue, i;
+ 	int socketid;
+-	uint16_t i;
++	uint32_t lcore;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		queue = lcore_params[i].queue_id;
+ 		if (queue >= MAX_RX_QUEUE_PER_PORT) {
+-			printf("Invalid queue number: %hhu\n", queue);
++			printf("Invalid queue number: %" PRIu16 "\n", queue);
+ 			return -1;
+ 		}
+ 		lcore = lcore_params[i].lcore_id;
+ 		if (!rte_lcore_is_enabled(lcore)) {
+-			printf("Error: lcore %hhu is not enabled in lcore mask\n",
++			printf("Error: lcore %u is not enabled in lcore mask\n",
+ 			       lcore);
+ 			return -1;
+ 		}
+@@ -228,7 +228,7 @@ check_lcore_params(void)
+ 		}
+ 		socketid = rte_lcore_to_socket_id(lcore);
+ 		if ((socketid != 0) && (numa_on == 0)) {
+-			printf("Warning: lcore %hhu is on socket %d with numa off\n",
++			printf("Warning: lcore %u is on socket %d with numa off\n",
+ 			       lcore, socketid);
+ 		}
+ 	}
+@@ -257,7 +257,7 @@ check_port_config(void)
+ 	return 0;
+ }
+ 
+-static uint8_t
++static uint16_t
+ get_port_n_rx_queues(const uint16_t port)
+ {
+ 	int queue = -1;
+@@ -275,14 +275,14 @@ get_port_n_rx_queues(const uint16_t port)
+ 		}
+ 	}
+ 
+-	return (uint8_t)(++queue);
++	return (uint16_t)(++queue);
+ }
+ 
+ static int
+ init_lcore_rx_queues(void)
+ {
+ 	uint16_t i, nb_rx_queue;
+-	uint8_t lcore;
++	uint32_t lcore;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		lcore = lcore_params[i].lcore_id;
+@@ -290,7 +290,7 @@ init_lcore_rx_queues(void)
+ 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
+ 			printf("Error: too many queues (%u) for lcore: %u\n",
+ 			       (unsigned int)nb_rx_queue + 1,
+-			       (unsigned int)lcore);
++			       lcore);
+ 			return -1;
+ 		}
+ 
+@@ -448,11 +448,11 @@ parse_config(const char *q_arg)
+ 		}
+ 
+ 		lcore_params_array[nb_lcore_params].port_id =
+-			(uint8_t)int_fld[FLD_PORT];
++			(uint16_t)int_fld[FLD_PORT];
+ 		lcore_params_array[nb_lcore_params].queue_id =
+-			(uint8_t)int_fld[FLD_QUEUE];
++			(uint16_t)int_fld[FLD_QUEUE];
+ 		lcore_params_array[nb_lcore_params].lcore_id =
+-			(uint8_t)int_fld[FLD_LCORE];
++			(uint32_t)int_fld[FLD_LCORE];
+ 		++nb_lcore_params;
+ 	}
+ 	lcore_params = lcore_params_array;
+@@ -1011,7 +1011,8 @@ main(int argc, char **argv)
+ 		"ethdev_tx-*",
+ 		"pkt_drop",
+ 	};
+-	uint8_t nb_rx_queue, queue, socketid;
++	uint8_t socketid;
++	uint16_t nb_rx_queue, queue;
+ 	struct rte_graph_param graph_conf;
+ 	struct rte_eth_dev_info dev_info;
+ 	uint32_t nb_ports, nb_conf = 0;
+diff --git a/dpdk/examples/l3fwd-power/main.c b/dpdk/examples/l3fwd-power/main.c
+index 9c0dcd343b..996ac6dc56 100644
+--- a/dpdk/examples/l3fwd-power/main.c
++++ b/dpdk/examples/l3fwd-power/main.c
+@@ -213,7 +213,7 @@ enum freq_scale_hint_t
+ 
+ struct lcore_rx_queue {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
++	uint16_t queue_id;
+ 	enum freq_scale_hint_t freq_up_hint;
+ 	uint32_t zero_rx_packet_count;
+ 	uint32_t idle_hint;
+@@ -837,7 +837,7 @@ sleep_until_rx_interrupt(int num, int lcore)
+ 	struct rte_epoll_event event[num];
+ 	int n, i;
+ 	uint16_t port_id;
+-	uint8_t queue_id;
++	uint16_t queue_id;
+ 	void *data;
+ 
+ 	if (status[lcore].wakeup) {
+@@ -849,9 +849,9 @@ sleep_until_rx_interrupt(int num, int lcore)
+ 	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10);
+ 	for (i = 0; i < n; i++) {
+ 		data = event[i].epdata.data;
+-		port_id = ((uintptr_t)data) >> CHAR_BIT;
++		port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT);
+ 		queue_id = ((uintptr_t)data) &
+-			RTE_LEN2MASK(CHAR_BIT, uint8_t);
++			RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t);
+ 		RTE_LOG(INFO, L3FWD_POWER,
+ 			"lcore %u is waked up from rx interrupt on"
+ 			" port %d queue %d\n",
+@@ -866,7 +866,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
+ {
+ 	int i;
+ 	struct lcore_rx_queue *rx_queue;
+-	uint8_t queue_id;
++	uint16_t queue_id;
+ 	uint16_t port_id;
+ 
+ 	for (i = 0; i < qconf->n_rx_queue; ++i) {
+@@ -886,7 +886,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
+ static int event_register(struct lcore_conf *qconf)
+ {
+ 	struct lcore_rx_queue *rx_queue;
+-	uint8_t queueid;
++	uint16_t queueid;
+ 	uint16_t portid;
+ 	uint32_t data;
+ 	int ret;
+@@ -896,7 +896,7 @@ static int event_register(struct lcore_conf *qconf)
+ 		rx_queue = &(qconf->rx_queue_list[i]);
+ 		portid = rx_queue->port_id;
+ 		queueid = rx_queue->queue_id;
+-		data = portid << CHAR_BIT | queueid;
++		data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
+ 
+ 		ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
+ 						RTE_EPOLL_PER_THREAD,
+@@ -916,8 +916,7 @@ static int main_intr_loop(__rte_unused void *dummy)
+ 	unsigned int lcore_id;
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc;
+ 	int i, j, nb_rx;
+-	uint8_t queueid;
+-	uint16_t portid;
++	uint16_t portid, queueid;
+ 	struct lcore_conf *qconf;
+ 	struct lcore_rx_queue *rx_queue;
+ 	uint32_t lcore_rx_idle_count = 0;
+@@ -945,7 +944,7 @@ static int main_intr_loop(__rte_unused void *dummy)
+ 		portid = qconf->rx_queue_list[i].port_id;
+ 		queueid = qconf->rx_queue_list[i].queue_id;
+ 		RTE_LOG(INFO, L3FWD_POWER,
+-				" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++				" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 				lcore_id, portid, queueid);
+ 	}
+ 
+@@ -1082,8 +1081,7 @@ main_telemetry_loop(__rte_unused void *dummy)
+ 	unsigned int lcore_id;
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc;
+ 	int i, j, nb_rx;
+-	uint8_t queueid;
+-	uint16_t portid;
++	uint16_t portid, queueid;
+ 	struct lcore_conf *qconf;
+ 	struct lcore_rx_queue *rx_queue;
+ 	uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0};
+@@ -1113,7 +1111,7 @@ main_telemetry_loop(__rte_unused void *dummy)
+ 		portid = qconf->rx_queue_list[i].port_id;
+ 		queueid = qconf->rx_queue_list[i].queue_id;
+ 		RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
+-			"rxqueueid=%hhu\n", lcore_id, portid, queueid);
++			"rxqueueid=%" PRIu16 "\n", lcore_id, portid, queueid);
+ 	}
+ 
+ 	while (!is_done()) {
+@@ -1204,8 +1202,7 @@ main_legacy_loop(__rte_unused void *dummy)
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
+ 	uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
+ 	int i, j, nb_rx;
+-	uint8_t queueid;
+-	uint16_t portid;
++	uint16_t portid, queueid;
+ 	struct lcore_conf *qconf;
+ 	struct lcore_rx_queue *rx_queue;
+ 	enum freq_scale_hint_t lcore_scaleup_hint;
+@@ -1233,7 +1230,7 @@ main_legacy_loop(__rte_unused void *dummy)
+ 		portid = qconf->rx_queue_list[i].port_id;
+ 		queueid = qconf->rx_queue_list[i].queue_id;
+ 		RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
+-			"rxqueueid=%hhu\n", lcore_id, portid, queueid);
++			"rxqueueid=%" PRIu16 "\n", lcore_id, portid, queueid);
+ 	}
+ 
+ 	/* add into event wait list */
+@@ -1398,25 +1395,25 @@ start_rx:
+ static int
+ check_lcore_params(void)
+ {
+-	uint8_t queue, lcore;
+-	uint16_t i;
++	uint16_t queue, i;
++	uint32_t lcore;
+ 	int socketid;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		queue = lcore_params[i].queue_id;
+ 		if (queue >= MAX_RX_QUEUE_PER_PORT) {
+-			printf("invalid queue number: %hhu\n", queue);
++			printf("invalid queue number: %" PRIu16 "\n", queue);
+ 			return -1;
+ 		}
+ 		lcore = lcore_params[i].lcore_id;
+ 		if (!rte_lcore_is_enabled(lcore)) {
+-			printf("error: lcore %hhu is not enabled in lcore "
++			printf("error: lcore %u is not enabled in lcore "
+ 							"mask\n", lcore);
+ 			return -1;
+ 		}
+ 		if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
+ 							(numa_on == 0)) {
+-			printf("warning: lcore %hhu is on socket %d with numa "
++			printf("warning: lcore %u is on socket %d with numa "
+ 						"off\n", lcore, socketid);
+ 		}
+ 		if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
+@@ -1450,7 +1447,7 @@ check_port_config(void)
+ 	return 0;
+ }
+ 
+-static uint8_t
++static uint16_t
+ get_port_n_rx_queues(const uint16_t port)
+ {
+ 	int queue = -1;
+@@ -1461,21 +1458,21 @@ get_port_n_rx_queues(const uint16_t port)
+ 				lcore_params[i].queue_id > queue)
+ 			queue = lcore_params[i].queue_id;
+ 	}
+-	return (uint8_t)(++queue);
++	return (uint16_t)(++queue);
+ }
+ 
+ static int
+ init_lcore_rx_queues(void)
+ {
+ 	uint16_t i, nb_rx_queue;
+-	uint8_t lcore;
++	uint32_t lcore;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		lcore = lcore_params[i].lcore_id;
+ 		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
+ 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
+ 			printf("error: too many queues (%u) for lcore: %u\n",
+-				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
++				(unsigned int)nb_rx_queue + 1, lcore);
+ 			return -1;
+ 		} else {
+ 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
+@@ -1660,6 +1657,11 @@ parse_config(const char *q_arg)
+ 	char *str_fld[_NUM_FLD];
+ 	int i;
+ 	unsigned size;
++	unsigned int max_fld[_NUM_FLD] = {
++		RTE_MAX_ETHPORTS,
++		RTE_MAX_QUEUES_PER_PORT,
++		RTE_MAX_LCORE
++	};
+ 
+ 	nb_lcore_params = 0;
+ 
+@@ -1679,8 +1681,7 @@ parse_config(const char *q_arg)
+ 		for (i = 0; i < _NUM_FLD; i++){
+ 			errno = 0;
+ 			int_fld[i] = strtoul(str_fld[i], &end, 0);
+-			if (errno != 0 || end == str_fld[i] || int_fld[i] >
+-									255)
++			if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
+ 				return -1;
+ 		}
+ 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
+@@ -1689,11 +1690,11 @@ parse_config(const char *q_arg)
+ 			return -1;
+ 		}
+ 		lcore_params_array[nb_lcore_params].port_id =
+-				(uint8_t)int_fld[FLD_PORT];
++				(uint16_t)int_fld[FLD_PORT];
+ 		lcore_params_array[nb_lcore_params].queue_id =
+-				(uint8_t)int_fld[FLD_QUEUE];
++				(uint16_t)int_fld[FLD_QUEUE];
+ 		lcore_params_array[nb_lcore_params].lcore_id =
+-				(uint8_t)int_fld[FLD_LCORE];
++				(uint32_t)int_fld[FLD_LCORE];
+ 		++nb_lcore_params;
+ 	}
+ 	lcore_params = lcore_params_array;
+@@ -2500,8 +2501,8 @@ main(int argc, char **argv)
+ 	uint64_t hz;
+ 	uint32_t n_tx_queue, nb_lcores;
+ 	uint32_t dev_rxq_num, dev_txq_num;
+-	uint8_t nb_rx_queue, queue, socketid;
+-	uint16_t portid;
++	uint8_t socketid;
++	uint16_t portid, nb_rx_queue, queue;
+ 	const char *ptr_strings[NUM_TELSTATS];
+ 
+ 	/* init EAL */
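The l3fwd-power hunks above widen queue ids from 8 to 16 bits, which also changes how a port/queue pair is packed into the epoll user data word: the port id must now be shifted past a full 16-bit queue id. Below is a minimal standalone sketch of that packing (mine, not part of the patch; it assumes the packed value lives in a container of at least 32 bits):

    #include <inttypes.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t portid = 3, queueid = 257;  /* queue ids may now exceed 255 */

        /* pack both 16-bit ids into one 32-bit word, port in the high half */
        uint32_t data = (uint32_t)portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;

        /* unpack and print with fixed-width format macros, as the patch does */
        printf("port=%" PRIu16 " queue=%" PRIu16 "\n",
               (uint16_t)(data >> 16), (uint16_t)(data & 0xffff));
        return 0;
    }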
+diff --git a/dpdk/examples/l3fwd-power/main.h b/dpdk/examples/l3fwd-power/main.h
+index 258de98f5b..194bd82102 100644
+--- a/dpdk/examples/l3fwd-power/main.h
++++ b/dpdk/examples/l3fwd-power/main.h
+@@ -9,8 +9,8 @@
+ #define MAX_LCORE_PARAMS 1024
+ struct lcore_params {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
+-	uint8_t lcore_id;
++	uint16_t queue_id;
++	uint32_t lcore_id;
+ } __rte_cache_aligned;
+ 
+ extern struct lcore_params *lcore_params;
+diff --git a/dpdk/examples/l3fwd-power/perf_core.c b/dpdk/examples/l3fwd-power/perf_core.c
+index 41ef6d0c9a..e4bdb62121 100644
+--- a/dpdk/examples/l3fwd-power/perf_core.c
++++ b/dpdk/examples/l3fwd-power/perf_core.c
+@@ -22,9 +22,9 @@ static uint16_t nb_hp_lcores;
+ 
+ struct perf_lcore_params {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
++	uint16_t queue_id;
+ 	uint8_t high_perf;
+-	uint8_t lcore_idx;
++	uint32_t lcore_idx;
+ } __rte_cache_aligned;
+ 
+ static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
+@@ -132,6 +132,12 @@ parse_perf_config(const char *q_arg)
+ 	char *str_fld[_NUM_FLD];
+ 	int i;
+ 	unsigned int size;
++	unsigned int max_fld[_NUM_FLD] = {
++		RTE_MAX_ETHPORTS,
++		RTE_MAX_QUEUES_PER_PORT,
++		255,
++		RTE_MAX_LCORE
++	};
+ 
+ 	nb_prf_lc_prms = 0;
+ 
+@@ -152,7 +158,7 @@
+ 		for (i = 0; i < _NUM_FLD; i++) {
+ 			errno = 0;
+ 			int_fld[i] = strtoul(str_fld[i], &end, 0);
+-			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
++			if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
+ 				return -1;
+ 		}
+ 		if (nb_prf_lc_prms >= MAX_LCORE_PARAMS) {
+@@ -161,13 +167,13 @@
+ 			return -1;
+ 		}
+ 		prf_lc_prms[nb_prf_lc_prms].port_id =
+-				(uint8_t)int_fld[FLD_PORT];
++				(uint16_t)int_fld[FLD_PORT];
+ 		prf_lc_prms[nb_prf_lc_prms].queue_id =
+-				(uint8_t)int_fld[FLD_QUEUE];
++				(uint16_t)int_fld[FLD_QUEUE];
+ 		prf_lc_prms[nb_prf_lc_prms].high_perf =
+ 				!!(uint8_t)int_fld[FLD_LCORE_HP];
+ 		prf_lc_prms[nb_prf_lc_prms].lcore_idx =
+-				(uint8_t)int_fld[FLD_LCORE_IDX];
++				(uint32_t)int_fld[FLD_LCORE_IDX];
+ 		++nb_prf_lc_prms;
+ 	}
+ 
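Both parse_config() and parse_perf_config() now validate each comma-separated field against its own upper bound instead of a blanket 255. A self-contained sketch of that per-field bounds pattern (the field names and limits here are illustrative stand-ins, not the DPDK constants):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum { FLD_PORT, FLD_QUEUE, FLD_LCORE, NUM_FLD };

    /* hypothetical limits standing in for RTE_MAX_ETHPORTS etc. */
    static const unsigned long max_fld[NUM_FLD] = { 32, 1024, 128 };

    static int parse_fields(char *str_fld[NUM_FLD], unsigned long int_fld[NUM_FLD])
    {
        for (int i = 0; i < NUM_FLD; i++) {
            char *end = NULL;

            errno = 0;
            int_fld[i] = strtoul(str_fld[i], &end, 0);
            /* reject parse errors, trailing junk and out-of-range values */
            if (errno != 0 || end == str_fld[i] || *end != '\0' ||
                int_fld[i] > max_fld[i])
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        char *fld[NUM_FLD] = { "0", "300", "2" };
        unsigned long val[NUM_FLD];

        printf("parse: %d\n", parse_fields(fld, val)); /* 0: 300 <= 1024 */
        return 0;
    }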
+diff --git a/dpdk/examples/l3fwd/l3fwd.h b/dpdk/examples/l3fwd/l3fwd.h
+index e7ae0e5834..12c264cb4c 100644
+--- a/dpdk/examples/l3fwd/l3fwd.h
++++ b/dpdk/examples/l3fwd/l3fwd.h
+@@ -74,7 +74,7 @@ struct mbuf_table {
+ 
+ struct lcore_rx_queue {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
++	uint16_t queue_id;
+ } __rte_cache_aligned;
+ 
+ struct lcore_conf {
+diff --git a/dpdk/examples/l3fwd/l3fwd_acl.c b/dpdk/examples/l3fwd/l3fwd_acl.c
+index 401692bcec..31798ccb10 100644
+--- a/dpdk/examples/l3fwd/l3fwd_acl.c
++++ b/dpdk/examples/l3fwd/l3fwd_acl.c
+@@ -962,8 +962,6 @@ setup_acl(const int socket_id)
+ 	acl_log("IPv6 ACL entries %u:\n", acl_num_ipv6);
+ 	dump_ipv6_rules((struct acl6_rule *)acl_base_ipv6, acl_num_ipv6, 1);
+ 
+-	memset(&acl_config, 0, sizeof(acl_config));
+-
+ 	/* Check sockets a context should be created on */
+ 	if (socket_id >= NB_SOCKETS) {
+ 		acl_log("Socket %d is out "
+@@ -973,6 +971,9 @@ setup_acl(const int socket_id)
+ 		return;
+ 	}
+ 
++	rte_acl_free(acl_config.acx_ipv4[socket_id]);
++	rte_acl_free(acl_config.acx_ipv6[socket_id]);
++
+ 	acl_config.acx_ipv4[socket_id] = app_acl_init(route_base_ipv4,
+ 		acl_base_ipv4, route_num_ipv4, acl_num_ipv4,
+ 		0, socket_id);
+@@ -997,7 +998,7 @@ acl_main_loop(__rte_unused void *dummy)
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc;
+ 	int i, nb_rx;
+ 	uint16_t portid;
+-	uint8_t queueid;
++	uint16_t queueid;
+ 	struct lcore_conf *qconf;
+ 	int socketid;
+ 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+@@ -1020,7 +1021,7 @@ acl_main_loop(__rte_unused void *dummy)
+ 		portid = qconf->rx_queue_list[i].port_id;
+ 		queueid = qconf->rx_queue_list[i].queue_id;
+ 		RTE_LOG(INFO, L3FWD,
+-			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 			lcore_id, portid, queueid);
+ 	}
+ 
+@@ -1073,9 +1074,9 @@ acl_main_loop(__rte_unused void *dummy)
+ 
+ 					l3fwd_acl_send_packets(
+ 						qconf,
+-						pkts_burst,
++						acl_search.m_ipv4,
+ 						acl_search.res_ipv4,
+-						nb_rx);
++						acl_search.num_ipv4);
+ 				}
+ 
+ 				if (acl_search.num_ipv6) {
+@@ -1088,9 +1089,9 @@ acl_main_loop(__rte_unused void *dummy)
+ 
+ 					l3fwd_acl_send_packets(
+ 						qconf,
+-						pkts_burst,
++						acl_search.m_ipv6,
+ 						acl_search.res_ipv6,
+-						nb_rx);
++						acl_search.num_ipv6);
+ 				}
+ 			}
+ 		}
+diff --git a/dpdk/examples/l3fwd/l3fwd_em.c b/dpdk/examples/l3fwd/l3fwd_em.c
+index 40e102b38a..f18ac0048b 100644
+--- a/dpdk/examples/l3fwd/l3fwd_em.c
++++ b/dpdk/examples/l3fwd/l3fwd_em.c
+@@ -586,7 +586,7 @@ em_main_loop(__rte_unused void *dummy)
+ 	unsigned lcore_id;
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc;
+ 	int i, nb_rx;
+-	uint8_t queueid;
++	uint16_t queueid;
+ 	uint16_t portid;
+ 	struct lcore_conf *qconf;
+ 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+@@ -609,7 +609,7 @@ em_main_loop(__rte_unused void *dummy)
+ 		portid = qconf->rx_queue_list[i].port_id;
+ 		queueid = qconf->rx_queue_list[i].queue_id;
+ 		RTE_LOG(INFO, L3FWD,
+-			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 			lcore_id, portid, queueid);
+ 	}
+ 
+diff --git a/dpdk/examples/l3fwd/l3fwd_event.h b/dpdk/examples/l3fwd/l3fwd_event.h
+index 9aad358003..c6a4a89127 100644
+--- a/dpdk/examples/l3fwd/l3fwd_event.h
++++ b/dpdk/examples/l3fwd/l3fwd_event.h
+@@ -78,8 +78,8 @@ struct l3fwd_event_resources {
+ 	uint8_t deq_depth;
+ 	uint8_t has_burst;
+ 	uint8_t enabled;
+-	uint8_t eth_rx_queues;
+ 	uint8_t vector_enabled;
++	uint16_t eth_rx_queues;
+ 	uint16_t vector_size;
+ 	uint64_t vector_tmo_ns;
+ };
+diff --git a/dpdk/examples/l3fwd/l3fwd_fib.c b/dpdk/examples/l3fwd/l3fwd_fib.c
+index 6a21984415..f38b19af3f 100644
+--- a/dpdk/examples/l3fwd/l3fwd_fib.c
++++ b/dpdk/examples/l3fwd/l3fwd_fib.c
+@@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy)
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc;
+ 	int i, nb_rx;
+ 	uint16_t portid;
+-	uint8_t queueid;
++	uint16_t queueid;
+ 	struct lcore_conf *qconf;
+ 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+ 			US_PER_S * BURST_TX_DRAIN_US;
+@@ -208,7 +208,7 @@ fib_main_loop(__rte_unused void *dummy)
+ 		portid = qconf->rx_queue_list[i].port_id;
+ 		queueid = qconf->rx_queue_list[i].queue_id;
+ 		RTE_LOG(INFO, L3FWD,
+-				" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++				" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 				lcore_id, portid, queueid);
+ 	}
+ 
+diff --git a/dpdk/examples/l3fwd/l3fwd_lpm.c b/dpdk/examples/l3fwd/l3fwd_lpm.c
+index a484a33089..e8fd95aae9 100644
+--- a/dpdk/examples/l3fwd/l3fwd_lpm.c
++++ b/dpdk/examples/l3fwd/l3fwd_lpm.c
+@@ -148,8 +148,7 @@ lpm_main_loop(__rte_unused void *dummy)
+ 	unsigned lcore_id;
+ 	uint64_t prev_tsc, diff_tsc, cur_tsc;
+ 	int i, nb_rx;
+-	uint16_t portid;
+-	uint8_t queueid;
++	uint16_t portid, queueid;
+ 	struct lcore_conf *qconf;
+ 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+ 		US_PER_S * BURST_TX_DRAIN_US;
+@@ -171,7 +170,7 @@ lpm_main_loop(__rte_unused void *dummy)
+ 		portid = qconf->rx_queue_list[i].port_id;
+ 		queueid = qconf->rx_queue_list[i].queue_id;
+ 		RTE_LOG(INFO, L3FWD,
+-			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
++			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
+ 			lcore_id, portid, queueid);
+ 	}
+ 
+diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c
+index 3bf28aec0c..a239869ada 100644
+--- a/dpdk/examples/l3fwd/main.c
++++ b/dpdk/examples/l3fwd/main.c
+@@ -98,8 +98,8 @@ struct parm_cfg parm_config;
+ 
+ struct lcore_params {
+ 	uint16_t port_id;
+-	uint8_t queue_id;
+-	uint8_t lcore_id;
++	uint16_t queue_id;
++	uint32_t lcore_id;
+ } __rte_cache_aligned;
+ 
+ static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
+@@ -292,24 +292,24 @@ setup_l3fwd_lookup_tables(void)
+ static int
+ check_lcore_params(void)
+ {
+-	uint8_t queue, lcore;
+-	uint16_t i;
++	uint16_t queue, i;
++	uint32_t lcore;
+ 	int socketid;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		queue = lcore_params[i].queue_id;
+ 		if (queue >= MAX_RX_QUEUE_PER_PORT) {
+-			printf("invalid queue number: %hhu\n", queue);
++			printf("invalid queue number: %" PRIu16 "\n", queue);
+ 			return -1;
+ 		}
+ 		lcore = lcore_params[i].lcore_id;
+ 		if (!rte_lcore_is_enabled(lcore)) {
+-			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
++			printf("error: lcore %u is not enabled in lcore mask\n", lcore);
+ 			return -1;
+ 		}
+ 		if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
+ 			(numa_on == 0)) {
+-			printf("warning: lcore %hhu is on socket %d with numa off \n",
++			printf("warning: lcore %u is on socket %d with numa off\n",
+ 				lcore, socketid);
+ 		}
+ 	}
+@@ -336,7 +336,7 @@ check_port_config(void)
+ 	return 0;
+ }
+ 
+-static uint8_t
++static uint16_t
+ get_port_n_rx_queues(const uint16_t port)
+ {
+ 	int queue = -1;
+@@ -352,21 +352,21 @@ get_port_n_rx_queues(const uint16_t port)
+ 						lcore_params[i].port_id);
+ 		}
+ 	}
+-	return (uint8_t)(++queue);
++	return (uint16_t)(++queue);
+ }
+ 
+ static int
+ init_lcore_rx_queues(void)
+ {
+ 	uint16_t i, nb_rx_queue;
+-	uint8_t lcore;
++	uint32_t lcore;
+ 
+ 	for (i = 0; i < nb_lcore_params; ++i) {
+ 		lcore = lcore_params[i].lcore_id;
+ 		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
+ 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
+ 			printf("error: too many queues (%u) for lcore: %u\n",
+-				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
++				(unsigned int)nb_rx_queue + 1, lcore);
+ 			return -1;
+ 		} else {
+ 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
+@@ -500,6 +500,11 @@ parse_config(const char *q_arg)
+ 	char *str_fld[_NUM_FLD];
+ 	int i;
+ 	unsigned size;
++	uint16_t max_fld[_NUM_FLD] = {
++		RTE_MAX_ETHPORTS,
++		RTE_MAX_QUEUES_PER_PORT,
++		RTE_MAX_LCORE
++	};
+ 
+ 	nb_lcore_params = 0;
+ 
+@@ -518,7 +523,7 @@ parse_config(const char *q_arg)
+ 		for (i = 0; i < _NUM_FLD; i++){
+ 			errno = 0;
+ 			int_fld[i] = strtoul(str_fld[i], &end, 0);
+-			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
++			if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
+ 				return -1;
+ 		}
+ 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
+@@ -527,11 +532,11 @@ parse_config(const char *q_arg)
+ 			return -1;
+ 		}
+ 		lcore_params_array[nb_lcore_params].port_id =
+-			(uint8_t)int_fld[FLD_PORT];
++			(uint16_t)int_fld[FLD_PORT];
+ 		lcore_params_array[nb_lcore_params].queue_id =
+-			(uint8_t)int_fld[FLD_QUEUE];
++			(uint16_t)int_fld[FLD_QUEUE];
+ 		lcore_params_array[nb_lcore_params].lcore_id =
+-			(uint8_t)int_fld[FLD_LCORE];
++			(uint32_t)int_fld[FLD_LCORE];
+ 		++nb_lcore_params;
+ 	}
+ 	lcore_params = lcore_params_array;
+@@ -630,7 +635,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues)
+ {
+ 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ 	char *end = NULL;
+-	uint8_t num_eth_rx_queues;
++	uint16_t num_eth_rx_queues;
+ 
+ 	/* parse decimal string */
+ 	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
+@@ -1211,7 +1216,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
+ static void
+ l3fwd_poll_resource_setup(void)
+ {
+-	uint8_t nb_rx_queue, queue, socketid;
++	uint8_t socketid;
++	uint16_t nb_rx_queue, queue;
+ 	struct rte_eth_dev_info dev_info;
+ 	uint32_t n_tx_queue, nb_lcores;
+ 	struct rte_eth_txconf *txconf;
+@@ -1388,6 +1394,7 @@ l3fwd_poll_resource_setup(void)
+ 		fflush(stdout);
+ 		/* init RX queues */
+ 		for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
++			struct rte_eth_conf local_conf;
+ 			struct rte_eth_rxconf rxq_conf;
+ 
+ 			portid = qconf->rx_queue_list[queue].port_id;
+@@ -1408,8 +1415,14 @@ l3fwd_poll_resource_setup(void)
+ 					"Error during getting device (port %u) info: %s\n",
+ 					portid, strerror(-ret));
+ 
++			ret = rte_eth_dev_conf_get(portid, &local_conf);
++			if (ret != 0)
++				rte_exit(EXIT_FAILURE,
++					"Error during getting device (port %u) configuration: %s\n",
++					portid, strerror(-ret));
++
+ 			rxq_conf = dev_info.default_rxconf;
+-			rxq_conf.offloads = port_conf.rxmode.offloads;
++			rxq_conf.offloads = local_conf.rxmode.offloads;
+ 			if (!per_port_pool)
+ 				ret = rte_eth_rx_queue_setup(portid, queueid,
+ 						nb_rxd, socketid,
+@@ -1528,7 +1541,7 @@ main(int argc, char **argv)
+ 	struct lcore_conf *qconf;
+ 	uint16_t queueid, portid;
+ 	unsigned int lcore_id;
+-	uint8_t queue;
++	uint16_t queue;
+ 	int ret;
+ 
+ 	/* init EAL */
+@@ -1577,7 +1590,6 @@ main(int argc, char **argv)
+ 			l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
+ 		else
+ 			l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
+-		l3fwd_event_service_setup();
+ 	} else
+ #endif
+ 		l3fwd_poll_resource_setup();
+@@ -1609,6 +1621,11 @@ main(int argc, char **argv)
+ 		}
+ 	}
+ 
++#ifdef RTE_LIB_EVENTDEV
++	if (evt_rsrc->enabled)
++		l3fwd_event_service_setup();
++#endif
++
+ 	printf("\n");
+ 
+ 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c
+index d2fd6f77e4..f839db9102 100644
+--- a/dpdk/examples/packet_ordering/main.c
++++ b/dpdk/examples/packet_ordering/main.c
+@@ -5,6 +5,7 @@
+ #include <stdlib.h>
+ #include <signal.h>
+ #include <getopt.h>
++#include <stdbool.h>
+ 
+ #include <rte_eal.h>
+ #include <rte_common.h>
+@@ -427,8 +428,8 @@ int_handler(int sig_num)
+  * The mbufs are then passed to the worker threads via the rx_to_workers
+  * ring.
+  */
+-static int
+-rx_thread(struct rte_ring *ring_out)
++static __rte_always_inline int
++rx_thread(struct rte_ring *ring_out, bool disable_reorder_flag)
+ {
+ 	uint32_t seqn = 0;
+ 	uint16_t i, ret = 0;
+@@ -454,9 +455,11 @@ rx_thread(struct rte_ring *ring_out)
+ 				}
+ 				app_stats.rx.rx_pkts += nb_rx_pkts;
+ 
+-				/* mark sequence number */
+-				for (i = 0; i < nb_rx_pkts; )
+-					*rte_reorder_seqn(pkts[i++]) = seqn++;
++				/* mark sequence number if reorder is enabled */
++				if (!disable_reorder_flag) {
++					for (i = 0; i < nb_rx_pkts;)
++						*rte_reorder_seqn(pkts[i++]) = seqn++;
++				}
+ 
+ 				/* enqueue to rx_to_workers ring */
+ 				ret = rte_ring_enqueue_burst(ring_out,
+@@ -473,6 +476,18 @@ rx_thread(struct rte_ring *ring_out)
+ 	return 0;
+ }
+ 
++static __rte_noinline int
++rx_thread_reorder(struct rte_ring *ring_out)
++{
++	return rx_thread(ring_out, false);
++}
++
++static __rte_noinline int
++rx_thread_reorder_disabled(struct rte_ring *ring_out)
++{
++	return rx_thread(ring_out, true);
++}
++
+ /**
+  * This thread takes bursts of packets from the rx_to_workers ring and
+  * Changes the input port value to output port value. And feds it to
+@@ -772,8 +787,11 @@ main(int argc, char **argv)
+ 				(void *)&send_args, last_lcore_id);
+ 	}
+ 
+-	/* Start rx_thread() on the main core */
+-	rx_thread(rx_to_workers);
++	/* Start rx_thread_xxx() on the main core */
++	if (disable_reorder)
++		rx_thread_reorder_disabled(rx_to_workers);
++	else
++		rx_thread_reorder(rx_to_workers);
+ 
+ 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+ 		if (rte_eal_wait_lcore(lcore_id) < 0)
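The rx_thread() change above is a specialization trick: the body becomes always-inline with a compile-time-constant flag, and two noinline wrappers force the compiler to emit one copy per flag value, hoisting the per-packet branch out of the hot loop. A hedged sketch using plain GCC attributes (the __rte_always_inline/__rte_noinline macros wrap the same mechanism):

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-ins for __rte_always_inline / __rte_noinline */
    #define ALWAYS_INLINE static inline __attribute__((always_inline))
    #define NOINLINE __attribute__((noinline))

    /* one body, parameterized by a compile-time-constant flag */
    ALWAYS_INLINE int worker(int n, bool tag_packets)
    {
        int tagged = 0;

        for (int i = 0; i < n; i++) {
            if (tag_packets)        /* folded away in each specialization */
                tagged++;
        }
        return tagged;
    }

    /* two out-of-line entry points; each gets a branch-free specialization */
    NOINLINE int worker_tagging(int n)    { return worker(n, true); }
    NOINLINE int worker_no_tagging(int n) { return worker(n, false); }

    int main(void)
    {
        printf("%d %d\n", worker_tagging(8), worker_no_tagging(8)); /* 8 0 */
        return 0;
    }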
+diff --git a/dpdk/examples/pipeline/cli.c b/dpdk/examples/pipeline/cli.c
+index 2ae6cc579f..afb143c01f 100644
+--- a/dpdk/examples/pipeline/cli.c
++++ b/dpdk/examples/pipeline/cli.c
+@@ -714,6 +714,7 @@ cmd_pipeline_libbuild(char **tokens,
+ 		 "-I %s/lib/eal/include "
+ 		 "-I %s/lib/eal/x86/include "
+ 		 "-I %s/lib/eal/include/generic "
++		 "-I %s/lib/log "
+ 		 "-I %s/lib/meter "
+ 		 "-I %s/lib/port "
+ 		 "-I %s/lib/table "
+@@ -738,6 +739,7 @@ cmd_pipeline_libbuild(char **tokens,
+ 		 install_dir,
+ 		 install_dir,
+ 		 install_dir,
++		 install_dir,
+ 		 log_file,
+ 		 obj_file,
+ 		 lib_file,
+diff --git a/dpdk/examples/qos_sched/args.c b/dpdk/examples/qos_sched/args.c
+index e97273152a..886542b3c1 100644
+--- a/dpdk/examples/qos_sched/args.c
++++ b/dpdk/examples/qos_sched/args.c
+@@ -103,8 +103,10 @@ app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32
+ 
+ 	n_tokens = rte_strsplit(string, strnlen(string, 32), tokens, n_vals, separator);
+ 
+-	if (n_tokens > MAX_OPT_VALUES)
++	if (n_tokens > MAX_OPT_VALUES) {
++		free(string);
+ 		return -1;
++	}
+ 
+ 	for (i = 0; i < n_tokens; i++)
+ 		opt_vals[i] = (uint32_t)atol(tokens[i]);
+@@ -182,10 +184,10 @@ app_parse_flow_conf(const char *conf_str)
+ 
+ 	pconf->rx_port = vals[0];
+ 	pconf->tx_port = vals[1];
+-	pconf->rx_core = (uint8_t)vals[2];
+-	pconf->wt_core = (uint8_t)vals[3];
++	pconf->rx_core = vals[2];
++	pconf->wt_core = vals[3];
+ 	if (ret == 5)
+-		pconf->tx_core = (uint8_t)vals[4];
++		pconf->tx_core = vals[4];
+ 	else
+ 		pconf->tx_core = pconf->wt_core;
+ 
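The app_parse_opt_vals() fix above closes a leak: the strdup()'d working copy was not released on the early-return path. A minimal sketch of the pattern, freeing the duplicate on every exit (illustrative code, not the example app's API):

    #include <stdlib.h>
    #include <string.h>

    /* sketch of the leak fix: every early exit must release the duplicate */
    static int parse_opt_vals(const char *conf, int *out, int n_max)
    {
        char *string = strdup(conf);    /* working copy we must free */
        int n = 0;

        if (string == NULL)
            return -1;

        for (char *tok = strtok(string, ","); tok != NULL;
             tok = strtok(NULL, ",")) {
            if (n >= n_max) {           /* too many tokens: free, then fail */
                free(string);
                return -1;
            }
            out[n++] = atoi(tok);
        }

        free(string);                   /* normal path releases it too */
        return n;
    }

    int main(void)
    {
        int vals[4];
        return parse_opt_vals("1,2,3", vals, 4) == 3 ? 0 : 1;
    }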
+diff --git a/dpdk/examples/qos_sched/init.c b/dpdk/examples/qos_sched/init.c
+index d8abae635a..32964fd57e 100644
+--- a/dpdk/examples/qos_sched/init.c
++++ b/dpdk/examples/qos_sched/init.c
+@@ -335,7 +335,7 @@ int app_init(void)
+ 	for(i = 0; i < nb_pfc; i++) {
+ 		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
+ 		struct rte_ring *ring;
+-		struct rte_eth_link link = {0};
++		struct rte_eth_link link;
+ 		int retry_count = 100, retry_delay = 100; /* try every 100ms for 10 sec */
+ 
+ 		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
+@@ -367,6 +367,7 @@ int app_init(void)
+ 		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
+ 		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
+ 
++		memset(&link, 0, sizeof(link));
+ 		rte_eth_link_get(qos_conf[i].tx_port, &link);
+ 		if (link.link_status == 0)
+ 			printf("Waiting for link on port %u\n", qos_conf[i].tx_port);
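In app_init(), `link` is reused across link-wait retries, so the patch zeroes it immediately before the rte_eth_link_get() call instead of relying on a one-time initializer. A small sketch of why zeroing an out-parameter right before a fallible getter matters (get_link() here is a hypothetical stand-in):

    #include <stdio.h>
    #include <string.h>

    struct link_info {
        int status;
        int speed;
    };

    /* a getter that may fail and leave *out untouched (hypothetical) */
    static int get_link(int port, struct link_info *out)
    {
        if (port != 0)
            return -1;      /* simulate failure: *out not written */
        out->status = 1;
        out->speed = 10000;
        return 0;
    }

    int main(void)
    {
        struct link_info link;

        /* zero the out-parameter right before the call, as the patch does,
         * so a failed get cannot leave stale or garbage values behind */
        memset(&link, 0, sizeof(link));
        get_link(1, &link);
        printf("status=%d speed=%d\n", link.status, link.speed); /* 0 0 */
        return 0;
    }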
+diff --git a/dpdk/examples/vhost/main.c b/dpdk/examples/vhost/main.c
+index ce5c1efddf..3fc1b151d1 100644
+--- a/dpdk/examples/vhost/main.c
++++ b/dpdk/examples/vhost/main.c
+@@ -260,6 +260,9 @@ open_dma(const char *value)
+ 	char *dma_arg[RTE_MAX_VHOST_DEVICE];
+ 	int args_nr;
+ 
++	if (input == NULL)
++		return -1;
++
+ 	while (isblank(*addrs))
+ 		addrs++;
+ 	if (*addrs == '\0') {
+diff --git a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+index 94bfbbaf78..5eddb47847 100644
+--- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
++++ b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
+ 
+ struct cmd_set_cpu_freq_result {
+ 	cmdline_fixed_string_t set_cpu_freq;
+-	uint8_t lcore_id;
++	uint32_t lcore_id;
+ 	cmdline_fixed_string_t cmd;
+ };
+ 
+@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
+ 			set_cpu_freq, "set_cpu_freq");
+ cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
+ 	TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
+-			lcore_id, RTE_UINT8);
++			lcore_id, RTE_UINT32);
+ cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
+ 	TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
+ 			cmd, "up#down#min#max#enable_turbo#disable_turbo");
+diff --git a/dpdk/kernel/freebsd/nic_uio/nic_uio.c b/dpdk/kernel/freebsd/nic_uio/nic_uio.c
+index 7a81694c92..0043892870 100644
+--- a/dpdk/kernel/freebsd/nic_uio/nic_uio.c
++++ b/dpdk/kernel/freebsd/nic_uio/nic_uio.c
+@@ -78,10 +78,14 @@ struct pci_bdf {
+ 	uint32_t function;
+ };
+ 
+-static devclass_t nic_uio_devclass;
+-
+ DEFINE_CLASS_0(nic_uio, nic_uio_driver, nic_uio_methods, sizeof(struct nic_uio_softc));
++
++#if __FreeBSD_version < 1400000
++static devclass_t nic_uio_devclass;
+ DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_devclass, nic_uio_modevent, 0);
++#else
++DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_modevent, 0);
++#endif
+ 
+ static int
+ nic_uio_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
+diff --git a/dpdk/lib/bbdev/rte_bbdev.c b/dpdk/lib/bbdev/rte_bbdev.c
+index cfebea09c7..e09bb97abb 100644
+--- a/dpdk/lib/bbdev/rte_bbdev.c
++++ b/dpdk/lib/bbdev/rte_bbdev.c
+@@ -1106,12 +1106,12 @@ rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
+ 
+ 	intr_handle = dev->intr_handle;
+ 	if (intr_handle == NULL) {
+-		rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id);
++		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
+ 		return -ENOTSUP;
+ 	}
+ 
+ 	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
+-		rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n",
++		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
+ 				dev_id, queue_id);
+ 		return -ENOTSUP;
+ 	}
+@@ -1120,7 +1120,7 @@ rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
+ 	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
+ 	if (ret && (ret != -EEXIST)) {
+ 		rte_bbdev_log(ERR,
+-				"dev %u q %u int ctl error op %d epfd %d vec %u\n",
++				"dev %u q %u int ctl error op %d epfd %d vec %u",
+ 				dev_id, queue_id, op, epfd, vec);
+ 		return ret;
+ 	}
+diff --git a/dpdk/lib/bpf/bpf_validate.c b/dpdk/lib/bpf/bpf_validate.c
+index 95b9ef99ef..da8d5f3deb 100644
+--- a/dpdk/lib/bpf/bpf_validate.c
++++ b/dpdk/lib/bpf/bpf_validate.c
+@@ -29,10 +29,13 @@ struct bpf_reg_val {
+ };
+ 
+ struct bpf_eval_state {
++	SLIST_ENTRY(bpf_eval_state) next; /* for @safe list traversal */
+ 	struct bpf_reg_val rv[EBPF_REG_NUM];
+ 	struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
+ };
+ 
++SLIST_HEAD(bpf_evst_head, bpf_eval_state);
++
+ /* possible instruction node colour */
+ enum {
+ 	WHITE,
+@@ -52,6 +55,9 @@ enum {
+ 
+ #define	MAX_EDGES	2
+ 
++/* max number of 'safe' evaluated states to track per node */
++#define NODE_EVST_MAX	32
++
+ struct inst_node {
+ 	uint8_t colour;
+ 	uint8_t nb_edge:4;
+@@ -59,7 +65,18 @@ struct inst_node {
+ 	uint8_t edge_type[MAX_EDGES];
+ 	uint32_t edge_dest[MAX_EDGES];
+ 	uint32_t prev_node;
+-	struct bpf_eval_state *evst;
++	struct {
++		struct bpf_eval_state *cur;   /* save/restore for jcc targets */
++		struct bpf_eval_state *start;
++		struct bpf_evst_head safe;    /* safe states for track/prune */
++		uint32_t nb_safe;
++	} evst;
++};
++
++struct evst_pool {
++	uint32_t num;
++	uint32_t cur;
++	struct bpf_eval_state *ent;
+ };
+ 
+ struct bpf_verifier {
+@@ -73,11 +90,8 @@ struct bpf_verifier {
+ 	uint32_t edge_type[MAX_EDGE_TYPE];
+ 	struct bpf_eval_state *evst;
+ 	struct inst_node *evin;
+-	struct {
+-		uint32_t num;
+-		uint32_t cur;
+-		struct bpf_eval_state *ent;
+-	} evst_pool;
++	struct evst_pool evst_sr_pool; /* for evst save/restore */
++	struct evst_pool evst_tp_pool; /* for evst track/prune */
+ };
+ 
+ struct bpf_ins_check {
+@@ -636,14 +650,14 @@ eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+ {
+ 	uint64_t msk;
+ 	uint32_t op;
+-	size_t opsz;
++	size_t opsz, sz;
+ 	const char *err;
+ 	struct bpf_eval_state *st;
+ 	struct bpf_reg_val *rd, rs;
+ 
+-	opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
++	sz = (BPF_CLASS(ins->code) == BPF_ALU) ?
+ 		sizeof(uint32_t) : sizeof(uint64_t);
+-	opsz = opsz * CHAR_BIT;
++	opsz = sz * CHAR_BIT;
+ 	msk = RTE_LEN2MASK(opsz, uint64_t);
+ 
+ 	st = bvf->evst;
+@@ -652,8 +666,10 @@ eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+ 	if (BPF_SRC(ins->code) == BPF_X) {
+ 		rs = st->rv[ins->src_reg];
+ 		eval_apply_mask(&rs, msk);
+-	} else
++	} else {
++		rs = (struct bpf_reg_val){.v = {.size = sz,},};
+ 		eval_fill_imm(&rs, msk, ins->imm);
++	}
+ 
+ 	eval_apply_mask(rd, msk);
+ 
+@@ -1083,7 +1099,7 @@ eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+ 	struct bpf_reg_val rvf, rvt;
+ 
+ 	tst = bvf->evst;
+-	fst = bvf->evin->evst;
++	fst = bvf->evin->evst.cur;
+ 
+ 	frd = fst->rv + ins->dst_reg;
+ 	trd = tst->rv + ins->dst_reg;
+@@ -1812,8 +1828,8 @@ add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
+ 	uint32_t ne;
+ 
+ 	if (nidx > bvf->prm->nb_ins) {
+-		RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
+-			"next pc: %u\n",
++		RTE_BPF_LOG(ERR,
++			"%s: program boundary violation at pc: %u, next pc: %u\n",
+ 			__func__, get_node_idx(bvf, node), nidx);
+ 		return -EINVAL;
+ 	}
+@@ -2089,60 +2105,113 @@ validate(struct bpf_verifier *bvf)
+  * helper functions get/free eval states.
+  */
+ static struct bpf_eval_state *
+-pull_eval_state(struct bpf_verifier *bvf)
++pull_eval_state(struct evst_pool *pool)
+ {
+ 	uint32_t n;
+ 
+-	n = bvf->evst_pool.cur;
+-	if (n == bvf->evst_pool.num)
++	n = pool->cur;
++	if (n == pool->num)
+ 		return NULL;
+ 
+-	bvf->evst_pool.cur = n + 1;
+-	return bvf->evst_pool.ent + n;
++	pool->cur = n + 1;
++	return pool->ent + n;
+ }
+ 
+ static void
+-push_eval_state(struct bpf_verifier *bvf)
++push_eval_state(struct evst_pool *pool)
+ {
+-	bvf->evst_pool.cur--;
++	RTE_ASSERT(pool->cur != 0);
++	pool->cur--;
+ }
+ 
+ static void
+ evst_pool_fini(struct bpf_verifier *bvf)
+ {
+ 	bvf->evst = NULL;
+-	free(bvf->evst_pool.ent);
+-	memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
++	free(bvf->evst_sr_pool.ent);
++	memset(&bvf->evst_sr_pool, 0, sizeof(bvf->evst_sr_pool));
++	memset(&bvf->evst_tp_pool, 0, sizeof(bvf->evst_tp_pool));
+ }
+ 
+ static int
+ evst_pool_init(struct bpf_verifier *bvf)
+ {
+-	uint32_t n;
++	uint32_t k, n;
+ 
+-	n = bvf->nb_jcc_nodes + 1;
++	/*
++	 * We need nb_jcc_nodes + 1 for save_cur/restore_cur;
++	 * the remaining ones will be used for state tracking/pruning.
++	 */
++	k = bvf->nb_jcc_nodes + 1;
++	n = k * 3;
+ 
+-	bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
+-	if (bvf->evst_pool.ent == NULL)
++	bvf->evst_sr_pool.ent = calloc(n, sizeof(bvf->evst_sr_pool.ent[0]));
++	if (bvf->evst_sr_pool.ent == NULL)
+ 		return -ENOMEM;
+ 
+-	bvf->evst_pool.num = n;
+-	bvf->evst_pool.cur = 0;
++	bvf->evst_sr_pool.num = k;
++	bvf->evst_sr_pool.cur = 0;
+ 
+-	bvf->evst = pull_eval_state(bvf);
++	bvf->evst_tp_pool.ent = bvf->evst_sr_pool.ent + k;
++	bvf->evst_tp_pool.num = n - k;
++	bvf->evst_tp_pool.cur = 0;
++
++	bvf->evst = pull_eval_state(&bvf->evst_sr_pool);
+ 	return 0;
+ }
+ 
++/*
++ * try to allocate and initialise new eval state for given node.
++ * later if no errors will be encountered, this state will be accepted as
++ * one of the possible 'safe' states for that node.
++ */
++static void
++save_start_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
++{
++	RTE_ASSERT(node->evst.start == NULL);
++
++	/* limit number of states for one node with some reasonable value */
++	if (node->evst.nb_safe >= NODE_EVST_MAX)
++	/* limit the number of states for one node to some reasonable value */
++
++	/* try to get new eval_state */
++	node->evst.start = pull_eval_state(&bvf->evst_tp_pool);
++
++	/* make a copy of current state */
++	if (node->evst.start != NULL) {
++		memcpy(node->evst.start, bvf->evst, sizeof(*node->evst.start));
++		SLIST_NEXT(node->evst.start, next) = NULL;
++	}
++}
++
++/*
++ * add @start state to the list of @safe states.
++ */
++static void
++save_safe_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
++{
++	if (node->evst.start == NULL)
++		return;
++
++	SLIST_INSERT_HEAD(&node->evst.safe, node->evst.start, next);
++	node->evst.nb_safe++;
++
++	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u,state=%p): nb_safe=%u;\n", __func__, bvf, get_node_idx(bvf, node),
++		    node->evst.start, node->evst.nb_safe);
++
++	node->evst.start = NULL;
++}
++
+ /*
+  * Save current eval state.
+  */
+ static int
+-save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
++save_cur_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
+ {
+ 	struct bpf_eval_state *st;
+ 
+ 	/* get new eval_state for this node */
+-	st = pull_eval_state(bvf);
++	st = pull_eval_state(&bvf->evst_sr_pool);
+ 	if (st == NULL) {
+ 		RTE_BPF_LOG(ERR,
+ 			"%s: internal error (out of space) at pc: %u\n",
+@@ -2154,11 +2223,13 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
+ 	memcpy(st, bvf->evst, sizeof(*st));
+ 
+ 	/* swap current state with new one */
+-	node->evst = bvf->evst;
++	RTE_ASSERT(node->evst.cur == NULL);
++	node->evst.cur = bvf->evst;
+ 	bvf->evst = st;
+ 
+ 	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
+-		__func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
++		__func__, bvf, get_node_idx(bvf, node), node->evst.cur,
++		bvf->evst);
+ 
+ 	return 0;
+ }
+@@ -2167,14 +2238,15 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
+  * Restore previous eval state and mark current eval state as free.
+  */
+ static void
+-restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
++restore_cur_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
+ {
+ 	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
+-		__func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
++		__func__, bvf, get_node_idx(bvf, node), bvf->evst,
++		node->evst.cur);
+ 
+-	bvf->evst = node->evst;
+-	node->evst = NULL;
+-	push_eval_state(bvf);
++	bvf->evst = node->evst.cur;
++	node->evst.cur = NULL;
++	push_eval_state(&bvf->evst_sr_pool);
+ }
+ 
+ static void
+@@ -2191,26 +2263,124 @@ log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
+ 
+ 	rte_log(loglvl, rte_bpf_logtype,
+ 		"r%u={\n"
+-		"\tv={type=%u, size=%zu},\n"
++		"\tv={type=%u, size=%zu, buf_size=%zu},\n"
+ 		"\tmask=0x%" PRIx64 ",\n"
+ 		"\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
+ 		"\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
+ 		"};\n",
+ 		ins->dst_reg,
+-		rv->v.type, rv->v.size,
++		rv->v.type, rv->v.size, rv->v.buf_size,
+ 		rv->mask,
+ 		rv->u.min, rv->u.max,
+ 		rv->s.min, rv->s.max);
+ }
+ 
+ /*
+- * Do second pass through CFG and try to evaluate instructions
+- * via each possible path.
+- * Right now evaluation functionality is quite limited.
+- * Still need to add extra checks for:
+- * - use/return uninitialized registers.
+- * - use uninitialized data from the stack.
+- * - memory boundaries violation.
++ * compare two evaluation states.
++ * returns zero if @lv is more conservative (safer) than @rv.
++ * returns non-zero value otherwise.
++ */
++static int
++cmp_reg_val_within(const struct bpf_reg_val *lv, const struct bpf_reg_val *rv)
++{
++	/* expect @v and @mask to be identical */
++	if (memcmp(&lv->v, &rv->v, sizeof(lv->v)) != 0 || lv->mask != rv->mask)
++		return -1;
++
++	/* exact match only for mbuf and stack pointers */
++	if (lv->v.type == RTE_BPF_ARG_PTR_MBUF ||
++			lv->v.type == BPF_ARG_PTR_STACK)
++		return -1;
++
++	if (lv->u.min <= rv->u.min && lv->u.max >= rv->u.max &&
++			lv->s.min <= rv->s.min && lv->s.max >= rv->s.max)
++		return 0;
++
++	return -1;
++}
++
++/*
++ * compare two evaluation states.
++ * returns zero if they are identical.
++ * returns positive value if @lv is more conservative (safer) than @rv.
++ * returns negative value otherwise.
++ */
++static int
++cmp_eval_state(const struct bpf_eval_state *lv, const struct bpf_eval_state *rv)
++{
++	int32_t rc;
++	uint32_t i, k;
++
++	/* for stack expect identical values */
++	rc = memcmp(lv->sv, rv->sv, sizeof(lv->sv));
++	if (rc != 0)
++		return -(2 * EBPF_REG_NUM);
++
++	k = 0;
++	/* check register values */
++	for (i = 0; i != RTE_DIM(lv->rv); i++) {
++		rc = memcmp(&lv->rv[i], &rv->rv[i], sizeof(lv->rv[i]));
++		if (rc != 0 && cmp_reg_val_within(&lv->rv[i], &rv->rv[i]) != 0)
++			return -(i + 1);
++		k += (rc != 0);
++	}
++
++	return k;
++}
++
++/*
++ * check whether we already evaluated that path and whether it can be pruned this time.
++ */
++static int
++prune_eval_state(struct bpf_verifier *bvf, const struct inst_node *node,
++	struct inst_node *next)
++{
++	int32_t rc;
++	struct bpf_eval_state *safe;
++
++	rc = INT32_MIN;
++	SLIST_FOREACH(safe, &next->evst.safe, next) {
++		rc = cmp_eval_state(safe, bvf->evst);
++		if (rc >= 0)
++			break;
++	}
++
++	rc = (rc >= 0) ? 0 : -1;
++
++	/*
++	 * current state doesn't match any safe states,
++	 * so no pruning is possible right now;
++	 * track the current state for future reference.
++	 */
++	if (rc != 0)
++		save_start_eval_state(bvf, next);
++
++	RTE_BPF_LOG(DEBUG,
++		    "%s(bvf=%p,node=%u,next=%u) returns %d, "
++		    "next->evst.start=%p, next->evst.nb_safe=%u\n",
++		    __func__, bvf, get_node_idx(bvf, node), get_node_idx(bvf, next), rc, next->evst.start,
++		    next->evst.nb_safe);
++	return rc;
++}
++
++/* Do a second pass through the CFG and try to evaluate instructions
++ * via each possible path. The verifier will try all paths, tracking types of
++ * registers used as input to instructions, and updating the resulting type via
++ * register state values. In addition, for each register and possible stack
++ * value it tries to estimate the possible max/min value.
++ * For conditional jumps, a stack is used to save evaluation state, so one
++ * path is explored while the state for the other path is pushed onto the stack.
++ * Then later, we backtrack to the first pushed instruction and repeat the cycle
++ * until the stack is empty and we're done.
++ * For a program with many conditional branches, walking through all possible
++ * paths could be very expensive. So to minimize the number of evaluations we
++ * use a heuristic similar to what the Linux kernel does - state pruning:
++ * If, from a given instruction with a given program state, we explore all
++ * possible paths and each of them reaches _exit() without any complaints and
++ * with a valid R0 value, then that program state can be marked as 'safe' for
++ * that instruction. When we later arrive at the same instruction with a state
++ * equivalent to an earlier instruction's 'safe' state, we can prune the search.
++ * For now, only states for JCC targets are saved/examined.
+  */
+ static int
+ evaluate(struct bpf_verifier *bvf)
+@@ -2221,6 +2391,13 @@ evaluate(struct bpf_verifier *bvf)
+ 	const struct ebpf_insn *ins;
+ 	struct inst_node *next, *node;
+ 
++	struct {
++		uint32_t nb_eval;
++		uint32_t nb_prune;
++		uint32_t nb_save;
++		uint32_t nb_restore;
++	} stats;
++
+ 	/* initial state of frame pointer */
+ 	static const struct bpf_reg_val rvfp = {
+ 		.v = {
+@@ -2244,6 +2421,8 @@ evaluate(struct bpf_verifier *bvf)
+ 	next = node;
+ 	rc = 0;
+ 
++	memset(&stats, 0, sizeof(stats));
++
+ 	while (node != NULL && rc == 0) {
+ 
+ 		/*
+@@ -2257,11 +2436,14 @@ evaluate(struct bpf_verifier *bvf)
+ 			op = ins[idx].code;
+ 
+ 			/* for jcc node make a copy of evaluation state */
+-			if (node->nb_edge > 1)
+-				rc |= save_eval_state(bvf, node);
++			if (node->nb_edge > 1) {
++				rc |= save_cur_eval_state(bvf, node);
++				stats.nb_save++;
++			}
+ 
+ 			if (ins_chk[op].eval != NULL && rc == 0) {
+ 				err = ins_chk[op].eval(bvf, ins + idx);
++				stats.nb_eval++;
+ 				if (err != NULL) {
+ 					RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+ 						__func__, err, idx);
+@@ -2275,21 +2457,37 @@ evaluate(struct bpf_verifier *bvf)
+ 
+ 		/* proceed through CFG */
+ 		next = get_next_node(bvf, node);
++
+ 		if (next != NULL) {
+ 
+ 			/* proceed with next child */
+ 			if (node->cur_edge == node->nb_edge &&
+-					node->evst != NULL)
+-				restore_eval_state(bvf, node);
++					node->evst.cur != NULL) {
++				restore_cur_eval_state(bvf, node);
++				stats.nb_restore++;
++			}
+ 
+-			next->prev_node = get_node_idx(bvf, node);
+-			node = next;
++			/*
++			 * for jcc targets: check whether we already evaluated
++			 * that path and whether its evaluation can be skipped
++			 * this time.
++			 */
++			if (node->nb_edge > 1 && prune_eval_state(bvf, node,
++					next) == 0) {
++				next = NULL;
++				stats.nb_prune++;
++			} else {
++				next->prev_node = get_node_idx(bvf, node);
++				node = next;
++			}
+ 		} else {
+ 			/*
+ 			 * finished with current node and all it's kids,
+-			 * proceed with parent
++			 * mark its @start state as safe for future reference,
++			 * and proceed with parent.
+ 			 */
+ 			node->cur_edge = 0;
++			save_safe_eval_state(bvf, node);
+ 			node = get_prev_node(bvf, node);
+ 
+ 			/* finished */
+@@ -2298,6 +2496,14 @@ evaluate(struct bpf_verifier *bvf)
+ 		}
+ 	}
+ 
++	RTE_BPF_LOG(DEBUG,
++		    "%s(%p) returns %d, stats:\n"
++		    "node evaluations=%u;\n"
++		    "state pruned=%u;\n"
++		    "state saves=%u;\n"
++		    "state restores=%u;\n",
++		    __func__, bvf, rc, stats.nb_eval, stats.nb_prune, stats.nb_save, stats.nb_restore);
++
+ 	return rc;
+ }
+ 
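The core of the pruning change above: each jcc-target node keeps an SLIST of previously accepted ('safe') evaluation states, and a newly reached state is pruned when some saved state is at least as conservative, i.e. its value ranges contain the current ones. A toy sketch of that check with a one-register state (a deliberate simplification, not the verifier's actual state layout):

    #include <stdio.h>
    #include <sys/queue.h>

    /* toy "evaluation state": just one register interval */
    struct state {
        SLIST_ENTRY(state) next;
        int min, max;
    };

    SLIST_HEAD(state_head, state);

    /* a previously accepted state prunes the current one if it is broader */
    static int covered_by_safe(struct state_head *safe, const struct state *cur)
    {
        struct state *s;

        SLIST_FOREACH(s, safe, next)
            if (s->min <= cur->min && s->max >= cur->max)
                return 1;       /* already explored something safer */
        return 0;
    }

    int main(void)
    {
        struct state_head safe = SLIST_HEAD_INITIALIZER(safe);
        struct state known = { .min = 0, .max = 100 };
        struct state cur = { .min = 10, .max = 50 };

        SLIST_INSERT_HEAD(&safe, &known, next);
        printf("prune: %d\n", covered_by_safe(&safe, &cur));   /* 1 */
        return 0;
    }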
+diff --git a/dpdk/lib/bpf/meson.build b/dpdk/lib/bpf/meson.build
+index cd739bb827..aa258a9061 100644
+--- a/dpdk/lib/bpf/meson.build
++++ b/dpdk/lib/bpf/meson.build
+@@ -7,6 +7,12 @@ if is_windows
+     subdir_done()
+ endif
+ 
++if arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_32')
++    build = false
++    reason = 'not supported on 32-bit x86'
++    subdir_done()
++endif
++
+ sources = files('bpf.c',
+         'bpf_dump.c',
+         'bpf_exec.c',
+diff --git a/dpdk/lib/cfgfile/rte_cfgfile.c b/dpdk/lib/cfgfile/rte_cfgfile.c
+index eefba6e408..2f9cc0722a 100644
+--- a/dpdk/lib/cfgfile/rte_cfgfile.c
++++ b/dpdk/lib/cfgfile/rte_cfgfile.c
+@@ -137,7 +137,7 @@ rte_cfgfile_check_params(const struct rte_cfgfile_parameters *params)
+ 	unsigned int i;
+ 
+ 	if (!params) {
+-		CFG_LOG(ERR, "missing cfgfile parameters\n");
++		CFG_LOG(ERR, "missing cfgfile parameters");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -150,7 +150,7 @@ rte_cfgfile_check_params(const struct rte_cfgfile_parameters *params)
+ 	}
+ 
+ 	if (valid_comment == 0)	{
+-		CFG_LOG(ERR, "invalid comment characters %c\n",
++		CFG_LOG(ERR, "invalid comment characters %c",
+ 		       params->comment_character);
+ 		return -ENOTSUP;
+ 	}
+@@ -188,7 +188,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
+ 		lineno++;
+ 		if ((len >= sizeof(buffer) - 1) && (buffer[len-1] != '\n')) {
+ 			CFG_LOG(ERR, " line %d - no \\n found on string. "
+-					"Check if line too long\n", lineno);
++					"Check if line too long", lineno);
+ 			goto error1;
+ 		}
+ 		/* skip parsing if comment character found */
+@@ -209,7 +209,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
+ 			char *end = memchr(buffer, ']', len);
+ 			if (end == NULL) {
+ 				CFG_LOG(ERR,
+-					"line %d - no terminating ']' character found\n",
++					"line %d - no terminating ']' character found",
+ 					lineno);
+ 				goto error1;
+ 			}
+@@ -225,7 +225,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
+ 			split[1] = memchr(buffer, '=', len);
+ 			if (split[1] == NULL) {
+ 				CFG_LOG(ERR,
+-					"line %d - no '=' character found\n",
++					"line %d - no '=' character found",
+ 					lineno);
+ 				goto error1;
+ 			}
+@@ -249,7 +249,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
+ 			if (!(flags & CFG_FLAG_EMPTY_VALUES) &&
+ 					(*split[1] == '\0')) {
+ 				CFG_LOG(ERR,
+-					"line %d - cannot use empty values\n",
++					"line %d - cannot use empty values",
+ 					lineno);
+ 				goto error1;
+ 			}
+@@ -414,7 +414,7 @@ int rte_cfgfile_set_entry(struct rte_cfgfile *cfg, const char *sectionname,
+ 			return 0;
+ 		}
+ 
+-	CFG_LOG(ERR, "entry name doesn't exist\n");
++	CFG_LOG(ERR, "entry name doesn't exist");
+ 	return -EINVAL;
+ }
+ 
+diff --git a/dpdk/lib/compressdev/rte_compressdev_pmd.c b/dpdk/lib/compressdev/rte_compressdev_pmd.c
+index 156bccd972..762b44f03e 100644
+--- a/dpdk/lib/compressdev/rte_compressdev_pmd.c
++++ b/dpdk/lib/compressdev/rte_compressdev_pmd.c
+@@ -100,12 +100,12 @@ rte_compressdev_pmd_create(const char *name,
+ 	struct rte_compressdev *compressdev;
+ 
+ 	if (params->name[0] != '\0') {
+-		COMPRESSDEV_LOG(INFO, "User specified device name = %s\n",
++		COMPRESSDEV_LOG(INFO, "User specified device name = %s",
+ 				params->name);
+ 		name = params->name;
+ 	}
+ 
+-	COMPRESSDEV_LOG(INFO, "Creating compressdev %s\n", name);
++	COMPRESSDEV_LOG(INFO, "Creating compressdev %s", name);
+ 
+ 	COMPRESSDEV_LOG(INFO, "Init parameters - name: %s, socket id: %d",
+ 			name, params->socket_id);
+diff --git a/dpdk/lib/cryptodev/rte_cryptodev.c b/dpdk/lib/cryptodev/rte_cryptodev.c
+index b258827734..c5eb4ecb01 100644
+--- a/dpdk/lib/cryptodev/rte_cryptodev.c
++++ b/dpdk/lib/cryptodev/rte_cryptodev.c
+@@ -1489,6 +1489,10 @@ rte_cryptodev_add_enq_callback(uint8_t dev_id,
+ 			       rte_cryptodev_callback_fn cb_fn,
+ 			       void *cb_arg)
+ {
++#ifndef RTE_CRYPTO_CALLBACKS
++	rte_errno = ENOTSUP;
++	return NULL;
++#endif
+ 	struct rte_cryptodev *dev;
+ 	struct rte_cryptodev_cb_rcu *list;
+ 	struct rte_cryptodev_cb *cb, *tail;
+@@ -1554,6 +1558,9 @@ rte_cryptodev_remove_enq_callback(uint8_t dev_id,
+ 				  uint16_t qp_id,
+ 				  struct rte_cryptodev_cb *cb)
+ {
++#ifndef RTE_CRYPTO_CALLBACKS
++	return -ENOTSUP;
++#endif
+ 	struct rte_cryptodev *dev;
+ 	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
+ 	struct rte_cryptodev_cb *curr_cb;
+@@ -1628,6 +1635,10 @@ rte_cryptodev_add_deq_callback(uint8_t dev_id,
+ 			       rte_cryptodev_callback_fn cb_fn,
+ 			       void *cb_arg)
+ {
++#ifndef RTE_CRYPTO_CALLBACKS
++	rte_errno = ENOTSUP;
++	return NULL;
++#endif
+ 	struct rte_cryptodev *dev;
+ 	struct rte_cryptodev_cb_rcu *list;
+ 	struct rte_cryptodev_cb *cb, *tail;
+@@ -1694,6 +1705,9 @@ rte_cryptodev_remove_deq_callback(uint8_t dev_id,
+ 				  uint16_t qp_id,
+ 				  struct rte_cryptodev_cb *cb)
+ {
++#ifndef RTE_CRYPTO_CALLBACKS
++	return -ENOTSUP;
++#endif
+ 	struct rte_cryptodev *dev;
+ 	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
+ 	struct rte_cryptodev_cb *curr_cb;
+@@ -2072,7 +2086,7 @@ rte_cryptodev_sym_session_create(uint8_t dev_id,
+ 	}
+ 
+ 	if (xforms == NULL) {
+-		CDEV_LOG_ERR("Invalid xform\n");
++		CDEV_LOG_ERR("Invalid xform");
+ 		rte_errno = EINVAL;
+ 		return NULL;
+ 	}
+@@ -2682,7 +2696,7 @@ rte_cryptodev_driver_id_get(const char *name)
+ 	int driver_id = -1;
+ 
+ 	if (name == NULL) {
+-		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
++		CDEV_LOG_DEBUG("name pointer NULL");
+ 		return -1;
+ 	}
+ 
+diff --git a/dpdk/lib/cryptodev/rte_cryptodev.h b/dpdk/lib/cryptodev/rte_cryptodev.h
+index aaeaf294e6..a42a4fc04e 100644
+--- a/dpdk/lib/cryptodev/rte_cryptodev.h
++++ b/dpdk/lib/cryptodev/rte_cryptodev.h
+@@ -26,8 +26,6 @@ extern "C" {
+ 
+ #include "rte_cryptodev_trace_fp.h"
+ 
+-extern const char **rte_cyptodev_names;
+-
+ /* Logging Macros */
+ 
+ #define CDEV_LOG_ERR(...) \
+@@ -1912,7 +1910,7 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
+ 
+ #ifdef RTE_CRYPTO_CALLBACKS
+-	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
++	if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) {
+ 		struct rte_cryptodev_cb_rcu *list;
+ 		struct rte_cryptodev_cb *cb;
+ 
+@@ -1979,7 +1977,7 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ 	fp_ops = &rte_crypto_fp_ops[dev_id];
+ 	qp = fp_ops->qp.data[qp_id];
+ #ifdef RTE_CRYPTO_CALLBACKS
+-	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
++	if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) {
+ 		struct rte_cryptodev_cb_rcu *list;
+ 		struct rte_cryptodev_cb *cb;
+ 
+diff --git a/dpdk/lib/dispatcher/rte_dispatcher.c b/dpdk/lib/dispatcher/rte_dispatcher.c
+index 10d02edde9..95dd41b818 100644
+--- a/dpdk/lib/dispatcher/rte_dispatcher.c
++++ b/dpdk/lib/dispatcher/rte_dispatcher.c
+@@ -246,7 +246,7 @@ evd_service_register(struct rte_dispatcher *dispatcher)
+ 	rc = rte_service_component_register(&service, &dispatcher->service_id);
+ 	if (rc != 0)
+ 		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
+-				 "%s failed with error code %d\n",
++				 "%s failed with error code %d",
+ 				 service.name, rc);
+ 
+ 	return rc;
+@@ -260,7 +260,7 @@ evd_service_unregister(struct rte_dispatcher *dispatcher)
+ 	rc = rte_service_component_unregister(dispatcher->service_id);
+ 	if (rc != 0)
+ 		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
+-				 "failed with error code %d\n", rc);
++				 "failed with error code %d", rc);
+ 
+ 	return rc;
+ }
+@@ -279,7 +279,7 @@ rte_dispatcher_create(uint8_t event_dev_id)
+ 				  RTE_CACHE_LINE_SIZE, socket_id);
+ 
+ 	if (dispatcher == NULL) {
+-		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
++		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher");
+ 		rte_errno = ENOMEM;
+ 		return NULL;
+ 	}
+@@ -483,7 +483,7 @@ evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
+ 	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+ 
+ 	if (unreg_handler == NULL) {
+-		RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id);
++		RTE_EDEV_LOG_ERR("Invalid handler id %d", handler_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -602,7 +602,7 @@ rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
+ 	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id);
+ 
+ 	if (unreg_finalizer == NULL) {
+-		RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id);
++		RTE_EDEV_LOG_ERR("Invalid finalizer id %d", finalizer_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -636,7 +636,7 @@ evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
+ 	 */
+ 	if (rc != 0)
+ 		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+-				 "service component run state to %d\n", rc,
++				 "service component run state to %d", rc,
+ 				 state);
+ 
+ 	RTE_VERIFY(rc == 0);
+diff --git a/dpdk/lib/dmadev/rte_dmadev.c b/dpdk/lib/dmadev/rte_dmadev.c
+index 4e5e420c82..5093c6e38b 100644
+--- a/dpdk/lib/dmadev/rte_dmadev.c
++++ b/dpdk/lib/dmadev/rte_dmadev.c
+@@ -158,15 +158,24 @@ static int
+ dma_dev_data_prepare(void)
+ {
+ 	size_t size;
++	void *ptr;
+ 
+ 	if (rte_dma_devices != NULL)
+ 		return 0;
+ 
+-	size = dma_devices_max * sizeof(struct rte_dma_dev);
+-	rte_dma_devices = malloc(size);
+-	if (rte_dma_devices == NULL)
++	/* The DMA device object is expected to be cache-line aligned,
++	 * but the return value of malloc may not be aligned to the cache line.
++	 * Therefore, extra memory is allocated so the start can be realigned.
++	 * Note: posix_memalign/aligned_alloc are not used
++	 * because they are not always available, depending on libc.
++	 */
++	size = dma_devices_max * sizeof(struct rte_dma_dev) + RTE_CACHE_LINE_SIZE;
++	ptr = malloc(size);
++	if (ptr == NULL)
+ 		return -ENOMEM;
+-	memset(rte_dma_devices, 0, size);
++	memset(ptr, 0, size);
++
++	rte_dma_devices = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+ 
+ 	return 0;
+ }
+@@ -726,7 +735,7 @@ rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *
+ 		return -EINVAL;
+ 
+ 	if (vchan >= dev->data->dev_conf.nb_vchans) {
+-		RTE_DMA_LOG(ERR, "Device %u vchan %u out of range\n", dev_id, vchan);
++		RTE_DMA_LOG(ERR, "Device %u vchan %u out of range", dev_id, vchan);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1028,7 +1037,7 @@ dmadev_handle_dev_dump(const char *cmd __rte_unused,
+ 	if (*end_param != '\0')
+ 		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");
+ 
+-	buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN);
++	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
+ 	if (buf == NULL)
+ 		return -ENOMEM;
+ 
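dma_dev_data_prepare() above over-allocates by one cache line and realigns the start with RTE_PTR_ALIGN, because plain malloc() gives no cache-line guarantee. A standalone sketch of the technique (the 64-byte line size and the PTR_ALIGN macro here are stand-ins for the DPDK definitions):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CACHE_LINE 64   /* assumed cache-line size, as RTE_CACHE_LINE_SIZE */

    /* round a pointer up to the next multiple of align (a power of two) */
    #define PTR_ALIGN(p, align) \
        ((void *)(((uintptr_t)(p) + (align) - 1) & ~(uintptr_t)((align) - 1)))

    int main(void)
    {
        size_t nb = 16, elt = 128;

        /* over-allocate by one cache line so the start can be realigned */
        void *raw = malloc(nb * elt + CACHE_LINE);
        if (raw == NULL)
            return 1;
        memset(raw, 0, nb * elt + CACHE_LINE);

        void *devs = PTR_ALIGN(raw, CACHE_LINE);
        printf("aligned: %d\n", (uintptr_t)devs % CACHE_LINE == 0); /* 1 */

        free(raw);      /* note: free must use the original pointer */
        return 0;
    }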
+diff --git a/dpdk/lib/eal/common/eal_common_fbarray.c b/dpdk/lib/eal/common/eal_common_fbarray.c
+index 2055bfa57d..253110360e 100644
+--- a/dpdk/lib/eal/common/eal_common_fbarray.c
++++ b/dpdk/lib/eal/common/eal_common_fbarray.c
+@@ -173,7 +173,7 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ 
+ 		/* combine current ignore mask with last index ignore mask */
+ 		if (msk_idx == last)
+-			ignore_msk |= last_msk;
++			ignore_msk &= last_msk;
+ 
+ 		/* if we have an ignore mask, ignore once */
+ 		if (ignore_msk) {
+@@ -216,6 +216,8 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ 		for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks;
+ 				lookahead_idx++) {
+ 			unsigned int s_idx, need;
++			uint64_t first_bit = 1;
++
+ 			lookahead_msk = msk->data[lookahead_idx];
+ 
+ 			/* if we're looking for free space, invert the mask */
+@@ -225,18 +227,24 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ 			/* figure out how many consecutive bits we need here */
+ 			need = RTE_MIN(left, MASK_ALIGN);
+ 
+-			for (s_idx = 0; s_idx < need - 1; s_idx++)
++			/* count number of shifts we performed */
++			for (s_idx = 0; s_idx < need - 1; s_idx++) {
+ 				lookahead_msk &= lookahead_msk >> 1ULL;
++				/* did we lose the run yet? */
++				if ((lookahead_msk & first_bit) == 0)
++					break;
++			}
+ 
+ 			/* if first bit is not set, we've lost the run */
+-			if ((lookahead_msk & 1) == 0) {
++			if ((lookahead_msk & first_bit) == 0) {
+ 				/*
+ 				 * we've scanned this far, so we know there are
+ 				 * no runs in the space we've lookahead-scanned
+ 				 * as well, so skip that on next iteration.
+ 				 */
+-				ignore_msk = ~((1ULL << need) - 1);
+-				msk_idx = lookahead_idx;
++				ignore_msk = ~((1ULL << (s_idx + 1)) - 1);
++				/* outer loop will increment msk_idx so add 1 */
++				msk_idx = lookahead_idx - 1;
+ 				break;
+ 			}
+ 
+@@ -500,8 +508,13 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ 			/* figure out how many consecutive bits we need here */
+ 			need = RTE_MIN(left, MASK_ALIGN);
+ 
+-			for (s_idx = 0; s_idx < need - 1; s_idx++)
++			/* count number of shifts we performed */
++			for (s_idx = 0; s_idx < need - 1; s_idx++) {
+ 				lookbehind_msk &= lookbehind_msk << 1ULL;
++				/* did we lose the run yet? */
++				if ((lookbehind_msk & last_bit) == 0)
++					break;
++			}
+ 
+ 			/* if last bit is not set, we've lost the run */
+ 			if ((lookbehind_msk & last_bit) == 0) {
+@@ -510,8 +523,9 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ 				 * no runs in the space we've lookbehind-scanned
+ 				 * as well, so skip that on next iteration.
+ 				 */
+-				ignore_msk = UINT64_MAX << need;
+-				msk_idx = lookbehind_idx;
++				ignore_msk = ~(UINT64_MAX << (MASK_ALIGN - s_idx - 1));
++				/* outer loop will decrement msk_idx so add 1 */
++				msk_idx = lookbehind_idx + 1;
+ 				break;
+ 			}
+ 
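The fbarray hunks above fix the lookahead scan for a run of n consecutive set bits. The run is detected by repeatedly folding the mask with a shifted copy of itself; the bug was continuing to fold after the run was already lost, which produced a wrong skip mask on the next iteration. A compact sketch of the folding idea with the early exit (my reduction of the logic, not the fbarray code itself):

    #include <stdint.h>
    #include <stdio.h>

    /* returns nonzero if msk contains a run of at least n consecutive 1-bits */
    static int has_run(uint64_t msk, unsigned int n)
    {
        /* after k folds, bit i is set iff bits i..i+k were all set */
        for (unsigned int k = 0; k < n - 1 && msk != 0; k++)
            msk &= msk >> 1;
        return msk != 0;
    }

    int main(void)
    {
        printf("%d\n", has_run(0xF0, 4));   /* 1: bits 4..7 set */
        printf("%d\n", has_run(0xF0, 5));   /* 0: only 4 in a row */
        return 0;
    }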
+diff --git a/dpdk/lib/eal/common/eal_common_options.c b/dpdk/lib/eal/common/eal_common_options.c
+index a6d21f1cba..aec51736f2 100644
+--- a/dpdk/lib/eal/common/eal_common_options.c
++++ b/dpdk/lib/eal/common/eal_common_options.c
+@@ -226,6 +226,8 @@ eal_save_args(int argc, char **argv)
+ 		if (strcmp(argv[i], "--") == 0)
+ 			break;
+ 		eal_args[i] = strdup(argv[i]);
++		if (eal_args[i] == NULL)
++			goto error;
+ 	}
+ 	eal_args[i++] = NULL; /* always finish with NULL */
+ 
+@@ -235,13 +237,31 @@ eal_save_args(int argc, char **argv)
+ 
+ 	eal_app_args = calloc(argc - i + 1, sizeof(*eal_args));
+ 	if (eal_app_args == NULL)
+-		return -1;
++		goto error;
+ 
+-	for (j = 0; i < argc; j++, i++)
++	for (j = 0; i < argc; j++, i++) {
+ 		eal_app_args[j] = strdup(argv[i]);
++		if (eal_app_args[j] == NULL)
++			goto error;
++	}
+ 	eal_app_args[j] = NULL;
+ 
+ 	return 0;
++
++error:
++	if (eal_app_args != NULL) {
++		i = 0;
++		while (eal_app_args[i] != NULL)
++			free(eal_app_args[i++]);
++		free(eal_app_args);
++		eal_app_args = NULL;
++	}
++	i = 0;
++	while (eal_args[i] != NULL)
++		free(eal_args[i++]);
++	free(eal_args);
++	eal_args = NULL;
++	return -1;
+ }
+ #endif
+ 
+@@ -2141,7 +2161,7 @@ rte_vect_set_max_simd_bitwidth(uint16_t bitwidth)
+ 	struct internal_config *internal_conf =
+ 		eal_get_internal_configuration();
+ 	if (internal_conf->max_simd_bitwidth.forced) {
+-		RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled");
++		RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled\n");
+ 		return -EPERM;
+ 	}
+ 
+diff --git a/dpdk/lib/eal/common/malloc_mp.c b/dpdk/lib/eal/common/malloc_mp.c
+index 4d62397aba..a704eb01d1 100644
+--- a/dpdk/lib/eal/common/malloc_mp.c
++++ b/dpdk/lib/eal/common/malloc_mp.c
+@@ -756,7 +756,8 @@ request_to_primary(struct malloc_mp_req *user_req)
+ 	do {
+ 		ret = pthread_cond_timedwait(&entry->cond,
+ 				&mp_request_list.lock, &ts);
+-	} while (ret != 0 && ret != ETIMEDOUT);
++	} while ((ret != 0 && ret != ETIMEDOUT) &&
++			entry->state == REQ_STATE_ACTIVE);
+ 
+ 	if (entry->state != REQ_STATE_COMPLETE) {
+ 		RTE_LOG(ERR, EAL, "Request timed out\n");
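The malloc_mp fix above makes the timed wait loop re-check the request state, so the loop exits as soon as the request completes instead of re-waiting after interrupted or spurious returns. The standard shape of such a predicate-driven pthread_cond_timedwait() loop, as a self-contained sketch:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int done;    /* the predicate the waiter really cares about */

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        struct timespec ts;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 2;     /* absolute deadline */

        pthread_mutex_lock(&lock);
        pthread_create(&t, NULL, worker, NULL);
        /* keep waiting only while the predicate is false; stop on timeout */
        while (!done && ret != ETIMEDOUT)
            ret = pthread_cond_timedwait(&cond, &lock, &ts);
        printf("done=%d\n", done);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }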
+diff --git a/dpdk/lib/eal/include/rte_common.h b/dpdk/lib/eal/include/rte_common.h
+index c1ba32d00e..40af7164af 100644
+--- a/dpdk/lib/eal/include/rte_common.h
++++ b/dpdk/lib/eal/include/rte_common.h
+@@ -267,7 +267,7 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void)
+ #define RTE_FINI_PRIO(name, priority) \
+ 	static void name(void); \
+ 	__pragma(const_seg(DTOR_PRIORITY_TO_SECTION(priority))) \
+-	__declspec(allocate(DTOR_PRIORITY_TO_SECTION(priority))) name ## _pointer = &name; \
++	__declspec(allocate(DTOR_PRIORITY_TO_SECTION(priority))) void *name ## _pointer = &name; \
+ 	__pragma(const_seg()) \
+ 	static void name(void)
+ #endif
+diff --git a/dpdk/lib/eal/linux/eal_dev.c b/dpdk/lib/eal/linux/eal_dev.c
+index ac76f6174d..c0ba98852b 100644
+--- a/dpdk/lib/eal/linux/eal_dev.c
++++ b/dpdk/lib/eal/linux/eal_dev.c
+@@ -182,6 +182,8 @@ dev_uev_parse(const char *buf, struct rte_dev_event *event, int length)
+ 			i += 14;
+ 			strlcpy(pci_slot_name, buf, sizeof(subsystem));
+ 			event->devname = strdup(pci_slot_name);
++			if (event->devname == NULL)
++				return -1;
+ 		}
+ 		for (; i < length; i++) {
+ 			if (*buf == '\0')
+diff --git a/dpdk/lib/eal/linux/eal_hugepage_info.c b/dpdk/lib/eal/linux/eal_hugepage_info.c
+index 581d9dfc91..36a495fb1f 100644
+--- a/dpdk/lib/eal/linux/eal_hugepage_info.c
++++ b/dpdk/lib/eal/linux/eal_hugepage_info.c
+@@ -403,7 +403,7 @@ inspect_hugedir_cb(const struct walk_hugedir_data *whd)
+ 	struct stat st;
+ 
+ 	if (fstat(whd->file_fd, &st) < 0)
+-		RTE_LOG(DEBUG, EAL, "%s(): stat(\"%s\") failed: %s",
++		RTE_LOG(DEBUG, EAL, "%s(): stat(\"%s\") failed: %s\n",
+ 				__func__, whd->file_name, strerror(errno));
+ 	else
+ 		(*total_size) += st.st_size;
+diff --git a/dpdk/lib/eal/linux/eal_interrupts.c b/dpdk/lib/eal/linux/eal_interrupts.c
+index d4919dff45..eabac24992 100644
+--- a/dpdk/lib/eal/linux/eal_interrupts.c
++++ b/dpdk/lib/eal/linux/eal_interrupts.c
+@@ -1542,7 +1542,7 @@ rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
+ 		/* only check, initialization would be done in vdev driver.*/
+ 		if ((uint64_t)rte_intr_efd_counter_size_get(intr_handle) >
+ 		    sizeof(union rte_intr_read_buffer)) {
+-			RTE_LOG(ERR, EAL, "the efd_counter_size is oversized");
++			RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n");
+ 			return -EINVAL;
+ 		}
+ 	} else {
+diff --git a/dpdk/lib/eal/linux/eal_memalloc.c b/dpdk/lib/eal/linux/eal_memalloc.c
+index 9853ec78a2..b9fc83fe6a 100644
+--- a/dpdk/lib/eal/linux/eal_memalloc.c
++++ b/dpdk/lib/eal/linux/eal_memalloc.c
+@@ -1061,7 +1061,7 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
+ 	/* memalloc is locked, so it's safe to use thread-unsafe version */
+ 	ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
+ 	if (ret == 0) {
+-		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
++		RTE_LOG(DEBUG, EAL, "%s(): couldn't find suitable memseg_list\n",
+ 			__func__);
+ 		ret = -1;
+ 	} else if (ret > 0) {
+diff --git a/dpdk/lib/eal/unix/eal_firmware.c b/dpdk/lib/eal/unix/eal_firmware.c
+index 1a7cf8e7b7..9fc2f7763a 100644
+--- a/dpdk/lib/eal/unix/eal_firmware.c
++++ b/dpdk/lib/eal/unix/eal_firmware.c
+@@ -15,6 +15,8 @@
+ 
+ #include "eal_firmware.h"
+ 
++static const char * const compression_suffixes[] = { "xz", "zst" };
++
+ #ifdef RTE_HAS_LIBARCHIVE
+ 
+ struct firmware_read_ctx {
+@@ -36,7 +38,11 @@ firmware_open(struct firmware_read_ctx *ctx, const char *name, size_t blocksize)
+ 
+ 	err = archive_read_support_filter_xz(ctx->a);
+ 	if (err != ARCHIVE_OK && err != ARCHIVE_WARN)
+-		goto error;
++		RTE_LOG(DEBUG, EAL, "could not initialise libarchive for xz compression\n");
++
++	err = archive_read_support_filter_zstd(ctx->a);
++	if (err != ARCHIVE_OK && err != ARCHIVE_WARN)
++		RTE_LOG(DEBUG, EAL, "could not initialise libarchive for zstd compression\n");
+ 
+ 	if (archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK)
+ 		goto error;
+@@ -147,16 +153,21 @@ rte_firmware_read(const char *name, void **buf, size_t *bufsz)
+ 
+ 	ret = firmware_read(name, buf, bufsz);
+ 	if (ret < 0) {
+-		snprintf(path, sizeof(path), "%s.xz", name);
+-		path[PATH_MAX - 1] = '\0';
++		unsigned int i;
++
++		for (i = 0; i < RTE_DIM(compression_suffixes); i++) {
++			snprintf(path, sizeof(path), "%s.%s", name, compression_suffixes[i]);
++			path[PATH_MAX - 1] = '\0';
++			if (access(path, F_OK) != 0)
++				continue;
+ #ifndef RTE_HAS_LIBARCHIVE
+-		if (access(path, F_OK) == 0) {
+ 			RTE_LOG(WARNING, EAL, "libarchive not linked, %s cannot be decompressed\n",
+ 				path);
+-		}
+ #else
+-		ret = firmware_read(path, buf, bufsz);
++			ret = firmware_read(path, buf, bufsz);
+ #endif
++			break;
++		}
+ 	}
+ 	return ret;
+ }
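The rte_firmware_read() change above generalises the old hard-coded ".xz" probe into a loop over compression_suffixes, so zstd-compressed firmware is found as well. A standalone sketch of the same probe loop, using only POSIX access(); the helper name is illustrative:

	#include <limits.h>
	#include <stdio.h>
	#include <unistd.h>

	static const char * const suffixes[] = { "xz", "zst" };

	/* Fill 'path' with the first existing compressed variant of 'name';
	 * return 0 on a hit, -1 if no "<name>.<suffix>" file is present. */
	static int
	find_compressed(const char *name, char path[PATH_MAX])
	{
		unsigned int i;

		for (i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); i++) {
			snprintf(path, PATH_MAX, "%s.%s", name, suffixes[i]);
			if (access(path, F_OK) == 0)
				return 0;
		}
		return -1;
	}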
+diff --git a/dpdk/lib/eal/windows/eal_memory.c b/dpdk/lib/eal/windows/eal_memory.c
+index 31410a41fd..fd39155163 100644
+--- a/dpdk/lib/eal/windows/eal_memory.c
++++ b/dpdk/lib/eal/windows/eal_memory.c
+@@ -110,7 +110,7 @@ eal_mem_win32api_init(void)
+ 	VirtualAlloc2_ptr = (VirtualAlloc2_type)(
+ 		(void *)GetProcAddress(library, function));
+ 	if (VirtualAlloc2_ptr == NULL) {
+-		RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")\n",
++		RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")",
+ 			library_name, function);
+ 
+ 		/* Contrary to the docs, Server 2016 is not supported. */
+diff --git a/dpdk/lib/eal/windows/include/meson.build b/dpdk/lib/eal/windows/include/meson.build
+index 5fb1962ac7..e985a77d58 100644
+--- a/dpdk/lib/eal/windows/include/meson.build
++++ b/dpdk/lib/eal/windows/include/meson.build
+@@ -6,4 +6,5 @@ includes += include_directories('.')
+ headers += files(
+         'rte_os.h',
+         'rte_windows.h',
++        'sched.h',
+ )
+diff --git a/dpdk/lib/eal/x86/rte_cycles.c b/dpdk/lib/eal/x86/rte_cycles.c
+index 69ed59b4f0..f147a5231d 100644
+--- a/dpdk/lib/eal/x86/rte_cycles.c
++++ b/dpdk/lib/eal/x86/rte_cycles.c
+@@ -10,6 +10,10 @@
+ #include <cpuid.h>
+ #endif
+ 
++#define x86_vendor_amd(t1, t2, t3)        \
++	((t1 == 0x68747541) && /* htuA */   \
++	 (t2 == 0x444d4163) && /* DMAc */   \
++	 (t3 == 0x69746e65))   /* itne */
+ 
+ #include "eal_private.h"
+ 
+@@ -110,6 +114,18 @@ get_tsc_freq_arch(void)
+ 	uint8_t mult, model;
+ 	int32_t ret;
+ 
++#ifdef RTE_TOOLCHAIN_MSVC
++	__cpuid(cpuinfo, 0);
++	a = cpuinfo[0];
++	b = cpuinfo[1];
++	c = cpuinfo[2];
++	d = cpuinfo[3];
++#else
++	__cpuid(0, a, b, c, d);
++#endif
++	if (x86_vendor_amd(b, c, d))
++		return 0;
++
+ 	/*
+ 	 * Time Stamp Counter and Nominal Core Crystal Clock
+ 	 * Information Leaf
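The x86_vendor_amd() macro compares the CPUID leaf-0 vendor words for "AuthenticAMD": EBX, EDX and ECX each hold four ASCII bytes in little-endian order, which is why the hex comments read reversed ("htuA" is 0x68747541, i.e. "Auth"). A small sketch decoding the three words back into the vendor string (assumes a little-endian host, which x86 is):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* CPUID leaf 0 vendor words for "AuthenticAMD": EBX, EDX, ECX. */
		const uint32_t words[3] = { 0x68747541, 0x69746e65, 0x444d4163 };
		char vendor[13];

		/* Four ASCII bytes per register, little-endian, so a plain
		 * byte copy reconstructs the readable string. */
		memcpy(vendor, words, sizeof(words));
		vendor[12] = '\0';
		printf("%s\n", vendor);	/* prints: AuthenticAMD */
		return 0;
	}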
+diff --git a/dpdk/lib/ethdev/ethdev_driver.c b/dpdk/lib/ethdev/ethdev_driver.c
+index fff4b7b4cd..47659d5e8c 100644
+--- a/dpdk/lib/ethdev/ethdev_driver.c
++++ b/dpdk/lib/ethdev/ethdev_driver.c
+@@ -297,15 +297,25 @@ rte_eth_dev_create(struct rte_device *device, const char *name,
+ 			return -ENODEV;
+ 
+ 		if (priv_data_size) {
++			/* try alloc private data on device-local node. */
+ 			ethdev->data->dev_private = rte_zmalloc_socket(
+ 				name, priv_data_size, RTE_CACHE_LINE_SIZE,
+ 				device->numa_node);
+ 
+-			if (!ethdev->data->dev_private) {
+-				RTE_ETHDEV_LOG(ERR,
+-					"failed to allocate private data\n");
+-				retval = -ENOMEM;
+-				goto probe_failed;
++			/* fall back to alloc on any socket on failure */
++			if (ethdev->data->dev_private == NULL) {
++				ethdev->data->dev_private = rte_zmalloc(name,
++						priv_data_size, RTE_CACHE_LINE_SIZE);
++
++				if (ethdev->data->dev_private == NULL) {
++					RTE_ETHDEV_LOG(ERR, "failed to allocate private data\n");
++					retval = -ENOMEM;
++					goto probe_failed;
++				}
++				/* got memory, but not local, so issue warning */
++				RTE_ETHDEV_LOG(WARNING,
++					       "Private data for ethdev '%s' not allocated on local NUMA node %d\n",
++					       device->name, device->numa_node);
+ 			}
+ 		}
+ 	} else {
+@@ -487,7 +497,7 @@ rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
+ 		pair = &args.pairs[i];
+ 		if (strcmp("representor", pair->key) == 0) {
+ 			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
+-				RTE_LOG(ERR, EAL, "duplicated representor key: %s\n",
++				RTE_ETHDEV_LOG(ERR, "duplicated representor key: %s\n",
+ 					dargs);
+ 				result = -1;
+ 				goto parse_cleanup;
+@@ -713,7 +723,7 @@ rte_eth_representor_id_get(uint16_t port_id,
+ 		if (info->ranges[i].controller != controller)
+ 			continue;
+ 		if (info->ranges[i].id_end < info->ranges[i].id_base) {
+-			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
++			RTE_ETHDEV_LOG(WARNING, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
+ 				port_id, info->ranges[i].id_base,
+ 				info->ranges[i].id_end, i);
+ 			continue;
+diff --git a/dpdk/lib/ethdev/ethdev_driver.h b/dpdk/lib/ethdev/ethdev_driver.h
+index b482cd12bb..ec56925882 100644
+--- a/dpdk/lib/ethdev/ethdev_driver.h
++++ b/dpdk/lib/ethdev/ethdev_driver.h
+@@ -1655,18 +1655,13 @@ static inline int
+ rte_eth_linkstatus_set(struct rte_eth_dev *dev,
+ 		       const struct rte_eth_link *new_link)
+ {
+-	RTE_ATOMIC(uint64_t) *dev_link = (uint64_t __rte_atomic *)&(dev->data->dev_link);
+-	union {
+-		uint64_t val64;
+-		struct rte_eth_link link;
+-	} orig;
+-
+-	RTE_BUILD_BUG_ON(sizeof(*new_link) != sizeof(uint64_t));
++	struct rte_eth_link old_link;
+ 
+-	orig.val64 = rte_atomic_exchange_explicit(dev_link, *(const uint64_t *)new_link,
+-					rte_memory_order_seq_cst);
++	old_link.val64 = rte_atomic_exchange_explicit(&dev->data->dev_link.val64,
++						      new_link->val64,
++						      rte_memory_order_seq_cst);
+ 
+-	return (orig.link.link_status == new_link->link_status) ? -1 : 0;
++	return (old_link.link_status == new_link->link_status) ? -1 : 0;
+ }
+ 
+ /**
+@@ -1682,12 +1677,11 @@ static inline void
+ rte_eth_linkstatus_get(const struct rte_eth_dev *dev,
+ 		       struct rte_eth_link *link)
+ {
+-	RTE_ATOMIC(uint64_t) *src = (uint64_t __rte_atomic *)&(dev->data->dev_link);
+-	uint64_t *dst = (uint64_t *)link;
+-
+-	RTE_BUILD_BUG_ON(sizeof(*link) != sizeof(uint64_t));
++	struct rte_eth_link curr_link;
+ 
+-	*dst = rte_atomic_load_explicit(src, rte_memory_order_seq_cst);
++	curr_link.val64 = rte_atomic_load_explicit(&dev->data->dev_link.val64,
++						   rte_memory_order_seq_cst);
++	rte_atomic_store_explicit(&link->val64, curr_link.val64, rte_memory_order_seq_cst);
+ }
+ 
+ /**
+diff --git a/dpdk/lib/ethdev/ethdev_pci.h b/dpdk/lib/ethdev/ethdev_pci.h
+index 320e3e0093..c40bc2ed02 100644
+--- a/dpdk/lib/ethdev/ethdev_pci.h
++++ b/dpdk/lib/ethdev/ethdev_pci.h
+@@ -31,7 +31,7 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
+ 	struct rte_pci_device *pci_dev)
+ {
+ 	if ((eth_dev == NULL) || (pci_dev == NULL)) {
+-		RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p",
++		RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p\n",
+ 			(void *)eth_dev, (void *)pci_dev);
+ 		return;
+ 	}
+@@ -93,12 +93,26 @@ rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size)
+ 			return NULL;
+ 
+ 		if (private_data_size) {
++			/* Try and alloc the private-data structure on socket local to the device */
+ 			eth_dev->data->dev_private = rte_zmalloc_socket(name,
+ 				private_data_size, RTE_CACHE_LINE_SIZE,
+ 				dev->device.numa_node);
+-			if (!eth_dev->data->dev_private) {
+-				rte_eth_dev_release_port(eth_dev);
+-				return NULL;
++
++			/* if cannot allocate memory on the socket local to the device
++			 * use rte_malloc to allocate memory on some other socket, if available.
++			 */
++			if (eth_dev->data->dev_private == NULL) {
++				eth_dev->data->dev_private = rte_zmalloc(name,
++						private_data_size, RTE_CACHE_LINE_SIZE);
++
++				if (eth_dev->data->dev_private == NULL) {
++					rte_eth_dev_release_port(eth_dev);
++					return NULL;
++				}
++				/* got memory, but not local, so issue warning */
++				RTE_ETHDEV_LOG(WARNING,
++					       "Private data for ethdev '%s' not allocated on local NUMA node %d\n",
++					       dev->device.name, dev->device.numa_node);
+ 			}
+ 		}
+ 	} else {
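This hunk and the matching one in ethdev_driver.c apply the same allocation policy: prefer the device's NUMA node, and only if that socket has no memory fall back to any socket with a warning rather than failing the probe. A condensed sketch of the pattern; rte_zmalloc_socket() and rte_zmalloc() are the real DPDK calls, the wrapper name is illustrative:

	#include <rte_log.h>
	#include <rte_malloc.h>

	/* Prefer 'numa_node'; if that socket is out of memory, degrade to
	 * any socket with a warning instead of failing device probe. */
	static void *
	alloc_private_data(const char *name, size_t size, int numa_node)
	{
		void *p = rte_zmalloc_socket(name, size, RTE_CACHE_LINE_SIZE,
					     numa_node);

		if (p == NULL) {
			p = rte_zmalloc(name, size, RTE_CACHE_LINE_SIZE);
			if (p != NULL)
				RTE_LOG(WARNING, EAL,
					"private data for '%s' not on NUMA node %d\n",
					name, numa_node);
		}
		return p;
	}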
+diff --git a/dpdk/lib/ethdev/ethdev_private.c b/dpdk/lib/ethdev/ethdev_private.c
+index e98b7188b0..0e1c7b23c1 100644
+--- a/dpdk/lib/ethdev/ethdev_private.c
++++ b/dpdk/lib/ethdev/ethdev_private.c
+@@ -182,7 +182,7 @@ rte_eth_devargs_parse_representor_ports(char *str, void *data)
+ 		RTE_DIM(eth_da->representor_ports));
+ done:
+ 	if (str == NULL)
+-		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
++		RTE_ETHDEV_LOG(ERR, "wrong representor format: %s\n", str);
+ 	return str == NULL ? -1 : 0;
+ }
+ 
+diff --git a/dpdk/lib/ethdev/rte_class_eth.c b/dpdk/lib/ethdev/rte_class_eth.c
+index b61dae849d..311beb17cb 100644
+--- a/dpdk/lib/ethdev/rte_class_eth.c
++++ b/dpdk/lib/ethdev/rte_class_eth.c
+@@ -165,7 +165,7 @@ eth_dev_iterate(const void *start,
+ 			valid_keys = eth_params_keys;
+ 		kvargs = rte_kvargs_parse(str, valid_keys);
+ 		if (kvargs == NULL) {
+-			RTE_LOG(ERR, EAL, "cannot parse argument list\n");
++			RTE_ETHDEV_LOG(ERR, "cannot parse argument list\n");
+ 			rte_errno = EINVAL;
+ 			return NULL;
+ 		}
+diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c
+index 3858983fcc..b9d99ece15 100644
+--- a/dpdk/lib/ethdev/rte_ethdev.c
++++ b/dpdk/lib/ethdev/rte_ethdev.c
+@@ -724,7 +724,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
+ 	uint16_t pid;
+ 
+ 	if (name == NULL) {
+-		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
++		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2394,41 +2394,41 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+ 		nb_rx_desc = cap.max_nb_desc;
+ 	if (nb_rx_desc > cap.max_nb_desc) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
++			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
+ 			nb_rx_desc, cap.max_nb_desc);
+ 		return -EINVAL;
+ 	}
+ 	if (conf->peer_count > cap.max_rx_2_tx) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
++			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
+ 			conf->peer_count, cap.max_rx_2_tx);
+ 		return -EINVAL;
+ 	}
+ 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to use locked device memory for Rx queue, which is not supported");
++			"Attempt to use locked device memory for Rx queue, which is not supported\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to use DPDK memory for Rx queue, which is not supported");
++			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to use mutually exclusive memory settings for Rx queue");
++			"Attempt to use mutually exclusive memory settings for Rx queue\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->force_memory &&
+ 	    !conf->use_locked_device_memory &&
+ 	    !conf->use_rte_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to force Rx queue memory settings, but none is set");
++			"Attempt to force Rx queue memory settings, but none is set\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->peer_count == 0) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
++			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
+ 			conf->peer_count);
+ 		return -EINVAL;
+ 	}
+@@ -2438,7 +2438,7 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+ 			count++;
+ 	}
+ 	if (count > cap.max_nb_queues) {
+-		RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
++		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
+ 		cap.max_nb_queues);
+ 		return -EINVAL;
+ 	}
+@@ -2597,41 +2597,41 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
+ 		nb_tx_desc = cap.max_nb_desc;
+ 	if (nb_tx_desc > cap.max_nb_desc) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
++			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
+ 			nb_tx_desc, cap.max_nb_desc);
+ 		return -EINVAL;
+ 	}
+ 	if (conf->peer_count > cap.max_tx_2_rx) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
++			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
+ 			conf->peer_count, cap.max_tx_2_rx);
+ 		return -EINVAL;
+ 	}
+ 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to use locked device memory for Tx queue, which is not supported");
++			"Attempt to use locked device memory for Tx queue, which is not supported\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to use DPDK memory for Tx queue, which is not supported");
++			"Attempt to use DPDK memory for Tx queue, which is not supported\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to use mutually exclusive memory settings for Tx queue");
++			"Attempt to use mutually exclusive memory settings for Tx queue\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->force_memory &&
+ 	    !conf->use_locked_device_memory &&
+ 	    !conf->use_rte_memory) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Attempt to force Tx queue memory settings, but none is set");
++			"Attempt to force Tx queue memory settings, but none is set\n");
+ 		return -EINVAL;
+ 	}
+ 	if (conf->peer_count == 0) {
+ 		RTE_ETHDEV_LOG(ERR,
+-			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
++			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
+ 			conf->peer_count);
+ 		return -EINVAL;
+ 	}
+@@ -2641,7 +2641,7 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
+ 			count++;
+ 	}
+ 	if (count > cap.max_nb_queues) {
+-		RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
++		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
+ 		cap.max_nb_queues);
+ 		return -EINVAL;
+ 	}
+@@ -6716,7 +6716,7 @@ rte_eth_ip_reassembly_capability_get(uint16_t port_id,
+ 	}
+ 
+ 	if (reassembly_capa == NULL) {
+-		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL");
++		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -6752,7 +6752,7 @@ rte_eth_ip_reassembly_conf_get(uint16_t port_id,
+ 	}
+ 
+ 	if (conf == NULL) {
+-		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL");
++		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -6780,7 +6780,7 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id,
+ 	if (dev->data->dev_configured == 0) {
+ 		RTE_ETHDEV_LOG(ERR,
+ 			"Device with port_id=%u is not configured.\n"
+-			"Cannot set IP reassembly configuration",
++			"Cannot set IP reassembly configuration\n",
+ 			port_id);
+ 		return -EINVAL;
+ 	}
+diff --git a/dpdk/lib/ethdev/rte_ethdev.h b/dpdk/lib/ethdev/rte_ethdev.h
+index 77331ce652..545799c341 100644
+--- a/dpdk/lib/ethdev/rte_ethdev.h
++++ b/dpdk/lib/ethdev/rte_ethdev.h
+@@ -331,13 +331,18 @@ struct rte_eth_stats {
+ /**
+  * A structure used to retrieve link-level information of an Ethernet port.
+  */
+-__extension__
+ struct rte_eth_link {
+-	uint32_t link_speed;        /**< RTE_ETH_SPEED_NUM_ */
+-	uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+-	uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
+-	uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
+-} __rte_aligned(8);      /**< aligned for atomic64 read/write */
++	union {
++		RTE_ATOMIC(uint64_t) val64; /**< used for atomic64 read/write */
++		__extension__
++		struct {
++			uint32_t link_speed;	    /**< RTE_ETH_SPEED_NUM_ */
++			uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
++			uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
++			uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
++		};
++	};
++};
+ 
+ /**@{@name Link negotiation
+  * Constants used in link management.
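Embedding the val64 view in struct rte_eth_link is what lets the ethdev_driver.h helpers earlier in this patch drop their cast-based type punning: the whole link word can now be exchanged or loaded atomically through the union. A hedged sketch of the access pattern, using plain C11 atomics in place of DPDK's rte_memory_order wrappers:

	#include <stdatomic.h>
	#include <stdint.h>

	struct link_word {
		union {
			_Atomic uint64_t val64;	/* atomic view of the struct */
			struct {
				uint32_t speed;
				uint16_t duplex  : 1;
				uint16_t autoneg : 1;
				uint16_t status  : 1;
			};
		};
	};

	/* Publish a new link state in one 64-bit exchange; return 0 only
	 * when the status bit changed, mirroring rte_eth_linkstatus_set(). */
	static int
	link_set(struct link_word *dst, const struct link_word *src)
	{
		struct link_word old;

		old.val64 = atomic_exchange(&dst->val64, src->val64);
		return old.status == src->status ? -1 : 0;
	}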
+diff --git a/dpdk/lib/ethdev/rte_flow.c b/dpdk/lib/ethdev/rte_flow.c
+index 549e329558..fa2a8fedce 100644
+--- a/dpdk/lib/ethdev/rte_flow.c
++++ b/dpdk/lib/ethdev/rte_flow.c
+@@ -216,7 +216,7 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
+ 		       sizeof(struct rte_flow_action_of_push_mpls)),
+ 	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
+ 	MK_FLOW_ACTION(VXLAN_DECAP, 0),
+-	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
++	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
+ 	MK_FLOW_ACTION(NVGRE_DECAP, 0),
+ 	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
+ 	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
+@@ -616,6 +616,7 @@ rte_flow_conv_item_spec(void *buf, const size_t size,
+ 	switch (item->type) {
+ 		union {
+ 			const struct rte_flow_item_raw *raw;
++			const struct rte_flow_item_geneve_opt *geneve_opt;
+ 		} spec;
+ 		union {
+ 			const struct rte_flow_item_raw *raw;
+@@ -625,10 +626,13 @@ rte_flow_conv_item_spec(void *buf, const size_t size,
+ 		} mask;
+ 		union {
+ 			const struct rte_flow_item_raw *raw;
++			const struct rte_flow_item_geneve_opt *geneve_opt;
+ 		} src;
+ 		union {
+ 			struct rte_flow_item_raw *raw;
++			struct rte_flow_item_geneve_opt *geneve_opt;
+ 		} dst;
++		void *deep_src;
+ 		size_t tmp;
+ 
+ 	case RTE_FLOW_ITEM_TYPE_RAW:
+@@ -657,13 +661,30 @@ rte_flow_conv_item_spec(void *buf, const size_t size,
+ 			tmp = last.raw->length & mask.raw->length;
+ 		if (tmp) {
+ 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
+-			if (size >= off + tmp)
+-				dst.raw->pattern = rte_memcpy
+-					((void *)((uintptr_t)dst.raw + off),
+-					 src.raw->pattern, tmp);
++			if (size >= off + tmp) {
++				deep_src = (void *)((uintptr_t)dst.raw + off);
++				dst.raw->pattern = rte_memcpy(deep_src,
++							      src.raw->pattern,
++							      tmp);
++			}
+ 			off += tmp;
+ 		}
+ 		break;
++	case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
++		off = rte_flow_conv_copy(buf, data, size,
++					 rte_flow_desc_item, item->type);
++		spec.geneve_opt = item->spec;
++		src.geneve_opt = data;
++		dst.geneve_opt = buf;
++		tmp = spec.geneve_opt->option_len << 2;
++		if (size > 0 && src.geneve_opt->data) {
++			deep_src = (void *)((uintptr_t)(dst.geneve_opt + 1));
++			dst.geneve_opt->data = rte_memcpy(deep_src,
++							  src.geneve_opt->data,
++							  tmp);
++		}
++		off += tmp;
++		break;
+ 	default:
+ 		off = rte_flow_conv_copy(buf, data, size,
+ 					 rte_flow_desc_item, item->type);
+diff --git a/dpdk/lib/ethdev/rte_flow.h b/dpdk/lib/ethdev/rte_flow.h
+index affdc8121b..4cdc1f1d8f 100644
+--- a/dpdk/lib/ethdev/rte_flow.h
++++ b/dpdk/lib/ethdev/rte_flow.h
+@@ -3471,7 +3471,7 @@ struct rte_flow_action_vxlan_encap {
+  */
+ struct rte_flow_action_nvgre_encap {
+ 	/**
+-	 * Encapsulating vxlan tunnel definition
++	 * Encapsulating nvgre tunnel definition
+ 	 * (terminated by the END pattern item).
+ 	 */
+ 	struct rte_flow_item *definition;
+diff --git a/dpdk/lib/eventdev/eventdev_pmd.h b/dpdk/lib/eventdev/eventdev_pmd.h
+index 30bd90085c..2ec5aec0a8 100644
+--- a/dpdk/lib/eventdev/eventdev_pmd.h
++++ b/dpdk/lib/eventdev/eventdev_pmd.h
+@@ -49,14 +49,14 @@ extern "C" {
+ /* Macros to check for valid device */
+ #define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
+ 	if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+-		RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
++		RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \
+ 		return retval; \
+ 	} \
+ } while (0)
+ 
+ #define RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, errno, retval) do { \
+ 	if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+-		RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
++		RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \
+ 		rte_errno = errno; \
+ 		return retval; \
+ 	} \
+@@ -64,7 +64,7 @@ extern "C" {
+ 
+ #define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \
+ 	if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+-		RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
++		RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \
+ 		return; \
+ 	} \
+ } while (0)
+diff --git a/dpdk/lib/eventdev/rte_event_crypto_adapter.c b/dpdk/lib/eventdev/rte_event_crypto_adapter.c
+index 1b435c9f0e..d11c0d4be8 100644
+--- a/dpdk/lib/eventdev/rte_event_crypto_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_crypto_adapter.c
+@@ -133,11 +133,33 @@ static struct event_crypto_adapter **event_crypto_adapter;
+ /* Macros to check for valid adapter */
+ #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ 	if (!eca_valid_id(id)) { \
+-		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
++		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
+ 		return retval; \
+ 	} \
+ } while (0)
+ 
++#define ECA_DYNFIELD_NAME "eca_ev_opaque_data"
++/* Device-specific metadata field type */
++typedef uint8_t eca_dynfield_t;
++
++/* mbuf dynamic field offset for device-specific metadata */
++int eca_dynfield_offset = -1;
++
++static int
++eca_dynfield_register(void)
++{
++	static const struct rte_mbuf_dynfield eca_dynfield_desc = {
++		.name = ECA_DYNFIELD_NAME,
++		.size = sizeof(eca_dynfield_t),
++		.align = __alignof__(eca_dynfield_t),
++		.flags = 0,
++	};
++
++	eca_dynfield_offset =
++		rte_mbuf_dynfield_register(&eca_dynfield_desc);
++	return eca_dynfield_offset;
++}
++
+ static inline int
+ eca_valid_id(uint8_t id)
+ {
+@@ -245,20 +267,28 @@ eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
+ 	struct rte_crypto_op **ops = bufp->op_buffer;
+ 
+ 	if (*tailp > *headp)
++		/* Flush ops from head pointer to (tail - head) OPs */
+ 		n = *tailp - *headp;
+ 	else if (*tailp < *headp)
++		/* Circ buffer - Rollover.
++		 * Flush OPs from head to max size of buffer.
++		 * Rest of the OPs will be flushed in next iteration.
++		 */
+ 		n = bufp->size - *headp;
+ 	else { /* head == tail case */
+ 		/* when head == tail,
+ 		 * circ buff is either full(tail pointer roll over) or empty
+ 		 */
+ 		if (bufp->count != 0) {
+-			/* circ buffer is full */
+-			n = bufp->count;
++			/* Circ buffer - FULL.
++			 * Flush OPs from head to max size of buffer.
++			 * Rest of the OPs will be flushed in next iteration.
++			 */
++			n = bufp->size - *headp;
+ 		} else {
+-			/* circ buffer is empty */
++			/* Circ buffer - Empty */
+ 			*nb_ops_flushed = 0;
+-			return 0;  /* buffer empty */
++			return 0;
+ 		}
+ 	}
+ 
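The rewritten flush only ever drains the contiguous run starting at head; the wrapped remainder waits for the next call. In particular the full case (head == tail with count != 0) now flushes size - head rather than count, since count elements are not contiguous in memory. A sketch of just the chunk computation, under the usual ring convention that tail is the next write slot:

	/* Contiguous elements available from 'head' in a ring of 'size'
	 * slots holding 'count' elements; anything wrapped past slot 0 is
	 * left for the next flush. */
	static unsigned int
	contiguous_from_head(unsigned int head, unsigned int tail,
			     unsigned int size, unsigned int count)
	{
		if (tail > head)
			return tail - head;	/* no wrap */
		if (tail < head || count != 0)
			return size - head;	/* wrapped, or full ring */
		return 0;			/* head == tail and empty */
	}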
+@@ -309,7 +339,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
+ 
+ 	ret = rte_event_dev_configure(dev_id, &dev_conf);
+ 	if (ret) {
+-		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
++		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
+ 		if (started) {
+ 			if (rte_event_dev_start(dev_id))
+ 				return -EIO;
+@@ -319,7 +349,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
+ 
+ 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ 	if (ret) {
+-		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
++		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
+ 		return ret;
+ 	}
+ 
+@@ -391,7 +421,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ 					sizeof(struct crypto_device_info), 0,
+ 					socket_id);
+ 	if (adapter->cdevs == NULL) {
+-		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
++		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
+ 		eca_circular_buffer_free(&adapter->ebuf);
+ 		rte_free(adapter);
+ 		return -ENOMEM;
+@@ -483,6 +513,25 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+ 		crypto_op = ev[i].event_ptr;
+ 		if (crypto_op == NULL)
+ 			continue;
++
++		/** "struct rte_event::impl_opaque" field passed on from
++		 *  the eventdev PMD could have a different value per event.
++		 *  For session-based crypto operations, retain
++		 *  "struct rte_event::impl_opaque" in an mbuf dynamic field
++		 *  and restore it after copying event information from the
++		 *  session event metadata.
++		 *  For session-less, each crypto operation carries event
++		 *  metadata and retains "struct rte_event::impl_opaque"
++		 *  information to be passed back to the eventdev PMD.
++		 */
++		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
++			struct rte_mbuf *mbuf = crypto_op->sym->m_src;
++
++			*RTE_MBUF_DYNFIELD(mbuf,
++					eca_dynfield_offset,
++					eca_dynfield_t *) = ev[i].impl_opaque;
++		}
++
+ 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
+ 		if (m_data == NULL) {
+ 			rte_pktmbuf_free(crypto_op->sym->m_src);
+@@ -649,6 +698,21 @@ eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
+ 
+ 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
+ 		ev->event_ptr = ops[i];
++
++		/** Restore "struct rte_event::impl_opaque" from mbuf
++		 *  dynamic field for session-based crypto operations.
++		 *  For session-less, each crypto operation carries event
++		 *  metadata and retains "struct rte_event::impl_opaque"
++		 *  information to be passed back to the eventdev PMD.
++		 */
++		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
++			struct rte_mbuf *mbuf = ops[i]->sym->m_src;
++
++			ev->impl_opaque = *RTE_MBUF_DYNFIELD(mbuf,
++							eca_dynfield_offset,
++							eca_dynfield_t *);
++		}
++
+ 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ 		if (adapter->implicit_release_disabled)
+ 			ev->op = RTE_EVENT_OP_FORWARD;
+@@ -887,6 +951,18 @@ eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
+ 	}
+ 
+ 	adapter->implicit_release_disabled = (uint8_t)impl_rel;
++
++	/** Register for mbuf dyn field to store/restore
++	 *  "struct rte_event::impl_opaque"
++	 */
++	eca_dynfield_offset = eca_dynfield_register();
++	if (eca_dynfield_offset < 0) {
++		RTE_EDEV_LOG_ERR("Failed to register eca mbuf dyn field");
++		eca_circular_buffer_free(&adapter->ebuf);
++		rte_free(adapter);
++		return -EINVAL;
++	}
++
+ 	adapter->service_inited = 1;
+ 
+ 	return ret;
+@@ -1403,7 +1479,7 @@ rte_event_crypto_adapter_runtime_params_set(uint8_t id,
+ 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ 
+ 	if (params == NULL) {
+-		RTE_EDEV_LOG_ERR("params pointer is NULL\n");
++		RTE_EDEV_LOG_ERR("params pointer is NULL");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1436,7 +1512,7 @@ rte_event_crypto_adapter_runtime_params_get(uint8_t id,
+ 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ 
+ 	if (params == NULL) {
+-		RTE_EDEV_LOG_ERR("params pointer is NULL\n");
++		RTE_EDEV_LOG_ERR("params pointer is NULL");
+ 		return -EINVAL;
+ 	}
+ 
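The dynamic-field machinery above exists because a session-based crypto op has nowhere to carry per-event data, so rte_event::impl_opaque is parked in the mbuf instead. rte_mbuf_dynfield_register() hands back a byte offset into struct rte_mbuf, and RTE_MBUF_DYNFIELD() turns it into a typed pointer. A minimal round-trip sketch; the field name here is illustrative:

	#include <rte_mbuf.h>
	#include <rte_mbuf_dyn.h>

	/* Register a one-byte field and round-trip a value through an mbuf.
	 * The offset is process-wide once registered. */
	static int
	dynfield_roundtrip(struct rte_mbuf *m)
	{
		static const struct rte_mbuf_dynfield desc = {
			.name = "demo_opaque",
			.size = sizeof(uint8_t),
			.align = __alignof__(uint8_t),
		};
		int off = rte_mbuf_dynfield_register(&desc);

		if (off < 0)
			return -1;	/* no space left in struct rte_mbuf */

		*RTE_MBUF_DYNFIELD(m, off, uint8_t *) = 0x2a;	/* store   */
		return *RTE_MBUF_DYNFIELD(m, off, uint8_t *);	/* restore */
	}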
+diff --git a/dpdk/lib/eventdev/rte_event_dma_adapter.c b/dpdk/lib/eventdev/rte_event_dma_adapter.c
+index af4b5ad388..4196164305 100644
+--- a/dpdk/lib/eventdev/rte_event_dma_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_dma_adapter.c
+@@ -20,7 +20,7 @@
+ #define EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
+ 	do { \
+ 		if (!edma_adapter_valid_id(id)) { \
+-			RTE_EDEV_LOG_ERR("Invalid DMA adapter id = %d\n", id); \
++			RTE_EDEV_LOG_ERR("Invalid DMA adapter id = %d", id); \
+ 			return retval; \
+ 		} \
+ 	} while (0)
+@@ -313,7 +313,7 @@ edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapte
+ 
+ 	ret = rte_event_dev_configure(evdev_id, &dev_conf);
+ 	if (ret) {
+-		RTE_EDEV_LOG_ERR("Failed to configure event dev %u\n", evdev_id);
++		RTE_EDEV_LOG_ERR("Failed to configure event dev %u", evdev_id);
+ 		if (started) {
+ 			if (rte_event_dev_start(evdev_id))
+ 				return -EIO;
+@@ -323,7 +323,7 @@ edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapte
+ 
+ 	ret = rte_event_port_setup(evdev_id, port_id, port_conf);
+ 	if (ret) {
+-		RTE_EDEV_LOG_ERR("Failed to setup event port %u\n", port_id);
++		RTE_EDEV_LOG_ERR("Failed to setup event port %u", port_id);
+ 		return ret;
+ 	}
+ 
+@@ -407,7 +407,7 @@ rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id,
+ 					       num_dma_dev * sizeof(struct dma_device_info), 0,
+ 					       socket_id);
+ 	if (adapter->dma_devs == NULL) {
+-		RTE_EDEV_LOG_ERR("Failed to get memory for DMA devices\n");
++		RTE_EDEV_LOG_ERR("Failed to get memory for DMA devices");
+ 		edma_circular_buffer_free(&adapter->ebuf);
+ 		rte_free(adapter);
+ 		return -ENOMEM;
+@@ -417,7 +417,7 @@ rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id,
+ 	for (i = 0; i < num_dma_dev; i++) {
+ 		ret = rte_dma_info_get(i, &info);
+ 		if (ret) {
+-			RTE_EDEV_LOG_ERR("Failed to get dma device info\n");
++			RTE_EDEV_LOG_ERR("Failed to get dma device info");
+ 			edma_circular_buffer_free(&adapter->ebuf);
+ 			rte_free(adapter);
+ 			return ret;
+@@ -1046,7 +1046,7 @@ rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,
+ 							sizeof(struct dma_vchan_info),
+ 							0, adapter->socket_id);
+ 			if (dev_info->vchanq == NULL) {
+-				printf("Queue pair add not supported\n");
++				RTE_EDEV_LOG_ERR("Queue pair add not supported");
+ 				return -ENOMEM;
+ 			}
+ 		}
+@@ -1057,7 +1057,7 @@ rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,
+ 						sizeof(struct dma_vchan_info),
+ 						0, adapter->socket_id);
+ 			if (dev_info->tqmap == NULL) {
+-				printf("tq pair add not supported\n");
++				RTE_EDEV_LOG_ERR("tq pair add not supported");
+ 				return -ENOMEM;
+ 			}
+ 		}
+@@ -1297,7 +1297,7 @@ rte_event_dma_adapter_runtime_params_set(uint8_t id,
+ 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ 
+ 	if (params == NULL) {
+-		RTE_EDEV_LOG_ERR("params pointer is NULL\n");
++		RTE_EDEV_LOG_ERR("params pointer is NULL");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1326,7 +1326,7 @@ rte_event_dma_adapter_runtime_params_get(uint8_t id,
+ 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ 
+ 	if (params == NULL) {
+-		RTE_EDEV_LOG_ERR("params pointer is NULL\n");
++		RTE_EDEV_LOG_ERR("params pointer is NULL");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c
+index 6db03adf04..1b83a55b5c 100644
+--- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c
+@@ -293,14 +293,14 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+ 
+ #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ 	if (!rxa_validate_id(id)) { \
+-		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
++		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \
+ 		return retval; \
+ 	} \
+ } while (0)
+ 
+ #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \
+ 	if (!rxa_validate_id(id)) { \
+-		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
++		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \
+ 		ret = retval; \
+ 		goto error; \
+ 	} \
+@@ -308,15 +308,15 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+ 
+ #define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \
+ 	if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \
+-		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token\n"); \
++		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token"); \
+ 		ret = retval; \
+ 		goto error; \
+ 	} \
+ } while (0)
+ 
+-#define RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, retval) do { \
++#define RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(port_id, retval) do { \
+ 	if (!rte_eth_dev_is_valid_port(port_id)) { \
+-		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
++		RTE_EDEV_LOG_ERR("Invalid port_id=%u", port_id); \
+ 		ret = retval; \
+ 		goto error; \
+ 	} \
+@@ -1540,7 +1540,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
+ 
+ 	ret = rte_event_dev_configure(dev_id, &dev_conf);
+ 	if (ret) {
+-		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
++		RTE_EDEV_LOG_ERR("failed to configure event dev %u",
+ 						dev_id);
+ 		if (started) {
+ 			if (rte_event_dev_start(dev_id))
+@@ -1551,7 +1551,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
+ 
+ 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ 	if (ret) {
+-		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
++		RTE_EDEV_LOG_ERR("failed to setup event port %u",
+ 					port_id);
+ 		return ret;
+ 	}
+@@ -1628,7 +1628,7 @@ rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
+ 	if (!err)
+ 		return 0;
+ 
+-	RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
++	RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d", err);
+ 	rte_free(rx_adapter->epoll_events);
+ error:
+ 	rte_ring_free(rx_adapter->intr_ring);
+@@ -1644,12 +1644,12 @@ rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
+ 
+ 	err = pthread_cancel((pthread_t)rx_adapter->rx_intr_thread.opaque_id);
+ 	if (err)
+-		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
++		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d",
+ 				err);
+ 
+ 	err = rte_thread_join(rx_adapter->rx_intr_thread, NULL);
+ 	if (err)
+-		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
++		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d", err);
+ 
+ 	rte_free(rx_adapter->epoll_events);
+ 	rte_ring_free(rx_adapter->intr_ring);
+@@ -1915,7 +1915,7 @@ rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
+ 	if (rte_mbuf_dyn_rx_timestamp_register(
+ 			&event_eth_rx_timestamp_dynfield_offset,
+ 			&event_eth_rx_timestamp_dynflag) != 0) {
+-		RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
++		RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf");
+ 		return -rte_errno;
+ 	}
+ 
+@@ -2445,7 +2445,7 @@ rxa_create(uint8_t id, uint8_t dev_id,
+ 			    RTE_DIM(default_rss_key));
+ 
+ 	if (rx_adapter->eth_devices == NULL) {
+-		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
++		RTE_EDEV_LOG_ERR("failed to get mem for eth devices");
+ 		rte_free(rx_adapter);
+ 		return -ENOMEM;
+ 	}
+@@ -2497,12 +2497,12 @@ rxa_config_params_validate(struct rte_event_eth_rx_adapter_params *rxa_params,
+ 		return 0;
+ 	} else if (!rxa_params->use_queue_event_buf &&
+ 		    rxa_params->event_buf_size == 0) {
+-		RTE_EDEV_LOG_ERR("event buffer size can't be zero\n");
++		RTE_EDEV_LOG_ERR("event buffer size can't be zero");
+ 		return -EINVAL;
+ 	} else if (rxa_params->use_queue_event_buf &&
+ 		   rxa_params->event_buf_size != 0) {
+ 		RTE_EDEV_LOG_ERR("event buffer size needs to be configured "
+-				 "as part of queue add\n");
++				 "as part of queue add");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -3597,7 +3597,7 @@ handle_rxa_stats(const char *cmd __rte_unused,
+ 	/* Get Rx adapter stats */
+ 	if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
+ 					       &rx_adptr_stats)) {
+-		RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n");
++		RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats");
+ 		return -1;
+ 	}
+ 
+@@ -3636,7 +3636,7 @@ handle_rxa_stats_reset(const char *cmd __rte_unused,
+ 
+ 	/* Reset Rx adapter stats */
+ 	if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
+-		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n");
++		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats");
+ 		return -1;
+ 	}
+ 
+@@ -3671,7 +3671,7 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
+ 
+ 	/* Get device ID from parameter string */
+ 	eth_dev_id = strtoul(token, NULL, 10);
+-	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
++	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+ 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+@@ -3743,7 +3743,7 @@ handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+ 
+ 	/* Get device ID from parameter string */
+ 	eth_dev_id = strtoul(token, NULL, 10);
+-	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
++	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+ 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+@@ -3813,7 +3813,7 @@ handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+ 
+ 	/* Get device ID from parameter string */
+ 	eth_dev_id = strtoul(token, NULL, 10);
+-	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
++	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+ 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+@@ -3868,7 +3868,7 @@ handle_rxa_instance_get(const char *cmd __rte_unused,
+ 
+ 	/* Get device ID from parameter string */
+ 	eth_dev_id = strtoul(token, NULL, 10);
+-	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
++	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+ 
+ 	token = strtok(NULL, ",");
+ 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+diff --git a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c
+index 360d5caf6a..56435be991 100644
+--- a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c
+@@ -334,7 +334,7 @@ txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
+ 
+ 	ret = rte_event_port_setup(dev_id, port_id, pc);
+ 	if (ret) {
+-		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
++		RTE_EDEV_LOG_ERR("failed to setup event port %u",
+ 					port_id);
+ 		if (started) {
+ 			if (rte_event_dev_start(dev_id))
+diff --git a/dpdk/lib/eventdev/rte_event_timer_adapter.c b/dpdk/lib/eventdev/rte_event_timer_adapter.c
+index 27466707bc..3f22e85173 100644
+--- a/dpdk/lib/eventdev/rte_event_timer_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_timer_adapter.c
+@@ -106,7 +106,7 @@ default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
+ 
+ 	ret = rte_event_dev_configure(dev_id, &dev_conf);
+ 	if (ret < 0) {
+-		EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id);
++		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
+ 		if (started)
+ 			if (rte_event_dev_start(dev_id))
+ 				return -EIO;
+@@ -116,7 +116,7 @@ default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
+ 
+ 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ 	if (ret < 0) {
+-		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
++		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
+ 			      port_id, dev_id);
+ 		return ret;
+ 	}
+diff --git a/dpdk/lib/eventdev/rte_eventdev.c b/dpdk/lib/eventdev/rte_eventdev.c
+index 0ca32d6721..1c865e993f 100644
+--- a/dpdk/lib/eventdev/rte_eventdev.c
++++ b/dpdk/lib/eventdev/rte_eventdev.c
+@@ -1007,13 +1007,13 @@ rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t
+ 	}
+ 
+ 	if (*dev->dev_ops->port_link == NULL) {
+-		RTE_EDEV_LOG_ERR("Function not supported\n");
++		RTE_EDEV_LOG_ERR("Function not supported");
+ 		rte_errno = ENOTSUP;
+ 		return 0;
+ 	}
+ 
+ 	if (profile_id && *dev->dev_ops->port_link_profile == NULL) {
+-		RTE_EDEV_LOG_ERR("Function not supported\n");
++		RTE_EDEV_LOG_ERR("Function not supported");
+ 		rte_errno = ENOTSUP;
+ 		return 0;
+ 	}
+@@ -1428,8 +1428,8 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
+ 	int ret;
+ 
+ 	if (!nb_elem) {
+-		RTE_LOG(ERR, EVENTDEV,
+-			"Invalid number of elements=%d requested\n", nb_elem);
++		RTE_EDEV_LOG_ERR("Invalid number of elements=%d requested",
++			nb_elem);
+ 		rte_errno = EINVAL;
+ 		return NULL;
+ 	}
+@@ -1444,7 +1444,7 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
+ 	mp_ops_name = rte_mbuf_best_mempool_ops();
+ 	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
+ 	if (ret != 0) {
+-		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
++		RTE_EDEV_LOG_ERR("error setting mempool handler");
+ 		goto err;
+ 	}
+ 
+@@ -2002,7 +2002,7 @@ handle_dev_dump(const char *cmd __rte_unused,
+ 
+ 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ 
+-	buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN);
++	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
+ 	if (buf == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/dpdk/lib/eventdev/rte_eventdev.h b/dpdk/lib/eventdev/rte_eventdev.h
+index ec9b02455d..7fd9016ca7 100644
+--- a/dpdk/lib/eventdev/rte_eventdev.h
++++ b/dpdk/lib/eventdev/rte_eventdev.h
+@@ -515,9 +515,9 @@ rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ struct rte_event_dev_config {
+ 	uint32_t dequeue_timeout_ns;
+ 	/**< rte_event_dequeue_burst() timeout on this device.
+-	 * This value should be in the range of *min_dequeue_timeout_ns* and
+-	 * *max_dequeue_timeout_ns* which previously provided in
+-	 * rte_event_dev_info_get()
++	 * This value should be in the range of @ref rte_event_dev_info.min_dequeue_timeout_ns and
++	 * @ref rte_event_dev_info.max_dequeue_timeout_ns returned by
++	 * @ref rte_event_dev_info_get()
+ 	 * The value 0 is allowed, in which case, default dequeue timeout used.
+ 	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ 	 */
+@@ -525,40 +525,53 @@ struct rte_event_dev_config {
+ 	/**< In a *closed system* this field is the limit on maximum number of
+ 	 * events that can be inflight in the eventdev at a given time. The
+ 	 * limit is required to ensure that the finite space in a closed system
+-	 * is not overwhelmed. The value cannot exceed the *max_num_events*
+-	 * as provided by rte_event_dev_info_get().
+-	 * This value should be set to -1 for *open system*.
++	 * is not exhausted.
++	 * The value cannot exceed @ref rte_event_dev_info.max_num_events
++	 * returned by rte_event_dev_info_get().
++	 *
++	 * This value should be set to -1 for *open systems*, that is,
++	 * those systems returning -1 in @ref rte_event_dev_info.max_num_events.
++	 *
++	 * @see rte_event_port_conf.new_event_threshold
+ 	 */
+ 	uint8_t nb_event_queues;
+ 	/**< Number of event queues to configure on this device.
+-	 * This value cannot exceed the *max_event_queues* which previously
+-	 * provided in rte_event_dev_info_get()
++	 * This value *includes* any single-link queue-port pairs to be used.
++	 * This value cannot exceed @ref rte_event_dev_info.max_event_queues +
++	 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs
++	 * returned by rte_event_dev_info_get().
++	 * The number of non-single-link queues, i.e. this value less
++	 * *nb_single_link_event_port_queues* in this struct, cannot exceed
++	 * @ref rte_event_dev_info.max_event_queues
+ 	 */
+ 	uint8_t nb_event_ports;
+ 	/**< Number of event ports to configure on this device.
+-	 * This value cannot exceed the *max_event_ports* which previously
+-	 * provided in rte_event_dev_info_get()
++	 * This value *includes* any single-link queue-port pairs to be used.
++	 * This value cannot exceed @ref rte_event_dev_info.max_event_ports +
++	 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs
++	 * returned by rte_event_dev_info_get().
++	 * The number of non-single-link ports, i.e. this value less
++	 * *nb_single_link_event_port_queues* in this struct, cannot exceed
++	 * @ref rte_event_dev_info.max_event_ports
+ 	 */
+ 	uint32_t nb_event_queue_flows;
+-	/**< Number of flows for any event queue on this device.
+-	 * This value cannot exceed the *max_event_queue_flows* which previously
+-	 * provided in rte_event_dev_info_get()
++	/**< Max number of flows needed for a single event queue on this device.
++	 * This value cannot exceed @ref rte_event_dev_info.max_event_queue_flows
++	 * returned by rte_event_dev_info_get()
+ 	 */
+ 	uint32_t nb_event_port_dequeue_depth;
+-	/**< Maximum number of events can be dequeued at a time from an
+-	 * event port by this device.
+-	 * This value cannot exceed the *max_event_port_dequeue_depth*
+-	 * which previously provided in rte_event_dev_info_get().
++	/**< Max number of events that can be dequeued at a time from an event port on this device.
++	 * This value cannot exceed @ref rte_event_dev_info.max_event_port_dequeue_depth
++	 * returned by rte_event_dev_info_get().
+ 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
+-	 * @see rte_event_port_setup()
++	 * @see rte_event_port_setup() rte_event_dequeue_burst()
+ 	 */
+ 	uint32_t nb_event_port_enqueue_depth;
+-	/**< Maximum number of events can be enqueued at a time from an
+-	 * event port by this device.
+-	 * This value cannot exceed the *max_event_port_enqueue_depth*
+-	 * which previously provided in rte_event_dev_info_get().
++	/**< Maximum number of events that can be enqueued at a time to an event port on this device.
++	 * This value cannot exceed @ref rte_event_dev_info.max_event_port_enqueue_depth
++	 * returned by rte_event_dev_info_get().
+ 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
+-	 * @see rte_event_port_setup()
++	 * @see rte_event_port_setup() rte_event_enqueue_burst()
+ 	 */
+ 	uint32_t event_dev_cfg;
+ 	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+@@ -568,7 +581,7 @@ struct rte_event_dev_config {
+ 	 * queues; this value cannot exceed *nb_event_ports* or
+ 	 * *nb_event_queues*. If the device has ports and queues that are
+ 	 * optimized for single-link usage, this field is a hint for how many
+-	 * to allocate; otherwise, regular event ports and queues can be used.
++	 * to allocate; otherwise, regular event ports and queues will be used.
+ 	 */
+ };
+ 
+@@ -1098,10 +1111,8 @@ struct rte_event_vector {
+ 		 * port and queue of the mbufs in the vector
+ 		 */
+ 		struct {
+-			uint16_t port;
+-			/* Ethernet device port id. */
+-			uint16_t queue;
+-			/* Ethernet device queue id. */
++			uint16_t port;   /**< Ethernet device port id. */
++			uint16_t queue;  /**< Ethernet device queue id. */
+ 		};
+ 	};
+ 	/**< Union to hold common attributes of the vector array. */
+@@ -1130,7 +1141,11 @@ struct rte_event_vector {
+ 	 * vector array can be an array of mbufs or pointers or opaque u64
+ 	 * values.
+ 	 */
++#ifndef __DOXYGEN__
+ } __rte_aligned(16);
++#else
++};
++#endif
+ 
+ /* Scheduler type definitions */
+ #define RTE_SCHED_TYPE_ORDERED          0
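The reworked comments pin down the configuration bounds: nb_event_queues and nb_event_ports each include the single-link pairs, and may exceed the per-kind maxima only by max_single_link_event_port_queue_pairs. An illustrative configuration honouring those bounds for a device reporting max_event_queues = 8 and max_single_link_event_port_queue_pairs = 2 (all values are made up):

	#include <rte_eventdev.h>

	/* 10 queues and 10 ports are legal against max_event_queues = 8
	 * only because 2 of each are declared single-link pairs. */
	static const struct rte_event_dev_config cfg = {
		.nb_events_limit = 4096,
		.nb_event_queues = 10,		/* 8 regular + 2 single-link */
		.nb_event_ports = 10,		/* 8 regular + 2 single-link */
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 32,
		.nb_event_port_enqueue_depth = 32,
		.nb_single_link_event_port_queues = 2,
	};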
+diff --git a/dpdk/lib/graph/graph.c b/dpdk/lib/graph/graph.c
+index 26f0968a97..8ea2109645 100644
+--- a/dpdk/lib/graph/graph.c
++++ b/dpdk/lib/graph/graph.c
+@@ -19,11 +19,54 @@
+ 
+ static struct graph_head graph_list = STAILQ_HEAD_INITIALIZER(graph_list);
+ static rte_spinlock_t graph_lock = RTE_SPINLOCK_INITIALIZER;
+-static rte_graph_t graph_id;
+-
+-#define GRAPH_ID_CHECK(id) ID_CHECK(id, graph_id)
+ 
+ /* Private functions */
++static struct graph *
++graph_from_id(rte_graph_t id)
++{
++	struct graph *graph;
++	STAILQ_FOREACH(graph, &graph_list, next) {
++		if (graph->id == id)
++			return graph;
++	}
++	rte_errno = EINVAL;
++	return NULL;
++}
++
++static rte_graph_t
++graph_next_free_id(void)
++{
++	struct graph *graph;
++	rte_graph_t id = 0;
++
++	STAILQ_FOREACH(graph, &graph_list, next) {
++		if (id < graph->id)
++			break;
++		id = graph->id + 1;
++	}
++
++	return id;
++}
++
++static void
++graph_insert_ordered(struct graph *graph)
++{
++	struct graph *after, *g;
++
++	after = NULL;
++	STAILQ_FOREACH(g, &graph_list, next) {
++		if (g->id < graph->id)
++			after = g;
++		else if (g->id > graph->id)
++			break;
++	}
++	if (after == NULL) {
++		STAILQ_INSERT_HEAD(&graph_list, graph, next);
++	} else {
++		STAILQ_INSERT_AFTER(&graph_list, after, graph, next);
++	}
++}
++
+ struct graph_head *
+ graph_list_head_get(void)
+ {
+@@ -279,7 +322,8 @@ rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)
+ {
+ 	struct graph *graph;
+ 
+-	GRAPH_ID_CHECK(id);
++	if (graph_from_id(id) == NULL)
++		goto fail;
+ 	if (!rte_lcore_is_enabled(lcore))
+ 		SET_ERR_JMP(ENOLINK, fail, "lcore %d not enabled", lcore);
+ 
+@@ -309,7 +353,8 @@ rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)
+ {
+ 	struct graph *graph;
+ 
+-	GRAPH_ID_CHECK(id);
++	if (graph_from_id(id) == NULL)
++		goto fail;
+ 	STAILQ_FOREACH(graph, &graph_list, next)
+ 		if (graph->id == id)
+ 			break;
+@@ -406,7 +451,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm)
+ 	graph->socket = prm->socket_id;
+ 	graph->src_node_count = src_node_count;
+ 	graph->node_count = graph_nodes_count(graph);
+-	graph->id = graph_id;
++	graph->id = graph_next_free_id();
+ 	graph->parent_id = RTE_GRAPH_ID_INVALID;
+ 	graph->lcore_id = RTE_MAX_LCORE;
+ 	graph->num_pkt_to_capture = prm->num_pkt_to_capture;
+@@ -422,8 +467,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm)
+ 		goto graph_mem_destroy;
+ 
+ 	/* All good, Lets add the graph to the list */
+-	graph_id++;
+-	STAILQ_INSERT_TAIL(&graph_list, graph, next);
++	graph_insert_ordered(graph);
+ 
+ 	graph_spinlock_unlock();
+ 	return graph->id;
+@@ -467,7 +511,6 @@ rte_graph_destroy(rte_graph_t id)
+ 			graph_cleanup(graph);
+ 			STAILQ_REMOVE(&graph_list, graph, graph, next);
+ 			free(graph);
+-			graph_id--;
+ 			goto done;
+ 		}
+ 		graph = tmp;
+@@ -520,7 +563,7 @@ graph_clone(struct graph *parent_graph, const char *name, struct rte_graph_param
+ 	graph->parent_id = parent_graph->id;
+ 	graph->lcore_id = parent_graph->lcore_id;
+ 	graph->socket = parent_graph->socket;
+-	graph->id = graph_id;
++	graph->id = graph_next_free_id();
+ 
+ 	/* Allocate the Graph fast path memory and populate the data */
+ 	if (graph_fp_mem_create(graph))
+@@ -539,8 +582,7 @@ graph_clone(struct graph *parent_graph, const char *name, struct rte_graph_param
+ 		goto graph_mem_destroy;
+ 
+ 	/* All good, Lets add the graph to the list */
+-	graph_id++;
+-	STAILQ_INSERT_TAIL(&graph_list, graph, next);
++	graph_insert_ordered(graph);
+ 
+ 	graph_spinlock_unlock();
+ 	return graph->id;
+@@ -561,7 +603,8 @@ rte_graph_clone(rte_graph_t id, const char *name, struct rte_graph_param *prm)
+ {
+ 	struct graph *graph;
+ 
+-	GRAPH_ID_CHECK(id);
++	if (graph_from_id(id) == NULL)
++		goto fail;
+ 	STAILQ_FOREACH(graph, &graph_list, next)
+ 		if (graph->id == id)
+ 			return graph_clone(graph, name, prm);
+@@ -587,7 +630,8 @@ rte_graph_id_to_name(rte_graph_t id)
+ {
+ 	struct graph *graph;
+ 
+-	GRAPH_ID_CHECK(id);
++	if (graph_from_id(id) == NULL)
++		goto fail;
+ 	STAILQ_FOREACH(graph, &graph_list, next)
+ 		if (graph->id == id)
+ 			return graph->name;
+@@ -604,7 +648,8 @@ rte_graph_node_get(rte_graph_t gid, uint32_t nid)
+ 	rte_graph_off_t off;
+ 	rte_node_t count;
+ 
+-	GRAPH_ID_CHECK(gid);
++	if (graph_from_id(gid) == NULL)
++		goto fail;
+ 	STAILQ_FOREACH(graph, &graph_list, next)
+ 		if (graph->id == gid) {
+ 			rte_graph_foreach_node(count, off, graph->graph,
+@@ -729,7 +774,8 @@ graph_scan_dump(FILE *f, rte_graph_t id, bool all)
+ 	struct graph *graph;
+ 
+ 	RTE_VERIFY(f);
+-	GRAPH_ID_CHECK(id);
++	if (graph_from_id(id) == NULL)
++		goto fail;
+ 
+ 	STAILQ_FOREACH(graph, &graph_list, next) {
+ 		if (all == true) {
+@@ -758,7 +804,13 @@ rte_graph_list_dump(FILE *f)
+ rte_graph_t
+ rte_graph_max_count(void)
+ {
+-	return graph_id;
++	struct graph *graph;
++	rte_graph_t count = 0;
++
++	STAILQ_FOREACH(graph, &graph_list, next)
++		count++;
++
++	return count;
+ }
+ 
+ RTE_LOG_REGISTER_DEFAULT(rte_graph_logtype, INFO);
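The graph.c rework replaces a single counter (incremented on create, decremented on destroy) with id assignment derived from the id-sorted graph list, so destroying a graph out of order can no longer hand a live id out twice. The free-id scan reduces to finding the first gap in a sorted sequence; sketched here over a plain array standing in for the STAILQ walk:

	/* Smallest unused id, given ids sorted ascending: the first entry
	 * that exceeds the running counter marks a reusable gap. */
	static unsigned int
	next_free_id(const unsigned int *sorted_ids, unsigned int n)
	{
		unsigned int i, id = 0;

		for (i = 0; i < n; i++) {
			if (id < sorted_ids[i])
				break;		/* gap found: reuse this id */
			id = sorted_ids[i] + 1;
		}
		return id;
	}

For ids {0, 1, 3} this returns 2; for {0, 1, 2} it returns 3, matching the old append-at-end behaviour when no gaps exist.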
+diff --git a/dpdk/lib/graph/graph_stats.c b/dpdk/lib/graph/graph_stats.c
+index cc32245c05..e99e8cf68a 100644
+--- a/dpdk/lib/graph/graph_stats.c
++++ b/dpdk/lib/graph/graph_stats.c
+@@ -34,6 +34,7 @@ struct rte_graph_cluster_stats {
+ 	uint32_t cluster_node_size; /* Size of struct cluster_node */
+ 	rte_node_t max_nodes;
+ 	int socket_id;
++	bool dispatch;
+ 	void *cookie;
+ 	size_t sz;
+ 
+@@ -74,17 +75,16 @@ print_banner_dispatch(FILE *f)
+ }
+ 
+ static inline void
+-print_banner(FILE *f)
++print_banner(FILE *f, bool dispatch)
+ {
+-	if (rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph) ==
+-	    RTE_GRAPH_MODEL_MCORE_DISPATCH)
++	if (dispatch)
+ 		print_banner_dispatch(f);
+ 	else
+ 		print_banner_default(f);
+ }
+ 
+ static inline void
+-print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat)
++print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat, bool dispatch)
+ {
+ 	double objs_per_call, objs_per_sec, cycles_per_call, ts_per_hz;
+ 	const uint64_t prev_calls = stat->prev_calls;
+@@ -104,8 +104,7 @@ print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat)
+ 	objs_per_sec = ts_per_hz ? (objs - prev_objs) / ts_per_hz : 0;
+ 	objs_per_sec /= 1000000;
+ 
+-	if (rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph) ==
+-	    RTE_GRAPH_MODEL_MCORE_DISPATCH) {
++	if (dispatch) {
+ 		fprintf(f,
+ 			"|%-31s|%-15" PRIu64 "|%-15" PRIu64 "|%-15" PRIu64
+ 			"|%-15" PRIu64 "|%-15" PRIu64
+@@ -123,20 +122,17 @@ print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat)
+ }
+ 
+ static int
+-graph_cluster_stats_cb(bool is_first, bool is_last, void *cookie,
++graph_cluster_stats_cb(bool dispatch, bool is_first, bool is_last, void *cookie,
+ 		       const struct rte_graph_cluster_node_stats *stat)
+ {
+ 	FILE *f = cookie;
+-	int model;
+-
+-	model = rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph);
+ 
+ 	if (unlikely(is_first))
+-		print_banner(f);
++		print_banner(f, dispatch);
+ 	if (stat->objs)
+-		print_node(f, stat);
++		print_node(f, stat, dispatch);
+ 	if (unlikely(is_last)) {
+-		if (model == RTE_GRAPH_MODEL_MCORE_DISPATCH)
++		if (dispatch)
+ 			boarder_model_dispatch();
+ 		else
+ 			boarder();
+@@ -145,6 +141,20 @@ graph_cluster_stats_cb(bool is_first, bool is_last, void *cookie,
+ 	return 0;
+ };
+ 
++static int
++graph_cluster_stats_cb_rtc(bool is_first, bool is_last, void *cookie,
++			   const struct rte_graph_cluster_node_stats *stat)
++{
++	return graph_cluster_stats_cb(false, is_first, is_last, cookie, stat);
++};
++
++static int
++graph_cluster_stats_cb_dispatch(bool is_first, bool is_last, void *cookie,
++				const struct rte_graph_cluster_node_stats *stat)
++{
++	return graph_cluster_stats_cb(true, is_first, is_last, cookie, stat);
++};
++
+ static struct rte_graph_cluster_stats *
+ stats_mem_init(struct cluster *cluster,
+ 	       const struct rte_graph_cluster_stats_param *prm)
+@@ -157,8 +167,13 @@ stats_mem_init(struct cluster *cluster,
+ 
+ 	/* Fix up callback */
+ 	fn = prm->fn;
+-	if (fn == NULL)
+-		fn = graph_cluster_stats_cb;
++	if (fn == NULL) {
++		const struct rte_graph *graph = cluster->graphs[0]->graph;
++		if (graph->model == RTE_GRAPH_MODEL_MCORE_DISPATCH)
++			fn = graph_cluster_stats_cb_dispatch;
++		else
++			fn = graph_cluster_stats_cb_rtc;
++	}
+ 
+ 	cluster_node_size = sizeof(struct cluster_node);
+ 	/* For a given cluster, max nodes will be the max number of graphs */
+@@ -350,6 +365,8 @@ rte_graph_cluster_stats_create(const struct rte_graph_cluster_stats_param *prm)
+ 			if (stats_mem_populate(&stats, graph_fp, graph_node))
+ 				goto realloc_fail;
+ 		}
++		if (graph->graph->model == RTE_GRAPH_MODEL_MCORE_DISPATCH)
++			stats->dispatch = true;
+ 	}
+ 
+ 	/* Finally copy to hugepage memory to avoid pressure on rte_realloc */
+@@ -375,20 +392,18 @@ rte_graph_cluster_stats_destroy(struct rte_graph_cluster_stats *stat)
+ }
+ 
+ static inline void
+-cluster_node_arregate_stats(struct cluster_node *cluster)
++cluster_node_arregate_stats(struct cluster_node *cluster, bool dispatch)
+ {
+ 	uint64_t calls = 0, cycles = 0, objs = 0, realloc_count = 0;
+ 	struct rte_graph_cluster_node_stats *stat = &cluster->stat;
+ 	uint64_t sched_objs = 0, sched_fail = 0;
+ 	struct rte_node *node;
+ 	rte_node_t count;
+-	int model;
+ 
+-	model = rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph);
+ 	for (count = 0; count < cluster->nb_nodes; count++) {
+ 		node = cluster->nodes[count];
+ 
+-		if (model == RTE_GRAPH_MODEL_MCORE_DISPATCH) {
++		if (dispatch) {
+ 			sched_objs += node->dispatch.total_sched_objs;
+ 			sched_fail += node->dispatch.total_sched_fail;
+ 		}
+@@ -403,7 +418,7 @@ cluster_node_arregate_stats(struct cluster_node *cluster)
+ 	stat->objs = objs;
+ 	stat->cycles = cycles;
+ 
+-	if (model == RTE_GRAPH_MODEL_MCORE_DISPATCH) {
++	if (dispatch) {
+ 		stat->dispatch.sched_objs = sched_objs;
+ 		stat->dispatch.sched_fail = sched_fail;
+ 	}
+@@ -433,7 +448,7 @@ rte_graph_cluster_stats_get(struct rte_graph_cluster_stats *stat, bool skip_cb)
+ 	cluster = stat->clusters;
+ 
+ 	for (count = 0; count < stat->max_nodes; count++) {
+-		cluster_node_arregate_stats(cluster);
++		cluster_node_arregate_stats(cluster, stat->dispatch);
+ 		if (!skip_cb)
+ 			rc = stat->fn(!count, (count == stat->max_nodes - 1),
+ 				      stat->cookie, &cluster->stat);
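Note on the graph_stats.c refactor above: the worker model is now resolved once, when the stats memory is initialised, and frozen into one of two thin wrapper callbacks, instead of being looked up from the global graph list for every node on every stats iteration. A minimal sketch of that pattern; all identifiers here are illustrative, not DPDK API:

```c
/* Sketch of the "resolve once, bind via wrapper" pattern used above.
 * All names are illustrative, not part of the DPDK API. */
#include <stdbool.h>
#include <stddef.h>

typedef int (*stats_cb_t)(bool is_first, bool is_last, void *cookie);

static int
stats_cb(bool dispatch, bool is_first, bool is_last, void *cookie)
{
	(void)dispatch; (void)is_first; (void)is_last; (void)cookie;
	return 0;
}

/* The wrappers freeze the mode at init time, so the per-sample path
 * needs no global lookup and no shared-state branch. */
static int
stats_cb_rtc(bool is_first, bool is_last, void *cookie)
{
	return stats_cb(false, is_first, is_last, cookie);
}

static int
stats_cb_dispatch(bool is_first, bool is_last, void *cookie)
{
	return stats_cb(true, is_first, is_last, cookie);
}

static stats_cb_t
pick_stats_cb(bool model_is_dispatch)
{
	/* Decided once at init time, not per sample. */
	return model_is_dispatch ? stats_cb_dispatch : stats_cb_rtc;
}
```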
+diff --git a/dpdk/lib/graph/rte_graph_model_mcore_dispatch.h b/dpdk/lib/graph/rte_graph_model_mcore_dispatch.h
+index 75ec388cad..732b89297f 100644
+--- a/dpdk/lib/graph/rte_graph_model_mcore_dispatch.h
++++ b/dpdk/lib/graph/rte_graph_model_mcore_dispatch.h
+@@ -100,7 +100,7 @@ rte_graph_walk_mcore_dispatch(struct rte_graph *graph)
+ 		node = (struct rte_node *)RTE_PTR_ADD(graph, cir_start[(int32_t)head++]);
+ 
+ 		/* skip the src nodes which not bind with current worker */
+-		if ((int32_t)head < 0 && node->dispatch.lcore_id != graph->dispatch.lcore_id)
++		if ((int32_t)head < 1 && node->dispatch.lcore_id != graph->dispatch.lcore_id)
+ 			continue;
+ 
+ 		/* Schedule the node until all task/objs are done */
+diff --git a/dpdk/lib/gro/gro_tcp.h b/dpdk/lib/gro/gro_tcp.h
+index d926c4b8cc..2c68b5f23e 100644
+--- a/dpdk/lib/gro/gro_tcp.h
++++ b/dpdk/lib/gro/gro_tcp.h
+@@ -19,6 +19,8 @@
+ #define INVALID_TCP_HDRLEN(len) \
+ 	(((len) < sizeof(struct rte_tcp_hdr)) || ((len) > MAX_TCP_HLEN))
+ 
++#define VALID_GRO_TCP_FLAGS (RTE_TCP_ACK_FLAG | RTE_TCP_PSH_FLAG | RTE_TCP_FIN_FLAG)
++
+ struct cmn_tcp_key {
+ 	struct rte_ether_addr eth_saddr;
+ 	struct rte_ether_addr eth_daddr;
+@@ -81,11 +83,13 @@ merge_two_tcp_packets(struct gro_tcp_item *item,
+ 		struct rte_mbuf *pkt,
+ 		int cmp,
+ 		uint32_t sent_seq,
++		uint8_t tcp_flags,
+ 		uint16_t ip_id,
+ 		uint16_t l2_offset)
+ {
+ 	struct rte_mbuf *pkt_head, *pkt_tail, *lastseg;
+ 	uint16_t hdr_len, l2_len;
++	struct rte_tcp_hdr *tcp_hdr;
+ 
+ 	if (cmp > 0) {
+ 		pkt_head = item->firstseg;
+@@ -128,6 +132,11 @@ merge_two_tcp_packets(struct gro_tcp_item *item,
+ 	/* update MBUF metadata for the merged packet */
+ 	pkt_head->nb_segs += pkt_tail->nb_segs;
+ 	pkt_head->pkt_len += pkt_tail->pkt_len;
++	if (tcp_flags != RTE_TCP_ACK_FLAG) {
++		tcp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_tcp_hdr *,
++						l2_offset + pkt_head->l2_len + pkt_head->l3_len);
++		tcp_hdr->tcp_flags |= tcp_flags;
++	}
+ 
+ 	return 1;
+ }
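The two gro_tcp.h changes above work together: VALID_GRO_TCP_FLAGS names the only TCP flags the GRO layer is willing to handle (ACK, PSH, FIN), and merge_two_tcp_packets() now ORs any non-ACK flag of the merged segment into the head segment's header, so a piggybacked PSH or FIN is not lost in the merge. A self-contained sketch of the screening test; flag values mirror rte_tcp.h, the helper name is illustrative:

```c
#include <stdint.h>

#define TCP_FIN 0x01
#define TCP_PSH 0x08
#define TCP_ACK 0x10
#define VALID_GRO_TCP_FLAGS (TCP_ACK | TCP_PSH | TCP_FIN)

/* Returns 0 if GRO may process the segment, -1 if any unhandled flag
 * (SYN, RST, URG, ECE, CWR) is present and it must be passed through. */
static int
gro_screen_tcp_flags(uint8_t tcp_flags)
{
	if (tcp_flags & (uint8_t)~VALID_GRO_TCP_FLAGS)
		return -1;
	return 0;
}
```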
+diff --git a/dpdk/lib/gro/gro_tcp4.c b/dpdk/lib/gro/gro_tcp4.c
+index 6645de592b..c8b8d7990c 100644
+--- a/dpdk/lib/gro/gro_tcp4.c
++++ b/dpdk/lib/gro/gro_tcp4.c
+@@ -126,6 +126,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
+ 	uint32_t item_idx;
+ 	uint32_t i, max_flow_num, remaining_flow_num;
+ 	uint8_t find;
++	uint32_t item_start_idx;
+ 
+ 	/*
+ 	 * Don't process the packet whose TCP header length is greater
+@@ -139,11 +140,8 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
+ 	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+ 	hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
+ 
+-	/*
+-	 * Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE
+-	 * or CWR set.
+-	 */
+-	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
++	/* Return early if the TCP flags are not handled by the GRO layer */
++	if (tcp_hdr->tcp_flags & ~VALID_GRO_TCP_FLAGS)
+ 		return -1;
+ 
+ 	/* trim the tail padding bytes */
+@@ -183,13 +181,35 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
+ 		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
+ 			if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
+ 				find = 1;
++				item_start_idx = tbl->flows[i].start_index;
+ 				break;
+ 			}
+ 			remaining_flow_num--;
+ 		}
+ 	}
+ 
+-	if (find == 0) {
++	if (find == 1) {
++		/*
++		 * Any packet with additional flags like PSH or FIN should be
++		 * processed and flushed immediately.
++		 * Hence mark the start time as 0, so that such packets are
++		 * flushed immediately in timer mode.
++		 */
++		if (tcp_hdr->tcp_flags & (RTE_TCP_ACK_FLAG | RTE_TCP_PSH_FLAG | RTE_TCP_FIN_FLAG)) {
++			if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
++				tbl->items[item_start_idx].start_time = 0;
++			return process_tcp_item(pkt, tcp_hdr, tcp_dl, tbl->items,
++						tbl->flows[i].start_index, &tbl->item_num,
++						tbl->max_item_num, ip_id, is_atomic, start_time);
++		} else {
++			return -1;
++		}
++	}
++	/*
++	 * Add a new flow to the table only if the packet has the ACK flag with data.
++	 * Do not add packets with additional TCP flags to the GRO table.
++	 */
++	if (tcp_hdr->tcp_flags == RTE_TCP_ACK_FLAG) {
+ 		sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
+ 		item_idx = insert_new_tcp_item(pkt, tbl->items, &tbl->item_num,
+ 						tbl->max_item_num, start_time,
+@@ -209,9 +229,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
+ 		return 0;
+ 	}
+ 
+-	return process_tcp_item(pkt, tcp_hdr, tcp_dl, tbl->items, tbl->flows[i].start_index,
+-						&tbl->item_num, tbl->max_item_num,
+-						ip_id, is_atomic, start_time);
++	return -1;
+ }
+ 
+ /*
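The reworked lookup in gro_tcp4_reassemble() above amounts to a three-way policy: a segment of a known flow is merged, and if it carries PSH or FIN the stored item's start_time is forced to 0 so a timer-mode flush evicts it immediately; a new flow is created only for a plain ACK carrying data; everything else is returned unmerged. A condensed sketch of that decision, with illustrative names and the reassemble path's 1/0/-1 convention:

```c
#include <stdbool.h>
#include <stdint.h>

#define TCP_FIN 0x01
#define TCP_PSH 0x08
#define TCP_ACK 0x10

static int
tcp4_gro_policy(bool flow_found, uint8_t tcp_flags,
		uint64_t *item_start_time)
{
	if (flow_found) {
		if (!(tcp_flags & (TCP_ACK | TCP_PSH | TCP_FIN)))
			return -1;		/* nothing GRO can do */
		if (tcp_flags != TCP_ACK)
			*item_start_time = 0;	/* merge, then flush now */
		return 1;			/* process_tcp_item() path */
	}
	/* Only a plain ACK with data may open a new flow. */
	return tcp_flags == TCP_ACK ? 0 : -1;
}
```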
+diff --git a/dpdk/lib/gro/gro_tcp_internal.h b/dpdk/lib/gro/gro_tcp_internal.h
+index cc84abeaeb..e4855da1ad 100644
+--- a/dpdk/lib/gro/gro_tcp_internal.h
++++ b/dpdk/lib/gro/gro_tcp_internal.h
+@@ -101,7 +101,7 @@ process_tcp_item(struct rte_mbuf *pkt,
+ 				is_atomic);
+ 		if (cmp) {
+ 			if (merge_two_tcp_packets(&items[cur_idx],
+-						pkt, cmp, sent_seq, ip_id, 0))
++						pkt, cmp, sent_seq, tcp_hdr->tcp_flags, ip_id, 0))
+ 				return 1;
+ 			/*
+ 			 * Fail to merge the two packets, as the packet
+diff --git a/dpdk/lib/gro/gro_vxlan_tcp4.c b/dpdk/lib/gro/gro_vxlan_tcp4.c
+index 6ab7001922..8dd62a949c 100644
+--- a/dpdk/lib/gro/gro_vxlan_tcp4.c
++++ b/dpdk/lib/gro/gro_vxlan_tcp4.c
+@@ -239,10 +239,11 @@ merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,
+ 		struct rte_mbuf *pkt,
+ 		int cmp,
+ 		uint32_t sent_seq,
++		uint8_t tcp_flags,
+ 		uint16_t outer_ip_id,
+ 		uint16_t ip_id)
+ {
+-	if (merge_two_tcp_packets(&item->inner_item, pkt, cmp, sent_seq,
++	if (merge_two_tcp_packets(&item->inner_item, pkt, cmp, sent_seq, tcp_flags,
+ 				ip_id, pkt->outer_l2_len +
+ 				pkt->outer_l3_len)) {
+ 		/* Update the outer IPv4 ID to the large value. */
+@@ -413,7 +414,7 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
+ 				tcp_dl, outer_is_atomic, is_atomic);
+ 		if (cmp) {
+ 			if (merge_two_vxlan_tcp4_packets(&(tbl->items[cur_idx]),
+-						pkt, cmp, sent_seq,
++						pkt, cmp, sent_seq, tcp_hdr->tcp_flags,
+ 						outer_ip_id, ip_id))
+ 				return 1;
+ 			/*
+diff --git a/dpdk/lib/hash/rte_cuckoo_hash.c b/dpdk/lib/hash/rte_cuckoo_hash.c
+index ccdc3b9894..258b6b7f2b 100644
+--- a/dpdk/lib/hash/rte_cuckoo_hash.c
++++ b/dpdk/lib/hash/rte_cuckoo_hash.c
+@@ -166,6 +166,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
+ 	/* Check for valid parameters */
+ 	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
+ 			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
++			(params->name == NULL) ||
+ 			(params->key_len == 0)) {
+ 		rte_errno = EINVAL;
+ 		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
+@@ -1550,6 +1551,7 @@ rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config *cfg)
+ 		if (params.size == 0)
+ 			params.size = total_entries;
+ 		params.trigger_reclaim_limit = cfg->trigger_reclaim_limit;
++		params.max_reclaim_size = cfg->max_reclaim_size;
+ 		if (params.max_reclaim_size == 0)
+ 			params.max_reclaim_size = RTE_HASH_RCU_DQ_RECLAIM_MAX;
+ 		params.esize = sizeof(struct __rte_hash_rcu_dq_entry);
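Two independent fixes above: rte_hash_create() now rejects a NULL name up front instead of dereferencing it later, and rte_hash_rcu_qsbr_add() actually copies the caller's max_reclaim_size instead of silently ignoring it. Caller-side, the validated contract looks like this; parameter values are illustrative:

```c
#include <stdio.h>
#include <rte_errno.h>
#include <rte_hash.h>

static struct rte_hash *
make_flow_table(void)
{
	struct rte_hash_parameters p = {
		.name = "flow_table",	/* must be non-NULL after this fix */
		.entries = 1024,
		.key_len = 16,
		.socket_id = 0,
	};
	struct rte_hash *h = rte_hash_create(&p);

	if (h == NULL)
		fprintf(stderr, "rte_hash_create: %s\n",
			rte_strerror(rte_errno));
	return h;
}
```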
+diff --git a/dpdk/lib/hash/rte_cuckoo_hash.h b/dpdk/lib/hash/rte_cuckoo_hash.h
+index f7afc4dd79..8ea793c66e 100644
+--- a/dpdk/lib/hash/rte_cuckoo_hash.h
++++ b/dpdk/lib/hash/rte_cuckoo_hash.h
+@@ -29,17 +29,6 @@
+ #define RETURN_IF_TRUE(cond, retval)
+ #endif
+ 
+-#if defined(RTE_LIBRTE_HASH_DEBUG)
+-#define ERR_IF_TRUE(cond, fmt, args...) do { \
+-	if (cond) { \
+-		RTE_LOG(ERR, HASH, fmt, ##args); \
+-		return; \
+-	} \
+-} while (0)
+-#else
+-#define ERR_IF_TRUE(cond, fmt, args...)
+-#endif
+-
+ #include <rte_hash_crc.h>
+ #include <rte_jhash.h>
+ 
+diff --git a/dpdk/lib/hash/rte_hash.h b/dpdk/lib/hash/rte_hash.h
+index 7ecc021111..ba96521529 100644
+--- a/dpdk/lib/hash/rte_hash.h
++++ b/dpdk/lib/hash/rte_hash.h
+@@ -286,7 +286,7 @@ rte_hash_add_key_with_hash_data(const struct rte_hash *h, const void *key,
+  * @return
+  *   - -EINVAL if the parameters are invalid.
+  *   - -ENOSPC if there is no space in the hash for this key.
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key. This
+  *     unique key id may be larger than the user specified entry count
+  *     when RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD flag is set.
+@@ -310,7 +310,7 @@ rte_hash_add_key(const struct rte_hash *h, const void *key);
+  * @return
+  *   - -EINVAL if the parameters are invalid.
+  *   - -ENOSPC if there is no space in the hash for this key.
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key. This
+  *     unique key ID may be larger than the user specified entry count
+  *     when RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD flag is set.
+@@ -341,7 +341,7 @@ rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t
+  * @return
+  *   - -EINVAL if the parameters are invalid.
+  *   - -ENOENT if the key is not found.
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key, and is the same
+  *     value that was returned when the key was added.
+  */
+@@ -373,7 +373,7 @@ rte_hash_del_key(const struct rte_hash *h, const void *key);
+  * @return
+  *   - -EINVAL if the parameters are invalid.
+  *   - -ENOENT if the key is not found.
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key, and is the same
+  *     value that was returned when the key was added.
+  */
+@@ -440,7 +440,7 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
+  * @param data
+  *   Output with pointer to data returned from the hash table.
+  * @return
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key, and is the same
+  *     value that was returned when the key was added.
+  *   - -EINVAL if the parameters are invalid.
+@@ -465,7 +465,7 @@ rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data);
+  * @param data
+  *   Output with pointer to data returned from the hash table.
+  * @return
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key, and is the same
+  *     value that was returned when the key was added.
+  *   - -EINVAL if the parameters are invalid.
+@@ -488,7 +488,7 @@ rte_hash_lookup_with_hash_data(const struct rte_hash *h, const void *key,
+  * @return
+  *   - -EINVAL if the parameters are invalid.
+  *   - -ENOENT if the key is not found.
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key, and is the same
+  *     value that was returned when the key was added.
+  */
+@@ -510,7 +510,7 @@ rte_hash_lookup(const struct rte_hash *h, const void *key);
+  * @return
+  *   - -EINVAL if the parameters are invalid.
+  *   - -ENOENT if the key is not found.
+- *   - A positive value that can be used by the caller as an offset into an
++ *   - A non-negative value that can be used by the caller as an offset into an
+  *     array of user data. This value is unique for this key, and is the same
+  *     value that was returned when the key was added.
+  */
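The repeated doc fix above ("positive" to "non-negative") is not cosmetic: key index 0 is a valid slot, so a caller testing `pos > 0` silently mishandles the first key ever added. The correct idiom:

```c
#include <rte_hash.h>

/* pos == 0 is a valid key index; only negative values are errors. */
static int
store_key(struct rte_hash *h, const void *key, void *user_data[],
	  void *value)
{
	int32_t pos = rte_hash_add_key(h, key);

	if (pos < 0)			/* -EINVAL or -ENOSPC */
		return (int)pos;
	user_data[pos] = value;		/* index 0 must not look like failure */
	return 0;
}
```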
+diff --git a/dpdk/lib/latencystats/rte_latencystats.c b/dpdk/lib/latencystats/rte_latencystats.c
+index 8985a377db..e47eac2cf8 100644
+--- a/dpdk/lib/latencystats/rte_latencystats.c
++++ b/dpdk/lib/latencystats/rte_latencystats.c
+@@ -164,7 +164,7 @@ calc_latency(uint16_t pid __rte_unused,
+ 	 * a constant smoothing factor between 0 and 1. The value
+ 	 * is used below for measuring average latency.
+ 	 */
+-	const float alpha = 0.2;
++	const float alpha = 0.2f;
+ 
+ 	now = rte_rdtsc();
+ 	for (i = 0; i < nb_pkts; i++) {
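The `0.2f` suffix above keeps the smoothing arithmetic in single precision; a plain `0.2` is a double constant and silently promotes every multiply. The computation itself is the usual exponentially weighted moving average, roughly:

```c
/* EWMA as used for the rolling latency average (sketch). */
static float
ewma_update(float avg, float sample)
{
	const float alpha = 0.2f;	/* 'f' avoids float->double promotion */

	return alpha * sample + (1.0f - alpha) * avg;
}
```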
+diff --git a/dpdk/lib/lpm/rte_lpm6.c b/dpdk/lib/lpm/rte_lpm6.c
+index 8d21aeddb8..79c75d8dfc 100644
+--- a/dpdk/lib/lpm/rte_lpm6.c
++++ b/dpdk/lib/lpm/rte_lpm6.c
+@@ -279,7 +279,7 @@ rte_lpm6_create(const char *name, int socket_id,
+ 
+ 	rules_tbl = rte_hash_create(&rule_hash_tbl_params);
+ 	if (rules_tbl == NULL) {
+-		RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)",
++		RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)\n",
+ 				  rte_strerror(rte_errno), rte_errno);
+ 		goto fail_wo_unlock;
+ 	}
+@@ -289,7 +289,7 @@ rte_lpm6_create(const char *name, int socket_id,
+ 			sizeof(uint32_t) * config->number_tbl8s,
+ 			RTE_CACHE_LINE_SIZE);
+ 	if (tbl8_pool == NULL) {
+-		RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)",
++		RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)\n",
+ 				  rte_strerror(rte_errno), rte_errno);
+ 		rte_errno = ENOMEM;
+ 		goto fail_wo_unlock;
+@@ -300,7 +300,7 @@ rte_lpm6_create(const char *name, int socket_id,
+ 			sizeof(struct rte_lpm_tbl8_hdr) * config->number_tbl8s,
+ 			RTE_CACHE_LINE_SIZE);
+ 	if (tbl8_hdrs == NULL) {
+-		RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)",
++		RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)\n",
+ 				  rte_strerror(rte_errno), rte_errno);
+ 		rte_errno = ENOMEM;
+ 		goto fail_wo_unlock;
+diff --git a/dpdk/lib/mbuf/rte_mbuf.h b/dpdk/lib/mbuf/rte_mbuf.h
+index 286b32b788..c266727a13 100644
+--- a/dpdk/lib/mbuf/rte_mbuf.h
++++ b/dpdk/lib/mbuf/rte_mbuf.h
+@@ -1119,6 +1119,9 @@ rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
+ static inline void
+ rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
+ {
++#if !RTE_IOVA_IN_MBUF
++	mdst->dynfield2 = msrc->dynfield2;
++#endif
+ 	memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
+ }
+ 
+diff --git a/dpdk/lib/mempool/rte_mempool_ops.c b/dpdk/lib/mempool/rte_mempool_ops.c
+index ae1d288f27..e871de9ec9 100644
+--- a/dpdk/lib/mempool/rte_mempool_ops.c
++++ b/dpdk/lib/mempool/rte_mempool_ops.c
+@@ -46,7 +46,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
+ 
+ 	if (strlen(h->name) >= sizeof(ops->name) - 1) {
+ 		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+-		RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
++		RTE_LOG(DEBUG, MEMPOOL, "%s(): mempool_ops <%s>: name too long\n",
+ 				__func__, h->name);
+ 		rte_errno = EEXIST;
+ 		return -EEXIST;
+diff --git a/dpdk/lib/meson.build b/dpdk/lib/meson.build
+index 6c143ce5a6..55d483cd26 100644
+--- a/dpdk/lib/meson.build
++++ b/dpdk/lib/meson.build
+@@ -228,7 +228,7 @@ foreach l:libraries
+ 
+     if not build
+         dpdk_libs_disabled += name
+-        set_variable(name.underscorify() + '_disable_reason', reason)
++        set_variable('lib_' + name.underscorify() + '_disable_reason', reason)
+         continue
+     endif
+ 
+diff --git a/dpdk/lib/metrics/rte_metrics_telemetry.c b/dpdk/lib/metrics/rte_metrics_telemetry.c
+index 5be21b2e86..1d133e1f8c 100644
+--- a/dpdk/lib/metrics/rte_metrics_telemetry.c
++++ b/dpdk/lib/metrics/rte_metrics_telemetry.c
+@@ -363,7 +363,7 @@ rte_metrics_tel_stat_names_to_ids(const char * const *stat_names,
+ 			}
+ 		}
+ 		if (j == num_metrics) {
+-			METRICS_LOG_WARN("Invalid stat name %s\n",
++			METRICS_LOG_WARN("Invalid stat name %s",
+ 					stat_names[i]);
+ 			free(names);
+ 			return -EINVAL;
+diff --git a/dpdk/lib/mldev/rte_mldev.c b/dpdk/lib/mldev/rte_mldev.c
+index cc5f2e0cc6..196b1850e6 100644
+--- a/dpdk/lib/mldev/rte_mldev.c
++++ b/dpdk/lib/mldev/rte_mldev.c
+@@ -159,7 +159,7 @@ int
+ rte_ml_dev_init(size_t dev_max)
+ {
+ 	if (dev_max == 0 || dev_max > INT16_MAX) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_max = %zu (> %d)\n", dev_max, INT16_MAX);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_max = %zu (> %d)", dev_max, INT16_MAX);
+ 		rte_errno = EINVAL;
+ 		return -rte_errno;
+ 	}
+@@ -217,7 +217,7 @@ rte_ml_dev_socket_id(int16_t dev_id)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -232,7 +232,7 @@ rte_ml_dev_info_get(int16_t dev_id, struct rte_ml_dev_info *dev_info)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -241,7 +241,7 @@ rte_ml_dev_info_get(int16_t dev_id, struct rte_ml_dev_info *dev_info)
+ 		return -ENOTSUP;
+ 
+ 	if (dev_info == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, dev_info cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, dev_info cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 	memset(dev_info, 0, sizeof(struct rte_ml_dev_info));
+@@ -257,7 +257,7 @@ rte_ml_dev_configure(int16_t dev_id, const struct rte_ml_dev_config *config)
+ 	int ret;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -271,7 +271,7 @@ rte_ml_dev_configure(int16_t dev_id, const struct rte_ml_dev_config *config)
+ 	}
+ 
+ 	if (config == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, config cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, config cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -280,7 +280,7 @@ rte_ml_dev_configure(int16_t dev_id, const struct rte_ml_dev_config *config)
+ 		return ret;
+ 
+ 	if (config->nb_queue_pairs > dev_info.max_queue_pairs) {
+-		RTE_MLDEV_LOG(ERR, "Device %d num of queues %u > %u\n", dev_id,
++		RTE_MLDEV_LOG(ERR, "Device %d num of queues %u > %u", dev_id,
+ 			      config->nb_queue_pairs, dev_info.max_queue_pairs);
+ 		return -EINVAL;
+ 	}
+@@ -294,7 +294,7 @@ rte_ml_dev_close(int16_t dev_id)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -318,7 +318,7 @@ rte_ml_dev_start(int16_t dev_id)
+ 	int ret;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -345,7 +345,7 @@ rte_ml_dev_stop(int16_t dev_id)
+ 	int ret;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -372,7 +372,7 @@ rte_ml_dev_queue_pair_setup(int16_t dev_id, uint16_t queue_pair_id,
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -386,7 +386,7 @@ rte_ml_dev_queue_pair_setup(int16_t dev_id, uint16_t queue_pair_id,
+ 	}
+ 
+ 	if (qp_conf == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, qp_conf cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, qp_conf cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -404,7 +404,7 @@ rte_ml_dev_stats_get(int16_t dev_id, struct rte_ml_dev_stats *stats)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -413,7 +413,7 @@ rte_ml_dev_stats_get(int16_t dev_id, struct rte_ml_dev_stats *stats)
+ 		return -ENOTSUP;
+ 
+ 	if (stats == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, stats cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, stats cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 	memset(stats, 0, sizeof(struct rte_ml_dev_stats));
+@@ -427,7 +427,7 @@ rte_ml_dev_stats_reset(int16_t dev_id)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return;
+ 	}
+ 
+@@ -445,7 +445,7 @@ rte_ml_dev_xstats_names_get(int16_t dev_id, enum rte_ml_dev_xstats_mode mode, in
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -462,7 +462,7 @@ rte_ml_dev_xstats_by_name_get(int16_t dev_id, const char *name, uint16_t *stat_i
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -471,12 +471,12 @@ rte_ml_dev_xstats_by_name_get(int16_t dev_id, const char *name, uint16_t *stat_i
+ 		return -ENOTSUP;
+ 
+ 	if (name == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, name cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, name cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (value == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, value cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, value cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -490,7 +490,7 @@ rte_ml_dev_xstats_get(int16_t dev_id, enum rte_ml_dev_xstats_mode mode, int32_t
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -499,12 +499,12 @@ rte_ml_dev_xstats_get(int16_t dev_id, enum rte_ml_dev_xstats_mode mode, int32_t
+ 		return -ENOTSUP;
+ 
+ 	if (stat_ids == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, stat_ids cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, stat_ids cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (values == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, values cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, values cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -518,7 +518,7 @@ rte_ml_dev_xstats_reset(int16_t dev_id, enum rte_ml_dev_xstats_mode mode, int32_
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -535,7 +535,7 @@ rte_ml_dev_dump(int16_t dev_id, FILE *fd)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -544,7 +544,7 @@ rte_ml_dev_dump(int16_t dev_id, FILE *fd)
+ 		return -ENOTSUP;
+ 
+ 	if (fd == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, file descriptor cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, file descriptor cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -557,7 +557,7 @@ rte_ml_dev_selftest(int16_t dev_id)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -574,7 +574,7 @@ rte_ml_model_load(int16_t dev_id, struct rte_ml_model_params *params, uint16_t *
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -583,12 +583,12 @@ rte_ml_model_load(int16_t dev_id, struct rte_ml_model_params *params, uint16_t *
+ 		return -ENOTSUP;
+ 
+ 	if (params == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, params cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, params cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (model_id == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, model_id cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, model_id cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -601,7 +601,7 @@ rte_ml_model_unload(int16_t dev_id, uint16_t model_id)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -618,7 +618,7 @@ rte_ml_model_start(int16_t dev_id, uint16_t model_id)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -635,7 +635,7 @@ rte_ml_model_stop(int16_t dev_id, uint16_t model_id)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -652,7 +652,7 @@ rte_ml_model_info_get(int16_t dev_id, uint16_t model_id, struct rte_ml_model_inf
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -661,7 +661,7 @@ rte_ml_model_info_get(int16_t dev_id, uint16_t model_id, struct rte_ml_model_inf
+ 		return -ENOTSUP;
+ 
+ 	if (model_info == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, model_id %u, model_info cannot be NULL\n", dev_id,
++		RTE_MLDEV_LOG(ERR, "Dev %d, model_id %u, model_info cannot be NULL", dev_id,
+ 			      model_id);
+ 		return -EINVAL;
+ 	}
+@@ -675,7 +675,7 @@ rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer)
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -684,7 +684,7 @@ rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer)
+ 		return -ENOTSUP;
+ 
+ 	if (buffer == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, buffer cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, buffer cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -698,7 +698,7 @@ rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **d
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -707,12 +707,12 @@ rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **d
+ 		return -ENOTSUP;
+ 
+ 	if (dbuffer == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, dbuffer cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, dbuffer cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (qbuffer == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, qbuffer cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, qbuffer cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -726,7 +726,7 @@ rte_ml_io_dequantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg *
+ 	struct rte_ml_dev *dev;
+ 
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -735,12 +735,12 @@ rte_ml_io_dequantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg *
+ 		return -ENOTSUP;
+ 
+ 	if (qbuffer == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, qbuffer cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, qbuffer cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (dbuffer == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, dbuffer cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, dbuffer cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -811,7 +811,7 @@ rte_ml_enqueue_burst(int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uin
+ 
+ #ifdef RTE_LIBRTE_ML_DEV_DEBUG
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		rte_errno = -EINVAL;
+ 		return 0;
+ 	}
+@@ -823,13 +823,13 @@ rte_ml_enqueue_burst(int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uin
+ 	}
+ 
+ 	if (ops == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, ops cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, ops cannot be NULL", dev_id);
+ 		rte_errno = -EINVAL;
+ 		return 0;
+ 	}
+ 
+ 	if (qp_id >= dev->data->nb_queue_pairs) {
+-		RTE_MLDEV_LOG(ERR, "Invalid qp_id %u\n", qp_id);
++		RTE_MLDEV_LOG(ERR, "Invalid qp_id %u", qp_id);
+ 		rte_errno = -EINVAL;
+ 		return 0;
+ 	}
+@@ -847,7 +847,7 @@ rte_ml_dequeue_burst(int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uin
+ 
+ #ifdef RTE_LIBRTE_ML_DEV_DEBUG
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		rte_errno = -EINVAL;
+ 		return 0;
+ 	}
+@@ -859,13 +859,13 @@ rte_ml_dequeue_burst(int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uin
+ 	}
+ 
+ 	if (ops == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, ops cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, ops cannot be NULL", dev_id);
+ 		rte_errno = -EINVAL;
+ 		return 0;
+ 	}
+ 
+ 	if (qp_id >= dev->data->nb_queue_pairs) {
+-		RTE_MLDEV_LOG(ERR, "Invalid qp_id %u\n", qp_id);
++		RTE_MLDEV_LOG(ERR, "Invalid qp_id %u", qp_id);
+ 		rte_errno = -EINVAL;
+ 		return 0;
+ 	}
+@@ -883,7 +883,7 @@ rte_ml_op_error_get(int16_t dev_id, struct rte_ml_op *op, struct rte_ml_op_error
+ 
+ #ifdef RTE_LIBRTE_ML_DEV_DEBUG
+ 	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -892,12 +892,12 @@ rte_ml_op_error_get(int16_t dev_id, struct rte_ml_op *op, struct rte_ml_op_error
+ 		return -ENOTSUP;
+ 
+ 	if (op == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, op cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, op cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (error == NULL) {
+-		RTE_MLDEV_LOG(ERR, "Dev %d, error cannot be NULL\n", dev_id);
++		RTE_MLDEV_LOG(ERR, "Dev %d, error cannot be NULL", dev_id);
+ 		return -EINVAL;
+ 	}
+ #else
+diff --git a/dpdk/lib/net/rte_ether.h b/dpdk/lib/net/rte_ether.h
+index ce073ea818..75285bdd12 100644
+--- a/dpdk/lib/net/rte_ether.h
++++ b/dpdk/lib/net/rte_ether.h
+@@ -46,6 +46,20 @@ extern "C" {
+ 
+ #define RTE_ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */
+ 
++/* VLAN header fields */
++#define RTE_VLAN_DEI_SHIFT	12
++#define RTE_VLAN_PRI_SHIFT	13
++#define RTE_VLAN_PRI_MASK	0xe000 /* Priority Code Point */
++#define RTE_VLAN_DEI_MASK	0x1000 /* Drop Eligible Indicator */
++#define RTE_VLAN_ID_MASK	0x0fff /* VLAN Identifier */
++
++#define RTE_VLAN_TCI_ID(vlan_tci)	((vlan_tci) & RTE_VLAN_ID_MASK)
++#define RTE_VLAN_TCI_PRI(vlan_tci)	(((vlan_tci) & RTE_VLAN_PRI_MASK) >> RTE_VLAN_PRI_SHIFT)
++#define RTE_VLAN_TCI_DEI(vlan_tci)	(((vlan_tci) & RTE_VLAN_DEI_MASK) >> RTE_VLAN_DEI_SHIFT)
++#define RTE_VLAN_TCI_MAKE(id, pri, dei)	((id) |					\
++					 ((pri) << RTE_VLAN_PRI_SHIFT) |	\
++					 ((dei) << RTE_VLAN_DEI_SHIFT))
++
+ /**
+  * Ethernet address:
+  * A universally administered address is uniquely assigned to a device by its
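The new RTE_VLAN_* helpers above split the 16-bit TCI into its three 802.1Q fields: 3-bit priority (PCP), 1-bit drop-eligible indicator, and 12-bit VLAN ID. They operate on host-byte-order values, so a TCI read from a header still needs rte_be_to_cpu_16() first. Round-trip example:

```c
#include <assert.h>
#include <stdint.h>
#include <rte_ether.h>

static void
tci_demo(void)
{
	uint16_t tci = RTE_VLAN_TCI_MAKE(100 /* id */, 5 /* pri */,
					 1 /* dei */);	/* 0xb064 */

	assert(RTE_VLAN_TCI_ID(tci) == 100);
	assert(RTE_VLAN_TCI_PRI(tci) == 5);
	assert(RTE_VLAN_TCI_DEI(tci) == 1);
}
```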
+diff --git a/dpdk/lib/net/rte_ip.h b/dpdk/lib/net/rte_ip.h
+index 6fa98a5a0f..0d103d4127 100644
+--- a/dpdk/lib/net/rte_ip.h
++++ b/dpdk/lib/net/rte_ip.h
+@@ -419,11 +419,14 @@ __rte_ipv4_udptcp_cksum_mbuf(const struct rte_mbuf *m,
+ {
+ 	uint16_t raw_cksum;
+ 	uint32_t cksum;
++	uint16_t len;
+ 
+-	if (l4_off > m->pkt_len)
+-		return 0;
++	if (unlikely(l4_off > m->pkt_len))
++		return 0; /* invalid params, return a dummy value */
++
++	len = rte_be_to_cpu_16(ipv4_hdr->total_length) - (uint16_t)rte_ipv4_hdr_len(ipv4_hdr);
+ 
+-	if (rte_raw_cksum_mbuf(m, l4_off, m->pkt_len - l4_off, &raw_cksum))
++	if (rte_raw_cksum_mbuf(m, l4_off, len, &raw_cksum))
+ 		return 0;
+ 
+ 	cksum = raw_cksum + rte_ipv4_phdr_cksum(ipv4_hdr, 0);
+@@ -663,10 +666,10 @@ __rte_ipv6_udptcp_cksum_mbuf(const struct rte_mbuf *m,
+ 	uint16_t raw_cksum;
+ 	uint32_t cksum;
+ 
+-	if (l4_off > m->pkt_len)
+-		return 0;
++	if (unlikely(l4_off > m->pkt_len))
++		return 0; /* invalid params, return a dummy value */
+ 
+-	if (rte_raw_cksum_mbuf(m, l4_off, m->pkt_len - l4_off, &raw_cksum))
++	if (rte_raw_cksum_mbuf(m, l4_off, rte_be_to_cpu_16(ipv6_hdr->payload_len), &raw_cksum))
+ 		return 0;
+ 
+ 	cksum = raw_cksum + rte_ipv6_phdr_cksum(ipv6_hdr, 0);
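Both checksum fixes above stop deriving the L4 length as `m->pkt_len - l4_off`, which also counts any Ethernet frame padding behind the IP payload and thus corrupts the checksum for short frames. The length now comes from the IP header itself; a sketch of the derivation:

```c
#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ip.h>

static uint16_t
l4_len_ipv4(const struct rte_ipv4_hdr *ip)
{
	/* total_length covers header + payload; frame padding is excluded. */
	return rte_be_to_cpu_16(ip->total_length) -
		(uint16_t)rte_ipv4_hdr_len(ip);
}

static uint16_t
l4_len_ipv6(const struct rte_ipv6_hdr *ip)
{
	/* payload_len already is the L4 length (no extension headers
	 * assumed in this sketch). */
	return rte_be_to_cpu_16(ip->payload_len);
}
```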
+diff --git a/dpdk/lib/net/rte_net.h b/dpdk/lib/net/rte_net.h
+index ef3ff4c6fd..efd9d5f5ee 100644
+--- a/dpdk/lib/net/rte_net.h
++++ b/dpdk/lib/net/rte_net.h
+@@ -121,7 +121,8 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
+ 	 * no offloads are requested.
+ 	 */
+ 	if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG |
+-					RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_OUTER_IP_CKSUM)))
++					RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
++					RTE_MBUF_F_TX_OUTER_UDP_CKSUM)))
+ 		return 0;
+ 
+ 	if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
+@@ -135,6 +136,21 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
+ 					struct rte_ipv4_hdr *, m->outer_l2_len);
+ 			ipv4_hdr->hdr_checksum = 0;
+ 		}
++		if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
++			if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
++				ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
++					m->outer_l2_len);
++				udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
++					m->outer_l3_len);
++				udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags);
++			} else {
++				ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
++					m->outer_l2_len);
++				udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
++					 m->outer_l2_len + m->outer_l3_len);
++				udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, m->ol_flags);
++			}
++		}
+ 	}
+ 
+ 	/*
+diff --git a/dpdk/lib/net/rte_net_crc.c b/dpdk/lib/net/rte_net_crc.c
+index a685f9e7bb..900d6de7f4 100644
+--- a/dpdk/lib/net/rte_net_crc.c
++++ b/dpdk/lib/net/rte_net_crc.c
+@@ -179,7 +179,7 @@ avx512_vpclmulqdq_get_handlers(void)
+ 			max_simd_bitwidth >= RTE_VECT_SIMD_512)
+ 		return handlers_avx512;
+ #endif
+-	NET_LOG(INFO, "Requirements not met, can't use AVX512\n");
++	NET_LOG(INFO, "Requirements not met, can't use AVX512");
+ 	return NULL;
+ }
+ 
+@@ -205,7 +205,7 @@ sse42_pclmulqdq_get_handlers(void)
+ 			max_simd_bitwidth >= RTE_VECT_SIMD_128)
+ 		return handlers_sse42;
+ #endif
+-	NET_LOG(INFO, "Requirements not met, can't use SSE\n");
++	NET_LOG(INFO, "Requirements not met, can't use SSE");
+ 	return NULL;
+ }
+ 
+@@ -231,7 +231,7 @@ neon_pmull_get_handlers(void)
+ 			max_simd_bitwidth >= RTE_VECT_SIMD_128)
+ 		return handlers_neon;
+ #endif
+-	NET_LOG(INFO, "Requirements not met, can't use NEON\n");
++	NET_LOG(INFO, "Requirements not met, can't use NEON");
+ 	return NULL;
+ }
+ 
+diff --git a/dpdk/lib/node/ethdev_rx.c b/dpdk/lib/node/ethdev_rx.c
+index 3e8fac1df4..475eff6abe 100644
+--- a/dpdk/lib/node/ethdev_rx.c
++++ b/dpdk/lib/node/ethdev_rx.c
+@@ -160,13 +160,13 @@ ethdev_ptype_setup(uint16_t port, uint16_t queue)
+ 
+ 	if (!l3_ipv4 || !l3_ipv6) {
+ 		node_info("ethdev_rx",
+-			  "Enabling ptype callback for required ptypes on port %u\n",
++			  "Enabling ptype callback for required ptypes on port %u",
+ 			  port);
+ 
+ 		if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
+ 					     NULL)) {
+ 			node_err("ethdev_rx",
+-				 "Failed to add rx ptype cb: port=%d, queue=%d\n",
++				 "Failed to add rx ptype cb: port=%d, queue=%d",
+ 				 port, queue);
+ 			return -EINVAL;
+ 		}
+diff --git a/dpdk/lib/node/ip4_lookup.c b/dpdk/lib/node/ip4_lookup.c
+index 0dbfde64fe..18955971f6 100644
+--- a/dpdk/lib/node/ip4_lookup.c
++++ b/dpdk/lib/node/ip4_lookup.c
+@@ -143,7 +143,7 @@ rte_node_ip4_route_add(uint32_t ip, uint8_t depth, uint16_t next_hop,
+ 				  ip, depth, val);
+ 		if (ret < 0) {
+ 			node_err("ip4_lookup",
+-				 "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d\n",
++				 "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d",
+ 				 abuf, depth, val, socket, ret);
+ 			return ret;
+ 		}
+diff --git a/dpdk/lib/node/ip6_lookup.c b/dpdk/lib/node/ip6_lookup.c
+index 6f56eb5ec5..309964f60f 100644
+--- a/dpdk/lib/node/ip6_lookup.c
++++ b/dpdk/lib/node/ip6_lookup.c
+@@ -283,7 +283,7 @@ rte_node_ip6_route_add(const uint8_t *ip, uint8_t depth, uint16_t next_hop,
+ 		if (ret < 0) {
+ 			node_err("ip6_lookup",
+ 				 "Unable to add entry %s / %d nh (%x) to LPM "
+-				 "table on sock %d, rc=%d\n",
++				 "table on sock %d, rc=%d",
+ 				 abuf, depth, val, socket, ret);
+ 			return ret;
+ 		}
+diff --git a/dpdk/lib/node/kernel_rx.c b/dpdk/lib/node/kernel_rx.c
+index 2dba7c8cc7..6c20cdbb1e 100644
+--- a/dpdk/lib/node/kernel_rx.c
++++ b/dpdk/lib/node/kernel_rx.c
+@@ -134,7 +134,7 @@ kernel_rx_node_do(struct rte_graph *graph, struct rte_node *node, kernel_rx_node
+ 			if (len == 0 || len == 0xFFFF) {
+ 				rte_pktmbuf_free(m);
+ 				if (rx->idx <= 0)
+-					node_dbg("kernel_rx", "rx_mbuf array is empty\n");
++					node_dbg("kernel_rx", "rx_mbuf array is empty");
+ 				rx->idx--;
+ 				break;
+ 			}
+@@ -207,20 +207,20 @@ kernel_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
+ 	RTE_VERIFY(elem != NULL);
+ 
+ 	if (ctx->pktmbuf_pool == NULL) {
+-		node_err("kernel_rx", "Invalid mbuf pool on graph %s\n", graph->name);
++		node_err("kernel_rx", "Invalid mbuf pool on graph %s", graph->name);
+ 		return -EINVAL;
+ 	}
+ 
+ 	recv_info = rte_zmalloc_socket("kernel_rx_info", sizeof(kernel_rx_info_t),
+ 				       RTE_CACHE_LINE_SIZE, graph->socket);
+ 	if (!recv_info) {
+-		node_err("kernel_rx", "Kernel recv_info is NULL\n");
++		node_err("kernel_rx", "Kernel recv_info is NULL");
+ 		return -ENOMEM;
+ 	}
+ 
+ 	sock = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ 	if (sock < 0) {
+-		node_err("kernel_rx", "Unable to open RAW socket\n");
++		node_err("kernel_rx", "Unable to open RAW socket");
+ 		return sock;
+ 	}
+ 
+diff --git a/dpdk/lib/node/kernel_tx.c b/dpdk/lib/node/kernel_tx.c
+index 27d1808c71..3a96741622 100644
+--- a/dpdk/lib/node/kernel_tx.c
++++ b/dpdk/lib/node/kernel_tx.c
+@@ -36,7 +36,7 @@ kernel_tx_process_mbuf(struct rte_node *node, struct rte_mbuf **mbufs, uint16_t
+ 		sin.sin_addr.s_addr = ip4->dst_addr;
+ 
+ 		if (sendto(ctx->sock, buf, len, 0, (struct sockaddr *)&sin, sizeof(sin)) < 0)
+-			node_err("kernel_tx", "Unable to send packets: %s\n", strerror(errno));
++			node_err("kernel_tx", "Unable to send packets: %s", strerror(errno));
+ 	}
+ }
+ 
+@@ -87,7 +87,7 @@ kernel_tx_node_init(const struct rte_graph *graph __rte_unused, struct rte_node
+ 
+ 	ctx->sock = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ 	if (ctx->sock < 0)
+-		node_err("kernel_tx", "Unable to open RAW socket\n");
++		node_err("kernel_tx", "Unable to open RAW socket");
+ 
+ 	return 0;
+ }
+diff --git a/dpdk/lib/pcapng/rte_pcapng.c b/dpdk/lib/pcapng/rte_pcapng.c
+index f74ec939a9..7254defce7 100644
+--- a/dpdk/lib/pcapng/rte_pcapng.c
++++ b/dpdk/lib/pcapng/rte_pcapng.c
+@@ -128,7 +128,8 @@ pcapng_add_option(struct pcapng_option *popt, uint16_t code,
+ {
+ 	popt->code = code;
+ 	popt->length = len;
+-	memcpy(popt->data, data, len);
++	if (len > 0)
++		memcpy(popt->data, data, len);
+ 
+ 	return (struct pcapng_option *)((uint8_t *)popt + pcapng_optlen(len));
+ }
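The guard above matters because pcapng_add_option() can be called with a NULL data pointer and len == 0 (for example for an end-of-options marker), and passing a null pointer to memcpy() is undefined behavior even when the length is zero (C11 7.24.1p2). Generic form of the guard:

```c
#include <string.h>

static void
copy_opt_data(void *dst, const void *src, size_t len)
{
	if (len > 0)	/* src may legitimately be NULL when len == 0 */
		memcpy(dst, src, len);
}
```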
+diff --git a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c b/dpdk/lib/pipeline/rte_swx_pipeline_spec.c
+index 2bba0d0524..17419e7b85 100644
+--- a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c
++++ b/dpdk/lib/pipeline/rte_swx_pipeline_spec.c
+@@ -2890,7 +2890,7 @@ pipeline_spec_parse(FILE *spec,
+ 	}
+ 
+ 	/* Memory allocation. */
+-	s = calloc(sizeof(struct pipeline_spec), 1);
++	s = calloc(1, sizeof(struct pipeline_spec));
+ 	if (!s) {
+ 		if (err_line)
+ 			*err_line = n_lines;
+@@ -4241,7 +4241,7 @@ pipeline_iospec_parse(FILE *spec,
+ 	}
+ 
+ 	/* Memory allocation. */
+-	s = calloc(sizeof(struct pipeline_iospec), 1);
++	s = calloc(1, sizeof(struct pipeline_iospec));
+ 	if (!s) {
+ 		if (err_line)
+ 			*err_line = n_lines;
+diff --git a/dpdk/lib/power/guest_channel.c b/dpdk/lib/power/guest_channel.c
+index 7b2ae0b650..c964332011 100644
+--- a/dpdk/lib/power/guest_channel.c
++++ b/dpdk/lib/power/guest_channel.c
+@@ -89,7 +89,7 @@ guest_channel_host_connect(const char *path, unsigned int lcore_id)
+ 	flags |= O_NONBLOCK;
+ 	if (fcntl(fd, F_SETFL, flags) < 0) {
+ 		RTE_LOG(ERR, GUEST_CHANNEL, "Failed on setting non-blocking mode for "
+-				"file %s", fd_path);
++				"file %s\n", fd_path);
+ 		goto error;
+ 	}
+ 	/* QEMU needs a delay after connection */
+diff --git a/dpdk/lib/power/power_intel_uncore.c b/dpdk/lib/power/power_intel_uncore.c
+index 688aebc4ee..be174dce44 100644
+--- a/dpdk/lib/power/power_intel_uncore.c
++++ b/dpdk/lib/power/power_intel_uncore.c
+@@ -11,7 +11,6 @@
+ #include "power_intel_uncore.h"
+ #include "power_common.h"
+ 
+-#define MAX_UNCORE_FREQS 32
+ #define MAX_NUMA_DIE 8
+ #define BUS_FREQ     100000
+ #define FILTER_LENGTH 18
+@@ -32,7 +31,7 @@
+ struct uncore_power_info {
+ 	unsigned int die;                  /* Core die id */
+ 	unsigned int pkg;                  /* Package id */
+-	uint32_t freqs[MAX_UNCORE_FREQS];  /* Frequency array */
++	uint32_t freqs[RTE_MAX_UNCORE_FREQS]; /* Frequency array */
+ 	uint32_t nb_freqs;                 /* Number of available freqs */
+ 	FILE *f_cur_min;                   /* FD of scaling_min */
+ 	FILE *f_cur_max;                   /* FD of scaling_max */
+@@ -51,7 +50,7 @@ set_uncore_freq_internal(struct uncore_power_info *ui, uint32_t idx)
+ 	uint32_t target_uncore_freq, curr_max_freq;
+ 	int ret;
+ 
+-	if (idx >= MAX_UNCORE_FREQS || idx >= ui->nb_freqs) {
++	if (idx >= RTE_MAX_UNCORE_FREQS || idx >= ui->nb_freqs) {
+ 		RTE_LOG(DEBUG, POWER, "Invalid uncore frequency index %u, which "
+ 				"should be less than %u\n", idx, ui->nb_freqs);
+ 		return -1;
+@@ -221,7 +220,7 @@ power_get_available_uncore_freqs(struct uncore_power_info *ui)
+ 	uint32_t i, num_uncore_freqs = 0;
+ 
+ 	num_uncore_freqs = (ui->init_max_freq - ui->init_min_freq) / BUS_FREQ + 1;
+-	if (num_uncore_freqs >= MAX_UNCORE_FREQS) {
++	if (num_uncore_freqs >= RTE_MAX_UNCORE_FREQS) {
+ 		RTE_LOG(ERR, POWER, "Too many available uncore frequencies: %d\n",
+ 				num_uncore_freqs);
+ 		goto out;
+diff --git a/dpdk/lib/power/rte_power_pmd_mgmt.c b/dpdk/lib/power/rte_power_pmd_mgmt.c
+index 38f8384085..6f18ed0adf 100644
+--- a/dpdk/lib/power/rte_power_pmd_mgmt.c
++++ b/dpdk/lib/power/rte_power_pmd_mgmt.c
+@@ -686,7 +686,7 @@ int
+ rte_power_pmd_mgmt_set_pause_duration(unsigned int duration)
+ {
+ 	if (duration == 0) {
+-		RTE_LOG(ERR, POWER, "Pause duration must be greater than 0, value unchanged");
++		RTE_LOG(ERR, POWER, "Pause duration must be greater than 0, value unchanged\n");
+ 		return -EINVAL;
+ 	}
+ 	pause_duration = duration;
+@@ -709,7 +709,7 @@ rte_power_pmd_mgmt_set_scaling_freq_min(unsigned int lcore, unsigned int min)
+ 	}
+ 
+ 	if (min > scale_freq_max[lcore]) {
+-		RTE_LOG(ERR, POWER, "Invalid min frequency: Cannot be greater than max frequency");
++		RTE_LOG(ERR, POWER, "Invalid min frequency: Cannot be greater than max frequency\n");
+ 		return -EINVAL;
+ 	}
+ 	scale_freq_min[lcore] = min;
+@@ -729,7 +729,7 @@ rte_power_pmd_mgmt_set_scaling_freq_max(unsigned int lcore, unsigned int max)
+ 	if (max == 0)
+ 		max = UINT32_MAX;
+ 	if (max < scale_freq_min[lcore]) {
+-		RTE_LOG(ERR, POWER, "Invalid max frequency: Cannot be less than min frequency");
++		RTE_LOG(ERR, POWER, "Invalid max frequency: Cannot be less than min frequency\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/dpdk/lib/rawdev/rte_rawdev.c b/dpdk/lib/rawdev/rte_rawdev.c
+index 474bdc9540..4f8897b639 100644
+--- a/dpdk/lib/rawdev/rte_rawdev.c
++++ b/dpdk/lib/rawdev/rte_rawdev.c
+@@ -656,7 +656,7 @@ handle_dev_dump(const char *cmd __rte_unused,
+ 	if (!rte_rawdev_pmd_is_valid_dev(dev_id))
+ 		return -EINVAL;
+ 
+-	buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN);
++	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
+ 	if (buf == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/dpdk/lib/rcu/rte_rcu_qsbr.c b/dpdk/lib/rcu/rte_rcu_qsbr.c
+index a9f3d6cc98..41a44be4b9 100644
+--- a/dpdk/lib/rcu/rte_rcu_qsbr.c
++++ b/dpdk/lib/rcu/rte_rcu_qsbr.c
+@@ -92,7 +92,7 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
+ 		return 1;
+ 	}
+ 
+-	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
++	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
+ 				v->qsbr_cnt[thread_id].lock_cnt);
+ 
+ 	id = thread_id & __RTE_QSBR_THRID_MASK;
+@@ -144,7 +144,7 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
+ 		return 1;
+ 	}
+ 
+-	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
++	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
+ 				v->qsbr_cnt[thread_id].lock_cnt);
+ 
+ 	id = thread_id & __RTE_QSBR_THRID_MASK;
+diff --git a/dpdk/lib/rcu/rte_rcu_qsbr.h b/dpdk/lib/rcu/rte_rcu_qsbr.h
+index 5979fb0efb..8bda00e911 100644
+--- a/dpdk/lib/rcu/rte_rcu_qsbr.h
++++ b/dpdk/lib/rcu/rte_rcu_qsbr.h
+@@ -299,7 +299,7 @@ rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
+ 
+ 	RTE_ASSERT(v != NULL && thread_id < v->max_threads);
+ 
+-	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
++	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
+ 				v->qsbr_cnt[thread_id].lock_cnt);
+ 
+ 	/* Copy the current value of token.
+@@ -350,7 +350,7 @@ rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
+ {
+ 	RTE_ASSERT(v != NULL && thread_id < v->max_threads);
+ 
+-	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
++	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
+ 				v->qsbr_cnt[thread_id].lock_cnt);
+ 
+ 	/* The reader can go offline only after the load of the
+@@ -427,7 +427,7 @@ rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
+ 				1, rte_memory_order_release);
+ 
+ 	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
+-				"Lock counter %u. Nested locks?\n",
++				"Lock counter %u. Nested locks?",
+ 				v->qsbr_cnt[thread_id].lock_cnt);
+ #endif
+ }
+@@ -481,7 +481,7 @@ rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
+ 
+ 	RTE_ASSERT(v != NULL && thread_id < v->max_threads);
+ 
+-	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
++	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
+ 				v->qsbr_cnt[thread_id].lock_cnt);
+ 
+ 	/* Acquire the changes to the shared data structure released
+@@ -664,16 +664,20 @@ __rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
+ static __rte_always_inline int
+ rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
+ {
++	uint64_t acked_token;
++
+ 	RTE_ASSERT(v != NULL);
+ 
+ 	/* Check if all the readers have already acknowledged this token */
+-	if (likely(t <= v->acked_token)) {
++	acked_token = rte_atomic_load_explicit(&v->acked_token,
++						rte_memory_order_relaxed);
++	if (likely(t <= acked_token)) {
+ 		__RTE_RCU_DP_LOG(DEBUG,
+ 			"%s: check: token = %" PRIu64 ", wait = %d",
+ 			__func__, t, wait);
+ 		__RTE_RCU_DP_LOG(DEBUG,
+ 			"%s: status: least acked token = %" PRIu64,
+-			__func__, v->acked_token);
++			__func__, acked_token);
+ 		return 1;
+ 	}
+ 
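The rte_rcu_qsbr_check() change above turns the fast-path read of acked_token into an explicit relaxed atomic load, removing a data race with writers that update it concurrently. Relaxed ordering suffices because acked_token only moves forward: a stale value merely sends the caller into the full per-thread scan, never to a wrong "quiescent" answer. The equivalent pattern in plain C11 atomics:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t acked_token;

static bool
token_already_acked(uint64_t t)
{
	/* Relaxed load: a stale value only costs the slow path. */
	return t <= atomic_load_explicit(&acked_token,
					 memory_order_relaxed);
}
```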
+diff --git a/dpdk/lib/regexdev/rte_regexdev.c b/dpdk/lib/regexdev/rte_regexdev.c
+index caec069182..d38a85eb0b 100644
+--- a/dpdk/lib/regexdev/rte_regexdev.c
++++ b/dpdk/lib/regexdev/rte_regexdev.c
+@@ -19,7 +19,7 @@ static struct {
+ 	struct rte_regexdev_data data[RTE_MAX_REGEXDEV_DEVS];
+ } *rte_regexdev_shared_data;
+ 
+-int rte_regexdev_logtype;
++RTE_LOG_REGISTER_DEFAULT(rte_regexdev_logtype, INFO);
+ 
+ static uint16_t
+ regexdev_find_free_dev(void)
+diff --git a/dpdk/lib/stack/rte_stack.c b/dpdk/lib/stack/rte_stack.c
+index 1fabec2bfe..1dab6d6645 100644
+--- a/dpdk/lib/stack/rte_stack.c
++++ b/dpdk/lib/stack/rte_stack.c
+@@ -56,7 +56,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id,
+ 	int ret;
+ 
+ 	if (flags & ~(RTE_STACK_F_LF)) {
+-		STACK_LOG_ERR("Unsupported stack flags %#x\n", flags);
++		STACK_LOG_ERR("Unsupported stack flags %#x", flags);
+ 		return NULL;
+ 	}
+ 
+@@ -65,7 +65,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id,
+ #endif
+ #if !defined(RTE_STACK_LF_SUPPORTED)
+ 	if (flags & RTE_STACK_F_LF) {
+-		STACK_LOG_ERR("Lock-free stack is not supported on your platform\n");
++		STACK_LOG_ERR("Lock-free stack is not supported on your platform");
+ 		rte_errno = ENOTSUP;
+ 		return NULL;
+ 	}
+@@ -82,7 +82,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id,
+ 
+ 	te = rte_zmalloc("STACK_TAILQ_ENTRY", sizeof(*te), 0);
+ 	if (te == NULL) {
+-		STACK_LOG_ERR("Cannot reserve memory for tailq\n");
++		STACK_LOG_ERR("Cannot reserve memory for tailq");
+ 		rte_errno = ENOMEM;
+ 		return NULL;
+ 	}
+@@ -92,7 +92,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id,
+ 	mz = rte_memzone_reserve_aligned(mz_name, sz, socket_id,
+ 					 0, __alignof__(*s));
+ 	if (mz == NULL) {
+-		STACK_LOG_ERR("Cannot reserve stack memzone!\n");
++		STACK_LOG_ERR("Cannot reserve stack memzone!");
+ 		rte_mcfg_tailq_write_unlock();
+ 		rte_free(te);
+ 		return NULL;
+diff --git a/dpdk/lib/telemetry/telemetry.c b/dpdk/lib/telemetry/telemetry.c
+index 92982842a8..f688db7981 100644
+--- a/dpdk/lib/telemetry/telemetry.c
++++ b/dpdk/lib/telemetry/telemetry.c
+@@ -169,7 +169,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len)
+ 		d->type != TEL_ARRAY_INT && d->type != TEL_ARRAY_STRING)
+ 		return snprintf(out_buf, buf_len, "null");
+ 
+-	used = rte_tel_json_empty_array(out_buf, buf_len, 0);
++	if (d->type == TEL_DICT)
++		used = rte_tel_json_empty_obj(out_buf, buf_len, 0);
++	else
++		used = rte_tel_json_empty_array(out_buf, buf_len, 0);
++
+ 	if (d->type == TEL_ARRAY_UINT)
+ 		for (i = 0; i < d->data_len; i++)
+ 			used = rte_tel_json_add_array_uint(out_buf,
+@@ -377,8 +381,8 @@ client_handler(void *sock_id)
+ 			"{\"version\":\"%s\",\"pid\":%d,\"max_output_len\":%d}",
+ 			telemetry_version, getpid(), MAX_OUTPUT_LEN);
+ 	if (write(s, info_str, strlen(info_str)) < 0) {
+-		close(s);
+-		return NULL;
++		TMTY_LOG(DEBUG, "Failed to write base info to client socket\n");
++		goto exit;
+ 	}
+ 
+ 	/* receive data is not null terminated */
+@@ -403,6 +407,7 @@ client_handler(void *sock_id)
+ 
+ 		bytes = read(s, buffer, sizeof(buffer) - 1);
+ 	}
++exit:
+ 	close(s);
+ 	rte_atomic_fetch_sub_explicit(&v2_clients, 1, rte_memory_order_relaxed);
+ 	return NULL;
+diff --git a/dpdk/lib/telemetry/telemetry_legacy.c b/dpdk/lib/telemetry/telemetry_legacy.c
+index 4c1d1c353a..578230732c 100644
+--- a/dpdk/lib/telemetry/telemetry_legacy.c
++++ b/dpdk/lib/telemetry/telemetry_legacy.c
+@@ -94,7 +94,7 @@ register_client(const char *cmd __rte_unused, const char *params,
+ 	}
+ #ifndef RTE_EXEC_ENV_WINDOWS
+ 	strlcpy(data, strchr(params, ':'), sizeof(data));
+-	memcpy(data, &data[strlen(":\"")], strlen(data));
++	memmove(data, &data[strlen(":\"")], strlen(data));
+ 	if (!strchr(data, '\"')) {
+ 		fprintf(stderr, "Invalid client data\n");
+ 		return -1;
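memcpy() above was shifting a string left within the same buffer, i.e. with overlapping source and destination, which is undefined behavior; memmove() is specified for exactly that case. Standalone illustration:

```c
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char data[] = ":\"client\"";

	/* Drop the leading ':"' in place, as the code above does. */
	memmove(data, &data[2], strlen(data) - 1);
	printf("%s\n", data);	/* client" */
	return 0;
}
```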
+diff --git a/dpdk/lib/vhost/fd_man.c b/dpdk/lib/vhost/fd_man.c
+index 134414fb4b..84c5da0793 100644
+--- a/dpdk/lib/vhost/fd_man.c
++++ b/dpdk/lib/vhost/fd_man.c
+@@ -307,10 +307,11 @@ fdset_event_dispatch(void *arg)
+ }
+ 
+ static void
+-fdset_pipe_read_cb(int readfd, void *dat __rte_unused,
++fdset_pipe_read_cb(int readfd, void *dat,
+ 		   int *remove __rte_unused)
+ {
+ 	char charbuf[16];
++	struct fdset *fdset = dat;
+ 	int r = read(readfd, charbuf, sizeof(charbuf));
+ 	/*
+ 	 * Just an optimization, we don't care if read() failed
+@@ -318,6 +319,11 @@ fdset_pipe_read_cb(int readfd, void *dat __rte_unused,
+ 	 * compiler happy
+ 	 */
+ 	RTE_SET_USED(r);
++
++	pthread_mutex_lock(&fdset->sync_mutex);
++	fdset->sync = true;
++	pthread_cond_broadcast(&fdset->sync_cond);
++	pthread_mutex_unlock(&fdset->sync_mutex);
+ }
+ 
+ void
+@@ -340,7 +346,7 @@ fdset_pipe_init(struct fdset *fdset)
+ 	}
+ 
+ 	ret = fdset_add(fdset, fdset->u.readfd,
+-			fdset_pipe_read_cb, NULL, NULL);
++			fdset_pipe_read_cb, NULL, fdset);
+ 
+ 	if (ret < 0) {
+ 		RTE_LOG(ERR, VHOST_FDMAN,
+@@ -364,5 +370,18 @@ fdset_pipe_notify(struct fdset *fdset)
+ 	 * compiler happy
+ 	 */
+ 	RTE_SET_USED(r);
++}
++
++void
++fdset_pipe_notify_sync(struct fdset *fdset)
++{
++	pthread_mutex_lock(&fdset->sync_mutex);
++
++	fdset->sync = false;
++	fdset_pipe_notify(fdset);
++
++	while (!fdset->sync)
++		pthread_cond_wait(&fdset->sync_cond, &fdset->sync_mutex);
+ 
++	pthread_mutex_unlock(&fdset->sync_mutex);
+ }
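fdset_pipe_notify_sync() above is a post-and-wait handshake: the caller clears a flag, pokes the dispatch thread through the pipe, and blocks on a condition variable until the pipe-read callback sets the flag back. That guarantees the event loop has observed a prior fdset_del() before the caller tears the device down. The generic shape of the pattern, with illustrative names:

```c
#include <pthread.h>
#include <stdbool.h>

struct sync_point {
	pthread_mutex_t mu;
	pthread_cond_t cv;
	bool acked;
};

/* Caller side: request and block until the worker has seen it. */
static void
request_sync(struct sync_point *s, void (*poke)(void))
{
	pthread_mutex_lock(&s->mu);
	s->acked = false;
	poke();		/* e.g. write one byte to the event pipe */
	while (!s->acked)
		pthread_cond_wait(&s->cv, &s->mu);
	pthread_mutex_unlock(&s->mu);
}

/* Worker side: runs when the pipe becomes readable. */
static void
ack_sync(struct sync_point *s)
{
	pthread_mutex_lock(&s->mu);
	s->acked = true;
	pthread_cond_broadcast(&s->cv);
	pthread_mutex_unlock(&s->mu);
}
```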
+diff --git a/dpdk/lib/vhost/fd_man.h b/dpdk/lib/vhost/fd_man.h
+index 6315904c8e..7816fb11ac 100644
+--- a/dpdk/lib/vhost/fd_man.h
++++ b/dpdk/lib/vhost/fd_man.h
+@@ -6,6 +6,7 @@
+ #define _FD_MAN_H_
+ #include <pthread.h>
+ #include <poll.h>
++#include <stdbool.h>
+ 
+ #define MAX_FDS 1024
+ 
+@@ -35,6 +36,10 @@ struct fdset {
+ 			int writefd;
+ 		};
+ 	} u;
++
++	pthread_mutex_t sync_mutex;
++	pthread_cond_t sync_cond;
++	bool sync;
+ };
+ 
+ 
+@@ -53,5 +58,6 @@ int fdset_pipe_init(struct fdset *fdset);
+ void fdset_pipe_uninit(struct fdset *fdset);
+ 
+ void fdset_pipe_notify(struct fdset *fdset);
++void fdset_pipe_notify_sync(struct fdset *fdset);
+ 
+ #endif
+diff --git a/dpdk/lib/vhost/socket.c b/dpdk/lib/vhost/socket.c
+index 5882e44176..0b95c54c5b 100644
+--- a/dpdk/lib/vhost/socket.c
++++ b/dpdk/lib/vhost/socket.c
+@@ -93,6 +93,7 @@ static struct vhost_user vhost_user = {
+ 		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
+ 		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
+ 		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
++		.sync_mutex = PTHREAD_MUTEX_INITIALIZER,
+ 		.num = 0
+ 	},
+ 	.vsocket_cnt = 0,
+diff --git a/dpdk/lib/vhost/vdpa.c b/dpdk/lib/vhost/vdpa.c
+index 219eef879c..ce4fb09859 100644
+--- a/dpdk/lib/vhost/vdpa.c
++++ b/dpdk/lib/vhost/vdpa.c
+@@ -19,6 +19,7 @@
+ #include "rte_vdpa.h"
+ #include "vdpa_driver.h"
+ #include "vhost.h"
++#include "iotlb.h"
+ 
+ /** Double linked list of vDPA devices. */
+ TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);
+@@ -147,7 +148,6 @@ rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
+ 
+ int
+ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
+-	__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
+ {
+ 	struct virtio_net *dev = get_device(vid);
+ 	uint16_t idx, idx_m, desc_id;
+@@ -193,17 +193,21 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
+ 			if (unlikely(nr_descs > vq->size))
+ 				return -1;
+ 
++			vhost_user_iotlb_rd_lock(vq);
+ 			desc_ring = (struct vring_desc *)(uintptr_t)
+ 				vhost_iova_to_vva(dev, vq,
+ 						vq->desc[desc_id].addr, &dlen,
+ 						VHOST_ACCESS_RO);
++			vhost_user_iotlb_rd_unlock(vq);
+ 			if (unlikely(!desc_ring))
+ 				return -1;
+ 
+ 			if (unlikely(dlen < vq->desc[desc_id].len)) {
++				vhost_user_iotlb_rd_lock(vq);
+ 				idesc = vhost_alloc_copy_ind_table(dev, vq,
+ 						vq->desc[desc_id].addr,
+ 						vq->desc[desc_id].len);
++				vhost_user_iotlb_rd_unlock(vq);
+ 				if (unlikely(!idesc))
+ 					return -1;
+ 
+@@ -220,9 +224,12 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
+ 			if (unlikely(nr_descs-- == 0))
+ 				goto fail;
+ 			desc = desc_ring[desc_id];
+-			if (desc.flags & VRING_DESC_F_WRITE)
++			if (desc.flags & VRING_DESC_F_WRITE) {
++				vhost_user_iotlb_rd_lock(vq);
+ 				vhost_log_write_iova(dev, vq, desc.addr,
+ 						     desc.len);
++				vhost_user_iotlb_rd_unlock(vq);
++			}
+ 			desc_id = desc.next;
+ 		} while (desc.flags & VRING_DESC_F_NEXT);
+ 
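Note: the vdpa.c hunks replace the old __rte_no_thread_safety_analysis escape hatch with real locking: every IOVA-to-VA translation and dirty-log write now runs under the virtqueue's IOTLB read lock, so a concurrent IOTLB invalidation cannot pull a mapping out from under the relay. The pattern, reduced to a sketch (the wrapper name is hypothetical; the locking calls are the internal vhost helpers visible in the hunks above, so this is not a standalone program):

    /* Hypothetical wrapper showing the translate-under-rd-lock pattern. */
    static void *iova_to_vva_locked(struct virtio_net *dev,
                                    struct vhost_virtqueue *vq,
                                    uint64_t iova, uint64_t *len)
    {
        void *vva;

        vhost_user_iotlb_rd_lock(vq);
        vva = (void *)(uintptr_t)vhost_iova_to_vva(dev, vq, iova, len,
                                                   VHOST_ACCESS_RO);
        vhost_user_iotlb_rd_unlock(vq);
        return vva;
    }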
+diff --git a/dpdk/lib/vhost/vduse.c b/dpdk/lib/vhost/vduse.c
+index 080b58f7de..b46f0e53c7 100644
+--- a/dpdk/lib/vhost/vduse.c
++++ b/dpdk/lib/vhost/vduse.c
+@@ -36,6 +36,7 @@ static struct vduse vduse = {
+ 		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
+ 		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
+ 		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
++		.sync_mutex = PTHREAD_MUTEX_INITIALIZER,
+ 		.num = 0
+ 	},
+ };
+@@ -196,6 +197,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
+ 				vq->size * sizeof(struct batch_copy_elem),
+ 				RTE_CACHE_LINE_SIZE, 0);
+ 
++	rte_rwlock_write_lock(&vq->access_lock);
+ 	vhost_user_iotlb_rd_lock(vq);
+ 	if (vring_translate(dev, vq))
+ 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to translate vring %d addresses\n",
+@@ -206,6 +208,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
+ 				"Failed to disable guest notifications on vring %d\n",
+ 				index);
+ 	vhost_user_iotlb_rd_unlock(vq);
++	rte_rwlock_write_unlock(&vq->access_lock);
+ 
+ 	vq_efd.index = index;
+ 	vq_efd.fd = vq->kickfd;
+@@ -259,7 +262,9 @@ vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
+ 	close(vq->kickfd);
+ 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ 
++	rte_rwlock_write_lock(&vq->access_lock);
+ 	vring_invalidate(dev, vq);
++	rte_rwlock_write_unlock(&vq->access_lock);
+ 
+ 	rte_free(vq->batch_copy_elems);
+ 	vq->batch_copy_elems = NULL;
+@@ -614,7 +619,7 @@ vduse_device_destroy(const char *path)
+ 	vduse_device_stop(dev);
+ 
+ 	fdset_del(&vduse.fdset, dev->vduse_dev_fd);
+-	fdset_pipe_notify(&vduse.fdset);
++	fdset_pipe_notify_sync(&vduse.fdset);
+ 
+ 	if (dev->vduse_dev_fd >= 0) {
+ 		close(dev->vduse_dev_fd);
+diff --git a/dpdk/lib/vhost/vhost_crypto.c b/dpdk/lib/vhost/vhost_crypto.c
+index 9bf5ef67b9..7b22281815 100644
+--- a/dpdk/lib/vhost/vhost_crypto.c
++++ b/dpdk/lib/vhost/vhost_crypto.c
+@@ -245,7 +245,7 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform,
+ 		return ret;
+ 
+ 	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
+-		VC_LOG_DBG("Invalid cipher key length\n");
++		VC_LOG_DBG("Invalid cipher key length");
+ 		return -VIRTIO_CRYPTO_BADMSG;
+ 	}
+ 
+@@ -301,7 +301,7 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms,
+ 		return ret;
+ 
+ 	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
+-		VC_LOG_DBG("Invalid cipher key length\n");
++		VC_LOG_DBG("Invalid cipher key length");
+ 		return -VIRTIO_CRYPTO_BADMSG;
+ 	}
+ 
+@@ -321,7 +321,7 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms,
+ 		return ret;
+ 
+ 	if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
+-		VC_LOG_DBG("Invalid auth key length\n");
++		VC_LOG_DBG("Invalid auth key length");
+ 		return -VIRTIO_CRYPTO_BADMSG;
+ 	}
+ 
+diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c
+index e36312181a..f8e42dd619 100644
+--- a/dpdk/lib/vhost/vhost_user.c
++++ b/dpdk/lib/vhost/vhost_user.c
+@@ -1799,6 +1799,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev,
+ 		if (!vq)
+ 			continue;
+ 
++		cleanup_vq_inflight(dev, vq);
+ 		if (vq_is_packed(dev)) {
+ 			vq->inflight_packed = addr;
+ 			vq->inflight_packed->desc_num = queue_size;
+@@ -2198,7 +2199,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
+ 
+ 	vhost_user_iotlb_flush_all(dev);
+ 
++	rte_rwlock_write_lock(&vq->access_lock);
+ 	vring_invalidate(dev, vq);
++	rte_rwlock_write_unlock(&vq->access_lock);
+ 
+ 	return RTE_VHOST_MSG_RESULT_REPLY;
+ }
+diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c
+index 8af20f1487..6d53ff932d 100644
+--- a/dpdk/lib/vhost/virtio_net.c
++++ b/dpdk/lib/vhost/virtio_net.c
+@@ -1696,6 +1696,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
+ 	return pkt_idx;
+ }
+ 
++static void
++virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
++{
++	rte_rwlock_write_lock(&vq->access_lock);
++	vhost_user_iotlb_rd_lock(vq);
++	if (!vq->access_ok)
++		vring_translate(dev, vq);
++	vhost_user_iotlb_rd_unlock(vq);
++	rte_rwlock_write_unlock(&vq->access_lock);
++}
++
+ static __rte_always_inline uint32_t
+ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 	struct rte_mbuf **pkts, uint32_t count)
+@@ -1710,9 +1721,13 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 
+ 	vhost_user_iotlb_rd_lock(vq);
+ 
+-	if (unlikely(!vq->access_ok))
+-		if (unlikely(vring_translate(dev, vq) < 0))
+-			goto out;
++	if (unlikely(!vq->access_ok)) {
++		vhost_user_iotlb_rd_unlock(vq);
++		rte_rwlock_read_unlock(&vq->access_lock);
++
++		virtio_dev_vring_translate(dev, vq);
++		goto out_no_unlock;
++	}
+ 
+ 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+ 	if (count == 0)
+@@ -1731,6 +1746,7 @@ out:
+ out_access_unlock:
+ 	rte_rwlock_read_unlock(&vq->access_lock);
+ 
++out_no_unlock:
+ 	return nb_tx;
+ }
+ 
+@@ -1919,7 +1935,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
+ 	else
+ 		max_tries = 1;
+ 
+-	while (size > 0) {
++	do {
+ 		/*
+ 		 * if we tried all available ring items, and still
+ 		 * can't get enough buf, it means something abnormal
+@@ -1946,7 +1962,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
+ 		avail_idx += desc_count;
+ 		if (avail_idx >= vq->size)
+ 			avail_idx -= vq->size;
+-	}
++	} while (size > 0);
+ 
+ 	if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
+ 		return -1;
+@@ -2528,9 +2544,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 
+ 	vhost_user_iotlb_rd_lock(vq);
+ 
+-	if (unlikely(!vq->access_ok))
+-		if (unlikely(vring_translate(dev, vq) < 0))
+-			goto out;
++	if (unlikely(!vq->access_ok)) {
++		vhost_user_iotlb_rd_unlock(vq);
++		rte_rwlock_read_unlock(&vq->access_lock);
++
++		virtio_dev_vring_translate(dev, vq);
++		goto out_no_unlock;
++	}
+ 
+ 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+ 	if (count == 0)
+@@ -2551,6 +2571,7 @@ out:
+ out_access_unlock:
+ 	rte_rwlock_write_unlock(&vq->access_lock);
+ 
++out_no_unlock:
+ 	return nb_tx;
+ }
+ 
+@@ -3083,7 +3104,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ {
+ 	uint16_t i;
+ 	uint16_t avail_entries;
+-	uint16_t dropped = 0;
+ 	static bool allocerr_warned;
+ 
+ 	/*
+@@ -3122,11 +3142,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 
+ 		update_shadow_used_ring_split(vq, head_idx, 0);
+ 
+-		if (unlikely(buf_len <= dev->vhost_hlen)) {
+-			dropped += 1;
+-			i++;
++		if (unlikely(buf_len <= dev->vhost_hlen))
+ 			break;
+-		}
+ 
+ 		buf_len -= dev->vhost_hlen;
+ 
+@@ -3143,8 +3160,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 					buf_len, mbuf_pool->name);
+ 				allocerr_warned = true;
+ 			}
+-			dropped += 1;
+-			i++;
+ 			break;
+ 		}
+ 
+@@ -3155,27 +3170,21 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ 				VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n");
+ 				allocerr_warned = true;
+ 			}
+-			dropped += 1;
+-			i++;
+ 			break;
+ 		}
+-
+ 	}
+ 
+-	if (dropped)
+-		rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
+-
+-	vq->last_avail_idx += i;
++	if (unlikely(count != i))
++		rte_pktmbuf_free_bulk(&pkts[i], count - i);
+ 
+-	do_data_copy_dequeue(vq);
+-	if (unlikely(i < count))
+-		vq->shadow_used_idx = i;
+ 	if (likely(vq->shadow_used_idx)) {
++		vq->last_avail_idx += vq->shadow_used_idx;
++		do_data_copy_dequeue(vq);
+ 		flush_shadow_used_ring_split(dev, vq);
+ 		vhost_vring_call_split(dev, vq);
+ 	}
+ 
+-	return (i - dropped);
++	return i;
+ }
+ 
+ __rte_noinline
+@@ -3581,11 +3590,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
+ 
+ 	vhost_user_iotlb_rd_lock(vq);
+ 
+-	if (unlikely(!vq->access_ok))
+-		if (unlikely(vring_translate(dev, vq) < 0)) {
+-			count = 0;
+-			goto out;
+-		}
++	if (unlikely(!vq->access_ok)) {
++		vhost_user_iotlb_rd_unlock(vq);
++		rte_rwlock_read_unlock(&vq->access_lock);
++
++		virtio_dev_vring_translate(dev, vq);
++		goto out_no_unlock;
++	}
+ 
+ 	/*
+ 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
+@@ -3646,6 +3657,7 @@ out_access_unlock:
+ 	if (unlikely(rarp_mbuf != NULL))
+ 		count += 1;
+ 
++out_no_unlock:
+ 	return count;
+ }
+ 
+@@ -4196,11 +4208,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
+ 
+ 	vhost_user_iotlb_rd_lock(vq);
+ 
+-	if (unlikely(vq->access_ok == 0))
+-		if (unlikely(vring_translate(dev, vq) < 0)) {
+-			count = 0;
+-			goto out;
+-		}
++	if (unlikely(vq->access_ok == 0)) {
++		vhost_user_iotlb_rd_unlock(vq);
++		rte_rwlock_read_unlock(&vq->access_lock);
++
++		virtio_dev_vring_translate(dev, vq);
++		count = 0;
++		goto out_no_unlock;
++	}
+ 
+ 	/*
+ 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
+@@ -4266,5 +4281,6 @@ out_access_unlock:
+ 	if (unlikely(rarp_mbuf != NULL))
+ 		count += 1;
+ 
++out_no_unlock:
+ 	return count;
+ }
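Note: the virtio_net.c changes rework the cold path taken when vq->access_ok is false: rather than translating the rings while only the access_lock read lock is held, each burst function now drops both locks, redoes the translation under the write lock in the new virtio_dev_vring_translate() helper, and returns 0 so the caller simply retries on its next burst. A generic sketch of that idiom under stated assumptions ('ring', 'ring_setup' and 'do_rx' are hypothetical; the rte_rwlock_* calls are the real DPDK API used above):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_branch_prediction.h>
    #include <rte_rwlock.h>

    struct ring {
        rte_rwlock_t lock;
        bool ready;                 /* mirrors vq->access_ok */
    };

    void ring_setup(struct ring *r);                          /* hypothetical */
    uint16_t do_rx(struct ring *r, void **pkts, uint16_t n);  /* hypothetical */

    static uint16_t
    burst_rx(struct ring *r, void **pkts, uint16_t n)
    {
        rte_rwlock_read_lock(&r->lock);
        if (unlikely(!r->ready)) {
            rte_rwlock_read_unlock(&r->lock);

            rte_rwlock_write_lock(&r->lock);
            if (!r->ready) {
                ring_setup(r);      /* idempotent one-time setup */
                r->ready = true;
            }
            rte_rwlock_write_unlock(&r->lock);
            return 0;               /* caller retries on its next burst */
        }
        n = do_rx(r, pkts, n);
        rte_rwlock_read_unlock(&r->lock);
        return n;
    }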
+diff --git a/dpdk/meson.build b/dpdk/meson.build
+index 5e161f43e5..8b248d4505 100644
+--- a/dpdk/meson.build
++++ b/dpdk/meson.build
+@@ -164,17 +164,17 @@ message(output_message + '\n')
+ output_message = '\n=================\nContent Skipped\n=================\n'
+ output_message += '\napps:\n\t'
+ foreach app:dpdk_apps_disabled
+-    reason = get_variable(app.underscorify() + '_disable_reason')
++    reason = get_variable('app_' + app.underscorify() + '_disable_reason')
+     output_message += app + ':\t' + reason + '\n\t'
+ endforeach
+ output_message += '\nlibs:\n\t'
+ foreach lib:dpdk_libs_disabled
+-    reason = get_variable(lib.underscorify() + '_disable_reason')
++    reason = get_variable('lib_' + lib.underscorify() + '_disable_reason')
+     output_message += lib + ':\t' + reason + '\n\t'
+ endforeach
+ output_message += '\ndrivers:\n\t'
+ foreach drv:dpdk_drvs_disabled
+-    reason = get_variable(drv.underscorify() + '_disable_reason')
++    reason = get_variable('drv_' + drv.underscorify() + '_disable_reason')
+     output_message += drv + ':\t' + reason + '\n\t'
+ endforeach
+ message(output_message + '\n')
diff --git a/SOURCES/openvswitch-hugetlbfs.sysusers b/SOURCES/openvswitch-hugetlbfs.sysusers
new file mode 100644
index 0000000..08b2fb1
--- /dev/null
+++ b/SOURCES/openvswitch-hugetlbfs.sysusers
@@ -0,0 +1,2 @@
+#Type Name         ID         GECOS                   Home directory  Shell
+m     openvswitch  hugetlbfs
diff --git a/SOURCES/openvswitch.sysusers b/SOURCES/openvswitch.sysusers
new file mode 100644
index 0000000..a8d06aa
--- /dev/null
+++ b/SOURCES/openvswitch.sysusers
@@ -0,0 +1,2 @@
+#Type Name         ID         GECOS                   Home directory  Shell
+u     openvswitch  -          "Open vSwitch Daemons"  /               /sbin/nologin
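Note: these two sysusers.d fragments are consumed by %sysusers_create_compat in the spec's %pre section below. Roughly, the 'u' line declares a system user and the 'm' line a group membership. As a non-authoritative illustration, what systemd-sysusers ends up doing is approximately:

    # Rough shell equivalent (illustration only; the macro handles
    # idempotency and ID allocation portably):
    getent passwd openvswitch >/dev/null || \
        useradd -r -c "Open vSwitch Daemons" -d / -s /sbin/nologin openvswitch
    getent group hugetlbfs >/dev/null || groupadd -r hugetlbfs
    usermod -a -G hugetlbfs openvswitch    # the 'm' membership line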
diff --git a/SPECS/openvswitch3.3.spec b/SPECS/openvswitch3.3.spec
new file mode 100644
index 0000000..4e90fe1
--- /dev/null
+++ b/SPECS/openvswitch3.3.spec
@@ -0,0 +1,1542 @@
+# Copyright (C) 2009, 2010, 2013, 2014 Nicira Networks, Inc.
+#
+# Copying and distribution of this file, with or without modification,
+# are permitted in any medium without royalty provided the copyright
+# notice and this notice are preserved.  This file is offered as-is,
+# without warranty of any kind.
+#
+# If tests have to be skipped while building, specify the '--without check'
+# option. For example:
+# rpmbuild -bb --without check rhel/openvswitch-fedora.spec
+
+# This defines the base package name's version.
+
+%define pkgname openvswitch3.3
+
+
+%if 0%{?commit:1}
+%global shortcommit %(c=%{commit}; echo ${c:0:7})
+%endif
+
+# Enable PIE, bz#955181
+%global _hardened_build 1
+
+# RHEL-7 doesn't define _rundir macro yet
+# Fedora 15 onwards uses /run as _rundir
+%if 0%{!?_rundir:1}
+%define _rundir /run
+%endif
+
+# FIXME Test "STP - flush the fdb and mdb when topology changed" fails on s390x
+# FIXME 2 tests fail on ppc64le. They will hopefully be fixed before the official 2.11
+%ifarch %{ix86} x86_64 aarch64
+%bcond_without check
+%else
+%bcond_with check
+%endif
+# option to run kernel datapath tests, requires building as root!
+%bcond_with check_datapath_kernel
+# option to build with libcap-ng, needed for running OVS as regular user
+%bcond_without libcapng
+# option to build with ipsec support
+%bcond_without ipsec
+
+# Build python2 (that provides python) and python3 subpackages on Fedora
+# Build only python3 (that provides python) subpackage on RHEL8
+# Build only python subpackage on RHEL7
+%if 0%{?rhel} > 7 || 0%{?fedora}
+# On RHEL8 Sphinx is included in buildroot
+%global external_sphinx 1
+%else
+# Don't use external sphinx (RHV doesn't have optional repositories enabled)
+%global external_sphinx 0
+%endif
+
+Name: %{pkgname}
+Summary: Open vSwitch
+Group: System Environment/Daemons daemon/database/utilities
+URL: http://www.openvswitch.org/
+Version: 3.3.0
+Release: 10%{?dist}
+
+# Nearly all of openvswitch is ASL 2.0.  The bugtool is LGPLv2+, and the
+# lib/sflow*.[ch] files are SISSL
+# datapath/ is GPLv2 (although not built into any of the binary packages)
+License: ASL 2.0 and LGPLv2+ and SISSL
+
+%define dpdkver 23.11
+%define dpdkdir dpdk
+%define dpdksver %(echo %{dpdkver} | cut -d. -f-2)
+# NOTE: DPDK does not currently build for s390x
+# DPDK on aarch64 is not stable enough to be enabled in FDP
+%if 0%{?rhel} > 7 || 0%{?fedora}
+%define dpdkarches x86_64 ppc64le
+%else
+%define dpdkarches
+%endif
+
+%if 0%{?commit:1}
+Source: https://github.com/openvswitch/ovs/archive/%{commit}.tar.gz#/openvswitch-%{commit}.tar.gz
+%else
+Source: https://github.com/openvswitch/ovs/archive/v%{version}.tar.gz#/openvswitch-%{version}.tar.gz
+%endif
+Source2: openvswitch.sysusers
+Source3: openvswitch-hugetlbfs.sysusers
+Source10: https://fast.dpdk.org/rel/dpdk-%{dpdkver}.tar.xz
+
+%define docutilsver 0.12
+%define pygmentsver 1.4
+%define sphinxver   1.2.3
+%define pyelftoolsver 0.27
+Source100: https://pypi.io/packages/source/d/docutils/docutils-%{docutilsver}.tar.gz
+Source101: https://pypi.io/packages/source/P/Pygments/Pygments-%{pygmentsver}.tar.gz
+Source102: https://pypi.io/packages/source/S/Sphinx/Sphinx-%{sphinxver}.tar.gz
+Source103: https://pypi.io/packages/source/p/pyelftools/pyelftools-%{pyelftoolsver}.tar.gz
+
+%define apply_patch %(test -s %{_sourcedir}/openvswitch-%{version}.patch && echo 1 || echo 0)
+
+%if %{apply_patch}
+Patch0:    openvswitch-%{version}.patch
+%endif
+
+# The DPDK is designed to optimize throughput of network traffic using, among
+# other techniques, carefully crafted assembly instructions.  As such it
+# needs extensive work to port it to other architectures.
+ExclusiveArch: x86_64 aarch64 ppc64le s390x
+
+# Do not enable this, otherwise YUM will break on any upgrade.
+# Provides: openvswitch
+Conflicts: openvswitch < 3.3
+Conflicts: openvswitch-dpdk < 3.3
+Conflicts: openvswitch2.10
+Conflicts: openvswitch2.11
+Conflicts: openvswitch2.12
+Conflicts: openvswitch2.13
+Conflicts: openvswitch2.14
+Conflicts: openvswitch2.15
+Conflicts: openvswitch2.16
+Conflicts: openvswitch2.17
+Conflicts: openvswitch3.0
+Conflicts: openvswitch3.1
+Conflicts: openvswitch3.2
+
+# FIXME Sphinx is used to generate some manpages.  Unfortunately, on RHEL it
+# lives in the -optional repository, so we can't require it directly: RHV
+# doesn't have the -optional repository enabled, and TPS would fail.
+%if %{external_sphinx}
+BuildRequires: python3-sphinx
+%else
+# Sphinx dependencies
+BuildRequires: python-devel
+BuildRequires: python-setuptools
+#BuildRequires: python2-docutils
+BuildRequires: python-jinja2
+BuildRequires: python-nose
+#BuildRequires: python2-pygments
+# docutils dependencies
+BuildRequires: python-imaging
+# pygments dependencies
+BuildRequires: python-nose
+%endif
+
+BuildRequires: gcc gcc-c++ make
+BuildRequires: autoconf automake libtool
+BuildRequires: systemd-units systemd-rpm-macros openssl openssl-devel
+BuildRequires: python3-devel python3-setuptools
+BuildRequires: desktop-file-utils
+BuildRequires: groff-base graphviz
+BuildRequires: unbound-devel
+BuildRequires: systemtap-sdt-devel
+# make check dependencies
+BuildRequires: procps-ng
+%if %{with check_datapath_kernel}
+BuildRequires: nmap-ncat
+# would be useful but not available in RHEL or EPEL
+#BuildRequires: pyftpdlib
+%endif
+
+%if %{with libcapng}
+BuildRequires: libcap-ng libcap-ng-devel
+%endif
+
+%ifarch %{dpdkarches}
+BuildRequires: meson
+%if 0%{?rhel} > 8 || 0%{?fedora}
+BuildRequires: python3-pyelftools
+%endif
+# DPDK driver dependencies
+BuildRequires: zlib-devel numactl-devel libarchive-devel
+# libarchive static dependencies
+BuildRequires: bzip2-devel libacl-devel libxml2-devel libzstd-devel lz4-devel xz-devel
+%ifarch x86_64
+BuildRequires: rdma-core-devel >= 15 libmnl-devel
+%endif
+
+# Required by packaging policy for the bundled DPDK
+Provides: bundled(dpdk) = %{dpdkver}
+%endif
+
+Requires: openssl iproute module-init-tools
+#Upstream kernel commit 4f647e0a3c37b8d5086214128614a136064110c3
+#Requires: kernel >= 3.15.0-0
+Requires: openvswitch-selinux-extra-policy
+
+%{?sysusers_requires_compat}
+Requires(post): /bin/sed
+Requires(post): systemd-units
+Requires(preun): systemd-units
+Requires(postun): systemd-units
+Obsoletes: openvswitch-controller <= 0:2.1.0-1
+
+%if 0%{?rhel}
+# sortedcontainers is not packaged on RHEL yet, but OVS bundles it
+%global __requires_exclude ^python%{python3_version}dist\\(sortedcontainers\\)$
+%endif
+
+%description
+Open vSwitch provides standard network bridging functions and
+support for the OpenFlow protocol for remote per-flow control of
+traffic.
+
+%package -n python3-%{pkgname}
+Summary: Open vSwitch python3 bindings
+License: ASL 2.0
+Requires: %{pkgname} = %{?epoch:%{epoch}:}%{version}-%{release}
+Provides: python-%{pkgname} = %{?epoch:%{epoch}:}%{version}-%{release}
+
+%description -n python3-%{pkgname}
+Python bindings for the Open vSwitch database
+
+%package test
+Summary: Open vSwitch testing utilities
+License: ASL 2.0
+BuildArch: noarch
+Requires: python3-%{pkgname} = %{?epoch:%{epoch}:}%{version}-%{release}
+Requires: tcpdump
+
+%description test
+Utilities that are useful to diagnose performance and connectivity
+issues in an Open vSwitch setup.
+
+%package devel
+Summary: Open vSwitch OpenFlow development package (library, headers)
+License: ASL 2.0
+Requires: %{pkgname} = %{?epoch:%{epoch}:}%{version}-%{release}
+
+%description devel
+This provides the shared library, libopenvswitch.so, and the openvswitch
+header files needed to build an external application.
+
+%if 0%{?rhel} == 8 || 0%{?fedora} > 28
+%package -n network-scripts-%{name}
+Summary: Open vSwitch legacy network service support
+License: ASL 2.0
+Requires: network-scripts
+Supplements: (%{name} and network-scripts)
+
+%description -n network-scripts-%{name}
+This provides the ifup and ifdown scripts for use with the legacy network
+service.
+%endif
+
+%if %{with ipsec}
+%package ipsec
+Summary: Open vSwitch IPsec tunneling support
+License: ASL 2.0
+Requires: python3-%{pkgname} = %{?epoch:%{epoch}:}%{version}-%{release}
+Requires: libreswan
+
+%description ipsec
+This package provides IPsec tunneling support for OVS tunnels.
+%endif
+
+%prep
+%if 0%{?commit:1}
+%setup -q -n ovs-%{commit} -a 10
+%else
+%setup -q -n ovs-%{version} -a 10
+%endif
+%if ! %{external_sphinx}
+%if 0%{?commit:1}
+%setup -n ovs-%{commit} -q -D -T -a 100 -a 101 -a 102
+%else
+%setup -n ovs-%{version} -q -D -T -a 100 -a 101 -a 102
+%endif
+%endif
+%if 0%{?rhel} && 0%{?rhel} < 9
+%if 0%{?commit:1}
+%setup -n ovs-%{commit} -q -D -T -a 103
+%else
+%setup -n ovs-%{version} -q -D -T -a 103
+%endif
+%endif
+
+mv dpdk-*/ %{dpdkdir}/
+
+%if %{apply_patch}
+%patch0 -p1
+%endif
+
+%build
+%if 0%{?rhel} && 0%{?rhel} < 9
+export PYTHONPATH="${PWD}/pyelftools-%{pyelftoolsver}"
+%endif
+# Build Sphinx on RHEL
+%if ! %{external_sphinx}
+export PYTHONPATH="${PYTHONPATH:+$PYTHONPATH:}%{_builddir}/pytmp/lib/python"
+for x in docutils-%{docutilsver} Pygments-%{pygmentsver} Sphinx-%{sphinxver}; do
+    pushd "$x"
+    python2 setup.py install --home %{_builddir}/pytmp
+    popd
+done
+
+export PATH="$PATH:%{_builddir}/pytmp/bin"
+%endif
+
+./boot.sh
+
+%ifarch %{dpdkarches}    # build dpdk
+# Let's build DPDK first
+cd %{dpdkdir}
+
+ENABLED_DRIVERS=(
+    bus/pci
+    bus/vdev
+    mempool/ring
+    net/failsafe
+    net/i40e
+    net/ring
+    net/vhost
+    net/virtio
+    net/tap
+)
+
+%ifarch x86_64
+ENABLED_DRIVERS+=(
+    baseband/acc
+    bus/auxiliary
+    bus/vmbus
+    common/iavf
+    common/mlx5
+    common/nfp
+    net/bnxt
+    net/enic
+    net/iavf
+    net/ice
+    net/mlx5
+    net/netvsc
+    net/nfp
+    net/qede
+    net/vdev_netvsc
+)
+%endif
+
+%ifarch aarch64 x86_64
+ENABLED_DRIVERS+=(
+    net/e1000
+    net/ixgbe
+)
+%endif
+
+for driver in "${ENABLED_DRIVERS[@]}"; do
+    enable_drivers="${enable_drivers:+$enable_drivers,}"$driver
+done
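Note: this loop (and the identical one for ENABLED_LIBS below) joins the array into the comma-separated list meson expects; the ${var:+...} expansion prepends a comma only when the variable is already non-empty. A tiny self-contained illustration with example values:

    # Illustration of the join idiom (standalone example values):
    list=""
    for item in bus/pci bus/vdev net/virtio; do
        list="${list:+$list,}$item"
    done
    echo "$list"    # prints: bus/pci,bus/vdev,net/virtio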
+
+# If doing any updates, this must be aligned with:
+# https://access.redhat.com/articles/3538141
+ENABLED_LIBS=(
+    bbdev
+    bitratestats
+    bpf
+    cmdline
+    cryptodev
+    dmadev
+    gro
+    gso
+    hash
+    ip_frag
+    latencystats
+    member
+    meter
+    metrics
+    pcapng
+    pdump
+    security
+    stack
+    vhost
+)
+
+for lib in "${ENABLED_LIBS[@]}"; do
+    enable_libs="${enable_libs:+$enable_libs,}"$lib
+done
+
+%set_build_flags
+%__meson --prefix=%{_builddir}/dpdk-build \
+         --buildtype=plain \
+         -Denable_libs="$enable_libs" \
+         -Ddisable_apps="*" \
+         -Denable_drivers="$enable_drivers" \
+         -Dplatform=generic \
+         -Dmax_ethports=1024 \
+         -Dmax_numa_nodes=8 \
+         -Dtests=false \
+         %{_vpath_builddir}
+%meson_build
+%__meson install -C %{_vpath_builddir} --no-rebuild
+
+# FIXME currently, with LTO enabled, OVS tries to link with both static and shared libraries
+rm -v %{_builddir}/dpdk-build/%{_lib}/*.so*
+
+# Generate a list of supported drivers, since it's hard to tell otherwise.
+cat << EOF > README.DPDK-PMDS
+DPDK drivers included in this package:
+
+EOF
+
+for f in %{_builddir}/dpdk-build/%{_lib}/librte_net_*.a; do
+    basename ${f} | cut -c12- | cut -d. -f1 | tr [:lower:] [:upper:]
+done >> README.DPDK-PMDS
+
+cat << EOF >> README.DPDK-PMDS
+
+For further information about the drivers, see
+http://dpdk.org/doc/guides-%{dpdksver}/nics/index.html
+EOF
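Note: the snippet above derives the PMD names for README.DPDK-PMDS from the static library filenames: cut -c12- strips the 11-character "librte_net_" prefix, the second cut drops the ".a" suffix, and tr upcases the remainder. For example (hypothetical path):

    # e.g. for a hypothetical build output:
    f=/builddir/dpdk-build/lib64/librte_net_i40e.a
    basename "$f" | cut -c12- | cut -d. -f1 | tr '[:lower:]' '[:upper:]'
    # prints: I40E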
+
+cd -
+%endif    # build dpdk
+
+# And now for OVS...
+mkdir build-shared build-static
+pushd build-shared
+ln -s ../configure
+%configure \
+%if %{with libcapng}
+        --enable-libcapng \
+%else
+        --disable-libcapng \
+%endif
+        --disable-static \
+        --enable-shared \
+        --enable-ssl \
+        --with-pkidir=%{_sharedstatedir}/openvswitch/pki \
+        --enable-usdt-probes \
+        --disable-afxdp \
+        --with-version-suffix=-%{release}
+make %{?_smp_mflags}
+popd
+pushd build-static
+ln -s ../configure
+%ifarch %{dpdkarches}
+PKG_CONFIG_PATH=%{_builddir}/dpdk-build/%{_lib}/pkgconfig \
+%endif
+%configure \
+%if %{with libcapng}
+        --enable-libcapng \
+%else
+        --disable-libcapng \
+%endif
+        --enable-ssl \
+%ifarch %{dpdkarches}
+        --with-dpdk=static \
+%endif
+        --with-pkidir=%{_sharedstatedir}/openvswitch/pki \
+        --enable-usdt-probes \
+        --disable-afxdp \
+        --with-version-suffix=-%{release}
+make %{?_smp_mflags}
+popd
+
+/usr/bin/python3 build-aux/dpdkstrip.py \
+        --dpdk \
+        < rhel/usr_lib_systemd_system_ovs-vswitchd.service.in \
+        > rhel/usr_lib_systemd_system_ovs-vswitchd.service
+
+%install
+rm -rf $RPM_BUILD_ROOT
+make -C build-shared install sbin_PROGRAMS=ovsdb/ovsdb-server DESTDIR=$RPM_BUILD_ROOT
+make -C build-static install bin_PROGRAMS= sbin_PROGRAMS=vswitchd/ovs-vswitchd DESTDIR=$RPM_BUILD_ROOT
+
+install -d -m 0755 $RPM_BUILD_ROOT%{_rundir}/openvswitch
+install -d -m 0750 $RPM_BUILD_ROOT%{_localstatedir}/log/openvswitch
+install -d -m 0755 $RPM_BUILD_ROOT%{_sysconfdir}/openvswitch
+
+install -p -D -m 0644 %{SOURCE2} $RPM_BUILD_ROOT%{_sysusersdir}/openvswitch.conf
+%ifarch %{dpdkarches}
+install -p -D -m 0644 %{SOURCE3} $RPM_BUILD_ROOT%{_sysusersdir}/openvswitch-hugetlbfs.conf
+%endif
+
+install -p -D -m 0644 rhel/usr_lib_udev_rules.d_91-vfio.rules \
+        $RPM_BUILD_ROOT%{_udevrulesdir}/91-vfio.rules
+
+install -p -D -m 0644 \
+        rhel/usr_share_openvswitch_scripts_systemd_sysconfig.template \
+        $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/openvswitch
+
+for service in openvswitch ovsdb-server ovs-vswitchd \
+               ovs-delete-transient-ports; do
+        install -p -D -m 0644 \
+                        rhel/usr_lib_systemd_system_${service}.service \
+                        $RPM_BUILD_ROOT%{_unitdir}/${service}.service
+done
+
+%if %{with ipsec}
+install -p -D -m 0644 rhel/usr_lib_systemd_system_openvswitch-ipsec.service \
+                      $RPM_BUILD_ROOT%{_unitdir}/openvswitch-ipsec.service
+%endif
+
+install -m 0755 rhel/etc_init.d_openvswitch \
+        $RPM_BUILD_ROOT%{_datadir}/openvswitch/scripts/openvswitch.init
+
+install -p -D -m 0644 rhel/etc_openvswitch_default.conf \
+        $RPM_BUILD_ROOT/%{_sysconfdir}/openvswitch/default.conf
+
+install -p -D -m 0644 rhel/etc_logrotate.d_openvswitch \
+        $RPM_BUILD_ROOT/%{_sysconfdir}/logrotate.d/openvswitch
+
+install -m 0644 vswitchd/vswitch.ovsschema \
+        $RPM_BUILD_ROOT/%{_datadir}/openvswitch/vswitch.ovsschema
+
+%if 0%{?rhel} < 9
+install -d -m 0755 $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/network-scripts/
+install -p -m 0755 rhel/etc_sysconfig_network-scripts_ifdown-ovs \
+        $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/network-scripts/ifdown-ovs
+install -p -m 0755 rhel/etc_sysconfig_network-scripts_ifup-ovs \
+        $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/network-scripts/ifup-ovs
+%endif
+
+install -d -m 0755 $RPM_BUILD_ROOT%{python3_sitelib}
+cp -a $RPM_BUILD_ROOT/%{_datadir}/openvswitch/python/ovstest \
+        $RPM_BUILD_ROOT%{python3_sitelib}
+
+# Build the JSON C extension for the Python lib (#1417738)
+pushd python
+(
+export CPPFLAGS="-I ../include -I ../build-shared/include"
+export LDFLAGS="%{__global_ldflags} -L $RPM_BUILD_ROOT%{_libdir}"
+%py3_build
+%py3_install
+[ -f "$RPM_BUILD_ROOT/%{python3_sitearch}/ovs/_json$(python3-config --extension-suffix)" ]
+)
+popd
+
+rm -rf $RPM_BUILD_ROOT/%{_datadir}/openvswitch/python/
+
+install -d -m 0755 $RPM_BUILD_ROOT/%{_sharedstatedir}/openvswitch
+
+install -d -m 0755 $RPM_BUILD_ROOT%{_prefix}/lib/firewalld/services/
+
+install -p -D -m 0755 \
+        rhel/usr_share_openvswitch_scripts_ovs-systemd-reload \
+        $RPM_BUILD_ROOT%{_datadir}/openvswitch/scripts/ovs-systemd-reload
+
+touch $RPM_BUILD_ROOT%{_sysconfdir}/openvswitch/conf.db
+# The db needs special permissions, as IPsec pre-shared keys are stored in it.
+chmod 0640 $RPM_BUILD_ROOT%{_sysconfdir}/openvswitch/conf.db
+
+touch $RPM_BUILD_ROOT%{_sysconfdir}/openvswitch/system-id.conf
+
+# remove unpackaged files
+rm -f $RPM_BUILD_ROOT/%{_bindir}/ovs-benchmark \
+        $RPM_BUILD_ROOT/%{_bindir}/ovs-docker \
+        $RPM_BUILD_ROOT/%{_bindir}/ovs-parse-backtrace \
+        $RPM_BUILD_ROOT/%{_bindir}/ovs-testcontroller \
+        $RPM_BUILD_ROOT/%{_sbindir}/ovs-vlan-bug-workaround \
+        $RPM_BUILD_ROOT/%{_mandir}/man1/ovs-benchmark.1* \
+        $RPM_BUILD_ROOT/%{_mandir}/man8/ovs-testcontroller.* \
+        $RPM_BUILD_ROOT/%{_mandir}/man8/ovs-vlan-bug-workaround.8*
+
+%if ! %{with ipsec}
+rm -f $RPM_BUILD_ROOT/%{_datadir}/openvswitch/scripts/ovs-monitor-ipsec
+%endif
+
+# remove unpackaged OVN files
+rm -f $RPM_BUILD_ROOT%{_bindir}/ovn*
+rm -f $RPM_BUILD_ROOT%{_mandir}/man1/ovn*
+rm -f $RPM_BUILD_ROOT%{_mandir}/man5/ovn*
+rm -f $RPM_BUILD_ROOT%{_mandir}/man7/ovn*
+rm -f $RPM_BUILD_ROOT%{_mandir}/man8/ovn*
+rm -f $RPM_BUILD_ROOT%{_datadir}/openvswitch/ovn*
+rm -f $RPM_BUILD_ROOT%{_datadir}/openvswitch/scripts/ovn*
+rm -f $RPM_BUILD_ROOT%{_includedir}/ovn/*
+
+%check
+%if %{with check}
+    pushd build-static
+    touch resolv.conf
+    export OVS_RESOLV_CONF=$(pwd)/resolv.conf
+    if make check TESTSUITEFLAGS='%{_smp_mflags}' ||
+       make check TESTSUITEFLAGS='--recheck'; then :;
+    else
+        cat tests/testsuite.log
+        exit 1
+    fi
+    popd
+%endif
+%if %{with check_datapath_kernel}
+    pushd build-static
+    if make check-kernel RECHECK=yes; then :;
+    else
+        cat tests/system-kmod-testsuite.log
+        exit 1
+    fi
+    popd
+%endif
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%preun
+%if 0%{?systemd_preun:1}
+    %systemd_preun openvswitch.service
+%else
+    if [ $1 -eq 0 ] ; then
+    # Package removal, not upgrade
+        /bin/systemctl --no-reload disable openvswitch.service >/dev/null 2>&1 || :
+        /bin/systemctl stop openvswitch.service >/dev/null 2>&1 || :
+    fi
+%endif
+
+%pre
+%sysusers_create_compat %{SOURCE2}
+%ifarch %{dpdkarches}
+%sysusers_create_compat %{SOURCE3}
+%endif
+
+%post
+if [ $1 -eq 1 ]; then
+    sed -i 's:^#OVS_USER_ID=:OVS_USER_ID=:' /etc/sysconfig/openvswitch
+
+%ifarch %{dpdkarches}
+    sed -i \
+        's@OVS_USER_ID="openvswitch:openvswitch"@OVS_USER_ID="openvswitch:hugetlbfs"@'\
+        /etc/sysconfig/openvswitch
+%endif
+fi
+chown -R openvswitch:openvswitch /etc/openvswitch
+
+%if 0%{?systemd_post:1}
+    %systemd_post openvswitch.service
+%else
+    # Package install, not upgrade
+    if [ $1 -eq 1 ]; then
+        /bin/systemctl daemon-reload >/dev/null || :
+    fi
+%endif
+
+%postun
+%if 0%{?systemd_postun:1}
+    %systemd_postun openvswitch.service
+%else
+    /bin/systemctl daemon-reload >/dev/null 2>&1 || :
+%endif
+
+%triggerun -- openvswitch < 2.5.0-22.git20160727%{?dist}
+# Old rpm versions restart the service in %postun, but
+# due to systemd some preparation is needed.
+if systemctl is-active openvswitch >/dev/null 2>&1 ; then
+    /usr/share/openvswitch/scripts/ovs-ctl stop >/dev/null 2>&1 || :
+    systemctl daemon-reload >/dev/null 2>&1 || :
+    systemctl stop openvswitch ovsdb-server ovs-vswitchd >/dev/null 2>&1 || :
+    systemctl start openvswitch >/dev/null 2>&1 || :
+fi
+exit 0
+
+%files -n python3-%{pkgname}
+%{python3_sitearch}/ovs
+%{python3_sitearch}/ovs-*.egg-info
+%doc LICENSE
+
+%files test
+%{_bindir}/ovs-pcap
+%{_bindir}/ovs-tcpdump
+%{_bindir}/ovs-tcpundump
+%{_datadir}/openvswitch/scripts/usdt/*
+%{_mandir}/man1/ovs-pcap.1*
+%{_mandir}/man8/ovs-tcpdump.8*
+%{_mandir}/man1/ovs-tcpundump.1*
+%{_bindir}/ovs-test
+%{_bindir}/ovs-vlan-test
+%{_bindir}/ovs-l3ping
+%{_mandir}/man8/ovs-test.8*
+%{_mandir}/man8/ovs-vlan-test.8*
+%{_mandir}/man8/ovs-l3ping.8*
+%{python3_sitelib}/ovstest
+
+%files devel
+%{_libdir}/*.so
+%{_libdir}/pkgconfig/*.pc
+%{_includedir}/openvswitch/*
+%{_includedir}/openflow/*
+%exclude %{_libdir}/*.a
+%exclude %{_libdir}/*.la
+
+%if 0%{?rhel} == 8 || 0%{?fedora} > 28
+%files -n network-scripts-%{name}
+%{_sysconfdir}/sysconfig/network-scripts/ifup-ovs
+%{_sysconfdir}/sysconfig/network-scripts/ifdown-ovs
+%endif
+
+%files
+%defattr(-,openvswitch,openvswitch)
+%dir %{_sysconfdir}/openvswitch
+%{_sysconfdir}/openvswitch/default.conf
+%config %ghost %verify(not owner group md5 size mtime) %{_sysconfdir}/openvswitch/conf.db
+%ghost %attr(0600,-,-) %verify(not owner group md5 size mtime) %{_sysconfdir}/openvswitch/.conf.db.~lock~
+%config %ghost %{_sysconfdir}/openvswitch/system-id.conf
+%defattr(-,root,root)
+%config(noreplace) %verify(not md5 size mtime) %{_sysconfdir}/sysconfig/openvswitch
+%{_sysconfdir}/bash_completion.d/ovs-appctl-bashcomp.bash
+%{_sysconfdir}/bash_completion.d/ovs-vsctl-bashcomp.bash
+%config(noreplace) %{_sysconfdir}/logrotate.d/openvswitch
+%{_unitdir}/openvswitch.service
+%{_unitdir}/ovsdb-server.service
+%{_unitdir}/ovs-vswitchd.service
+%{_unitdir}/ovs-delete-transient-ports.service
+%{_datadir}/openvswitch/scripts/openvswitch.init
+%{_datadir}/openvswitch/scripts/ovs-check-dead-ifs
+%{_datadir}/openvswitch/scripts/ovs-lib
+%{_datadir}/openvswitch/scripts/ovs-save
+%{_datadir}/openvswitch/scripts/ovs-vtep
+%{_datadir}/openvswitch/scripts/ovs-ctl
+%{_datadir}/openvswitch/scripts/ovs-kmod-ctl
+%{_datadir}/openvswitch/scripts/ovs-systemd-reload
+%config %{_datadir}/openvswitch/local-config.ovsschema
+%config %{_datadir}/openvswitch/vswitch.ovsschema
+%config %{_datadir}/openvswitch/vtep.ovsschema
+%{_bindir}/ovs-appctl
+%{_bindir}/ovs-dpctl
+%{_bindir}/ovs-ofctl
+%{_bindir}/ovs-vsctl
+%{_bindir}/ovsdb-client
+%{_bindir}/ovsdb-tool
+%{_bindir}/ovs-pki
+%{_bindir}/vtep-ctl
+%{_libdir}/*.so.*
+%{_sbindir}/ovs-vswitchd
+%{_sbindir}/ovsdb-server
+%{_mandir}/man1/ovsdb-client.1*
+%{_mandir}/man1/ovsdb-server.1*
+%{_mandir}/man1/ovsdb-tool.1*
+%{_mandir}/man5/ovsdb.5*
+%{_mandir}/man5/ovsdb.local-config.5*
+%{_mandir}/man5/ovsdb-server.5.*
+%{_mandir}/man5/ovs-vswitchd.conf.db.5*
+%{_mandir}/man5/vtep.5*
+%{_mandir}/man7/ovsdb-server.7*
+%{_mandir}/man7/ovsdb.7*
+%{_mandir}/man7/ovs-actions.7*
+%{_mandir}/man7/ovs-fields.7*
+%{_mandir}/man8/vtep-ctl.8*
+%{_mandir}/man8/ovs-appctl.8*
+%{_mandir}/man8/ovs-ctl.8*
+%{_mandir}/man8/ovs-dpctl.8*
+%{_mandir}/man8/ovs-kmod-ctl.8.*
+%{_mandir}/man8/ovs-ofctl.8*
+%{_mandir}/man8/ovs-pki.8*
+%{_mandir}/man8/ovs-vsctl.8*
+%{_mandir}/man8/ovs-vswitchd.8*
+%{_mandir}/man8/ovs-parse-backtrace.8*
+%{_udevrulesdir}/91-vfio.rules
+%doc LICENSE NOTICE README.rst NEWS rhel/README.RHEL.rst
+%ifarch %{dpdkarches}
+%doc %{dpdkdir}/README.DPDK-PMDS
+%attr(750,openvswitch,hugetlbfs) %verify(not owner group) /var/log/openvswitch
+%else
+%attr(750,openvswitch,openvswitch) %verify(not owner group) /var/log/openvswitch
+%endif
+/var/lib/openvswitch
+%ghost %attr(755,root,root) %verify(not owner group) %{_rundir}/openvswitch
+%{_datadir}/openvswitch/bugtool-plugins/
+%{_datadir}/openvswitch/scripts/ovs-bugtool-*
+%{_bindir}/ovs-dpctl-top
+%{_sbindir}/ovs-bugtool
+%{_mandir}/man8/ovs-dpctl-top.8*
+%{_mandir}/man8/ovs-bugtool.8*
+%if (0%{?rhel} && 0%{?rhel} <= 7) || (0%{?fedora} && 0%{?fedora} < 29)
+%{_sysconfdir}/sysconfig/network-scripts/ifup-ovs
+%{_sysconfdir}/sysconfig/network-scripts/ifdown-ovs
+%endif
+%{_sysusersdir}/openvswitch.conf
+%ifarch %{dpdkarches}
+%{_sysusersdir}/openvswitch-hugetlbfs.conf
+%endif
+
+%if %{with ipsec}
+%files ipsec
+%{_datadir}/openvswitch/scripts/ovs-monitor-ipsec
+%{_unitdir}/openvswitch-ipsec.service
+%endif
+
+%changelog
+* Thu Oct 31 2024 Open vSwitch CI <ovs-ci@redhat.com> - 3.3.0-10
+- Merging upstream branch-3.3 [RH git: abdbdd9326]
+    Commit list:
+    f1b70cb48d meta-flow: Fix nw_frag mask while parsing from string.
+    1fbb050141 ci: Remove dependency on libpcap.
+    a9db0f3177 github: Remove ASLR entropy workaround.
+    4f7c8c2592 bond: Always revalidate unbalanced bonds when active member changes. (FDP-845)
+    a9e40e3227 ofproto-dpif-upcall: Fix redundant mirror on metadata modification. (FDP-699)
+    5d6842f424 dpdk: Use DPDK 23.11.2 release for OVS 3.3.
+    618944a79f ofproto-dpif: Improve load balancing in dp_hash select groups. (FDP-826)
+    5802dd8ba7 Revert "ci: Use sarif-tools v3.0.1 due to issues in earlier versions."
+    3974c52339 ci: Use sarif-tools v3.0.1 due to issues in earlier versions.
+    c4a5a2f7ee AUTHORS: Add Jun Wang.
+    034145ea5e netdev-dpdk: Disable outer udp checksum offload for txgbe driver.
+    5f5c3d9229 selinux: Update policy file.
+    2d14266cb9 ofproto-dpif-mirror: Always revalidate on mirror update. (FDP-788)
+    049d00f683 github: Skip FTP SNAT orig tuple tests due to broken Ubuntu kernel.
+    bb49e027c1 vconn: Always properly free flow stats reply.
+    a0e4171bc9 mcast-snooping: Properly check group_get_lru return code.
+    58ff239470 ovsdb-idl: Fix IDL memory leak.
+    5984640f02 ofproto/bond: Preserve active bond member over restarts.
+    c64206c6e0 ofproto-dpif-upcall: Avoid stale ukeys leaks.
+    823b3dd823 ci: Use previous sarif-tools release due to issue in latest release.
+    a6b49a6a28 userspace: Correctly set ip offload flag in native tunneling.
+    09cfc0ba9e Prepare for 3.3.3.
+    dfe601bbc1 Set release date for 3.3.2.
+    a097f4a39b docs: Fix argument formatting in ovs-appctl(8) man page.
+    1e6c691699 dp-packet: Correct IPv4 checksum calculation. ()
+    c1c70e0838 netdev-linux: Fix unaligned access to rpl_rtnl_link_stats64.
+    b176cea4f4 route-table: Fix another UBsan warning about pointer type.
+    9fac5b970e netdev-linux: Do not offload IP checksum.
+    73c563efc7 tunnel, tests: Add test for mirroring over tunnels.
+    81e64f47d7 ofproto-dpif-xlate: Add a recursion limit to tunnel address lookup.
+
+
+* Thu Oct 31 2024 Open vSwitch CI <ovs-ci@redhat.com> - 3.3.0-9
+- Merging dpdk subtree [RH git: 6990948aec]
+    Commit list:
+    e8eb14e00d version: 23.11.2
+    8401a3e84b version: 23.11.2-rc2
+    50e50f1d99 net/ice/base: fix preparing PHY for timesync command
+    7302cab07c net/nfp: fix firmware abnormal cleanup
+    92c5aa4387 net/nfp: forbid offload flow rules with empty action list
+    e9c4dbd5be crypto/openssl: make per-QP auth context clones
+    729e0848b7 examples: fix port ID restriction
+    3fc9eb2f4f examples: fix lcore ID restriction
+    776c4e37ee doc: add baseline mode in l3fwd-power guide
+    8437250f9f doc: fix DMA performance test invocation
+    d01561713d doc: describe mlx5 HWS actions order
+    1ef3097094 doc: add power uncore in API index
+    09ccd86606 doc: fix mbuf flags
+    613a4879b4 examples/ipsec-secgw: revert SA salt endianness
+    8b87ae54ed doc: remove reference to mbuf pkt field
+    938afb0ab2 examples: fix queue ID restriction
+    80da81b6f9 net/ice/base: fix temporary failures reading NVM
+    034f533709 net/hns3: fix uninitialized variable in FEC query
+    5fa2084ac3 examples/l3fwd: fix crash on multiple sockets
+    1708229729 examples/l3fwd: fix crash in ACL mode for mixed traffic
+    bef8327055 bus/vdev: fix device reinitialization
+    971d455e59 malloc: fix multi-process wait condition handling
+    b53fb811c0 power: fix number of uncore frequencies
+    52bf7488c3 app/pdump: handle SIGTERM and SIGHUP
+    6bed8020a3 app/dumpcap: handle SIGTERM and SIGHUP
+    eacf416207 dma/hisilicon: remove support for HIP09 platform
+    a5375f4492 bus/pci: fix FD in secondary process
+    c076f02992 bus/pci: fix UIO resource mapping in secondary process
+    ca12727f09 app/testpmd: fix build on signed comparison
+    b858eb7a55 net/gve: fix Tx queue state on queue start
+    7ff9eeeb71 ethdev: fix device init without socket-local memory
+    890b02b907 app/testpmd: add postpone option to async flow destroy
+    fbd7ac3b83 net/netvsc: use ethdev API to set VF MTU
+    e8746659b2 ethdev: fix GENEVE option item conversion
+    e2b3b0a5d4 net/ark: fix index arithmetic
+    7deaec6ac8 net/hns3: check Rx DMA address alignmnent
+    6bf4a626e4 net/mlx5: fix disabling E-Switch default flow rules
+    3b60ef3db6 common/mlx5: remove unneeded field when modify RQ table
+    1988c32194 net/mlx5: fix uplink port probing in bonding mode
+    07ad92c1a6 net/mlx5: fix end condition of reading xstats
+    b7c9a02306 net/mlx5/hws: remove unused variable
+    1163643a1e net/mlx5/hws: fix port ID on root item convert
+    4a80ab31f0 net/mlx5/hws: fix deletion of action vport
+    dcd02c715e net/mlx5/hws: fix check of range templates
+    3c9aff8fbf net/mlx5/hws: fix memory leak in modify header
+    85eeb293b3 net/mlx5: fix MTU configuration
+    c1792007ff net/mlx5: fix Arm build with GCC 9.1
+    a8331ab8b2 net/mlx5: fix shared Rx queue data access race
+    15e2b0e736 net/ice: fix return value for raw pattern parsing
+    17e800edd1 net/ice: fix memory leaks in raw pattern parsing
+    8f23521ad7 common/cnxk: fix integer overflow
+    a408bd0bcb crypto/qat: fix placement of OOP offset
+    b2acb5218f test/crypto: fix modex comparison
+    d308cefc96 test/crypto: fix asymmetric capability test
+    adffcf4383 test/crypto: remove unused stats in setup
+    4b0d806bab doc: fix typo in l2fwd-crypto guide
+    0e19bfa703 crypto/qat: fix log message typo
+    5d66d4a3f0 test/crypto: fix allocation comment
+    9fef0db81b crypto/ipsec_mb: fix function comment
+    c6111cb5fd crypto/qat: fix GEN4 write
+    f707532cde net/nfp: fix disabling 32-bit build
+    de0c58c4b8 doc: update AF_XDP device plugin repository
+    811fcdf23a net/nfp: adapt reverse sequence card
+    ea7085704a net/nfp: remove unneeded logic for VLAN layer
+    4c45c694ca doc: update metadata description in nfp guide
+    174b2b5a9a net/nfp: fix getting firmware version
+    241976029a net/nfp: remove redundant function call
+    c90d304f0e net/gve: fix RSS hash endianness in DQO format
+    316baf3f0b net/ena: fix checksum handling
+    ffba3914ad net/ena: fix return value check
+    db4ca6f1cb net/ena: fix bad checksum handling
+    5453c7a0b0 net/nfp: fix repeat disable port
+    51d6936232 net/nfp: fix dereference of null pointer
+    d0d759188e net/nfp: disable ctrl VNIC queues on close
+    c22c079e1c net/ionic: fix mbuf double-free when emptying array
+    2e84f93745 net/nfp: fix flow mask table entry
+    8df473a653 net/nfp: fix allocation of switch domain
+    2a50559bfd net/netvsc: fix MTU set
+    e2ef427581 net/nfp: fix IPv6 TTL and DSCP flow action
+    3762e1ed03 net/vmxnet3: fix init logs
+    fdc5f6074b net/txgbe: fix Rx interrupt
+    2dfb54ff33 net/ngbe: fix memory leaks
+    638e12515a net/txgbe: fix memory leaks
+    d0a84e43b3 net/ngbe: fix MTU range
+    0fe77d1de3 net/txgbe: fix MTU range
+    db50b74b3c net/ngbe: fix hotplug remove
+    5de5204582 net/txgbe: fix hotplug remove
+    6d224307f6 net/ngbe: keep PHY power down while device probing
+    658060fffe net/ngbe: add special config for YT8531SH-CA PHY
+    85bd339e19 net/txgbe: fix VF promiscuous and allmulticast
+    ea62ead19d net/txgbe: reconfigure more MAC Rx registers
+    55d8be2055 net/txgbe: restrict configuration of VLAN strip offload
+    ed2250e120 net/txgbe: fix Tx hang on queue disable
+    6ea637699a net/txgbe: fix flow filters in VT mode
+    842a7baf9c net/txgbe: fix tunnel packet parsing
+    708d5a261b net/mana: fix uninitialized return value
+    e113512712 app/testpmd: fix parsing for connection tracking item
+    11b6493c45 doc: remove empty section from testpmd guide
+    f11711212c app/testpmd: handle IEEE1588 init failure
+    e7f8c62dfc net/cpfl: fix 32-bit build
+    ec9de9db2d net/cpfl: add checks on control queue messages
+    39b2b4c7de common/idpf: fix PTP message validation
+    e0f453462f common/idpf: fix flex descriptor mask
+    bd5b88d172 net/ice/base: fix masking when reading context
+    67a40ce4ef net/ice/base: fix board type definition
+    5167b4d2d3 net/ice/base: fix potential TLV length overflow
+    abd055ea63 net/ice/base: fix check for existing switch rule
+    fddfbdbf49 net/ice/base: fix return type of bitmap hamming weight
+    c9eae16d5e net/ice/base: fix GCS descriptor field offsets
+    8bc9ae6b59 net/ice/base: fix size when allocating children arrays
+    1257bf9a7c net/ice/base: fix sign extension
+    2458257e56 net/ice/base: fix resource leak
+    7a6c0e6212 net/ice/base: fix memory leak in firmware version check
+    87d7cf4082 net/ice/base: fix pointer to variable outside scope
+    aafeb830bc buildtools: fix build with clang 17 and ASan
+    a4e8a4f488 fbarray: fix finding for unaligned length
+    d88beb497f net/mlx5: fix start without duplicate flow patterns
+    77231b2598 net/dpaa: forbid MTU configuration for shared interface
+    d21248db89 bus/dpaa: remove redundant file descriptor check
+    bb85c1fd72 common/dpaax: fix node array overrun
+    90c9f938e5 common/dpaax: fix IOVA table cleanup
+    0b4bc3a5d1 bus/dpaa: fix memory leak in bus scan
+    d36efdb2cd bus/dpaa: fix bus scan for DMA devices
+    daa0d9edd1 app/testpmd: fix help string of BPF load command
+    7353cb767f dma/idxd: fix setup with Ubuntu 24.04
+    f563086258 eal/linux: lower log level on allocation attempt failure
+    8ccf607fad devtools: fix symbol listing
+    997166395e fbarray: fix lookbehind ignore mask handling
+    8baf379032 fbarray: fix lookahead ignore mask handling
+    24869bf93c fbarray: fix incorrect lookbehind behavior
+    5e66590575 fbarray: fix incorrect lookahead behavior
+    427fa07238 examples/ipsec-secgw: fix SA salt endianness
+    d0d02993de crypto/dpaa2_sec: fix event queue user context
+    6e2def6ca9 crypto/dpaa_sec: fix IPsec descriptor
+    b977583692 common/dpaax/caamflib: fix PDCP AES-AES watchdog error
+    5089ef6c28 common/dpaax/caamflib: fix PDCP-SDAP watchdog error
+    4af94ab6e2 crypto/openssl: set cipher padding once
+    4f8c97e941 crypto/openssl: make per-QP cipher context clones
+    ee88b9496c crypto/openssl: optimize 3DES-CTR context init
+    eb6a1a85e6 crypto/openssl: fix GCM and CCM thread unsafe contexts
+    cc8ca588a0 examples/fips_validation: fix dereference and out-of-bound
+    9b3e235581 cryptodev: validate crypto callbacks from next node
+    578ee20720 cryptodev: fix build without crypto callbacks
+    fbb350108f crypto/cnxk: fix minimal input normalization
+    7978b75d1b test/crypto: validate modex from first non-zero
+    ede34a4359 app/crypto-perf: fix result for asymmetric
+    7469762567 app/crypto-perf: remove redundant local variable
+    e585a0db98 crypto/cnxk: fix ECDH public key verification
+    6034788bd6 crypto/cnxk: fix out-of-bound access
+    ea90bc49fc net/virtio-user: fix control queue allocation for non-vDPA
+    15d3dfa07a baseband/la12xx: forbid secondary process
+    f798848548 telemetry: fix connection parameter parsing
+    713520f91d bpf: fix load hangs with six IPv6 addresses
+    59523f029e bpf: fix MOV instruction evaluation
+    c9071e44b7 mbuf: fix dynamic fields copy
+    c13a819a44 graph: fix mcore dispatch walk
+    777f0bc1a5 vdpa/sfc: remove dead code
+    583796e298 dmadev: fix structure alignment
+    d859544e45 common/cnxk: fix flow aging on application exit
+    c343cb088f app/bbdev: fix interrupt tests
+    0c99a3d922 app/bbdev: fix MLD output size computation
+    179f1c6e6b app/bbdev: fix TB logic
+    acdd88c4f9 build: use builtin helper for python dependencies
+    edfa6a87c8 config: fix warning for cross build with meson >= 1.3.0
+    151a54d0b6 v23.11.2-rc1
+    61b7d1f4c1 doc: fix link to hugepage mapping from Linux guide
+    0e68080faf telemetry: lower log level on socket error
+    4fe42b5bd5 test/crypto: fix enqueue/dequeue callback case
+    4dc08a4d14 test/crypto: fix RSA cases in QAT suite
+    f1e088abb9 net/mlx5/hws: fix matcher reconnect
+    db0c8afc11 net/mlx5: fix crash on counter pool destroy
+    59e27c048f net/mlx5: support jump in meter hierarchy
+    15d0fcf1ac net/mlx5: fix access to flow template operations
+    7cc4f4359e net/mlx5: break flow resource release loop
+    8f7a4c4861 net/mlx5: fix flow template indirect action failure
+    0caa8332a4 net/mlx5: fix hash Rx queue release in flow sample
+    5546ccbefe net/mlx5: fix indexed pool with invalid index
+    c12bd3ffbf net/mlx5/hws: fix action template dump
+    096734a9b5 net/mlx5/hws: set default miss when replacing table
+    df8e365511 net/mlx5/hws: extend tag saving for match and jumbo
+    9b84b09d4a net/mlx5/hws: add template match none flag
+    8b2eb11323 net/mlx5/hws: fix spinlock release on context open
+    3811ef8d25 net/mlx5/hws: fix function comment
+    21c0e76d5a common/mlx5: fix PRM structs
+    aec70880d8 net/mlx5/hws: decrease log level for creation failure
+    b4d5b769a9 common/mlx5: fix unsigned/signed mismatch
+    5405ea2f7a hash: fix RCU reclamation size
+    2f62695370 bpf: disable on 32-bit x86
+    61c4175079 graph: fix stats retrieval while destroying a graph
+    e022af0b88 graph: fix ID collisions
+    4541f5810c net/cnxk: fix promiscuous state after MAC change
+    144a806a1b net/cnxk: fix outbound security with higher packet burst
+    01d4a05a9f net/cnxk: update SA userdata and keep original cookie
+    802a3a7d74 net/cnxk: fix extbuf handling for multisegment packet
+    67a8e5ba52 common/cnxk: fix segregation of logs based on module
+    c99b186412 common/cnxk: fix flow aging cleanup
+    af85590165 net/cnxk: fix RSS config
+    1e2d3032e2 net/ixgbe/base: fix PHY ID for X550
+    b371343ddc net/ixgbe/base: fix 5G link speed reported on VF
+    3d128f41b7 net/ixgbe/base: revert advertising for X550 2.5G/5G
+    7ec2441a3e net/e1000/base: fix link power down
+    4c8436297f net/ixgbe: do not create delayed interrupt handler twice
+    f683115cef net/ixgbe: do not update link status in secondary process
+    0fc2747f6c net/ice: fix VLAN stripping in double VLAN mode
+    729144bdae net/fm10k: fix cleanup during init failure
+    bb9096e474 net/iavf: fix VF reset when using DCF
+    b6e445d0d4 eventdev/crypto: fix opaque field handling
+    32c7c20981 event/sw: fix warning from useless snprintf
+    614773e8c7 baseband/acc: fix memory barrier
+    ac1bd05172 net/virtio: fix MAC table update
+    eb821e0ed1 net/virtio-user: fix control queue allocation
+    6e4de6f224 net/virtio-user: fix shadow control queue notification init
+    1d824e440e net/virtio-user: fix control queue destruction
+    2fdb8840ee vhost: cleanup resubmit info before inflight setup
+    8c020a6f4d vhost: fix build with GCC 13
+    1af612de7e hash: check name when creating a hash
+    9616fce23b hash: fix return code description in Doxygen
+    44bcfd6b38 net/nfp: fix xstats for multi PF firmware
+    8bf40f1d11 app/testpmd: fix lcore ID restriction
+    80c5c9789b net/iavf: remove outer UDP checksum offload for X710 VF
+    1970a0ca45 net/i40e: fix outer UDP checksum offload for X710
+    e8c2cccfbd net: fix outer UDP checksum in Intel prepare helper
+    dda814c495 app/testpmd: fix outer IP checksum offload
+    4d57f72a5b net/ice: fix check for outer UDP checksum offload
+    c61b23292e net/axgbe: fix linkup in PHY status
+    b7eddfc563 net/axgbe: delay AN timeout during KR training
+    388f022054 net/axgbe: fix Tx flow on 30H HW
+    e3632f6bbb net/axgbe: check only minimum speed for cables
+    141a4ff6d5 net/axgbe: fix connection for SFP+ active cables
+    4eda15db34 net/axgbe: fix SFP codes check for DAC cables
+    d72913dcad net/axgbe: enable PLL control for fixed PHY modes only
+    3cf40bf1c3 net/axgbe: disable RRC for yellow carp devices
+    ec06a8c3d4 net/axgbe: disable interrupts during device removal
+    a2be089e35 net/axgbe: update DMA coherency values
+    17290bc90b net/axgbe: fix fluctuations for 1G Bel Fuse SFP
+    a61b3c008a net/axgbe: reset link when link never comes back
+    498a5720e3 net/axgbe: fix MDIO access for non-zero ports and CL45 PHYs
+    dea5481a8f net/tap: fix file descriptor check in isolated flow
+    c22f99f86c net/nfp: fix configuration BAR
+    ab2e5cf865 net/nfp: fix resource leak in secondary process
+    442ca8b2ec net/af_xdp: remove unused local statistic
+    b0a4771394 net/af_xdp: fix stats reset
+    fdda0d4d83 net/af_xdp: count mbuf allocation failures
+    c6891273d3 net/af_xdp: fix port ID in Rx mbuf
+    60f2e572ea doc: fix testpmd ring size command
+    bd69f5a43d net/af_packet: align Rx/Tx structs to cache line
+    e9da7f4655 net/vmxnet3: add missing register command
+    e754c1c6d8 ethdev: fix strict aliasing in link up
+    b1be619a77 net/af_xdp: fix multi-interface support for k8s
+    3bbccce3a9 doc: fix AF_XDP device plugin howto
+    02d2453afc net/hns3: disable SCTP verification tag for RSS hash input
+    6e37e43fe3 net/hns3: fix variable overflow
+    16a24e9f99 net/hns3: fix double free for Rx/Tx queue
+    78e4da4546 net/hns3: fix Rx timestamp flag
+    c491084749 net/hns3: fix offload flag of IEEE 1588
+    beda536606 app/testpmd: fix indirect action flush
+    dae924d0a2 net/bonding: fix failover time of LACP with mode 4
+    453e0c281b net/nfp: fix representor port queue release
+    83149f4fea latencystats: fix literal float suffix
+    50b99c8b12 eal/windows: install sched.h file
+    11af26df38 net/virtio-user: add memcpy check
+    eb02060534 pcapng: add memcpy check
+    2dd9223248 eal/unix: support ZSTD compression for firmware
+    09e70301ee eal: fix type in destructor macro for MSVC
+    338632b663 bus/pci: fix build with musl 1.2.4 / Alpine 3.19
+    a6ec5765cf version: 23.11.1
+    51783c9b60 version: 23.11.1-rc2
+    152600d10e net/mlx5/hws: fix tunnel protocol checks
+    67f3179f5c net/mlx5: fix rollback on failed flow configure
+    750d393405 net/mlx5: fix async flow create error handling
+    41c5baeffc net/mlx5/hws: fix port ID for root table
+    cfa8a4cb90 net/ena/base: fix metrics excessive memory consumption
+    a20a3c1129 dts: strip whitespaces from stdout and stderr
+    abc6816134 examples/ipsec-secgw: fix typo in error message
+    cf631810bf test/cfgfile: fix typo in error messages
+    f48e923b46 test/power: fix typo in error message
+    ed3a625fe6 doc: fix typo in packet framework guide
+    e85092f875 doc: fix typo in profiling guide
+    df1119d4a9 net/mlx5: fix sync flow meter action
+    aeebcd33c0 net/mlx5/hws: fix memory access in L3 decapsulation
+    0291a1f49e net/igc: fix timesync disable
+    07fde8240d net/vmxnet3: ignore Rx queue interrupt setup on FreeBSD
+    d2d309e5cf net/ena: fix mbuf double free in fast free mode
+    f7d1b5cff3 app/testpmd: fix auto-completion for indirect action list
+    4aa1a64204 net/nfp: fix uninitialized variable
+    a5ac9baa7a doc: fix default IP fragments maximum in programmer guide
+    f7909e3c75 examples/ipsec-secgw: fix Rx queue ID in Rx callback
+    0a44e64c41 net/bnxt: fix number of Tx queues being created
+    5a8ca987e9 net/mlx5: fix warning about copy length
+    2864fd3102 net/mlx5: fix drop action release timing
+    9aba4dee4d net/mlx5: fix age position in hairpin split
+    fc12ccc047 net/mlx5: prevent ioctl failure log flooding
+    8117b4b2f7 net/mlx5: fix flow configure validation
+    b1749f6ed2 net/mlx5: fix template clean up of FDB control flow rule
+    3735e8e88c net/mlx5/hws: fix direct index insert on depend WQE
+    859bafedf3 net/mlx5: fix DR context release ordering
+    9a9f0acac6 net/mlx5: fix IP-in-IP tunnels recognition
+    c551015ebb net/mlx5: remove duplication of L3 flow item validation
+    a8b06881d9 net/mlx5: fix meter policy priority
+    78d38b5d67 net/mlx5: fix VLAN ID in flow modify
+    f01fd28181 doc: update link to Windows DevX in mlx5 guide
+    af41defcf7 net/mlx5: fix non-masked indirect list meter translation
+    1994df02c9 net/mlx5: fix indirect action async job initialization
+    7192f0ed82 net/mlx5: fix sync meter processing in HWS
+    50eb03f8d3 net/mlx5: fix HWS meter actions availability
+    fe697bbce3 net/hns3: support new device
+    97089aa02e app/testpmd: fix error message for invalid option
+    92c08367ea app/testpmd: fix burst option parsing
+    6c2174ad80 app/testpmd: fix --stats-period option check
+    0884b3bd36 net/nfp: fix initialization failure flow
+    dd48153b15 net/nfp: fix switch domain free check
+    aa850bad00 net/ena/base: restructure interrupt handling
+    e1abac3de0 net/ena/base: limit exponential backoff
+    2fa8497bd3 net/ena: fix fast mbuf free
+    5f75adca7e net/nfp: fix IPsec data endianness
+    bec3117648 net/nfp: fix getting firmware VNIC version
+    5853ebb3b9 doc: add link speeds configuration in features table
+    15952c71eb app/testpmd: fix async indirect action list creation
+    166c5df810 doc: add traffic manager in features table
+    cadb90f711 net/hns3: enable PFC for all user priorities
+    72d3dfa9de crypto/qat: fix crash with CCM null AAD pointer
+    90d0e13d7d examples/ipsec-secgw: fix cryptodev to SA mapping
+    9796ac2ab8 build: pass cflags in subproject
+    7105c8a299 net/virtio: fix vDPA device init advertising control queue
+    587143897e examples/l3fwd: fix Rx queue configuration
+    2f8836901c dts: fix smoke tests driver regex
+    ce95b8c9cd examples/l3fwd: fix Rx over not ready port
+    10296d5f50 examples/packet_ordering: fix Rx with reorder mode disabled
+    e8dccbca30 test: do not count skipped tests as executed
+    5c5df0f292 test: assume C source files are UTF-8 encoded
+    de3976eb27 test/mbuf: fix external mbuf case with assert enabled
+    ced51dd5ef config: fix CPU instruction set for cross-build
+    6148604a43 bus/vdev: fix devargs in secondary process
+    ef4c8a57f3 test: fix probing in secondary process
+    272feb8eb9 net/mlx5: remove device status check in flow creation
+    a10a65c396 net/mlx5: fix flow action template expansion
+    0c31d1220f net/mlx5: fix counters map in bonding mode
+    091234f3cb net/mlx5: fix flow counter cache starvation
+    b90c42e4ff net/mlx5: fix parameters verification in HWS table create
+    0198b11a11 net/mlx5: fix VLAN handling in meter split
+    86c66608c2 net/mlx5/hws: enable multiple integrity items
+    ca1084cd48 net/mlx5: fix HWS registers initialization
+    527857d5c2 net/mlx5: fix connection tracking action validation
+    1d65510ff6 net/mlx5: fix conntrack action handle representation
+    a5d0545e5d net/mlx5: fix condition of LACP miss flow
+    17f644b4a8 net/mlx5/hws: fix VLAN inner type
+    99be466799 net/mlx5: prevent querying aged flows on uninit port
+    bfa6cbba4c net/mlx5: fix error packets drop in regular Rx
+    213cb88068 net/mlx5: fix use after free when releasing Tx queues
+    a06ab8044a net/mlx5/hws: fix VLAN item in non-relaxed mode
+    21d51e8848 net/mlx5/hws: check not supported fields in VXLAN
+    b80ca5960e net/mlx5/hws: skip item when inserting rules by index
+    2368f82fd8 doc: fix aging poll frequency option in cnxk guide
+    630dbc8a92 net/cnxk: improve Tx performance for SW mbuf free
+    37256aa1bf common/cnxk: fix possible out-of-bounds access
+    9172348240 common/cnxk: remove dead code
+    9cb9b9c8a0 common/cnxk: fix link config for SDP
+    6f05d2d461 net/cnxk: fix mbuf fields in multi-segment Tx
+    a6bd2f39c1 common/cnxk: fix mbox struct attributes
+    e5450b2bba net/cnxk: add cookies check for multi-segment offload
+    0e5159a223 net/cnxk: fix indirect mbuf handling in Tx
+    6c6cd1fe53 common/cnxk: fix RSS RETA configuration
+    f4c83ba01c net/cnxk: fix MTU limit
+    3e73021b35 common/cnxk: fix Tx MTU configuration
+    e71ac13a38 net/cnxk: fix buffer size configuration
+    fbfaa5ae04 common/cnxk: remove CN9K inline IPsec FP opcodes
+    b3ef799286 net/bnx2x: fix warnings about memcpy lengths
+    2d11f389b0 net/cnxk: fix Rx packet format check condition
+    8bc81d5447 common/cnxk: fix inline device pointer check
+    dbdcd8bb85 net/ice: remove incorrect 16B descriptor read block
+    72093d3d41 net/iavf: remove incorrect 16B descriptor read block
+    542c8410cb net/i40e: remove incorrect 16B descriptor read block
+    33b5bed057 net/ixgbe: increase VF reset timeout
+    eefc0111de net/iavf: remove error logs for VLAN offloading
+    2aa5a75750 net/ixgbevf: fix RSS init for x550 NICs
+    a71de447a2 net/bnxt: fix null pointer dereference
+    1d5bfd9fdf net/tap: fix traffic control handle calculation
+    e9462a5690 net/tap: do not overwrite flow API errors
+    4a1ffc9b02 app/testpmd: fix async flow create failure handling
+    92ab2d6da2 app/testpmd: return if no packets in GRO heavy weight mode
+    61ce57b13a net/mlx5: fix modify flex item
+    4d1331e972 app/testpmd: fix flow modify tag typo
+    c2d52df599 net/af_xdp: fix leak on XSK configuration failure
+    b2dba501cf vhost: fix VDUSE device destruction failure
+    af414b892d common/qat: fix legacy flag
+    6cacd0e502 doc: fix typos in cryptodev overview
+    14c38e2db1 app/crypto-perf: add missing op resubmission
+    a1f1843146 app/crypto-perf: fix out-of-place mbuf size
+    f0cfffc636 app/crypto-perf: fix copy segment size
+    b2cd908926 eventdev/crypto: fix enqueueing
+    e5ed464710 eventdev: fix Doxygen processing of vector struct
+    2faf71417f eventdev: improve Doxygen comments on configure struct
+    7721c9f498 test/event: fix crash in Tx adapter freeing
+    524c60f422 event/dlb2: remove superfluous memcpy
+    4e8d39a298 doc: fix configuration in baseband 5GNR driver guide
+    b0b971bf66 23.11.1-rc1
+    05bea47b81 app/testpmd: fix GRO packets flush on timeout
+    cc670c7833 net/nfp: fix NFDk metadata process
+    8e79562a0e net/nfp: fix NFD3 metadata process
+    1f3f996269 net/mlx5: fix stats query crash in secondary process
+    5982bea06b net/mlx5: fix GENEVE option item translation
+    22653f6966 net/mlx5: remove GENEVE options length limitation
+    06c494555f common/mlx5: fix query sample info capability
+    10061b4047 common/mlx5: fix duplicate read of general capabilities
+    1825629903 net/mlx5: fix GENEVE TLV option management
+    c3eb862979 net/mlx5/hws: fix ESP flow matching validation
+    d25716a8a0 net/mlx5: fix flow tag modification
+    7c8f2e719a net/mlx5: fix jump action validation
+    01c5db8d99 net/cnxk: fix aged flow query
+    ecdb679c52 common/cnxk: fix VLAN check for inner header
+    4f69dab88c common/cnxk: fix mbox region copy
+    0e5798d30b net/thunderx: fix DMAC control register update
+    874fd28866 net/cnxk: fix flow RSS configuration
+    f047cea926 ml/cnxk: fix xstats calculation
+    a77f545bd7 net/bnxt: fix deadlock in ULP timer callback
+    d9e1762f07 net/bnxt: modify locking for representor Tx
+    c26cb2a644 net/bnxt: fix backward firmware compatibility
+    1fb50b8baa net/bnxt: fix speed change from 200G to 25G on Thor
+    e1f8152ede net/bnxt: fix 50G and 100G forced speed
+    3fa018b15a net/bnxt: fix array overflow
+    c3ccbda492 net/netvsc: fix VLAN metadata parsing
+    de2d362411 net: add macros for VLAN metadata parsing
+    561a3f508f net/gve: fix DQO for chained descriptors
+    de543e342a net/softnic: fix include of log library
+    edaeda9ef7 net/memif: fix extra mbuf refcnt update in zero copy Tx
+    c7b50f40e1 common/sfc_efx/base: use C11 static assert
+    216918c28c net/mana: handle MR cache expansion failure
+    6679de7a8f net/mana: fix memory leak on MR allocation
+    3fb4840708 net/bonding: fix flow count query
+    1ce60b941d net/ionic: fix device close
+    9583f634f3 net/ionic: fix RSS query
+    2ea5bde557 net/ionic: fix missing volatile type for cqe pointers
+    49b4ce1f94 app/testpmd: fix crash in multi-process forwarding
+    db4ba50b3a drivers/net: fix buffer overflow for packet types list
+    7de2520f2d net/mana: prevent values overflow returned from RDMA layer
+    84e9d93f57 net/nfp: free switch domain ID on close
+    a581442d9b net/nfp: fix device resource freeing
+    52bd57a03b net/nfp: fix device close
+    c65a2bfc26 net/vmxnet3: fix initialization on FreeBSD
+    edc0e91ffc app/testpmd: hide --bitrate-stats in help if disabled
+    ec2260423e doc: add --latencystats option in testpmd guide
+    e670d64d34 net/hns3: remove QinQ insert support for VF
+    48fe88cb3c net/nfp: fix Rx descriptor
+    e024c471f9 net/nfp: fix Rx memory leak
+    cb1cef89c4 net/hns3: fix reset level comparison
+    a3584fcde6 net/hns3: fix disable command with firmware
+    48d9241bbd net/hns3: fix VF multiple count on one reset
+    8abf8591dc net/hns3: refactor handle mailbox function
+    1be4ad59be net/hns3: refactor send mailbox function
+    f876981e54 net/hns3: refactor PF mailbox message struct
+    c1c62366ed net/hns3: refactor VF mailbox message struct
+    9cf299a873 net/memif: fix crash with Tx burst larger than 255
+    01809245ba net/af_xdp: fix memzone leak on config failure
+    c2a5c0d085 net/nfp: fix resource leak for VF
+    ddeb9d64a9 net/nfp: fix resource leak for exit of flower firmware
+    e65a677895 net/nfp: fix resource leak for exit of CoreNIC firmware
+    09e1df883a net/nfp: fix resource leak for flower firmware
+    02916557c1 net/nfp: fix resource leak for PF initialization
+    1d53d5495b net/nfp: fix resource leak for CoreNIC firmware
+    f2ee31d52c net/nfp: fix resource leak for device initialization
+    8610d2715d ethdev: fix NVGRE encap flow action description
+    d06a344524 doc: fix commands in eventdev test tool guide
+    9350513462 test/event: skip test if no driver is present
+    6ccd84cf16 event/cnxk: fix dequeue timeout configuration
+    b7fd1f73fe app/crypto-perf: fix encrypt operation verification
+    04d9dfd665 app/crypto-perf: fix data comparison
+    dfc9d45365 app/crypto-perf: fix next segment mbuf
+    8988726643 crypto/cnxk: fix CN9K ECDH public key verification
+    ea096d3e48 common/cnxk: fix memory leak in CPT init
+    f5d6c54154 examples/ipsec-secgw: fix width of variables
+    96d48b5b40 cryptodev: remove unused extern variable
+    e951bbbd18 vhost: fix memory leak in Virtio Tx split path
+    19f0cf0927 vdpa/mlx5: fix queue enable drain CQ
+    5eb1dd92dc vhost: fix deadlock during vDPA SW live migration
+    33fbddf9a4 net/virtio: remove duplicate queue xstats
+    c8e7cd6c6d vhost: fix virtqueue access check in vhost-user setup
+    692a7a0034 vhost: fix virtqueue access check in VDUSE setup
+    bbba917213 vhost: fix virtqueue access check in datapath
+    c139df70dd net: fix TCP/UDP checksum with padding data
+    c30a4f8b31 rcu: fix acked token in debug log
+    94b20c14a6 rcu: use atomic operation on acked token
+    8878a84e2e build: link static libs with whole-archive in subproject
+    5e24d7f2de build: fix linker warnings about undefined symbols
+    63241d7662 net/sfc: fix calloc parameters
+    acad009eed net/nfp: fix calloc parameters
+    238a03cdec net/bnx2x: fix calloc parameters
+    e3ae3295ee common/mlx5: fix calloc parameters
+    7c10528d68 rawdev: fix calloc parameters
+    ad881b0db8 dmadev: fix calloc parameters
+    9173abff75 eventdev: fix calloc parameters
+    600e30b793 pipeline: fix calloc parameters
+    5331b41382 examples/vhost: verify strdup return
+    88f1c9af33 examples/qos_sched: fix memory leak in args parsing
+    fbd04d26f3 test: verify strdup return
+    c830d9e2af app/testpmd: verify strdup return
+    8d5327fcfd app/dma-perf: verify strdup return
+    fefe40a5ed app/crypto-perf: verify strdup return
+    e0fd44c6ab app/pdump: verify strdup return
+    1387327fa4 app/dumpcap: verify strdup return
+    c6790ef542 net/nfp: verify strdup return
+    2044a179a7 net/failsafe: fix memory leak in args parsing
+    cedf721f24 event/cnxk: verify strdup return
+    df74839ea1 dma/idxd: verify strdup return
+    b4943e7a51 bus/vdev: verify strdup return
+    82d4ba69f2 bus/fslmc: verify strdup return
+    8c8e7aeb90 bus/dpaa: verify strdup return
+    2feed5de50 eal: verify strdup return
+    bb34c79bf4 doc: remove cmdline polling mode deprecation notice
+    5f30c47cc5 eal/x86: add AMD vendor check for TSC calibration
+    a9e8fc49d9 ci: update versions of actions in GHA
+    d7a30d20c4 gro: fix reordering of packets
+    b5c580913f telemetry: fix empty JSON dictionaries
+    cbd1c165bb telemetry: fix connected clients count
+    d7dc480432 app/graph: fix build reason
+    7872a7b0bd build: fix reasons conflict
+    54e4045c78 kernel/freebsd: fix module build on FreeBSD 14
+    943de5c27e net/ice: fix memory leaks
+    9aa2da4c02 net/iavf: fix crash on VF start
+    0a72821dd9 net/iavf: fix no polling mode switching
+    c321ba6a9d net/ice: fix tunnel TSO capabilities
+    48efa16873 net/ice: fix link update
+    c655f20c8f net/ixgbe: fix memoy leak after device init failure
+    3defa10a78 net/iavf: fix memory leak on security context error
+    ca47a866b5 net/i40e: remove redundant judgment in flow parsing
+    ec5fe01a28 dma/dpaa2: fix logtype register
+    fd6f07da94 lib: remove redundant newline from logs
+    ec5e780f09 lib: add newline in logs
+    e421bcd708 lib: use dedicated logtypes and macros
+    f1e3dec4b4 regexdev: fix logtype register
+    dc0428a5e4 hash: remove some dead code
+    2c512fe65a buildtools/cmdline: fix IP address initializer
+    3cedd8b9e4 buildtools/cmdline: fix generated code for IP addresses
+
+
+* Fri Aug 09 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-8
+- netdev-offload-tc: Reserve lower tc prio for vlan ethertype. [RH git: 6e3f0ecc78]
+    The cited commit reserved lower tc priorities for IP ethertypes in order
+    to give IP traffic higher priority than other management traffic.
+    In case of vlan encap traffic, IP traffic will still get lower
+    priority.
+    
+    Fix it by also reserving low priority tc prio for vlan.
+    
+    Fixes: c230c7579c14 ("netdev-offload-tc: Reserve lower tc prios for ip ethertypes")
+    Signed-off-by: Maor Dickman <maord@nvidia.com>
+    Acked-by: Roi Dayan <roid@nvidia.com>
+    Signed-off-by: Simon Horman <horms@ovn.org>
+    (cherry picked from commit 6280f5d04a8daad2ad7c5723da05e92b217877e2)
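+    
+    As a purely illustrative sketch (not taken from the patch; the
+    device and prio values are made up), the effect can be pictured
+    with tc(8), where a lower prio value means a filter is matched
+    first:
+    
+        tc qdisc add dev eth0 ingress
+        # IP ethertypes already had reserved low (high-precedence)
+        # priorities; the fix reserves one for 802.1Q as well.
+        tc filter add dev eth0 ingress protocol ip     prio 1 flower action pass
+        tc filter add dev eth0 ingress protocol ipv6   prio 2 flower action pass
+        tc filter add dev eth0 ingress protocol 802.1Q prio 3 flower action pass
+        # Other ethertypes keep higher (lower-precedence) values.
+        tc filter add dev eth0 ingress protocol arp    prio 10 flower action pass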
+
+
+* Fri Aug 09 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-7
+- Merging 7428125173 netdev-dpdk: Fix race condition in mempool .. [RH git: d9dae5cf82]
+    Commit list:
+    7428125173 netdev-dpdk: Fix race condition in mempool information dump.
+    e0c0302d71 ovsdb: transaction: Remove incorrect transaction abort in pre-commit.
+    59d80787ad netlink-notifier: Silence the UBsan's function pointer mismatch error.
+    bd133fcf6b Documentation: Update QEMU documentation URLs.
+    6738b680ec match: Fix false-positive snprintf size warning.
+    5bf4c0f09e util: Add non-NULL format assertion to xvasprintf.
+    9cbd8804dd docs: Define Read the Docs configuration for Sphinx HTML parameters.
+    6eca4d13c8 flow: Fix unaligned access to the ND target in miniflow_extract.
+    d2119feb01 dpif-netlink-rtnl: Fix netdev leak in out-of-tree tunnels probe.
+    9c439c23e6 dpctl: Fix netdev reference leak in "show" command.
+    15cbfb19fa cirrus: Update to FreeBSD 14.1.
+    a3722ab1f7 ovs-monitor-ipsec: LibreSwan v5 support.
+    bd18a13732 ovs-monitor-ipsec: LibreSwan autodetect version.
+    504efbe923 netdev-dpdk: Check pending reset when adding device.
+    2393305d96 odp-execute: Check IPv4 checksum offload flag in AVX.
+    0aec0c2cc0 odp-execute: Set IPv6 traffic class in AVX implementation.
+    dd1aea7f8b ofp-prop: Fix unaligned 128 bit access.
+    830fd48237 dpdk: Check other_config:dpdk-extra for '--lcores'.
+    8499e90274 Prepare for 3.3.2.
+    2b87b844db Set release date for 3.3.1.
+    81d2804fc3 python: ovs: flow: Fix nested check_pkt_len acts.
+    8c15b5c0f3 python: idl: Fix index not being updated on row modification.
+    80c935dba3 python: ovsdb-idl: Convert new_uuid insert() arg to UUID.
+    6b6d7f05db python: ovsdb-idl: Make IndexedRows mirror hmap.
+    479dd96e18 ci: Restore vhost-user unit tests in check-dpdk.
+    cc99622485 system-dpdk: Fix socket conflict when starting testpmd.
+    5bb95ad089 netdev-dpdk: Refactor tunnel checksum offloading.
+    a9248871af netdev-dpdk: Use guest TSO segmentation size hint.
+    55d2e6d3dc netdev-dpdk: Refactor TSO request code.
+    2fe3ab83ab netdev-dpdk: Fix inner checksum when outer is not supported.
+    537a6bcda7 netdev-dpdk: Disable outer UDP checksum for net/iavf.
+    2e03f5567c netdev-dpdk: Fallback to non tunnel checksum offloading.
+    7916a24171 nsh: Add support to compose-packet and use it in system tests.
+    310cf419e7 tests: Convert ND, MPLS and CT sendpkt tests to compose-packet.
+    96efed3499 tests: sendpkt: Allow different input formats.
+    2e89ed9946 ipf: Handle common case of ipf defragmentation.
+    69424e53b6 ipf: Only add fragments to batch of same dl_type.
+    5cfbc915de python: ovsdb-idl: Use monitor_cond for _Server DB.
+    84c83589b6 ovsdb-idl: Add C IDL test for "monitor" fallback.
+    79608c4fdd tests: Fix non-portable plus match in python vlog test.
+    6688bff495 odp-execute: Fix AVX checksum calculation.
+    0ec55bbe95 dpdk: Use DPDK 23.11.1 release for OVS 3.3.
+    a3022b08ac netdev-linux: Initialize link speed in error conditions.
+    216c2c1ff9 netdev-linux: Return an error if device feature names are empty.
+    1b78323a21 socket: Fix uninitialized values in inet_parse_ functions.
+    4ece94d2ab dpctl: Fix uninitialized value when deleting flows.
+    7ee12c5ae1 netdev-native-tnl: Fix use of uninitialized offset on SRv6 header pop.
+    258bc1e0c6 netdev-linux: Fix ethtool_cmd is partly outside array bounds.
+    ffbce0c428 atlocal: Replace deprecated pkg_resources.
+    1f0423a4ee atlocal: Fix setting HAVE_PYTEST on unexpected errors.
+    95baf295af srv6: Fix misaligned writes to segment list.
+    8d4ba3b5a8 compiler: Fix errors in Clang 17 ubsan checks.
+    d153eff606 table: Fix freeing global variable.
+    f02dc3cfec vlog: Destroy async_append first then close log_fd.
+    bf1b16364b conntrack: Fully initialize conn struct before insertion.
+    cf461fe282 conntrack: Do not use {0} to initialize unions.
+    20ed5491c5 ovsdb-client: Add missing arg to help for 'dump'.
+    4756bf4baf ofproto-dpif-trace: Fix access to an out-of-scope stack memory.
+    01eca18be1 hash, jhash: Fix unaligned access to the hash remainder.
+    4f61523c0d sparse: Add additional define for sparse on GCC >= 14.
+    9a5c24d70f sparse: Add immintrin.h header.
+    3528cc6f45 tc: Fix -Wgnu-variable-sized-type-not-at-end warning with Clang 18.
+    5814de5687 tests: Fix build failure with Clang 18 due to -Wformat-truncation.
+    a6c3b5202c netdev-dpdk: Fix possible memory leak configuring VF MAC address.
+    42e685916e ovsdb: raft: Fix probe intervals after install snapshot request.
+    1c44cb5963 ovsdb: raft: Fix inability to join a cluster with a large database.
+    5966c22b85 rhel/systemd: Set ovsdb-server timeout to 5 minutes.
+    f19448b861 github: Update python to 3.12.
+    b705fb8dd2 ovsdb-dot: Fix flake8 issues.
+    1ac823cb03 ovsdb-doc: Fix syntax warning with Python 3.12 and flake8 issues.
+    7e99dbd8a0 python: Remove hacking dependency and use recent flake8.
+    41055da769 cirrus: Update to FreeBSD 13.3.
+    6448c1b697 vlog: Log stack trace on vlog_abort.
+    a6852319b4 tests: Fix compatibility issue with Python 3.13 in vlog.at.
+    775507fe91 ofproto-dpif-upcall: Fix ukey installation failure logs and counters.
+    b26baf873e conntrack: Do not use icmp reverse helper for icmpv6.
+    b9f28c5862 conntrack: Fix SNAT with exhaustion system test.
+    43db937876 ovsdb: raft: Fix inability to join after leadership change round trip.
+    2a2f162b57 ovsdb: raft: Fix assertion when 1-node cluster looses leadership.
+    a174a5ab8b ovsdb: raft: Fix permanent joining state on a cluster member.
+    5eac230f22 ovsdb: raft: Fix time intervals for multitasking while joining.
+    99fe661f04 ovsdb: raft: Avoid transferring leadership to unavailable servers.
+    c560f6ca32 ofproto-dpif-xlate: Fix continuations with associated metering.
+    1c1f173ce8 dpif-netdev: Fix crash due to tunnel offloading on recirculation.
+    feb0fefd8d netdev-dpdk: Disable outer UDP checksum offload for ice/i40e driver.
+    3280d95c42 ovs-monitor-ipsec: LibreSwan autodetect paths.
+    4fedcae6ee route-table: Avoid routes from non-standard routing tables.
+    9f39cd4a11 ovs-tcpdump: Fix cleanup mirror failed with twice fatal signals.
+    7fa40643e7 ofproto-dpif: Fix tunnel with different name del/add failure.
+    04dc9d117a ofpbuf: Prevent undefined behavior in ofpbuf_clone.
+    667c9eb2b0 netdev-dpdk: Fix tunnel type check during Tx offload preparation.
+    332300c360 netdev-dpdk: Fix TCP check during Tx offload preparation.
+    e7778b3716 netdev-dpdk: Clear inner packet marks if no inner offloads requested.
+    c85158e265 netdev-dpdk: Clean up all marker flags if no offloads requested.
+    a6bb8be429 github: Reduce ASLR entropy to be compatible with asan in llvm 14.
+    70dcee98f4 netdev-dpdk: Dump packets that fail Tx preparation.
+    5ae591757a bfd: Improve state change log message.
+    efbc37b946 tests: Fix "SSL db: Implementation" test with openssl > 3.2.0.
+    0f1af687cc conntrack: Fix flush not flushing all elements.
+    04f1984a44 m4: Fix linking with OpenSSL 1.1.0+ and 3+ on Windows.
+    8f903b598f ovs-pki: Fix file permissions on Windows.
+    3002010658 bond: Reset stats when deleting post recirc rule.
+    02f0d6db14 ofproto-dpif-trace: Fix infinite recirculation tracing.
+    e68ddb34bc github: Temporarily disable SNAT with exhaustion system test.
+    95d4d7108a dp-packet: Don't offload inner csum if outer isn't supported.
+    05453d807d ofproto-dpif-xlate: Fix ignoring IPv6 local_ip for native tunnels.
+    a0df9c85de netdev-dummy: Add local route entries for IP addresses.
+    2f742d7afc tests: Move the non-local port as tunnel endpoint test.
+    a6bdf7d310 Prepare for 3.3.1.
+
+
+* Fri Aug 09 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-6
+- Revert "tests: Fix compatibility issue with Python 3.13 in vlog.at." [RH git: b558e43e40]
+    This reverts commit 9ecfc21acbba06af892a1db0d017447a6b5a84c1.
+
+
+* Fri Jul 26 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-5
+- tests: Fix compatibility issue with Python 3.13 in vlog.at. [RH git: 9ecfc21acb]
+    The vlog - Python3 test makes use of output from Python
+    Tracebacks in its test assertion.
+    
+    In Python 3.13 a line with tophat (``^``) markers is added below
+    Tracebacks from calls to assert [0], which makes the test fail.
+    This change of behavior is also backported to the Python 3.12 and
+    3.11 stable branches [1].
+    
+    Strip lines containing one or more occurrences of the ``^``
+    character from the output before performing the test assertions.
+    
+    0: https://github.com/python/cpython/pull/105935
+    1: https://github.com/python/cpython/issues/116034
+    
+    Reported-at: https://launchpad.net/bugs/2060434
+    Signed-off-by: Frode Nordahl <fnordahl@ubuntu.com>
+    Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+    (cherry picked from commit 9185793e75435d890f18d391eaaeab0ade6f1415)
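+    
+    A minimal sketch of the approach (the file name and the failing
+    assert are made up for illustration):
+    
+        printf 'assert 1 == 2\n' > /tmp/t.py
+        # Recent interpreters print a line of '^' markers under the
+        # failing expression; deleting every line containing '^' keeps
+        # the traceback comparable across Python versions.
+        python3 /tmp/t.py 2>&1 | sed '/\^/d'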
+
+
+* Wed Jul 17 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-4
+- Use --with-version-suffix [RH git: 2fe731f6ff]
+
+
+* Wed Jul 17 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-3
+- rhel: Make the version, displayed to the user, customizable. [RH git: 5fc7d9c3cd]
+    Since on CentOS/RHEL the builds are based on stable branches and not on
+    tags, for debugging purposes it's better to use the downstream version
+    as the displayed version, so it's easier to know which commits are
+    included in a build.
+    
+    This commit adds --with-version-suffix as a ./configure option in
+    order to set an OVS version suffix that is shown to the user via
+    ovs-vsctl -V and, therefore, also in the database, in ovs-vsctl show,
+    and in the other utilities.
+    
+    --with-version-suffix is used in the Fedora/CentOS/RHEL spec file in
+    order to keep the version aligned with the downstream one.
+    
+    Signed-off-by: Timothy Redaelli <tredaelli@redhat.com>
+    Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+    (cherry picked from commit 9e6d43ef32152527f7887d7f316a191adb5f338c)
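+    
+    A hypothetical usage sketch (the suffix value is made up; the spec
+    file derives the real one from the package release):
+    
+        ./configure --with-version-suffix=-8.el9
+        make
+        # ovs-vsctl -V now reports the suffixed version,
+        # e.g. "3.3.0-8.el9".
+        utilities/ovs-vsctl -V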
+
+
+* Mon Jun 10 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-2
+- tests: Fix "SSL db: Implementation" test with openssl > 3.2.0. [RH git: 4b1e722229]
+    In OpenSSL 3.2.0 (81b741f) all the "alert" error messages were updated to
+    replace "sslv3" with "ssl/tls".
+    
+    This commit updates the "SSL db: implementation" test to support both the
+    pre-openssl 3.2.0 error message: "sslv3 alert certificate unknown" and the
+    post-openssl 3.2.0 error message: "ssl/tls alert certificate unknown".
+    
+    Acked-by: Eelco Chaudron <echaudro@redhat.com>
+    Signed-off-by: Timothy Redaelli <tredaelli@redhat.com>
+    Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+    (cherry picked from commit d2a42f396338210ff7382fc3be9e6306d627db96)
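+    
+    The idea, sketched hypothetically: accept either spelling by
+    matching the variable part with an alternation rather than a fixed
+    string:
+    
+        echo "ssl/tls alert certificate unknown" |
+            grep -E '(sslv3|ssl/tls) alert certificate unknown'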
+
+
+* Mon Jun 10 2024 Timothy Redaelli <tredaelli@redhat.com> - 3.3.0-1
+- redhat: Imported Red Hat OVS 3.3 build files. [RH git: 33df956300]
+
+