From 534a520660575e41682d4da2aad60e5cbe74a2a6 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Apr 04 2024 14:39:14 +0000 Subject: Import openvswitch3.2-3.2.0-56.el9fdp.src.rpm from FDP --- diff --git a/SOURCES/openvswitch-3.2.0.patch b/SOURCES/openvswitch-3.2.0.patch index 64d0ee5..c88769e 100644 --- a/SOURCES/openvswitch-3.2.0.patch +++ b/SOURCES/openvswitch-3.2.0.patch @@ -1,5 +1,5 @@ diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh -index 99850a943..8227a5748 100755 +index 99850a9434..8227a57487 100755 --- a/.ci/linux-build.sh +++ b/.ci/linux-build.sh @@ -82,6 +82,10 @@ if [ "$DPDK" ] || [ "$DPDK_SHARED" ]; then @@ -13,11 +13,62 @@ index 99850a943..8227a5748 100755 if [ "$CC" = "clang" ]; then CFLAGS_FOR_OVS="${CFLAGS_FOR_OVS} -Wno-error=unused-command-line-argument" elif [ "$M32" ]; then +diff --git a/.cirrus.yml b/.cirrus.yml +index 48931fa085..d8a9722809 100644 +--- a/.cirrus.yml ++++ b/.cirrus.yml +@@ -2,8 +2,8 @@ freebsd_build_task: + + freebsd_instance: + matrix: +- image_family: freebsd-12-4-snap + image_family: freebsd-13-2-snap ++ image_family: freebsd-14-0-snap + cpu: 4 + memory: 4G + diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml -index 47d239f10..bc5494e86 100644 +index 47d239f108..b50c42de6f 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml -@@ -85,6 +85,7 @@ jobs: +@@ -8,16 +8,16 @@ jobs: + dependencies: gcc libnuma-dev ninja-build + CC: gcc + DPDK_GIT: https://dpdk.org/git/dpdk-stable +- DPDK_VER: 22.11.1 ++ DPDK_VER: 22.11.4 + name: dpdk gcc + outputs: + dpdk_key: ${{ steps.gen_dpdk_key.outputs.key }} +- runs-on: ubuntu-20.04 ++ runs-on: ubuntu-22.04 + timeout-minutes: 30 + + steps: + - name: checkout +- uses: actions/checkout@v3 ++ uses: actions/checkout@v4 + + - name: update PATH + run: | +@@ -45,14 +45,14 @@ jobs: + + - name: cache + id: dpdk_cache +- uses: actions/cache@v3 ++ uses: actions/cache@v4 + with: + path: dpdk-dir + key: ${{ steps.gen_dpdk_key.outputs.key }} + + - name: set up python + if: steps.dpdk_cache.outputs.cache-hit != 'true' +- uses: actions/setup-python@v4 ++ uses: actions/setup-python@v5 + with: + python-version: '3.9' + +@@ -85,10 +85,11 @@ jobs: LIBS: ${{ matrix.libs }} M32: ${{ matrix.m32 }} OPTS: ${{ matrix.opts }} @@ -25,6 +76,11 @@ index 47d239f10..bc5494e86 100644 TESTSUITE: ${{ matrix.testsuite }} name: linux ${{ join(matrix.*, ' ') }} +- runs-on: ubuntu-20.04 ++ runs-on: ubuntu-22.04 + timeout-minutes: 30 + + strategy: @@ -100,6 +101,11 @@ jobs: - compiler: clang opts: --disable-ssl @@ -37,9 +93,125 @@ index 47d239f10..bc5494e86 100644 - compiler: gcc testsuite: test - compiler: clang +@@ -160,7 +166,7 @@ jobs: + + steps: + - name: checkout +- uses: actions/checkout@v3 ++ uses: actions/checkout@v4 + + - name: update PATH + run: | +@@ -168,13 +174,13 @@ jobs: + echo "$HOME/.local/bin" >> $GITHUB_PATH + + - name: set up python +- uses: actions/setup-python@v4 ++ uses: actions/setup-python@v5 + with: + python-version: '3.9' + + - name: cache + if: matrix.dpdk != '' || matrix.dpdk_shared != '' +- uses: actions/cache@v3 ++ uses: actions/cache@v4 + with: + path: dpdk-dir + key: ${{ needs.build-dpdk.outputs.dpdk_key }} +@@ -200,9 +206,9 @@ jobs: + - name: copy logs on failure + if: failure() || cancelled() + run: | +- # upload-artifact@v2 throws exceptions if it tries to upload socket ++ # upload-artifact throws exceptions if it tries to upload socket + # files and we could have some socket files in testsuite.dir. 
+- # Also, upload-artifact@v2 doesn't work well enough with wildcards. ++ # Also, upload-artifact doesn't work well enough with wildcards. + # So, we're just archiving everything here to avoid any issues. + mkdir logs + cp config.log ./logs/ +@@ -211,7 +217,7 @@ jobs: + + - name: upload logs on failure + if: failure() || cancelled() +- uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 + with: + name: logs-linux-${{ join(matrix.*, '-') }} + path: logs.tgz +@@ -230,13 +236,13 @@ jobs: + + steps: + - name: checkout +- uses: actions/checkout@v3 ++ uses: actions/checkout@v4 + - name: update PATH + run: | + echo "$HOME/bin" >> $GITHUB_PATH + echo "$HOME/.local/bin" >> $GITHUB_PATH + - name: set up python +- uses: actions/setup-python@v4 ++ uses: actions/setup-python@v5 + with: + python-version: '3.9' + - name: install dependencies +@@ -247,7 +253,7 @@ jobs: + run: ./.ci/osx-build.sh + - name: upload logs on failure + if: failure() +- uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 + with: + name: logs-osx-clang---disable-ssl + path: config.log +@@ -271,7 +277,7 @@ jobs: + + steps: + - name: checkout +- uses: actions/checkout@v3 ++ uses: actions/checkout@v4 + + - name: update PATH + run: | +@@ -293,7 +299,7 @@ jobs: + run: ./.ci/linux-build.sh + + - name: upload deb packages +- uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 + with: + name: deb-packages-${{ matrix.dpdk }}-dpdk + path: '/home/runner/work/ovs/*.deb' +@@ -301,7 +307,7 @@ jobs: + build-linux-rpm: + name: linux rpm fedora + runs-on: ubuntu-latest +- container: fedora:37 ++ container: fedora:39 + timeout-minutes: 30 + + strategy: +@@ -309,7 +315,7 @@ jobs: + + steps: + - name: checkout +- uses: actions/checkout@v3 ++ uses: actions/checkout@v4 + - name: install dependencies + run: | + dnf install -y rpm-build dnf-plugins-core +@@ -328,7 +334,7 @@ jobs: + run: dnf install -y rpm/rpmbuild/RPMS/*/*.rpm + + - name: upload rpm packages +- uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 + with: + name: rpm-packages + path: | diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 -index 000000000..7d505150e +index 0000000000..7d505150ec --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,25 @@ @@ -68,8 +240,53 @@ index 000000000..7d505150e +python: + install: + - requirements: Documentation/requirements.txt +diff --git a/Documentation/faq/releases.rst b/Documentation/faq/releases.rst +index e6bda14e7b..f47d408369 100644 +--- a/Documentation/faq/releases.rst ++++ b/Documentation/faq/releases.rst +@@ -215,10 +215,10 @@ Q: What DPDK version does each Open vSwitch release work with? + 2.14.x 19.11.13 + 2.15.x 20.11.6 + 2.16.x 20.11.6 +- 2.17.x 21.11.2 +- 3.0.x 21.11.2 +- 3.1.x 22.11.1 +- 3.2.x 22.11.1 ++ 2.17.x 21.11.6 ++ 3.0.x 21.11.6 ++ 3.1.x 22.11.4 ++ 3.2.x 22.11.4 + ============ ======== + + Q: Are all the DPDK releases that OVS versions work with maintained? +diff --git a/Documentation/intro/install/dpdk.rst b/Documentation/intro/install/dpdk.rst +index 63a0ebb23b..27df48493e 100644 +--- a/Documentation/intro/install/dpdk.rst ++++ b/Documentation/intro/install/dpdk.rst +@@ -42,7 +42,7 @@ Build requirements + In addition to the requirements described in :doc:`general`, building Open + vSwitch with DPDK will require the following: + +-- DPDK 22.11.1 ++- DPDK 22.11.4 + + - A `DPDK supported NIC`_ + +@@ -73,9 +73,9 @@ Install DPDK + #. 
Download the `DPDK sources`_, extract the file and set ``DPDK_DIR``:: + + $ cd /usr/src/ +- $ wget https://fast.dpdk.org/rel/dpdk-22.11.1.tar.xz +- $ tar xf dpdk-22.11.1.tar.xz +- $ export DPDK_DIR=/usr/src/dpdk-stable-22.11.1 ++ $ wget https://fast.dpdk.org/rel/dpdk-22.11.4.tar.xz ++ $ tar xf dpdk-22.11.4.tar.xz ++ $ export DPDK_DIR=/usr/src/dpdk-stable-22.11.4 + $ cd $DPDK_DIR + + #. Configure and install DPDK using Meson diff --git a/Documentation/ref/ovs-actions.7.rst b/Documentation/ref/ovs-actions.7.rst -index d13895655..36adcc5db 100644 +index d138956556..36adcc5db2 100644 --- a/Documentation/ref/ovs-actions.7.rst +++ b/Documentation/ref/ovs-actions.7.rst @@ -694,7 +694,8 @@ encapsulated in an OpenFlow ``packet-in`` message. The supported options are: @@ -96,7 +313,7 @@ index d13895655..36adcc5db 100644 The ``enqueue`` action ---------------------- diff --git a/Documentation/requirements.txt b/Documentation/requirements.txt -index 77130c6e0..77f44bd76 100644 +index 77130c6e01..77f44bd765 100644 --- a/Documentation/requirements.txt +++ b/Documentation/requirements.txt @@ -1,2 +1,2 @@ @@ -104,7 +321,7 @@ index 77130c6e0..77f44bd76 100644 +sphinx>=1.1 ovs_sphinx_theme>=1.0,<1.1 diff --git a/Makefile.am b/Makefile.am -index db341504d..94f488d18 100644 +index db341504d3..94f488d183 100644 --- a/Makefile.am +++ b/Makefile.am @@ -84,6 +84,7 @@ EXTRA_DIST = \ @@ -134,12 +351,20 @@ index db341504d..94f488d18 100644 @if cmp -s $(@F).tmp $@; then \ touch $@; \ diff --git a/NEWS b/NEWS -index a3a5c2e4a..eb7a9b1ba 100644 +index a3a5c2e4a2..baeecae046 100644 --- a/NEWS +++ b/NEWS -@@ -1,3 +1,10 @@ -+v3.2.2 - xx xxx xxxx +@@ -1,3 +1,18 @@ ++v3.2.3 - xx xxx xxxx ++-------------------- ++ ++v3.2.2 - 08 Feb 2024 +-------------------- ++ - Bug fixes ++ - Security: ++ * Fixed vulnerabilities CVE-2023-3966 and CVE-2023-5366. ++ - DPDK: ++ * OVS validated with DPDK 22.11.4. + +v3.2.1 - 17 Oct 2023 +-------------------- @@ -149,7 +374,7 @@ index a3a5c2e4a..eb7a9b1ba 100644 -------------------- - OVSDB: diff --git a/build-aux/automake.mk b/build-aux/automake.mk -index b9a77a51c..d65b6da6c 100644 +index b9a77a51cf..d65b6da6c5 100644 --- a/build-aux/automake.mk +++ b/build-aux/automake.mk @@ -1,11 +1,19 @@ @@ -192,7 +417,7 @@ index b9a77a51c..d65b6da6c 100644 + build-aux/soexpand.py \ + build-aux/xml2nroff diff --git a/build-aux/extract-ofp-actions b/build-aux/extract-ofp-actions -index 0aa6c65f3..cc5c1dbb0 100755 +index 0aa6c65f31..cc5c1dbb06 100755 --- a/build-aux/extract-ofp-actions +++ b/build-aux/extract-ofp-actions @@ -17,27 +17,30 @@ version_map = {"1.0": 0x01, @@ -439,7 +664,7 @@ index 0aa6c65f3..cc5c1dbb0 100755 if __name__ == '__main__': argv0 = sys.argv[0] diff --git a/build-aux/extract-ofp-errors b/build-aux/extract-ofp-errors -index 2c3fbfc88..eeefccbee 100755 +index 2c3fbfc881..eeefccbee0 100755 --- a/build-aux/extract-ofp-errors +++ b/build-aux/extract-ofp-errors @@ -22,6 +22,9 @@ tokenRe = "#?" + idRe + "|[0-9]+|." 
@@ -753,7 +978,7 @@ index 2c3fbfc88..eeefccbee 100755 if '--help' in sys.argv: usage() diff --git a/build-aux/extract-ofp-fields b/build-aux/extract-ofp-fields -index efec59c25..89d80c208 100755 +index efec59c25b..89d80c2085 100755 --- a/build-aux/extract-ofp-fields +++ b/build-aux/extract-ofp-fields @@ -4,9 +4,9 @@ import getopt @@ -934,7 +1159,7 @@ index efec59c25..89d80c208 100755 if __name__ == "__main__": argv0 = sys.argv[0] diff --git a/build-aux/extract-ofp-msgs b/build-aux/extract-ofp-msgs -index 6b3295cf6..c26ea1d35 100755 +index 6b3295cf64..c26ea1d355 100755 --- a/build-aux/extract-ofp-msgs +++ b/build-aux/extract-ofp-msgs @@ -24,6 +24,9 @@ OFPT11_STATS_REQUEST = 18 @@ -1040,7 +1265,7 @@ index 6b3295cf6..c26ea1d35 100755 print(line) - diff --git a/build-aux/gen_ofp_field_decoders b/build-aux/gen_ofp_field_decoders -index 0b797ee8c..0cb6108c2 100755 +index 0b797ee8c8..0cb6108c22 100755 --- a/build-aux/gen_ofp_field_decoders +++ b/build-aux/gen_ofp_field_decoders @@ -2,7 +2,7 @@ @@ -1062,7 +1287,7 @@ index 0b797ee8c..0cb6108c2 100755 field_decoders = {} aliases = {} diff --git a/build-aux/sodepends.py b/build-aux/sodepends.py -index 45812bcbd..ac8dd61a4 100755 +index 45812bcbd7..ac8dd61a4b 100755 --- a/build-aux/sodepends.py +++ b/build-aux/sodepends.py @@ -14,9 +14,10 @@ @@ -1078,7 +1303,7 @@ index 45812bcbd..ac8dd61a4 100755 def sodepends(include_dirs, filenames, dst): ok = True diff --git a/build-aux/soexpand.py b/build-aux/soexpand.py -index 00adcf47a..7d4dc0486 100755 +index 00adcf47a3..7d4dc0486a 100755 --- a/build-aux/soexpand.py +++ b/build-aux/soexpand.py @@ -14,9 +14,10 @@ @@ -1094,7 +1319,7 @@ index 00adcf47a..7d4dc0486 100755 def soexpand(include_dirs, src, dst): ok = True diff --git a/build-aux/xml2nroff b/build-aux/xml2nroff -index ee5553f45..3e937910b 100755 +index ee5553f456..3e937910be 100755 --- a/build-aux/xml2nroff +++ b/build-aux/xml2nroff @@ -18,7 +18,7 @@ import getopt @@ -1130,7 +1355,7 @@ index ee5553f45..3e937910b 100755 sys.exit(1) for line in s.splitlines(): diff --git a/configure.ac b/configure.ac -index 320509c5f..764479514 100644 +index 320509c5fc..25a00dcb11 100644 --- a/configure.ac +++ b/configure.ac @@ -13,7 +13,7 @@ @@ -1138,7 +1363,7 @@ index 320509c5f..764479514 100644 AC_PREREQ(2.63) -AC_INIT(openvswitch, 3.2.0, bugs@openvswitch.org) -+AC_INIT(openvswitch, 3.2.2, bugs@openvswitch.org) ++AC_INIT(openvswitch, 3.2.3, bugs@openvswitch.org) AC_CONFIG_SRCDIR([vswitchd/ovs-vswitchd.c]) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_AUX_DIR([build-aux]) @@ -1156,7 +1381,7 @@ index 320509c5f..764479514 100644 AC_PROG_CXX AC_PROG_CPP diff --git a/datapath-windows/include/automake.mk b/datapath-windows/include/automake.mk -index a354f007f..185a06b03 100644 +index a354f007fd..185a06b03e 100644 --- a/datapath-windows/include/automake.mk +++ b/datapath-windows/include/automake.mk @@ -7,6 +7,4 @@ $(srcdir)/datapath-windows/include/OvsDpInterface.h: \ @@ -1167,15 +1392,21 @@ index a354f007f..185a06b03 100644 - CLEANFILES += $(srcdir)/datapath-windows/include/OvsDpInterface.h diff --git a/debian/changelog b/debian/changelog -index 8757e5cb2..780d1e2d8 100644 +index 8757e5cb28..302fc0a45d 100644 --- a/debian/changelog +++ b/debian/changelog -@@ -1,3 +1,15 @@ +@@ -1,3 +1,21 @@ ++openvswitch (3.2.3-1) unstable; urgency=low ++ [ Open vSwitch team ] ++ * New upstream version ++ ++ -- Open vSwitch team Thu, 08 Feb 2024 17:55:30 +0100 ++ +openvswitch (3.2.2-1) unstable; urgency=low + [ Open vSwitch team ] + * New upstream version + -+ -- Open vSwitch team Tue, 17 Oct 2023 
13:02:27 +0200 ++ -- Open vSwitch team Thu, 08 Feb 2024 17:55:30 +0100 + +openvswitch (3.2.1-1) unstable; urgency=low + [ Open vSwitch team ] @@ -1187,7 +1418,7 @@ index 8757e5cb2..780d1e2d8 100644 * New upstream version diff --git a/include/automake.mk b/include/automake.mk -index 1e3390ae0..a276c680b 100644 +index 1e3390ae0d..a276c680b5 100644 --- a/include/automake.mk +++ b/include/automake.mk @@ -8,7 +8,6 @@ include/odp-netlink-macros.h: include/odp-netlink.h \ @@ -1199,7 +1430,7 @@ index 1e3390ae0..a276c680b 100644 include include/openflow/automake.mk diff --git a/include/openflow/automake.mk b/include/openflow/automake.mk -index a1d75756c..820c09f84 100644 +index a1d75756c9..820c09f84b 100644 --- a/include/openflow/automake.mk +++ b/include/openflow/automake.mk @@ -22,6 +22,3 @@ HSTAMP_FILES = $(openflowinclude_HEADERS:.h=.hstamp) @@ -1210,7 +1441,7 @@ index a1d75756c..820c09f84 100644 -EXTRA_DIST += build-aux/check-structs - diff --git a/include/openvswitch/compiler.h b/include/openvswitch/compiler.h -index cf009f826..52614a5ac 100644 +index cf009f8264..52614a5ac0 100644 --- a/include/openvswitch/compiler.h +++ b/include/openvswitch/compiler.h @@ -37,6 +37,16 @@ @@ -1231,7 +1462,7 @@ index cf009f826..52614a5ac 100644 #define OVS_UNUSED __attribute__((__unused__)) #define OVS_PRINTF_FORMAT(FMT, ARG1) __attribute__((__format__(printf, FMT, ARG1))) diff --git a/lib/automake.mk b/lib/automake.mk -index e64ee76ce..1be13a420 100644 +index e64ee76ce7..1be13a420a 100644 --- a/lib/automake.mk +++ b/lib/automake.mk @@ -451,7 +451,7 @@ lib_libsflow_la_SOURCES = \ @@ -1274,8 +1505,21 @@ index e64ee76ce..1be13a420 100644 # _server IDL OVSIDL_BUILT += lib/ovsdb-server-idl.c lib/ovsdb-server-idl.h lib/ovsdb-server-idl.ovsidl +diff --git a/lib/backtrace.h b/lib/backtrace.h +index 9ccafd6d47..a2506da5ff 100644 +--- a/lib/backtrace.h ++++ b/lib/backtrace.h +@@ -26,7 +26,7 @@ + #endif + + /* log_backtrace() will save the backtrace of a running program +- * into the log at the DEBUG level. ++ * into the log at the ERROR level. 
+ * + * To use it, insert the following code to where backtrace is + * desired: diff --git a/lib/db-ctl-base.c b/lib/db-ctl-base.c -index 5d2635946..3a8068b12 100644 +index 5d2635946d..3a8068b12c 100644 --- a/lib/db-ctl-base.c +++ b/lib/db-ctl-base.c @@ -820,6 +820,7 @@ check_condition(const struct ovsdb_idl_table_class *table, @@ -1295,7 +1539,7 @@ index 5d2635946..3a8068b12 100644 } diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c -index 9730e0eec..b8f065d1d 100644 +index 9730e0eecc..b8f065d1d7 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -3380,14 +3380,13 @@ static inline void @@ -1326,7 +1570,7 @@ index 9730e0eec..b8f065d1d 100644 stats->size = ARRAY_SIZE(hwol_stats) * (nb_thread + 1); stats->counters = xcalloc(stats->size, sizeof *stats->counters); diff --git a/lib/fatal-signal.c b/lib/fatal-signal.c -index 77f0c87dd..953150074 100644 +index 77f0c87dd4..9531500747 100644 --- a/lib/fatal-signal.c +++ b/lib/fatal-signal.c @@ -138,10 +138,6 @@ fatal_signal_init(void) @@ -1340,8 +1584,71 @@ index 77f0c87dd..953150074 100644 fatal_signal_create_wakeup_events(); #ifdef _WIN32 +diff --git a/lib/jsonrpc.c b/lib/jsonrpc.c +index c8ce5362e1..3db5f76e28 100644 +--- a/lib/jsonrpc.c ++++ b/lib/jsonrpc.c +@@ -221,19 +221,19 @@ jsonrpc_log_msg(const struct jsonrpc *rpc, const char *title, + } + if (msg->params) { + ds_put_cstr(&s, ", params="); +- json_to_ds(msg->params, 0, &s); ++ json_to_ds(msg->params, JSSF_SORT, &s); + } + if (msg->result) { + ds_put_cstr(&s, ", result="); +- json_to_ds(msg->result, 0, &s); ++ json_to_ds(msg->result, JSSF_SORT, &s); + } + if (msg->error) { + ds_put_cstr(&s, ", error="); +- json_to_ds(msg->error, 0, &s); ++ json_to_ds(msg->error, JSSF_SORT, &s); + } + if (msg->id) { + ds_put_cstr(&s, ", id="); +- json_to_ds(msg->id, 0, &s); ++ json_to_ds(msg->id, JSSF_SORT, &s); + } + VLOG_DBG("%s: %s %s%s", rpc->name, title, + jsonrpc_msg_type_to_string(msg->type), ds_cstr(&s)); +diff --git a/lib/mcast-snooping.c b/lib/mcast-snooping.c +index 029ca28558..43805ae4d5 100644 +--- a/lib/mcast-snooping.c ++++ b/lib/mcast-snooping.c +@@ -946,8 +946,9 @@ mcast_snooping_wait(struct mcast_snooping *ms) + void + mcast_snooping_flush_bundle(struct mcast_snooping *ms, void *port) + { +- struct mcast_group *g; + struct mcast_mrouter_bundle *m; ++ struct mcast_port_bundle *p; ++ struct mcast_group *g; + + if (!mcast_snooping_enabled(ms)) { + return; +@@ -971,5 +972,19 @@ mcast_snooping_flush_bundle(struct mcast_snooping *ms, void *port) + } + } + ++ LIST_FOR_EACH_SAFE (p, node, &ms->fport_list) { ++ if (p->port == port) { ++ mcast_snooping_flush_port(p); ++ ms->need_revalidate = true; ++ } ++ } ++ ++ LIST_FOR_EACH_SAFE (p, node, &ms->rport_list) { ++ if (p->port == port) { ++ mcast_snooping_flush_port(p); ++ ms->need_revalidate = true; ++ } ++ } ++ + ovs_rwlock_unlock(&ms->rwlock); + } diff --git a/lib/meta-flow.xml b/lib/meta-flow.xml -index bdd12f6a7..ac72a44bc 100644 +index bdd12f6a7b..ac72a44bce 100644 --- a/lib/meta-flow.xml +++ b/lib/meta-flow.xml @@ -3517,23 +3517,24 @@ actions=clone(load:0->NXM_OF_IN_PORT[],output:123) @@ -1384,7 +1691,7 @@ index bdd12f6a7..ac72a44bc 100644
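The conjunctive-match documentation amended above is easier to follow with a concrete flow set. A minimal sketch, assuming a hypothetical bridge br0 and output port 1 (illustration only, not part of this patch)::

    $ ovs-ofctl add-flow br0 'priority=5,ip,nw_src=10.0.0.1,actions=conjunction(7,1/2)'
    $ ovs-ofctl add-flow br0 'priority=5,ip,nw_dst=10.0.0.2,actions=conjunction(7,2/2)'
    $ ovs-ofctl add-flow br0 'priority=5,conj_id=7,ip,actions=output:1'

Here conjunction 7 has two dimensions (k/n of 1/2 and 2/2); a packet is output on port 1 only when both the nw_src and nw_dst clauses match, at which point the classifier looks up the conj_id=7 flow at the same priority.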
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c -index 8f1361e21..55700250d 100644 +index 8f1361e21f..55700250df 100644 --- a/lib/netdev-dpdk.c +++ b/lib/netdev-dpdk.c @@ -1312,6 +1312,16 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev) @@ -1413,7 +1720,7 @@ index 8f1361e21..55700250d 100644 } diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c -index 14bc87771..992627fa2 100644 +index 14bc877719..992627fa23 100644 --- a/lib/netdev-offload-dpdk.c +++ b/lib/netdev-offload-dpdk.c @@ -2537,15 +2537,15 @@ out: @@ -1469,8 +1776,95 @@ index 14bc87771..992627fa2 100644 return 0; } +diff --git a/lib/netdev-offload-tc.c b/lib/netdev-offload-tc.c +index b846a63c22..921d523177 100644 +--- a/lib/netdev-offload-tc.c ++++ b/lib/netdev-offload-tc.c +@@ -1627,7 +1627,9 @@ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action, + } + break; + case OVS_TUNNEL_KEY_ATTR_TP_SRC: { +- action->encap.tp_src = nl_attr_get_be16(tun_attr); ++ /* There is no corresponding attribute in TC. */ ++ VLOG_DBG_RL(&rl, "unsupported tunnel key attribute TP_SRC"); ++ return EOPNOTSUPP; + } + break; + case OVS_TUNNEL_KEY_ATTR_TP_DST: { +@@ -1783,12 +1785,12 @@ test_key_and_mask(struct match *match) + return 0; + } + +-static void ++static int + flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, + struct flow_tnl *tnl_mask) + { + struct geneve_opt *opt, *opt_mask; +- int len, cnt = 0; ++ int tot_opt_len, len, cnt = 0; + + /* 'flower' always has an exact match on tunnel metadata length, so having + * it in a wrong format is not acceptable unless it is empty. */ +@@ -1804,7 +1806,7 @@ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, + memset(&tnl_mask->metadata.present.map, 0, + sizeof tnl_mask->metadata.present.map); + } +- return; ++ return 0; + } + + tnl_mask->flags &= ~FLOW_TNL_F_UDPIF; +@@ -1818,7 +1820,7 @@ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, + sizeof tnl_mask->metadata.present.len); + + if (!tnl->metadata.present.len) { +- return; ++ return 0; + } + + memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv, +@@ -1832,7 +1834,16 @@ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, + * also not masks, but actual lengths in the 'flower' structure. 
*/ + len = flower->key.tunnel.metadata.present.len; + while (len) { ++ if (len < sizeof *opt) { ++ return EOPNOTSUPP; ++ } ++ + opt = &flower->key.tunnel.metadata.opts.gnv[cnt]; ++ tot_opt_len = sizeof *opt + opt->length * 4; ++ if (len < tot_opt_len) { ++ return EOPNOTSUPP; ++ } ++ + opt_mask = &flower->mask.tunnel.metadata.opts.gnv[cnt]; + + opt_mask->length = opt->length; +@@ -1840,6 +1851,8 @@ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, + cnt += sizeof(struct geneve_opt) / 4 + opt->length; + len -= sizeof(struct geneve_opt) + opt->length * 4; + } ++ ++ return 0; + } + + static void +@@ -2285,7 +2298,11 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, + tnl_mask->flags &= ~(FLOW_TNL_F_DONT_FRAGMENT | FLOW_TNL_F_CSUM); + + if (!strcmp(netdev_get_type(netdev), "geneve")) { +- flower_match_to_tun_opt(&flower, tnl, tnl_mask); ++ err = flower_match_to_tun_opt(&flower, tnl, tnl_mask); ++ if (err) { ++ VLOG_WARN_RL(&warn_rl, "Unable to parse geneve options"); ++ return err; ++ } + } + flower.tunnel = true; + } else { diff --git a/lib/netdev-offload.c b/lib/netdev-offload.c -index a5fa62487..931d634e1 100644 +index a5fa624875..931d634e15 100644 --- a/lib/netdev-offload.c +++ b/lib/netdev-offload.c @@ -872,7 +872,8 @@ netdev_set_flow_api_enabled(const struct smap *ovs_other_config) @@ -1484,7 +1878,7 @@ index a5fa62487..931d634e1 100644 offload_thread_nb); offload_thread_nb = DEFAULT_OFFLOAD_THREAD_NB; diff --git a/lib/netlink-conntrack.c b/lib/netlink-conntrack.c -index 4fcde9ba1..492bfcffb 100644 +index 4fcde9ba1e..492bfcffb8 100644 --- a/lib/netlink-conntrack.c +++ b/lib/netlink-conntrack.c @@ -579,7 +579,8 @@ nl_ct_put_tuple_proto(struct ofpbuf *buf, const struct ct_dpif_tuple *tuple) @@ -1497,8 +1891,95 @@ index 4fcde9ba1..492bfcffb 100644 nl_msg_put_be16(buf, CTA_PROTO_SRC_PORT, tuple->src_port); nl_msg_put_be16(buf, CTA_PROTO_DST_PORT, tuple->dst_port); } else { +diff --git a/lib/odp-util.c b/lib/odp-util.c +index 3eb2c3cb98..9306c9b4d4 100644 +--- a/lib/odp-util.c ++++ b/lib/odp-util.c +@@ -6464,12 +6464,10 @@ odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms, + icmpv6_key->icmpv6_code = ntohs(data->tp_dst); + + if (is_nd(flow, NULL) +- /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, ICMP +- * type and code are 8 bits wide. Therefore, an exact match +- * looks like htons(0xff), not htons(0xffff). See +- * xlate_wc_finish() for details. */ +- && (!export_mask || (data->tp_src == htons(0xff) +- && data->tp_dst == htons(0xff)))) { ++ /* Even though 'tp_src' is 16 bits wide, ICMP type is 8 bits ++ * wide. Therefore, an exact match looks like htons(0xff), ++ * not htons(0xffff). See xlate_wc_finish() for details. */ ++ && (!export_mask || data->tp_src == htons(0xff))) { + struct ovs_key_nd *nd_key; + nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND, + sizeof *nd_key); +@@ -7185,20 +7183,17 @@ parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], + flow->arp_sha = nd_key->nd_sll; + flow->arp_tha = nd_key->nd_tll; + if (is_mask) { +- /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, +- * ICMP type and code are 8 bits wide. Therefore, an +- * exact match looks like htons(0xff), not +- * htons(0xffff). See xlate_wc_finish() for details. +- * */ ++ /* Even though 'tp_src' is 16 bits wide, ICMP type ++ * is 8 bits wide. Therefore, an exact match looks ++ * like htons(0xff), not htons(0xffff). See ++ * xlate_wc_finish() for details. 
*/ + if (!is_all_zeros(nd_key, sizeof *nd_key) && +- (flow->tp_src != htons(0xff) || +- flow->tp_dst != htons(0xff))) { ++ flow->tp_src != htons(0xff)) { + odp_parse_error(&rl, errorp, +- "ICMP (src,dst) masks should be " +- "(0xff,0xff) but are actually " +- "(%#"PRIx16",%#"PRIx16")", +- ntohs(flow->tp_src), +- ntohs(flow->tp_dst)); ++ "ICMP src mask should be " ++ "(0xff) but is actually " ++ "(%#"PRIx16")", ++ ntohs(flow->tp_src)); + return ODP_FIT_ERROR; + } else { + *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND; +diff --git a/lib/ofp-ct.c b/lib/ofp-ct.c +index 85a9d8beca..a140fba470 100644 +--- a/lib/ofp-ct.c ++++ b/lib/ofp-ct.c +@@ -31,6 +31,9 @@ + #include "openvswitch/ofp-prop.h" + #include "openvswitch/ofp-util.h" + #include "openvswitch/packets.h" ++#include "openvswitch/vlog.h" ++ ++VLOG_DEFINE_THIS_MODULE(ofp_ct); + + static void + ofp_ct_tuple_format(struct ds *ds, const struct ofp_ct_tuple *tuple, +@@ -286,6 +289,10 @@ ofp_ct_tuple_decode_nested(struct ofpbuf *property, struct ofp_ct_tuple *tuple, + case NXT_CT_TUPLE_ICMP_CODE: + error = ofpprop_parse_u8(&inner, &tuple->icmp_code); + break; ++ ++ default: ++ error = OFPPROP_UNKNOWN(false, "NXT_CT_TUPLE", type); ++ break; + } + + if (error) { +@@ -377,6 +384,10 @@ ofp_ct_match_decode(struct ofp_ct_match *match, bool *with_zone, + } + error = ofpprop_parse_u16(&property, zone_id); + break; ++ ++ default: ++ error = OFPPROP_UNKNOWN(false, "NXT_CT_FLUSH", type); ++ break; + } + + if (error) { diff --git a/lib/ofp-table.c b/lib/ofp-table.c -index a956754f2..f9bd3b7f9 100644 +index a956754f2d..f9bd3b7f9c 100644 --- a/lib/ofp-table.c +++ b/lib/ofp-table.c @@ -1416,7 +1416,7 @@ count_common_prefix_run(const char *ids[], size_t n, @@ -1510,8 +1991,90 @@ index a956754f2..f9bd3b7f9 100644 } i++; } +diff --git a/lib/ovs-atomic.h b/lib/ovs-atomic.h +index ab9ce6b2e0..f140d25feb 100644 +--- a/lib/ovs-atomic.h ++++ b/lib/ovs-atomic.h +@@ -328,7 +328,7 @@ + #if __CHECKER__ + /* sparse doesn't understand some GCC extensions we use. 
*/ + #include "ovs-atomic-pthreads.h" +- #elif __has_extension(c_atomic) ++ #elif __clang__ && __has_extension(c_atomic) + #include "ovs-atomic-clang.h" + #elif HAVE_ATOMIC && __cplusplus >= 201103L + #include "ovs-atomic-c++.h" +diff --git a/lib/ovsdb-idl.c b/lib/ovsdb-idl.c +index 634fbb56df..ba720474b6 100644 +--- a/lib/ovsdb-idl.c ++++ b/lib/ovsdb-idl.c +@@ -177,6 +177,7 @@ static void ovsdb_idl_row_mark_backrefs_for_reparsing(struct ovsdb_idl_row *); + static void ovsdb_idl_row_track_change(struct ovsdb_idl_row *, + enum ovsdb_idl_change); + static void ovsdb_idl_row_untrack_change(struct ovsdb_idl_row *); ++static void ovsdb_idl_row_clear_changeseqno(struct ovsdb_idl_row *); + + static void ovsdb_idl_txn_abort_all(struct ovsdb_idl *); + static bool ovsdb_idl_txn_extract_mutations(struct ovsdb_idl_row *, +@@ -1374,6 +1375,7 @@ ovsdb_idl_track_clear__(struct ovsdb_idl *idl, bool flush_all) + row->updated = NULL; + } + ovsdb_idl_row_untrack_change(row); ++ ovsdb_idl_row_clear_changeseqno(row); + + if (ovsdb_idl_row_is_orphan(row)) { + ovsdb_idl_row_unparse(row); +@@ -1632,6 +1634,7 @@ ovsdb_idl_process_update(struct ovsdb_idl_table *table, + ru->columns); + } else if (ovsdb_idl_row_is_orphan(row)) { + ovsdb_idl_row_untrack_change(row); ++ ovsdb_idl_row_clear_changeseqno(row); + ovsdb_idl_insert_row(row, ru->columns); + } else { + VLOG_ERR_RL(&semantic_rl, "cannot add existing row "UUID_FMT" to " +@@ -2283,11 +2286,15 @@ ovsdb_idl_row_untrack_change(struct ovsdb_idl_row *row) + return; + } + ++ ovs_list_remove(&row->track_node); ++ ovs_list_init(&row->track_node); ++} ++ ++static void ovsdb_idl_row_clear_changeseqno(struct ovsdb_idl_row *row) ++{ + row->change_seqno[OVSDB_IDL_CHANGE_INSERT] = + row->change_seqno[OVSDB_IDL_CHANGE_MODIFY] = + row->change_seqno[OVSDB_IDL_CHANGE_DELETE] = 0; +- ovs_list_remove(&row->track_node); +- ovs_list_init(&row->track_node); + } + + static struct ovsdb_idl_row * +diff --git a/lib/ovsdb-types.h b/lib/ovsdb-types.h +index 9777efea33..688fe56337 100644 +--- a/lib/ovsdb-types.h ++++ b/lib/ovsdb-types.h +@@ -238,6 +238,18 @@ static inline bool ovsdb_type_is_map(const struct ovsdb_type *type) + return type->value.type != OVSDB_TYPE_VOID; + } + ++static inline bool ovsdb_type_has_strong_refs(const struct ovsdb_type *type) ++{ ++ return ovsdb_base_type_is_strong_ref(&type->key) ++ || ovsdb_base_type_is_strong_ref(&type->value); ++} ++ ++static inline bool ovsdb_type_has_weak_refs(const struct ovsdb_type *type) ++{ ++ return ovsdb_base_type_is_weak_ref(&type->key) ++ || ovsdb_base_type_is_weak_ref(&type->value); ++} ++ + #ifdef __cplusplus + } + #endif diff --git a/lib/tc.c b/lib/tc.c -index f49048cda..6b38925c3 100644 +index f49048cdab..6b38925c30 100644 --- a/lib/tc.c +++ b/lib/tc.c @@ -3851,15 +3851,13 @@ log_tc_flower_match(const char *msg, @@ -1555,8 +2118,44 @@ index f49048cda..6b38925c3 100644 } } } +diff --git a/lib/tc.h b/lib/tc.h +index 06707ffa46..fdbcf4b7cb 100644 +--- a/lib/tc.h ++++ b/lib/tc.h +@@ -213,7 +213,8 @@ enum nat_type { + struct tc_action_encap { + bool id_present; + ovs_be64 id; +- ovs_be16 tp_src; ++ /* ovs_be16 tp_src; Could have been here, but there is no ++ * TCA_TUNNEL_KEY_ENC_ attribute for it in the kernel. 
*/ + ovs_be16 tp_dst; + uint8_t tos; + uint8_t ttl; +diff --git a/lib/vconn.c b/lib/vconn.c +index b556762277..e9603432d2 100644 +--- a/lib/vconn.c ++++ b/lib/vconn.c +@@ -682,7 +682,6 @@ do_send(struct vconn *vconn, struct ofpbuf *msg) + + ofpmsg_update_length(msg); + if (!VLOG_IS_DBG_ENABLED()) { +- COVERAGE_INC(vconn_sent); + retval = (vconn->vclass->send)(vconn, msg); + } else { + char *s = ofp_to_string(msg->data, msg->size, NULL, NULL, 1); +@@ -693,6 +692,9 @@ do_send(struct vconn *vconn, struct ofpbuf *msg) + } + free(s); + } ++ if (!retval) { ++ COVERAGE_INC(vconn_sent); ++ } + return retval; + } + diff --git a/ofproto/connmgr.c b/ofproto/connmgr.c -index b092e9e04..f7f7b1279 100644 +index b092e9e04e..f7f7b12799 100644 --- a/ofproto/connmgr.c +++ b/ofproto/connmgr.c @@ -1209,7 +1209,7 @@ ofconn_create(struct ofservice *ofservice, struct rconn *rconn, @@ -1599,7 +2198,7 @@ index b092e9e04..f7f7b1279 100644 VLOG_INFO("%s: added %s controller \"%s\"", mgr->name, ofconn_type_to_string(ofservice->type), target); diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c -index 04b583f81..292500f21 100644 +index 04b583f816..292500f215 100644 --- a/ofproto/ofproto-dpif-upcall.c +++ b/ofproto/ofproto-dpif-upcall.c @@ -988,7 +988,7 @@ udpif_revalidator(void *arg) @@ -1649,7 +2248,7 @@ index 04b583f81..292500f21 100644 unixctl_command_reply(conn, ""); } diff --git a/ofproto/ofproto-dpif-xlate-cache.c b/ofproto/ofproto-dpif-xlate-cache.c -index 9224ee2e6..2e1fcb3a6 100644 +index 9224ee2e6d..2e1fcb3a6f 100644 --- a/ofproto/ofproto-dpif-xlate-cache.c +++ b/ofproto/ofproto-dpif-xlate-cache.c @@ -125,7 +125,7 @@ xlate_push_stats_entry(struct xc_entry *entry, @@ -1662,7 +2261,7 @@ index 9224ee2e6..2e1fcb3a6 100644 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_WARN_RL(&rl, "xcache LEARN action execution failed."); diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c -index 47ea0f47e..be4bd6657 100644 +index 47ea0f47e7..be4bd66576 100644 --- a/ofproto/ofproto-dpif-xlate.c +++ b/ofproto/ofproto-dpif-xlate.c @@ -1615,7 +1615,8 @@ xlate_lookup_ofproto_(const struct dpif_backer *backer, @@ -1722,7 +2321,7 @@ index 47ea0f47e..be4bd6657 100644 if (!ofm->temp_rule || ofm->temp_rule->state != RULE_INSERTED) { diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c -index e22ca757a..ba5706f6a 100644 +index e22ca757ac..ba5706f6ad 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -4880,7 +4880,7 @@ packet_xlate(struct ofproto *ofproto_, struct ofproto_packet_out *opo) @@ -1735,7 +2334,7 @@ index e22ca757a..ba5706f6a 100644 goto error_out; } diff --git a/ofproto/ofproto-provider.h b/ofproto/ofproto-provider.h -index 143ded690..9f7b8b6e8 100644 +index 143ded6904..9f7b8b6e83 100644 --- a/ofproto/ofproto-provider.h +++ b/ofproto/ofproto-provider.h @@ -2027,9 +2027,11 @@ enum ofperr ofproto_flow_mod_init_for_learn(struct ofproto *, @@ -1753,7 +2352,7 @@ index 143ded690..9f7b8b6e8 100644 OVS_REQUIRES(ofproto_mutex); void ofproto_flow_mod_learn_revert(struct ofproto_flow_mod *ofm) diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c -index dbf4958bc..e78c80d11 100644 +index dbf4958bc2..e78c80d115 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -5472,7 +5472,8 @@ ofproto_flow_mod_init_for_learn(struct ofproto *ofproto, @@ -1869,8 +2468,20 @@ index dbf4958bc..e78c80d11 100644 error = ofproto_flow_mod_learn_finish(ofm, NULL); } } else { +diff --git a/ofproto/tunnel.c b/ofproto/tunnel.c +index 3455ed233b..80ddee78ac 100644 +--- 
a/ofproto/tunnel.c ++++ b/ofproto/tunnel.c +@@ -432,6 +432,7 @@ tnl_port_send(const struct ofport_dpif *ofport, struct flow *flow, + flow->tunnel.ipv6_dst = in6addr_any; + } + } ++ flow->tunnel.tp_src = 0; /* Do not carry from a previous tunnel. */ + flow->tunnel.tp_dst = cfg->dst_port; + if (!cfg->out_key_flow) { + flow->tunnel.tun_id = cfg->out_key; diff --git a/ovsdb/condition.c b/ovsdb/condition.c -index 5a3eb4e8a..4911fbf59 100644 +index 5a3eb4e8a3..4911fbf59b 100644 --- a/ovsdb/condition.c +++ b/ovsdb/condition.c @@ -550,9 +550,14 @@ ovsdb_condition_diff(struct ovsdb_condition *diff, @@ -1891,8 +2502,20 @@ index 5a3eb4e8a..4911fbf59 100644 } } +diff --git a/ovsdb/jsonrpc-server.c b/ovsdb/jsonrpc-server.c +index 17868f5b72..5d10f54293 100644 +--- a/ovsdb/jsonrpc-server.c ++++ b/ovsdb/jsonrpc-server.c +@@ -215,6 +215,7 @@ ovsdb_jsonrpc_default_options(const char *target) + options->probe_interval = (stream_or_pstream_needs_probes(target) + ? RECONNECT_DEFAULT_PROBE_INTERVAL + : 0); ++ options->dscp = DSCP_DEFAULT; + return options; + } + diff --git a/ovsdb/monitor.c b/ovsdb/monitor.c -index 01091fabe..f4a9cf8fe 100644 +index 01091fabe7..f4a9cf8fe3 100644 --- a/ovsdb/monitor.c +++ b/ovsdb/monitor.c @@ -821,6 +821,7 @@ ovsdb_monitor_table_condition_update( @@ -1904,7 +2527,7 @@ index 01091fabe..f4a9cf8fe 100644 &mtc->old_condition, &mtc->new_condition); ovsdb_monitor_condition_add_columns(dbmon, diff --git a/ovsdb/ovsdb-doc b/ovsdb/ovsdb-doc -index 10d0c0c13..099770d25 100755 +index 10d0c0c134..099770d253 100755 --- a/ovsdb/ovsdb-doc +++ b/ovsdb/ovsdb-doc @@ -24,7 +24,7 @@ import ovs.json @@ -1916,8 +2539,106 @@ index 10d0c0c13..099770d25 100755 argv0 = sys.argv[0] +diff --git a/ovsdb/row.h b/ovsdb/row.h +index 59f498a20d..6f5e58acb3 100644 +--- a/ovsdb/row.h ++++ b/ovsdb/row.h +@@ -130,6 +130,7 @@ ovsdb_row_get_uuid(const struct ovsdb_row *row) + static inline struct uuid * + ovsdb_row_get_uuid_rw(struct ovsdb_row *row) + { ++ ovsdb_datum_unshare(&row->fields[OVSDB_COL_UUID], &ovsdb_type_uuid); + return &row->fields[OVSDB_COL_UUID].keys[0].uuid; + } + +@@ -142,6 +143,7 @@ ovsdb_row_get_version(const struct ovsdb_row *row) + static inline struct uuid * + ovsdb_row_get_version_rw(struct ovsdb_row *row) + { ++ ovsdb_datum_unshare(&row->fields[OVSDB_COL_VERSION], &ovsdb_type_uuid); + return &row->fields[OVSDB_COL_VERSION].keys[0].uuid; + } + +diff --git a/ovsdb/transaction.c b/ovsdb/transaction.c +index 7cf4a851aa..1bbebd65e5 100644 +--- a/ovsdb/transaction.c ++++ b/ovsdb/transaction.c +@@ -321,7 +321,8 @@ update_row_ref_count(struct ovsdb_txn *txn, struct ovsdb_txn_row *r) + const struct ovsdb_column *column = node->data; + struct ovsdb_error *error; + +- if (bitmap_is_set(r->changed, column->index)) { ++ if (bitmap_is_set(r->changed, column->index) ++ && ovsdb_type_has_strong_refs(&column->type)) { + if (r->old && !r->new) { + error = ovsdb_txn_adjust_row_refs( + txn, r->old, column, +@@ -716,6 +717,10 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) + unsigned int orig_n; + bool zero = false; + ++ if (!ovsdb_type_has_weak_refs(&column->type)) { ++ continue; ++ } ++ + orig_n = datum->n; + + /* Collecting all key-value pairs that references deleted rows. */ +@@ -731,18 +736,24 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) + ovsdb_datum_sort_unique(&deleted_refs, &column->type); + + /* Removing elements that references deleted rows. 
*/ +- ovsdb_datum_subtract(datum, &column->type, +- &deleted_refs, &column->type); ++ if (deleted_refs.n) { ++ ovsdb_datum_subtract(datum, &column->type, ++ &deleted_refs, &column->type); ++ } + ovsdb_datum_destroy(&deleted_refs, &column->type); + + /* Generating the difference between old and new data. */ +- if (txn_row->old) { +- ovsdb_datum_added_removed(&added, &removed, +- &txn_row->old->fields[column->index], +- datum, &column->type); +- } else { +- ovsdb_datum_init_empty(&removed); +- ovsdb_datum_clone(&added, datum); ++ ovsdb_datum_init_empty(&added); ++ ovsdb_datum_init_empty(&removed); ++ if (datum->n != orig_n ++ || bitmap_is_set(txn_row->changed, column->index)) { ++ if (txn_row->old) { ++ ovsdb_datum_added_removed(&added, &removed, ++ &txn_row->old->fields[column->index], ++ datum, &column->type); ++ } else { ++ ovsdb_datum_clone(&added, datum); ++ } + } + + /* Checking added data and creating new references. */ +diff --git a/ovsdb/trigger.c b/ovsdb/trigger.c +index 0edcdd89c6..2a48ccc643 100644 +--- a/ovsdb/trigger.c ++++ b/ovsdb/trigger.c +@@ -278,6 +278,14 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now) + return false; + } + ++ if (t->read_only) { ++ trigger_convert_error( ++ t, ovsdb_error("not allowed", "conversion is not allowed " ++ "for read-only database %s", ++ t->db->schema->name)); ++ return false; ++ } ++ + /* Validate parameters. */ + const struct json *params = t->request->params; + if (params->type != JSON_ARRAY || params->array.n != 2) { diff --git a/python/automake.mk b/python/automake.mk -index 82a508787..84cf2eab5 100644 +index 82a5087874..84cf2eab57 100644 --- a/python/automake.mk +++ b/python/automake.mk @@ -66,10 +66,10 @@ ovs_pytests = \ @@ -1969,10 +2690,29 @@ index 82a508787..84cf2eab5 100644 UNINSTALL_LOCAL += ovs-uninstall-local diff --git a/python/ovs/db/idl.py b/python/ovs/db/idl.py -index 9fc2159b0..16ece0334 100644 +index 9fc2159b04..a80da84e7a 100644 --- a/python/ovs/db/idl.py +++ b/python/ovs/db/idl.py -@@ -494,6 +494,7 @@ class Idl(object): +@@ -299,6 +299,7 @@ class Idl(object): + self._server_schema_request_id = None + self._server_monitor_request_id = None + self._db_change_aware_request_id = None ++ self._monitor_cancel_request_id = None + self._server_db_name = '_Server' + self._server_db_table = 'Database' + self.server_tables = None +@@ -481,6 +482,10 @@ class Idl(object): + break + else: + self.__parse_update(msg.params[1], OVSDB_UPDATE) ++ elif self.handle_monitor_canceled(msg): ++ break ++ elif self.handle_monitor_cancel_reply(msg): ++ break + elif (msg.type == ovs.jsonrpc.Message.T_REPLY + and self._monitor_request_id is not None + and self._monitor_request_id == msg.id): +@@ -494,6 +499,7 @@ class Idl(object): if not msg.result[0]: self.__clear() self.__parse_update(msg.result[2], OVSDB_UPDATE3) @@ -1980,8 +2720,156 @@ index 9fc2159b0..16ece0334 100644 elif self.state == self.IDL_S_DATA_MONITOR_COND_REQUESTED: self.__clear() self.__parse_update(msg.result, OVSDB_UPDATE2) +@@ -615,6 +621,33 @@ class Idl(object): + + return initial_change_seqno != self.change_seqno + ++ def handle_monitor_canceled(self, msg): ++ if msg.type != msg.T_NOTIFY: ++ return False ++ if msg.method != "monitor_canceled": ++ return False ++ ++ if msg.params[0] == str(self.uuid): ++ params = [str(self.server_monitor_uuid)] ++ elif msg.params[0] == str(self.server_monitor_uuid): ++ params = [str(self.uuid)] ++ else: ++ return False ++ ++ mc_msg = ovs.jsonrpc.Message.create_request("monitor_cancel", params) ++ self._monitor_cancel_request_id = 
mc_msg.id ++ self.send_request(mc_msg) ++ self.restart_fsm() ++ return True ++ ++ def handle_monitor_cancel_reply(self, msg): ++ if msg.type != msg.T_REPLY: ++ return False ++ if msg.id != self._monitor_cancel_request_id: ++ return False ++ self._monitor_cancel_request_id = None ++ return True ++ + def compose_cond_change(self): + if not self.cond_changed: + return +diff --git a/python/ovs/flow/odp.py b/python/ovs/flow/odp.py +index 88aee17fb2..7d9b165d46 100644 +--- a/python/ovs/flow/odp.py ++++ b/python/ovs/flow/odp.py +@@ -204,6 +204,7 @@ class ODPFlow(Flow): + """Generate the arguments for the action KVDecoders.""" + _decoders = { + "drop": decode_flag, ++ "meter": decode_int, + "lb_output": decode_int, + "trunc": decode_int, + "recirc": decode_int, +@@ -334,8 +335,31 @@ class ODPFlow(Flow): + ) + ), + **ODPFlow._tnl_action_decoder_args(), ++ "hash": nested_kv_decoder( ++ KVDecoders( ++ { ++ "l4": decode_int, ++ "sym_l4": decode_int, ++ } ++ ) ++ ), + } + ++ _decoders["sample"] = nested_kv_decoder( ++ KVDecoders( ++ { ++ "sample": (lambda x: float(x.strip("%"))), ++ "actions": nested_kv_decoder( ++ KVDecoders( ++ decoders=_decoders, ++ default_free=decode_free_output, ++ ), ++ is_list=True, ++ ), ++ } ++ ) ++ ) ++ + _decoders["clone"] = nested_kv_decoder( + KVDecoders(decoders=_decoders, default_free=decode_free_output), + is_list=True, +@@ -343,20 +367,6 @@ class ODPFlow(Flow): + + return { + **_decoders, +- "sample": nested_kv_decoder( +- KVDecoders( +- { +- "sample": (lambda x: float(x.strip("%"))), +- "actions": nested_kv_decoder( +- KVDecoders( +- decoders=_decoders, +- default_free=decode_free_output, +- ), +- is_list=True, +- ), +- } +- ) +- ), + "check_pkt_len": nested_kv_decoder( + KVDecoders( + { +@@ -365,13 +375,15 @@ class ODPFlow(Flow): + KVDecoders( + decoders=_decoders, + default_free=decode_free_output, +- ) ++ ), ++ is_list=True, + ), + "le": nested_kv_decoder( + KVDecoders( + decoders=_decoders, + default_free=decode_free_output, +- ) ++ ), ++ is_list=True, + ), + } + ) +diff --git a/python/ovs/flow/ofp.py b/python/ovs/flow/ofp.py +index 20231fd9f3..3d3226c919 100644 +--- a/python/ovs/flow/ofp.py ++++ b/python/ovs/flow/ofp.py +@@ -170,12 +170,13 @@ class OFPFlow(Flow): + args = { + "table": decode_int, + "duration": decode_time, +- "n_packet": decode_int, ++ "n_packets": decode_int, + "n_bytes": decode_int, + "cookie": decode_int, + "idle_timeout": decode_time, + "hard_timeout": decode_time, + "hard_age": decode_time, ++ "idle_age": decode_time, + } + return KVDecoders(args) + +diff --git a/python/ovs/flow/ofp_act.py b/python/ovs/flow/ofp_act.py +index c540443eae..2c85076a34 100644 +--- a/python/ovs/flow/ofp_act.py ++++ b/python/ovs/flow/ofp_act.py +@@ -54,6 +54,7 @@ def decode_controller(value): + "id": decode_int, + "userdata": decode_default, + "pause": decode_flag, ++ "meter_id": decode_int, + } + ) + )(value) diff --git a/python/ovs/jsonrpc.py b/python/ovs/jsonrpc.py -index d5127268a..d9fe27aec 100644 +index d5127268aa..d9fe27aec6 100644 --- a/python/ovs/jsonrpc.py +++ b/python/ovs/jsonrpc.py @@ -377,7 +377,7 @@ class Session(object): @@ -1993,6 +2881,279 @@ index d5127268a..d9fe27aec 100644 remotes = [remotes] self.remotes = remotes random.shuffle(self.remotes) +diff --git a/python/ovs/tests/test_odp.py b/python/ovs/tests/test_odp.py +index a50d3185cc..f19ec386e8 100644 +--- a/python/ovs/tests/test_odp.py ++++ b/python/ovs/tests/test_odp.py +@@ -13,6 +13,32 @@ from ovs.flow.decoders import ( + ) + + ++def do_test_section(input_string, section, expected): ++ flow 
= ODPFlow(input_string) ++ kv_list = flow.section(section).data ++ ++ assert len(expected) == len(kv_list) ++ ++ for i in range(len(expected)): ++ assert expected[i].key == kv_list[i].key ++ assert expected[i].value == kv_list[i].value ++ ++ # Assert positions relative to action string are OK. ++ pos = flow.section(section).pos ++ string = flow.section(section).string ++ ++ kpos = kv_list[i].meta.kpos ++ kstr = kv_list[i].meta.kstring ++ vpos = kv_list[i].meta.vpos ++ vstr = kv_list[i].meta.vstring ++ assert string[kpos : kpos + len(kstr)] == kstr ++ if vpos != -1: ++ assert string[vpos : vpos + len(vstr)] == vstr ++ ++ # Assert string meta is correct. ++ assert input_string[pos : pos + len(string)] == string ++ ++ + @pytest.mark.parametrize( + "input_string,expected", + [ +@@ -109,26 +135,7 @@ from ovs.flow.decoders import ( + ], + ) + def test_odp_fields(input_string, expected): +- odp = ODPFlow(input_string) +- match = odp.match_kv +- for i in range(len(expected)): +- assert expected[i].key == match[i].key +- assert expected[i].value == match[i].value +- +- # Assert positions relative to action string are OK. +- mpos = odp.section("match").pos +- mstring = odp.section("match").string +- +- kpos = match[i].meta.kpos +- kstr = match[i].meta.kstring +- vpos = match[i].meta.vpos +- vstr = match[i].meta.vstring +- assert mstring[kpos : kpos + len(kstr)] == kstr +- if vpos != -1: +- assert mstring[vpos : vpos + len(vstr)] == vstr +- +- # Assert mstring meta is correct. +- assert input_string[mpos : mpos + len(mstring)] == mstring ++ do_test_section(input_string, "match", expected) + + + @pytest.mark.parametrize( +@@ -512,48 +519,41 @@ def test_odp_fields(input_string, expected): + "check_pkt_len", + { + "size": 200, +- "gt": {"output": {"port": 4}}, +- "le": {"output": {"port": 5}}, ++ "gt": [{"output": {"port": 4}}], ++ "le": [{"output": {"port": 5}}], + }, + ), + KeyValue( + "check_pkt_len", + { + "size": 200, +- "gt": {"drop": True}, +- "le": {"output": {"port": 5}}, ++ "gt": [{"drop": True}], ++ "le": [{"output": {"port": 5}}], + }, + ), + KeyValue( + "check_pkt_len", + { + "size": 200, +- "gt": {"ct": {"nat": True}}, +- "le": {"drop": True}, ++ "gt": [{"ct": {"nat": True}}], ++ "le": [{"drop": True}], + }, + ), + ], + ), ++ ( ++ "actions:meter(1),hash(l4(0))", ++ [ ++ KeyValue("meter", 1), ++ KeyValue( ++ "hash", ++ { ++ "l4": 0, ++ } ++ ), ++ ], ++ ), + ], + ) + def test_odp_actions(input_string, expected): +- odp = ODPFlow(input_string) +- actions = odp.actions_kv +- for i in range(len(expected)): +- assert expected[i].key == actions[i].key +- assert expected[i].value == actions[i].value +- +- # Assert positions relative to action string are OK. +- apos = odp.section("actions").pos +- astring = odp.section("actions").string +- +- kpos = actions[i].meta.kpos +- kstr = actions[i].meta.kstring +- vpos = actions[i].meta.vpos +- vstr = actions[i].meta.vstring +- assert astring[kpos : kpos + len(kstr)] == kstr +- if vpos != -1: +- assert astring[vpos : vpos + len(vstr)] == vstr +- +- # Assert astring meta is correct. 
+- assert input_string[apos : apos + len(astring)] == astring ++ do_test_section(input_string, "actions", expected) +diff --git a/python/ovs/tests/test_ofp.py b/python/ovs/tests/test_ofp.py +index 27bcf0c47c..d098520cae 100644 +--- a/python/ovs/tests/test_ofp.py ++++ b/python/ovs/tests/test_ofp.py +@@ -6,6 +6,32 @@ from ovs.flow.kv import KeyValue, ParseError + from ovs.flow.decoders import EthMask, IPMask, decode_mask + + ++def do_test_section(input_string, section, expected): ++ flow = OFPFlow(input_string) ++ kv_list = flow.section(section).data ++ ++ assert len(expected) == len(kv_list) ++ ++ for i in range(len(expected)): ++ assert expected[i].key == kv_list[i].key ++ assert expected[i].value == kv_list[i].value ++ ++ # Assert positions relative to action string are OK. ++ pos = flow.section(section).pos ++ string = flow.section(section).string ++ ++ kpos = kv_list[i].meta.kpos ++ kstr = kv_list[i].meta.kstring ++ vpos = kv_list[i].meta.vpos ++ vstr = kv_list[i].meta.vstring ++ assert string[kpos : kpos + len(kstr)] == kstr ++ if vpos != -1: ++ assert string[vpos : vpos + len(vstr)] == vstr ++ ++ # Assert string meta is correct. ++ assert input_string[pos : pos + len(string)] == string ++ ++ + @pytest.mark.parametrize( + "input_string,expected", + [ +@@ -26,6 +52,21 @@ from ovs.flow.decoders import EthMask, IPMask, decode_mask + KeyValue("controller", {"max_len": 200}), + ], + ), ++ ( ++ "actions=controller(max_len=123,reason=no_match,id=456,userdata=00.00.00.12.00.00.00.00,meter_id=12)", # noqa: E501 ++ [ ++ KeyValue( ++ "controller", ++ { ++ "max_len": 123, ++ "reason": "no_match", ++ "id": 456, ++ "userdata": "00.00.00.12.00.00.00.00", ++ "meter_id": 12, ++ } ++ ), ++ ], ++ ), + ( + "actions=enqueue(foo,42),enqueue:foo:42,enqueue(bar,4242)", + [ +@@ -545,6 +586,20 @@ from ovs.flow.decoders import EthMask, IPMask, decode_mask + ), + ], + ), ++ ( ++ "actions=LOCAL,clone(sample(probability=123))", ++ [ ++ KeyValue("output", {"port": "LOCAL"}), ++ KeyValue( ++ "clone", ++ [ ++ {"sample": { ++ "probability": 123, ++ }}, ++ ] ++ ), ++ ], ++ ), + ( + "actions=doesnotexist(1234)", + ParseError, +@@ -570,27 +625,41 @@ from ovs.flow.decoders import EthMask, IPMask, decode_mask + def test_act(input_string, expected): + if isinstance(expected, type): + with pytest.raises(expected): +- ofp = OFPFlow(input_string) ++ OFPFlow(input_string) + return + +- ofp = OFPFlow(input_string) +- actions = ofp.actions_kv ++ do_test_section(input_string, "actions", expected) + +- for i in range(len(expected)): +- assert expected[i].key == actions[i].key +- assert expected[i].value == actions[i].value + +- # Assert positions relative to action string are OK. 
+- apos = ofp.section("actions").pos +- astring = ofp.section("actions").string ++@pytest.mark.parametrize( ++ "input_string,expected", ++ [ ++ ( ++ "cookie=0x35f946ead8d8f9e4, duration=97746.271s, table=0, n_packets=12, n_bytes=254, idle_age=117, priority=4,in_port=1", # noqa: E501 ++ ( ++ [ ++ KeyValue("cookie", 0x35f946ead8d8f9e4), ++ KeyValue("duration", 97746.271), ++ KeyValue("table", 0), ++ KeyValue("n_packets", 12), ++ KeyValue("n_bytes", 254), ++ KeyValue("idle_age", 117), ++ ], ++ [ ++ KeyValue("priority", 4), ++ KeyValue("in_port", 1) ++ ], ++ ), ++ ), ++ ], ++) ++def test_key(input_string, expected): ++ if isinstance(expected, type): ++ with pytest.raises(expected): ++ OFPFlow(input_string) ++ return + +- kpos = actions[i].meta.kpos +- kstr = actions[i].meta.kstring +- vpos = actions[i].meta.vpos +- vstr = actions[i].meta.vstring +- assert astring[kpos : kpos + len(kstr)] == kstr +- if vpos != -1: +- assert astring[vpos : vpos + len(vstr)] == vstr ++ input_string += " actions=drop" + +- # Assert astring meta is correct. +- assert input_string[apos : apos + len(astring)] == astring ++ do_test_section(input_string, "info", expected[0]) ++ do_test_section(input_string, "match", expected[1]) diff --git a/python/build/__init__.py b/python/ovs_build_helpers/__init__.py similarity index 100% rename from python/build/__init__.py @@ -2010,7 +3171,7 @@ similarity index 100% rename from python/build/soutil.py rename to python/ovs_build_helpers/soutil.py diff --git a/tests/.gitignore b/tests/.gitignore -index 83b1cb3b4..3a8c45975 100644 +index 83b1cb3b48..3a8c459756 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -3,6 +3,7 @@ @@ -2022,7 +3183,7 @@ index 83b1cb3b4..3a8c45975 100644 /idltest.h /idltest.ovsidl diff --git a/tests/learn.at b/tests/learn.at -index d127fed34..d0bcc8363 100644 +index d127fed348..d0bcc83633 100644 --- a/tests/learn.at +++ b/tests/learn.at @@ -836,3 +836,63 @@ AT_CHECK([ovs-vsctl add-br br1 -- set b br1 datapath_type=dummy]) @@ -2089,8 +3250,341 @@ index d127fed34..d0bcc8363 100644 + +OVS_VSWITCHD_STOP +AT_CLEANUP +diff --git a/tests/mcast-snooping.at b/tests/mcast-snooping.at +index d5b7c4774c..faeb7890d9 100644 +--- a/tests/mcast-snooping.at ++++ b/tests/mcast-snooping.at +@@ -105,6 +105,328 @@ AT_CHECK([ovs-appctl mdb/show br0], [0], [dnl + OVS_VSWITCHD_STOP + AT_CLEANUP + ++ ++AT_SETUP([mcast - check multicast per port flooding]) ++OVS_VSWITCHD_START([]) ++ ++AT_CHECK([ ++ ovs-vsctl set bridge br0 \ ++ datapath_type=dummy \ ++ mcast_snooping_enable=true \ ++ other-config:mcast-snooping-disable-flood-unregistered=false ++], [0]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++ ++AT_CHECK([ ++ ovs-vsctl add-port br0 p1 \ ++ -- set Interface p1 type=dummy other-config:hwaddr=aa:55:aa:55:00:01 ofport_request=1 \ ++ -- add-port br0 p2 \ ++ -- set Interface p2 type=dummy other-config:hwaddr=aa:55:aa:55:00:02 ofport_request=2 \ ++ -- add-port br0 p3 \ ++ -- set Interface p3 type=dummy other-config:hwaddr=aa:55:aa:55:00:03 ofport_request=3 \ ++], [0]) ++ ++ovs-appctl time/stop ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [stdout]) ++AT_CHECK([grep -v 'Datapath actions:' stdout], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") 
++------------- ++ 0. priority 32768 ++ NORMAL ++ -> unregistered multicast, flooding ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++]) ++AT_CHECK([sed -ne 's/^Datapath actions: \(.*\)$/\1/p' stdout | tr "," "\n" | sort -n], [0], [dnl ++1 ++2 ++100 ++]) ++ ++# Send report packets. ++AT_CHECK([ ++ ovs-appctl netdev-dummy/receive p1 \ ++ '01005E010101000C29A027A108004500001C000100004002CBAEAC10221EE001010112140CE9E0010101' ++], [0]) ++AT_CHECK([ovs-appctl mdb/show br0], [0], [dnl ++ port VLAN GROUP Age ++ 1 0 224.1.1.1 0 ++]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding to mcast group port ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++Datapath actions: 1 ++]) ++ ++AT_CHECK([ovs-vsctl set port p2 other_config:mcast-snooping-flood=true]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding to mcast group port ++ -> forwarding to mcast flood port ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++Datapath actions: 1,2 ++]) ++ ++AT_CHECK([ovs-vsctl set port p3 other_config:mcast-snooping-flood=true]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding to mcast group port ++ -> forwarding to mcast flood port ++ -> mcast flood port is input port, dropping ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++Datapath actions: 1,2 ++]) ++ ++# Change p2 ofport to force a ofbundle change and check that the mdb contains ++# no stale port. ++AT_CHECK([ovs-vsctl set interface p2 ofport_request=4]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") ++------------- ++ 0. 
priority 32768 ++ NORMAL ++ -> forwarding to mcast group port ++ -> mcast flood port is input port, dropping ++ -> forwarding to mcast flood port ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++Datapath actions: 1,2 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ ++ ++AT_SETUP([mcast - check multicast per port flooding (unregistered flood disabled)]) ++OVS_VSWITCHD_START([]) ++ ++AT_CHECK([ ++ ovs-vsctl set bridge br0 \ ++ datapath_type=dummy \ ++ mcast_snooping_enable=true \ ++ other-config:mcast-snooping-disable-flood-unregistered=true ++], [0]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++ ++AT_CHECK([ ++ ovs-vsctl add-port br0 p1 \ ++ -- set Interface p1 type=dummy other-config:hwaddr=aa:55:aa:55:00:01 ofport_request=1 \ ++ -- add-port br0 p2 \ ++ -- set Interface p2 type=dummy other-config:hwaddr=aa:55:aa:55:00:02 ofport_request=2 \ ++ -- add-port br0 p3 \ ++ -- set Interface p3 type=dummy other-config:hwaddr=aa:55:aa:55:00:03 ofport_request=3 \ ++], [0]) ++ ++ovs-appctl time/stop ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++Datapath actions: drop ++]) ++ ++AT_CHECK([ovs-vsctl set port p2 other_config:mcast-snooping-flood=true]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding to mcast flood port ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++Datapath actions: 2 ++]) ++ ++AT_CHECK([ovs-vsctl set port p3 other_config:mcast-snooping-flood=true]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(3),eth(src=aa:55:aa:55:00:ff,dst=01:00:5e:01:01:01),eth_type(0x0800),ipv4(src=10.0.0.1,dst=224.1.1.1,proto=17,tos=0,ttl=64,frag=no),udp(src=0,dst=8000)"], [0], [dnl ++Flow: udp,in_port=3,vlan_tci=0x0000,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_src=10.0.0.1,nw_dst=224.1.1.1,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=0,tp_dst=8000 ++ ++bridge("br0") ++------------- ++ 0. 
priority 32768 ++ NORMAL ++ -> forwarding to mcast flood port ++ -> mcast flood port is input port, dropping ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,udp,in_port=3,dl_src=aa:55:aa:55:00:ff,dl_dst=01:00:5e:01:01:01,nw_dst=224.1.1.1,nw_frag=no ++Datapath actions: 2 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ ++ ++AT_SETUP([mcast - check reports per port flooding]) ++OVS_VSWITCHD_START([]) ++ ++AT_CHECK([ ++ ovs-vsctl set bridge br0 \ ++ datapath_type=dummy \ ++ mcast_snooping_enable=true \ ++ other-config:mcast-snooping-disable-flood-unregistered=false ++], [0]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++ ++AT_CHECK([ ++ ovs-vsctl add-port br0 p1 \ ++ -- set Interface p1 type=dummy other-config:hwaddr=aa:55:aa:55:00:01 ofport_request=1 \ ++ -- add-port br0 p2 \ ++ -- set Interface p2 type=dummy other-config:hwaddr=aa:55:aa:55:00:02 ofport_request=2 \ ++ -- add-port br0 p3 \ ++ -- set Interface p3 type=dummy other-config:hwaddr=aa:55:aa:55:00:03 ofport_request=3 \ ++], [0]) ++ ++ovs-appctl time/stop ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(1)" '01005E010101000C29A027A108004500001C000100004002CBAEAC10221EE001010112140CE9E0010101'], [0], [dnl ++Flow: ip,in_port=1,vlan_tci=0x0000,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_src=172.16.34.30,nw_dst=224.1.1.1,nw_proto=2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=18,tp_dst=20 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> learned that 00:0c:29:a0:27:a1 is on port p1 in VLAN 0 ++ -> multicast snooping learned that 224.1.1.1 is on port p1 in VLAN 0 ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,ip,in_port=1,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_proto=2,nw_frag=no ++Datapath actions: drop ++This flow is handled by the userspace slow path because it: ++ - Uses action(s) not supported by datapath. ++]) ++ ++AT_CHECK([ovs-vsctl set port p3 other_config:mcast-snooping-flood-reports=true]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(1)" '01005E010101000C29A027A108004500001C000100004002CBAEAC10221EE001010112140CE9E0010101'], [0], [dnl ++Flow: ip,in_port=1,vlan_tci=0x0000,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_src=172.16.34.30,nw_dst=224.1.1.1,nw_proto=2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=18,tp_dst=20 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding report to mcast flagged port ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,ip,in_port=1,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_proto=2,nw_frag=no ++Datapath actions: 3 ++This flow is handled by the userspace slow path because it: ++ - Uses action(s) not supported by datapath. ++]) ++ ++AT_CHECK([ovs-vsctl set port p2 other_config:mcast-snooping-flood-reports=true]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(1)" '01005E010101000C29A027A108004500001C000100004002CBAEAC10221EE001010112140CE9E0010101'], [0], [dnl ++Flow: ip,in_port=1,vlan_tci=0x0000,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_src=172.16.34.30,nw_dst=224.1.1.1,nw_proto=2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=18,tp_dst=20 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding report to mcast flagged port ++ -> forwarding report to mcast flagged port ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,ip,in_port=1,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_proto=2,nw_frag=no ++Datapath actions: 3,2 ++This flow is handled by the userspace slow path because it: ++ - Uses action(s) not supported by datapath. 
++]) ++ ++AT_CHECK([ovs-vsctl set port p1 other_config:mcast-snooping-flood-reports=true]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(1)" '01005E010101000C29A027A108004500001C000100004002CBAEAC10221EE001010112140CE9E0010101'], [0], [dnl ++Flow: ip,in_port=1,vlan_tci=0x0000,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_src=172.16.34.30,nw_dst=224.1.1.1,nw_proto=2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=18,tp_dst=20 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding report to mcast flagged port ++ -> forwarding report to mcast flagged port ++ -> mcast port is input port, dropping the Report ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,ip,in_port=1,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_proto=2,nw_frag=no ++Datapath actions: 3,2 ++This flow is handled by the userspace slow path because it: ++ - Uses action(s) not supported by datapath. ++]) ++ ++# Change p2 ofport to force a ofbundle change and check that the mdb contains ++# no stale port. ++AT_CHECK([ovs-vsctl set interface p3 ofport_request=4]) ++ ++AT_CHECK([ovs-appctl ofproto/trace "in_port(1)" '01005E010101000C29A027A108004500001C000100004002CBAEAC10221EE001010112140CE9E0010101'], [0], [dnl ++Flow: ip,in_port=1,vlan_tci=0x0000,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_src=172.16.34.30,nw_dst=224.1.1.1,nw_proto=2,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,tp_src=18,tp_dst=20 ++ ++bridge("br0") ++------------- ++ 0. priority 32768 ++ NORMAL ++ -> forwarding report to mcast flagged port ++ -> mcast port is input port, dropping the Report ++ -> forwarding report to mcast flagged port ++ ++Final flow: unchanged ++Megaflow: recirc_id=0,eth,ip,in_port=1,dl_src=00:0c:29:a0:27:a1,dl_dst=01:00:5e:01:01:01,nw_proto=2,nw_frag=no ++Datapath actions: 2,3 ++This flow is handled by the userspace slow path because it: ++ - Uses action(s) not supported by datapath. 
++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ ++ + AT_SETUP([mcast - delete the port mdb when vlan configuration changed]) + OVS_VSWITCHD_START([]) + diff --git a/tests/mfex_fuzzy.py b/tests/mfex_fuzzy.py -index 30028ba7a..50b987064 100755 +index 30028ba7a0..50b9870641 100755 --- a/tests/mfex_fuzzy.py +++ b/tests/mfex_fuzzy.py @@ -3,12 +3,15 @@ @@ -2115,8 +3609,35 @@ index 30028ba7a..50b987064 100755 # flake8: noqa: E402 from scapy.all import RandMAC, RandIP, PcapWriter, RandIP6, RandShort, fuzz +diff --git a/tests/ofp-print.at b/tests/ofp-print.at +index 14aa554169..6a07e23c64 100644 +--- a/tests/ofp-print.at ++++ b/tests/ofp-print.at +@@ -4180,4 +4180,22 @@ AT_CHECK([ovs-ofctl ofp-print "\ + 00 01 00 20 00 00 00 00 \ + 00 00 00 14 00 00 00 00 00 00 00 00 00 00 ff ff 0a 0a 00 02 00 00 00 00 \ + " | grep -q OFPBPC_BAD_VALUE], [0]) ++ ++AT_CHECK([ovs-ofctl ofp-print "\ ++01 04 00 20 00 00 00 03 00 00 23 20 00 00 00 20 \ ++06 \ ++00 00 00 00 00 00 00 \ ++00 80 00 08 00 00 00 00 \ ++"| grep -q OFPBPC_BAD_TYPE], [0], [ignore], [stderr]) ++AT_CHECK([grep -q "unknown NXT_CT_FLUSH property type 128" stderr], [0]) ++ ++AT_CHECK([ovs-ofctl ofp-print "\ ++01 04 00 28 00 00 00 03 00 00 23 20 00 00 00 20 \ ++06 \ ++00 00 00 00 00 00 00 \ ++00 00 00 10 00 00 00 00 \ ++00 80 00 08 00 50 00 00 \ ++"| grep -q OFPBPC_BAD_TYPE], [0], [ignore], [stderr]) ++AT_CHECK([grep -q "unknown NXT_CT_TUPLE property type 128" stderr], [0]) ++ + AT_CLEANUP diff --git a/tests/ofproto-dpif.at b/tests/ofproto-dpif.at -index f242f77f3..a39d0d3ae 100644 +index f242f77f31..a39d0d3ae9 100644 --- a/tests/ofproto-dpif.at +++ b/tests/ofproto-dpif.at @@ -5854,6 +5854,40 @@ OVS_WAIT_UNTIL([check_flows], [ovs-ofctl dump-flows br0]) @@ -2160,11 +3681,37 @@ index f242f77f3..a39d0d3ae 100644 AT_SETUP([ofproto-dpif - debug_slow action]) OVS_VSWITCHD_START add_of_ports br0 1 2 3 -diff --git a/tests/ofproto.at b/tests/ofproto.at -index 2fa8486a8..2889f81fb 100644 ---- a/tests/ofproto.at -+++ b/tests/ofproto.at -@@ -6720,3 +6720,31 @@ recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0806),arp(tip=172.31.1 +diff --git a/tests/ofproto-macros.at b/tests/ofproto-macros.at +index d2e6ac768b..6213e6d91c 100644 +--- a/tests/ofproto-macros.at ++++ b/tests/ofproto-macros.at +@@ -141,6 +141,21 @@ strip_stats () { + s/bytes:[[0-9]]*/bytes:0/' + } + ++# Strips key32 field from output. ++strip_key32 () { ++ sed 's/key32([[0-9 \/]]*),//' ++} ++ ++# Strips packet-type from output. ++strip_ptype () { ++ sed 's/packet_type(ns=[[0-9]]*,id=[[0-9]]*),//' ++} ++ ++# Strips bare eth from output. ++strip_eth () { ++ sed 's/eth(),//' ++} ++ + # Changes all 'recirc(...)' and 'recirc=...' to say 'recirc()' and + # 'recirc=' respectively. This should make output easier to + # compare. 
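The strip_key32/strip_ptype/strip_eth helpers added above are plain sed filters over `ovs-appctl dpctl/dump-flows` output; the doubled `[[0-9]]` brackets are only m4 quoting and collapse to `[0-9]` in the generated testsuite. A minimal sketch of how they compose outside Autotest (the sample flow line is hypothetical, but follows the dump-flows format used elsewhere in these tests):

    strip_ptype () { sed 's/packet_type(ns=[0-9]*,id=[0-9]*),//'; }
    strip_eth ()   { sed 's/eth(),//'; }

    # A flow as "ovs-appctl dpctl/dump-flows" might print it:
    flow='recirc_id(0),packet_type(ns=0,id=0),eth(),eth_type(0x86dd),ipv6(proto=58,frag=no)'
    echo "$flow" | strip_ptype | strip_eth
    # -> recirc_id(0),eth_type(0x86dd),ipv6(proto=58,frag=no)

Normalizing away fields that vary between kernel and userspace datapaths is what lets a single expected-output block serve both.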
+diff --git a/tests/ofproto.at b/tests/ofproto.at +index 2fa8486a86..2889f81fb1 100644 +--- a/tests/ofproto.at ++++ b/tests/ofproto.at +@@ -6720,3 +6720,31 @@ recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0806),arp(tip=172.31.1 OVS_VSWITCHD_STOP AT_CLEANUP @@ -2196,11 +3743,115 @@ index 2fa8486a8..2889f81fb 100644 +OVS_WAIT_UNTIL([grep "idle 6 seconds, sending inactivity probe" ovs-vswitchd.log]) +OVS_VSWITCHD_STOP(["/br0<->unix:testcontroller: connection failed/d"]) +AT_CLEANUP +diff --git a/tests/ovsdb-client.at b/tests/ovsdb-client.at +index 68fb962bd7..dcddb25874 100644 +--- a/tests/ovsdb-client.at ++++ b/tests/ovsdb-client.at +@@ -270,8 +270,8 @@ AT_CHECK([ovsdb-client --replay=./replay_dir dnl + dnl Waiting for client to exit the same way as it exited during recording. + OVS_WAIT_WHILE([test -e ovsdb-client.pid]) + +-AT_CHECK([diff monitor.stdout monitor-replay.stdout]) +-AT_CHECK([diff monitor.stderr monitor-replay.stderr]) ++AT_CHECK([diff -u monitor.stdout monitor-replay.stdout]) ++AT_CHECK([diff -u monitor.stderr monitor-replay.stderr]) + + dnl Stripping out timestamps, PIDs and poll_loop warnings from the log. + dnl Also stripping socket_util errors as sockets are not used in replay. +@@ -284,6 +284,6 @@ m4_define([CLEAN_LOG_FILE], + CLEAN_LOG_FILE([monitor.log], [monitor.log.clear]) + CLEAN_LOG_FILE([monitor-replay.log], [monitor-replay.log.clear]) + +-AT_CHECK([diff monitor.log.clear monitor-replay.log.clear]) ++AT_CHECK([diff -u monitor.log.clear monitor-replay.log.clear]) + + AT_CLEANUP diff --git a/tests/ovsdb-idl.at b/tests/ovsdb-idl.at -index df5a9d2fd..1028b0237 100644 +index df5a9d2fd2..fb568dd823 100644 --- a/tests/ovsdb-idl.at +++ b/tests/ovsdb-idl.at -@@ -2332,6 +2332,23 @@ CHECK_STREAM_OPEN_BLOCK([Python3], [$PYTHON3 $srcdir/test-stream.py], +@@ -29,8 +29,8 @@ m4_define([OVSDB_START_IDLTEST], + AT_CHECK([ovsdb-tool create db dnl + m4_if([$2], [], [$abs_srcdir/idltest.ovsschema], [$2])]) + PKIDIR=$abs_top_builddir/tests +- AT_CHECK([ovsdb-server -vconsole:warn --log-file --detach --no-chdir dnl +- --pidfile --remote=punix:socket dnl ++ AT_CHECK([ovsdb-server -vconsole:warn -vfile:dbg --log-file dnl ++ --detach --no-chdir --pidfile --remote=punix:socket dnl + m4_if(m4_substr($1, 0, 5), [pssl:], + [--private-key=$PKIDIR/testpki-privkey2.pem dnl + --certificate=$PKIDIR/testpki-cert2.pem dnl +@@ -57,9 +57,9 @@ m4_define([OVSDB_CLUSTER_START_IDLTEST], + done + on_exit 'kill $(cat s*.pid)' + for i in $(seq $n); do +- AT_CHECK([ovsdb-server -vraft -vconsole:warn --detach --no-chdir \ +- --log-file=s$i.log --pidfile=s$i.pid --unixctl=s$i \ +- --remote=punix:s$i.ovsdb \ ++ AT_CHECK([ovsdb-server -vraft -vconsole:warn -vfile:dbg --detach \ ++ --no-chdir --log-file=s$i.log --pidfile=s$i.pid \ ++ --unixctl=s$i --remote=punix:s$i.ovsdb \ + m4_if([$2], [], [], [--remote=$2]) s$i.db]) + done + +@@ -1466,6 +1466,56 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, references, singl + 006: done + ]]) + ++OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, weak references, insert+delete batch], ++ [['["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row0_s"}, ++ "uuid-name": "uuid_row0_s"}, ++ {"op": "insert", ++ "table": "simple6", ++ "row": {"name": "row0_s6", ++ "weak_ref": ["set", ++ [["named-uuid", "uuid_row0_s"]] ++ ]}}]']], ++ [['condition simple [true];simple6 [true]' \ ++ '["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row1_s"}, ++ "uuid-name": "uuid_row1_s"}, ++ {"op": "mutate", ++ "table": 
"simple6", ++ "where": [["name", "==", "row0_s6"]], ++ "mutations": [["weak_ref", "insert", ["set", [["named-uuid", "uuid_row1_s"]]]]]}]' \ ++ '+["idltest", ++ {"op": "delete", ++ "table": "simple", ++ "where": [["s", "==", "row1_s"]]}]' \ ++ '["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row2_s"}}]']], ++ [[000: simple6: conditions unchanged ++000: simple: conditions unchanged ++001: table simple6: inserted row: name=row0_s6 weak_ref=[<0>] uuid=<1> ++001: table simple6: updated columns: name weak_ref ++001: table simple: inserted row: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<0> ++001: table simple: updated columns: s ++002: {"error":null,"result":[{"uuid":["uuid","<3>"]},{"count":1}]} ++003: {"error":null,"result":[{"count":1}]} ++004: table simple6: name=row0_s6 weak_ref=[<0>] uuid=<1> ++004: table simple6: updated columns: weak_ref ++004: table simple: inserted/deleted row: i=0 r=0 b=false s=row1_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<3> ++004: table simple: updated columns: s ++005: {"error":null,"result":[{"uuid":["uuid","<4>"]}]} ++006: table simple6: name=row0_s6 weak_ref=[<0>] uuid=<1> ++006: table simple: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<0> ++006: table simple: inserted row: i=0 r=0 b=false s=row2_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<4> ++006: table simple: updated columns: s ++007: done ++]]) ++ + dnl This test checks that deleting both the destination and source of the + dnl reference doesn't remove the reference in the source tracked record. + OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, weak references, multiple deletes], +@@ -2332,6 +2382,23 @@ CHECK_STREAM_OPEN_BLOCK([Python3], [$PYTHON3 $srcdir/test-stream.py], CHECK_STREAM_OPEN_BLOCK([Python3], [$PYTHON3 $srcdir/test-stream.py], [ssl6], [[[::1]]]) @@ -2224,7 +3875,7 @@ index df5a9d2fd..1028b0237 100644 # same as OVSDB_CHECK_IDL but uses Python IDL implementation with tcp # with multiple remotes to assert the idl connects to the leader of the Raft cluster m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], -@@ -2347,10 +2364,11 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], +@@ -2347,10 +2414,11 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], pids=$(cat s2.pid s3.pid s1.pid | tr '\n' ',') echo $pids AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t30 idl-cluster $srcdir/idltest.ovsschema $remotes $pids $3], @@ -2237,7 +3888,7 @@ index df5a9d2fd..1028b0237 100644 AT_CLEANUP]) OVSDB_CHECK_IDL_LEADER_ONLY_PY([Check Python IDL connects to leader], 3, ['remote']) -@@ -2393,6 +2411,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_C], +@@ -2393,6 +2461,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_C], AT_CHECK([sort stdout | uuidfilt]m4_if([$7],,, [[| $7]]), [0], [$5]) m4_ifval([$8], [AT_CHECK([grep '$8' stderr], [1])], [], []) @@ -2245,7 +3896,7 @@ index df5a9d2fd..1028b0237 100644 AT_CLEANUP]) # Same as OVSDB_CHECK_CLUSTER_IDL_C but uses the Python IDL implementation. 
-@@ -2413,6 +2432,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_PY], +@@ -2413,6 +2482,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_PY], AT_CHECK([sort stdout | uuidfilt]m4_if([$7],,, [[| $7]]), [0], [$5]) m4_if([$8], [AT_CHECK([grep '$8' stderr], [1])], [], []) @@ -2253,8 +3904,102 @@ index df5a9d2fd..1028b0237 100644 AT_CLEANUP]) m4_define([OVSDB_CHECK_CLUSTER_IDL], +@@ -2686,3 +2756,93 @@ OVSDB_CHECK_IDL_PERS_UUID_INSERT([simple idl, persistent uuid insert], + 011: done + ]], + [['This UUID would duplicate a UUID already present within the table or deleted within the same transaction']]) ++ ++ ++m4_define([OVSDB_CHECK_IDL_CHANGE_AWARE], ++ [AT_SETUP([simple idl, database change aware, online conversion - $1]) ++ AT_KEYWORDS([ovsdb server idl db_change_aware conversion $1]) ++ ++ m4_if([$1], [clustered], ++ [OVSDB_CLUSTER_START_IDLTEST([1], [punix:socket])], ++ [OVSDB_START_IDLTEST]) ++ ++ dnl Add some data. ++ AT_CHECK([[ovsdb-client transact unix:socket '["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"i": 1, ++ "r": 2.0, ++ "b": true, ++ "s": "first row", ++ "u": ["uuid", "84f5c8f5-ac76-4dbc-a24f-8860eb407fc1"], ++ "ia": ["set", [1, 2, 3]], ++ "ra": ["set", [-0.5]], ++ "ba": ["set", [true]], ++ "sa": ["set", ["abc", "def"]], ++ "ua": ["set", [["uuid", "69443985-7806-45e2-b35f-574a04e720f9"], ++ ["uuid", "aad11ef0-816a-4b01-93e6-03b8b4256b98"]]]}}, ++ {"op": "insert", ++ "table": "simple", ++ "row": {"b": false, "s": "second row"}}, ++ {"op": "insert", ++ "table": "simple", ++ "row": {"b": true, "s": "third row"}} ++ ]']], [0], [stdout]) ++ ++ dnl Create a new schema by adding 'extra_column' to the 'simple' table. ++ AT_CHECK([sed 's/"ua": {/"extra_column":{"type": "string"},"ua": {/ ++ s/1.2.3/1.2.4/' \ ++ $abs_srcdir/idltest.ovsschema > new-idltest.ovsschema]) ++ dnl Try "needs-conversion". ++ AT_CHECK([ovsdb-client needs-conversion unix:socket $abs_srcdir/idltest.ovsschema], [0], [no ++]) ++ AT_CHECK([ovsdb-client needs-conversion unix:socket new-idltest.ovsschema], [0], [yes ++]) ++ ++ dnl Conditionally exclude the second row from monitoring. ++ m4_define([COND], [['condition simple [["b","==",true]]']]) ++ ++ dnl Start monitoring. ++ OVS_DAEMONIZE([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t30 \ ++ idl unix:socket COND monitor \ ++ >idl-c.out 2>idl-c.err], [idl-c.pid]) ++ AT_CAPTURE_FILE([idl-c.out]) ++ AT_CAPTURE_FILE([idl-c.err]) ++ ++ OVS_DAEMONIZE([$PYTHON3 $srcdir/test-ovsdb.py -t30 \ ++ idl $srcdir/idltest.ovsschema unix:socket COND monitor \ ++ >idl-python.out 2>idl-python.err], [idl-python.pid]) ++ AT_CAPTURE_FILE([idl-python.out]) ++ AT_CAPTURE_FILE([idl-python.err]) ++ ++ dnl Wait for monitors to receive the data. ++ OVS_WAIT_UNTIL([grep -q 'third row' idl-c.err]) ++ OVS_WAIT_UNTIL([grep -q 'third row' idl-python.err]) ++ ++ dnl Convert the database. ++ AT_CHECK([ovsdb-client convert unix:socket new-idltest.ovsschema]) ++ ++ dnl Check for the monitor cancellation and the data being requested again. ++ m4_foreach([FILE], [[idl-c], [idl-python]], ++ [OVS_WAIT_UNTIL([grep -q 'monitor_canceled' FILE.err]) ++ OVS_WAIT_UNTIL([test 2 -eq $(grep -c 'send request, method="monitor_cond_since", params=."idltest"' FILE.err)]) ++ ++ dnl XXX: Checking for the new schema bits conditionally because standalone ++ dnl databases are not updating the schema in the _Server database properly. ++ m4_if([$1], [clustered], [OVS_WAIT_UNTIL([grep -q 'extra_column' FILE.err])]) ++ ++ dnl Check that there were no unexpected messages. ++ AT_CHECK([! 
grep 'unexpected' FILE.err]) ++ ++ dnl Check that the data is received twice and the condition is working. ++ AT_CHECK([sort FILE.out | uuidfilt], [0], ++[[000: simple: change conditions ++001: table simple: i=0 r=0 b=true s=third row u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> ++001: table simple: i=1 r=2 b=true s=first row u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<5> ++002: table simple: i=0 r=0 b=true s=third row u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> ++002: table simple: i=1 r=2 b=true s=first row u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<5> ++]])]) ++ AT_CLEANUP]) ++ ++OVSDB_CHECK_IDL_CHANGE_AWARE([standalone]) ++OVSDB_CHECK_IDL_CHANGE_AWARE([clustered]) diff --git a/tests/ovsdb-monitor.at b/tests/ovsdb-monitor.at -index 12cd2bc31..3e1df18a1 100644 +index 12cd2bc319..3e1df18a11 100644 --- a/tests/ovsdb-monitor.at +++ b/tests/ovsdb-monitor.at @@ -586,6 +586,7 @@ row,action,name,number,_version @@ -2265,8 +4010,56 @@ index 12cd2bc31..3e1df18a1 100644 [[[["name","==","one"]]]], [[[false]]], [[[true]]]]) +diff --git a/tests/ovsdb-server.at b/tests/ovsdb-server.at +index d36c3c117e..6eb758e229 100644 +--- a/tests/ovsdb-server.at ++++ b/tests/ovsdb-server.at +@@ -1830,9 +1830,14 @@ replication_schema > schema + AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore]) + AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore]) + +-AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore]) ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], ++ [0], [ignore], [ignore]) + +-AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=2.pid --remote=punix:db2.sock --unixctl=unixctl2 db2], [0], [ignore], [ignore]) ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server2.log --pidfile=2.pid \ ++ --remote=punix:db2.sock --unixctl=unixctl2 db2], ++ [0], [ignore], [ignore]) + + dnl Try to connect without specifying the active server. + AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/connect-active-ovsdb-server], [0], +@@ -2153,9 +2158,16 @@ AT_CHECK([ovsdb-tool transact db2 \ + + dnl Start both 'db1' and 'db2'. + on_exit 'kill `cat *.pid`' +-AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock --unixctl="`pwd`"/unixctl db1 --active ], [0], [ignore], [ignore]) ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server1.log --pidfile \ ++ --remote=punix:db.sock \ ++ --unixctl="$(pwd)"/unixctl db1 --active ], ++ [0], [ignore], [ignore]) + +-AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=2.pid --remote=punix:db2.sock --unixctl="`pwd`"/unixctl2 db2], [0], [ignore], [ignore]) ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server2.log --pidfile=2.pid \ ++ --remote=punix:db2.sock --unixctl="$(pwd)"/unixctl2 db2], ++ [0], [ignore], [ignore]) + + OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status |grep active]) + OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/sync-status |grep active]) +@@ -2382,6 +2394,6 @@ CLEAN_LOG_FILE([2.log], [2.log.clear]) + + dnl Checking that databases and logs are equal. 
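A replay run is only correct if it reproduces the recorded session byte for byte once volatile fields are masked, so the checks that follow normalize both logs with CLEAN_LOG_FILE and then compare them; switching from bare `diff` to `diff -u` leaves a readable context snippet in the test log when something diverges. Roughly the same normalize-and-compare pattern outside the suite (file names taken from the test; the sed mask is a simplified stand-in for CLEAN_LOG_FILE, not its actual definition):

    # Mask timestamps and sequence numbers, then compare recorded vs. replayed logs.
    mask () { sed 's/^[-0-9:TZ.+]*|[0-9]*|/|/'; }
    mask < monitor.log        > monitor.log.clear
    mask < monitor-replay.log > monitor-replay.log.clear
    diff -u monitor.log.clear monitor-replay.log.clear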
+ AT_CHECK([diff db.clear ./replay_dir/db.copy.clear]) +-AT_CHECK([diff 1.log.clear 2.log.clear]) ++AT_CHECK([diff -u 1.log.clear 2.log.clear]) + + AT_CLEANUP diff --git a/tests/pmd.at b/tests/pmd.at -index 7c333a901..7bdaca9e7 100644 +index 7c333a901b..7bdaca9e71 100644 --- a/tests/pmd.at +++ b/tests/pmd.at @@ -1355,18 +1355,22 @@ AT_CHECK([echo 'table=0,in_port=p1,ip,nw_dst=10.1.0.0/16 actions=ct(commit)' | d @@ -2295,7 +4088,7 @@ index 7c333a901..7bdaca9e7 100644 ovs-ofctl --bundle replace-flows br0 -]) diff --git a/tests/system-afxdp.at b/tests/system-afxdp.at -index 0d09906fb..88f660566 100644 +index 0d09906fb6..88f6605663 100644 --- a/tests/system-afxdp.at +++ b/tests/system-afxdp.at @@ -39,7 +39,7 @@ AT_CHECK([ovs-vsctl add-port br0 ovs-p0 -- \ @@ -2307,8 +4100,68 @@ index 0d09906fb..88f660566 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) +diff --git a/tests/system-dpdk.at b/tests/system-dpdk.at +index 0f58e85742..5ed1c7e676 100644 +--- a/tests/system-dpdk.at ++++ b/tests/system-dpdk.at +@@ -585,14 +585,13 @@ dnl Add userspace bridge and attach it to OVS with default MTU value + AT_CHECK([ovs-vsctl add-br br10 -- set bridge br10 datapath_type=netdev]) + AT_CHECK([ovs-vsctl add-port br10 phy0 -- set Interface phy0 type=dpdk options:dpdk-devargs=$(cat PCI_ADDR)], [], [stdout], [stderr]) + AT_CHECK([ovs-vsctl show], [], [stdout]) +-sleep 2 + + dnl Check default MTU value in the datapath +-AT_CHECK([ovs-appctl dpctl/show], [], [stdout]) +-AT_CHECK([grep -E 'mtu=1500' stdout], [], [stdout]) ++OVS_WAIT_UNTIL_EQUAL([ovs-vsctl get Interface phy0 mtu], [1500]) + + dnl Increase MTU value and check in the datapath + AT_CHECK([ovs-vsctl set Interface phy0 mtu_request=9000]) ++OVS_WAIT_UNTIL_EQUAL([ovs-vsctl get Interface phy0 mtu], [9000]) + + dnl Fail if MTU is not supported + AT_FAIL_IF([grep "Interface phy0 does not support MTU configuration" ovs-vswitchd.log], [], [stdout]) +@@ -600,10 +599,6 @@ AT_FAIL_IF([grep "Interface phy0 does not support MTU configuration" ovs-vswitch + dnl Fail if error is encountered during MTU setup + AT_FAIL_IF([grep "Interface phy0 MTU (9000) setup error" ovs-vswitchd.log], [], [stdout]) + +-AT_CHECK([ovs-appctl dpctl/show], [], [stdout]) +-AT_CHECK([grep -E 'mtu=9000' stdout], [], [stdout]) +- +- + dnl Clean up + AT_CHECK([ovs-vsctl del-port br10 phy0], [], [stdout], [stderr]) + OVS_VSWITCHD_STOP("[SYSTEM_DPDK_ALLOWED_LOGS]") +@@ -627,7 +622,9 @@ AT_CHECK([ovs-vsctl add-br br10 -- set bridge br10 datapath_type=netdev]) + AT_CHECK([ovs-vsctl add-port br10 phy0 -- set Interface phy0 type=dpdk options:dpdk-devargs=$(cat PCI_ADDR)], [], [stdout], [stderr]) + AT_CHECK([ovs-vsctl set Interface phy0 mtu_request=9000]) + AT_CHECK([ovs-vsctl show], [], [stdout]) +-sleep 2 ++ ++dnl Check MTU value in the datapath ++OVS_WAIT_UNTIL_EQUAL([ovs-vsctl get Interface phy0 mtu], [9000]) + + dnl Fail if MTU is not supported + AT_FAIL_IF([grep "Interface phy0 does not support MTU configuration" ovs-vswitchd.log], [], [stdout]) +@@ -635,15 +632,9 @@ AT_FAIL_IF([grep "Interface phy0 does not support MTU configuration" ovs-vswitch + dnl Fail if error is encountered during MTU setup + AT_FAIL_IF([grep "Interface phy0 MTU (9000) setup error" ovs-vswitchd.log], [], [stdout]) + +-dnl Check MTU value in the datapath +-AT_CHECK([ovs-appctl dpctl/show], [], [stdout]) +-AT_CHECK([grep -E 'mtu=9000' stdout], [], [stdout]) +- + dnl Decrease MTU value and check in the datapath + AT_CHECK([ovs-vsctl set Interface phy0 mtu_request=2000]) +- +-AT_CHECK([ovs-appctl 
dpctl/show], [], [stdout]) +-AT_CHECK([grep -E 'mtu=2000' stdout], [], [stdout]) ++OVS_WAIT_UNTIL_EQUAL([ovs-vsctl get Interface phy0 mtu], [2000]) + + + dnl Clean up diff --git a/tests/system-ipsec.at b/tests/system-ipsec.at -index 07f2b8fd0..d3d27133b 100644 +index 07f2b8fd0e..d3d27133b9 100644 --- a/tests/system-ipsec.at +++ b/tests/system-ipsec.at @@ -141,10 +141,10 @@ m4_define([CHECK_ESP_TRAFFIC], @@ -2325,7 +4178,7 @@ index 07f2b8fd0..d3d27133b 100644 ]) diff --git a/tests/system-layer3-tunnels.at b/tests/system-layer3-tunnels.at -index 81123f730..6fbdedb64 100644 +index 81123f7309..6fbdedb64f 100644 --- a/tests/system-layer3-tunnels.at +++ b/tests/system-layer3-tunnels.at @@ -34,15 +34,15 @@ AT_CHECK([ovs-ofctl add-flow br0 "priority=100 ip,nw_dst=10.1.1.2 action=mod_dl_ @@ -2395,7 +4248,7 @@ index 81123f730..6fbdedb64 100644 ]) OVS_TRAFFIC_VSWITCHD_STOP diff --git a/tests/system-offloads-traffic.at b/tests/system-offloads-traffic.at -index 7215e36e2..bc9ed8b74 100644 +index 7215e36e2d..bc9ed8b740 100644 --- a/tests/system-offloads-traffic.at +++ b/tests/system-offloads-traffic.at @@ -39,7 +39,7 @@ ADD_NAMESPACES(at_ns0, at_ns1) @@ -2644,7 +4497,7 @@ index 7215e36e2..bc9ed8b74 100644 ]) diff --git a/tests/system-tap.at b/tests/system-tap.at -index 871a3bda4..3d84a5318 100644 +index 871a3bda4f..3d84a53182 100644 --- a/tests/system-tap.at +++ b/tests/system-tap.at @@ -22,7 +22,7 @@ AT_CHECK([ip netns exec at_ns1 ip link set dev tap1 up]) @@ -2657,7 +4510,7 @@ index 871a3bda4..3d84a5318 100644 ]) diff --git a/tests/system-traffic.at b/tests/system-traffic.at -index 808c492a2..7ea450202 100644 +index 808c492a22..e051c942f0 100644 --- a/tests/system-traffic.at +++ b/tests/system-traffic.at @@ -10,13 +10,13 @@ ADD_NAMESPACES(at_ns0, at_ns1) @@ -3064,7 +4917,124 @@ index 808c492a2..7ea450202 100644 sleep 1 dnl Generate a single packet trough the controler that needs an ARP modification -@@ -930,7 +930,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -i p0 dst host 172.31.1.1 -l > p0.pcap 2>/ +@@ -903,6 +903,116 @@ ovs-pcap p0.pcap + AT_CHECK([ovs-pcap p0.pcap | grep -Eq "^[[[:xdigit:]]]{24}86dd60000000003a1140fc000000000000000000000000000100fc000000000000000000000000000001[[[:xdigit:]]]{4}17c1003a[[[:xdigit:]]]{4}0000655800000000fffffffffffffa163e949d8008060001080006040001[[[:xdigit:]]]{12}0a0000f40000000000000a0000fe$"]) + AT_CLEANUP + ++AT_SETUP([datapath - bridging two geneve tunnels]) ++OVS_CHECK_TUNNEL_TSO() ++OVS_CHECK_GENEVE() ++ ++OVS_TRAFFIC_VSWITCHD_START() ++ADD_BR([br-underlay-0]) ++ADD_BR([br-underlay-1]) ++ ++ADD_NAMESPACES(at_ns0) ++ADD_NAMESPACES(at_ns1) ++ ++dnl Set up underlay link from host into the namespaces using veth pairs. ++ADD_VETH(p0, at_ns0, br-underlay-0, "172.31.1.1/24") ++AT_CHECK([ip addr add dev br-underlay-0 "172.31.1.100/24"]) ++AT_CHECK([ip link set dev br-underlay-0 up]) ++ ++ADD_VETH(p1, at_ns1, br-underlay-1, "172.31.2.1/24") ++AT_CHECK([ip addr add dev br-underlay-1 "172.31.2.100/24"]) ++AT_CHECK([ip link set dev br-underlay-1 up]) ++ ++dnl Set up two OVS tunnel endpoints in a root namespace and two native ++dnl linux devices inside the test namespaces. 
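ADD_OVS_TUNNEL and ADD_NATIVE_TUNNEL are suite macros; as orientation for the diagram below, the same endpoints can be built by hand with ovs-vsctl and iproute2. A rough, untested equivalent for the ns0 side (the macros' exact expansion is not shown here and may differ in detail):

    # OVS-side endpoint: a geneve port on br0 pointing at the namespace,
    # with the overlay address on the bridge device.
    ovs-vsctl add-port br0 at_gnv0 -- \
        set Interface at_gnv0 type=geneve options:remote_ip=172.31.1.1
    ip addr add 10.1.1.100/24 dev br0
    # Namespace-side endpoint: a native Linux geneve device.
    ip netns exec at_ns0 ip link add ns_gnv0 type geneve remote 172.31.1.100 vni 0
    ip netns exec at_ns0 ip addr add 10.1.1.1/24 dev ns_gnv0
    ip netns exec at_ns0 ip link set ns_gnv0 up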
++dnl ++dnl ns_gnv0 | ns_gnv1 ++dnl ip: 10.1.1.1/24 | ip: 10.1.1.2/24 ++dnl remote_ip: 172.31.1.100 | remote_ip: 172.31.2.100 ++dnl | | | ++dnl | | | ++dnl p0 | p1 ++dnl ip: 172.31.1.1/24 | ip: 172.31.2.1/24 ++dnl | NS0 | NS1 | ++dnl ---------|------------------------+------------------|-------------------- ++dnl | | ++dnl br-underlay-0: br-underlay-1: ++dnl ip: 172.31.1.100/24 ip: 172.31.2.100/24 ++dnl ovs-p0 ovs-p1 ++dnl | | ++dnl | br0 | ++dnl encap/decap --- ip: 10.1.1.100/24 --------- encap/decap ++dnl at_gnv0 ++dnl remote_ip: 172.31.1.1 ++dnl at_gnv1 ++dnl remote_ip: 172.31.2.1 ++dnl ++ADD_OVS_TUNNEL([geneve], [br0], [at_gnv0], [172.31.1.1], [10.1.1.100/24]) ++ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], ++ [vni 0]) ++ADD_OVS_TUNNEL([geneve], [br0], [at_gnv1], [172.31.2.1], [10.1.1.101/24]) ++ADD_NATIVE_TUNNEL([geneve], [ns_gnv1], [at_ns1], [172.31.2.100], [10.1.1.2/24], ++ [vni 0]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "actions=normal"]) ++AT_CHECK([ovs-ofctl add-flow br-underlay-0 "actions=normal"]) ++AT_CHECK([ovs-ofctl add-flow br-underlay-1 "actions=normal"]) ++ ++dnl First, check both underlays. ++NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -W 2 172.31.1.100 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++NS_CHECK_EXEC([at_ns1], [ping -q -c 3 -i 0.3 -W 2 172.31.2.100 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++dnl Now, check the overlay with different packet sizes. ++NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -W 2 10.1.1.2 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -W 2 10.1.1.2 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -W 2 10.1.1.2 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++OVS_TRAFFIC_VSWITCHD_STOP ++AT_CLEANUP ++ ++AT_SETUP([datapath - handling of geneve corrupted metadata]) ++OVS_CHECK_TUNNEL_TSO() ++OVS_CHECK_GENEVE() ++ ++OVS_TRAFFIC_VSWITCHD_START( ++ [_ADD_BR([br-underlay]) -- \ ++ set bridge br0 other-config:hwaddr=f2:ff:00:00:00:01 -- \ ++ set bridge br-underlay other-config:hwaddr=f2:ff:00:00:00:02]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "actions=normal"]) ++AT_CHECK([ovs-ofctl add-flow br-underlay "actions=normal"]) ++ ++ADD_NAMESPACES(at_ns0) ++ ++dnl Set up underlay link from host into the namespace using veth pair. ++ADD_VETH(p0, at_ns0, br-underlay, "172.31.1.1/24", f2:ff:00:00:00:03) ++AT_CHECK([ip addr add dev br-underlay "172.31.1.100/24"]) ++AT_CHECK([ip link set dev br-underlay up]) ++ ++dnl Set up tunnel endpoints on OVS outside the namespace and with a native ++dnl linux device inside the namespace. 
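The raw frame injected further down is a hand-built Geneve packet whose option block is deliberately inconsistent; decoding the relevant bytes makes the expected "Invalid Geneve tunnel metadata" log message unsurprising. An annotated reading of the header (per the RFC 8926 layout; this decode is the editor's interpretation of the hex, not taken from the test itself):

    # UDP dst 0x17c1 = 6081 (Geneve), then the Geneve header:
    #   01 00 65 58   ver=0, opt_len=1 (1 * 4 bytes), proto=0x6558
    #   00 00 00 00   VNI 0, reserved
    #   00 03 00 02   option: class=0x0003, type=0, length=2 (2 * 4 data bytes)
    # The lone option claims 8 bytes of data, but opt_len leaves room for
    # none, so option parsing must fail; the test then only has to check:
    grep 'Invalid Geneve tunnel metadata' ovs-vswitchd.log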
++ADD_OVS_TUNNEL([geneve], [br0], [at_gnv0], [172.31.1.1], [10.1.1.100/24]) ++ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], ++ [vni 0], [address f2:ff:00:00:00:04]) ++ ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 03 08 00 45 00 00 52 00 01 00 00 40 11 1f f7 ac 1f 01 01 ac 1f 01 64 de c1 17 c1 00 3e 59 e9 01 00 65 58 00 00 00 00 00 03 00 02 f2 ff 00 00 00 01 f2 ff 00 00 00 04 08 00 45 00 00 1c 00 01 00 00 40 01 64 7a 0a 01 01 01 0a 01 01 64 08 00 f7 ff 00 00 00 00 > /dev/null]) ++ ++OVS_WAIT_UNTIL([grep -q 'Invalid Geneve tunnel metadata' ovs-vswitchd.log]) ++ ++OVS_TRAFFIC_VSWITCHD_STOP(["/Invalid Geneve tunnel metadata on bridge br0 while processing icmp,in_port=1,vlan_tci=0x0000,dl_src=f2:ff:00:00:00:04,dl_dst=f2:ff:00:00:00:01,nw_src=10.1.1.1,nw_dst=10.1.1.100,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0/d ++/Unable to parse geneve options/d"]) ++AT_CLEANUP ++ + AT_SETUP([datapath - ping over gre tunnel by simulated packets]) + OVS_CHECK_TUNNEL_TSO() + OVS_CHECK_MIN_KERNEL(3, 10) +@@ -930,7 +1040,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -i p0 dst host 172.31.1.1 -l > p0.pcap 2>/ sleep 1 dnl First, check the underlay. @@ -3073,7 +5043,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -978,7 +978,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap +@@ -978,7 +1088,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap sleep 1 dnl First, check the underlay @@ -3082,7 +5052,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1031,7 +1031,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap +@@ -1031,7 +1141,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap sleep 1 dnl First, check the underlay. @@ -3091,7 +5061,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1088,7 +1088,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap +@@ -1088,7 +1198,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap sleep 1 dnl First, check the underlay. @@ -3100,7 +5070,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1144,7 +1144,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap +@@ -1144,7 +1254,7 @@ NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap sleep 1 dnl First, check the underlay. @@ -3109,7 +5079,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1214,12 +1214,12 @@ dnl "connect: Cannot assign requested address" +@@ -1214,12 +1324,12 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::100]) dnl First, check the underlay. @@ -3124,7 +5094,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1276,12 +1276,12 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::100]) +@@ -1276,12 +1386,12 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::100]) OVS_WAIT_UNTIL([ip netns exec at_ns1 ping6 -c 1 fc00:100::100]) dnl First, check the underlay. 
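The ping invocations rewritten throughout these hunks swap iputils' `-w` for `-W`: `-w N` is a global deadline that can cut off a slow but healthy run after N seconds, whereas `-W N` only bounds how long each individual reply is awaited, so `-c 3` always sends all three probes. The difference in a one-line comparison (addresses as in the tests):

    ping -q -c 3 -i 0.3 -w 2 10.1.1.2   # whole run killed at t=2s; may under-count replies
    ping -q -c 3 -i 0.3 -W 2 10.1.1.2   # all 3 probes sent; each reply waited on for <=2s

The `-W` form is what makes the "3 packets transmitted, 3 received" expectations robust on loaded CI machines.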
@@ -3139,7 +5109,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1307,7 +1307,7 @@ priority=10 in_port=2,ip,actions=clone(mod_dl_src(ae:c6:7e:54:8d:4d),mod_dl_dst( +@@ -1307,7 +1417,7 @@ priority=10 in_port=2,ip,actions=clone(mod_dl_src(ae:c6:7e:54:8d:4d),mod_dl_dst( AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log]) @@ -3148,7 +5118,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1348,11 +1348,11 @@ table=1,priority=10 actions=normal +@@ -1348,11 +1458,11 @@ table=1,priority=10 actions=normal AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-ofctl add-flows br1 flows.txt]) @@ -3162,7 +5132,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1387,11 +1387,11 @@ table=3,priority=10 actions=normal +@@ -1387,11 +1497,11 @@ table=3,priority=10 actions=normal AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-ofctl add-flows br1 flows.txt]) @@ -3176,7 +5146,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) OVS_TRAFFIC_VSWITCHD_STOP -@@ -2062,7 +2062,7 @@ add in_port=ovs-p1,actions=ovs-p0,br0 +@@ -2062,7 +2172,7 @@ add in_port=ovs-p1,actions=ovs-p0,br0 ]) AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) @@ -3185,7 +5155,7 @@ index 808c492a2..7ea450202 100644 10 packets transmitted, 10 received, 0% packet loss, time 0ms ]) -@@ -2081,7 +2081,7 @@ modify in_port=ovs-p1,actions=ovs-p0 +@@ -2081,7 +2191,7 @@ modify in_port=ovs-p1,actions=ovs-p0 AT_CHECK([ovs-ofctl add-flows br0 flows2.txt]) AT_CHECK([ovs-appctl revalidator/wait], [0]) @@ -3194,7 +5164,7 @@ index 808c492a2..7ea450202 100644 10 packets transmitted, 10 received, 0% packet loss, time 0ms ]) -@@ -2096,7 +2096,7 @@ recirc_id(),in_port(3),eth_type(0x0800),ipv4(frag=no), packets:19, bytes +@@ -2096,7 +2206,7 @@ recirc_id(),in_port(3),eth_type(0x0800),ipv4(frag=no), packets:19, bytes AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-appctl revalidator/wait], [0]) @@ -3203,7 +5173,65 @@ index 808c492a2..7ea450202 100644 10 packets transmitted, 10 received, 0% packet loss, time 0ms ]) -@@ -2516,6 +2516,7 @@ AT_CLEANUP +@@ -2111,6 +2221,57 @@ recirc_id(),in_port(3),eth_type(0x0800),ipv4(frag=no), packets:29, bytes + OVS_TRAFFIC_VSWITCHD_STOP + AT_CLEANUP + ++AT_SETUP([datapath - Neighbor Discovery with loose match]) ++OVS_TRAFFIC_VSWITCHD_START() ++ ++ADD_NAMESPACES(at_ns0, at_ns1) ++ ++ADD_VETH(p0, at_ns0, br0, "2001::1:0:392/64", 36:b1:ee:7c:01:03) ++ADD_VETH(p1, at_ns1, br0, "2001::1:0:9/64", 36:b1:ee:7c:01:02) ++ ++dnl Set up flows for moving icmp ND Solicit around. This should be the ++dnl same for the other ND types. ++AT_DATA([flows.txt], [dnl ++table=0 priority=95 icmp6,icmp_type=136,nd_target=2001::1:0:9 actions=resubmit(,10) ++table=0 priority=95 icmp6,icmp_type=136,nd_target=2001::1:0:392 actions=resubmit(,10) ++table=0 priority=65 actions=resubmit(,20) ++table=10 actions=NORMAL ++table=20 actions=drop ++]) ++AT_CHECK([ovs-ofctl del-flows br0]) ++AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) ++ ++dnl Send a mismatching neighbor discovery. 
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 f1 f2 20 00 00 00 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++dnl Send a matching neighbor discovery. ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 fe 5f 20 00 00 00 20 01 00 00 00 00 00 00 00 00 00 01 00 00 03 92 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | strip_stats | strip_used | dnl ++ strip_key32 | strip_ptype | strip_eth | strip_recirc | dnl ++ grep ",nd" | sort], [0], [dnl ++recirc_id(),in_port(2),eth(src=36:b1:ee:7c:01:03,dst=36:b1:ee:7c:01:02),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=2001::1:0:392), packets:0, bytes:0, used:never, actions:1,3 ++recirc_id(),in_port(2),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=3000::1), packets:0, bytes:0, used:never, actions:drop ++]) ++ ++OVS_WAIT_UNTIL([ovs-appctl dpctl/dump-flows | grep ",nd" | wc -l | grep -E ^0]) ++ ++dnl Send a matching neighbor discovery. ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 fe 5f 20 00 00 00 20 01 00 00 00 00 00 00 00 00 00 01 00 00 03 92 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++dnl Send a mismatching neighbor discovery. ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 f1 f2 20 00 00 00 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | strip_stats | strip_used | dnl ++ strip_key32 | strip_ptype | strip_eth | strip_recirc | dnl ++ grep ",nd" | sort], [0], [dnl ++recirc_id(),in_port(2),eth(src=36:b1:ee:7c:01:03,dst=36:b1:ee:7c:01:02),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=2001::1:0:392), packets:0, bytes:0, used:never, actions:1,3 ++recirc_id(),in_port(2),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=3000::1), packets:0, bytes:0, used:never, actions:drop ++]) ++ ++OVS_TRAFFIC_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_BANNER([MPLS]) + + AT_SETUP([mpls - encap header dp-support]) +@@ -2516,6 +2677,7 @@ AT_CLEANUP AT_SETUP([conntrack - ct flush]) CHECK_CONNTRACK() @@ -3211,7 +5239,7 @@ index 808c492a2..7ea450202 100644 OVS_TRAFFIC_VSWITCHD_START() ADD_NAMESPACES(at_ns0, at_ns1) -@@ -2526,10 +2527,8 @@ ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24") +@@ -2526,10 +2688,8 @@ ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24") AT_DATA([flows.txt], [dnl priority=1,action=drop priority=10,arp,action=normal @@ -3224,7 +5252,7 @@ index 808c492a2..7ea450202 100644 ]) AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) -@@ -2564,7 +2563,7 @@ AT_CHECK([FLUSH_CMD zone=5 'ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17 +@@ -2564,7 +2724,7 @@ AT_CHECK([FLUSH_CMD zone=5 'ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17 AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0]) dnl Test ICMP traffic @@ -3233,7 +5261,7 @@ 
index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2692,6 +2691,25 @@ udp,orig=(src=10.1.1.2,dst=10.1.1.1,sport=2,dport=1),reply=(src=10.1.1.1,dst=10. +@@ -2692,6 +2852,25 @@ udp,orig=(src=10.1.1.2,dst=10.1.1.1,sport=2,dport=1),reply=(src=10.1.1.1,dst=10. AT_CHECK([FLUSH_CMD]) @@ -3259,7 +5287,7 @@ index 808c492a2..7ea450202 100644 AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep "10\.1\.1\.1"], [1]) ]) -@@ -2745,7 +2763,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 +@@ -2745,7 +2924,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) dnl Pings from ns0->ns1 should work fine. @@ -3268,7 +5296,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2786,7 +2804,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 +@@ -2786,7 +2965,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) dnl Pings from ns0->ns1 should work fine. @@ -3277,7 +5305,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2886,7 +2904,7 @@ NS_CHECK_EXEC([at_ns1], [ping6 -q -c 3 -i 0.3 -w 2 fc00::1 | FORMAT_PING], [0], +@@ -2886,7 +3065,7 @@ NS_CHECK_EXEC([at_ns1], [ping6 -q -c 3 -i 0.3 -w 2 fc00::1 | FORMAT_PING], [0], ]) dnl Pings from ns0->ns1 should work fine. @@ -3286,7 +5314,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3796,7 +3814,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp,actions=ovs-p0 +@@ -3796,7 +3975,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp,actions=ovs-p0 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) rm p0.pcap @@ -3295,7 +5323,7 @@ index 808c492a2..7ea450202 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump0_err]) dnl Send UDP packet from 10.1.1.1:1234 to 10.1.1.240:80 -@@ -3837,12 +3855,12 @@ dnl Modify userspace conntrack fragmentation handling. +@@ -3837,12 +4016,12 @@ dnl Modify userspace conntrack fragmentation handling. DPCTL_MODIFY_FRAGMENTATION() dnl Ipv4 fragmentation connectivity check. @@ -3310,7 +5338,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3914,12 +3932,12 @@ dnl Modify userspace conntrack fragmentation handling. +@@ -3914,12 +4093,12 @@ dnl Modify userspace conntrack fragmentation handling. DPCTL_MODIFY_FRAGMENTATION() dnl Ipv4 fragmentation connectivity check. @@ -3325,7 +5353,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3960,22 +3978,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) +@@ -3960,22 +4139,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) dnl Ipv4 fragmentation connectivity check. @@ -3352,7 +5380,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4134,12 +4152,12 @@ dnl "connect: Cannot assign requested address" +@@ -4134,12 +4313,12 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl Ipv6 fragmentation connectivity check. 
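These fragmentation checks rely on payload size alone to force the conntrack reassembly path: with the default 1500-byte MTU, a 1600-byte ICMP payload yields two fragments and a 3200-byte payload three, and only the reassembled flow can match the ct_state rules. The size arithmetic, for reference (assuming the default 1500-byte veth MTU):

    # 1600B payload + 8B ICMP header + 20B IPv4 header = 1628B > 1500B MTU
    ip netns exec at_ns0 ping -s 1600 -q -c 3 -i 0.3 -W 2 10.1.1.2
    # 3208B ICMP datagram -> ceil(3208 / 1480) = 3 fragments
    ip netns exec at_ns0 ping -s 3200 -q -c 3 -i 0.3 -W 2 10.1.1.2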
@@ -3367,7 +5395,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4216,12 +4234,12 @@ dnl "connect: Cannot assign requested address" +@@ -4216,12 +4395,12 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl Ipv4 fragmentation connectivity check. @@ -3382,7 +5410,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4259,22 +4277,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) +@@ -4259,22 +4438,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00:1::4]) dnl Ipv6 fragmentation connectivity check. @@ -3409,7 +5437,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4486,18 +4504,18 @@ ADD_NATIVE_TUNNEL([vxlan], [at_vxlan1], [at_ns0], [172.31.1.100], [10.1.1.1/24], +@@ -4486,18 +4665,18 @@ ADD_NATIVE_TUNNEL([vxlan], [at_vxlan1], [at_ns0], [172.31.1.100], [10.1.1.1/24], [id 0 dstport 4789]) dnl First, check the underlay @@ -3432,7 +5460,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4546,18 +4564,18 @@ dnl "connect: Cannot assign requested address" +@@ -4546,18 +4725,18 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl First, check the underlay @@ -3455,7 +5483,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4670,7 +4688,7 @@ dnl The default udp_single and icmp_first timeouts are 30 seconds in +@@ -4670,7 +4849,7 @@ dnl The default udp_single and icmp_first timeouts are 30 seconds in dnl kernel DP, and 60 seconds in userspace DP. dnl Send ICMP and UDP traffic @@ -3464,7 +5492,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4696,7 +4714,7 @@ done +@@ -4696,7 +4875,7 @@ done AT_CHECK([ovs-vsctl --may-exist add-zone-tp $DP_TYPE zone=5 udp_first=1 udp_single=1 icmp_first=1 icmp_reply=1]) dnl Send ICMP and UDP traffic @@ -3473,7 +5501,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4714,7 +4732,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl +@@ -4714,7 +4893,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl ]) dnl Re-send ICMP and UDP traffic to test conntrack cache @@ -3482,7 +5510,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4735,7 +4753,7 @@ dnl Set the timeout policy to default again. +@@ -4735,7 +4914,7 @@ dnl Set the timeout policy to default again. 
AT_CHECK([ovs-vsctl del-zone-tp $DP_TYPE zone=5]) dnl Send ICMP and UDP traffic @@ -3491,7 +5519,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -5001,7 +5019,7 @@ table=2,in_port=1,ip,ct_state=+trk+est,ct_zone=2,action=LOCAL +@@ -5001,7 +5180,7 @@ table=2,in_port=1,ip,ct_state=+trk+est,ct_zone=2,action=LOCAL AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) @@ -3500,7 +5528,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -5072,7 +5090,7 @@ table=4,priority=100,ip,action=output:NXM_NX_REG0[[]] +@@ -5072,7 +5251,7 @@ table=4,priority=100,ip,action=output:NXM_NX_REG0[[]] AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) @@ -3509,7 +5537,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -6140,7 +6158,7 @@ table=10 priority=0 action=drop +@@ -6140,7 +6319,7 @@ table=10 priority=0 action=drop AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) rm p0.pcap @@ -3518,7 +5546,7 @@ index 808c492a2..7ea450202 100644 sleep 1 dnl UDP packets from ns0->ns1 should solicit "destination unreachable" response. -@@ -6164,7 +6182,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst= +@@ -6164,7 +6343,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst= udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=,dport=),reply=(src=10.1.1.2,dst=10.1.1.2XX,sport=,dport=),mark=1 ]) @@ -3527,7 +5555,7 @@ index 808c492a2..7ea450202 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -6854,7 +6872,7 @@ dnl waiting, we get occasional failures due to the following error: +@@ -6854,7 +7033,7 @@ dnl waiting, we get occasional failures due to the following error: dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::240]) @@ -3536,7 +5564,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -6909,13 +6927,13 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) +@@ -6909,13 +7088,13 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) AT_CHECK([ovs-appctl dpctl/flush-conntrack]) rm p0.pcap @@ -3552,7 +5580,7 @@ index 808c492a2..7ea450202 100644 AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fc00::2)], [0], [dnl udp,orig=(src=fc00::1,dst=fc00::2,sport=,dport=),reply=(src=fc00::2,dst=fc00::240,sport=,dport=) -@@ -6944,7 +6962,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp6,actions=ovs-p0 +@@ -6944,7 +7123,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp6,actions=ovs-p0 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) rm p0.pcap @@ -3561,7 +5589,7 @@ index 808c492a2..7ea450202 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump0_err]) dnl Send UDP packet from [[fc00::1]]:1234 to [[fc00::240]]:80 -@@ -7587,12 +7605,12 @@ ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], +@@ -7587,12 +7766,12 @@ ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], [vni 0]) dnl First, check the underlay @@ -3576,7 +5604,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -7635,7 +7653,7 @@ table=2,in_port=ovs-server,ip,ct_state=+trk+rpl,actions=output:ovs-client +@@ -7635,7 +7814,7 @@ 
table=2,in_port=ovs-server,ip,ct_state=+trk+rpl,actions=output:ovs-client AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) rm server.pcap @@ -3585,7 +5613,7 @@ index 808c492a2..7ea450202 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump0_err]) dnl Send UDP client->server -@@ -7677,7 +7695,7 @@ dnl Check the ICMP error in reply direction +@@ -7677,7 +7856,7 @@ dnl Check the ICMP error in reply direction AT_CHECK([ovs-appctl dpctl/flush-conntrack zone=42]) rm client.pcap @@ -3594,7 +5622,7 @@ index 808c492a2..7ea450202 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump1_err]) dnl Send UDP client->server -@@ -7819,7 +7837,7 @@ dnl CVLAN traffic should match the flow and drop +@@ -7819,7 +7998,7 @@ dnl CVLAN traffic should match the flow and drop AT_CHECK([ovs-appctl revalidator/purge]) AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:vlan-limit=1]) AT_CHECK([ovs-ofctl add-flow br0 "priority=100 dl_type=0x8100 action=drop"]) @@ -3603,7 +5631,7 @@ index 808c492a2..7ea450202 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -7869,11 +7887,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) +@@ -7869,11 +8048,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -3617,7 +5645,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -7925,11 +7943,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) +@@ -7925,11 +8104,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -3631,7 +5659,7 @@ index 808c492a2..7ea450202 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -7977,24 +7995,24 @@ AT_CHECK([ovs-vsctl set port ovs-p2 vlan_mode=dot1q-tunnel tag=4094 cvlans=100,2 +@@ -7977,24 +8156,24 @@ AT_CHECK([ovs-vsctl set port ovs-p2 vlan_mode=dot1q-tunnel tag=4094 cvlans=100,2 OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.3.2.2]) @@ -3661,7 +5689,7 @@ index 808c492a2..7ea450202 100644 OVS_TRAFFIC_VSWITCHD_STOP(["/dropping VLAN \(0\|300\) packet received on dot1q-tunnel port/d"]) AT_CLEANUP -@@ -8023,11 +8041,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows-br0.txt]) +@@ -8023,11 +8202,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows-br0.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -3676,7 +5704,7 @@ index 808c492a2..7ea450202 100644 ]) diff --git a/tests/system-userspace-packet-type-aware.at b/tests/system-userspace-packet-type-aware.at -index 974304758..aac178eda 100644 +index 974304758f..aac178edaf 100644 --- a/tests/system-userspace-packet-type-aware.at +++ b/tests/system-userspace-packet-type-aware.at @@ -335,7 +335,7 @@ AT_CHECK([ @@ -3707,7 +5735,7 @@ index 974304758..aac178eda 100644 ]) diff --git a/tests/test-barrier.c b/tests/test-barrier.c -index 3bc5291cc..fb0ab0e69 100644 +index 3bc5291cc1..fb0ab0e695 100644 --- a/tests/test-barrier.c +++ b/tests/test-barrier.c @@ -14,13 +14,13 @@ @@ -3729,7 +5757,7 @@ index 3bc5291cc..fb0ab0e69 100644 #include "util.h" diff --git a/tests/test-id-fpool.c b/tests/test-id-fpool.c -index 25275d9ae..7bdb8154d 100644 +index 25275d9aef..7bdb8154d3 100644 --- a/tests/test-id-fpool.c +++ b/tests/test-id-fpool.c @@ -14,12 +14,12 @@ @@ -3757,7 +5785,7 @@ index 25275d9ae..7bdb8154d 100644 for (i = 0; i < n_threads; i++) { if (thread_working_ms[i] >= TIMEOUT_MS) { diff --git a/tests/test-jsonrpc.py 
b/tests/test-jsonrpc.py -index 1df5afa22..8a4a17593 100644 +index 1df5afa221..8a4a175938 100644 --- a/tests/test-jsonrpc.py +++ b/tests/test-jsonrpc.py @@ -199,13 +199,13 @@ notify REMOTE METHOD PARAMS send notification and exit @@ -3777,7 +5805,7 @@ index 1df5afa22..8a4a17593 100644 sys.stderr.write("%s: \"%s\" requires %d arguments but %d " "provided\n" diff --git a/tests/test-mpsc-queue.c b/tests/test-mpsc-queue.c -index a38bf9e6d..86a223caf 100644 +index a38bf9e6df..86a223caff 100644 --- a/tests/test-mpsc-queue.c +++ b/tests/test-mpsc-queue.c @@ -14,12 +14,12 @@ @@ -3804,8 +5832,26 @@ index a38bf9e6d..86a223caf 100644 printf("%s: %6d", prefix, reader_elapsed); for (i = 0; i < n_threads; i++) { printf(" %6" PRIu64, thread_working_ms[i]); +diff --git a/tests/test-ovsdb.c b/tests/test-ovsdb.c +index c761822e62..d6a47de336 100644 +--- a/tests/test-ovsdb.c ++++ b/tests/test-ovsdb.c +@@ -2800,6 +2800,13 @@ do_idl(struct ovs_cmdl_context *ctx) + } else { + print_idl(idl, step++, terse); + } ++ ++ /* Just run IDL forever for a simple monitoring. */ ++ if (!strcmp(arg, "monitor")) { ++ seqno = ovsdb_idl_get_seqno(idl); ++ i--; ++ continue; ++ } + } + seqno = ovsdb_idl_get_seqno(idl); + diff --git a/tests/test-ovsdb.py b/tests/test-ovsdb.py -index a841adba4..71248854f 100644 +index a841adba4e..48f8ee2d70 100644 --- a/tests/test-ovsdb.py +++ b/tests/test-ovsdb.py @@ -37,7 +37,7 @@ vlog.init(None) @@ -3842,7 +5888,39 @@ index a841adba4..71248854f 100644 for value in json.values(): parse_uuids(value, symtab) -@@ -1049,14 +1049,14 @@ def main(argv): +@@ -757,16 +757,23 @@ def do_idl(schema_file, remote, *commands): + poller.block() + else: + # Wait for update. +- while idl.change_seqno == seqno and not idl.run(): +- rpc.run() ++ while True: ++ while idl.change_seqno == seqno and not idl.run(): ++ rpc.run() + +- poller = ovs.poller.Poller() +- idl.wait(poller) +- rpc.wait(poller) +- poller.block() ++ poller = ovs.poller.Poller() ++ idl.wait(poller) ++ rpc.wait(poller) ++ poller.block() + +- print_idl(idl, step, terse) +- step += 1 ++ print_idl(idl, step, terse) ++ step += 1 ++ ++ # Run IDL forever in case of a simple monitor, otherwise ++ # break and execute the command. 
++ seqno = idl.change_seqno ++ if command != "monitor": ++ break + + seqno = idl.change_seqno + +@@ -1049,14 +1056,14 @@ def main(argv): sys.exit(1) func, n_args = commands[command_name] @@ -3859,8 +5937,80 @@ index a841adba4..71248854f 100644 if len(args) != n_args: sys.stderr.write("%s: \"%s\" requires %d arguments but %d " "provided\n" +diff --git a/tests/tunnel.at b/tests/tunnel.at +index ddeb66bc9f..dc706a87bb 100644 +--- a/tests/tunnel.at ++++ b/tests/tunnel.at +@@ -333,6 +333,50 @@ set(tunnel(tun_id=0x5,dst=4.4.4.4,ttl=64,flags(df|key))),1 + OVS_VSWITCHD_STOP + AT_CLEANUP + ++AT_SETUP([tunnel - set_tunnel VXLAN]) ++OVS_VSWITCHD_START([dnl ++ add-port br0 p1 -- set Interface p1 type=vxlan options:key=flow \ ++ options:remote_ip=1.1.1.1 ofport_request=1 \ ++ -- add-port br0 p2 -- set Interface p2 type=vxlan options:key=flow \ ++ options:remote_ip=2.2.2.2 ofport_request=2 \ ++ -- add-port br0 p3 -- set Interface p3 type=vxlan options:key=flow \ ++ options:remote_ip=3.3.3.3 ofport_request=3 \ ++ -- add-port br0 p4 -- set Interface p4 type=vxlan options:key=flow \ ++ options:remote_ip=4.4.4.4 ofport_request=4]) ++AT_DATA([flows.txt], [dnl ++actions=set_tunnel:1,output:1,set_tunnel:2,output:2,set_tunnel:3,output:3,set_tunnel:5,output:4 ++]) ++ ++OVS_VSWITCHD_DISABLE_TUNNEL_PUSH_POP ++AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) ++ ++AT_CHECK([ovs-appctl dpif/show | tail -n +3], [0], [dnl ++ br0 65534/100: (dummy-internal) ++ p1 1/4789: (vxlan: key=flow, remote_ip=1.1.1.1) ++ p2 2/4789: (vxlan: key=flow, remote_ip=2.2.2.2) ++ p3 3/4789: (vxlan: key=flow, remote_ip=3.3.3.3) ++ p4 4/4789: (vxlan: key=flow, remote_ip=4.4.4.4) ++]) ++ ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(100),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout]) ++AT_CHECK([tail -1 stdout], [0], [Datapath actions: dnl ++set(tunnel(tun_id=0x1,dst=1.1.1.1,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x2,dst=2.2.2.2,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x3,dst=3.3.3.3,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x5,dst=4.4.4.4,ttl=64,tp_dst=4789,flags(df|key))),4789 ++]) ++ ++dnl With pre-existing tunnel metadata. 
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'tunnel(tun_id=0x1,src=1.1.1.1,dst=5.5.5.5,tp_src=12345,tp_dst=4789,ttl=64,flags(key)),in_port(4789),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout]) ++AT_CHECK([tail -1 stdout], [0], [Datapath actions: dnl ++set(tunnel(tun_id=0x2,dst=2.2.2.2,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x3,dst=3.3.3.3,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x5,dst=4.4.4.4,ttl=64,tp_dst=4789,flags(df|key))),4789 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_SETUP([tunnel - key]) + OVS_VSWITCHD_START([dnl + add-port br0 p1 -- set Interface p1 type=gre options:key=1 \ +diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c +index 24d0941cf2..25fd38f5f5 100644 +--- a/utilities/ovs-ofctl.c ++++ b/utilities/ovs-ofctl.c +@@ -5113,10 +5113,10 @@ static const struct ovs_cmdl_command all_commands[] = { + 1, 1, ofctl_dump_ipfix_flow, OVS_RO }, + + { "ct-flush-zone", "switch zone", +- 2, 2, ofctl_ct_flush_zone, OVS_RO }, ++ 2, 2, ofctl_ct_flush_zone, OVS_RW }, + + { "ct-flush", "switch [zone=N] [ct-orig-tuple [ct-reply-tuple]]", +- 1, 4, ofctl_ct_flush, OVS_RO }, ++ 1, 4, ofctl_ct_flush, OVS_RW }, + + { "ofp-parse", "file", + 1, 1, ofctl_ofp_parse, OVS_RW }, diff --git a/vswitchd/vswitch.xml b/vswitchd/vswitch.xml -index cfcde34ff..e400043ce 100644 +index cfcde34ffe..e400043ce7 100644 --- a/vswitchd/vswitch.xml +++ b/vswitchd/vswitch.xml @@ -3797,14 +3797,62 @@ ovs-vsctl add-port br0 p0 -- set Interface p0 type=patch options:peer=p1 \ @@ -3930,113 +6080,29374 @@ index cfcde34ff..e400043ce 100644 -diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c -index 7726a89d99..a982e42264 100644 ---- a/dpdk/drivers/net/i40e/i40e_ethdev.c -+++ b/dpdk/drivers/net/i40e/i40e_ethdev.c -@@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, - struct rte_ether_addr *mac_addr); - - static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); --static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size); +diff --git a/dpdk/.github/workflows/build.yml b/dpdk/.github/workflows/build.yml +index 82d83f4030..9512219a05 100644 +--- a/dpdk/.github/workflows/build.yml ++++ b/dpdk/.github/workflows/build.yml +@@ -25,7 +25,8 @@ jobs: + MINGW: ${{ matrix.config.cross == 'mingw' }} + MINI: ${{ matrix.config.mini != '' }} + PPC64LE: ${{ matrix.config.cross == 'ppc64le' }} +- REF_GIT_TAG: none ++ REF_GIT_REPO: https://dpdk.org/git/dpdk-stable ++ REF_GIT_TAG: v22.11.1 + RISCV64: ${{ matrix.config.cross == 'riscv64' }} + RUN_TESTS: ${{ contains(matrix.config.checks, 'tests') }} + +@@ -52,6 +53,7 @@ jobs: + cross: mingw + - os: ubuntu-20.04 + compiler: gcc ++ library: shared + cross: aarch64 + - os: ubuntu-20.04 + compiler: gcc +@@ -67,7 +69,7 @@ jobs: + id: get_ref_keys + run: | + echo 'ccache=ccache-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-'$(date -u +%Y-w%W) >> $GITHUB_OUTPUT +- echo 'libabigail=libabigail-${{ matrix.config.os }}' >> $GITHUB_OUTPUT ++ echo 'libabigail=libabigail-${{ env.LIBABIGAIL_VERSION }}-${{ matrix.config.os }}' >> $GITHUB_OUTPUT + echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT + - name: Retrieve ccache cache + uses: actions/cache@v3 +diff --git a/dpdk/.mailmap 
b/dpdk/.mailmap +index 75884b6fe2..6032ae9ea2 100644 +--- a/dpdk/.mailmap ++++ b/dpdk/.mailmap +@@ -64,6 +64,7 @@ Ali Volkan Atli + Allain Legacy + Allen Hubbe + Alok Makhariya ++Alvaro Karsz + Alvin Zhang + Aman Singh + Amaranath Somalapuram +@@ -143,6 +144,7 @@ Balazs Nemeth + Bao-Long Tran + Barak Enat + Barry Cao ++Bartosz Staszewski + Baruch Siach + Bassam Zaid AlKilani + Beilei Xing +@@ -166,7 +168,9 @@ Bin Huang + Bin Zheng + Björn Töpel + Bo Chen ++Boleslav Stankevich + Boon Ang ++Boris Ouretskey + Boris Pismenny + Brandon Lo + Brendan Ryan +@@ -195,6 +199,7 @@ Chaoyong He + Chao Zhu + Charles Brett + Charles Myers ++Charles Stoll + Chas Williams <3chas3@gmail.com> + Chenbo Xia + Chengchang Tang +@@ -295,6 +300,8 @@ Deepak Khandelwal + Deepak Kumar Jain + Deirdre O'Connor + Dekel Peled ++Dengdui Huang ++Denis Pryazhennikov + Dennis Marinus + Derek Chickles + Des O Dea +@@ -338,6 +345,7 @@ Dzmitry Sautsa + Ed Czeck + Eduard Serra + Edward Makarov ++Edwin Brossette + Eelco Chaudron + Elad Nachman + Elad Persiko +@@ -371,6 +379,7 @@ Farah Smith + Fei Chen + Feifei Wang + Fei Qin ++Fengnan Chang + Fengtian Guo + Ferdinand Thiessen + Ferruh Yigit +@@ -474,6 +483,7 @@ Helin Zhang + Hemant Agrawal + Heng Ding + Hengjian Zhang ++Heng Jiang + Heng Wang + Henning Schild + Henry Cai +@@ -524,6 +534,7 @@ Ilya Maximets + Ilya V. Matveychikov + Ilyes Ben Hamouda + Intiyaz Basha ++Isaac Boukris + Itsuro Oda + Ivan Boule + Ivan Dyukov +@@ -601,6 +612,7 @@ Jie Liu + Jie Pan + Jie Wang + Jie Zhou ++Jieqiang Wang + Jijiang Liu + Jilei Chen + Jim Harris +@@ -667,9 +679,12 @@ Jun Yang + Junyu Jiang + Juraj LinkeÅ¡ + Kai Ji ++Kaijun Zeng + Kaiwen Deng ++Kaiyu Zhang + Kalesh AP + Kamalakannan R ++Kamalakshitha Aligeri + Kamil Bednarczyk + Kamil Chalupnik + Kamil Rytarowski +@@ -708,6 +723,7 @@ Konstantin Ananyev + Krzysztof Galazka + Krzysztof Kanas ++Krzysztof Karas + Krzysztof Witek + Kuba Kozak + Kumar Amber +@@ -747,7 +763,7 @@ Liming Sun + Linfan Hu + Lingli Chen + Lingyu Liu +-Lin Li ++Lin Li + Linsi Yuan + Lior Margalit + Li Qiang +@@ -886,6 +902,7 @@ Michal Litwicki + Michal Mazurek + Michal Michalik + MichaÅ‚ MirosÅ‚aw ++Michal Schmidt + Michal Swiatkowski + Michal Wilczynski + Michel Machado +@@ -911,6 +928,7 @@ Mitch Williams + Mit Matelske + Mohamad Noor Alim Hussin + Mohammad Abdul Awal ++Mohammad Iqbal Ahmad + Mohammed Gamal + Mohsin Kazmi + Mohsin Mazhar Shaikh +@@ -1024,6 +1042,7 @@ Pawel Rutkowski + Pawel Wodkowski + Pei Chao + Pei Zhang ++Pengfei Sun + Peng He + Peng Huang + Peng Sun +@@ -1035,6 +1054,7 @@ Peter Spreadborough + Petr Houska + Phanendra Vukkisala + Phil Yang ++Philip Prindeville + Pierre Pfister + Piotr Azarewicz + Piotr Bartosiewicz +@@ -1050,6 +1070,7 @@ Prashant Upadhyaya + Prateek Agarwal + Praveen Shetty + Pravin Pathak ++Priyalee Kushwaha + Priyanka Jain + Przemyslaw Ciesielski + Przemyslaw Czesnowicz +@@ -1143,6 +1164,7 @@ Roy Franz + Roy Pledge + Roy Shterman + Ruifeng Wang ++Rushil Gupta + Ryan E Hall + Sabyasachi Sengupta + Sachin Saxena +@@ -1159,6 +1181,7 @@ Sangjin Han + Sankar Chokkalingam + Santoshkumar Karanappa Rastapur + Santosh Shukla ++Saoirse O'Donovan + Saori Usami + Sarath Somasekharan + Sarosh Arif +@@ -1210,6 +1233,7 @@ Shiqi Liu <835703180@qq.com> + Shiri Kuzin + Shivanshu Shukla + Shiweixian ++Shiyang He + Shlomi Gridish + Shougang Wang + Shraddha Joshi +@@ -1239,6 +1263,7 @@ Smadar Fuks + Solal Pirelli + Solganik Alexander + Somnath Kotur ++Song Jiale + Song Zhu + Sony Chacko + Sotiris Salloumis +@@ -1386,6 +1411,7 @@ Vijay Kumar Srivastava + 
Vijay Srivastava + Vikas Aggarwal + Vikas Gupta ++Vikash Poddar + Vimal Chungath + Vincent Guo + Vincent Jardin +@@ -1393,6 +1419,7 @@ Vincent Li + Vincent S. Cojot + Vipin Varghese + Vipul Ashri ++Visa Hankala + Vishal Kulkarni + Vishwas Danivas + Vitaliy Mysak +@@ -1562,6 +1589,7 @@ Zhipeng Lu + Zhirun Yan + Zhiwei He + Zhiyong Yang ++Zhuobin Huang + Zi Hu + Zijie Pan + Ziyang Xuan +diff --git a/dpdk/.travis.yml b/dpdk/.travis.yml +index 4bb5bf629e..0ea1242d97 100644 +--- a/dpdk/.travis.yml ++++ b/dpdk/.travis.yml +@@ -42,7 +42,8 @@ script: ./.ci/${TRAVIS_OS_NAME}-build.sh + env: + global: + - LIBABIGAIL_VERSION=libabigail-1.8 +- - REF_GIT_TAG=none ++ - REF_GIT_REPO=https://dpdk.org/git/dpdk-stable ++ - REF_GIT_TAG=v22.11.1 + + jobs: + include: +diff --git a/dpdk/MAINTAINERS b/dpdk/MAINTAINERS +index 22ef2ea4b9..1338ca00ba 100644 +--- a/dpdk/MAINTAINERS ++++ b/dpdk/MAINTAINERS +@@ -850,7 +850,7 @@ F: buildtools/options-ibverbs-static.sh + F: doc/guides/nics/mlx5.rst + F: doc/guides/nics/features/mlx5.ini + +-Microsoft mana - EXPERIMENTAL ++Microsoft mana + M: Long Li + F: drivers/net/mana/ + F: doc/guides/nics/mana.rst +diff --git a/dpdk/VERSION b/dpdk/VERSION +index 7378dd9f9e..af32bf4300 100644 +--- a/dpdk/VERSION ++++ b/dpdk/VERSION +@@ -1 +1 @@ +-22.11.1 ++22.11.3 +diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c +index 2eb8414efa..4751ca26b8 100644 +--- a/dpdk/app/dumpcap/main.c ++++ b/dpdk/app/dumpcap/main.c +@@ -202,6 +202,7 @@ static void add_interface(uint16_t port, const char *name) + rte_exit(EXIT_FAILURE, "no memory for interface\n"); + + memset(intf, 0, sizeof(*intf)); ++ intf->port = port; + rte_strscpy(intf->name, name, sizeof(intf->name)); + + printf("Capturing on '%s'\n", name); +diff --git a/dpdk/app/test-bbdev/test_bbdev_perf.c b/dpdk/app/test-bbdev/test_bbdev_perf.c +index b285d3f3a7..f77ebc4b47 100644 +--- a/dpdk/app/test-bbdev/test_bbdev_perf.c ++++ b/dpdk/app/test-bbdev/test_bbdev_perf.c +@@ -78,13 +78,12 @@ + + #define SYNC_WAIT 0 + #define SYNC_START 1 +-#define INVALID_OPAQUE -1 + + #define INVALID_QUEUE_ID -1 + /* Increment for next code block in external HARQ memory */ + #define HARQ_INCR 32768 + /* Headroom for filler LLRs insertion in HARQ buffer */ +-#define FILLER_HEADROOM 1024 ++#define FILLER_HEADROOM 2048 + /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */ + #define N_ZC_1 66 /* N = 66 Zc for BG 1 */ + #define N_ZC_2 50 /* N = 50 Zc for BG 2 */ +@@ -95,6 +94,7 @@ + #define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */ + #define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */ + ++#define HARQ_MEM_TOLERANCE 256 + static struct test_bbdev_vector test_vector; + + /* Switch between PMD and Interrupt for throughput TC */ +@@ -1952,10 +1952,9 @@ check_enc_status_and_ordering(struct rte_bbdev_enc_op *op, + "op_status (%d) != expected_status (%d)", + op->status, expected_status); + +- if (op->opaque_data != (void *)(uintptr_t)INVALID_OPAQUE) +- TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data, +- "Ordering error, expected %p, got %p", +- (void *)(uintptr_t)order_idx, op->opaque_data); ++ TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data, ++ "Ordering error, expected %p, got %p", ++ (void *)(uintptr_t)order_idx, op->opaque_data); + + return TEST_SUCCESS; + } +@@ -2092,13 +2091,17 @@ validate_op_harq_chain(struct rte_bbdev_op_data *op, + uint16_t data_len = rte_pktmbuf_data_len(m) - offset; + total_data_size += orig_op->segments[i].length; + +- TEST_ASSERT(orig_op->segments[i].length < +- 
(uint32_t)(data_len + 64), ++ TEST_ASSERT(orig_op->segments[i].length < (uint32_t)(data_len + HARQ_MEM_TOLERANCE), + "Length of segment differ in original (%u) and filled (%u) op", + orig_op->segments[i].length, data_len); + harq_orig = (int8_t *) orig_op->segments[i].addr; + harq_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset); + ++ /* Cannot compare HARQ output data for such cases */ ++ if ((ldpc_llr_decimals > 1) && ((ops_ld->op_flags & RTE_BBDEV_LDPC_LLR_COMPRESSION) ++ || (ops_ld->op_flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION))) ++ break; ++ + if (!(ldpc_cap_flags & + RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS + ) || (ops_ld->op_flags & +@@ -2113,9 +2116,9 @@ validate_op_harq_chain(struct rte_bbdev_op_data *op, + ops_ld->n_filler; + if (data_len > deRmOutSize) + data_len = deRmOutSize; +- if (data_len > orig_op->segments[i].length) +- data_len = orig_op->segments[i].length; + } ++ if (data_len > orig_op->segments[i].length) ++ data_len = orig_op->segments[i].length; + /* + * HARQ output can have minor differences + * due to integer representation and related scaling +@@ -2174,7 +2177,7 @@ validate_op_harq_chain(struct rte_bbdev_op_data *op, + + /* Validate total mbuf pkt length */ + uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset; +- TEST_ASSERT(total_data_size < pkt_len + 64, ++ TEST_ASSERT(total_data_size < pkt_len + HARQ_MEM_TOLERANCE, + "Length of data differ in original (%u) and filled (%u) op", + total_data_size, pkt_len); + +@@ -4933,13 +4936,95 @@ get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id, + stats->dequeued_count = q_stats->dequeued_count; + stats->enqueue_err_count = q_stats->enqueue_err_count; + stats->dequeue_err_count = q_stats->dequeue_err_count; +- stats->enqueue_warning_count = q_stats->enqueue_warning_count; +- stats->dequeue_warning_count = q_stats->dequeue_warning_count; ++ stats->enqueue_warn_count = q_stats->enqueue_warn_count; ++ stats->dequeue_warn_count = q_stats->dequeue_warn_count; + stats->acc_offload_cycles = q_stats->acc_offload_cycles; + + return 0; + } - static int i40e_ethertype_filter_convert( - const struct rte_eth_ethertype_filter *input, -@@ -1711,6 +1710,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) ++static int ++offload_latency_test_fft(struct rte_mempool *mempool, struct test_buffers *bufs, ++ struct rte_bbdev_fft_op *ref_op, uint16_t dev_id, ++ uint16_t queue_id, const uint16_t num_to_process, ++ uint16_t burst_sz, struct test_time_stats *time_st) ++{ ++ int i, dequeued, ret; ++ struct rte_bbdev_fft_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; ++ uint64_t enq_start_time, deq_start_time; ++ uint64_t enq_sw_last_time, deq_last_time; ++ struct rte_bbdev_stats stats; ++ ++ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { ++ uint16_t enq = 0, deq = 0; ++ ++ if (unlikely(num_to_process - dequeued < burst_sz)) ++ burst_sz = num_to_process - dequeued; ++ ++ ret = rte_bbdev_fft_op_alloc_bulk(mempool, ops_enq, burst_sz); ++ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", burst_sz); ++ if (test_vector.op_type != RTE_BBDEV_OP_NONE) ++ copy_reference_fft_op(ops_enq, burst_sz, dequeued, ++ bufs->inputs, ++ bufs->hard_outputs, bufs->soft_outputs, ++ ref_op); ++ ++ /* Start time meas for enqueue function offload latency */ ++ enq_start_time = rte_rdtsc_precise(); ++ do { ++ enq += rte_bbdev_enqueue_fft_ops(dev_id, queue_id, ++ &ops_enq[enq], burst_sz - enq); ++ } while (unlikely(burst_sz != enq)); ++ ++ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats); ++ 
TEST_ASSERT_SUCCESS(ret, ++ "Failed to get stats for queue (%u) of device (%u)", ++ queue_id, dev_id); ++ ++ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time - ++ stats.acc_offload_cycles; ++ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time, ++ enq_sw_last_time); ++ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time, ++ enq_sw_last_time); ++ time_st->enq_sw_total_time += enq_sw_last_time; ++ ++ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time, ++ stats.acc_offload_cycles); ++ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time, ++ stats.acc_offload_cycles); ++ time_st->enq_acc_total_time += stats.acc_offload_cycles; ++ ++ /* give time for device to process ops */ ++ rte_delay_us(WAIT_OFFLOAD_US); ++ ++ /* Start time meas for dequeue function offload latency */ ++ deq_start_time = rte_rdtsc_precise(); ++ /* Dequeue one operation */ ++ do { ++ deq += rte_bbdev_dequeue_fft_ops(dev_id, queue_id, ++ &ops_deq[deq], enq); ++ } while (unlikely(deq == 0)); ++ ++ deq_last_time = rte_rdtsc_precise() - deq_start_time; ++ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time, ++ deq_last_time); ++ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time, ++ deq_last_time); ++ time_st->deq_total_time += deq_last_time; ++ ++ /* Dequeue remaining operations if needed*/ ++ while (burst_sz != deq) ++ deq += rte_bbdev_dequeue_fft_ops(dev_id, queue_id, ++ &ops_deq[deq], burst_sz - deq); ++ ++ rte_bbdev_fft_op_free_bulk(ops_enq, deq); ++ dequeued += deq; ++ } ++ ++ return i; ++} ++ + static int + offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs, + struct rte_bbdev_dec_op *ref_op, uint16_t dev_id, +@@ -4958,7 +5043,8 @@ offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs, + if (unlikely(num_to_process - dequeued < burst_sz)) + burst_sz = num_to_process - dequeued; + +- rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); ++ ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); ++ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", burst_sz); + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_dec_op(ops_enq, burst_sz, dequeued, + bufs->inputs, +@@ -5043,7 +5129,8 @@ offload_latency_test_ldpc_dec(struct rte_mempool *mempool, + if (unlikely(num_to_process - dequeued < burst_sz)) + burst_sz = num_to_process - dequeued; + +- rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); ++ ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); ++ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", burst_sz); + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued, + bufs->inputs, +@@ -5295,7 +5382,7 @@ offload_cost_test(struct active_device *ad, + printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n"); + return TEST_SKIPPED; + #else +- int iter; ++ int iter, ret; + uint16_t burst_sz = op_params->burst_sz; + const uint16_t num_to_process = op_params->num_to_process; + const enum rte_bbdev_op_type op_type = test_vector.op_type; +@@ -5390,7 +5477,10 @@ offload_cost_test(struct active_device *ad, + rte_get_tsc_hz()); + + struct rte_bbdev_stats stats = {0}; +- get_bbdev_queue_stats(ad->dev_id, queue_id, &stats); ++ ret = get_bbdev_queue_stats(ad->dev_id, queue_id, &stats); ++ TEST_ASSERT_SUCCESS(ret, ++ "Failed to get stats for queue (%u) of device (%u)", ++ queue_id, ad->dev_id); + if (op_type != RTE_BBDEV_OP_LDPC_DEC) { + TEST_ASSERT_SUCCESS(stats.enqueued_count != num_to_process, + "Mismatch in enqueue count 
%10"PRIu64" %d", +diff --git a/dpdk/app/test-compress-perf/comp_perf_options.h b/dpdk/app/test-compress-perf/comp_perf_options.h +index 57dd146330..d00b299247 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_options.h ++++ b/dpdk/app/test-compress-perf/comp_perf_options.h +@@ -32,9 +32,9 @@ enum cperf_test_type { + }; + + enum comp_operation { +- COMPRESS_ONLY, +- DECOMPRESS_ONLY, +- COMPRESS_DECOMPRESS ++ COMPRESS = (1 << 0), ++ DECOMPRESS = (1 << 1), ++ COMPRESS_DECOMPRESS = (COMPRESS | DECOMPRESS), + }; + + struct range_list { +diff --git a/dpdk/app/test-compress-perf/comp_perf_options_parse.c b/dpdk/app/test-compress-perf/comp_perf_options_parse.c +index 019eddb7bd..303e714cda 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_options_parse.c ++++ b/dpdk/app/test-compress-perf/comp_perf_options_parse.c +@@ -446,11 +446,11 @@ parse_op_type(struct comp_test_data *test_data, const char *arg) + struct name_id_map optype_namemap[] = { + { + "comp", +- COMPRESS_ONLY ++ COMPRESS + }, + { + "decomp", +- DECOMPRESS_ONLY ++ DECOMPRESS + }, + { + "comp_and_decomp", +@@ -491,7 +491,7 @@ parse_huffman_enc(struct comp_test_data *test_data, const char *arg) + int id = get_str_key_id_mapping(huffman_namemap, + RTE_DIM(huffman_namemap), arg); + if (id < 0) { +- RTE_LOG(ERR, USER1, "Invalid Huffmane encoding specified\n"); ++ RTE_LOG(ERR, USER1, "Invalid Huffman encoding specified\n"); + return -1; + } + +@@ -507,7 +507,7 @@ parse_level(struct comp_test_data *test_data, const char *arg) + + /* + * Try parsing the argument as a range, if it fails, +- * arse it as a list ++ * parse it as a list */ - i40e_add_tx_flow_control_drop_filter(pf); + if (parse_range(arg, &test_data->level_lst.min, + &test_data->level_lst.max, +diff --git a/dpdk/app/test-compress-perf/comp_perf_test_common.c b/dpdk/app/test-compress-perf/comp_perf_test_common.c +index b402a0d839..78487196ad 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_test_common.c ++++ b/dpdk/app/test-compress-perf/comp_perf_test_common.c +@@ -227,23 +227,43 @@ comp_perf_allocate_memory(struct comp_test_data *test_data, + { + uint16_t comp_mbuf_size; + uint16_t decomp_mbuf_size; ++ size_t comp_data_size; ++ size_t decomp_data_size; ++ size_t output_data_sz; -+ /* Set the max frame size to 0x2600 by default, -+ * in case other drivers changed the default value. -+ */ -+ i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL); + test_data->out_seg_sz = find_buf_size(test_data->seg_sz); + +- /* Number of segments for input and output +- * (compression and decompression) +- */ +- test_data->total_segs = DIV_CEIL(test_data->input_data_sz, +- test_data->seg_sz); ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Number of segments for input and output ++ * (compression and decompression) ++ */ ++ test_data->total_segs = DIV_CEIL(test_data->input_data_sz, ++ test_data->seg_sz); ++ } else { ++ /* ++ * When application does decompression only, input data is ++ * compressed and smaller than the output. The expected size of ++ * uncompressed data given by the user in segment size argument. 
++ */ ++ test_data->total_segs = test_data->max_sgl_segs; ++ } + - /* initialize RSS rule list */ - TAILQ_INIT(&pf->rss_config_list); ++ output_data_sz = (size_t) test_data->out_seg_sz * test_data->total_segs; ++ output_data_sz = ++ RTE_MAX(output_data_sz, (size_t) MIN_COMPRESSED_BUF_SIZE); + + if (test_data->use_external_mbufs != 0) { + if (comp_perf_allocate_external_mbufs(test_data, mem) < 0) + return -1; + comp_mbuf_size = 0; + decomp_mbuf_size = 0; +- } else { ++ } else if (test_data->test_op & COMPRESS) { + comp_mbuf_size = test_data->out_seg_sz + RTE_PKTMBUF_HEADROOM; + decomp_mbuf_size = test_data->seg_sz + RTE_PKTMBUF_HEADROOM; ++ } else { ++ comp_mbuf_size = test_data->seg_sz + RTE_PKTMBUF_HEADROOM; ++ decomp_mbuf_size = test_data->out_seg_sz + RTE_PKTMBUF_HEADROOM; + } + + char pool_name[32] = ""; +@@ -287,26 +307,28 @@ comp_perf_allocate_memory(struct comp_test_data *test_data, + return -1; + } -@@ -2328,7 +2332,6 @@ i40e_dev_start(struct rte_eth_dev *dev) - uint32_t intr_vector = 0; - struct i40e_vsi *vsi; - uint16_t nb_rxq, nb_txq; -- uint16_t max_frame_size; +- /* +- * Compressed data might be a bit larger than input data, +- * if data cannot be compressed +- */ +- mem->compressed_data = rte_zmalloc_socket(NULL, +- RTE_MAX( +- (size_t) test_data->out_seg_sz * +- test_data->total_segs, +- (size_t) MIN_COMPRESSED_BUF_SIZE), +- 0, +- rte_socket_id()); ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Compressed data might be a bit larger than input data, ++ * if data cannot be compressed ++ */ ++ comp_data_size = output_data_sz; ++ decomp_data_size = test_data->input_data_sz; ++ } else { ++ comp_data_size = test_data->input_data_sz; ++ decomp_data_size = output_data_sz; ++ } ++ ++ mem->compressed_data = rte_zmalloc_socket(NULL, comp_data_size, 0, ++ rte_socket_id()); + if (mem->compressed_data == NULL) { + RTE_LOG(ERR, USER1, "Memory to hold the data from the input " + "file could not be allocated\n"); + return -1; + } + +- mem->decompressed_data = rte_zmalloc_socket(NULL, +- test_data->input_data_sz, 0, +- rte_socket_id()); ++ mem->decompressed_data = rte_zmalloc_socket(NULL, decomp_data_size, 0, ++ rte_socket_id()); + if (mem->decompressed_data == NULL) { + RTE_LOG(ERR, USER1, "Memory to hold the data from the input " + "file could not be allocated\n"); +@@ -344,6 +366,7 @@ int + prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + { + uint32_t remaining_data = test_data->input_data_sz; ++ uint32_t remaining_data_decomp = test_data->input_data_sz; + uint8_t *input_data_ptr = test_data->input_data; + size_t data_sz = 0; + uint8_t *data_addr; +@@ -351,6 +374,7 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + uint16_t segs_per_mbuf = 0; + uint32_t cmz = 0; + uint32_t dmz = 0; ++ bool decompress_only = !!(test_data->test_op == DECOMPRESS); + + for (i = 0; i < mem->total_bufs; i++) { + /* Allocate data in input mbuf and copy data from input file */ +@@ -361,8 +385,6 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + return -1; + } + +- data_sz = RTE_MIN(remaining_data, test_data->seg_sz); +- + if (test_data->use_external_mbufs != 0) { + rte_pktmbuf_attach_extbuf(mem->decomp_bufs[i], + mem->decomp_memzones[dmz]->addr, +@@ -372,16 +394,23 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + dmz++; + } + ++ if (!decompress_only) ++ data_sz = RTE_MIN(remaining_data, test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ + data_addr = (uint8_t 
*) rte_pktmbuf_append( + mem->decomp_bufs[i], data_sz); + if (data_addr == NULL) { + RTE_LOG(ERR, USER1, "Could not append data\n"); + return -1; + } +- rte_memcpy(data_addr, input_data_ptr, data_sz); + +- input_data_ptr += data_sz; +- remaining_data -= data_sz; ++ if (!decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data -= data_sz; ++ } + + /* Already one segment in the mbuf */ + segs_per_mbuf = 1; +@@ -398,8 +427,6 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + return -1; + } + +- data_sz = RTE_MIN(remaining_data, test_data->seg_sz); +- + if (test_data->use_external_mbufs != 0) { + rte_pktmbuf_attach_extbuf( + next_seg, +@@ -410,6 +437,12 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + dmz++; + } + ++ if (!decompress_only) ++ data_sz = RTE_MIN(remaining_data, ++ test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ + data_addr = (uint8_t *)rte_pktmbuf_append(next_seg, + data_sz); + +@@ -418,9 +451,11 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + return -1; + } + +- rte_memcpy(data_addr, input_data_ptr, data_sz); +- input_data_ptr += data_sz; +- remaining_data -= data_sz; ++ if (!decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data -= data_sz; ++ } + + if (rte_pktmbuf_chain(mem->decomp_bufs[i], + next_seg) < 0) { +@@ -447,16 +482,26 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + cmz++; + } + +- data_addr = (uint8_t *) rte_pktmbuf_append( +- mem->comp_bufs[i], +- test_data->out_seg_sz); ++ if (decompress_only) ++ data_sz = RTE_MIN(remaining_data_decomp, test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ ++ data_addr = (uint8_t *) rte_pktmbuf_append(mem->comp_bufs[i], ++ data_sz); + if (data_addr == NULL) { + RTE_LOG(ERR, USER1, "Could not append data\n"); + return -1; + } + ++ if (decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data_decomp -= data_sz; ++ } ++ + /* Chain mbufs if needed for output mbufs */ +- for (j = 1; j < segs_per_mbuf; j++) { ++ for (j = 1; j < segs_per_mbuf && remaining_data_decomp > 0; j++) { + struct rte_mbuf *next_seg = + rte_pktmbuf_alloc(mem->comp_buf_pool); + +@@ -476,13 +521,25 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + cmz++; + } + ++ if (decompress_only) ++ data_sz = RTE_MIN(remaining_data_decomp, ++ test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ + data_addr = (uint8_t *)rte_pktmbuf_append(next_seg, +- test_data->out_seg_sz); ++ data_sz); + if (data_addr == NULL) { + RTE_LOG(ERR, USER1, "Could not append data\n"); + return -1; + } + ++ if (decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data_decomp -= data_sz; ++ } ++ + if (rte_pktmbuf_chain(mem->comp_bufs[i], + next_seg) < 0) { + RTE_LOG(ERR, USER1, "Could not chain mbufs\n"); +diff --git a/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c b/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c +index 7473cb6277..ce6c4d7605 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c ++++ b/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c +@@ -514,38 +514,55 @@ cperf_cyclecount_test_runner(void *test_ctx) + if (cperf_verify_test_runner(&ctx->ver)) + return EXIT_FAILURE; - hw->adapter_stopped 
= 0; +- /* +- * Run the tests twice, discarding the first performance +- * results, before the cache is warmed up +- */ +- +- /* C O M P R E S S */ +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) +- return EXIT_FAILURE; +- } ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) ++ return EXIT_FAILURE; ++ } + +- ops_enq_retries_comp = ctx->ops_enq_retries; +- ops_deq_retries_comp = ctx->ops_deq_retries; ++ ops_enq_retries_comp = ctx->ops_enq_retries; ++ ops_deq_retries_comp = ctx->ops_deq_retries; + +- duration_enq_per_op_comp = ctx->duration_enq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); +- duration_deq_per_op_comp = ctx->duration_deq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); ++ duration_enq_per_op_comp = ctx->duration_enq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ duration_deq_per_op_comp = ctx->duration_deq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ } else { ++ ops_enq_retries_comp = 0; ++ ops_deq_retries_comp = 0; + +- /* D E C O M P R E S S */ +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) +- return EXIT_FAILURE; ++ duration_enq_per_op_comp = 0; ++ duration_deq_per_op_comp = 0; + } + +- ops_enq_retries_decomp = ctx->ops_enq_retries; +- ops_deq_retries_decomp = ctx->ops_deq_retries; ++ if (test_data->test_op & DECOMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) ++ return EXIT_FAILURE; ++ } + +- duration_enq_per_op_decomp = ctx->duration_enq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); +- duration_deq_per_op_decomp = ctx->duration_deq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); ++ ops_enq_retries_decomp = ctx->ops_enq_retries; ++ ops_deq_retries_decomp = ctx->ops_deq_retries; ++ ++ duration_enq_per_op_decomp = ctx->duration_enq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ duration_deq_per_op_decomp = ctx->duration_deq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ } else { ++ ops_enq_retries_decomp = 0; ++ ops_deq_retries_decomp = 0; ++ ++ duration_enq_per_op_decomp = 0; ++ duration_deq_per_op_decomp = 0; ++ } + + duration_setup_per_op = ctx->duration_op / + (ctx->ver.mem.total_bufs * test_data->num_iter); +@@ -563,7 +580,7 @@ cperf_cyclecount_test_runner(void *test_ctx) + " [D-e] - decompression enqueue\n" + " [D-d] - decompression dequeue\n" + " - Cycles section: number of cycles per 'op' for the following operations:\n" +- " setup/op - memory allocation, op configuration and memory dealocation\n" ++ " setup/op - memory allocation, op configuration and memory deallocation\n" + " [C-e] - compression enqueue\n" + " [C-d] - compression dequeue\n" + " [D-e] - decompression enqueue\n" +diff --git a/dpdk/app/test-compress-perf/comp_perf_test_throughput.c b/dpdk/app/test-compress-perf/comp_perf_test_throughput.c +index 79cd2b2bf2..c9f8237626 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_test_throughput.c ++++ b/dpdk/app/test-compress-perf/comp_perf_test_throughput.c +@@ -359,41 +359,53 @@ cperf_throughput_test_runner(void *test_ctx) + * First the verification part is needed + */ + if (cperf_verify_test_runner(&ctx->ver)) { +- ret = EXIT_FAILURE; ++ ret = EXIT_FAILURE; + goto end; + } -@@ -2467,9 +2470,6 @@ 
i40e_dev_start(struct rte_eth_dev *dev) - "please call hierarchy_commit() " - "before starting the port"); +- /* +- * Run the tests twice, discarding the first performance +- * results, before the cache is warmed up +- */ +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } + } +- } -- max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD; -- i40e_set_mac_max_frame(dev, max_frame_size); +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; +- } ++ ctx->comp_tsc_byte = ++ (double)(ctx->comp_tsc_duration[test_data->level]) / ++ test_data->input_data_sz; ++ ctx->comp_gbps = rte_get_tsc_hz() / ctx->comp_tsc_byte * 8 / ++ 1000000000; ++ } else { ++ ctx->comp_tsc_byte = 0; ++ ctx->comp_gbps = 0; + } + +- ctx->comp_tsc_byte = +- (double)(ctx->comp_tsc_duration[test_data->level]) / +- test_data->input_data_sz; ++ if (test_data->test_op & DECOMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } ++ } + +- ctx->decomp_tsc_byte = ++ ctx->decomp_tsc_byte = + (double)(ctx->decomp_tsc_duration[test_data->level]) / +- test_data->input_data_sz; - - return I40E_SUCCESS; +- ctx->comp_gbps = rte_get_tsc_hz() / ctx->comp_tsc_byte * 8 / +- 1000000000; +- +- ctx->decomp_gbps = rte_get_tsc_hz() / ctx->decomp_tsc_byte * 8 / +- 1000000000; ++ test_data->input_data_sz; ++ ctx->decomp_gbps = rte_get_tsc_hz() / ctx->decomp_tsc_byte * 8 / ++ 1000000000; ++ } else { ++ ctx->decomp_tsc_byte = 0; ++ ctx->decomp_gbps = 0; ++ } + + exp = 0; + if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0, +diff --git a/dpdk/app/test-compress-perf/comp_perf_test_verify.c b/dpdk/app/test-compress-perf/comp_perf_test_verify.c +index 8964442891..7d6b6abecd 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_test_verify.c ++++ b/dpdk/app/test-compress-perf/comp_perf_test_verify.c +@@ -114,7 +114,8 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type) + output_data_sz = &ctx->decomp_data_sz; + input_bufs = mem->comp_bufs; + output_bufs = mem->decomp_bufs; +- out_seg_sz = test_data->seg_sz; ++ out_seg_sz = (test_data->test_op & COMPRESS) ? ++ test_data->seg_sz : test_data->out_seg_sz; + } + + /* Create private xform */ +@@ -226,7 +227,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type) + op->status == + RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) { + RTE_LOG(ERR, USER1, +-"Out of space error occurred due to uncompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n"); ++"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. 
Increase the EXPANSE_RATIO constant to use this data.\n"); + res = -1; + goto end; + } else if (op->status != +@@ -311,7 +312,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type) + op->status == + RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) { + RTE_LOG(ERR, USER1, +-"Out of space error occurred due to uncompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n"); ++"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n"); + res = -1; + goto end; + } else if (op->status != +@@ -392,44 +393,59 @@ cperf_verify_test_runner(void *test_ctx) + int ret = EXIT_SUCCESS; + static uint16_t display_once; + uint32_t lcore = rte_lcore_id(); ++ uint16_t exp = 0; + + ctx->mem.lcore_id = lcore; + + test_data->ratio = 0; + +- if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; ++ if (test_data->test_op & COMPRESS) { ++ if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } + } + +- if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; +- } ++ if (test_data->test_op & DECOMPRESS) { ++ if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } + +- if (ctx->decomp_data_sz != test_data->input_data_sz) { +- RTE_LOG(ERR, USER1, +- "Decompressed data length not equal to input data length\n"); +- RTE_LOG(ERR, USER1, +- "Decompressed size = %zu, expected = %zu\n", +- ctx->decomp_data_sz, test_data->input_data_sz); +- ret = EXIT_FAILURE; +- goto end; +- } else { +- if (memcmp(ctx->mem.decompressed_data, +- test_data->input_data, +- test_data->input_data_sz) != 0) { ++ if (!(test_data->test_op & COMPRESS)) { ++ /* ++ * For DECOMPRESS_ONLY mode there is no more ++ * verifications, reset the 'ratio' and 'comp_data_sz' ++ * fields for other tests report. 
++ */ ++ ctx->comp_data_sz = 0; ++ ctx->ratio = 0; ++ goto end; ++ } ++ ++ if (ctx->decomp_data_sz != test_data->input_data_sz) { ++ RTE_LOG(ERR, USER1, ++ "Decompressed data length not equal to input data length\n"); + RTE_LOG(ERR, USER1, +- "Decompressed data is not the same as file data\n"); ++ "Decompressed size = %zu, expected = %zu\n", ++ ctx->decomp_data_sz, test_data->input_data_sz); + ret = EXIT_FAILURE; + goto end; ++ } else { ++ if (memcmp(ctx->mem.decompressed_data, ++ test_data->input_data, ++ test_data->input_data_sz) != 0) { ++ RTE_LOG(ERR, USER1, ++ "Decompressed data is not the same as file data\n"); ++ ret = EXIT_FAILURE; ++ goto end; ++ } + } + } + + ctx->ratio = (double) ctx->comp_data_sz / + test_data->input_data_sz * 100; + +- uint16_t exp = 0; + if (!ctx->silent) { + if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { +diff --git a/dpdk/app/test-compress-perf/main.c b/dpdk/app/test-compress-perf/main.c +index 41b8edc2bd..bbb4c7917b 100644 +--- a/dpdk/app/test-compress-perf/main.c ++++ b/dpdk/app/test-compress-perf/main.c +@@ -254,6 +254,14 @@ comp_perf_dump_input_data(struct comp_test_data *test_data) + goto end; + } + ++ if (!(test_data->test_op & COMPRESS) && ++ test_data->input_data_sz > ++ (size_t) test_data->seg_sz * (size_t) test_data->max_sgl_segs) { ++ RTE_LOG(ERR, USER1, ++ "Size of input must be less than total segments\n"); ++ goto end; ++ } ++ + test_data->input_data = rte_zmalloc_socket(NULL, + test_data->input_data_sz, 0, rte_socket_id()); + +diff --git a/dpdk/app/test-crypto-perf/cperf_ops.c b/dpdk/app/test-crypto-perf/cperf_ops.c +index 61a3967697..93b9bfb240 100644 +--- a/dpdk/app/test-crypto-perf/cperf_ops.c ++++ b/dpdk/app/test-crypto-perf/cperf_ops.c +@@ -42,8 +42,7 @@ test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options, + { + struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *); - tx_err: -@@ -2809,9 +2809,6 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) - return i40e_phy_conf_link(hw, abilities, speed, false); +- if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) || +- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) { ++ if (options->is_outbound) { + memcpy(ip, test_vector->plaintext.data, + sizeof(struct rte_ipv4_hdr)); + +@@ -645,8 +644,9 @@ create_ipsec_session(struct rte_mempool *sess_mp, + const struct cperf_test_vector *test_vector, + uint16_t iv_offset) + { +- struct rte_crypto_sym_xform xform = {0}; + struct rte_crypto_sym_xform auth_xform = {0}; ++ struct rte_crypto_sym_xform *crypto_xform; ++ struct rte_crypto_sym_xform xform = {0}; + + if (options->aead_algo != 0) { + /* Setup AEAD Parameters */ +@@ -660,10 +660,10 @@ create_ipsec_session(struct rte_mempool *sess_mp, + xform.aead.iv.length = test_vector->aead_iv.length; + xform.aead.digest_length = options->digest_sz; + xform.aead.aad_length = options->aead_aad_sz; ++ crypto_xform = &xform; + } else if (options->cipher_algo != 0 && options->auth_algo != 0) { + /* Setup Cipher Parameters */ + xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +- xform.next = NULL; + xform.cipher.algo = options->cipher_algo; + xform.cipher.op = options->cipher_op; + xform.cipher.iv.offset = iv_offset; +@@ -680,7 +680,6 @@ create_ipsec_session(struct rte_mempool *sess_mp, + + /* Setup Auth Parameters */ + auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; +- auth_xform.next = NULL; + auth_xform.auth.algo = options->auth_algo; + auth_xform.auth.op = options->auth_op; + auth_xform.auth.iv.offset = iv_offset + +@@ 
-699,7 +698,15 @@ create_ipsec_session(struct rte_mempool *sess_mp, + auth_xform.auth.iv.length = 0; + } + +- xform.next = &auth_xform; ++ if (options->is_outbound) { ++ crypto_xform = &xform; ++ xform.next = &auth_xform; ++ auth_xform.next = NULL; ++ } else { ++ crypto_xform = &auth_xform; ++ auth_xform.next = &xform; ++ xform.next = NULL; ++ } + } else { + return NULL; + } +@@ -722,30 +729,26 @@ create_ipsec_session(struct rte_mempool *sess_mp, + .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { +- .spi = rte_lcore_id(), ++ .spi = rte_lcore_id() + 1, + /**< For testing sake, lcore_id is taken as SPI so that + * for every core a different session is created. + */ + .salt = CPERF_IPSEC_SALT, + .options = { 0 }, + .replay_win_sz = 0, +- .direction = +- ((options->cipher_op == +- RTE_CRYPTO_CIPHER_OP_ENCRYPT) && +- (options->auth_op == +- RTE_CRYPTO_AUTH_OP_GENERATE)) || +- (options->aead_op == +- RTE_CRYPTO_AEAD_OP_ENCRYPT) ? +- RTE_SECURITY_IPSEC_SA_DIR_EGRESS : +- RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .tunnel = tunnel, + } }, + .userdata = NULL, +- .crypto_xform = &xform ++ .crypto_xform = crypto_xform, + }; + ++ if (options->is_outbound) ++ sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS; ++ else ++ sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; ++ + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_cryptodev_get_sec_ctx(dev_id); + +diff --git a/dpdk/app/test-crypto-perf/cperf_options.h b/dpdk/app/test-crypto-perf/cperf_options.h +index 613d6d31e2..6966e0b286 100644 +--- a/dpdk/app/test-crypto-perf/cperf_options.h ++++ b/dpdk/app/test-crypto-perf/cperf_options.h +@@ -105,6 +105,7 @@ struct cperf_options { + uint32_t out_of_place:1; + uint32_t silent:1; + uint32_t csv:1; ++ uint32_t is_outbound:1; + + enum rte_crypto_cipher_algorithm cipher_algo; + enum rte_crypto_cipher_operation cipher_op; +diff --git a/dpdk/app/test-crypto-perf/cperf_options_parsing.c b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +index bc5e312c81..1f06e15d10 100644 +--- a/dpdk/app/test-crypto-perf/cperf_options_parsing.c ++++ b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +@@ -519,6 +519,7 @@ parse_test_file(struct cperf_options *opts, + if (access(opts->test_file, F_OK) != -1) + return 0; + RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n"); ++ free(opts->test_file); + + return -1; } +@@ -1318,6 +1319,21 @@ cperf_options_check(struct cperf_options *options) + if (check_docsis_buffer_length(options) < 0) + return -EINVAL; + } ++ ++ if (options->op_type == CPERF_IPSEC) { ++ if (options->aead_algo) { ++ if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ++ options->is_outbound = 1; ++ else ++ options->is_outbound = 0; ++ } else { ++ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT && ++ options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) ++ options->is_outbound = 1; ++ else ++ options->is_outbound = 0; ++ } ++ } + #endif --#define CHECK_INTERVAL 100 /* 100ms */ --#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ -- - static __rte_always_inline void - update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) + return 0; +diff --git a/dpdk/app/test-crypto-perf/cperf_test_common.c b/dpdk/app/test-crypto-perf/cperf_test_common.c +index 27646cd619..932aab16df 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_common.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_common.c +@@ -197,9 +197,11 @@ 
cperf_alloc_common_memory(const struct cperf_options *options, + RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size); + uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz; + uint32_t max_size = options->max_buffer_size + options->digest_sz; +- uint16_t segments_nb = (max_size % options->segment_sz) ? +- (max_size / options->segment_sz) + 1 : +- max_size / options->segment_sz; ++ uint32_t segment_data_len = options->segment_sz - options->headroom_sz - ++ options->tailroom_sz; ++ uint16_t segments_nb = (max_size % segment_data_len) ? ++ (max_size / segment_data_len) + 1 : ++ (max_size / segment_data_len); + uint32_t obj_size = crypto_op_total_size_padded + + (mbuf_size * segments_nb); + +diff --git a/dpdk/app/test-crypto-perf/cperf_test_latency.c b/dpdk/app/test-crypto-perf/cperf_test_latency.c +index 49bf421c01..406e082e4e 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_latency.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_latency.c +@@ -43,15 +43,28 @@ struct priv_op_data { + static void + cperf_latency_test_free(struct cperf_latency_ctx *ctx) { -@@ -2878,6 +2875,8 @@ static __rte_always_inline void - update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, - bool enable_lse, int wait_to_complete) +- if (ctx) { +- if (ctx->sess) +- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess); +- +- rte_mempool_free(ctx->pool); ++ if (ctx == NULL) ++ return; + +- rte_free(ctx->res); +- rte_free(ctx); ++ if (ctx->sess != NULL) { ++ if (ctx->options->op_type == CPERF_ASYM_MODEX) ++ rte_cryptodev_asym_session_free(ctx->dev_id, ctx->sess); ++#ifdef RTE_LIB_SECURITY ++ else if (ctx->options->op_type == CPERF_PDCP || ++ ctx->options->op_type == CPERF_DOCSIS || ++ ctx->options->op_type == CPERF_IPSEC) { ++ struct rte_security_ctx *sec_ctx = ++ rte_cryptodev_get_sec_ctx(ctx->dev_id); ++ rte_security_session_destroy(sec_ctx, ctx->sess); ++ } ++#endif ++ else ++ rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess); + } ++ ++ rte_mempool_free(ctx->pool); ++ rte_free(ctx->res); ++ rte_free(ctx); + } + + void * +diff --git a/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c b/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c +index 98e46c3381..737d61d4af 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c +@@ -30,6 +30,7 @@ free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts) + rte_free(vector->cipher_key.data); + rte_free(vector->auth_key.data); + rte_free(vector->ciphertext.data); ++ free(opts->test_file); + } + + rte_free(vector); +diff --git a/dpdk/app/test-crypto-perf/cperf_test_verify.c b/dpdk/app/test-crypto-perf/cperf_test_verify.c +index c03e1d5ba5..8042c94e04 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_verify.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_verify.c +@@ -38,14 +38,27 @@ struct cperf_op_result { + static void + cperf_verify_test_free(struct cperf_verify_ctx *ctx) { -+#define CHECK_INTERVAL 100 /* 100ms */ -+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ - uint32_t rep_cnt = MAX_REPEAT_TIME; - struct i40e_link_status link_status; - int status; -@@ -6738,7 +6737,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) - if (!ret) - rte_eth_dev_callback_process(dev, - RTE_ETH_EVENT_INTR_LSC, NULL); +- if (ctx) { +- if (ctx->sess) +- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess); - - break; - default: - PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", -@@ -12123,40 +12121,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) - return 
ret; +- rte_mempool_free(ctx->pool); ++ if (ctx == NULL) ++ return; + +- rte_free(ctx); ++ if (ctx->sess != NULL) { ++ if (ctx->options->op_type == CPERF_ASYM_MODEX) ++ rte_cryptodev_asym_session_free(ctx->dev_id, ctx->sess); ++#ifdef RTE_LIB_SECURITY ++ else if (ctx->options->op_type == CPERF_PDCP || ++ ctx->options->op_type == CPERF_DOCSIS || ++ ctx->options->op_type == CPERF_IPSEC) { ++ struct rte_security_ctx *sec_ctx = ++ rte_cryptodev_get_sec_ctx(ctx->dev_id); ++ rte_security_session_destroy(sec_ctx, ctx->sess); ++ } ++#endif ++ else ++ rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess); + } ++ ++ rte_mempool_free(ctx->pool); ++ rte_free(ctx); } --static void --i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size) --{ -- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); -- uint32_t rep_cnt = MAX_REPEAT_TIME; -- struct rte_eth_link link; -- enum i40e_status_code status; -- bool can_be_set = true; + void * +diff --git a/dpdk/app/test-crypto-perf/main.c b/dpdk/app/test-crypto-perf/main.c +index af5bd0d23b..bc1f0f9659 100644 +--- a/dpdk/app/test-crypto-perf/main.c ++++ b/dpdk/app/test-crypto-perf/main.c +@@ -193,11 +193,10 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs) + #endif + + struct rte_cryptodev_info cdev_info; +- uint8_t socket_id = rte_cryptodev_socket_id(cdev_id); +- /* range check the socket_id - negative values become big +- * positive ones due to use of unsigned value +- */ +- if (socket_id >= RTE_MAX_NUMA_NODES) ++ int socket_id = rte_cryptodev_socket_id(cdev_id); ++ ++ /* Use the first socket if SOCKET_ID_ANY is returned. */ ++ if (socket_id == SOCKET_ID_ANY) + socket_id = 0; + + rte_cryptodev_info_get(cdev_id, &cdev_info); +@@ -650,7 +649,11 @@ main(int argc, char **argv) + + cdev_id = enabled_cdevs[cdev_index]; + +- uint8_t socket_id = rte_cryptodev_socket_id(cdev_id); ++ int socket_id = rte_cryptodev_socket_id(cdev_id); ++ ++ /* Use the first socket if SOCKET_ID_ANY is returned. 
*/ ++ if (socket_id == SOCKET_ID_ANY) ++ socket_id = 0; + + ctx[i] = cperf_testmap[opts.test].constructor( + session_pool_socket[socket_id].sess_mp, +diff --git a/dpdk/app/test-flow-perf/main.c b/dpdk/app/test-flow-perf/main.c +index 4a9206803a..e0ef78a840 100644 +--- a/dpdk/app/test-flow-perf/main.c ++++ b/dpdk/app/test-flow-perf/main.c +@@ -848,7 +848,12 @@ args_parse(int argc, char **argv) + /* Control */ + if (strcmp(lgopts[opt_idx].name, + "rules-batch") == 0) { +- rules_batch = atoi(optarg); ++ n = atoi(optarg); ++ if (n > 0) ++ rules_batch = n; ++ else ++ rte_exit(EXIT_FAILURE, ++ "flow rules-batch should be > 0\n"); + } + if (strcmp(lgopts[opt_idx].name, + "rules-count") == 0) { +diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c +index b32dc8bfd4..07432f3e57 100644 +--- a/dpdk/app/test-pmd/cmdline.c ++++ b/dpdk/app/test-pmd/cmdline.c +@@ -12917,32 +12917,25 @@ cmdline_read_from_file(const char *filename) + printf("Read CLI commands from %s\n", filename); + } + ++void ++prompt_exit(void) ++{ ++ cmdline_quit(testpmd_cl); ++} ++ + /* prompt function, called from main on MAIN lcore */ + void + prompt(void) + { +- int ret; - -- /* -- * I40E_MEDIA_TYPE_BASET link up can be ignored -- * I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type -- * is I40E_MEDIA_TYPE_UNKNOWN -- */ -- if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && -- hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) { -- do { -- update_link_reg(hw, &link); -- if (link.link_status) -- break; -- rte_delay_ms(CHECK_INTERVAL); + testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> "); +- if (testpmd_cl == NULL) ++ if (testpmd_cl == NULL) { ++ fprintf(stderr, ++ "Failed to create stdin based cmdline context\n"); + return; +- +- ret = atexit(prompt_exit); +- if (ret != 0) +- fprintf(stderr, "Cannot set exit function for cmdline\n"); ++ } + + cmdline_interact(testpmd_cl); +- if (ret != 0) +- cmdline_stdin_exit(testpmd_cl); +-} +- +-void +-prompt_exit(void) +-{ +- if (testpmd_cl != NULL) { +- cmdline_quit(testpmd_cl); +- cmdline_stdin_exit(testpmd_cl); +- } ++ cmdline_stdin_exit(testpmd_cl); + } + + void +diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c +index 88108498e0..6970f90307 100644 +--- a/dpdk/app/test-pmd/cmdline_flow.c ++++ b/dpdk/app/test-pmd/cmdline_flow.c +@@ -2940,6 +2940,7 @@ static const struct token token_list[] = { + NEXT_ENTRY(COMMON_UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct buffer, + args.table.attr.nb_flows)), ++ .call = parse_table, + }, + [TABLE_PATTERN_TEMPLATE] = { + .name = "pattern_template", +@@ -7737,15 +7738,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token, + l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); +- memcpy(header, ð, sizeof(eth)); +- header += sizeof(eth); ++ memcpy(header, ð.hdr, sizeof(struct rte_ether_hdr)); ++ header += sizeof(struct rte_ether_hdr); + if (l2_encap_conf.select_vlan) { + if (l2_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); +- memcpy(header, &vlan, sizeof(vlan)); +- header += sizeof(vlan); ++ memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr)); ++ header += sizeof(struct rte_vlan_hdr); + } + action_encap_data->conf.size = header - + action_encap_data->data; +@@ -7793,11 +7794,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token, + header = action_decap_data->data; + if 
(l2_decap_conf.select_vlan) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); +- memcpy(header, ð, sizeof(eth)); +- header += sizeof(eth); ++ memcpy(header, ð.hdr, sizeof(struct rte_ether_hdr)); ++ header += sizeof(struct rte_ether_hdr); + if (l2_decap_conf.select_vlan) { +- memcpy(header, &vlan, sizeof(vlan)); +- header += sizeof(vlan); ++ memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr)); ++ header += sizeof(struct rte_vlan_hdr); + } + action_decap_data->conf.size = header - + action_decap_data->data; +@@ -7877,15 +7878,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token, + mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); +- memcpy(header, ð, sizeof(eth)); +- header += sizeof(eth); ++ memcpy(header, ð.hdr, sizeof(struct rte_ether_hdr)); ++ header += sizeof(struct rte_ether_hdr); + if (mplsogre_encap_conf.select_vlan) { + if (mplsogre_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); +- memcpy(header, &vlan, sizeof(vlan)); +- header += sizeof(vlan); ++ memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr)); ++ header += sizeof(struct rte_vlan_hdr); + } + if (mplsogre_encap_conf.select_ipv4) { + memcpy(header, &ipv4, sizeof(ipv4)); +@@ -7972,15 +7973,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, + mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); +- memcpy(header, ð, sizeof(eth)); +- header += sizeof(eth); ++ memcpy(header, ð.hdr, sizeof(struct rte_ether_hdr)); ++ header += sizeof(struct rte_ether_hdr); + if (mplsogre_encap_conf.select_vlan) { + if (mplsogre_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); +- memcpy(header, &vlan, sizeof(vlan)); +- header += sizeof(vlan); ++ memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr)); ++ header += sizeof(struct rte_vlan_hdr); + } + if (mplsogre_encap_conf.select_ipv4) { + memcpy(header, &ipv4, sizeof(ipv4)); +@@ -8071,15 +8072,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token, + mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); +- memcpy(header, ð, sizeof(eth)); +- header += sizeof(eth); ++ memcpy(header, ð.hdr, sizeof(struct rte_ether_hdr)); ++ header += sizeof(struct rte_ether_hdr); + if (mplsoudp_encap_conf.select_vlan) { + if (mplsoudp_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); +- memcpy(header, &vlan, sizeof(vlan)); +- header += sizeof(vlan); ++ memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr)); ++ header += sizeof(struct rte_vlan_hdr); + } + if (mplsoudp_encap_conf.select_ipv4) { + memcpy(header, &ipv4, sizeof(ipv4)); +@@ -8168,15 +8169,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token, + mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); +- memcpy(header, ð, sizeof(eth)); +- header += sizeof(eth); ++ memcpy(header, ð.hdr, sizeof(struct rte_ether_hdr)); ++ header += sizeof(struct rte_ether_hdr); + if (mplsoudp_encap_conf.select_vlan) { + if (mplsoudp_encap_conf.select_ipv4) 
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); +- memcpy(header, &vlan, sizeof(vlan)); +- header += sizeof(vlan); ++ memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr)); ++ header += sizeof(struct rte_vlan_hdr); + } + if (mplsoudp_encap_conf.select_ipv4) { + memcpy(header, &ipv4, sizeof(ipv4)); +@@ -8993,6 +8994,11 @@ parse_table(struct context *ctx, const struct token *token, + case TABLE_TRANSFER: + out->args.table.attr.flow_attr.transfer = 1; + return len; ++ case TABLE_RULES_NUMBER: ++ ctx->objdata = 0; ++ ctx->object = out; ++ ctx->objmask = NULL; ++ return len; + default: + return -1; + } +diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c +index acccb6b035..6a9eb4609c 100644 +--- a/dpdk/app/test-pmd/config.c ++++ b/dpdk/app/test-pmd/config.c +@@ -1875,6 +1875,7 @@ port_action_handle_update(portid_t port_id, uint32_t id, + struct rte_flow_error error; + struct rte_flow_action_handle *action_handle; + struct port_indirect_action *pia; ++ struct rte_flow_update_meter_mark mtr_update; + const void *update; + + action_handle = port_action_handle_get_by_id(port_id, id); +@@ -1888,6 +1889,17 @@ port_action_handle_update(portid_t port_id, uint32_t id, + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + update = action->conf; + break; ++ case RTE_FLOW_ACTION_TYPE_METER_MARK: ++ memcpy(&mtr_update.meter_mark, action->conf, ++ sizeof(struct rte_flow_action_meter_mark)); ++ if (mtr_update.meter_mark.profile) ++ mtr_update.profile_valid = 1; ++ if (mtr_update.meter_mark.policy) ++ mtr_update.policy_valid = 1; ++ mtr_update.color_mode_valid = 1; ++ mtr_update.state_valid = 1; ++ update = &mtr_update; ++ break; + default: + update = action; + break; +@@ -2924,8 +2936,10 @@ port_queue_action_handle_update(portid_t port_id, + case RTE_FLOW_ACTION_TYPE_METER_MARK: + rte_memcpy(&mtr_update.meter_mark, action->conf, + sizeof(struct rte_flow_action_meter_mark)); +- mtr_update.profile_valid = 1; +- mtr_update.policy_valid = 1; ++ if (mtr_update.meter_mark.profile) ++ mtr_update.profile_valid = 1; ++ if (mtr_update.meter_mark.policy) ++ mtr_update.policy_valid = 1; + mtr_update.color_mode_valid = 1; + mtr_update.init_color_valid = 1; + mtr_update.state_valid = 1; +diff --git a/dpdk/app/test-pmd/csumonly.c b/dpdk/app/test-pmd/csumonly.c +index 1c24598515..4efb72be77 100644 +--- a/dpdk/app/test-pmd/csumonly.c ++++ b/dpdk/app/test-pmd/csumonly.c +@@ -250,7 +250,7 @@ parse_gtp(struct rte_udp_hdr *udp_hdr, + info->l4_proto = 0; + } + +- info->l2_len += RTE_ETHER_GTP_HLEN; ++ info->l2_len += gtp_len + sizeof(*udp_hdr); + } + + /* Parse a vxlan header */ +@@ -1168,10 +1168,13 @@ tunnel_update: + + nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue, + tx_pkts_burst, nb_rx); +- if (nb_prep != nb_rx) ++ if (nb_prep != nb_rx) { + fprintf(stderr, + "Preparing packet burst to transmit failed: %s\n", + rte_strerror(rte_errno)); ++ fs->fwd_dropped += (nb_rx - nb_prep); ++ rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_prep], nb_rx - nb_prep); ++ } + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst, + nb_prep); +@@ -1179,12 +1182,12 @@ tunnel_update: + /* + * Retry if necessary + */ +- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { ++ if (unlikely(nb_tx < nb_prep) && fs->retry_enabled) { + retry = 0; +- while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { ++ while (nb_tx < nb_prep && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, +- 
&tx_pkts_burst[nb_tx], nb_rx - nb_tx); ++ &tx_pkts_burst[nb_tx], nb_prep - nb_tx); + } + } + fs->tx_packets += nb_tx; +@@ -1194,11 +1197,11 @@ tunnel_update: + fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum; + + inc_tx_burst_stats(fs, nb_tx); +- if (unlikely(nb_tx < nb_rx)) { +- fs->fwd_dropped += (nb_rx - nb_tx); ++ if (unlikely(nb_tx < nb_prep)) { ++ fs->fwd_dropped += (nb_prep - nb_tx); + do { + rte_pktmbuf_free(tx_pkts_burst[nb_tx]); +- } while (++nb_tx < nb_rx); ++ } while (++nb_tx < nb_prep); + } + + get_end_cycles(fs, start_tsc); +diff --git a/dpdk/app/test-pmd/ieee1588fwd.c b/dpdk/app/test-pmd/ieee1588fwd.c +index fc4e2d014c..896d5ef26a 100644 +--- a/dpdk/app/test-pmd/ieee1588fwd.c ++++ b/dpdk/app/test-pmd/ieee1588fwd.c +@@ -184,13 +184,13 @@ ieee1588_packet_fwd(struct fwd_stream *fs) + + /* Forward PTP packet with hardware TX timestamp */ + mb->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST; +- fs->tx_packets += 1; + if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) { + printf("Port %u sent PTP packet dropped\n", fs->rx_port); + fs->fwd_dropped += 1; + rte_pktmbuf_free(mb); + return; + } ++ fs->tx_packets += 1; + + /* + * Check the TX timestamp. +diff --git a/dpdk/app/test-pmd/noisy_vnf.c b/dpdk/app/test-pmd/noisy_vnf.c +index c65ec6f06a..abd99a0407 100644 +--- a/dpdk/app/test-pmd/noisy_vnf.c ++++ b/dpdk/app/test-pmd/noisy_vnf.c +@@ -214,9 +214,10 @@ flush: + sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + tmp_pkts, nb_deqd); + if (unlikely(sent < nb_deqd) && fs->retry_enabled) +- nb_tx += do_retry(nb_rx, nb_tx, tmp_pkts, fs); +- inc_tx_burst_stats(fs, nb_tx); ++ sent += do_retry(nb_deqd, sent, tmp_pkts, fs); ++ inc_tx_burst_stats(fs, sent); + fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent); ++ nb_tx += sent; + ncf->prev_time = rte_get_timer_cycles(); + } + } +diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c +index 134d79a555..b69b248e47 100644 +--- a/dpdk/app/test-pmd/testpmd.c ++++ b/dpdk/app/test-pmd/testpmd.c +@@ -11,6 +11,7 @@ + #include + #ifndef RTE_EXEC_ENV_WINDOWS + #include ++#include + #endif + #include + #include +@@ -231,7 +232,7 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */ + * In container, it cannot terminate the process which is running with the 'stats-period' + * option. Set flag to exit the stats period loop after receiving SIGINT/SIGTERM. + */ +-static volatile uint8_t f_quit; ++volatile uint8_t f_quit; + uint8_t cl_quit; /* Quit testpmd from cmdline. 
*/ + + /* +@@ -2056,6 +2057,8 @@ fwd_stats_display(void) + fwd_cycles += fs->core_cycles; + } + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { ++ uint64_t tx_dropped = 0; ++ + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + +@@ -2077,8 +2080,9 @@ fwd_stats_display(void) + total_recv += stats.ipackets; + total_xmit += stats.opackets; + total_rx_dropped += stats.imissed; +- total_tx_dropped += ports_stats[pt_id].tx_dropped; +- total_tx_dropped += stats.oerrors; ++ tx_dropped += ports_stats[pt_id].tx_dropped; ++ tx_dropped += stats.oerrors; ++ total_tx_dropped += tx_dropped; + total_rx_nombuf += stats.rx_nombuf; + + printf("\n %s Forward statistics for port %-2d %s\n", +@@ -2105,8 +2109,8 @@ fwd_stats_display(void) + + printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 + "TX-total: %-"PRIu64"\n", +- stats.opackets, ports_stats[pt_id].tx_dropped, +- stats.opackets + ports_stats[pt_id].tx_dropped); ++ stats.opackets, tx_dropped, ++ stats.opackets + tx_dropped); + + if (record_burst_stats) { + if (ports_stats[pt_id].rx_stream) +@@ -2339,6 +2343,70 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) + } + } + ++static void ++update_rx_queue_state(uint16_t port_id, uint16_t queue_id) ++{ ++ struct rte_eth_rxq_info rx_qinfo; ++ int32_t rc; ++ ++ rc = rte_eth_rx_queue_info_get(port_id, ++ queue_id, &rx_qinfo); ++ if (rc == 0) { ++ ports[port_id].rxq[queue_id].state = ++ rx_qinfo.queue_state; ++ } else if (rc == -ENOTSUP) { ++ /* ++ * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED ++ * so that PMDs which do not implement ++ * rte_eth_rx_queue_info_get can still forward. ++ */ ++ ports[port_id].rxq[queue_id].state = ++ RTE_ETH_QUEUE_STATE_STARTED; ++ } else { ++ TESTPMD_LOG(WARNING, ++ "Failed to get rx queue info\n"); ++ } ++} ++ ++static void ++update_tx_queue_state(uint16_t port_id, uint16_t queue_id) ++{ ++ struct rte_eth_txq_info tx_qinfo; ++ int32_t rc; ++ ++ rc = rte_eth_tx_queue_info_get(port_id, ++ queue_id, &tx_qinfo); ++ if (rc == 0) { ++ ports[port_id].txq[queue_id].state = ++ tx_qinfo.queue_state; ++ } else if (rc == -ENOTSUP) { ++ /* ++ * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED ++ * so that PMDs which do not implement ++ * rte_eth_tx_queue_info_get can still forward. ++ */ ++ ports[port_id].txq[queue_id].state = ++ RTE_ETH_QUEUE_STATE_STARTED; ++ } else { ++ TESTPMD_LOG(WARNING, ++ "Failed to get tx queue info\n"); ++ } ++} ++ ++static void ++update_queue_state(void) ++{ ++ portid_t pi; ++ queueid_t qi; ++ ++ RTE_ETH_FOREACH_DEV(pi) { ++ for (qi = 0; qi < nb_rxq; qi++) ++ update_rx_queue_state(pi, qi); ++ for (qi = 0; qi < nb_txq; qi++) ++ update_tx_queue_state(pi, qi); ++ } ++} ++ + /* + * Launch packet forwarding configuration. 
+ */ +@@ -2378,9 +2446,12 @@ start_packet_forwarding(int with_tx_first) + if (!pkt_fwd_shared_rxq_check()) + return; + +- if (stream_init != NULL) ++ if (stream_init != NULL) { ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ update_queue_state(); + for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) + stream_init(fwd_streams[i]); ++ } + + port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; + if (port_fwd_begin != NULL) { +@@ -2880,7 +2951,7 @@ update_bonding_port_dev_conf(portid_t bond_pid) + int + start_port(portid_t pid) + { +- int diag, need_check_link_status = -1; ++ int diag; + portid_t pi; + portid_t p_pi = RTE_MAX_ETHPORTS; + portid_t pl[RTE_MAX_ETHPORTS]; +@@ -2891,6 +2962,9 @@ start_port(portid_t pid) + queueid_t qi; + struct rte_port *port; + struct rte_eth_hairpin_cap cap; ++ bool at_least_one_port_exist = false; ++ bool all_ports_already_started = true; ++ bool at_least_one_port_successfully_started = false; + + if (port_id_is_invalid(pid, ENABLED_WARN)) + return 0; +@@ -2906,11 +2980,13 @@ start_port(portid_t pid) + continue; + } + +- need_check_link_status = 0; ++ at_least_one_port_exist = true; ++ + port = &ports[pi]; +- if (port->port_status == RTE_PORT_STOPPED) ++ if (port->port_status == RTE_PORT_STOPPED) { + port->port_status = RTE_PORT_HANDLING; +- else { ++ all_ports_already_started = false; ++ } else { + fprintf(stderr, "Port %d is now not stopped\n", pi); + continue; + } +@@ -3130,15 +3206,17 @@ start_port(portid_t pid) + printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi, + RTE_ETHER_ADDR_BYTES(&port->eth_addr)); + +- /* at least one port started, need checking link status */ +- need_check_link_status = 1; ++ at_least_one_port_successfully_started = true; + + pl[cfg_pi++] = pi; + } + +- if (need_check_link_status == 1 && !no_link_check) ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ update_queue_state(); ++ ++ if (at_least_one_port_successfully_started && !no_link_check) + check_all_ports_link_status(RTE_PORT_ALL); +- else if (need_check_link_status == 0) ++ else if (at_least_one_port_exist && all_ports_already_started) + fprintf(stderr, "Please stop the ports first\n"); + + if (hairpin_mode & 0xf) { +@@ -4315,13 +4393,6 @@ init_port(void) + memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); + } + +-static void +-force_quit(void) +-{ +- pmd_test_exit(); +- prompt_exit(); +-} +- + static void + print_stats(void) + { +@@ -4340,28 +4411,10 @@ print_stats(void) + } + + static void +-signal_handler(int signum) ++signal_handler(int signum __rte_unused) + { +- if (signum == SIGINT || signum == SIGTERM) { +- fprintf(stderr, "\nSignal %d received, preparing to exit...\n", +- signum); +-#ifdef RTE_LIB_PDUMP +- /* uninitialize packet capture framework */ +- rte_pdump_uninit(); +-#endif +-#ifdef RTE_LIB_LATENCYSTATS +- if (latencystats_enabled != 0) +- rte_latencystats_uninit(); +-#endif +- force_quit(); +- /* Set flag to indicate the force termination. 
*/ +- f_quit = 1; +- /* exit with the expected status */ +-#ifndef RTE_EXEC_ENV_WINDOWS +- signal(signum, SIG_DFL); +- kill(getpid(), signum); +-#endif +- } ++ f_quit = 1; ++ prompt_exit(); + } + + int +@@ -4372,8 +4425,18 @@ main(int argc, char** argv) + uint16_t count; + int ret; + ++#ifdef RTE_EXEC_ENV_WINDOWS + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); ++#else ++ /* Want read() not to be restarted on signal */ ++ struct sigaction action = { ++ .sa_handler = signal_handler, ++ }; ++ ++ sigaction(SIGINT, &action, NULL); ++ sigaction(SIGTERM, &action, NULL); ++#endif + + testpmd_logtype = rte_log_register("testpmd"); + if (testpmd_logtype < 0) +@@ -4385,6 +4448,9 @@ main(int argc, char** argv) + rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", + rte_strerror(rte_errno)); + ++ /* allocate port structures, and init them */ ++ init_port(); ++ + ret = register_eth_event_callback(); + if (ret != 0) + rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); +@@ -4403,9 +4469,6 @@ main(int argc, char** argv) + if (nb_ports == 0) + TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); + +- /* allocate port structures, and init them */ +- init_port(); +- + set_def_fwd_config(); + if (nb_lcores == 0) + rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" +@@ -4483,8 +4546,13 @@ main(int argc, char** argv) + } + } + +- if (!no_device_start && start_port(RTE_PORT_ALL) != 0) +- rte_exit(EXIT_FAILURE, "Start ports failed\n"); ++ if (!no_device_start && start_port(RTE_PORT_ALL) != 0) { ++ if (!interactive) { ++ rte_eal_cleanup(); ++ rte_exit(EXIT_FAILURE, "Start ports failed\n"); ++ } ++ fprintf(stderr, "Start ports failed\n"); ++ } + + /* set all ports to promiscuous mode by default */ + RTE_ETH_FOREACH_DEV(port_id) { +@@ -4536,15 +4604,9 @@ main(int argc, char** argv) + start_packet_forwarding(0); + } + prompt(); +- pmd_test_exit(); + } else + #endif + { +- char c; +- int rc; +- +- f_quit = 0; +- + printf("No commandline core given, start packet forwarding\n"); + start_packet_forwarding(tx_first); + if (stats_period != 0) { +@@ -4567,15 +4629,41 @@ main(int argc, char** argv) + prev_time = cur_time; + rte_delay_us_sleep(US_PER_S); + } +- } ++ } else { ++ char c; ++ fd_set fds; ++ ++ printf("Press enter to exit\n"); ++ ++ FD_ZERO(&fds); ++ FD_SET(0, &fds); ++ ++ /* wait for signal or enter */ ++ ret = select(1, &fds, NULL, NULL, NULL); ++ if (ret < 0 && errno != EINTR) ++ rte_exit(EXIT_FAILURE, ++ "Select failed: %s\n", ++ strerror(errno)); + +- printf("Press enter to exit\n"); +- rc = read(0, &c, 1); +- pmd_test_exit(); +- if (rc < 0) +- return 1; ++ /* if we got enter then consume it */ ++ if (ret == 1 && read(0, &c, 1) < 0) ++ rte_exit(EXIT_FAILURE, ++ "Read failed: %s\n", ++ strerror(errno)); ++ } + } + ++ pmd_test_exit(); ++ ++#ifdef RTE_LIB_PDUMP ++ /* uninitialize packet capture framework */ ++ rte_pdump_uninit(); ++#endif ++#ifdef RTE_LIB_LATENCYSTATS ++ if (latencystats_enabled != 0) ++ rte_latencystats_uninit(); ++#endif ++ + ret = rte_eal_cleanup(); + if (ret != 0) + rte_exit(EXIT_FAILURE, +diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h +index 7d24d25970..022210a7a9 100644 +--- a/dpdk/app/test-pmd/testpmd.h ++++ b/dpdk/app/test-pmd/testpmd.h +@@ -34,6 +34,7 @@ + #define RTE_PORT_HANDLING (uint16_t)3 + + extern uint8_t cl_quit; ++extern volatile uint8_t f_quit; + + /* + * It is used to allocate the memory for hash key. 
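The testpmd hunks above replace the old signal()-based handler, which tore the application down from inside signal context and then re-raised the signal, with a sigaction()-based handler that only sets the volatile f_quit flag and defers all cleanup (pmd_test_exit(), rte_pdump_uninit(), rte_latencystats_uninit()) to main(). Below is a minimal standalone sketch of that pattern, not code from the patch; the names on_signal and quit are hypothetical stand-ins for testpmd's signal_handler and f_quit. Registering the handler via sigaction() with sa_flags left at zero (no SA_RESTART) makes a blocking select()/read() fail with EINTR when a signal arrives, instead of being transparently restarted:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/select.h>
    #include <unistd.h>

    static volatile sig_atomic_t quit;

    static void
    on_signal(int signum)
    {
        (void)signum;
        quit = 1;    /* the only work done in signal context */
    }

    int
    main(void)
    {
        /* sa_flags stays zero: no SA_RESTART, so blocking syscalls
         * return -1 with errno == EINTR when a signal is delivered. */
        struct sigaction action = { .sa_handler = on_signal };

        sigaction(SIGINT, &action, NULL);
        sigaction(SIGTERM, &action, NULL);

        printf("Press enter to exit\n");
        while (!quit) {
            fd_set fds;
            char c;
            int ret;

            FD_ZERO(&fds);
            FD_SET(0, &fds);

            /* wait for either a signal (EINTR) or input on stdin */
            ret = select(1, &fds, NULL, NULL, NULL);
            if (ret < 0 && errno == EINTR)
                continue;    /* loop condition re-checks the quit flag */
            if (ret < 0) {
                fprintf(stderr, "select failed: %s\n", strerror(errno));
                return 1;
            }
            if (ret == 1 && read(0, &c, 1) >= 0)
                break;    /* got enter (or EOF) */
        }

        /* all teardown runs here, in normal context after the loop,
         * mirroring how the patch moves pmd_test_exit() after prompt() */
        return 0;
    }

Keeping the handler down to a single sig_atomic_t store is what makes it async-signal-safe; the previous handler called library teardown functions from signal context, which is exactly the behavior these hunks remove.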
+diff --git a/dpdk/app/test/meson.build b/dpdk/app/test/meson.build +index f34d19e3c3..96702c2078 100644 +--- a/dpdk/app/test/meson.build ++++ b/dpdk/app/test/meson.build +@@ -190,6 +190,7 @@ fast_tests = [ + ['fib_autotest', true, true], + ['fib6_autotest', true, true], + ['func_reentrancy_autotest', false, true], ++ ['graph_autotest', true, true], + ['hash_autotest', true, true], + ['interrupt_autotest', true, true], + ['ipfrag_autotest', false, true], +@@ -206,6 +207,7 @@ fast_tests = [ + ['memzone_autotest', false, true], + ['meter_autotest', true, true], + ['multiprocess_autotest', false, false], ++ ['node_list_dump', true, true], + ['per_lcore_autotest', true, true], + ['pflock_autotest', true, true], + ['prefetch_autotest', true, true], +@@ -295,6 +297,7 @@ perf_test_names = [ + 'trace_perf_autotest', + 'ipsec_perf_autotest', + 'thash_perf_autotest', ++ 'graph_perf_autotest', + ] + + driver_test_names = [ +diff --git a/dpdk/app/test/packet_burst_generator.c b/dpdk/app/test/packet_burst_generator.c +index 6b42b9b83b..867a88da00 100644 +--- a/dpdk/app/test/packet_burst_generator.c ++++ b/dpdk/app/test/packet_burst_generator.c +@@ -263,11 +263,11 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst, + void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr, + int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs) + { +- int i, nb_pkt = 0; +- size_t eth_hdr_size; +- ++ const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs; + struct rte_mbuf *pkt_seg; + struct rte_mbuf *pkt; ++ size_t eth_hdr_size; ++ int i, nb_pkt = 0; + + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + pkt = rte_pktmbuf_alloc(mp); +@@ -278,7 +278,7 @@ nomore_mbuf: + break; + } + +- pkt->data_len = pkt_len; ++ pkt->data_len = pkt_seg_data_len; + pkt_seg = pkt; + for (i = 1; i < nb_pkt_segs; i++) { + pkt_seg->next = rte_pktmbuf_alloc(mp); +@@ -288,7 +288,10 @@ nomore_mbuf: + goto nomore_mbuf; + } + pkt_seg = pkt_seg->next; +- pkt_seg->data_len = pkt_len; ++ if (i != nb_pkt_segs - 1) ++ pkt_seg->data_len = pkt_seg_data_len; ++ else ++ pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs; + } + pkt_seg->next = NULL; /* Last segment of packet. */ + +@@ -344,11 +347,11 @@ generate_packet_burst_proto(struct rte_mempool *mp, + uint8_t ipv4, uint8_t proto, void *proto_hdr, + int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs) + { +- int i, nb_pkt = 0; +- size_t eth_hdr_size; +- ++ const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs; + struct rte_mbuf *pkt_seg; + struct rte_mbuf *pkt; ++ size_t eth_hdr_size; ++ int i, nb_pkt = 0; + + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + pkt = rte_pktmbuf_alloc(mp); +@@ -359,7 +362,7 @@ nomore_mbuf: + break; + } + +- pkt->data_len = pkt_len; ++ pkt->data_len = pkt_seg_data_len; + pkt_seg = pkt; + for (i = 1; i < nb_pkt_segs; i++) { + pkt_seg->next = rte_pktmbuf_alloc(mp); +@@ -369,7 +372,10 @@ nomore_mbuf: + goto nomore_mbuf; + } + pkt_seg = pkt_seg->next; +- pkt_seg->data_len = pkt_len; ++ if (i != nb_pkt_segs - 1) ++ pkt_seg->data_len = pkt_seg_data_len; ++ else ++ pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs; + } + pkt_seg->next = NULL; /* Last segment of packet. 
*/ + +diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c +index d6ae762df9..bdd3da7a7c 100644 +--- a/dpdk/app/test/test_cryptodev.c ++++ b/dpdk/app/test/test_cryptodev.c +@@ -136,6 +136,17 @@ security_proto_supported(enum rte_security_session_action_type action, + static int + dev_configure_and_start(uint64_t ff_disable); + ++static int ++check_cipher_capability(const struct crypto_testsuite_params *ts_params, ++ const enum rte_crypto_cipher_algorithm cipher_algo, ++ const uint16_t key_size, const uint16_t iv_size); ++ ++static int ++check_auth_capability(const struct crypto_testsuite_params *ts_params, ++ const enum rte_crypto_auth_algorithm auth_algo, ++ const uint16_t key_size, const uint16_t iv_size, ++ const uint16_t tag_size); ++ + static struct rte_mbuf * + setup_test_string(struct rte_mempool *mpool, + const char *string, size_t len, uint8_t blocksize) +@@ -4761,7 +4772,6 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) + unsigned int plaintext_len; + + struct rte_cryptodev_info dev_info; +- struct rte_cryptodev_sym_capability_idx cap_idx; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; +@@ -4783,19 +4793,14 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) + return TEST_SKIPPED; + + /* Check if device supports ZUC EEA3 */ +- cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +- cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3; +- +- if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], +- &cap_idx) == NULL) ++ if (check_cipher_capability(ts_params, RTE_CRYPTO_CIPHER_ZUC_EEA3, ++ tdata->key.len, tdata->cipher_iv.len) < 0) + return TEST_SKIPPED; + + /* Check if device supports ZUC EIA3 */ +- cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; +- cap_idx.algo.auth = RTE_CRYPTO_AUTH_ZUC_EIA3; +- +- if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], +- &cap_idx) == NULL) ++ if (check_auth_capability(ts_params, RTE_CRYPTO_AUTH_ZUC_EIA3, ++ tdata->key.len, tdata->auth_iv.len, ++ tdata->digest.len) < 0) + return TEST_SKIPPED; + + /* Create ZUC session */ +@@ -4853,7 +4858,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) + TEST_ASSERT_BUFFERS_ARE_EQUAL( + ut_params->digest, + tdata->digest.data, +- 4, ++ tdata->digest.len, + "ZUC Generated auth tag not as expected"); + return 0; + } +@@ -6415,7 +6420,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, + TEST_ASSERT_BUFFERS_ARE_EQUAL( + ut_params->digest, + tdata->digest.data, +- DIGEST_BYTE_LENGTH_KASUMI_F9, ++ tdata->digest.len, + "ZUC Generated auth tag not as expected"); + } + return 0; +@@ -6453,6 +6458,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, + tdata->digest.len) < 0) + return TEST_SKIPPED; + ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + + uint64_t feat_flags = dev_info.feature_flags; +@@ -6622,7 +6630,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, + TEST_ASSERT_BUFFERS_ARE_EQUAL( + digest, + tdata->digest.data, +- DIGEST_BYTE_LENGTH_KASUMI_F9, ++ tdata->digest.len, + "ZUC Generated auth tag not as expected"); + } + return 0; +@@ -6852,6 +6860,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, + static int + test_snow3g_decryption_with_digest_test_case_1(void) + { ++ int ret; + struct snow3g_hash_test_data snow3g_hash_data; + struct rte_cryptodev_info dev_info; + struct crypto_testsuite_params 
*ts_params = &testsuite_params; +@@ -6870,8 +6879,9 @@ test_snow3g_decryption_with_digest_test_case_1(void) + */ + snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data); + +- if (test_snow3g_decryption(&snow3g_test_case_7)) +- return TEST_FAILED; ++ ret = test_snow3g_decryption(&snow3g_test_case_7); ++ if (ret != 0) ++ return ret; + + return test_snow3g_authentication_verify(&snow3g_hash_data); + } +@@ -7648,6 +7658,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + } + } + ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + /* Create the session */ + if (verify) + retval = create_wireless_algo_cipher_auth_session( +@@ -8433,7 +8446,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) + tdata->key.data, tdata->key.len, + tdata->aad.len, tdata->auth_tag.len, + tdata->iv.len); +- if (retval < 0) ++ if (retval != TEST_SUCCESS) + return retval; + + if (tdata->aad.len > MBUF_SIZE) { +@@ -11567,7 +11580,7 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata) + tdata->key.data, tdata->key.len, + tdata->aad.len, tdata->auth_tag.len, + tdata->iv.len); +- if (retval < 0) ++ if (retval != TEST_SUCCESS) + return retval; + + /* alloc mbuf and set payload */ +@@ -11981,11 +11994,11 @@ test_stats(void) + TEST_ASSERT((stats.enqueued_count == 1), + "rte_cryptodev_stats_get returned unexpected enqueued stat"); + TEST_ASSERT((stats.dequeued_count == 1), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected dequeued stat"); + TEST_ASSERT((stats.enqueue_err_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected enqueued error count stat"); + TEST_ASSERT((stats.dequeue_err_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected dequeued error count stat"); + + /* invalid device but should ignore and not reset device stats*/ + rte_cryptodev_stats_reset(ts_params->valid_devs[0] + 300); +@@ -11993,7 +12006,7 @@ test_stats(void) + &stats), + "rte_cryptodev_stats_get failed"); + TEST_ASSERT((stats.enqueued_count == 1), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected enqueued stat after invalid reset"); + + /* check that a valid reset clears stats */ + rte_cryptodev_stats_reset(ts_params->valid_devs[0]); +@@ -12001,9 +12014,9 @@ test_stats(void) + &stats), + "rte_cryptodev_stats_get failed"); + TEST_ASSERT((stats.enqueued_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected enqueued stat after valid reset"); + TEST_ASSERT((stats.dequeued_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected dequeued stat after valid reset"); + + return TEST_SUCCESS; + } +@@ -14450,8 +14463,13 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, + &cap_idx) == NULL) + return TEST_SKIPPED; + +- /* OOP not supported with CPU crypto */ +- if (oop && gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ /* ++ * SGL not supported on AESNI_MB PMD CPU crypto, ++ * OOP not supported on AESNI_GCM CPU crypto ++ */ ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO && ++ (gbl_driver_id == rte_cryptodev_driver_id_get( ++ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)) || oop)) + return 
TEST_SKIPPED; + + /* Detailed check for the particular SGL support flag */ +diff --git a/dpdk/app/test/test_cryptodev_aes_test_vectors.h b/dpdk/app/test/test_cryptodev_aes_test_vectors.h +index ea7b21ce53..f3686beeb5 100644 +--- a/dpdk/app/test/test_cryptodev_aes_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_aes_test_vectors.h +@@ -4969,7 +4969,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (512-byte plaintext" +- " Dataunit 512) Scater gather OOP", ++ " Dataunit 512) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_512, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4979,7 +4979,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (512-byte plaintext" +- " Dataunit 512) Scater gather OOP", ++ " Dataunit 512) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_512, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4989,7 +4989,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (512-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4999,7 +4999,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (512-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -5009,7 +5009,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (4096-byte plaintext" +- " Dataunit 4096) Scater gather OOP", ++ " Dataunit 4096) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_4096, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -5019,7 +5019,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (4096-byte plaintext" +- " Dataunit 4096) Scater gather OOP", ++ " Dataunit 4096) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_4096, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -5029,7 +5029,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (4096-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -5039,7 +5039,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (4096-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +diff --git 
a/dpdk/app/test/test_cryptodev_security_ipsec_test_vectors.h b/dpdk/app/test/test_cryptodev_security_ipsec_test_vectors.h +index 2686bbeb62..6e60e32b9d 100644 +--- a/dpdk/app/test/test_cryptodev_security_ipsec_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_security_ipsec_test_vectors.h +@@ -417,7 +417,8 @@ struct ipsec_test_data pkt_aes_256_ccm = { + .op = RTE_CRYPTO_AEAD_OP_ENCRYPT, + .algo = RTE_CRYPTO_AEAD_AES_CCM, + .key.length = 32, +- .iv.length = 12, ++ /* IV includes 3B salt and 8B per packet IV */ ++ .iv.length = 11, + .iv.offset = IV_OFFSET, + .digest_length = 16, + .aad_length = 12, +diff --git a/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h b/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h +index f43f693edb..b0fa0ec458 100644 +--- a/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h +@@ -769,7 +769,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -781,7 +781,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, + 0xf5, 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce, + 0xbe, 0x48, 0xb5, 0x0b, 0x6a, 0x73, 0x9a, + 0x5a, 0xa3, 0x06, 0x47, 0x40, 0x96, 0xcf, +@@ -790,7 +790,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, 0xe8, + 0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2, + 0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c, +- 0x23, 0xfa, 0x16, 0x39, 0xf7, 0x15, 0x11 }, ++ 0x23, 0xfa, 0x16, 0xb2, 0xb0, 0x17, 0x4a }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -817,7 +817,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -829,7 +829,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, + 0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5, + 0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78, + 0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07, +@@ -838,7 +838,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21, + 0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f, + 0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17, +- 0xae, 0xde, 0xfb, 0x90, 0x62, 0x59, 0xcb }, ++ 0xae, 0xde, 0xfb, 0x19, 0xDa, 0x9a, 0xc2 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -865,7 +865,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 
0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -877,7 +877,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, + 0xf5, 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce, + 0xbe, 0x48, 0xb5, 0x0b, 0x6a, 0x73, 0x9a, + 0x5a, 0xa3, 0x06, 0x47, 0x40, 0x96, 0xcf, +@@ -886,7 +886,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, 0xe8, + 0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2, + 0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c, +- 0x23, 0xfa, 0x16, 0x72, 0x3e, 0x14, 0xa9 }, ++ 0x23, 0xfa, 0x16, 0x6c, 0xcb, 0x92, 0xdf }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -913,7 +913,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -925,7 +925,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, + 0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5, + 0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78, + 0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07, +@@ -934,7 +934,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21, + 0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f, + 0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17, +- 0xae, 0xde, 0xfb, 0x3f, 0x47, 0xaa, 0x9b }, ++ 0xae, 0xde, 0xfb, 0x5b, 0xc2, 0x9f, 0x29 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -961,7 +961,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -974,7 +974,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + .in_len = 66, + .data_out = + (uint8_t[]){ +- 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, 0xf5, ++ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, 0xf5, + 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce, 0xbe, 0x48, + 0xb5, 0x0b, 0x6a, 0x73, 0x9a, 0x5a, 0xa3, 0x06, + 0x47, 0x40, 0x96, 0xcf, 0x86, 0x98, 0x3d, 0x6f, +@@ -982,7 +982,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xa6, 0x24, 0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, + 0xe8, 0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2, + 0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c, 0x23, +- 0xfa, 0x16, 0x52, 0x69, 0x16, 0xfc, ++ 0xfa, 0x16, 0x5d, 0x83, 0x73, 0x34, + }, + .sn_size = 12, + .hfn = 0x1, +@@ -1010,7 +1010,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 
0x5e, 0xaf, +@@ -1022,7 +1022,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, + 0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5, + 0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78, + 0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07, +@@ -1031,7 +1031,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21, + 0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f, + 0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17, +- 0xae, 0xde, 0xfb, 0xf5, 0xda, 0x73, 0xa7 }, ++ 0xae, 0xde, 0xfb, 0xff, 0xf9, 0xef, 0xff }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1154,7 +1154,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1166,7 +1166,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, + 0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e, + 0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40, + 0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22, +@@ -1175,7 +1175,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1, + 0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3, + 0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97, +- 0xbd, 0xba, 0x08, 0x39, 0x63, 0x21, 0x82 }, ++ 0xbd, 0xba, 0x08, 0xb2, 0x24, 0x23, 0xd9 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1202,7 +1202,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1214,7 +1214,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, + 0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36, + 0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8, + 0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e, +@@ -1223,7 +1223,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7, + 0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47, + 0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde, +- 0xc9, 0x0a, 0x64, 0x8e, 0x79, 0xde, 0xaa }, ++ 0xc9, 0x0a, 0x64, 0x07, 0xc1, 0x1d, 0xa3 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1250,7 +1250,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1262,7 +1262,7 @@ static const 
struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, + 0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e, + 0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40, + 0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22, +@@ -1271,7 +1271,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1, + 0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3, + 0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97, +- 0xbd, 0xba, 0x08, 0x72, 0xaa, 0x20, 0x3a }, ++ 0xbd, 0xba, 0x08, 0x6c, 0x5f, 0xa6, 0x4c }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1298,7 +1298,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1310,7 +1310,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, + 0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36, + 0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8, + 0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e, +@@ -1319,7 +1319,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7, + 0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47, + 0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde, +- 0xc9, 0x0a, 0x64, 0x21, 0x5c, 0x2d, 0xfa }, ++ 0xc9, 0x0a, 0x64, 0x45, 0xd9, 0x18, 0x48 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1346,7 +1346,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1358,7 +1358,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, + 0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e, + 0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40, + 0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22, +@@ -1367,7 +1367,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1, + 0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3, + 0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97, +- 0xbd, 0xba, 0x08, 0x52, 0xfd, 0x22, 0x6f }, ++ 0xbd, 0xba, 0x08, 0x5d, 0x17, 0x47, 0xa7 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1394,7 +1394,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1406,7 +1406,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 
0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, + 0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36, + 0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8, + 0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e, +@@ -1415,7 +1415,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7, + 0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47, + 0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde, +- 0xc9, 0x0a, 0x64, 0xeb, 0xc1, 0xf4, 0xc6 }, ++ 0xc9, 0x0a, 0x64, 0xe1, 0xe2, 0x68, 0x9e }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1538,7 +1538,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1550,7 +1550,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, + 0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b, + 0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67, + 0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37, +@@ -1559,7 +1559,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d, + 0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56, + 0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5, +- 0xaf, 0x96, 0x5c, 0xb6, 0x6c, 0xeb, 0x14 }, ++ 0xaf, 0x96, 0x5c, 0x3d, 0x2b, 0xe9, 0x4f }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1586,7 +1586,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1598,7 +1598,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, + 0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f, + 0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29, + 0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59, +@@ -1607,7 +1607,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51, + 0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64, + 0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13, +- 0x91, 0xaf, 0x24, 0xb2, 0x82, 0xfb, 0x27 }, ++ 0x91, 0xaf, 0x24, 0x3b, 0x3a, 0x38, 0x2e }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1634,7 +1634,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1646,7 +1646,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- 
(uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, + 0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b, + 0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67, + 0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37, +@@ -1655,7 +1655,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d, + 0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56, + 0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5, +- 0xaf, 0x96, 0x5c, 0xfd, 0xa5, 0xea, 0xac }, ++ 0xaf, 0x96, 0x5c, 0xe3, 0x50, 0x6c, 0xda }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1682,7 +1682,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1694,7 +1694,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, + 0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f, + 0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29, + 0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59, +@@ -1703,7 +1703,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51, + 0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64, + 0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13, +- 0x91, 0xaf, 0x24, 0x1d, 0xa7, 0x08, 0x77 }, ++ 0x91, 0xaf, 0x24, 0x79, 0x22, 0x3d, 0xc5 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1730,7 +1730,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1742,7 +1742,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, + 0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b, + 0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67, + 0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37, +@@ -1751,7 +1751,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d, + 0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56, + 0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5, +- 0xaf, 0x96, 0x5c, 0xdd, 0xf2, 0xe8, 0xf9 }, ++ 0xaf, 0x96, 0x5c, 0xd2, 0x18, 0x8d, 0x31 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1778,7 +1778,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1790,7 +1790,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 
0x08, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, + 0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f, + 0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29, + 0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59, +@@ -1799,7 +1799,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51, + 0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64, + 0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13, +- 0x91, 0xaf, 0x24, 0xd7, 0x3a, 0xd1, 0x4b }, ++ 0x91, 0xaf, 0x24, 0xdd, 0x19, 0x4d, 0x13 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2556,7 +2556,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2568,7 +2568,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, + 0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b, + 0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7, + 0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a, +@@ -2577,8 +2577,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76, + 0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47, + 0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6, +- 0x0e, 0xf4, 0xe7, 0xe8, 0x78, 0xdd, 0xc1, +- 0x92 }, ++ 0x0e, 0xf4, 0xe7, 0xe8, 0xc0, 0x48, 0x6a, ++ 0x7c }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2605,7 +2605,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2617,7 +2617,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, + 0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50, + 0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab, + 0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01, +@@ -2626,8 +2626,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87, + 0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09, + 0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e, +- 0x9c, 0x85, 0x0b, 0xf7, 0xb1, 0x80, 0x30, +- 0xa5 }, ++ 0x9c, 0x85, 0x0b, 0xf7, 0x17, 0x28, 0x0f, ++ 0x7d }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2654,7 +2654,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2666,7 +2666,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 
0x01, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, + 0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b, + 0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7, + 0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a, +@@ -2675,8 +2675,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76, + 0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47, + 0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6, +- 0x0e, 0xf4, 0xe7, 0xe8, 0x08, 0xa6, 0xdb, +- 0x19 }, ++ 0x0e, 0xf4, 0xe7, 0xe8, 0x8e, 0x76, 0x4a, ++ 0x4e }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2703,7 +2703,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2715,7 +2715,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, + 0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50, + 0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab, + 0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01, +@@ -2724,8 +2724,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87, + 0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09, + 0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e, +- 0x9c, 0x85, 0x0b, 0xf7, 0x97, 0x5a, 0x56, +- 0xab }, ++ 0x9c, 0x85, 0x0b, 0xf7, 0xc1, 0x27, 0x82, ++ 0xc3 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2752,7 +2752,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2764,7 +2764,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, + 0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b, + 0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7, + 0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a, +@@ -2773,8 +2773,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76, + 0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47, + 0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6, +- 0x0e, 0xf4, 0xe7, 0xe8, 0x08, 0x68, 0xff, +- 0x7c }, ++ 0x0e, 0xf4, 0xe7, 0xe8, 0x97, 0x76, 0xce, ++ 0xac }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2801,7 +2801,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2813,7 +2813,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 
0xf8, 0xbf, 0x21, 0xc9, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, + 0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50, + 0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab, + 0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01, +@@ -2822,8 +2822,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87, + 0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09, + 0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e, +- 0x9c, 0x85, 0x0b, 0xf7, 0x41, 0xdd, 0x19, +- 0x32 }, ++ 0x9c, 0x85, 0x0b, 0xf7, 0x69, 0x56, 0x6f, ++ 0xaf }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2948,7 +2948,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2960,7 +2960,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, + 0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f, + 0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde, + 0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51, +@@ -2969,8 +2969,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a, + 0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95, + 0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c, +- 0xae, 0x22, 0x59, 0x11, 0xf6, 0x97, 0x0b, +- 0x7b }, ++ 0xae, 0x22, 0x59, 0x11, 0x4e, 0x02, 0xa0, ++ 0x95 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2997,7 +2997,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3009,7 +3009,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, + 0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30, + 0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e, + 0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f, +@@ -3018,8 +3018,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17, + 0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a, + 0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f, +- 0xad, 0x3d, 0x99, 0x4a, 0xa3, 0xab, 0xd5, +- 0x7c }, ++ 0xad, 0x3d, 0x99, 0x4a, 0x05, 0x03, 0xea, ++ 0xa4 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3046,7 +3046,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3058,7 +3058,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 
0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, + 0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f, + 0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde, + 0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51, +@@ -3067,8 +3067,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a, + 0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95, + 0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c, +- 0xae, 0x22, 0x59, 0x11, 0x86, 0xec, 0x11, +- 0xf0 }, ++ 0xae, 0x22, 0x59, 0x11, 0x00, 0x3c, 0x80, ++ 0xa7 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3095,7 +3095,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3107,7 +3107,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, + 0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30, + 0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e, + 0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f, +@@ -3116,8 +3116,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17, + 0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a, + 0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f, +- 0xad, 0x3d, 0x99, 0x4a, 0x85, 0x71, 0xb3, +- 0x72 }, ++ 0xad, 0x3d, 0x99, 0x4a, 0xd3, 0x0c, 0x67, ++ 0x1a }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3144,7 +3144,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3156,7 +3156,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, + 0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f, + 0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde, + 0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51, +@@ -3165,8 +3165,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a, + 0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95, + 0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c, +- 0xae, 0x22, 0x59, 0x11, 0x86, 0x22, 0x35, +- 0x95 }, ++ 0xae, 0x22, 0x59, 0x11, 0x19, 0x3c, 0x04, ++ 0x45 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3193,7 +3193,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3205,7 +3205,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out 
= +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, + 0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30, + 0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e, + 0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f, +@@ -3214,8 +3214,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17, + 0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a, + 0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f, +- 0xad, 0x3d, 0x99, 0x4a, 0x53, 0xf6, 0xfc, +- 0xeb }, ++ 0xad, 0x3d, 0x99, 0x4a, 0x7b, 0x7d, 0x8a, ++ 0x76 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3340,7 +3340,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3352,7 +3352,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, + 0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93, + 0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7, + 0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d, +@@ -3361,8 +3361,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12, + 0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60, + 0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7, +- 0x0c, 0x61, 0x76, 0xdc, 0x25, 0x8a, 0x31, +- 0xed }, ++ 0x0c, 0x61, 0x76, 0xdc, 0x9d, 0x1f, 0x9a, ++ 0x03 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3389,7 +3389,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3401,7 +3401,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, + 0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23, + 0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26, + 0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac, +@@ -3410,8 +3410,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e, + 0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25, + 0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f, +- 0x6c, 0xed, 0x6a, 0x50, 0xf3, 0x5e, 0x90, +- 0x42 }, ++ 0x6c, 0xed, 0x6a, 0x50, 0x55, 0xf6, 0xaf, ++ 0x9a }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3438,7 +3438,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3450,7 +3450,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len 
= 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, + 0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93, + 0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7, + 0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d, +@@ -3459,8 +3459,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12, + 0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60, + 0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7, +- 0x0c, 0x61, 0x76, 0xdc, 0x55, 0xf1, 0x2b, +- 0x66 }, ++ 0x0c, 0x61, 0x76, 0xdc, 0xd3, 0x21, 0xba, ++ 0x31 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3487,7 +3487,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3499,7 +3499,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, + 0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23, + 0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26, + 0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac, +@@ -3508,8 +3508,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e, + 0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25, + 0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f, +- 0x6c, 0xed, 0x6a, 0x50, 0xd5, 0x84, 0xf6, +- 0x4c }, ++ 0x6c, 0xed, 0x6a, 0x50, 0x83, 0xf9, 0x22, ++ 0x24 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3536,7 +3536,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3548,7 +3548,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, + 0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93, + 0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7, + 0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d, +@@ -3557,8 +3557,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12, + 0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60, + 0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7, +- 0x0c, 0x61, 0x76, 0xdc, 0x55, 0x3f, 0x0f, +- 0x03 }, ++ 0x0c, 0x61, 0x76, 0xdc, 0xca, 0x21, 0x3e, ++ 0xd3 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3585,7 +3585,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3597,7 +3597,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 
0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, + 0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23, + 0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26, + 0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac, +@@ -3606,8 +3606,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e, + 0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25, + 0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f, +- 0x6c, 0xed, 0x6a, 0x50, 0x03, 0x03, 0xb9, +- 0xd5 }, ++ 0x6c, 0xed, 0x6a, 0x50, 0x2b, 0x88, 0xcf, ++ 0x48 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +diff --git a/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h b/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h +index 6fdc4cd9e3..56d4884529 100644 +--- a/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h +@@ -5560,7 +5560,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* Control Plane w/NULL enc. + NULL int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5568,7 +5568,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* Control Plane w/NULL enc. + SNOW f9 int. UL LONG SN */ + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5954,7 +5954,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* User Plane w/NULL enc. + NULL int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5962,7 +5962,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* User Plane w/NULL enc. + SNOW f9 int. 
UL for 12-bit SN */ + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +diff --git a/dpdk/app/test/test_event_timer_adapter.c b/dpdk/app/test/test_event_timer_adapter.c +index 1a440dfd10..12d5936c60 100644 +--- a/dpdk/app/test/test_event_timer_adapter.c ++++ b/dpdk/app/test/test_event_timer_adapter.c +@@ -57,9 +57,10 @@ static uint64_t global_bkt_tck_ns; + static uint64_t global_info_bkt_tck_ns; + static volatile uint8_t arm_done; + +-#define CALC_TICKS(tks) \ +- ceil((double)(tks * global_bkt_tck_ns) / global_info_bkt_tck_ns) ++#define CALC_TICKS(tks) ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns) + ++/* Wait double timeout ticks for software and an extra tick for hardware */ ++#define WAIT_TICKS(tks) (using_services ? 2 * (tks) : tks + 1) + + static bool using_services; + static uint32_t test_lcore1; +@@ -441,10 +442,31 @@ timdev_teardown(void) + rte_mempool_free(eventdev_test_mempool); + } + ++static inline uint16_t ++timeout_event_dequeue(struct rte_event *evs, uint64_t nb_evs, uint64_t ticks) ++{ ++ uint16_t ev_cnt = 0; ++ uint64_t end_cycle; ++ ++ if (using_services && nb_evs == MAX_TIMERS) ++ ticks = 2 * ticks; ++ ++ end_cycle = rte_rdtsc() + ticks * global_bkt_tck_ns * rte_get_tsc_hz() / 1E9; ++ ++ while (ev_cnt < nb_evs && rte_rdtsc() < end_cycle) { ++ ev_cnt += rte_event_dequeue_burst(evdev, TEST_PORT_ID, &evs[ev_cnt], nb_evs, 0); ++ rte_pause(); ++ } ++ ++ return ev_cnt; ++} ++ + static inline int + test_timer_state(void) + { + struct rte_event_timer *ev_tim; ++ const uint64_t max_ticks = 100; ++ uint64_t ticks, wait_ticks; + struct rte_event ev; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, +@@ -455,11 +477,10 @@ test_timer_state(void) + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; + +- + rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim); + *ev_tim = tim; + ev_tim->ev.event_ptr = ev_tim; +- ev_tim->timeout_ticks = CALC_TICKS(120); ++ ev_tim->timeout_ticks = CALC_TICKS(max_ticks + 20); + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0, + "Armed timer exceeding max_timeout."); +@@ -467,8 +488,9 @@ test_timer_state(void) + "Improper timer state set expected %d returned %d", + RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state); + ++ ticks = 10; + ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; +- ev_tim->timeout_ticks = CALC_TICKS(10); ++ ev_tim->timeout_ticks = CALC_TICKS(ticks); + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, + "Failed to arm timer with proper timeout."); +@@ -477,14 +499,15 @@ test_timer_state(void) + RTE_EVENT_TIMER_ARMED, ev_tim->state); + + if (!using_services) +- rte_delay_us(20); ++ wait_ticks = 2 * ticks; + else +- rte_delay_us(1000 + 200); +- TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1, +- "Armed timer failed to trigger."); ++ wait_ticks = ticks; ++ ++ TEST_ASSERT_EQUAL(timeout_event_dequeue(&ev, 1, WAIT_TICKS(wait_ticks)), 1, ++ "Armed timer failed to trigger."); + + ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; +- ev_tim->timeout_ticks = CALC_TICKS(90); ++ ev_tim->timeout_ticks = CALC_TICKS(max_ticks - 10); + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, + "Failed to arm timer with proper timeout."); + TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1), +@@ -1208,8 +1231,9 @@ stat_inc_reset_ev_enq(void) + int ret, i, n; + int num_evtims = MAX_TIMERS; + struct rte_event_timer *evtims[num_evtims]; +- struct rte_event evs[BATCH_SIZE]; ++ 
struct rte_event evs[num_evtims]; + struct rte_event_timer_adapter_stats stats; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, +@@ -1217,7 +1241,7 @@ stat_inc_reset_ev_enq(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, +@@ -1242,31 +1266,12 @@ stat_inc_reset_ev_enq(void) + "succeeded = %d, rte_errno = %s", + num_evtims, ret, rte_strerror(rte_errno)); + +- rte_delay_ms(1000); +- +-#define MAX_TRIES num_evtims +- int sum = 0; +- int tries = 0; +- bool done = false; +- while (!done) { +- sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, +- RTE_DIM(evs), 10); +- if (sum >= num_evtims || ++tries >= MAX_TRIES) +- done = true; +- +- rte_delay_ms(10); +- } +- +- TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " +- "got %d", num_evtims, sum); +- +- TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); +- +- rte_delay_ms(100); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); ++ TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d", ++ num_evtims, n); + + /* Make sure the eventdev is still empty */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), +- 10); ++ n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1)); + + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " + "events from event device"); +@@ -1303,6 +1308,7 @@ event_timer_arm(void) + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, +@@ -1310,7 +1316,7 @@ event_timer_arm(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); +@@ -1337,10 +1343,7 @@ event_timer_arm(void) + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after arming already armed timer"); + +- /* Let timer expire */ +- rte_delay_ms(1000); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + +@@ -1360,6 +1363,7 @@ event_timer_arm_double(void) + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, +@@ -1367,7 +1371,7 @@ event_timer_arm_double(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); +@@ -1387,10 +1391,7 @@ event_timer_arm_double(void) + TEST_ASSERT_EQUAL(rte_errno, EALREADY, 
"Unexpected rte_errno value " + "after double-arm"); + +- /* Let timer expire */ +- rte_delay_ms(600); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - " + "expected: 1, actual: %d", n); + +@@ -1417,6 +1418,7 @@ event_timer_arm_expiry(void) + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; ++ uint64_t ticks = 30; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1426,7 +1428,7 @@ event_timer_arm_expiry(void) + + /* Set up an event timer */ + *evtim = init_tim; +- evtim->timeout_ticks = CALC_TICKS(30), // expire in 3 secs ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 secs */ + evtim->ev.event_ptr = evtim; + + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); +@@ -1435,17 +1437,10 @@ event_timer_arm_expiry(void) + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event " + "timer in incorrect state"); + +- rte_delay_ms(2999); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), ticks - 1); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event"); + +- /* Delay 100 ms to account for the adapter tick window - should let us +- * dequeue one event +- */ +- rte_delay_ms(100); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(1)); + TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer " + "expiry events", n); + TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER, +@@ -1477,6 +1472,7 @@ event_timer_arm_rearm(void) + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; ++ uint64_t ticks = 1; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1486,7 +1482,7 @@ event_timer_arm_rearm(void) + + /* Set up a timer */ + *evtim = init_tim; +- evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 0.1 sec */ + evtim->ev.event_ptr = evtim; + + /* Arm it */ +@@ -1494,10 +1490,7 @@ event_timer_arm_rearm(void) + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + +- /* Add 100ms to account for the adapter tick window */ +- rte_delay_ms(100 + 100); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + +@@ -1514,10 +1507,7 @@ event_timer_arm_rearm(void) + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + +- /* Add 100ms to account for the adapter tick window */ +- rte_delay_ms(100 + 100); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + +@@ -1539,7 +1529,8 @@ event_timer_arm_max(void) + int ret, i, n; + int num_evtims = MAX_TIMERS; + struct rte_event_timer *evtims[num_evtims]; +- struct rte_event evs[BATCH_SIZE]; ++ struct rte_event evs[num_evtims]; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = 
TEST_QUEUE_ID, +@@ -1547,7 +1538,7 @@ event_timer_arm_max(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, +@@ -1567,31 +1558,12 @@ event_timer_arm_max(void) + "succeeded = %d, rte_errno = %s", + num_evtims, ret, rte_strerror(rte_errno)); + +- rte_delay_ms(1000); +- +-#define MAX_TRIES num_evtims +- int sum = 0; +- int tries = 0; +- bool done = false; +- while (!done) { +- sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, +- RTE_DIM(evs), 10); +- if (sum >= num_evtims || ++tries >= MAX_TRIES) +- done = true; +- +- rte_delay_ms(10); +- } +- +- TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " +- "got %d", num_evtims, sum); +- +- TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); +- +- rte_delay_ms(100); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); ++ TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d", ++ num_evtims, n); + + /* Make sure the eventdev is still empty */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), +- 10); ++ n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1)); + + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " + "events from event device"); +@@ -1711,6 +1683,7 @@ event_timer_cancel(void) + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; ++ uint64_t ticks = 30; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1728,7 +1701,7 @@ event_timer_cancel(void) + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; +- evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */ + + /* Check that cancelling an inited but unarmed timer fails */ + ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); +@@ -1752,10 +1725,8 @@ event_timer_cancel(void) + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED, + "evtim in incorrect state"); + +- rte_delay_ms(3000); +- + /* Make sure that no expiry event was generated */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); + + rte_mempool_put(eventdev_test_mempool, evtim); +@@ -1778,8 +1749,8 @@ event_timer_cancel_double(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec + }; ++ uint64_t ticks = 30; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1790,7 +1761,7 @@ event_timer_cancel_double(void) + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; +- evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */ + + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", +@@ -1812,10 +1783,8 @@ event_timer_cancel_double(void) + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after double-cancel: rte_errno = %d", rte_errno); + +- rte_delay_ms(3000); +- + /* Still make sure that no expiry 
event was generated */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); + + rte_mempool_put(eventdev_test_mempool, evtim); +diff --git a/dpdk/app/test/test_link_bonding.c b/dpdk/app/test/test_link_bonding.c +index 5c496352c2..2f46e4c6ee 100644 +--- a/dpdk/app/test/test_link_bonding.c ++++ b/dpdk/app/test/test_link_bonding.c +@@ -2,7 +2,7 @@ + * Copyright(c) 2010-2014 Intel Corporation + */ + +-#include "unistd.h" ++#include + #include + #include + #include +diff --git a/dpdk/app/test/test_malloc.c b/dpdk/app/test/test_malloc.c +index de40e50611..ff081dd931 100644 +--- a/dpdk/app/test/test_malloc.c ++++ b/dpdk/app/test/test_malloc.c +@@ -302,11 +302,11 @@ test_multi_alloc_statistics(void) + rte_malloc_get_socket_stats(socket,&post_stats); + /* Check statistics reported are correct */ + /* All post stats should be equal to pre stats after alloc freed */ +- if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) && +- (post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) && +- (post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&& +- (post_stats.alloc_count!=pre_stats.alloc_count)&& +- (post_stats.free_count!=pre_stats.free_count)) { ++ if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) || ++ (post_stats.heap_freesz_bytes != pre_stats.heap_freesz_bytes) || ++ (post_stats.heap_allocsz_bytes != pre_stats.heap_allocsz_bytes) || ++ (post_stats.alloc_count != pre_stats.alloc_count) || ++ (post_stats.free_count != pre_stats.free_count)) { + printf("Malloc statistics are incorrect - freed alloc\n"); + return -1; + } +@@ -363,11 +363,11 @@ test_multi_alloc_statistics(void) + return -1; + } + +- if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) && +- (post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) && +- (post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&& +- (post_stats.alloc_count!=pre_stats.alloc_count)&& +- (post_stats.free_count!=pre_stats.free_count)) { ++ if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) || ++ (post_stats.heap_freesz_bytes != pre_stats.heap_freesz_bytes) || ++ (post_stats.heap_allocsz_bytes != pre_stats.heap_allocsz_bytes) || ++ (post_stats.alloc_count != pre_stats.alloc_count) || ++ (post_stats.free_count != pre_stats.free_count)) { + printf("Malloc statistics are incorrect - freed alloc\n"); + return -1; + } +@@ -937,6 +937,7 @@ test_alloc_single_socket(int32_t socket) + if (mem == NULL) + return -1; + if (addr_to_socket(mem) != desired_socket) { ++ rte_free(mem); + return -1; + } + rte_free(mem); +diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c +index 53fe898a38..7a2f8a9980 100644 +--- a/dpdk/app/test/test_mbuf.c ++++ b/dpdk/app/test/test_mbuf.c +@@ -1167,38 +1167,16 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool) + return TEST_SKIPPED; + } + #else +- +-#include +-#include +-#include +-#include +- +-/* use fork() to test mbuf errors panic */ +-static int +-verify_mbuf_check_panics(struct rte_mbuf *buf) ++/* Verify if mbuf can pass the check */ ++static bool ++mbuf_check_pass(struct rte_mbuf *buf) + { +- int pid; +- int status; +- +- pid = fork(); +- +- if (pid == 0) { +- struct rlimit rl; ++ const char *reason; + +- /* No need to generate a coredump when panicking. 
*/ +- rl.rlim_cur = rl.rlim_max = 0; +- setrlimit(RLIMIT_CORE, &rl); +- rte_mbuf_sanity_check(buf, 1); /* should panic */ +- exit(0); /* return normally if it doesn't panic */ +- } else if (pid < 0) { +- printf("Fork Failed\n"); +- return -1; +- } +- wait(&status); +- if(status == 0) +- return -1; ++ if (rte_mbuf_check(buf, 1, &reason) == 0) ++ return true; + +- return 0; ++ return false; + } + + static int +@@ -1215,19 +1193,19 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool) + return -1; + + printf("Checking good mbuf initially\n"); +- if (verify_mbuf_check_panics(buf) != -1) ++ if (!mbuf_check_pass(buf)) + return -1; + + printf("Now checking for error conditions\n"); + +- if (verify_mbuf_check_panics(NULL)) { ++ if (mbuf_check_pass(NULL)) { + printf("Error with NULL mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.pool = NULL; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-pool mbuf test\n"); + return -1; + } +@@ -1235,7 +1213,7 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool) + if (RTE_IOVA_AS_PA) { + badbuf = *buf; + rte_mbuf_iova_set(&badbuf, 0); +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-physaddr mbuf test\n"); + return -1; + } +@@ -1243,21 +1221,21 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool) + + badbuf = *buf; + badbuf.buf_addr = NULL; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-addr mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.refcnt = 0; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-refcnt(0) mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.refcnt = UINT16_MAX; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-refcnt(MAX) mbuf test\n"); + return -1; + } +@@ -2744,6 +2722,7 @@ test_nb_segs_and_next_reset(void) + + /* split m0 chain in two, between m1 and m2 */ + m0->nb_segs = 2; ++ m0->pkt_len -= m2->data_len; + m1->next = NULL; + m2->nb_segs = 1; + +@@ -2764,6 +2743,7 @@ test_nb_segs_and_next_reset(void) + m2->nb_segs != 1 || m2->next != NULL) + GOTO_FAIL("nb_segs or next was not reset properly"); + ++ rte_mempool_free(pool); + return 0; + + fail: +diff --git a/dpdk/app/test/test_reorder.c b/dpdk/app/test/test_reorder.c +index f0714a5c18..7b5e590bac 100644 +--- a/dpdk/app/test/test_reorder.c ++++ b/dpdk/app/test/test_reorder.c +@@ -278,6 +278,7 @@ test_reorder_drain(void) + goto exit; + } + rte_pktmbuf_free(robufs[0]); ++ memset(robufs, 0, sizeof(robufs)); + + /* Insert more packets + * RB[] = {NULL, NULL, NULL, NULL} +@@ -313,6 +314,7 @@ test_reorder_drain(void) + for (i = 0; i < 3; i++) { + rte_pktmbuf_free(robufs[i]); + } ++ memset(robufs, 0, sizeof(robufs)); + + /* + * RB[] = {NULL, NULL, NULL, NULL} +diff --git a/dpdk/app/test/test_security_inline_proto.c b/dpdk/app/test/test_security_inline_proto.c +index 79858e559f..e411a3c21d 100644 +--- a/dpdk/app/test/test_security_inline_proto.c ++++ b/dpdk/app/test/test_security_inline_proto.c +@@ -678,6 +678,8 @@ free_mbuf(struct rte_mbuf *mbuf) + ip_reassembly_dynfield_offset, + rte_eth_ip_reassembly_dynfield_t *); + rte_pktmbuf_free(mbuf); ++ if (dynfield.nb_frags == 0) ++ break; + mbuf = dynfield.next_frag; + } + } +@@ -735,6 +737,53 @@ get_and_verify_incomplete_frags(struct rte_mbuf *mbuf, + return ret; + } + ++static int ++event_tx_burst(struct 
rte_mbuf **tx_pkts, uint16_t nb_pkts) ++{ ++ struct rte_event ev; ++ int i, nb_sent = 0; ++ ++ /* Convert packets to events */ ++ memset(&ev, 0, sizeof(ev)); ++ ev.sched_type = RTE_SCHED_TYPE_PARALLEL; ++ for (i = 0; i < nb_pkts; i++) { ++ ev.mbuf = tx_pkts[i]; ++ ev.mbuf->port = port_id; ++ nb_sent += rte_event_eth_tx_adapter_enqueue( ++ eventdev_id, port_id, &ev, 1, 0); ++ } ++ ++ return nb_sent; ++} ++ ++static int ++event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx) ++{ ++ int nb_ev, nb_rx = 0, j = 0; ++ const int ms_per_pkt = 5; ++ struct rte_event ev; ++ ++ do { ++ nb_ev = rte_event_dequeue_burst(eventdev_id, port_id, ++ &ev, 1, 0); ++ ++ if (nb_ev == 0) { ++ rte_delay_ms(1); ++ continue; ++ } ++ ++ /* Get packet from event */ ++ if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) { ++ printf("Unsupported event type: %i\n", ++ ev.event_type); ++ continue; ++ } ++ rx_pkts[nb_rx++] = ev.mbuf; ++ } while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx); ++ ++ return nb_rx; ++} ++ + static int + test_ipsec_with_reassembly(struct reassembly_vector *vector, + const struct ipsec_test_flags *flags) +@@ -761,26 +810,9 @@ test_ipsec_with_reassembly(struct reassembly_vector *vector, + burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1; + nb_tx = vector->nb_frags * burst_sz; + +- rte_eth_dev_stop(port_id); +- if (ret != 0) { +- printf("rte_eth_dev_stop: err=%s, port=%u\n", +- rte_strerror(-ret), port_id); +- return ret; +- } + rte_eth_ip_reassembly_capability_get(port_id, &reass_capa); + if (reass_capa.max_frags < vector->nb_frags) + return TEST_SKIPPED; +- if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) { +- reass_capa.timeout_ms = APP_REASS_TIMEOUT; +- rte_eth_ip_reassembly_conf_set(port_id, &reass_capa); +- } +- +- ret = rte_eth_dev_start(port_id); +- if (ret < 0) { +- printf("rte_eth_dev_start: err=%d, port=%d\n", +- ret, port_id); +- return ret; +- } + + memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx); + memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx); +@@ -871,7 +903,10 @@ test_ipsec_with_reassembly(struct reassembly_vector *vector, + if (ret) + goto out; + +- nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx); ++ if (event_mode_enabled) ++ nb_sent = event_tx_burst(tx_pkts_burst, nb_tx); ++ else ++ nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx); + if (nb_sent != nb_tx) { + ret = -1; + printf("\nFailed to tx %u pkts", nb_tx); +@@ -883,14 +918,17 @@ test_ipsec_with_reassembly(struct reassembly_vector *vector, + /* Retry few times before giving up */ + nb_rx = 0; + j = 0; +- do { +- nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx], +- nb_tx - nb_rx); +- j++; +- if (nb_rx >= nb_tx) +- break; +- rte_delay_ms(1); +- } while (j < 5 || !nb_rx); ++ if (event_mode_enabled) ++ nb_rx = event_rx_burst(rx_pkts_burst, nb_tx); ++ else ++ do { ++ nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx], ++ nb_tx - nb_rx); ++ j++; ++ if (nb_rx >= nb_tx) ++ break; ++ rte_delay_ms(1); ++ } while (j < 5 || !nb_rx); + + /* Check for minimum number of Rx packets expected */ + if ((vector->nb_frags == 1 && nb_rx != nb_tx) || +@@ -950,52 +988,6 @@ out: + return ret; + } + +-static int +-event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +-{ +- struct rte_event ev; +- int i, nb_sent = 0; +- +- /* Convert packets to events */ +- memset(&ev, 0, sizeof(ev)); +- ev.sched_type = RTE_SCHED_TYPE_PARALLEL; +- for (i = 0; i < nb_pkts; i++) { +- ev.mbuf = tx_pkts[i]; +- nb_sent += rte_event_eth_tx_adapter_enqueue( +- eventdev_id, 
port_id, &ev, 1, 0); +- } +- +- return nb_sent; +-} +- +-static int +-event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx) +-{ +- int nb_ev, nb_rx = 0, j = 0; +- const int ms_per_pkt = 3; +- struct rte_event ev; +- +- do { +- nb_ev = rte_event_dequeue_burst(eventdev_id, port_id, +- &ev, 1, 0); +- +- if (nb_ev == 0) { +- rte_delay_ms(1); +- continue; +- } +- +- /* Get packet from event */ +- if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) { +- printf("Unsupported event type: %i\n", +- ev.event_type); +- continue; +- } +- rx_pkts[nb_rx++] = ev.mbuf; +- } while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx); +- +- return nb_rx; +-} +- + static int + test_ipsec_inline_sa_exp_event_callback(uint16_t port_id, + enum rte_eth_event_type type, void *param, void *ret_param) +@@ -1475,10 +1467,32 @@ out: + } + + static int +-ut_setup_inline_ipsec(void) ++ut_setup_inline_ipsec_reassembly(void) + { ++ struct rte_eth_ip_reassembly_params reass_capa = {0}; + int ret; + ++ rte_eth_ip_reassembly_capability_get(port_id, &reass_capa); ++ if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) { ++ reass_capa.timeout_ms = APP_REASS_TIMEOUT; ++ rte_eth_ip_reassembly_conf_set(port_id, &reass_capa); ++ } ++ ++ /* Start event devices */ ++ if (event_mode_enabled) { ++ ret = rte_event_eth_rx_adapter_start(rx_adapter_id); ++ if (ret < 0) { ++ printf("Failed to start rx adapter %d\n", ret); ++ return ret; ++ } ++ ++ ret = rte_event_dev_start(eventdev_id); ++ if (ret < 0) { ++ printf("Failed to start event device %d\n", ret); ++ return ret; ++ } ++ } ++ + /* Start device */ + ret = rte_eth_dev_start(port_id); + if (ret < 0) { +@@ -1500,12 +1514,16 @@ ut_setup_inline_ipsec(void) + } + + static void +-ut_teardown_inline_ipsec(void) ++ut_teardown_inline_ipsec_reassembly(void) + { + struct rte_eth_ip_reassembly_params reass_conf = {0}; + uint16_t portid; + int ret; + ++ /* Stop event devices */ ++ if (event_mode_enabled) ++ rte_event_dev_stop(eventdev_id); ++ + /* port tear down */ + RTE_ETH_FOREACH_DEV(portid) { + ret = rte_eth_dev_stop(portid); +@@ -1517,6 +1535,58 @@ ut_teardown_inline_ipsec(void) + rte_eth_ip_reassembly_conf_set(portid, &reass_conf); + } + } ++static int ++ut_setup_inline_ipsec(void) ++{ ++ int ret; ++ ++ /* Start event devices */ ++ if (event_mode_enabled) { ++ ret = rte_event_dev_start(eventdev_id); ++ if (ret < 0) { ++ printf("Failed to start event device %d\n", ret); ++ return ret; ++ } ++ } ++ ++ /* Start device */ ++ ret = rte_eth_dev_start(port_id); ++ if (ret < 0) { ++ printf("rte_eth_dev_start: err=%d, port=%d\n", ++ ret, port_id); ++ return ret; ++ } ++ /* always enable promiscuous */ ++ ret = rte_eth_promiscuous_enable(port_id); ++ if (ret != 0) { ++ printf("rte_eth_promiscuous_enable: err=%s, port=%d\n", ++ rte_strerror(-ret), port_id); ++ return ret; ++ } ++ ++ check_all_ports_link_status(1, RTE_PORT_ALL); ++ ++ return 0; ++} ++ ++static void ++ut_teardown_inline_ipsec(void) ++{ ++ uint16_t portid; ++ int ret; ++ ++ /* Stop event devices */ ++ if (event_mode_enabled) ++ rte_event_dev_stop(eventdev_id); ++ ++ /* port tear down */ ++ RTE_ETH_FOREACH_DEV(portid) { ++ ret = rte_eth_dev_stop(portid); ++ if (ret != 0) ++ printf("rte_eth_dev_stop: err=%s, port=%u\n", ++ rte_strerror(-ret), portid); ++ } ++} + + static int + inline_ipsec_testsuite_setup(void) +@@ -3048,43 +3118,43 @@ static struct unit_test_suite inline_ipsec_testsuite = { + + TEST_CASE_NAMED_WITH_DATA( + "IPv4 Reassembly with 2 fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ 
ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv4_2frag_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv6 Reassembly with 2 fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv6_2frag_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv4 Reassembly with 4 fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv4_4frag_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv6 Reassembly with 4 fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv6_4frag_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv4 Reassembly with 5 fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv4_5frag_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv6 Reassembly with 5 fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv6_5frag_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv4 Reassembly with incomplete fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv4_incomplete_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv4 Reassembly with overlapping fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv4_overlap_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv4 Reassembly with out of order fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv4_out_of_order_vector), + TEST_CASE_NAMED_WITH_DATA( + "IPv4 Reassembly with burst of 4 fragments", +- ut_setup_inline_ipsec, ut_teardown_inline_ipsec, ++ ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, + test_inline_ip_reassembly, &ipv4_4frag_burst_vector), + + TEST_CASES_END() /**< NULL terminate unit test array */ +diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build +index 6d9ffd4f4b..7cd375e991 100644 +--- a/dpdk/config/meson.build ++++ b/dpdk/config/meson.build +@@ -139,7 +139,7 @@ endif + + toolchain = cc.get_id() + dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain) +-dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1) ++dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper().underscorify(), 1) + + dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8) + dpdk_conf.set('RTE_ARCH_32', cc.sizeof('void *') == 4) +@@ -191,7 +191,7 @@ if find_libnuma + endif + + has_libfdt = 0 +-fdt_dep = cc.find_library('libfdt', required: false) ++fdt_dep = cc.find_library('fdt', required: false) + if fdt_dep.found() and cc.has_header('fdt.h') + dpdk_conf.set10('RTE_HAS_LIBFDT', true) + has_libfdt = 1 +@@ -199,11 +199,12 @@ if fdt_dep.found() and cc.has_header('fdt.h') + dpdk_extra_ldflags += '-lfdt' + endif + +-libexecinfo = cc.find_library('libexecinfo', required: false) +-if libexecinfo.found() and cc.has_header('execinfo.h') ++libexecinfo = cc.find_library('execinfo', required: false) ++if libexecinfo.found() + 
add_project_link_arguments('-lexecinfo', language: 'c') + dpdk_extra_ldflags += '-lexecinfo' + endif ++dpdk_conf.set('RTE_BACKTRACE', cc.has_header('execinfo.h') or is_windows) + + libarchive = dependency('libarchive', required: false, method: 'pkg-config') + if libarchive.found() +@@ -365,7 +366,7 @@ if max_numa_nodes == 'detect' + error('Discovery of max_numa_nodes not supported for cross-compilation.') + endif + # overwrite the default value with discovered values +- max_numa_nodes = run_command(get_numa_count_cmd).stdout().to_int() ++ max_numa_nodes = run_command(get_numa_count_cmd, check: true).stdout().to_int() + message('Found @0@ numa nodes'.format(max_numa_nodes)) + dpdk_conf.set('RTE_MAX_NUMA_NODES', max_numa_nodes) + elif max_numa_nodes != 'default' +diff --git a/dpdk/config/rte_config.h b/dpdk/config/rte_config.h +index 3c4876d434..7b8c85e948 100644 +--- a/dpdk/config/rte_config.h ++++ b/dpdk/config/rte_config.h +@@ -37,7 +37,6 @@ + #define RTE_MAX_MEMZONE 2560 + #define RTE_MAX_TAILQ 32 + #define RTE_LOG_DP_LEVEL RTE_LOG_INFO +-#define RTE_BACKTRACE 1 + #define RTE_MAX_VFIO_CONTAINERS 64 + + /* bsd module defines */ +diff --git a/dpdk/devtools/check-git-log.sh b/dpdk/devtools/check-git-log.sh +index 01d8aa0717..2ee7f2db64 100755 +--- a/dpdk/devtools/check-git-log.sh ++++ b/dpdk/devtools/check-git-log.sh +@@ -120,7 +120,7 @@ words="$selfdir/words-case.txt" + for word in $(cat $words); do + bad=$(echo "$headlines" | grep -iw $word | grep -vw $word) + if [ "$word" = "Tx" ]; then +- bad=$(echo $bad | grep -v 'OCTEON\ TX') ++ bad=$(echo $bad | grep -v 'OCTEON TX') + fi + for bad_line in $bad; do + bad_word=$(echo $bad_line | cut -d":" -f2 | grep -iwo $word) +@@ -259,6 +259,23 @@ done) + [ -z "$bad" ] || { printf "Missing 'Signed-off-by:' tag: \n$bad\n"\ + && failure=true;} + ++# check names ++names=$(git log --format='From: %an <%ae>%n%b' --reverse $range | ++ sed -rn 's,.*: (.*<.*@.*>),\1,p' | ++ sort -u) ++bad=$(for contributor in $names ; do ++ contributor=$(echo $contributor | sed 's,(,\\(,') ++ ! grep -qE "^$contributor($| <)" $selfdir/../.mailmap || continue ++ name=${contributor%% <*} ++ if grep -q "^$name <" $selfdir/../.mailmap ; then ++ printf "\t$contributor is not the primary email address\n" ++ else ++ printf "\t$contributor is unknown in .mailmap\n" ++ fi ++done) ++[ -z "$bad" ] || { printf "Contributor name/email mismatch with .mailmap: \n$bad\n"\ ++ && failure=true;} ++ + total=$(echo "$commits" | wc -l) + if $failure ; then + printf "\nInvalid patch(es) found - checked $total patch" +diff --git a/dpdk/devtools/checkpatches.sh b/dpdk/devtools/checkpatches.sh +index be1cb03ea7..a07bbc83cb 100755 +--- a/dpdk/devtools/checkpatches.sh ++++ b/dpdk/devtools/checkpatches.sh +@@ -248,28 +248,6 @@ check_release_notes() { # + grep -v $current_rel_notes + } + +-check_names() { # +- res=0 +- +- old_IFS=$IFS +- IFS=' +-' +- for contributor in $(sed -rn '1,/^--- / {s/.*: (.*<.*@.*>)/\1/p}' $1); do +- ! grep -qE "^$contributor($| <)" .mailmap || continue +- name=${contributor%% <*} +- if grep -q "^$name <" .mailmap; then +- reason="$name mail differs from primary mail" +- else +- reason="$contributor is unknown" +- fi +- echo "$reason, please fix the commit message or update .mailmap." +- res=1 +- done +- IFS=$old_IFS +- +- return $res +-} +- + number=0 + range='origin/main..' + quiet=false +@@ -378,14 +356,6 @@ check () { # + ret=1 + fi + +- ! $verbose || printf '\nChecking names in commit log:\n' +- report=$(check_names "$tmpinput") +- if [ $? 
-ne 0 ] ; then +- $headline_printed || print_headline "$subject" +- printf '%s\n' "$report" +- ret=1 +- fi +- + if [ "$tmpinput" != "$1" ]; then + rm -f "$tmpinput" + trap - INT +diff --git a/dpdk/doc/api/doxy-api-index.md b/dpdk/doc/api/doxy-api-index.md +index de488c7abf..bbca14be3d 100644 +--- a/dpdk/doc/api/doxy-api-index.md ++++ b/dpdk/doc/api/doxy-api-index.md +@@ -22,6 +22,7 @@ The public API headers are grouped by topics: + [compress](@ref rte_comp.h), + [regexdev](@ref rte_regexdev.h), + [dmadev](@ref rte_dmadev.h), ++ [gpudev](@ref rte_gpudev.h), + [eventdev](@ref rte_eventdev.h), + [event_eth_rx_adapter](@ref rte_event_eth_rx_adapter.h), + [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h), +diff --git a/dpdk/doc/api/doxy-api.conf.in b/dpdk/doc/api/doxy-api.conf.in +index f0886c3bd1..dd8ebab447 100644 +--- a/dpdk/doc/api/doxy-api.conf.in ++++ b/dpdk/doc/api/doxy-api.conf.in +@@ -99,7 +99,6 @@ GENERATE_DEPRECATEDLIST = YES + VERBATIM_HEADERS = NO + ALPHABETICAL_INDEX = NO + +-HTML_TIMESTAMP = NO + HTML_DYNAMIC_SECTIONS = YES + HTML_EXTRA_STYLESHEET = @TOPDIR@/doc/api/custom.css + SEARCHENGINE = YES +diff --git a/dpdk/doc/guides/conf.py b/dpdk/doc/guides/conf.py +index a55ce38800..0f7ff5282d 100644 +--- a/dpdk/doc/guides/conf.py ++++ b/dpdk/doc/guides/conf.py +@@ -203,6 +203,7 @@ def generate_overview_table(output_filename, table_id, section, table_name, titl + num_cols = len(header_names) + + print_table_css(outfile, table_id) ++ print('.. _' + table_name + ':', file=outfile) + print('.. table:: ' + table_name + '\n', file=outfile) + print_table_header(outfile, num_cols, header_names, title) + print_table_body(outfile, num_cols, ini_files, ini_data, default_features) +diff --git a/dpdk/doc/guides/gpus/cuda.rst b/dpdk/doc/guides/gpus/cuda.rst +index 114e3bc8cb..6520c17c3e 100644 +--- a/dpdk/doc/guides/gpus/cuda.rst ++++ b/dpdk/doc/guides/gpus/cuda.rst +@@ -12,20 +12,19 @@ Information and documentation about these devices can be found on the + Build dependencies + ------------------ + +-The CUDA GPU driver library has an header-only dependency on ``cuda.h`` and ``cudaTypedefs.h``. +-To get these headers there are two options: ++The CUDA GPU driver library has a header-only dependency on ``cuda.h`` and ``cudaTypedefs.h``. ++To get these headers, there are two options: + + - Install `CUDA Toolkit `_ + (either regular or stubs installation). + - Download these two headers from this `CUDA headers + `_ repository. + +-You need to indicate to meson where CUDA headers files are through the CFLAGS variable. +-Three ways: ++You can point to CUDA header files either with the ``CFLAGS`` environment variable, ++or with the ``c_args`` Meson option. Examples: + +-- Set ``export CFLAGS=-I/usr/local/cuda/include`` before building +-- Add CFLAGS in the meson command line ``CFLAGS=-I/usr/local/cuda/include meson setup build`` +-- Add the ``-Dc_args`` in meson command line ``meson setup build -Dc_args=-I/usr/local/cuda/include`` ++- ``CFLAGS=-I/usr/local/cuda/include meson setup build`` ++- ``meson setup build -Dc_args=-I/usr/local/cuda/include`` + + If headers are not found, the CUDA GPU driver library is not built. + +@@ -46,15 +45,15 @@ A quick recipe to download, build and run GDRCopy library and driver: + $ # Launch gdrdrv kernel module on the system + $ sudo ./insmod.sh + +-You need to indicate to meson where GDRCopy headers files are as in case of CUDA headers. ++You need to indicate to Meson where GDRCopy header files are as in case of CUDA headers. + An example would be: + + .. 
code-block:: console + + $ meson setup build -Dc_args="-I/usr/local/cuda/include -I/path/to/gdrcopy/include" + +-If headers are not found, the CUDA GPU driver library is built without the CPU map capability +-and will return error if the application invokes the gpudev ``rte_gpu_mem_cpu_map`` function. ++If headers are not found, the CUDA GPU driver library is built without the CPU map capability, ++and will return an error if the application invokes the gpudev ``rte_gpu_mem_cpu_map`` function. + + + CUDA Shared Library +@@ -143,7 +142,7 @@ if the address is not in the table the CUDA driver library will return an error. + Features + -------- + +-- Register new child devices aka new CUDA Driver contexts. ++- Register new child devices, aka CUDA driver contexts. + - Allocate memory on the GPU. + - Register CPU memory to make it visible from GPU. + +@@ -189,9 +188,10 @@ External references + A good example of how to use the GPU CUDA driver library through the gpudev library + is the l2fwd-nv application that can be found `here `_. + +-The application is based on vanilla DPDK example l2fwd +-and is enhanced with GPU memory managed through gpudev library +-and CUDA to launch the swap of packets MAC addresses workload on the GPU. ++The application is based on the DPDK example l2fwd, ++with GPU memory managed through gpudev library. ++It includes a CUDA workload swapping MAC addresses ++of packets received in the GPU. + + l2fwd-nv is not intended to be used for performance + (testpmd is the good candidate for this). +diff --git a/dpdk/doc/guides/linux_gsg/enable_func.rst b/dpdk/doc/guides/linux_gsg/enable_func.rst +index 829084d80e..2344d97403 100644 +--- a/dpdk/doc/guides/linux_gsg/enable_func.rst ++++ b/dpdk/doc/guides/linux_gsg/enable_func.rst +@@ -55,12 +55,12 @@ Refer to the `documentation ++ setcap cap_dac_read_search,cap_ipc_lock,cap_sys_admin+ep + + If physical addresses are not accessible, + the following message will appear during EAL initialization:: +diff --git a/dpdk/doc/guides/linux_gsg/sys_reqs.rst b/dpdk/doc/guides/linux_gsg/sys_reqs.rst +index a7e8261e22..dfeaf4e1c5 100644 +--- a/dpdk/doc/guides/linux_gsg/sys_reqs.rst ++++ b/dpdk/doc/guides/linux_gsg/sys_reqs.rst +@@ -32,7 +32,7 @@ Compilation of the DPDK + + * For RHEL/Fedora systems these can be installed using ``dnf groupinstall "Development Tools"`` + * For Ubuntu/Debian systems these can be installed using ``apt install build-essential`` +- * For Alpine Linux, ``apk add alpine-sdk bsd-compat-headers libexecinfo-dev`` ++ * For Alpine Linux, ``apk add alpine-sdk bsd-compat-headers`` + + .. note:: + +diff --git a/dpdk/doc/guides/nics/bnxt.rst b/dpdk/doc/guides/nics/bnxt.rst +index 293eab8787..871d14142c 100644 +--- a/dpdk/doc/guides/nics/bnxt.rst ++++ b/dpdk/doc/guides/nics/bnxt.rst +@@ -912,6 +912,7 @@ Shown below are Ethernet Network Adapters and their supported firmware versions + * ``BCM57500 NetXtreme-E\ |reg| Family`` ... Firmware 219.0.0 or later + + Shown below are DPDK LTS releases and their supported firmware versions: ++ + * ``DPDK Release 19.11`` ... Firmware 219.0.103 or later + * ``DPDK Release 20.11`` ... Firmware 219.0.103 or later + * ``DPDK Release 21.11`` ... Firmware 221.0.101 or later +@@ -1018,8 +1019,7 @@ Listed below are the rte_flow functions supported: + rte_flow Items + ~~~~~~~~~~~~~~ + +-Refer to "Table 1.2 rte_flow items availability in networking drivers" in +-`Overview of Networking Drivers `. ++Refer to :ref:`rte_flow items availability in networking drivers`. 
+ + Listed below are the rte_flow items supported: + +@@ -1044,8 +1044,7 @@ Listed below are the rte_flow items supported: + rte_flow Actions + ~~~~~~~~~~~~~~~~ + +-Refer to "Table 1.3 rte_flow actions availability in networking drivers" in +-`Overview of Networking Drivers `. ++Refer to :ref:`rte_flow actions availability in networking drivers`. + + Listed below are the rte_flow actions supported: + +diff --git a/dpdk/doc/guides/nics/features/iavf.ini b/dpdk/doc/guides/nics/features/iavf.ini +index 9db2865b71..5cdf0ddee6 100644 +--- a/dpdk/doc/guides/nics/features/iavf.ini ++++ b/dpdk/doc/guides/nics/features/iavf.ini +@@ -21,7 +21,7 @@ RSS key update = Y + RSS reta update = Y + VLAN filter = Y + CRC offload = Y +-VLAN offload = Y ++VLAN offload = P + L3 checksum offload = P + L4 checksum offload = P + Timestamp offload = P +diff --git a/dpdk/doc/guides/nics/hns3.rst b/dpdk/doc/guides/nics/hns3.rst +index 791c9cc2ed..6fbd4320ef 100644 +--- a/dpdk/doc/guides/nics/hns3.rst ++++ b/dpdk/doc/guides/nics/hns3.rst +@@ -81,7 +81,8 @@ Runtime Config Options + ``common``. + + For example:: +- -a 0000:7d:00.0,rx_func_hint=simple ++ ++ -a 0000:7d:00.0,rx_func_hint=simple + + - ``tx_func_hint`` (default ``none``) + +@@ -101,7 +102,8 @@ Runtime Config Options + ``common``. + + For example:: +- -a 0000:7d:00.0,tx_func_hint=common ++ ++ -a 0000:7d:00.0,tx_func_hint=common + + - ``dev_caps_mask`` (default ``0``) + +@@ -113,22 +115,25 @@ Runtime Config Options + Its main purpose is to debug and avoid problems. + + For example:: +- -a 0000:7d:00.0,dev_caps_mask=0xF ++ ++ -a 0000:7d:00.0,dev_caps_mask=0xF + + - ``mbx_time_limit_ms`` (default ``500``) +- Used to define the mailbox time limit by user. +- Current, the max waiting time for MBX response is 500ms, but in +- some scenarios, it is not enough. Since it depends on the response +- of the kernel mode driver, and its response time is related to the +- scheduling of the system. In this special scenario, most of the +- cores are isolated, and only a few cores are used for system +- scheduling. When a large number of services are started, the +- scheduling of the system will be very busy, and the reply of the +- mbx message will time out, which will cause our PMD initialization +- to fail. So provide access to set mailbox time limit for user. +- +- For example:: +- -a 0000:7d:00.0,mbx_time_limit_ms=600 ++ ++ Used to define the mailbox time limit by user. ++ Current, the max waiting time for MBX response is 500ms, but in ++ some scenarios, it is not enough. Since it depends on the response ++ of the kernel mode driver, and its response time is related to the ++ scheduling of the system. In this special scenario, most of the ++ cores are isolated, and only a few cores are used for system ++ scheduling. When a large number of services are started, the ++ scheduling of the system will be very busy, and the reply of the ++ mbx message will time out, which will cause our PMD initialization ++ to fail. So provide access to set mailbox time limit for user. ++ ++ For example:: ++ ++ -a 0000:7d:00.0,mbx_time_limit_ms=600 + + Link status event Pre-conditions + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +@@ -137,7 +142,8 @@ Firmware 1.8.0.0 and later versions support reporting link changes to the PF. + Therefore, to use the LSC for the PF driver, ensure that the firmware version + also supports reporting link changes. + If the VF driver needs to support LSC, special patch must be added: +-``_. ++``_. ++ + Note: The patch has been uploaded to 5.13 of the Linux kernel mainline. 
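The ``-a`` examples reformatted in the hns3 hunk above are ordinary EAL allow-list arguments, so the same devargs can also be supplied programmatically. A minimal sketch; the PCI address and the 600 ms value are illustrative only:

.. code-block:: c

   #include <rte_eal.h>

   int
   main(void)
   {
       /* Equivalent of: dpdk-testpmd -a 0000:7d:00.0,mbx_time_limit_ms=600 */
       char *eal_argv[] = {
           "hns3-demo",
           "-a", "0000:7d:00.0,mbx_time_limit_ms=600",
       };

       if (rte_eal_init(3, eal_argv) < 0)
           return 1;

       /* ... device probe and port setup would follow here ... */
       return rte_eal_cleanup();
   }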
+ + +diff --git a/dpdk/doc/guides/nics/ice.rst b/dpdk/doc/guides/nics/ice.rst +index ce075e067c..b3dc72d421 100644 +--- a/dpdk/doc/guides/nics/ice.rst ++++ b/dpdk/doc/guides/nics/ice.rst +@@ -331,18 +331,18 @@ Additional Options + + ip link set dev enp24s0f0 vf 0 trust on + +-#. Bind the VF0, and run testpmd with 'cap=dcf' devarg:: ++#. Bind the VF0, and run testpmd with 'cap=dcf' with port representor for VF 1 and 2:: + +- dpdk-testpmd -l 22-25 -n 4 -a 18:01.0,cap=dcf -- -i ++ dpdk-testpmd -l 22-25 -n 4 -a 18:01.0,cap=dcf,representor=vf[1-2] -- -i + + #. Monitor the VF2 interface network traffic:: + + tcpdump -e -nn -i enp24s1f2 + +-#. Create one flow to redirect the traffic to VF2 by DCF:: ++#. Create one flow to redirect the traffic to VF2 by DCF (assume the representor port ID is 5):: + + flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 \ +- dst is 192.168.0.3 / end actions vf id 2 / end ++ dst is 192.168.0.3 / end actions represented_port ethdev_port_id 5 / end + + #. Send the packet, and it should be displayed on tcpdump:: + +diff --git a/dpdk/doc/guides/nics/mana.rst b/dpdk/doc/guides/nics/mana.rst +index 005c0b2ca7..341146c4e7 100644 +--- a/dpdk/doc/guides/nics/mana.rst ++++ b/dpdk/doc/guides/nics/mana.rst +@@ -29,6 +29,7 @@ and must be installed separately: + It allows slow and privileged operations + (context initialization, hardware resources allocations) + to be managed by the kernel and fast operations to never leave user space. ++ The minimum required rdma-core version is v44. + + In most cases, rdma-core is shipped as a package with an OS distribution. + User can also install the upstream version of the rdma-core from +@@ -39,15 +40,14 @@ and must be installed separately: + Low-level user space driver library + for Microsoft Azure Network Adapter devices, + it is automatically loaded by libibverbs. +- +- The support of MANA is not merged in rdma-core 42. ++ The minimum required version of rdma-core with libmana is v44. + + - **Kernel modules** + + They provide the kernel-side verbs API and low level device drivers + that manage actual hardware initialization + and resources sharing with user space processes. +- The minimum required Linux kernel version is 6.1. ++ The minimum required Linux kernel version is 6.2. + + Unlike most other PMDs, these modules must remain loaded + and bound to their devices: +@@ -56,8 +56,6 @@ and must be installed separately: + - mana_ib: InifiniBand device driver. + - ib_uverbs: user space driver for verbs (entry point for libibverbs). + +- The support of MANA is planned in Linux 6.2. +- + Driver compilation and testing + ------------------------------ + +diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst +index 51f51259e3..937fa5c6e0 100644 +--- a/dpdk/doc/guides/nics/mlx5.rst ++++ b/dpdk/doc/guides/nics/mlx5.rst +@@ -1121,6 +1121,9 @@ for an additional list of options shared with other mlx5 drivers. + - 0. If representor matching is disabled, then there will be no implicit + item added. As a result, ingress flow rules will match traffic + coming to any port, not only the port on which flow rule is created. ++ Because of that, default flow rules for ingress traffic cannot be created ++ and port starts in isolated mode by default. Port cannot be switched back ++ to non-isolated mode. + + - 1. If representor matching is enabled (default setting), + then each ingress pattern template has an implicit REPRESENTED_PORT +@@ -1547,6 +1550,14 @@ shortened below as "OFED". 
+ | | | ConnectX-5 | | ConnectX-5 | + +-----------------------+-----------------+-----------------+ + ++.. table:: Minimal SW/HW versions for flow template API ++ ++ +-----------------+--------------------+--------------------+ ++ | DPDK | NIC | Firmware | ++ +=================+====================+====================+ ++ | 22.11 | ConnectX-6 Dx | xx.35.1012 | ++ +-----------------+--------------------+--------------------+ ++ + Notes for metadata + ------------------ + +diff --git a/dpdk/doc/guides/nics/tap.rst b/dpdk/doc/guides/nics/tap.rst +index 2f7417bddd..07df0d35a2 100644 +--- a/dpdk/doc/guides/nics/tap.rst ++++ b/dpdk/doc/guides/nics/tap.rst +@@ -34,14 +34,14 @@ Using the option ``mac=fixed`` you can create a fixed known MAC address:: + + The MAC address will have a fixed value with the last octet incrementing by one + for each interface string containing ``mac=fixed``. The MAC address is formatted +-as 00:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the +-actual MAC address: ``00:64:74:61:70:[00-FF]``. ++as 02:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the ++actual MAC address: ``02:64:74:61:70:[00-FF]``. + +- --vdev=net_tap0,mac="00:64:74:61:70:11" ++ --vdev=net_tap0,mac="02:64:74:61:70:11" + + The MAC address will have a user value passed as string. The MAC address is in + format with delimiter ``:``. The string is byte converted to hex and you get +-the actual MAC address: ``00:64:74:61:70:11``. ++the actual MAC address: ``02:64:74:61:70:11``. + + It is possible to specify a remote netdevice to capture packets from by adding + ``remote=foo1``, for example:: +diff --git a/dpdk/doc/guides/platform/cnxk.rst b/dpdk/doc/guides/platform/cnxk.rst +index aadd60b5d4..0eafde71d6 100644 +--- a/dpdk/doc/guides/platform/cnxk.rst ++++ b/dpdk/doc/guides/platform/cnxk.rst +@@ -253,7 +253,7 @@ context or stats using debugfs. + + Enable ``debugfs`` by: + +-1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUGFS=y``. ++1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUG_FS=y``. + 2. Boot OCTEON CN9K/CN10K with debugfs supported kernel. + 3. Verify ``debugfs`` mounted by default "mount | grep -i debugfs" or mount it manually by using. + +diff --git a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst b/dpdk/doc/guides/prog_guide/cryptodev_lib.rst +index 01aad842a9..2b513bbf82 100644 +--- a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst ++++ b/dpdk/doc/guides/prog_guide/cryptodev_lib.rst +@@ -98,14 +98,10 @@ The rte_cryptodev_configure API is used to configure a Crypto device. + The ``rte_cryptodev_config`` structure is used to pass the configuration + parameters for socket selection and number of queue pairs. + +-.. code-block:: c +- +- struct rte_cryptodev_config { +- int socket_id; +- /**< Socket to allocate resources on */ +- uint16_t nb_queue_pairs; +- /**< Number of queue pairs to configure on device */ +- }; ++.. literalinclude:: ../../../lib/cryptodev/rte_cryptodev.h ++ :language: c ++ :start-after: Structure rte_cryptodev_config 8< ++ :end-before: >8 End of structure rte_cryptodev_config. + + + Configuration of Queue Pairs +@@ -121,11 +117,11 @@ Each queue pairs resources may be allocated on a specified socket. + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id) + +- struct rte_cryptodev_qp_conf { +- uint32_t nb_descriptors; /**< Number of descriptors per queue pair */ +- struct rte_mempool *mp_session; +- /**< The mempool for creating session in sessionless mode */ +- }; ++ ++.. 
literalinclude:: ../../../lib/cryptodev/rte_cryptodev.h ++ :language: c ++ :start-after: Structure rte_cryptodev_qp_conf 8< ++ :end-before: >8 End of structure rte_cryptodev_qp_conf. + + + The field ``mp_session`` is used for creating temporary session to process +@@ -271,23 +267,10 @@ This allows the user to query a specific Crypto PMD and get all the device + features and capabilities. The ``rte_cryptodev_info`` structure contains all the + relevant information for the device. + +-.. code-block:: c +- +- struct rte_cryptodev_info { +- const char *driver_name; +- uint8_t driver_id; +- struct rte_device *device; +- +- uint64_t feature_flags; +- +- const struct rte_cryptodev_capabilities *capabilities; +- +- unsigned max_nb_queue_pairs; +- +- struct { +- unsigned max_nb_sessions; +- } sym; +- }; ++.. literalinclude:: ../../../lib/cryptodev/rte_cryptodev.h ++ :language: c ++ :start-after: Structure rte_cryptodev_info 8< ++ :end-before: >8 End of structure rte_cryptodev_info. + + + Operation Processing +@@ -499,37 +482,29 @@ a flow. Crypto sessions cache this immutable data in a optimal way for the + underlying PMD and this allows further acceleration of the offload of + Crypto workloads. + +-.. figure:: img/cryptodev_sym_sess.* +- + The Crypto device framework provides APIs to create session mempool and allocate + and initialize sessions for crypto devices, where sessions are mempool objects. + The application has to use ``rte_cryptodev_sym_session_pool_create()`` to +-create the session header mempool that creates a mempool with proper element +-size automatically and stores necessary information for safely accessing the +-session in the mempool's private data field. +- +-To create a mempool for storing session private data, the application has two +-options. The first is to create another mempool with elt size equal to or +-bigger than the maximum session private data size of all crypto devices that +-will share the same session header. The creation of the mempool shall use the +-traditional ``rte_mempool_create()`` with the correct ``elt_size``. The other +-option is to change the ``elt_size`` parameter in +-``rte_cryptodev_sym_session_pool_create()`` to the correct value. The first +-option is more complex to implement but may result in better memory usage as +-a session header normally takes smaller memory footprint as the session private +-data. ++create the session mempool header and the private data with the size specified ++by the user through the ``elt_size`` parameter in the function. ++The session private data is for the driver to initialize and access ++during crypto operations, hence the ``elt_size`` should be big enough ++for all drivers that will share this mempool. ++To obtain the proper session private data size of a crypto device, ++the user can call ``rte_cryptodev_sym_get_private_session_size()`` function. ++In case of heterogeneous crypto devices which will share the same session mempool, ++the maximum session private data size of them should be passed. + + Once the session mempools have been created, ``rte_cryptodev_sym_session_create()`` +-is used to allocate an uninitialized session from the given mempool. +-The session then must be initialized using ``rte_cryptodev_sym_session_init()`` +-for each of the required crypto devices. A symmetric transform chain +-is used to specify the operation and its parameters. See the section below for +-details on transforms. ++is used to allocate and initialize the session from the given mempool. 
++The created session can ONLY be used by the crypto devices sharing the same driver ID ++as the device ID passed into the function as the parameter. ++In addition, a symmetric transform chain is used to specify the operation and its parameters. ++See the section below for details on transforms. + +-When a session is no longer used, user must call ``rte_cryptodev_sym_session_clear()`` +-for each of the crypto devices that are using the session, to free all driver +-private session data. Once this is done, session should be freed using +-``rte_cryptodev_sym_session_free`` which returns them to their mempool. ++When a session is no longer used, user must call ``rte_cryptodev_sym_session_free()`` ++to uninitialize the session data and return the session ++back to the mempool it belongs. + + + Transforms and Transform Chaining +@@ -548,22 +523,10 @@ Currently there are three transforms types cipher, authentication and AEAD. + Also it is important to note that the order in which the + transforms are passed indicates the order of the chaining. + +-.. code-block:: c +- +- struct rte_crypto_sym_xform { +- struct rte_crypto_sym_xform *next; +- /**< next xform in chain */ +- enum rte_crypto_sym_xform_type type; +- /**< xform type */ +- union { +- struct rte_crypto_auth_xform auth; +- /**< Authentication / hash xform */ +- struct rte_crypto_cipher_xform cipher; +- /**< Cipher xform */ +- struct rte_crypto_aead_xform aead; +- /**< AEAD xform */ +- }; +- }; ++.. literalinclude:: ../../../lib/cryptodev/rte_crypto_sym.h ++ :language: c ++ :start-after: Structure rte_crypto_sym_xform 8< ++ :end-before: >8 End of structure rte_crypto_sym_xform. + + The API does not place a limit on the number of transforms that can be chained + together but this will be limited by the underlying Crypto device poll mode +@@ -586,61 +549,11 @@ authentication/ cipher/ AEAD parameters required depending on the type of operat + specified in the session or the transform + chain. + +-.. code-block:: c ++.. literalinclude:: ../../../lib/cryptodev/rte_crypto_sym.h ++ :language: c ++ :start-after: Structure rte_crypto_sym_op 8< ++ :end-before: >8 End of structure rte_crypto_sym_op. 
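To make the 22.11 session workflow described above in this hunk concrete, here is a minimal sketch; the pool name, element count and cache size are illustrative, not taken from the patch:

.. code-block:: c

   #include <rte_cryptodev.h>
   #include <rte_lcore.h>

   /* Illustrative helper: create one session pool sized for a single
    * device, then allocate and initialize a session from it. */
   static void *
   make_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform)
   {
       /* elt_size must cover the largest private-data size among all
        * devices that will share this mempool. */
       unsigned int elt_size =
           rte_cryptodev_sym_get_private_session_size(dev_id);

       struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
           "sym_sess_pool", 1024, elt_size, 32, 0, rte_socket_id());
       if (pool == NULL)
           return NULL;

       /* In 22.11 this allocates and initializes in a single call. */
       return rte_cryptodev_sym_session_create(dev_id, xform, pool);
   }

When the session is no longer needed, ``rte_cryptodev_sym_session_free()`` returns it to the mempool, as the updated text above describes.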
+ +- struct rte_crypto_sym_op { +- struct rte_mbuf *m_src; +- struct rte_mbuf *m_dst; +- +- union { +- void *session; +- /**< Handle for the initialised session context */ +- struct rte_crypto_sym_xform *xform; +- /**< Session-less API Crypto operation parameters */ +- }; +- +- union { +- struct { +- struct { +- uint32_t offset; +- uint32_t length; +- } data; /**< Data offsets and length for AEAD */ +- +- struct { +- uint8_t *data; +- rte_iova_t phys_addr; +- } digest; /**< Digest parameters */ +- +- struct { +- uint8_t *data; +- rte_iova_t phys_addr; +- } aad; +- /**< Additional authentication parameters */ +- } aead; +- +- struct { +- struct { +- struct { +- uint32_t offset; +- uint32_t length; +- } data; /**< Data offsets and length for ciphering */ +- } cipher; +- +- struct { +- struct { +- uint32_t offset; +- uint32_t length; +- } data; +- /**< Data offsets and length for authentication */ +- +- struct { +- uint8_t *data; +- rte_iova_t phys_addr; +- } digest; /**< Digest parameters */ +- } auth; +- }; +- }; +- }; + + Synchronous mode + ---------------- +diff --git a/dpdk/doc/guides/prog_guide/event_timer_adapter.rst b/dpdk/doc/guides/prog_guide/event_timer_adapter.rst +index d7307a29bb..7733424aac 100644 +--- a/dpdk/doc/guides/prog_guide/event_timer_adapter.rst ++++ b/dpdk/doc/guides/prog_guide/event_timer_adapter.rst +@@ -35,7 +35,7 @@ device upon timer expiration. + + The Event Timer Adapter API represents each event timer with a generic struct, + which contains an event and user metadata. The ``rte_event_timer`` struct is +-defined in ``lib/event/librte_event_timer_adapter.h``. ++defined in ``rte_event_timer_adapter.h``. + + .. _timer_expiry_event: + +@@ -229,9 +229,7 @@ Note that it is necessary to initialize the event timer state to + RTE_EVENT_TIMER_NOT_ARMED. Also note that we have saved a pointer to the + ``conn`` object in the timer's event payload. This will allow us to locate + the connection object again once we dequeue the timer expiry event from the +-event device later. As a convenience, the application may specify no value for +-ev.event_ptr, and the adapter will by default set it to point at the event +-timer itself. ++event device later. + + Now we can arm the event timer with ``rte_event_timer_arm_burst()``: + +diff --git a/dpdk/doc/guides/prog_guide/graph_lib.rst b/dpdk/doc/guides/prog_guide/graph_lib.rst +index 1cfdc86433..4ab0623f44 100644 +--- a/dpdk/doc/guides/prog_guide/graph_lib.rst ++++ b/dpdk/doc/guides/prog_guide/graph_lib.rst +@@ -173,7 +173,7 @@ Create the graph object + ~~~~~~~~~~~~~~~~~~~~~~~ + Now that the nodes are linked, Its time to create a graph by including + the required nodes. The application can provide a set of node patterns to +-form a graph object. The ``famish()`` API used underneath for the pattern ++form a graph object. The ``fnmatch()`` API used underneath for the pattern + matching to include the required nodes. After the graph create any changes to + nodes or graph is not allowed. 
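As a companion to the ``fnmatch()`` correction above, a minimal sketch of creating a graph from node-name patterns; the pattern strings and graph name are illustrative and depend on which node libraries the application has registered:

.. code-block:: c

   #include <rte_common.h>
   #include <rte_memory.h>
   #include <rte_graph.h>

   static rte_graph_t
   make_graph(void)
   {
       /* fnmatch()-style patterns selecting the nodes to include. */
       const char *patterns[] = { "ethdev_rx-*", "ip4_*", "ethdev_tx-*" };
       struct rte_graph_param prm = {
           .socket_id = SOCKET_ID_ANY,
           .nb_node_patterns = RTE_DIM(patterns),
           .node_patterns = patterns,
       };

       /* As noted above, nodes and edges cannot change after creation. */
       return rte_graph_create("worker0", &prm);
   }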
+ +diff --git a/dpdk/doc/guides/prog_guide/img/cryptodev_sym_sess.svg b/dpdk/doc/guides/prog_guide/img/cryptodev_sym_sess.svg +deleted file mode 100644 +index 9b522458c8..0000000000 +--- a/dpdk/doc/guides/prog_guide/img/cryptodev_sym_sess.svg ++++ /dev/null +@@ -1,417 +0,0 @@ +- +- +- +-image/svg+xmlRounded Rectangle.12Crypto Symmetric SessionRounded Rectangle.13Private Session Data +- +- +-Rounded Rectangle.12Crypto Symmetric SessionCrypto Driver Private Session +-Crypto Symmetric Session +-uint16_t nb_drivers; +-struct { +-void *data; +-} session_data[]; +-uint16_t user_data_sz; +-user_data +-uint16_t refcnt; +-uint64_t opaque_data; +-Rounded Rectangle.13Private Session DataPrivate Session Data +-Rounded Rectangle.12Crypto Symmetric SessionCrypto Driver Private Session +-Rounded Rectangle.13Private Session DataPrivate Session Data +- +-... +- +\ No newline at end of file +diff --git a/dpdk/doc/guides/prog_guide/multi_proc_support.rst b/dpdk/doc/guides/prog_guide/multi_proc_support.rst +index 815e8bdc43..df234548a7 100644 +--- a/dpdk/doc/guides/prog_guide/multi_proc_support.rst ++++ b/dpdk/doc/guides/prog_guide/multi_proc_support.rst +@@ -107,15 +107,19 @@ Running Multiple Independent DPDK Applications + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + In addition to the above scenarios involving multiple DPDK processes working together, +-it is possible to run multiple DPDK processes side-by-side, ++it is possible to run multiple DPDK processes concurrently, + where those processes are all working independently. + Support for this usage scenario is provided using the ``--file-prefix`` parameter to the EAL. + +-By default, the EAL creates hugepage files on each hugetlbfs filesystem using the rtemap_X filename, ++The EAL puts shared runtime files in a directory based on standard conventions. ++If ``$RUNTIME_DIRECTORY`` is defined in the environment, ++it is used (as ``$RUNTIME_DIRECTORY/dpdk``). ++Otherwise, if DPDK is run as root user, it uses ``/var/run/dpdk`` ++or if run as non-root user then the ``/tmp/dpdk`` (or ``$XDG_RUNTIME_DIRECTORY/dpdk``) is used. ++Hugepage files on each hugetlbfs filesystem use the ``rtemap_X`` filename, + where X is in the range 0 to the maximum number of hugepages -1. +-Similarly, it creates shared configuration files, memory mapped in each process, using the /var/run/.rte_config filename, +-when run as root (or $HOME/.rte_config when run as a non-root user; +-if filesystem and device permissions are set up to allow this). ++Similarly, it creates shared configuration files, memory mapped in each process, ++using the ``.rte_config`` filename. + The rte part of the filenames of each of the above is configurable using the file-prefix parameter. + + In addition to specifying the file-prefix parameter, +diff --git a/dpdk/doc/guides/prog_guide/rte_flow.rst b/dpdk/doc/guides/prog_guide/rte_flow.rst +index 3e6242803d..d0b7833a2f 100644 +--- a/dpdk/doc/guides/prog_guide/rte_flow.rst ++++ b/dpdk/doc/guides/prog_guide/rte_flow.rst +@@ -148,14 +148,15 @@ Attribute: Group + Flow rules can be grouped by assigning them a common group number. Groups + allow a logical hierarchy of flow rule groups (tables) to be defined. These + groups can be supported virtually in the PMD or in the physical device. +-Group 0 is the default group and this is the only group which flows are +-guarantee to matched against, all subsequent groups can only be reached by +-way of the JUMP action from a matched flow rule. 
++Group 0 is the default group and is the only group that ++flows are guaranteed to be matched against. ++All subsequent groups can only be reached by using a JUMP action ++from a matched flow rule. + + Although optional, applications are encouraged to group similar rules as + much as possible to fully take advantage of hardware capabilities + (e.g. optimized matching) and work around limitations (e.g. a single pattern +-type possibly allowed in a given group), while being aware that the groups ++type possibly allowed in a given group), while being aware that the groups' + hierarchies must be programmed explicitly. + + Note that support for more than a single group is not guaranteed. +@@ -170,7 +171,7 @@ Priority levels are arbitrary and up to the application, they do + not need to be contiguous nor start from 0, however the maximum number + varies between devices and may be affected by existing flow rules. + +-A flow which matches multiple rules in the same group will always matched by ++A flow which matches multiple rules in the same group will always be matched by + the rule with the highest priority in that group. + + If a packet is matched by several rules of a given group for a given +@@ -1513,22 +1514,15 @@ rte_flow_flex_item_create() routine. + value and mask. + + Item: ``L2TPV2`` +-^^^^^^^^^^^^^^^^^^^ ++^^^^^^^^^^^^^^^^ + + Matches a L2TPv2 header. + +-- ``flags_version``: flags(12b), version(4b). +-- ``length``: total length of the message. +-- ``tunnel_id``: identifier for the control connection. +-- ``session_id``: identifier for a session within a tunnel. +-- ``ns``: sequence number for this date or control message. +-- ``nr``: sequence number expected in the next control message to be received. +-- ``offset_size``: offset of payload data. +-- ``offset_padding``: offset padding, variable length. ++- ``hdr``: header definition (``rte_l2tpv2.h``). + - Default ``mask`` matches flags_version only. + + Item: ``PPP`` +-^^^^^^^^^^^^^^^^^^^ ++^^^^^^^^^^^^^ + + Matches a PPP header. + +@@ -1748,12 +1742,12 @@ flow group/tables on the device, this action redirects the matched flow to + the specified group on that device. + + If a matched flow is redirected to a table which doesn't contain a matching +-rule for that flow then the behavior is undefined and the resulting behavior +-is up to the specific device. Best practice when using groups would be define ++rule for that flow, then the behavior is undefined and the resulting behavior ++is up to the specific device. Best practice when using groups would be to define + a default flow rule for each group which a defines the default actions in that + group so a consistent behavior is defined. + +-Defining an action for matched flow in a group to jump to a group which is ++Defining an action for a matched flow in a group to jump to a group which is + higher in the group hierarchy may not be supported by physical devices, + depending on how groups are mapped to the physical devices. In the + definitions of jump actions, applications should be aware that it may be +@@ -1925,8 +1919,8 @@ Also, regarding packet encapsulation ``level``: + level. + + - ``2`` and subsequent values request RSS to be performed on the specified +- inner packet encapsulation level, from outermost to innermost (lower to +- higher values). ++ inner packet encapsulation level, from outermost to innermost (lower to ++ higher values). + + Values other than ``0`` are not necessarily supported. 
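Following the JUMP/group best practice clarified earlier in this hunk, a sketch of a catch-all rule installed in a non-zero group so that jumped traffic has defined behavior; the port, queue index and priority value are illustrative, and priority semantics are device-specific:

.. code-block:: c

   #include <rte_flow.h>

   /* Default rule in group 1: anything not matched by more specific
    * rules in that group is sent to queue 0. */
   static struct rte_flow *
   add_default_rule(uint16_t port_id, struct rte_flow_error *error)
   {
       const struct rte_flow_attr attr = {
           .group = 1,
           .priority = 0xffff,  /* intended as lowest priority */
           .ingress = 1,
       };
       const struct rte_flow_item pattern[] = {
           { .type = RTE_FLOW_ITEM_TYPE_ETH },
           { .type = RTE_FLOW_ITEM_TYPE_END },
       };
       const struct rte_flow_action_queue queue = { .index = 0 };
       const struct rte_flow_action actions[] = {
           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
           { .type = RTE_FLOW_ACTION_TYPE_END },
       };

       return rte_flow_create(port_id, &attr, pattern, actions, error);
   }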
+ +@@ -2888,20 +2882,23 @@ The immediate value ``RTE_FLOW_FIELD_VALUE`` (or a pointer to it + ``RTE_FLOW_FIELD_START`` is used to point to the beginning of a packet. + See ``enum rte_flow_field_id`` for the list of supported fields. + +-``op`` selects the operation to perform on a destination field. ++``op`` selects the operation to perform on a destination field: ++ + - ``set`` copies the data from ``src`` field to ``dst`` field. + - ``add`` adds together ``dst`` and ``src`` and stores the result into ``dst``. +-- ``sub`` subtracts ``src`` from ``dst`` and stores the result into ``dst`` ++- ``sub`` subtracts ``src`` from ``dst`` and stores the result into ``dst``. + + ``width`` defines a number of bits to use from ``src`` field. + + ``level`` is used to access any packet field on any encapsulation level +-as well as any tag element in the tag array. +-- ``0`` means the default behaviour. Depending on the packet type, it can +-mean outermost, innermost or anything in between. ++as well as any tag element in the tag array: ++ ++- ``0`` means the default behaviour. Depending on the packet type, ++ it can mean outermost, innermost or anything in between. + - ``1`` requests access to the outermost packet encapsulation level. + - ``2`` and subsequent values requests access to the specified packet +-encapsulation level, from outermost to innermost (lower to higher values). ++ encapsulation level, from outermost to innermost (lower to higher values). ++ + For the tag array (in case of multiple tags are supported and present) + ``level`` translates directly into the array index. + +@@ -3609,6 +3606,7 @@ Asynchronous operations + ----------------------- + + Flow rules management can be done via special lockless flow management queues. ++ + - Queue operations are asynchronous and not thread-safe. + + - Operations can thus be invoked by the app's datapath, +diff --git a/dpdk/doc/guides/rawdevs/ntb.rst b/dpdk/doc/guides/rawdevs/ntb.rst +index 2bb115d13f..f8befc6594 100644 +--- a/dpdk/doc/guides/rawdevs/ntb.rst ++++ b/dpdk/doc/guides/rawdevs/ntb.rst +@@ -1,6 +1,8 @@ + .. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2018 Intel Corporation. + ++.. include:: ++ + NTB Rawdev Driver + ================= + +@@ -17,19 +19,23 @@ some information by using scratchpad registers. + BIOS setting on Intel Xeon + -------------------------- + +-Intel Non-transparent Bridge needs special BIOS setting. The reference for +-Skylake is https://www.intel.com/content/dam/support/us/en/documents/server-products/Intel_Xeon_Processor_Scalable_Family_BIOS_User_Guide.pdf +- +-- Set the needed PCIe port as NTB to NTB mode on both hosts. +-- Enable NTB bars and set bar size of bar 23 and bar 45 as 12-29 (4K-512M) +- on both hosts (for Ice Lake, bar size can be set as 12-51, namely 4K-128PB). +- Note that bar size on both hosts should be the same. +-- Disable split bars for both hosts. +-- Set crosslink control override as DSD/USP on one host, USD/DSP on +- another host. +-- Disable PCIe PII SSC (Spread Spectrum Clocking) for both hosts. This +- is a hardware requirement. +- ++Intel Non-transparent Bridge (NTB) needs special BIOS settings on both systems. ++Note that for 4th Generation Intel\ |reg| Xeon\ |reg| Scalable Processors, ++option ``Port Subsystem Mode`` should be changed from ``Gen5`` to ``Gen4 Only``, ++then reboot. ++ ++- Set ``Non-Transparent Bridge PCIe Port Definition`` for needed PCIe ports ++ as ``NTB to NTB`` mode, on both hosts. ++- Set ``Enable NTB BARs`` as ``Enabled``, on both hosts. 
++- Set ``Enable SPLIT BARs`` as ``Disabled``, on both hosts. ++- Set ``Imbar1 Size``, ``Imbar2 Size``, ``Embar1 Size`` and ``Embar2 Size``, ++ as 12-29 (i.e., 4K-512M) for 2nd Generation Intel\ |reg| Xeon\ |reg| Scalable Processors; ++ as 12-51 (i.e., 4K-128PB) for 3rd and 4th Generation Intel\ |reg| Xeon\ |reg| Scalable Processors. ++ Note that those bar sizes on both hosts should be the same. ++- Set ``Crosslink Control override`` as ``DSD/USP`` on one host, ++ ``USD/DSP`` on another host. ++- Set ``PCIe PLL SSC (Spread Spectrum Clocking)`` as ``Disabled``, on both hosts. ++ This is a hardware requirement when using Re-timer Cards. + + Device Setup + ------------ +@@ -145,4 +151,8 @@ like the following: + Limitation + ---------- + +-- This PMD only supports Intel Skylake and Ice Lake platforms. ++This PMD is only supported on Intel Xeon Platforms: ++ ++- 4th Generation Intel® Xeon® Scalable Processors. ++- 3rd Generation Intel® Xeon® Scalable Processors. ++- 2nd Generation Intel® Xeon® Scalable Processors. +diff --git a/dpdk/doc/guides/rel_notes/release_22_11.rst b/dpdk/doc/guides/rel_notes/release_22_11.rst +index 26e0560725..764017b5e5 100644 +--- a/dpdk/doc/guides/rel_notes/release_22_11.rst ++++ b/dpdk/doc/guides/rel_notes/release_22_11.rst +@@ -805,3 +805,722 @@ Tested Platforms + ~~~~~~~~~~~~~ + + * drivers: fix symbol exports when map is omitted ++ ++22.11.2 Release Notes ++--------------------- ++ ++ ++22.11.2 Fixes ++~~~~~~~~~~~~~ ++ ++* acl: fix crash on PPC64 with GCC 11 ++* app/bbdev: add allocation checks ++* app/bbdev: check statistics failure ++* app/bbdev: fix build with optional flag ++* app/bbdev: fix build with optional flag ++* app/compress-perf: fix remaining data for ops ++* app/compress-perf: fix some typos ++* app/compress-perf: fix testing single operation ++* app/crypto-perf: fix IPsec direction ++* app/crypto-perf: fix number of segments ++* app/crypto-perf: fix session freeing ++* app/crypto-perf: fix SPI zero ++* app/crypto-perf: fix test file memory leak ++* app/dumpcap: fix storing port identifier ++* app/flow-perf: fix division or module by zero ++* app/testpmd: cleanup cleanly from signal ++* app/testpmd: fix crash on cleanup ++* app/testpmd: fix encap/decap size calculation ++* app/testpmd: fix forwarding stats for Tx dropped ++* app/testpmd: fix interactive mode on Windows ++* app/testpmd: fix interactive mode with no ports ++* app/testpmd: fix link check condition on port start ++* app/testpmd: fix packet count in IEEE 1588 engine ++* app/testpmd: fix packet transmission in noisy VNF engine ++* app/testpmd: fix secondary process packet forwarding ++* app/testpmd: fix Tx preparation in checksum engine ++* baseband/acc: add explicit mbuf append for soft output ++* baseband/acc: fix acc100 iteration counter in TB ++* baseband/acc: fix acc100 queue mapping to 64 bits ++* baseband/acc: fix check after deref and dead code ++* baseband/acc: fix iteration counter in TB mode ++* baseband/acc: fix memory leak on acc100 close ++* baseband/acc: fix multiplexing acc100 operations ++* baseband/acc: prevent to dequeue more than requested ++* baseband/acc: protect from TB negative scenario ++* build: detect backtrace availability ++* build: fix dependencies lookup ++* build: fix toolchain definition ++* bus/fslmc: fix deadlock on MC send command timeout ++* bus/ifpga: fix devargs handling ++* cmdline: handle EOF as quit ++* cmdline: make rdline status not private ++* common/cnxk: add memory clobber to steor and ldeor ++* common/cnxk: fix aura ID handling ++* 
common/cnxk: fix auth key length ++* common/cnxk: fix channel mask for SDP interfaces ++* common/cnxk: fix dual VLAN parsing ++* common/cnxk: fix IPv6 extension header parsing ++* common/cnxk: fix IPv6 extension matching ++* common/cnxk: fix second pass flow rule layer type ++* common/cnxk: reduce channel count per LMAC ++* common/mlx5: fix offset of a field ++* common/mlx5: improve AES-XTS tweak capability check ++* common/mlx5: use just sufficient barrier for Arm ++* common/sfc_efx/base: add MAE mark reset action ++* compressdev: fix empty devargs parsing ++* compressdev: fix end of driver list ++* compress/mlx5: fix decompress xform validation ++* compress/mlx5: fix output Adler-32 checksum offset ++* compress/mlx5: fix queue setup for partial transformations ++* crypto/ccp: fix IOVA handling ++* crypto/ccp: fix PCI probing ++* crypto/ccp: remove some dead code for UIO ++* crypto/ccp: remove some printf ++* crypto/cnxk: fix digest for empty input data ++* cryptodev: fix empty devargs parsing ++* cryptodev: fix sym session mempool creation description ++* cryptodev: fix telemetry data truncation ++* crypto/ipsec_mb: fix ZUC-256 maximum tag length ++* crypto/ipsec_mb: relax multi-process requirement ++* crypto/ipsec_mb: remove unnecessary null check ++* crypto/openssl: fix freeing in RSA EVP ++* crypto/openssl: fix warning on copy length ++* crypto/qat: fix build ++* crypto/qat: fix build for generic x86 with GCC 12 ++* crypto/qat: fix SM3 auth mode ++* crypto/qat: fix stream cipher direction ++* devtools: fix escaped space in grep pattern ++* devtools: fix name check with mbox files ++* devtools: move mailmap check after patch applied ++* dma/ioat: fix device stop if no copies done ++* dma/ioat: fix error reporting on restart ++* dma/ioat: fix indexes after restart ++* dma/skeleton: fix empty devargs parsing ++* doc: add gpudev to the Doxygen index ++* doc: add Linux capability to access physical addresses ++* doc: fix code blocks in cryptodev guide ++* doc: fix DCF instructions in ice guide ++* doc: fix dependency setup in l2fwd-cat example guide ++* doc: fix description of L2TPV2 flow item ++* doc: fix firmware list in bnxt guide ++* doc: fix LPM support in l3forward guide ++* doc: fix pipeline example path in user guide ++* doc: fix reference to event timer header ++* drivers/bus: fix leak for devices without driver ++* drivers: fix symbol exports when map is omitted ++* eal: cleanup alarm and hotplug before memory detach ++* eal/freebsd: fix lock in alarm callback ++* eal/linux: fix hugetlbfs sub-directories discovery ++* eal/unix: fix thread creation ++* eal: use same atomic intrinsics for GCC and clang ++* eal/windows: fix pedantic build ++* eal/windows: fix thread creation ++* eal/windows: mark memory config as complete ++* ethdev: fix build with LTO ++* ethdev: fix telemetry data truncation ++* ethdev: remove telemetry Rx mbuf alloc failed field ++* event/cnxk: fix burst timer arm ++* event/cnxk: fix SSO cleanup ++* event/cnxk: fix timer operations in secondary process ++* event/cnxk: wait for CPT flow control on WQE path ++* eventdev/crypto: fix enqueue count ++* eventdev/crypto: fix failed events ++* eventdev/crypto: fix function symbol export ++* eventdev/crypto: fix offset used while flushing events ++* eventdev/crypto: fix overflow in circular buffer ++* eventdev/eth_rx: fix getting adapter instance ++* eventdev/eth_tx: fix devices loop ++* eventdev: fix memory size for telemetry ++* eventdev/timer: fix overflow ++* examples/cmdline: fix build with GCC 12 ++* 
examples/fips_validation: add extra space in JSON buffer ++* examples/fips_validation: fix AES-GCM tests ++* examples/fips_validation: fix AES-XTS sequence number ++* examples/fips_validation: fix integer parsing ++* examples/fips_validation: fix MCT output for SHA ++* examples/ipsec-secgw: fix auth IV length ++* examples/ipsec-secgw: fix offload variable init ++* examples/l2fwd-event: fix worker cleanup ++* examples/l3fwd: remove hash entry number ++* examples/qos_sched: fix config entries in wrong sections ++* examples/qos_sched: fix debug mode ++* examples/qos_sched: fix Tx port config when link down ++* fbarray: fix metadata dump ++* gpudev: export header file for external drivers ++* gpudev: fix deadlocks when registering callback ++* graph: fix node shrink ++* hash: fix GFNI implementation build with GCC 12 ++* kni: fix build on RHEL 9.1 ++* kni: fix possible starvation when mbufs are exhausted ++* kvargs: add API documentation for process callback ++* mem: fix heap ID in telemetry ++* mem: fix hugepage info mapping ++* mem: fix telemetry data truncation ++* mempool: fix telemetry data truncation ++* net/bnxt: fix link state change interrupt config ++* net/bnxt: fix RSS hash in mbuf ++* net/bnxt: fix Rx queue stats after queue stop and start ++* net/bnxt: fix Tx queue stats after queue stop and start ++* net/cnxk: fix deadlock in security session creation ++* net/cnxk: fix LBK BPID usage ++* net/cnxk: fix packet type for IPv6 packets post decryption ++* net/cnxk: validate RED threshold config ++* net/e1000: fix saving of stripped VLAN TCI ++* net/ena: fix deadlock in RSS RETA update ++* net/gve: fix offloading capability ++* net/hns3: add debug info for Rx/Tx dummy function ++* net/hns3: add verification of RSS types ++* net/hns3: allow adding queue buffer size hash rule ++* net/hns3: declare flow rule keeping capability ++* net/hns3: extract common functions to set Rx/Tx ++* net/hns3: extract common function to query device ++* net/hns3: fix burst mode query with dummy function ++* net/hns3: fix clearing RSS configuration ++* net/hns3: fix config struct used for conversion ++* net/hns3: fix duplicate RSS rule check ++* net/hns3: fix empty devargs parsing ++* net/hns3: fix inaccurate RTC time to read ++* net/hns3: fix log about indirection table size ++* net/hns3: fix possible truncation of hash key when config ++* net/hns3: fix possible truncation of redirection table ++* net/hns3: fix RSS key size compatibility ++* net/hns3: fix warning on flush or destroy rule ++* net/hns3: make getting Tx function static ++* net/hns3: refactor set RSS hash algorithm and key interface ++* net/hns3: reimplement hash flow function ++* net/hns3: remove debug condition for Tx prepare ++* net/hns3: remove useless code when destroy valid RSS rule ++* net/hns3: save hash algo to RSS filter list node ++* net/hns3: separate flow RSS config from RSS conf ++* net/hns3: separate setting and clearing RSS rule ++* net/hns3: separate setting hash algorithm ++* net/hns3: separate setting hash key ++* net/hns3: separate setting redirection table ++* net/hns3: separate setting RSS types ++* net/hns3: separate Tx prepare from getting Tx function ++* net/hns3: use hardware config to report hash key ++* net/hns3: use hardware config to report hash types ++* net/hns3: use hardware config to report redirection table ++* net/hns3: use new RSS rule to configure hardware ++* net/hns3: use RSS filter list to check duplicated rule ++* net/i40e: fix AVX512 fast-free path ++* net/i40e: fix MAC loopback on X722 ++* net/i40e: 
fix maximum frame size configuration ++* net/i40e: fix validation of flow transfer attribute ++* net/i40e: reduce interrupt interval in multi-driver mode ++* net/i40e: revert link status check on device start ++* net/iavf: add lock for VF commands ++* net/iavf: fix building data desc ++* net/iavf: fix device stop during reset ++* net/iavf: fix outer UDP checksum offload ++* net/iavf: fix VLAN offload with AVX2 ++* net/iavf: protect insertion in flow list ++* net/ice: fix Rx timestamp ++* net/ice: fix validation of flow transfer attribute ++* net/idpf: fix driver infos ++* net/idpf: fix mbuf leak in split Tx ++* net/idpf: reset queue flag when queue is stopped ++* net/ipn3ke: fix representor name ++* net/ipn3ke: fix thread exit ++* net/ixgbe: enable IPv6 mask in flow rules ++* net/ixgbe: fix firmware version consistency ++* net/ixgbe: fix IPv6 mask in flow director ++* net/mana: enable driver by default ++* net/mana: fix stats counters ++* net/mlx5: check compressed CQE opcode in vectorized Rx ++* net/mlx5: fix available tag registers calculation for HWS ++* net/mlx5: fix build with GCC 12 and ASan ++* net/mlx5: fix CQE dump for Tx ++* net/mlx5: fix crash on action template failure ++* net/mlx5: fix egress group translation in HWS ++* net/mlx5: fix error CQE dumping for vectorized Rx ++* net/mlx5: fix flow sample with ConnectX-5 ++* net/mlx5: fix GENEVE resource overwrite ++* net/mlx5: fix hairpin Tx queue reference count ++* net/mlx5: fix isolated mode if no representor matching ++* net/mlx5: fix read device clock in real time mode ++* net/mlx5: fix sysfs port name translation ++* net/mlx5: fix wait descriptor opcode for ConnectX-7 ++* net/mlx5: fix warning for Tx scheduling option ++* net/mlx5: fix Windows build with MinGW GCC 12 ++* net/mlx5/hws: fix error code of send queue action ++* net/mlx5/hws: fix IPv4 fragment matching ++* net/mlx5/hws: fix memory leak on general pool DB init ++* net/mlx5/hws: fix pattern creation ++* net/mlx5: ignore non-critical syndromes for Rx queue ++* net/nfp: fix 48-bit DMA support for NFDk ++* net/nfp: fix firmware name derived from PCI name ++* net/nfp: fix getting RSS configuration ++* net/nfp: fix max DMA length ++* net/nfp: fix MTU configuration order ++* net/nfp: fix offload of multiple output actions ++* net/nfp: fix set DSCP flow action ++* net/nfp: fix set IPv4 flow action ++* net/nfp: fix set IPv6 flow action ++* net/nfp: fix set MAC flow action ++* net/nfp: fix set TP flow action ++* net/nfp: fix set TTL flow action ++* net/nfp: fix teardown of flows sharing a mask ID ++* net/nfp: fix Tx packet drop for large data length ++* net/nfp: fix VNI of VXLAN encap action ++* net/nfp: restrict flow flush to the port ++* net/nfp: store counter reset before zeroing flow query ++* net/ngbe: add spinlock protection on YT PHY ++* net/ngbe: fix packet type to parse from offload flags ++* net/sfc: enforce fate action in transfer flow rules ++* net/sfc: export pick transfer proxy callback to representors ++* net/sfc: fix MAC address entry leak in transfer flow parsing ++* net/sfc: fix resetting mark in tunnel offload switch rules ++* net/sfc: invalidate switch port entry on representor unplug ++* net/txgbe: fix default signal quality value for KX/KX4 ++* net/txgbe: fix interrupt loss ++* net/txgbe: fix packet type to parse from offload flags ++* net/txgbe: fix Rx buffer size in config register ++* net/vhost: add missing newline in logs ++* net/vhost: fix leak in interrupt handle setup ++* net/vhost: fix Rx interrupt ++* net/virtio: deduce IP length for TSO 
checksum
++* net/virtio: fix empty devargs parsing
++* net/virtio: remove address width limit for modern devices
++* net/virtio-user: fix device starting failure handling
++* pdump: fix build with GCC 12
++* raw/ifpga/base: fix init with multi-process
++* raw/skeleton: fix empty devargs parsing
++* raw/skeleton: fix selftest
++* regex/mlx5: fix doorbell record
++* regex/mlx5: utilize all available queue pairs
++* reorder: fix sequence number mbuf field register
++* reorder: invalidate buffer from ready queue in drain
++* ring: silence GCC 12 warnings
++* sched: fix alignment of structs in subport
++* table: fix action selector group size log2 setting
++* telemetry: fix repeat display when callback don't init dict
++* telemetry: move include after guard
++* test/bbdev: extend HARQ tolerance
++* test/bbdev: fix crash for non supported HARQ length
++* test/bbdev: remove check for invalid opaque data
++* test/crypto: add missing MAC-I to PDCP vectors
++* test/crypto: fix capability check for ZUC cipher-auth
++* test/crypto: fix skip condition for CPU crypto SGL
++* test/crypto: fix statistics error messages
++* test/crypto: fix typo in AES test
++* test/crypto: fix ZUC digest length in comparison
++* test: fix segment length in packet generator
++* test/mbuf: fix mbuf reset test
++* test/mbuf: fix test with mbuf debug enabled
++* test/reorder: fix double free of drained buffers
++* vdpa/ifc: fix argument compatibility check
++* vdpa/ifc: fix reconnection in SW-assisted live migration
++* version: 22.11.2-rc1
++* vhost: decrease log level for unimplemented requests
++* vhost: fix net header settings in datapath
++* vhost: fix OOB access for invalid vhost ID
++* vhost: fix possible FD leaks
++* vhost: fix possible FD leaks on truncation
++* vhost: fix slot index in async split virtqueue Tx
++
++22.11.2 Validation
++~~~~~~~~~~~~~~~~~~
++
++* Intel(R) Testing
++
++  * Basic Intel(R) NIC testing
++
++    * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu20.04, Ubuntu22.04, Fedora35, Fedora37, RHEL8.6, RHEL8.4, FreeBSD13.1, SUSE15, CentOS7.9, openEuler22.03-SP1 etc.
++    * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++    * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++    * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
++    * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc.
++    * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
++
++  * Basic cryptodev and virtio testing
++
++    * Virtio: both function and performance test are covered. Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 7.0u3, etc.
++    * Cryptodev:
++
++      * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
++      * Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc.
++
++* Nvidia(R) Testing
++
++  * Basic functionality
++
++    * Tx/Rx, xstats, timestamps, link status, RTE flow, RSS, VLAN, checksum and TSO, ptype...
++    * link_status interrupt, l3fwd-power, multi-process.
++    * LRO, regEx, buffer split, Tx scheduling.
++
++  * Build tests
++
++    * Ubuntu 20.04.6 with MLNX_OFED_LINUX-5.9-0.5.6.0.
++ * Ubuntu 20.04.6 with rdma-core master (d2dbc88). ++ * Ubuntu 20.04.6 with rdma-core v28.0. ++ * Ubuntu 18.04.6 with rdma-core v17.1. ++ * Ubuntu 18.04.6 with rdma-core master (d2dbc88) (i386). ++ * Fedora 38 with rdma-core v44.0. ++ * Fedora 39 (Rawhide) with rdma-core v44.0. ++ * CentOS 7 7.9.2009 with rdma-core master (d2dbc88). ++ * CentOS 7 7.9.2009 with MLNX_OFED_LINUX-5.9-0.5.6.0. ++ * CentOS 8 8.4.2105 with rdma-core master (d2dbc88). ++ * OpenSUSE Leap 15.4 with rdma-core v38.1. ++ * Windows Server 2019 with Clang 11.0.0. ++ ++ * Test platform ++ ++ * NIC: ConnectX-5 / OS: Ubuntu 20.04 / Kernel: 6.3.0 / Driver: rdma-core v45.0 / Firmware: 16.35.2000 ++ * NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-5.9-0.5.6.0 / Firmware: 22.36.1010 ++ * NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-5.9-0.5.6.0 / Firmware: 22.36.1010 ++ * DPU: BlueField-2 / DOCA SW version: 1.5.1 / Firmware: 24.35.2000 ++ ++22.11.2 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++ ++ ++22.11.3 Release Notes ++--------------------- ++ ++ ++22.11.3 Fixes ++~~~~~~~~~~~~~ ++ ++* app/crypto-perf: fix socket ID default value ++* app/testpmd: fix checksum engine with GTP on 32-bit ++* app/testpmd: fix flow rule number parsing ++* app/testpmd: fix GTP L2 length in checksum engine ++* app/testpmd: fix meter mark handle update ++* app/testpmd: fix primary process not polling all queues ++* app/testpmd: revert primary process polling all queues fix ++* baseband/fpga_5gnr_fec: fix possible division by zero ++* baseband/fpga_5gnr_fec: fix starting unconfigured queue ++* build: fix warning when getting NUMA nodes ++* ci: fix build for Arm cross compilation in GHA ++* ci: fix libabigail cache in GHA ++* common/cnxk: fix CPT backpressure disable on LBK ++* common/cnxk: fix inline device VF identification ++* common/cnxk: fix IPsec IPv6 tunnel address byte swap ++* common/cnxk: fix receive queue with multiple mask ++* common/cnxk: fix setting channel mask for SDP interfaces ++* common/cnxk: fix uninitialized pointer read ++* common/iavf: fix MAC type for 710 NIC ++* common/idpf/base: fix control queue send and receive ++* common/idpf/base: fix ITR register definitions for AVF ++* common/idpf/base: fix memory leaks on control queue ++* common/idpf/base: fix parameters when send msg to cp ++* common/idpf: fix memory leak on AVX512 Tx queue close ++* common/idpf: remove device stop flag ++* common/mlx5: adjust fork call with new kernel API ++* common/mlx5: fix obtaining IB device in LAG mode ++* common/qat: detach crypto from compress build ++* common/qat: fix command parameter corruption ++* common/sfc_efx/base: fix Rx queue without RSS hash prefix ++* crypto/cnxk: fix IPsec CCM capabilities ++* cryptodev: clarify error codes for symmetric session ++* cryptodev: fix comments of modular operation parameters ++* cryptodev: fix device socket ID type ++* crypto/ipsec_mb: fix enqueue counter for SNOW3G ++* crypto/ipsec_mb: optimize allocation in session ++* crypto/openssl: fix memory free ++* crypto/openssl: fix memory leak in auth processing ++* crypto/openssl: skip workaround at compilation time ++* crypto/qat: fix null algorithm digest placement ++* crypto/qat: fix stack buffer overflow in SGL loop ++* crypto/qat: fix sym device prototype ++* crypto/scheduler: fix last element for valid args ++* devtools: fix bashism in mailmap check ++* devtools: fix mailmap check for parentheses ++* dma/dpaa2: set VFA bit for route-by-port with VF ++* doc: add flow template API requirements for mlx5 ++* doc: fix auth 
algos in cryptoperf app ++* doc: fix description of runtime directories ++* doc: fix event timer adapter guide ++* doc: fix format in flow API guide ++* doc: fix kernel patch link in hns3 guide ++* doc: fix link to flow capabilities from bnxt guide ++* doc: fix number of leading spaces in hns3 guide ++* doc: fix syntax in hns3 guide ++* doc: fix typo in cnxk platform guide ++* doc: fix typo in graph guide ++* doc: fix typos and wording in flow API guide ++* doc: improve wording of cuda guide ++* doc: remove warning with Doxygen 1.9.7 ++* doc: update BIOS settings and supported HW for NTB ++* eal: avoid calling cleanup twice ++* eal/linux: fix legacy mem init with many segments ++* eal/linux: fix secondary process crash for mp hotplug requests ++* eal/x86: improve multiple of 64 bytes memcpy performance ++* ethdev: check that at least one FEC mode is specified ++* ethdev: fix calloc arguments ++* ethdev: fix indirect action conversion ++* ethdev: fix MAC address occupies two entries ++* ethdev: fix potential leak in PCI probing helper ++* ethdev: update documentation for API to get FEC ++* ethdev: update documentation for API to set FEC ++* event/cnxk: fix mempool cookies check ++* event/cnxk: fix nanoseconds to ticks conversion ++* event/cnxk: fix setting attributes in empty get work ++* event/cnxk: fix Tx adapter data pointer ++* eventdev/timer: fix buffer flush ++* eventdev/timer: fix timeout event wait behavior ++* event/dsw: free rings on close ++* examples/fips_validation: fix digest length in AES-GCM ++* examples/fips_validation: fix external build ++* examples/ip_pipeline: fix build with GCC 13 ++* examples/ipsec-secgw: fix socket ID default value ++* examples/ipsec-secgw: fix TAP default MAC address ++* examples/ipsec-secgw: fix zero address in ethernet header ++* examples/l2fwd-cat: fix external build ++* examples/l3fwd: fix duplicate expression for default nexthop ++* examples/ntb: fix build with GCC 13 ++* fib: fix adding default route ++* hash: fix reading unaligned bits in Toeplitz hash ++* ipc: fix file descriptor leakage with unhandled messages ++* ipsec: fix NAT-T header length ++* kernel/freebsd: fix function parameter list ++* kni: fix build with Linux 6.3 ++* kni: fix build with Linux 6.5 ++* mbuf: fix Doxygen comment of distributor metadata ++* member: fix PRNG seed reset in NitroSketch mode ++* mem: fix memsegs exhausted message ++* mempool/cnxk: avoid hang when counting batch allocs ++* net/bonding: fix destroy dedicated queues flow ++* net/bonding: fix startup when NUMA is not supported ++* net/cnxk: fix cookies check with security offload ++* net/cnxk: fix flow queue index validation ++* net/cnxk: flush SQ before configuring MTU ++* net/dpaa2: fix checksum good flags ++* net/e1000: fix queue number initialization ++* net/e1000: fix Rx and Tx queue status ++* net: fix return type of IPv4 L4 packet checksum ++* net/hns3: delete duplicate macro definition ++* net/hns3: extract PTP to its own header file ++* net/hns3: fix build warning ++* net/hns3: fix device start return value ++* net/hns3: fix FEC mode check ++* net/hns3: fix FEC mode for 200G ports ++* net/hns3: fix IMP reset trigger ++* net/hns3: fix inaccurate log ++* net/hns3: fix index to look up table in NEON Rx ++* net/hns3: fix mbuf leakage when RxQ started after reset ++* net/hns3: fix mbuf leakage when RxQ started during reset ++* net/hns3: fix missing FEC capability ++* net/hns3: fix never set MAC flow control ++* net/hns3: fix non-zero weight for disabled TC ++* net/hns3: fix redundant line break in log 
++* net/hns3: fix RTC time after reset ++* net/hns3: fix RTC time on initialization ++* net/hns3: fix Rx multiple firmware reset interrupts ++* net/hns3: fix uninitialized variable ++* net/hns3: fix variable type mismatch ++* net/hns3: uninitialize PTP ++* net/i40e: fix comments ++* net/i40e: fix Rx data buffer size ++* net/i40e: fix tunnel packet Tx descriptor ++* net/iavf: fix abnormal disable HW interrupt ++* net/iavf: fix protocol agnostic offloading with big packets ++* net/iavf: fix Rx data buffer size ++* net/iavf: fix stop ordering ++* net/iavf: fix tunnel TSO path selection ++* net/iavf: fix virtchnl command called in interrupt ++* net/iavf: fix VLAN insertion in vector path ++* net/iavf: fix VLAN offload with AVX512 ++* net/iavf: release large VF when closing device ++* net/ice: adjust timestamp mbuf register ++* net/ice/base: fix incorrect defines for DCBx ++* net/ice/base: remove unreachable code ++* net/ice: fix 32-bit build ++* net/ice: fix DCF control thread crash ++* net/ice: fix DCF RSS initialization ++* net/ice: fix MAC type of E822 and E823 ++* net/ice: fix outer UDP checksum offload ++* net/ice: fix protocol agnostic offloading with big packets ++* net/ice: fix RSS hash key generation ++* net/ice: fix Rx data buffer size ++* net/ice: fix statistics ++* net/ice: fix timestamp enabling ++* net/ice: fix tunnel packet Tx descriptor ++* net/ice: fix VLAN mode parser ++* net/ice: initialize parser for double VLAN ++* net/idpf: fix Rx data buffer size ++* net/igc: fix Rx and Tx queue status ++* net/ixgbe: add proper memory barriers in Rx ++* net/ixgbe: fix Rx and Tx queue status ++* net/mana: avoid unnecessary assignments in data path ++* net/mana: fix counter overflow for posted WQE ++* net/mana: fix Tx queue statistics ++* net/mana: fix WQE count for ringing RQ doorbell ++* net/mana: optimize completion queue by batch processing ++* net/mana: return probing failure if no device found ++* net/mana: use datapath logging ++* net/mlx5: enhance error log for tunnel offloading ++* net/mlx5: fix device removal event handling ++* net/mlx5: fix drop action attribute validation ++* net/mlx5: fix drop action memory leak ++* net/mlx5: fix duplicated tag index matching in SWS ++* net/mlx5: fix error in VLAN actions creation ++* net/mlx5: fix error set for age pool initialization ++* net/mlx5: fix error set in control tables create ++* net/mlx5: fix error set in Tx representor tagging ++* net/mlx5: fix flow dump for modify field ++* net/mlx5: fix flow workspace destruction ++* net/mlx5: fix handle validation for meter mark ++* net/mlx5: fix LRO TCP checksum ++* net/mlx5: fix matcher layout size calculation ++* net/mlx5: fix MPRQ stride size for headroom ++* net/mlx5: fix profile check of meter mark ++* net/mlx5: fix query for NIC flow capability ++* net/mlx5: fix return value of vport action ++* net/mlx5: fix risk in NEON Rx descriptor read ++* net/mlx5: fix RSS expansion inner buffer overflow ++* net/mlx5: fix validation for conntrack indirect action ++* net/mlx5: fix VXLAN matching with zero value ++* net/mlx5: forbid duplicated tag index in pattern template ++* net/mlx5: forbid MPRQ restart ++* net/mlx5: reduce counter pool name length ++* net/netvsc: fix sizeof calculation ++* net/nfp: fix address always related with PF ID 0 ++* net/nfp: fix control mempool creation ++* net/nfp: fix disabling promiscuous mode ++* net/nfp: fix endian conversion for tunnel decap action ++* net/nfp: fix flow hash table creation ++* net/nfp: fix IPv6 address for set flow action ++* net/nfp: fix IPv6 flow 
item
++* net/nfp: fix offloading flows
++* net/nfp: fix representor creation
++* net/nfp: fix representor name too long
++* net/nfp: fix TOS of IPv6 GENEVE encap flow action
++* net/nfp: fix TOS of IPv6 NVGRE encap flow action
++* net/nfp: fix TOS of IPv6 VXLAN encap flow action
++* net/nfp: fix TP flow action for UDP
++* net/nfp: fix Tx descriptor free logic of NFD3
++* net/nfp: fix unneeded endian conversion
++* net/nfp: fix VLAN push flow action
++* net/nfp: fix VNI of IPv4 NVGRE encap action
++* net/nfp: fix VNI of IPv6 NVGRE encap action
++* net/nfp: fix VNI of VXLAN encap action
++* net/ngbe: adapt to MNG veto bit setting
++* net/ngbe: fix extended statistics
++* net/ngbe: fix link status in no LSC mode
++* net/ngbe: fix RSS offload capability
++* net/ngbe: remove redundant codes
++* net/qede: fix RSS indirection table initialization
++* net/sfc: invalidate dangling MAE flow action FW resource IDs
++* net/sfc: stop misuse of Rx ingress m-port metadata on EF100
++* net/tap: set locally administered bit for fixed MAC address
++* net/txgbe: adapt to MNG veto bit setting
++* net/txgbe/base: fix Tx with fiber hotplug
++* net/txgbe: fix blocking system events
++* net/txgbe: fix extended statistics
++* net/txgbe: fix interrupt enable mask
++* net/txgbe: fix to set autoneg for 1G speed
++* net/txgbe: fix use-after-free on remove
++* net/virtio: fix initialization to return negative errno
++* net/virtio: propagate interrupt configuration error values
++* net/virtio-user: fix leak when initialisation fails
++* net/vmxnet3: fix drop of empty segments in Tx
++* net/vmxnet3: fix return code in initializing
++* pci: fix comment referencing renamed function
++* pipeline: fix double free for table stats
++* raw/ntb: avoid disabling interrupt twice
++* Revert "net/iavf: fix tunnel TSO path selection"
++* ring: fix dequeue parameter name
++* ring: fix use after free
++* telemetry: fix autotest on Alpine
++* test: add graph tests
++* test/bonding: fix include of standard header
++* test/crypto: fix IPsec AES CCM vector
++* test/crypto: fix PDCP-SDAP test vectors
++* test/crypto: fix return value for SNOW3G
++* test/crypto: fix session creation check
++* test/malloc: fix missing free
++* test/malloc: fix statistics checks
++* test/mbuf: fix crash in a forked process
++* test/security: fix event inline IPsec reassembly tests
++* version: 22.11.3-rc1
++* vfio: fix include with musl runtime
++* vhost: fix invalid call FD handling
++* vhost: fix notification stats for packed ring
++
++22.11.3 Validation
++~~~~~~~~~~~~~~~~~~
++
++* Intel(R) Testing
++
++ * Basic Intel(R) NIC testing
++
++ * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu20.04, Ubuntu22.04, Fedora38, RHEL8.7, RHEL9.2, FreeBSD13.1, SUSE15, CentOS7.9, openEuler22.03-SP1, OpenAnolis8.8 etc.
++ * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++ * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++ * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
++ * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc.
++ * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
++
++ * Basic cryptodev and virtio testing
++
++ * Virtio: both functional and performance tests are covered. 
Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 8.0, etc.
++ * Cryptodev:
++
++ * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
++ * Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc.
++
++* Nvidia(R) Testing
++
++ * Basic functionality
++
++ * Tx/Rx, xstats, timestamps, link status, RTE flow, RSS, VLAN, checksum and TSO, ptype...
++ * link_status interrupt, l3fwd-power, multi-process.
++ * LRO, regEx, buffer split, Tx scheduling.
++
++ * Build tests
++
++ * Ubuntu 20.04.6 with MLNX_OFED_LINUX-23.04-1.1.3.0.
++ * Ubuntu 20.04.6 with rdma-core master (4cce53f).
++ * Ubuntu 20.04.6 with rdma-core v28.0.
++ * Ubuntu 18.04.6 with rdma-core master (4cce53f) (i386).
++ * Fedora 38 with rdma-core v44.0.
++ * Fedora 39 (Rawhide) with rdma-core v46.0.
++ * OpenSUSE Leap 15.5 with rdma-core v42.0.
++ * Windows Server 2019 with Clang 11.0.0.
++
++ * Test platform
++
++ * NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.07-0.5.0.0 / Firmware: 22.38.1002
++ * NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.07-0.5.0.0 / Firmware: 28.38.1002
++ * DPU: BlueField-2 / DOCA SW version: 2.2.0 / Firmware: 24.38.1002
++
++* Red Hat Testing
++
++ * Test scenarios
++
++ * Guest with device assignment(PF) throughput testing(1G hugepage size)
++ * Guest with device assignment(PF) throughput testing(2M hugepage size)
++ * Guest with device assignment(VF) throughput testing
++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
++ * PVP vhost-user 2Q throughput testing
++ * PVP vhost-user 1Q - cross numa node throughput testing
++ * Guest with vhost-user 2 queues throughput testing
++ * vhost-user reconnect with dpdk-client, qemu-server qemu reconnect
++ * vhost-user reconnect with dpdk-client, qemu-server ovs reconnect
++ * PVP reconnect with dpdk-client, qemu-server
++ * PVP 1Q live migration testing
++ * PVP 1Q cross numa node live migration testing
++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing
++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M)
++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing
++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing
++ * Host PF + DPDK testing
++ * Host VF + DPDK testing
++
++ * Version Information:
++
++ * RHEL9
++ * qemu-kvm-6.2.0 + qemu-kvm-7.2.0
++ * kernel 5.14
++ * X540-AT2 NIC(ixgbe, 10G)
++
++22.11.3 Known Issues
++~~~~~~~~~~~~~~~~~~~~
++
++* Intel(R) Testing
++
++ * Cryptodev: Performance drop for 1c1t scenario
+diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst
+index 3ada3575ba..51621b692f 100644
+--- a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst
++++ b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst
+@@ -50,13 +50,12 @@ Compiling the Application
+ * https://github.com/01org/intel-cmt-cat
+
+
+-#. To compile the application export the path to PQoS lib
+- and the DPDK source tree and go to the example directory:
++To compile the application, export the path to PQoS lib:
+
+- .. code-block:: console
+-
+- export PQOS_INSTALL_PATH=/path/to/libpqos
++.. code-block:: console
+
++ export CFLAGS=-I/path/to/intel-cmt-cat/include
++ export LDFLAGS=-L/path/to/intel-cmt-cat/lib
+
+ To compile the sample application see :doc:`compiling`. 
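The l2fwd-cat hunk above swaps the old PQOS_INSTALL_PATH variable for plain CFLAGS/LDFLAGS exports. A minimal sketch of the resulting build flow, assuming the PQoS library is built from the intel-cmt-cat sources linked earlier with a plain make, and with /path/to/intel-cmt-cat standing in for wherever that tree was cloned:

    # fetch and build the PQoS (intel-cmt-cat) library; paths are illustrative
    git clone https://github.com/01org/intel-cmt-cat
    make -C intel-cmt-cat
    # hand the headers and library to the sample build via the environment
    export CFLAGS=-I/path/to/intel-cmt-cat/include
    export LDFLAGS=-L/path/to/intel-cmt-cat/lib
    # then build l2fwd-cat as described in :doc:`compiling`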
+ +diff --git a/dpdk/doc/guides/sample_app_ug/l3_forward.rst b/dpdk/doc/guides/sample_app_ug/l3_forward.rst +index 94b22da01e..1cc2c1dd1d 100644 +--- a/dpdk/doc/guides/sample_app_ug/l3_forward.rst ++++ b/dpdk/doc/guides/sample_app_ug/l3_forward.rst +@@ -56,9 +56,8 @@ for the IPv4/IPv6 5-tuple syntax specifically. + The 5-tuple syntax consists of a source IP address, a destination IP address, + a source port, a destination port and a protocol identifier. + +-In the sample application, hash-based, FIB-based and ACL-based forwarding supports ++In the sample application, hash-based, LPM-based, FIB-based and ACL-based forwarding supports + both IPv4 and IPv6. +-LPM-based forwarding supports IPv4 only. + During the initialization phase route rules for IPv4 and IPv6 are read from rule files. + + Compiling the Application +diff --git a/dpdk/doc/guides/sample_app_ug/pipeline.rst b/dpdk/doc/guides/sample_app_ug/pipeline.rst +index 49d50136bc..7c86bf484a 100644 +--- a/dpdk/doc/guides/sample_app_ug/pipeline.rst ++++ b/dpdk/doc/guides/sample_app_ug/pipeline.rst +@@ -58,7 +58,7 @@ The following is an example command to run the application configured for the VX + + .. code-block:: console + +- $ .//examples/dpdk-pipeline -c 0x3 -- -s examples/vxlan.cli ++ $ .//examples/dpdk-pipeline -c 0x3 -- -s examples/pipeline/examples/vxlan.cli + + The application should start successfully and display as follows: + +diff --git a/dpdk/doc/guides/tools/cryptoperf.rst b/dpdk/doc/guides/tools/cryptoperf.rst +index c77e253417..f30784674d 100644 +--- a/dpdk/doc/guides/tools/cryptoperf.rst ++++ b/dpdk/doc/guides/tools/cryptoperf.rst +@@ -232,7 +232,6 @@ The following are the application command-line options: + Set authentication algorithm name, where ``name`` is one + of the following:: + +- 3des-cbc + aes-cbc-mac + aes-cmac + aes-gmac +diff --git a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +index ba8247d47e..7757db81fe 100644 +--- a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c ++++ b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +@@ -622,6 +622,7 @@ acc100_dev_close(struct rte_bbdev *dev) + rte_free(d->tail_ptrs); + rte_free(d->info_ring); + rte_free(d->sw_rings_base); ++ rte_free(d->harq_layout); + d->sw_rings_base = NULL; + d->tail_ptrs = NULL; + d->info_ring = NULL; +@@ -663,7 +664,7 @@ acc100_find_free_queue_idx(struct rte_bbdev *dev, + for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) { + if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) { + /* Mark the Queue as assigned */ +- d->q_assigned_bit_map[group_idx] |= (1 << aq_idx); ++ d->q_assigned_bit_map[group_idx] |= (1ULL << aq_idx); + /* Report the AQ Index */ + return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx; + } +@@ -3422,9 +3423,9 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data, + } + avail--; + enq = RTE_MIN(left, ACC_MUX_5GDL_DESC); +- if (check_mux(&ops[i], enq)) { +- ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i], +- desc_idx, enq); ++ enq = check_mux(&ops[i], enq); ++ if (enq > 1) { ++ ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i], desc_idx, enq); + if (ret < 0) { + acc_enqueue_invalid(q_data); + break; +@@ -4034,8 +4035,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, + /* CRC invalid if error exists */ + if (!op->status) + op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR; +- op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, +- op->turbo_dec.iter_count); ++ if (q->op_type == RTE_BBDEV_OP_LDPC_DEC) ++ op->ldpc_dec.iter_count = 
RTE_MAX((uint8_t) rsp.iter_cnt, ++ op->ldpc_dec.iter_count); ++ else ++ op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, ++ op->turbo_dec.iter_count); + + /* Check if this is the last desc in batch (Atomic Queue) */ + if (desc->req.last_desc_in_batch) { +@@ -4119,8 +4124,6 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op *op; + union acc_dma_desc *desc; + +- if (q == NULL) +- return 0; + #ifdef RTE_LIBRTE_BBDEV_DEBUG + if (unlikely(ops == 0)) + return 0; +diff --git a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c +index c5123cfef0..b25a83a588 100644 +--- a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c ++++ b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c +@@ -1848,6 +1848,9 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, + r = op->turbo_enc.tb_params.r; + + while (mbuf_total_left > 0 && r < c) { ++ if (unlikely((input == NULL) || (output == NULL))) ++ return -1; ++ + seg_total_left = rte_pktmbuf_data_len(input) - in_offset; + /* Set up DMA descriptor */ + desc = acc_desc(q, total_enqueued_cbs); +@@ -1882,6 +1885,10 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, + r++; + } + ++ /* In case the number of CB doesn't match, the configuration was invalid. */ ++ if (unlikely(current_enqueued_cbs != cbs_in_tb)) ++ return -1; ++ + /* Set SDone on last CB descriptor for TB mode. */ + desc->req.sdone_enable = 1; + +@@ -2079,6 +2086,10 @@ enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, + } + } + ++ if (op->ldpc_dec.soft_output.length > 0) ++ mbuf_append(op->ldpc_dec.soft_output.data, op->ldpc_dec.soft_output.data, ++ op->ldpc_dec.soft_output.length); ++ + #ifdef RTE_LIBRTE_BBDEV_DEBUG + rte_memdump(stderr, "FCW", &desc->req.fcw_ld, + sizeof(desc->req.fcw_ld) - 8); +@@ -2128,6 +2139,9 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, + } + + while (mbuf_total_left > 0 && r < c) { ++ if (unlikely((input == NULL) || (h_output == NULL))) ++ return -1; ++ + if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DEC_SCATTER_GATHER)) + seg_total_left = rte_pktmbuf_data_len(input) - in_offset; + else +@@ -2173,6 +2187,10 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, + r++; + } + ++ /* In case the number of CB doesn't match, the configuration was invalid. */ ++ if (unlikely(current_enqueued_cbs != cbs_in_tb)) ++ return -1; ++ + #ifdef RTE_LIBRTE_BBDEV_DEBUG + if (check_mbuf_total_left(mbuf_total_left) != 0) + return -EINVAL; +@@ -2215,6 +2233,8 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, + r = op->turbo_dec.tb_params.r; + + while (mbuf_total_left > 0 && r < c) { ++ if (unlikely((input == NULL) || (h_output == NULL))) ++ return -1; + + seg_total_left = rte_pktmbuf_data_len(input) - in_offset; + +@@ -2265,6 +2285,10 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, + r++; + } + ++ /* In case the number of CB doesn't match, the configuration was invalid. */ ++ if (unlikely(current_enqueued_cbs != cbs_in_tb)) ++ return -1; ++ + /* Set SDone on last CB descriptor for TB mode */ + desc->req.sdone_enable = 1; + +@@ -2636,7 +2660,8 @@ acc200_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data, + /* Dequeue one encode operations from ACC200 device in CB mode. 
*/ + static inline int + dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +- uint16_t *dequeued_ops, uint32_t *aq_dequeued, uint16_t *dequeued_descs) ++ uint16_t *dequeued_ops, uint32_t *aq_dequeued, uint16_t *dequeued_descs, ++ uint16_t max_requested_ops) + { + union acc_dma_desc *desc, atom_desc; + union acc_dma_rsp_desc rsp; +@@ -2649,6 +2674,9 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, + desc = q->ring_addr + desc_idx; + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); + ++ if (*dequeued_ops + desc->req.numCBs > max_requested_ops) ++ return -1; ++ + /* Check fdone bit. */ + if (!(atom_desc.rsp.val & ACC_FDONE)) + return -1; +@@ -2690,7 +2718,7 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, + static inline int + dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, + uint16_t *dequeued_ops, uint32_t *aq_dequeued, +- uint16_t *dequeued_descs) ++ uint16_t *dequeued_descs, uint16_t max_requested_ops) + { + union acc_dma_desc *desc, *last_desc, atom_desc; + union acc_dma_rsp_desc rsp; +@@ -2701,6 +2729,9 @@ dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, + desc = acc_desc_tail(q, *dequeued_descs); + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); + ++ if (*dequeued_ops + 1 > max_requested_ops) ++ return -1; ++ + /* Check fdone bit. */ + if (!(atom_desc.rsp.val & ACC_FDONE)) + return -1; +@@ -2864,7 +2895,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, + return 1; + } + +-/* Dequeue one decode operations from ACC200 device in TB mode. */ ++/* Dequeue one decode operations from device in TB mode for 4G or 5G. */ + static inline int + dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, + uint16_t dequeued_cbs, uint32_t *aq_dequeued) +@@ -2918,8 +2949,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, + /* CRC invalid if error exists. */ + if (!op->status) + op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR; +- op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, +- op->turbo_dec.iter_count); ++ if (q->op_type == RTE_BBDEV_OP_LDPC_DEC) ++ op->ldpc_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, ++ op->ldpc_dec.iter_count); ++ else ++ op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, ++ op->turbo_dec.iter_count); + + /* Check if this is the last desc in batch (Atomic Queue). */ + if (desc->req.last_desc_in_batch) { +@@ -2961,25 +2996,23 @@ acc200_dequeue_enc(struct rte_bbdev_queue_data *q_data, + + cbm = op->turbo_enc.code_block_mode; + +- for (i = 0; i < num; i++) { ++ for (i = 0; i < avail; i++) { + if (cbm == RTE_BBDEV_TRANSPORT_BLOCK) + ret = dequeue_enc_one_op_tb(q, &ops[dequeued_ops], + &dequeued_ops, &aq_dequeued, +- &dequeued_descs); ++ &dequeued_descs, num); + else + ret = dequeue_enc_one_op_cb(q, &ops[dequeued_ops], + &dequeued_ops, &aq_dequeued, +- &dequeued_descs); ++ &dequeued_descs, num); + if (ret < 0) + break; +- if (dequeued_ops >= num) +- break; + } + + q->aq_dequeued += aq_dequeued; + q->sw_ring_tail += dequeued_descs; + +- /* Update enqueue stats */ ++ /* Update enqueue stats. 
*/ + q_data->queue_stats.dequeued_count += dequeued_ops; + + return dequeued_ops; +@@ -3005,15 +3038,13 @@ acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, + if (cbm == RTE_BBDEV_TRANSPORT_BLOCK) + ret = dequeue_enc_one_op_tb(q, &ops[dequeued_ops], + &dequeued_ops, &aq_dequeued, +- &dequeued_descs); ++ &dequeued_descs, num); + else + ret = dequeue_enc_one_op_cb(q, &ops[dequeued_ops], + &dequeued_ops, &aq_dequeued, +- &dequeued_descs); ++ &dequeued_descs, num); + if (ret < 0) + break; +- if (dequeued_ops >= num) +- break; + } + + q->aq_dequeued += aq_dequeued; +diff --git a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c +index d520d5238f..0dfeba08e1 100644 +--- a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c ++++ b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c +@@ -569,17 +569,21 @@ static int + fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id) + { + struct fpga_5gnr_fec_device *d = dev->data->dev_private; ++ struct fpga_queue *q = dev->data->queues[queue_id].queue_private; ++ uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS + ++ (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx); ++ uint8_t enable = 0x01; ++ uint16_t zero = 0x0000; + #ifdef RTE_LIBRTE_BBDEV_DEBUG + if (d == NULL) { + rte_bbdev_log(ERR, "Invalid device pointer"); + return -1; + } + #endif +- struct fpga_queue *q = dev->data->queues[queue_id].queue_private; +- uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS + +- (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx); +- uint8_t enable = 0x01; +- uint16_t zero = 0x0000; ++ if (dev->data->queues[queue_id].queue_private == NULL) { ++ rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id); ++ return -1; ++ } + + /* Clear queue head and tail variables */ + q->tail = q->head_free_desc = 0; +@@ -887,9 +891,11 @@ check_desc_error(uint32_t error_code) { + static inline uint16_t + get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index) + { ++ uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c; + if (rv_index == 0) + return 0; +- uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c; ++ if (z_c == 0) ++ return 0; + if (n_cb == n) { + if (rv_index == 1) + return (bg == 1 ? 
K0_1_1 : K0_1_2) * z_c; +diff --git a/dpdk/drivers/baseband/turbo_sw/meson.build b/dpdk/drivers/baseband/turbo_sw/meson.build +index 417ec63394..aeb9a76f9e 100644 +--- a/dpdk/drivers/baseband/turbo_sw/meson.build ++++ b/dpdk/drivers/baseband/turbo_sw/meson.build +@@ -6,11 +6,11 @@ dep_turbo = dependency('flexran_sdk_turbo', required: false) + dep_dec5g = dependency('flexran_sdk_ldpc_decoder_5gnr', required: false) + + if dep_turbo.found() +- ext_deps += cc.find_library('libstdc++', required: true) +- ext_deps += cc.find_library('libirc', required: true) +- ext_deps += cc.find_library('libimf', required: true) +- ext_deps += cc.find_library('libipps', required: true) +- ext_deps += cc.find_library('libsvml', required: true) ++ ext_deps += cc.find_library('stdc++', required: true) ++ ext_deps += cc.find_library('irc', required: true) ++ ext_deps += cc.find_library('imf', required: true) ++ ext_deps += cc.find_library('ipps', required: true) ++ ext_deps += cc.find_library('svml', required: true) + ext_deps += dep_turbo + ext_deps += dependency('flexran_sdk_crc', required: true) + ext_deps += dependency('flexran_sdk_rate_matching', required: true) +diff --git a/dpdk/drivers/bus/fslmc/mc/mc_sys.c b/dpdk/drivers/bus/fslmc/mc/mc_sys.c +index ab9a074835..76fdcd5c8a 100644 +--- a/dpdk/drivers/bus/fslmc/mc/mc_sys.c ++++ b/dpdk/drivers/bus/fslmc/mc/mc_sys.c +@@ -77,8 +77,11 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) + total_time = rte_get_timer_cycles() - start_time; + } while (status == MC_CMD_STATUS_READY && total_time <= time_to_wait); + +- if (status == MC_CMD_STATUS_READY) ++ if (status == MC_CMD_STATUS_READY) { ++ rte_spinlock_unlock(&mc_portal_lock); ++ + return mc_status_to_error(MC_CMD_STATUS_TIMEOUT); ++ } + + /* Read the response back into the command buffer */ + mc_read_response(mc_io->regs, cmd); +diff --git a/dpdk/drivers/bus/ifpga/ifpga_bus.c b/dpdk/drivers/bus/ifpga/ifpga_bus.c +index bb943b58b5..07e316b38e 100644 +--- a/dpdk/drivers/bus/ifpga/ifpga_bus.c ++++ b/dpdk/drivers/bus/ifpga/ifpga_bus.c +@@ -135,6 +135,8 @@ ifpga_scan_one(struct rte_rawdev *rawdev, + goto end; + } + afu_pr_conf.pr_enable = 1; ++ strlcpy(afu_pr_conf.bs_path, path, ++ sizeof(afu_pr_conf.bs_path)); + } else { + afu_pr_conf.pr_enable = 0; + } +@@ -174,7 +176,6 @@ ifpga_scan_one(struct rte_rawdev *rawdev, + rawdev->dev_ops->dev_start(rawdev)) + goto end; + +- strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path)); + if (rawdev->dev_ops && + rawdev->dev_ops->firmware_load && + rawdev->dev_ops->firmware_load(rawdev, +diff --git a/dpdk/drivers/bus/pci/linux/pci_vfio.c b/dpdk/drivers/bus/pci/linux/pci_vfio.c +index fab3483d9f..fe83e1a04e 100644 +--- a/dpdk/drivers/bus/pci/linux/pci_vfio.c ++++ b/dpdk/drivers/bus/pci/linux/pci_vfio.c +@@ -2,6 +2,7 @@ + * Copyright(c) 2010-2014 Intel Corporation + */ + ++#include + #include + #include + #include +diff --git a/dpdk/drivers/bus/pci/pci_common.c b/dpdk/drivers/bus/pci/pci_common.c +index bc3a7f39fe..e32a9d517a 100644 +--- a/dpdk/drivers/bus/pci/pci_common.c ++++ b/dpdk/drivers/bus/pci/pci_common.c +@@ -448,7 +448,7 @@ pci_cleanup(void) + int ret = 0; + + if (drv == NULL || drv->remove == NULL) +- continue; ++ goto free; + + ret = drv->remove(dev); + if (ret < 0) { +@@ -458,6 +458,7 @@ pci_cleanup(void) + dev->driver = NULL; + dev->device.driver = NULL; + ++free: + /* free interrupt handles */ + rte_intr_instance_free(dev->intr_handle); + dev->intr_handle = NULL; +diff --git a/dpdk/drivers/bus/vdev/vdev.c b/dpdk/drivers/bus/vdev/vdev.c 
+index 41bc07dde7..7974b27295 100644 +--- a/dpdk/drivers/bus/vdev/vdev.c ++++ b/dpdk/drivers/bus/vdev/vdev.c +@@ -578,18 +578,19 @@ vdev_cleanup(void) + int ret = 0; + + if (dev->device.driver == NULL) +- continue; ++ goto free; + + drv = container_of(dev->device.driver, const struct rte_vdev_driver, driver); + + if (drv->remove == NULL) +- continue; ++ goto free; + + ret = drv->remove(dev); + if (ret < 0) + error = -1; + + dev->device.driver = NULL; ++free: + free(dev); + } + +diff --git a/dpdk/drivers/common/cnxk/cnxk_security.c b/dpdk/drivers/common/cnxk/cnxk_security.c +index 85105472a1..bdb5433d13 100644 +--- a/dpdk/drivers/common/cnxk/cnxk_security.c ++++ b/dpdk/drivers/common/cnxk/cnxk_security.c +@@ -274,6 +274,14 @@ ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa) + return size; + } + ++static void ++ot_ipsec_update_ipv6_addr_endianness(uint64_t *addr) ++{ ++ *addr = rte_be_to_cpu_64(*addr); ++ addr++; ++ *addr = rte_be_to_cpu_64(*addr); ++} ++ + static int + ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa, + struct rte_security_ipsec_xform *ipsec_xfrm) +@@ -310,6 +318,10 @@ ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa, + memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr, + sizeof(struct in6_addr)); + ++ /* IP Source and Dest are in LE/CPU endian */ ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr); ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr); ++ + break; + default: + return -EINVAL; +@@ -499,6 +511,10 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa, + memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr, + sizeof(struct in6_addr)); + ++ /* IP Source and Dest are in LE/CPU endian */ ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr); ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr); ++ + /* Outer header flow label source */ + if (!ipsec_xfrm->options.copy_flabel) { + sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = +diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c +index 59128a3552..33865f43fa 100644 +--- a/dpdk/drivers/common/cnxk/roc_dev.c ++++ b/dpdk/drivers/common/cnxk/roc_dev.c +@@ -969,6 +969,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) + case PCI_DEVID_CNXK_RVU_AF_VF: + case PCI_DEVID_CNXK_RVU_VF: + case PCI_DEVID_CNXK_RVU_SDP_VF: ++ case PCI_DEVID_CNXK_RVU_NIX_INL_VF: + dev->hwcap |= DEV_HWCAP_F_VF; + break; + } +diff --git a/dpdk/drivers/common/cnxk/roc_io.h b/dpdk/drivers/common/cnxk/roc_io.h +index 13f98ed549..45cbb4e587 100644 +--- a/dpdk/drivers/common/cnxk/roc_io.h ++++ b/dpdk/drivers/common/cnxk/roc_io.h +@@ -125,7 +125,8 @@ roc_lmt_submit_ldeor(plt_iova_t io_address) + + asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeor xzr, %x[rf], [%[rs]]" + : [rf] "=r"(result) +- : [rs] "r"(io_address)); ++ : [rs] "r"(io_address) ++ : "memory"); + return result; + } + +@@ -136,7 +137,8 @@ roc_lmt_submit_ldeorl(plt_iova_t io_address) + + asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeorl xzr,%x[rf],[%[rs]]" + : [rf] "=r"(result) +- : [rs] "r"(io_address)); ++ : [rs] "r"(io_address) ++ : "memory"); + return result; + } + +@@ -145,7 +147,8 @@ roc_lmt_submit_steor(uint64_t data, plt_iova_t io_address) + { + asm volatile(PLT_CPU_FEATURE_PREAMBLE + "steor %x[d], [%[rs]]" ::[d] "r"(data), +- [rs] "r"(io_address)); ++ [rs] "r"(io_address) ++ : "memory"); + } + + static __plt_always_inline void +@@ -153,7 +156,8 @@ roc_lmt_submit_steorl(uint64_t 
data, plt_iova_t io_address) + { + asm volatile(PLT_CPU_FEATURE_PREAMBLE + "steorl %x[d], [%[rs]]" ::[d] "r"(data), +- [rs] "r"(io_address)); ++ [rs] "r"(io_address) ++ : "memory"); + } + + static __plt_always_inline void +diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h +index 8b0384c737..fd9d3e73cd 100644 +--- a/dpdk/drivers/common/cnxk/roc_mbox.h ++++ b/dpdk/drivers/common/cnxk/roc_mbox.h +@@ -1169,7 +1169,7 @@ struct nix_bp_cfg_req { + * so maximum 256 channels are possible. + */ + #define NIX_MAX_CHAN 256 +-#define NIX_CGX_MAX_CHAN 16 ++#define NIX_CGX_MAX_CHAN 8 + #define NIX_LBK_MAX_CHAN 1 + struct nix_bp_cfg_rsp { + struct mbox_msghdr hdr; +diff --git a/dpdk/drivers/common/cnxk/roc_nix_fc.c b/dpdk/drivers/common/cnxk/roc_nix_fc.c +index 033e17a4bf..5e8a01c775 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_fc.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_fc.c +@@ -88,17 +88,6 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable) + req->chan_cnt = 1; + req->bpid_per_chan = 0; + +- rc = mbox_process_msg(mbox, (void *)&rsp); +- if (rc) +- goto exit; +- } else { +- req = mbox_alloc_msg_nix_cpt_bp_disable(mbox); +- if (req == NULL) +- return rc; +- req->chan_base = 0; +- req->chan_cnt = 1; +- req->bpid_per_chan = 0; +- + rc = mbox_process_msg(mbox, (void *)&rsp); + if (rc) + goto exit; +diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.c b/dpdk/drivers/common/cnxk/roc_nix_inl.c +index 782536db4c..92ff44888d 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_inl.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_inl.c +@@ -1039,7 +1039,7 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable) + return -EFAULT; + + if (roc_model_is_cn10kb_a0()) { +- rc = nix_inl_rq_mask_cfg(roc_nix, true); ++ rc = nix_inl_rq_mask_cfg(roc_nix, enable); + if (rc) { + plt_err("Failed to get rq mask rc=%d", rc); + return rc; +diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c b/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c +index c3d94dd0da..4ab4209dba 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c +@@ -265,7 +265,7 @@ nix_inl_sso_setup(struct nix_inl_dev *inl_dev) + } + + /* Setup xaq for hwgrps */ +- rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1); ++ rc = sso_hwgrp_alloc_xaq(dev, roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1); + if (rc) { + plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc); + goto destroy_pool; +diff --git a/dpdk/drivers/common/cnxk/roc_npa.h b/dpdk/drivers/common/cnxk/roc_npa.h +index fed1942404..46b668a310 100644 +--- a/dpdk/drivers/common/cnxk/roc_npa.h ++++ b/dpdk/drivers/common/cnxk/roc_npa.h +@@ -253,19 +253,23 @@ roc_npa_aura_batch_alloc_issue(uint64_t aura_handle, uint64_t *buf, + } + + static inline void +-roc_npa_batch_alloc_wait(uint64_t *cache_line) ++roc_npa_batch_alloc_wait(uint64_t *cache_line, unsigned int wait_us) + { ++ const uint64_t ticks = (uint64_t)wait_us * plt_tsc_hz() / (uint64_t)1E6; ++ const uint64_t start = plt_tsc_cycles(); ++ + /* Batch alloc status code is updated in bits [5:6] of the first word + * of the 128 byte cache line. 
+ */ + while (((__atomic_load_n(cache_line, __ATOMIC_RELAXED) >> 5) & 0x3) == + ALLOC_CCODE_INVAL) +- ; ++ if (wait_us && (plt_tsc_cycles() - start) >= ticks) ++ break; + } + + static inline unsigned int + roc_npa_aura_batch_alloc_count(uint64_t *aligned_buf, unsigned int num, +- unsigned int do_wait) ++ unsigned int wait_us) + { + unsigned int count, i; + +@@ -279,8 +283,7 @@ roc_npa_aura_batch_alloc_count(uint64_t *aligned_buf, unsigned int num, + + status = (struct npa_batch_alloc_status_s *)&aligned_buf[i]; + +- if (do_wait) +- roc_npa_batch_alloc_wait(&aligned_buf[i]); ++ roc_npa_batch_alloc_wait(&aligned_buf[i], wait_us); + + count += status->count; + } +@@ -305,7 +308,7 @@ roc_npa_aura_batch_alloc_extract(uint64_t *buf, uint64_t *aligned_buf, + + status = (struct npa_batch_alloc_status_s *)&aligned_buf[i]; + +- roc_npa_batch_alloc_wait(&aligned_buf[i]); ++ roc_npa_batch_alloc_wait(&aligned_buf[i], 0); + + line_count = status->count; + +diff --git a/dpdk/drivers/common/cnxk/roc_npc.c b/dpdk/drivers/common/cnxk/roc_npc.c +index b38389b18a..5e1ca6bc03 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc.c ++++ b/dpdk/drivers/common/cnxk/roc_npc.c +@@ -1242,12 +1242,39 @@ npc_vtag_action_program(struct roc_npc *roc_npc, + return 0; + } + ++static void ++roc_npc_sdp_channel_get(struct roc_npc *roc_npc, uint16_t *chan_base, uint16_t *chan_mask) ++{ ++ struct roc_nix *roc_nix = roc_npc->roc_nix; ++ struct nix *nix = roc_nix_to_nix_priv(roc_nix); ++ uint16_t num_chan, range, num_bits = 0; ++ uint16_t mask = 0; ++ ++ *chan_base = nix->rx_chan_base; ++ num_chan = nix->rx_chan_cnt - 1; ++ if (num_chan) { ++ range = *chan_base ^ (*chan_base + num_chan); ++ num_bits = (sizeof(uint32_t) * 8) - __builtin_clz(range) - 1; ++ /* Set mask for (15 - numbits) MSB bits */ ++ *chan_mask = (uint16_t)~GENMASK(num_bits, 0); ++ } else { ++ *chan_mask = (uint16_t)GENMASK(15, 0); ++ } ++ ++ mask = (uint16_t)GENMASK(num_bits, 0); ++ if (mask > num_chan + 1) ++ plt_warn( ++ "npc: SDP channel base:%x, channel count:%x. channel mask:%x covers more than channel count", ++ *chan_base, nix->rx_chan_cnt, *chan_mask); ++} ++ + struct roc_npc_flow * + roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, + const struct roc_npc_item_info pattern[], + const struct roc_npc_action actions[], int *errcode) + { + struct npc *npc = roc_npc_to_npc_priv(roc_npc); ++ uint16_t sdp_chan_base = 0, sdp_chan_mask = 0; + struct roc_npc_flow *flow, *flow_iter; + struct npc_parse_state parse_state; + struct npc_flow_list *list; +@@ -1260,11 +1287,9 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, + npc->sdp_channel = roc_npc->sdp_channel; + npc->sdp_channel_mask = roc_npc->sdp_channel_mask; + } else { +- /* By default set the channel and mask to cover +- * the whole SDP channel range. +- */ +- npc->sdp_channel = (uint16_t)NIX_CHAN_SDP_CH_START; +- npc->sdp_channel_mask = (uint16_t)NIX_CHAN_SDP_CH_START; ++ roc_npc_sdp_channel_get(roc_npc, &sdp_chan_base, &sdp_chan_mask); ++ npc->sdp_channel = sdp_chan_base; ++ npc->sdp_channel_mask = sdp_chan_mask; + } + } + +diff --git a/dpdk/drivers/common/cnxk/roc_npc.h b/dpdk/drivers/common/cnxk/roc_npc.h +index 1b4e5521cb..60f9c5d634 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc.h ++++ b/dpdk/drivers/common/cnxk/roc_npc.h +@@ -123,6 +123,17 @@ struct roc_ipv6_hdr { + uint8_t dst_addr[16]; /**< IP address of destination host(s). 
*/ + } __plt_packed; + ++struct roc_ipv6_fragment_ext { ++ uint8_t next_header; /**< Next header type */ ++ uint8_t reserved; /**< Reserved */ ++ uint16_t frag_data; /**< All fragmentation data */ ++ uint32_t id; /**< Packet ID */ ++} __plt_packed; ++ ++struct roc_flow_item_ipv6_ext { ++ uint8_t next_hdr; /**< Next header. */ ++}; ++ + struct roc_npc_flow_item_ipv6 { + struct roc_ipv6_hdr hdr; /**< IPv6 header definition. */ + uint32_t has_hop_ext : 1; +diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam.c b/dpdk/drivers/common/cnxk/roc_npc_mcam.c +index a725cabc57..3bf35cdf48 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc_mcam.c ++++ b/dpdk/drivers/common/cnxk/roc_npc_mcam.c +@@ -551,6 +551,8 @@ npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, + struct idev_cfg *idev; + uint16_t pf_func = 0; + uint16_t ctr = ~(0); ++ uint32_t la_offset; ++ uint64_t mask; + int rc, idx; + int entry; + +@@ -617,17 +619,42 @@ npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, + flow->npc_action &= ~(GENMASK(19, 4)); + flow->npc_action |= (uint64_t)pf_func << 4; + +- npc_mcam_set_channel(flow, req, inl_dev->channel, +- inl_dev->chan_mask, false); ++ npc_mcam_set_channel(flow, req, inl_dev->channel, inl_dev->chan_mask, ++ false); + } else if (npc->is_sdp_link) { +- npc_mcam_set_channel(flow, req, npc->sdp_channel, +- npc->sdp_channel_mask, ++ npc_mcam_set_channel(flow, req, npc->sdp_channel, npc->sdp_channel_mask, + pst->is_second_pass_rule); + } else { +- npc_mcam_set_channel(flow, req, npc->channel, +- (BIT_ULL(12) - 1), ++ npc_mcam_set_channel(flow, req, npc->channel, (BIT_ULL(12) - 1), + pst->is_second_pass_rule); + } ++ /* ++ * For second pass rule, set LA LTYPE to CPT_HDR. ++ * For all other rules, set LA LTYPE to match both 1st pass and 2nd pass ltypes. ++ */ ++ if (pst->is_second_pass_rule || (!pst->is_second_pass_rule && pst->has_eth_type)) { ++ la_offset = __builtin_popcount(npc->keyx_supp_nmask[flow->nix_intf] & ++ ((1ULL << 9 /* LA offset */) - 1)); ++ la_offset *= 4; ++ ++ mask = ~((0xfULL << la_offset)); ++ req->entry_data.kw[0] &= mask; ++ req->entry_data.kw_mask[0] &= mask; ++ flow->mcam_data[0] &= mask; ++ flow->mcam_mask[0] &= mask; ++ if (pst->is_second_pass_rule) { ++ req->entry_data.kw[0] |= ((uint64_t)NPC_LT_LA_CPT_HDR) << la_offset; ++ req->entry_data.kw_mask[0] |= (0xFULL << la_offset); ++ flow->mcam_data[0] |= ((uint64_t)NPC_LT_LA_CPT_HDR) << la_offset; ++ flow->mcam_mask[0] |= (0xFULL << la_offset); ++ } else { ++ /* Mask ltype ETHER (0x2) and CPT_HDR (0xa) */ ++ req->entry_data.kw[0] |= (0x2ULL << la_offset); ++ req->entry_data.kw_mask[0] |= (0x7ULL << la_offset); ++ flow->mcam_data[0] |= (0x2ULL << la_offset); ++ flow->mcam_mask[0] |= (0x7ULL << la_offset); ++ } ++ } + } else { + uint16_t pf_func = (flow->npc_action >> 4) & 0xffff; + +@@ -701,15 +728,16 @@ npc_set_ipv6ext_ltype_mask(struct npc_parse_state *pst) + * because for AH and ESP, LC LFLAG is zero and we don't want to match + * zero in LFLAG. 
+ */ +- lcflag_offset = +- __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] & +- ((1ULL << NPC_LFLAG_LC_OFFSET) - 1)); +- lcflag_offset *= 4; +- +- mask = (0xfULL << lcflag_offset); +- val = pst->flow->mcam_data[0] & mask; +- if (val) +- pst->flow->mcam_mask[0] |= mask; ++ if (pst->npc->keyx_supp_nmask[pst->nix_intf] & (1ULL << NPC_LFLAG_LC_OFFSET)) { ++ lcflag_offset = __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] & ++ ((1ULL << NPC_LFLAG_LC_OFFSET) - 1)); ++ lcflag_offset *= 4; ++ ++ mask = (0xfULL << lcflag_offset); ++ val = pst->flow->mcam_data[0] & mask; ++ if (val) ++ pst->flow->mcam_mask[0] |= mask; ++ } + } + + int +diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c +index fe57811a84..cc1599ef33 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c ++++ b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c +@@ -69,8 +69,10 @@ static const char *const ltype_str[NPC_MAX_LID][NPC_MAX_LT] = { + [NPC_LID_LA][NPC_LT_LA_IH_NIX_ETHER] = "LA_IH_NIX_ETHER", + [NPC_LID_LA][NPC_LT_LA_HIGIG2_ETHER] = "LA_HIGIG2_ETHER", + [NPC_LID_LA][NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = "LA_IH_NIX_HIGIG2_ETHER", +- [NPC_LID_LA][NPC_LT_LA_CUSTOM_PRE_L2_ETHER] = +- "NPC_LT_LA_CUSTOM_PRE_L2_ETHER", ++ [NPC_LID_LA][NPC_LT_LA_CUSTOM_L2_90B_ETHER] = "LA_CUSTOM_L2_90B_ETHER", ++ [NPC_LID_LA][NPC_LT_LA_CPT_HDR] = "LA_CPT_HDR", ++ [NPC_LID_LA][NPC_LT_LA_CUSTOM_L2_24B_ETHER] = "LA_CUSTOM_L2_24B_ETHER", ++ [NPC_LID_LA][NPC_LT_LA_CUSTOM_PRE_L2_ETHER] = "NPC_LT_LA_CUSTOM_PRE_L2_ETHER", + [NPC_LID_LB][0] = "NONE", + [NPC_LID_LB][NPC_LT_LB_CTAG] = "LB_CTAG", + [NPC_LID_LB][NPC_LT_LB_STAG_QINQ] = "LB_STAG_QINQ", +diff --git a/dpdk/drivers/common/cnxk/roc_npc_parse.c b/dpdk/drivers/common/cnxk/roc_npc_parse.c +index ff00c746d6..e695b755d7 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc_parse.c ++++ b/dpdk/drivers/common/cnxk/roc_npc_parse.c +@@ -97,6 +97,7 @@ npc_parse_pre_l2(struct npc_parse_state *pst) + (const struct roc_npc_flow_item_raw *)pst->pattern->mask, &info, + raw_spec_buf, raw_mask_buf); + ++ info.def_mask = NULL; + info.hw_mask = &hw_mask; + npc_get_hw_supp_mask(pst, &info, lid, lt); + +@@ -193,6 +194,7 @@ npc_parse_la(struct npc_parse_state *pst) + if (pst->pattern->type != ROC_NPC_ITEM_TYPE_ETH) + return 0; + ++ pst->has_eth_type = true; + eth_item = pst->pattern->spec; + + lid = NPC_LID_LA; +@@ -238,10 +240,185 @@ npc_parse_la(struct npc_parse_state *pst) + + #define NPC_MAX_SUPPORTED_VLANS 3 + ++static int ++npc_parse_vlan_count(const struct roc_npc_item_info *pattern, ++ const struct roc_npc_item_info **pattern_list, ++ const struct roc_npc_flow_item_vlan **vlan_items, int *vlan_count) ++{ ++ *vlan_count = 0; ++ while (pattern->type == ROC_NPC_ITEM_TYPE_VLAN) { ++ if (*vlan_count > NPC_MAX_SUPPORTED_VLANS - 1) ++ return NPC_ERR_PATTERN_NOTSUP; ++ ++ /* Don't support ranges */ ++ if (pattern->last != NULL) ++ return NPC_ERR_INVALID_RANGE; ++ ++ /* If spec is NULL, both mask and last must be NULL, this ++ * makes it to match ANY value (eq to mask = 0). 
++ * Setting either mask or last without spec is an error ++ */ ++ if (pattern->spec == NULL) { ++ if (pattern->last != NULL && pattern->mask != NULL) ++ return NPC_ERR_INVALID_SPEC; ++ } ++ ++ pattern_list[*vlan_count] = pattern; ++ vlan_items[*vlan_count] = pattern->spec; ++ (*vlan_count)++; ++ ++ pattern++; ++ pattern = npc_parse_skip_void_and_any_items(pattern); ++ } ++ ++ return 0; ++} ++ ++static int ++npc_parse_vlan_ltype_get(struct npc_parse_state *pst, ++ const struct roc_npc_flow_item_vlan **vlan_item, int vlan_count, ++ int *ltype, int *lflags) ++{ ++ switch (vlan_count) { ++ case 1: ++ *ltype = NPC_LT_LB_CTAG; ++ if (vlan_item[0] && vlan_item[0]->has_more_vlan) ++ *ltype = NPC_LT_LB_STAG_QINQ; ++ break; ++ case 2: ++ if (vlan_item[1] && vlan_item[1]->has_more_vlan) { ++ if (!(pst->npc->keyx_supp_nmask[pst->nix_intf] & ++ 0x3ULL << NPC_LFLAG_LB_OFFSET)) ++ return NPC_ERR_PATTERN_NOTSUP; ++ ++ /* This lflag value will match either one of ++ * NPC_F_LB_L_WITH_STAG_STAG, ++ * NPC_F_LB_L_WITH_QINQ_CTAG, ++ * NPC_F_LB_L_WITH_QINQ_QINQ and ++ * NPC_F_LB_L_WITH_ITAG (0b0100 to 0b0111). For ++ * NPC_F_LB_L_WITH_ITAG, ltype is NPC_LT_LB_ETAG ++ * hence will not match. ++ */ ++ ++ *lflags = NPC_F_LB_L_WITH_QINQ_CTAG & NPC_F_LB_L_WITH_QINQ_QINQ & ++ NPC_F_LB_L_WITH_STAG_STAG; ++ } ++ *ltype = NPC_LT_LB_STAG_QINQ; ++ break; ++ case 3: ++ if (vlan_item[2] && vlan_item[2]->has_more_vlan) ++ return NPC_ERR_PATTERN_NOTSUP; ++ if (!(pst->npc->keyx_supp_nmask[pst->nix_intf] & 0x3ULL << NPC_LFLAG_LB_OFFSET)) ++ return NPC_ERR_PATTERN_NOTSUP; ++ *ltype = NPC_LT_LB_STAG_QINQ; ++ *lflags = NPC_F_STAG_STAG_CTAG; ++ break; ++ default: ++ return NPC_ERR_PATTERN_NOTSUP; ++ } ++ ++ return 0; ++} ++ ++static int ++npc_update_vlan_parse_state(struct npc_parse_state *pst, const struct roc_npc_item_info *pattern, ++ int lid, int lt, uint8_t lflags, int vlan_count) ++{ ++ uint8_t vlan_spec[NPC_MAX_SUPPORTED_VLANS * sizeof(struct roc_vlan_hdr)]; ++ uint8_t vlan_mask[NPC_MAX_SUPPORTED_VLANS * sizeof(struct roc_vlan_hdr)]; ++ int rc = 0, i, offset = NPC_TPID_LENGTH; ++ struct npc_parse_item_info parse_info; ++ char hw_mask[NPC_MAX_EXTRACT_HW_LEN]; ++ ++ memset(vlan_spec, 0, sizeof(struct roc_vlan_hdr) * NPC_MAX_SUPPORTED_VLANS); ++ memset(vlan_mask, 0, sizeof(struct roc_vlan_hdr) * NPC_MAX_SUPPORTED_VLANS); ++ memset(&parse_info, 0, sizeof(parse_info)); ++ ++ if (vlan_count > 2) ++ vlan_count = 2; ++ ++ for (i = 0; i < vlan_count; i++) { ++ if (pattern[i].spec) ++ memcpy(vlan_spec + offset, pattern[i].spec, sizeof(struct roc_vlan_hdr)); ++ if (pattern[i].mask) ++ memcpy(vlan_mask + offset, pattern[i].mask, sizeof(struct roc_vlan_hdr)); ++ ++ offset += 4; ++ } ++ ++ parse_info.def_mask = NULL; ++ parse_info.spec = vlan_spec; ++ parse_info.mask = vlan_mask; ++ parse_info.def_mask = NULL; ++ parse_info.hw_hdr_len = 0; ++ ++ lid = NPC_LID_LB; ++ parse_info.hw_mask = hw_mask; ++ ++ if (lt == NPC_LT_LB_CTAG) ++ parse_info.len = sizeof(struct roc_vlan_hdr) + NPC_TPID_LENGTH; ++ ++ if (lt == NPC_LT_LB_STAG_QINQ) ++ parse_info.len = sizeof(struct roc_vlan_hdr) * 2 + NPC_TPID_LENGTH; ++ ++ memset(hw_mask, 0, sizeof(hw_mask)); ++ ++ parse_info.hw_mask = &hw_mask; ++ npc_get_hw_supp_mask(pst, &parse_info, lid, lt); ++ ++ rc = npc_mask_is_supported(parse_info.mask, parse_info.hw_mask, parse_info.len); ++ if (!rc) ++ return NPC_ERR_INVALID_MASK; ++ ++ /* Point pattern to last item consumed */ ++ pst->pattern = pattern; ++ return npc_update_parse_state(pst, &parse_info, lid, lt, lflags); ++} ++ ++static int 
++npc_parse_lb_vlan(struct npc_parse_state *pst) ++{ ++ const struct roc_npc_flow_item_vlan *vlan_items[NPC_MAX_SUPPORTED_VLANS]; ++ const struct roc_npc_item_info *pattern_list[NPC_MAX_SUPPORTED_VLANS]; ++ const struct roc_npc_item_info *last_pattern; ++ int vlan_count = 0, rc = 0; ++ int lid, lt, lflags; ++ ++ lid = NPC_LID_LB; ++ lflags = 0; ++ last_pattern = pst->pattern; ++ ++ rc = npc_parse_vlan_count(pst->pattern, pattern_list, vlan_items, &vlan_count); ++ if (rc) ++ return rc; ++ ++ rc = npc_parse_vlan_ltype_get(pst, vlan_items, vlan_count, <, &lflags); ++ if (rc) ++ return rc; ++ ++ if (vlan_count == 3) { ++ if (pattern_list[2]->spec != NULL && pattern_list[2]->mask != NULL && ++ pattern_list[2]->last != NULL) ++ return NPC_ERR_PATTERN_NOTSUP; ++ ++ /* Matching can be done only for two tags. */ ++ vlan_count = 2; ++ last_pattern++; ++ } ++ ++ rc = npc_update_vlan_parse_state(pst, pattern_list[0], lid, lt, lflags, vlan_count); ++ if (rc) ++ return rc; ++ ++ if (vlan_count > 1) ++ pst->pattern = last_pattern + vlan_count; ++ ++ return 0; ++} ++ + int + npc_parse_lb(struct npc_parse_state *pst) + { +- const struct roc_npc_flow_item_vlan *vlan_item[NPC_MAX_SUPPORTED_VLANS]; + const struct roc_npc_item_info *pattern = pst->pattern; + const struct roc_npc_item_info *last_pattern; + const struct roc_npc_flow_item_raw *raw_spec; +@@ -250,7 +427,6 @@ npc_parse_lb(struct npc_parse_state *pst) + char hw_mask[NPC_MAX_EXTRACT_HW_LEN]; + struct npc_parse_item_info info; + int lid, lt, lflags, len = 0; +- int nr_vlans = 0; + int rc; + + info.def_mask = NULL; +@@ -267,68 +443,10 @@ npc_parse_lb(struct npc_parse_state *pst) + /* RTE vlan is either 802.1q or 802.1ad, + * this maps to either CTAG/STAG. We need to decide + * based on number of VLANS present. Matching is +- * supported on first tag only. ++ * supported on first two tags. + */ +- info.hw_mask = NULL; +- info.len = sizeof(vlan_item[0]->hdr); +- +- pattern = pst->pattern; +- while (pattern->type == ROC_NPC_ITEM_TYPE_VLAN) { +- if (nr_vlans > NPC_MAX_SUPPORTED_VLANS - 1) +- return NPC_ERR_PATTERN_NOTSUP; +- +- vlan_item[nr_vlans] = pattern->spec; +- nr_vlans++; +- +- /* Basic validation of Second/Third vlan item */ +- if (nr_vlans > 1) { +- rc = npc_parse_item_basic(pattern, &info); +- if (rc != 0) +- return rc; +- } +- last_pattern = pattern; +- pattern++; +- pattern = npc_parse_skip_void_and_any_items(pattern); +- } + +- switch (nr_vlans) { +- case 1: +- lt = NPC_LT_LB_CTAG; +- if (vlan_item[0] && vlan_item[0]->has_more_vlan) +- lt = NPC_LT_LB_STAG_QINQ; +- break; +- case 2: +- if (vlan_item[1] && vlan_item[1]->has_more_vlan) { +- if (!(pst->npc->keyx_supp_nmask[pst->nix_intf] & +- 0x3ULL << NPC_LFLAG_LB_OFFSET)) +- return NPC_ERR_PATTERN_NOTSUP; +- +- /* This lflag value will match either one of +- * NPC_F_LB_L_WITH_STAG_STAG, +- * NPC_F_LB_L_WITH_QINQ_CTAG, +- * NPC_F_LB_L_WITH_QINQ_QINQ and +- * NPC_F_LB_L_WITH_ITAG (0b0100 to 0b0111). For +- * NPC_F_LB_L_WITH_ITAG, ltype is NPC_LT_LB_ETAG +- * hence will not match. 
+- */ +- +- lflags = NPC_F_LB_L_WITH_QINQ_CTAG & +- NPC_F_LB_L_WITH_QINQ_QINQ & +- NPC_F_LB_L_WITH_STAG_STAG; +- } else { +- lflags = NPC_F_LB_L_WITH_CTAG; +- } +- lt = NPC_LT_LB_STAG_QINQ; +- break; +- case 3: +- if (vlan_item[2] && vlan_item[2]->has_more_vlan) +- return NPC_ERR_PATTERN_NOTSUP; +- lt = NPC_LT_LB_STAG_QINQ; +- lflags = NPC_F_STAG_STAG_CTAG; +- break; +- default: +- return NPC_ERR_PATTERN_NOTSUP; +- } ++ return npc_parse_lb_vlan(pst); + } else if (pst->pattern->type == ROC_NPC_ITEM_TYPE_E_TAG) { + /* we can support ETAG and match a subsequent CTAG + * without any matching support. +@@ -546,10 +664,125 @@ npc_handle_ipv6ext_attr(const struct roc_npc_flow_item_ipv6 *ipv6_spec, + return 0; + } + ++static int ++npc_process_ipv6_item(struct npc_parse_state *pst) ++{ ++ uint8_t ipv6_hdr_mask[sizeof(struct roc_ipv6_hdr) + sizeof(struct roc_ipv6_fragment_ext)]; ++ uint8_t ipv6_hdr_buf[sizeof(struct roc_ipv6_hdr) + sizeof(struct roc_ipv6_fragment_ext)]; ++ const struct roc_npc_flow_item_ipv6 *ipv6_spec, *ipv6_mask; ++ const struct roc_npc_item_info *pattern = pst->pattern; ++ int offset = 0, rc = 0, lid, item_count = 0; ++ struct npc_parse_item_info parse_info; ++ char hw_mask[NPC_MAX_EXTRACT_HW_LEN]; ++ uint8_t flags = 0, ltype; ++ ++ memset(ipv6_hdr_buf, 0, sizeof(ipv6_hdr_buf)); ++ memset(ipv6_hdr_mask, 0, sizeof(ipv6_hdr_mask)); ++ ++ ipv6_spec = pst->pattern->spec; ++ ipv6_mask = pst->pattern->mask; ++ ++ parse_info.def_mask = NULL; ++ parse_info.spec = ipv6_hdr_buf; ++ parse_info.mask = ipv6_hdr_mask; ++ parse_info.def_mask = NULL; ++ parse_info.hw_hdr_len = 0; ++ parse_info.len = sizeof(ipv6_spec->hdr); ++ ++ pst->set_ipv6ext_ltype_mask = true; ++ ++ lid = NPC_LID_LC; ++ ltype = NPC_LT_LC_IP6; ++ ++ if (pattern->type == ROC_NPC_ITEM_TYPE_IPV6) { ++ item_count++; ++ if (ipv6_spec) { ++ memcpy(ipv6_hdr_buf, &ipv6_spec->hdr, sizeof(struct roc_ipv6_hdr)); ++ rc = npc_handle_ipv6ext_attr(ipv6_spec, pst, &flags); ++ if (rc) ++ return rc; ++ } ++ if (ipv6_mask) ++ memcpy(ipv6_hdr_mask, &ipv6_mask->hdr, sizeof(struct roc_ipv6_hdr)); ++ } ++ ++ offset = sizeof(struct roc_ipv6_hdr); ++ ++ while (pattern->type != ROC_NPC_ITEM_TYPE_END) { ++ /* Don't support ranges */ ++ if (pattern->last != NULL) ++ return NPC_ERR_INVALID_RANGE; ++ ++ /* If spec is NULL, both mask and last must be NULL, this ++ * makes it to match ANY value (eq to mask = 0). ++ * Setting either mask or last without spec is ++ * an error ++ */ ++ if (pattern->spec == NULL) { ++ if (pattern->last != NULL && pattern->mask != NULL) ++ return NPC_ERR_INVALID_SPEC; ++ } ++ /* Either one ROC_NPC_ITEM_TYPE_IPV6_EXT or ++ * one ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT is supported ++ * following an ROC_NPC_ITEM_TYPE_IPV6 item. 
++ */ ++ if (pattern->type == ROC_NPC_ITEM_TYPE_IPV6_EXT) { ++ item_count++; ++ ltype = NPC_LT_LC_IP6_EXT; ++ parse_info.len = ++ sizeof(struct roc_ipv6_hdr) + sizeof(struct roc_flow_item_ipv6_ext); ++ if (pattern->spec) ++ memcpy(ipv6_hdr_buf + offset, pattern->spec, ++ sizeof(struct roc_flow_item_ipv6_ext)); ++ if (pattern->mask) ++ memcpy(ipv6_hdr_mask + offset, pattern->mask, ++ sizeof(struct roc_flow_item_ipv6_ext)); ++ break; ++ } else if (pattern->type == ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT) { ++ item_count++; ++ ltype = NPC_LT_LC_IP6_EXT; ++ flags = NPC_F_LC_U_IP6_FRAG; ++ parse_info.len = ++ sizeof(struct roc_ipv6_hdr) + sizeof(struct roc_ipv6_fragment_ext); ++ if (pattern->spec) ++ memcpy(ipv6_hdr_buf + offset, pattern->spec, ++ sizeof(struct roc_ipv6_fragment_ext)); ++ if (pattern->mask) ++ memcpy(ipv6_hdr_mask + offset, pattern->mask, ++ sizeof(struct roc_ipv6_fragment_ext)); ++ ++ break; ++ } ++ ++ pattern++; ++ pattern = npc_parse_skip_void_and_any_items(pattern); ++ } ++ ++ memset(hw_mask, 0, sizeof(hw_mask)); ++ ++ parse_info.hw_mask = &hw_mask; ++ npc_get_hw_supp_mask(pst, &parse_info, lid, ltype); ++ ++ rc = npc_mask_is_supported(parse_info.mask, parse_info.hw_mask, parse_info.len); ++ if (!rc) ++ return NPC_ERR_INVALID_MASK; ++ ++ rc = npc_update_parse_state(pst, &parse_info, lid, ltype, flags); ++ if (rc) ++ return rc; ++ ++ /* npc_update_parse_state() increments pattern once. ++ * Check if additional increment is required. ++ */ ++ if (item_count == 2) ++ pst->pattern++; ++ ++ return 0; ++} ++ + int + npc_parse_lc(struct npc_parse_state *pst) + { +- const struct roc_npc_flow_item_ipv6 *ipv6_spec; + const struct roc_npc_flow_item_raw *raw_spec; + uint8_t raw_spec_buf[NPC_MAX_RAW_ITEM_LEN]; + uint8_t raw_mask_buf[NPC_MAX_RAW_ITEM_LEN]; +@@ -574,32 +807,12 @@ npc_parse_lc(struct npc_parse_state *pst) + info.len = pst->pattern->size; + break; + case ROC_NPC_ITEM_TYPE_IPV6: +- ipv6_spec = pst->pattern->spec; +- lid = NPC_LID_LC; +- lt = NPC_LT_LC_IP6; +- if (ipv6_spec) { +- rc = npc_handle_ipv6ext_attr(ipv6_spec, pst, &flags); +- if (rc) +- return rc; +- } +- info.len = sizeof(ipv6_spec->hdr); +- break; +- case ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4: +- lt = NPC_LT_LC_ARP; +- info.len = pst->pattern->size; +- break; + case ROC_NPC_ITEM_TYPE_IPV6_EXT: +- lid = NPC_LID_LC; +- lt = NPC_LT_LC_IP6_EXT; +- info.len = pst->pattern->size; +- info.hw_hdr_len = 40; +- break; + case ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT: +- lid = NPC_LID_LC; +- lt = NPC_LT_LC_IP6_EXT; +- flags = NPC_F_LC_U_IP6_FRAG; ++ return npc_process_ipv6_item(pst); ++ case ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4: ++ lt = NPC_LT_LC_ARP; + info.len = pst->pattern->size; +- info.hw_hdr_len = 40; + break; + case ROC_NPC_ITEM_TYPE_L3_CUSTOM: + lt = NPC_LT_LC_CUSTOM0; +diff --git a/dpdk/drivers/common/cnxk/roc_npc_priv.h b/dpdk/drivers/common/cnxk/roc_npc_priv.h +index 1a597280d1..1de33932e7 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc_priv.h ++++ b/dpdk/drivers/common/cnxk/roc_npc_priv.h +@@ -77,6 +77,9 @@ + #define NPC_LFLAG_LC_OFFSET (NPC_LTYPE_OFFSET_START + 6) + #define NPC_LTYPE_LC_OFFSET (NPC_LTYPE_OFFSET_START + 8) + ++#define CN10K_SDP_CH_START 0x80 ++#define CN10K_SDP_CH_MASK 0xF80 ++ + struct npc_action_vtag_info { + uint16_t vlan_id; + uint16_t vlan_ethtype; +@@ -196,6 +199,7 @@ struct npc_parse_state { + bool set_vlan_ltype_mask; + bool set_ipv6ext_ltype_mask; + bool is_second_pass_rule; ++ bool has_eth_type; + }; + + enum npc_kpu_parser_flag { +@@ -416,17 +420,15 @@ int npc_mcam_alloc_entry(struct npc *npc, struct roc_npc_flow 
*mcam, + int npc_mcam_alloc_entries(struct npc *npc, int ref_mcam, int *alloc_entry, + int req_count, int prio, int *resp_count); + +-int npc_mcam_ena_dis_entry(struct npc *npc, struct roc_npc_flow *mcam, +- bool enable); ++int npc_mcam_ena_dis_entry(struct npc *npc, struct roc_npc_flow *mcam, bool enable); + int npc_mcam_write_entry(struct npc *npc, struct roc_npc_flow *mcam); + int npc_flow_enable_all_entries(struct npc *npc, bool enable); +-int npc_update_parse_state(struct npc_parse_state *pst, +- struct npc_parse_item_info *info, int lid, int lt, +- uint8_t flags); +-void npc_get_hw_supp_mask(struct npc_parse_state *pst, +- struct npc_parse_item_info *info, int lid, int lt); +-int npc_parse_item_basic(const struct roc_npc_item_info *item, +- struct npc_parse_item_info *info); ++int npc_update_parse_state(struct npc_parse_state *pst, struct npc_parse_item_info *info, int lid, ++ int lt, uint8_t flags); ++void npc_get_hw_supp_mask(struct npc_parse_state *pst, struct npc_parse_item_info *info, int lid, ++ int lt); ++int npc_mask_is_supported(const char *mask, const char *hw_mask, int len); ++int npc_parse_item_basic(const struct roc_npc_item_info *item, struct npc_parse_item_info *info); + int npc_parse_meta_items(struct npc_parse_state *pst); + int npc_parse_mark_item(struct npc_parse_state *pst); + int npc_parse_pre_l2(struct npc_parse_state *pst); +diff --git a/dpdk/drivers/common/cnxk/roc_npc_utils.c b/dpdk/drivers/common/cnxk/roc_npc_utils.c +index 8bdabc116d..fda3073cba 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc_utils.c ++++ b/dpdk/drivers/common/cnxk/roc_npc_utils.c +@@ -88,7 +88,7 @@ npc_get_hw_supp_mask(struct npc_parse_state *pst, + } + } + +-static inline int ++inline int + npc_mask_is_supported(const char *mask, const char *hw_mask, int len) + { + /* +diff --git a/dpdk/drivers/common/cnxk/roc_se.h b/dpdk/drivers/common/cnxk/roc_se.h +index c357c19c0b..5b0ddac42d 100644 +--- a/dpdk/drivers/common/cnxk/roc_se.h ++++ b/dpdk/drivers/common/cnxk/roc_se.h +@@ -316,16 +316,15 @@ struct roc_se_ctx { + uint64_t enc_cipher : 8; + uint64_t hash_type : 8; + uint64_t mac_len : 8; +- uint64_t auth_key_len : 8; ++ uint64_t auth_key_len : 16; + uint64_t fc_type : 4; + uint64_t hmac : 1; + uint64_t zsk_flags : 3; + uint64_t k_ecb : 1; + uint64_t pdcp_ci_alg : 2; + uint64_t pdcp_auth_alg : 2; +- uint16_t ciph_then_auth : 1; +- uint16_t auth_then_ciph : 1; +- uint64_t rsvd : 17; ++ uint64_t ciph_then_auth : 1; ++ uint64_t auth_then_ciph : 1; + union cpt_inst_w4 template_w4; + /* Below fields are accessed by hardware */ + union { +diff --git a/dpdk/drivers/common/iavf/iavf_common.c b/dpdk/drivers/common/iavf/iavf_common.c +index 855a0ab2f5..dc7662bc1b 100644 +--- a/dpdk/drivers/common/iavf/iavf_common.c ++++ b/dpdk/drivers/common/iavf/iavf_common.c +@@ -27,6 +27,8 @@ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw) + break; + case IAVF_DEV_ID_VF: + case IAVF_DEV_ID_VF_HV: ++ hw->mac.type = IAVF_MAC_XL710; ++ break; + case IAVF_DEV_ID_ADAPTIVE_VF: + hw->mac.type = IAVF_MAC_VF; + break; +diff --git a/dpdk/drivers/common/idpf/base/idpf_common.c b/dpdk/drivers/common/idpf/base/idpf_common.c +index 3a9fdb1878..de82c3458f 100644 +--- a/dpdk/drivers/common/idpf/base/idpf_common.c ++++ b/dpdk/drivers/common/idpf/base/idpf_common.c +@@ -130,6 +130,8 @@ int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size) + hw->mac.addr[4] = 0x03; + hw->mac.addr[5] = 0x14; + ++ idpf_free(hw, q_info); ++ + return 0; + } + +@@ -146,7 +148,7 @@ int idpf_init_hw(struct idpf_hw *hw, struct 
idpf_ctlq_size ctlq_size) + * is sent asynchronously, i.e. idpf_asq_send_command() does not wait for + * completion before returning. + */ +-int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode, ++int idpf_send_msg_to_cp(struct idpf_hw *hw, int v_opcode, + int v_retval, u8 *msg, u16 msglen) + { + struct idpf_ctlq_msg ctlq_msg = { 0 }; +@@ -219,6 +221,7 @@ bool idpf_check_asq_alive(struct idpf_hw *hw) + int idpf_clean_arq_element(struct idpf_hw *hw, + struct idpf_arq_event_info *e, u16 *pending) + { ++ struct idpf_dma_mem *dma_mem = NULL; + struct idpf_ctlq_msg msg = { 0 }; + int status; + u16 msg_data_len; +@@ -226,6 +229,8 @@ int idpf_clean_arq_element(struct idpf_hw *hw, + *pending = 1; + + status = idpf_ctlq_recv(hw->arq, pending, &msg); ++ if (status == -ENOMSG) ++ goto exit; + + /* ctlq_msg does not align to ctlq_desc, so copy relevant data here */ + e->desc.opcode = msg.opcode; +@@ -240,7 +245,14 @@ int idpf_clean_arq_element(struct idpf_hw *hw, + msg_data_len = msg.data_len; + idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, msg_data_len, + IDPF_DMA_TO_NONDMA); ++ dma_mem = msg.ctx.indirect.payload; ++ } else { ++ *pending = 0; + } ++ ++ status = idpf_ctlq_post_rx_buffs(hw, hw->arq, pending, &dma_mem); ++ ++exit: + return status; + } + +diff --git a/dpdk/drivers/common/idpf/base/idpf_controlq.c b/dpdk/drivers/common/idpf/base/idpf_controlq.c +index 3af81e5a64..8e4d3ee54f 100644 +--- a/dpdk/drivers/common/idpf/base/idpf_controlq.c ++++ b/dpdk/drivers/common/idpf/base/idpf_controlq.c +@@ -311,18 +311,14 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + + for (i = 0; i < num_q_msg; i++) { + struct idpf_ctlq_msg *msg = &q_msg[i]; +- u64 msg_cookie; + + desc = IDPF_CTLQ_DESC(cq, cq->next_to_use); + + desc->opcode = CPU_TO_LE16(msg->opcode); + desc->pfid_vfid = CPU_TO_LE16(msg->func_id); + +- msg_cookie = *(u64 *)&msg->cookie; +- desc->cookie_high = +- CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie)); +- desc->cookie_low = +- CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie)); ++ desc->cookie_high = CPU_TO_LE32(msg->cookie.mbx.chnl_opcode); ++ desc->cookie_low = CPU_TO_LE32(msg->cookie.mbx.chnl_retval); + + desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) << + IDPF_CTLQ_FLAG_HOST_ID_S); +@@ -620,8 +616,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + num_to_clean = *num_q_msg; + + for (i = 0; i < num_to_clean; i++) { +- u64 msg_cookie; +- + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + flags = LE16_TO_CPU(desc->flags); +@@ -639,10 +633,8 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + if (flags & IDPF_CTLQ_FLAG_ERR) + ret_code = -EBADMSG; + +- msg_cookie = (u64)LE32_TO_CPU(desc->cookie_high) << 32; +- msg_cookie |= (u64)LE32_TO_CPU(desc->cookie_low); +- idpf_memcpy(&q_msg[i].cookie, &msg_cookie, sizeof(u64), +- IDPF_NONDMA_TO_NONDMA); ++ q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high); ++ q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low); + + q_msg[i].opcode = LE16_TO_CPU(desc->opcode); + q_msg[i].data_len = LE16_TO_CPU(desc->datalen); +diff --git a/dpdk/drivers/common/idpf/base/idpf_lan_pf_regs.h b/dpdk/drivers/common/idpf/base/idpf_lan_pf_regs.h +index 3df2347bd7..7f731ec3d6 100644 +--- a/dpdk/drivers/common/idpf/base/idpf_lan_pf_regs.h ++++ b/dpdk/drivers/common/idpf/base/idpf_lan_pf_regs.h +@@ -77,8 +77,13 @@ + #define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S) + #define PF_GLINT_DYN_CTL_INTENA_MSK_S 31 + #define 
+-#define PF_GLINT_ITR_V2(_i, _reg_start) (((_i) * 4) + (_reg_start))
+-#define PF_GLINT_ITR(_i, _INT) (PF_GLINT_BASE + (((_i) + 1) * 4) + ((_INT) * 0x1000))
++/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is
++ * spacing b/w itrn registers of the same vector.
++ */
++#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
++	((_reg_start) + (((_ITR)) * (_itrn_indx_spacing)))
++/* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */
++#define PF_GLINT_ITR(_ITR, _INT) (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
+ #define PF_GLINT_ITR_MAX_INDEX 2
+ #define PF_GLINT_ITR_INTERVAL_S 0
+ #define PF_GLINT_ITR_INTERVAL_M MAKEMASK(0xFFF, PF_GLINT_ITR_INTERVAL_S)
+diff --git a/dpdk/drivers/common/idpf/base/idpf_lan_vf_regs.h b/dpdk/drivers/common/idpf/base/idpf_lan_vf_regs.h
+index 9cd4f757d9..13c5c5a7da 100644
+--- a/dpdk/drivers/common/idpf/base/idpf_lan_vf_regs.h
++++ b/dpdk/drivers/common/idpf/base/idpf_lan_vf_regs.h
+@@ -90,11 +90,18 @@
+ #define VF_INT_DYN_CTLN_WB_ON_ITR_M	BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S)
+ #define VF_INT_DYN_CTLN_INTENA_MSK_S	31
+ #define VF_INT_DYN_CTLN_INTENA_MSK_M	BIT(VF_INT_DYN_CTLN_INTENA_MSK_S)
+-#define VF_INT_ITR0(_i)	(0x00004C00 + ((_i) * 4))
+-#define VF_INT_ITRN_V2(_i, _reg_start)	((_reg_start) + (((_i)) * 4))
+-#define VF_INT_ITRN(_i, _INT)	(0x00002800 + ((_i) * 4) + ((_INT) * 0x40))
+-#define VF_INT_ITRN_64(_i, _INT)	(0x00002C00 + ((_i) * 4) + ((_INT) * 0x100))
+-#define VF_INT_ITRN_2K(_i, _INT)	(0x00072000 + ((_i) * 4) + ((_INT) * 0x100))
++/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is spacing
++ * b/w itrn registers of the same vector
++ */
++#define VF_INT_ITR0(_ITR)	(0x00004C00 + ((_ITR) * 4))
++#define VF_INT_ITRN_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
++	((_reg_start) + (((_ITR)) * (_itrn_indx_spacing)))
++/* For VF with 16 vector support, itrn_reg_spacing is 0x4 and itrn_indx_spacing is 0x40 */
++#define VF_INT_ITRN(_INT, _ITR)	(0x00002800 + ((_INT) * 4) + ((_ITR) * 0x40))
++/* For VF with 64 vector support, itrn_reg_spacing is 0x4 and itrn_indx_spacing is 0x100 */
++#define VF_INT_ITRN_64(_INT, _ITR)	(0x00002C00 + ((_INT) * 4) + ((_ITR) * 0x100))
++/* For VF with 2k vector support, itrn_reg_spacing is 0x4 and itrn_indx_spacing is 0x2000 */
++#define VF_INT_ITRN_2K(_INT, _ITR)	(0x00072000 + ((_INT) * 4) + ((_ITR) * 0x2000))
+ #define VF_INT_ITRN_MAX_INDEX 2
+ #define VF_INT_ITRN_INTERVAL_S 0
+ #define VF_INT_ITRN_INTERVAL_M MAKEMASK(0xFFF, VF_INT_ITRN_INTERVAL_S)
+diff --git a/dpdk/drivers/common/idpf/base/idpf_prototype.h b/dpdk/drivers/common/idpf/base/idpf_prototype.h
+index 529b62212d..3ce25e644d 100644
+--- a/dpdk/drivers/common/idpf/base/idpf_prototype.h
++++ b/dpdk/drivers/common/idpf/base/idpf_prototype.h
+@@ -40,6 +40,6 @@ int idpf_set_rss_key(struct idpf_hw *hw, u16 seid,
+ int idpf_set_mac_type(struct idpf_hw *hw);
+ 
+ int idpf_reset(struct idpf_hw *hw);
+-int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode,
++int idpf_send_msg_to_cp(struct idpf_hw *hw, int v_opcode,
+ 			int v_retval, u8 *msg, u16 msglen);
+ #endif /* _IDPF_PROTOTYPE_H_ */
+diff --git a/dpdk/drivers/common/mlx5/linux/meson.build b/dpdk/drivers/common/mlx5/linux/meson.build
+index 7e1575efc8..b13ae29844 100644
+--- a/dpdk/drivers/common/mlx5/linux/meson.build
++++ b/dpdk/drivers/common/mlx5/linux/meson.build
+@@ -217,6 +217,8 @@ has_sym_args = [
+             'ibv_import_device' ],
+         [ 'HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE', 'infiniband/mlx5dv.h',
+            'mlx5dv_dr_action_create_dest_root_table' ],
++        [ 'HAVE_IBV_FORK_UNNEEDED', 'infiniband/verbs.h',
++            'ibv_is_fork_initialized'],
+ ]
+ if libmtcr_ul_found
+     has_sym_args += [
+diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c
+index aafff60eeb..2ebb8ac8b6 100644
+--- a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c
++++ b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c
+@@ -555,7 +555,7 @@ mlx5_os_pd_prepare(struct mlx5_common_device *cdev)
+ }
+ 
+ static struct ibv_device *
+-mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
++mlx5_os_get_ibv_device(const struct rte_pci_device *pci_dev)
+ {
+ 	int n;
+ 	struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
+@@ -564,6 +564,8 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
+ 	uint8_t guid2[32] = {0};
+ 	int ret1, ret2 = -1;
+ 	struct rte_pci_addr paddr;
++	const struct rte_pci_addr *addr = &pci_dev->addr;
++	bool is_vf_dev = mlx5_dev_is_vf_pci(pci_dev);
+ 
+ 	if (ibv_list == NULL || !n) {
+ 		rte_errno = ENOSYS;
+@@ -579,11 +581,11 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
+ 		if (ret1 > 0)
+ 			ret2 = mlx5_get_device_guid(&paddr, guid2, sizeof(guid2));
+ 		/* Bond device can bond secondary PCIe */
+-		if ((strstr(ibv_list[n]->name, "bond") &&
+-		    ((ret1 > 0 && ret2 > 0 && !memcmp(guid1, guid2, sizeof(guid1))) ||
+-		    (addr->domain == paddr.domain && addr->bus == paddr.bus &&
+-		     addr->devid == paddr.devid))) ||
+-		    !rte_pci_addr_cmp(addr, &paddr)) {
++		if ((strstr(ibv_list[n]->name, "bond") && !is_vf_dev &&
++		     ((ret1 > 0 && ret2 > 0 && !memcmp(guid1, guid2, sizeof(guid1))) ||
++		      (addr->domain == paddr.domain && addr->bus == paddr.bus &&
++		       addr->devid == paddr.devid))) ||
++		    !rte_pci_addr_cmp(addr, &paddr)) {
+ 			ibv_match = ibv_list[n];
+ 			break;
+ 		}
+@@ -697,7 +699,7 @@ mlx5_os_get_ibv_dev(const struct rte_device *dev)
+ 	struct ibv_device *ibv;
+ 
+ 	if (mlx5_dev_is_pci(dev))
+-		ibv = mlx5_os_get_ibv_device(&RTE_DEV_TO_PCI_CONST(dev)->addr);
++		ibv = mlx5_os_get_ibv_device(RTE_DEV_TO_PCI_CONST(dev));
+ 	else
+ 		ibv = mlx5_get_aux_ibv_device(RTE_DEV_TO_AUXILIARY_CONST(dev));
+ 	if (ibv == NULL) {
+diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_glue.c b/dpdk/drivers/common/mlx5/linux/mlx5_glue.c
+index 702eb36b62..88b99fe029 100644
+--- a/dpdk/drivers/common/mlx5/linux/mlx5_glue.c
++++ b/dpdk/drivers/common/mlx5/linux/mlx5_glue.c
+@@ -19,6 +19,10 @@
+ static int
+ mlx5_glue_fork_init(void)
+ {
++#ifdef HAVE_IBV_FORK_UNNEEDED
++	if (ibv_is_fork_initialized() == IBV_FORK_UNNEEDED)
++		return 0; /* ibv_fork_init() not needed */
++#endif
+ 	return ibv_fork_init();
+ }
+ 
+diff --git a/dpdk/drivers/common/mlx5/meson.build b/dpdk/drivers/common/mlx5/meson.build
+index 60ccd95cbc..9dc809f192 100644
+--- a/dpdk/drivers/common/mlx5/meson.build
++++ b/dpdk/drivers/common/mlx5/meson.build
+@@ -1,9 +1,14 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # Copyright 2019 Mellanox Technologies, Ltd
+ 
+-if not (is_linux or (is_windows and is_ms_linker))
++if not (is_linux or is_windows)
+     build = false
+-    reason = 'only supported on Linux and Windows build with clang'
++    reason = 'only supported on Linux and Windows'
++    subdir_done()
++endif
++if is_windows and not is_ms_linker and not meson.is_cross_build()
++    build = false
++    reason = 'MinGW is supported only for cross-compilation test'
+     subdir_done()
+ endif
+ 
+diff --git a/dpdk/drivers/common/mlx5/mlx5_common.h b/dpdk/drivers/common/mlx5/mlx5_common.h
+index d6e91b5296..02b5d54363 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_common.h
++++ b/dpdk/drivers/common/mlx5/mlx5_common.h
+@@ -203,7 +203,12 @@ check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,
+ 
+ 	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
+ 		return MLX5_CQE_STATUS_HW_OWN;
+-	rte_io_rmb();
++
++	/* Prevent speculative reading of other fields in CQE until
++	 * CQE is valid.
++	 */
++	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
++
+ 	if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
+ 		     op_code == MLX5_CQE_REQ_ERR))
+ 		return MLX5_CQE_STATUS_ERR;
+@@ -221,6 +226,7 @@ check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,
+  * - 0 on success.
+  * - Negative value and rte_errno is set otherwise.
+  */
++__rte_internal
+ int mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size);
+ 
+ /*
+@@ -552,7 +558,7 @@ mlx5_dev_is_pci(const struct rte_device *dev);
+  */
+ __rte_internal
+ bool
+-mlx5_dev_is_vf_pci(struct rte_pci_device *pci_dev);
++mlx5_dev_is_vf_pci(const struct rte_pci_device *pci_dev);
+ 
+ __rte_internal
+ int
+diff --git a/dpdk/drivers/common/mlx5/mlx5_common_pci.c b/dpdk/drivers/common/mlx5/mlx5_common_pci.c
+index 73178ce0f3..fdf03f2a53 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_common_pci.c
++++ b/dpdk/drivers/common/mlx5/mlx5_common_pci.c
+@@ -109,7 +109,7 @@ mlx5_dev_is_pci(const struct rte_device *dev)
+ }
+ 
+ bool
+-mlx5_dev_is_vf_pci(struct rte_pci_device *pci_dev)
++mlx5_dev_is_vf_pci(const struct rte_pci_device *pci_dev)
+ {
+ 	switch (pci_dev->id.device_id) {
+ 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
+diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
+index 59cebb530f..5742f9e831 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
+@@ -1002,6 +1002,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 							 general_obj_types) &
+ 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
+ 	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
++	attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table);
+ 	attr->max_flow_counter_15_0 = MLX5_GET(cmd_hca_cap, hcattr,
+ 			max_flow_counter_15_0);
+ 	attr->max_flow_counter_31_16 = MLX5_GET(cmd_hca_cap, hcattr,
+@@ -1013,7 +1014,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
+ 	attr->flow_access_aso_opc_mod = MLX5_GET(cmd_hca_cap, hcattr,
+ 						 flow_access_aso_opc_mod);
+ 	if (attr->crypto) {
+-		attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts);
++		attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts) ||
++			MLX5_GET(cmd_hca_cap, hcattr, aes_xts_multi_block_be_tweak) ||
++			MLX5_GET(cmd_hca_cap, hcattr, aes_xts_single_block_le_tweak);
+ 		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+ 				MLX5_GET_HCA_CAP_OP_MOD_CRYPTO |
+ 				MLX5_HCA_CAP_OPMOD_GET_CUR);
+diff --git a/dpdk/drivers/common/mlx5/mlx5_prm.h b/dpdk/drivers/common/mlx5/mlx5_prm.h
+index 2b5c43ee6e..dab70b9469 100644
+--- a/dpdk/drivers/common/mlx5/mlx5_prm.h
++++ b/dpdk/drivers/common/mlx5/mlx5_prm.h
+@@ -1679,7 +1679,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
+ 	u8 log_min_hairpin_wq_data_sz[0x5];
+ 	u8 reserved_at_3e8[0x3];
+ 	u8 log_max_vlan_list[0x5];
+-	u8 reserved_at_3f0[0x3];
++	u8 reserved_at_3f0[0x1];
++	u8 aes_xts_single_block_le_tweak[1];
++	u8 aes_xts_multi_block_be_tweak[1];
+ 	u8 log_max_current_mc_list[0x5];
+ 	u8 reserved_at_3f8[0x3];
+ 	u8 log_max_current_uc_list[0x5];
+@@ -2121,10 +2123,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
+ 	u8 hairpin_sq_wqe_bb_size[0x5];
+ 	u8 hairpin_sq_wq_in_host_mem[0x1];
+ 	u8 hairpin_data_buffer_locked[0x1];
+-	u8 reserved_at_16a[0x36];
+-	u8 reserved_at_1a0[0xb];
++	u8 reserved_at_16a[0x16];
++	u8 reserved_at_180[0x20];
++	u8 reserved_at_1a0[0xa];
+ 	u8 format_select_dw_8_6_ext[0x1];
+-	u8 reserved_at_1ac[0x14];
++	u8 reserved_at_1ac[0x15];
+ 	u8 general_obj_types_127_64[0x40];
+ 	u8 reserved_at_200[0x53];
+ 	u8 flow_counter_bulk_log_max_alloc[0x5];
+@@ -3040,6 +3043,7 @@ struct mlx5_ifc_health_buffer_bits {
+ 	u8 ext_synd[0x10];
+ };
+ 
++/* HCA PCI BAR resource structure. */
+ struct mlx5_ifc_initial_seg_bits {
+ 	u8 fw_rev_minor[0x10];
+ 	u8 fw_rev_major[0x10];
+@@ -3067,7 +3071,9 @@ struct mlx5_ifc_initial_seg_bits {
+ 	u8 clear_int[0x1];
+ 	u8 health_syndrome[0x8];
+ 	u8 health_counter[0x18];
+-	u8 reserved_8[0x17fc0];
++	u8 reserved_8[0x160];
++	u8 real_time[0x40];
++	u8 reserved_9[0x17e20];
+ };
+ 
+ struct mlx5_ifc_create_cq_out_bits {
+diff --git a/dpdk/drivers/common/mlx5/version.map b/dpdk/drivers/common/mlx5/version.map
+index 4f72900519..03c8ce5593 100644
+--- a/dpdk/drivers/common/mlx5/version.map
++++ b/dpdk/drivers/common/mlx5/version.map
+@@ -14,6 +14,7 @@ INTERNAL {
+ 
+ 	mlx5_dev_is_pci;
+ 	mlx5_dev_is_vf_pci;
++	mlx5_dev_to_pci_str;
+ 	mlx5_dev_mempool_unregister;
+ 	mlx5_dev_mempool_subscribe;
+ 
+diff --git a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h
+index 3554e4a7ff..65da820c5e 100644
+--- a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h
++++ b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h
+@@ -2,8 +2,10 @@
+  * Copyright (C) Mellanox Technologies, Ltd. 2001-2020.
+  */
+ 
+-#ifndef __MLX5_WIN_DEFS_H__
+-#define __MLX5_WIN_DEFS_H__
++#ifndef MLX5_WIN_DEFS_H
++#define MLX5_WIN_DEFS_H
++
++#include 
+ 
+ enum {
+ 	MLX5_CQE_OWNER_MASK = 1,
+@@ -40,29 +42,29 @@ enum {
+ };
+ 
+ enum mlx5dv_cq_init_attr_mask {
+-	MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE = 1 << 0,
+-	MLX5DV_CQ_INIT_ATTR_MASK_FLAGS = 1 << 1,
+-	MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE = 1 << 2,
++	MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE = RTE_BIT32(0),
++	MLX5DV_CQ_INIT_ATTR_MASK_FLAG = RTE_BIT32(1),
++	MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE = RTE_BIT32(2),
+ };
+ 
+ enum mlx5dv_cqe_comp_res_format {
+-	MLX5DV_CQE_RES_FORMAT_HASH = 1 << 0,
+-	MLX5DV_CQE_RES_FORMAT_CSUM = 1 << 1,
+-	MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
++	MLX5DV_CQE_RES_FORMAT_HASH = RTE_BIT32(0),
++	MLX5DV_CQE_RES_FORMAT_CSUM = RTE_BIT32(1),
++	MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX = RTE_BIT32(2),
+ };
+ 
+ enum ibv_access_flags {
+-	IBV_ACCESS_LOCAL_WRITE = 1,
+-	IBV_ACCESS_REMOTE_WRITE = 1 << 1,
+-	IBV_ACCESS_REMOTE_READ = 1 << 2,
+-	IBV_ACCESS_REMOTE_ATOMIC = 1 << 3,
+-	IBV_ACCESS_MW_BIND = 1 << 4,
+-	IBV_ACCESS_ZERO_BASED = 1 << 5,
+-	IBV_ACCESS_ON_DEMAND = 1 << 6,
++	IBV_ACCESS_LOCAL_WRITE = RTE_BIT32(0),
++	IBV_ACCESS_REMOTE_WRITE = RTE_BIT32(1),
++	IBV_ACCESS_REMOTE_READ = RTE_BIT32(2),
++	IBV_ACCESS_REMOTE_ATOMIC = RTE_BIT32(3),
++	IBV_ACCESS_MW_BIND = RTE_BIT32(4),
++	IBV_ACCESS_ZERO_BASED = RTE_BIT32(5),
++	IBV_ACCESS_ON_DEMAND = RTE_BIT32(6),
+ };
+ 
+ enum mlx5_ib_uapi_devx_create_event_channel_flags {
+-	MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA = 1 << 0,
++	MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA = RTE_BIT32(0),
+ };
+ 
+ #define MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA \
+@@ -85,15 +87,15 @@ enum {
+ };
+ 
+ enum {
+-	MLX5_ETH_WQE_L3_CSUM = (1 << 6),
+-	MLX5_ETH_WQE_L4_CSUM = (1 << 7),
++	MLX5_ETH_WQE_L3_CSUM = RTE_BIT32(6),
++	MLX5_ETH_WQE_L4_CSUM = RTE_BIT32(7),
+ };
+ 
+ enum {
+-	MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
+-	MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+-	MLX5_WQE_CTRL_FENCE = 4 << 5,
+-	MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
++	MLX5_WQE_CTRL_SOLICITED = RTE_BIT32(1),
++	MLX5_WQE_CTRL_CQ_UPDATE = RTE_BIT32(3),
++	MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = RTE_BIT32(5),
++	MLX5_WQE_CTRL_FENCE = RTE_BIT32(7),
+ };
+ 
+ enum {
+@@ -101,6 +103,11 @@ enum {
+ 	MLX5_SEND_WQE_SHIFT = 6,
+ };
+ 
++/* Verbs headers do not support -pedantic. */
++#ifdef PEDANTIC
++#pragma GCC diagnostic ignored "-Wpedantic"
++#endif
++
+ /*
+  * RX Hash fields enable to set which incoming packet's field should
+  * participates in RX Hash. Each flag represent certain packet's field,
+@@ -110,18 +117,22 @@ enum {
+  * TCP and UDP flags can't be enabled together on the same QP.
+  */
+ enum ibv_rx_hash_fields {
+-	IBV_RX_HASH_SRC_IPV4 = 1 << 0,
+-	IBV_RX_HASH_DST_IPV4 = 1 << 1,
+-	IBV_RX_HASH_SRC_IPV6 = 1 << 2,
+-	IBV_RX_HASH_DST_IPV6 = 1 << 3,
+-	IBV_RX_HASH_SRC_PORT_TCP = 1 << 4,
+-	IBV_RX_HASH_DST_PORT_TCP = 1 << 5,
+-	IBV_RX_HASH_SRC_PORT_UDP = 1 << 6,
+-	IBV_RX_HASH_DST_PORT_UDP = 1 << 7,
+-	IBV_RX_HASH_IPSEC_SPI = 1 << 8,
+-	IBV_RX_HASH_INNER = (1 << 31),
++	IBV_RX_HASH_SRC_IPV4 = RTE_BIT32(0),
++	IBV_RX_HASH_DST_IPV4 = RTE_BIT32(1),
++	IBV_RX_HASH_SRC_IPV6 = RTE_BIT32(2),
++	IBV_RX_HASH_DST_IPV6 = RTE_BIT32(3),
++	IBV_RX_HASH_SRC_PORT_TCP = RTE_BIT32(4),
++	IBV_RX_HASH_DST_PORT_TCP = RTE_BIT32(5),
++	IBV_RX_HASH_SRC_PORT_UDP = RTE_BIT32(6),
++	IBV_RX_HASH_DST_PORT_UDP = RTE_BIT32(7),
++	IBV_RX_HASH_IPSEC_SPI = RTE_BIT32(8),
++	IBV_RX_HASH_INNER = RTE_BIT32(31),
+ };
+ 
++#ifdef PEDANTIC
++#pragma GCC diagnostic error "-Wpedantic"
++#endif
++
+ enum {
+ 	MLX5_RCV_DBR = 0,
+ 	MLX5_SND_DBR = 1,
+@@ -141,9 +152,9 @@ enum {
+ #endif
+ 
+ enum ibv_flow_flags {
+-	IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1 << 0,
+-	IBV_FLOW_ATTR_FLAGS_DONT_TRAP = 1 << 1,
+-	IBV_FLOW_ATTR_FLAGS_EGRESS = 1 << 2,
++	IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = RTE_BIT32(0),
++	IBV_FLOW_ATTR_FLAGS_DONT_TRAP = RTE_BIT32(1),
++	IBV_FLOW_ATTR_FLAGS_EGRESS = RTE_BIT32(2),
+ };
+ 
+ enum ibv_flow_attr_type {
+@@ -240,11 +251,11 @@ struct mlx5_wqe_data_seg {
+ 	rte_be64_t addr;
+ };
+ 
+-#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
+-#define IBV_DEVICE_RAW_IP_CSUM (1 << 26)
+-#define IBV_RAW_PACKET_CAP_CVLAN_STRIPPING (1 << 0)
+-#define IBV_RAW_PACKET_CAP_SCATTER_FCS (1 << 1)
+-#define IBV_QPT_RAW_PACKET 8
++#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP RTE_BIT32(4)
++#define IBV_DEVICE_RAW_IP_CSUM RTE_BIT32(26)
++#define IBV_RAW_PACKET_CAP_CVLAN_STRIPPING RTE_BIT32(0)
++#define IBV_RAW_PACKET_CAP_SCATTER_FCS RTE_BIT32(1)
++#define IBV_QPT_RAW_PACKET 8
+ 
+ enum {
+ 	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0x0,
+@@ -254,8 +265,9 @@ enum {
+ };
+ 
+ enum {
+-	MLX5_MATCH_OUTER_HEADERS = 1 << 0,
+-	MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
+-	MLX5_MATCH_INNER_HEADERS = 1 << 2,
++	MLX5_MATCH_OUTER_HEADERS = RTE_BIT32(0),
++	MLX5_MATCH_MISC_PARAMETERS = RTE_BIT32(1),
++	MLX5_MATCH_INNER_HEADERS = RTE_BIT32(2),
+ };
+-#endif /* __MLX5_WIN_DEFS_H__ */
++
++#endif /* MLX5_WIN_DEFS_H */
+diff --git a/dpdk/drivers/common/qat/dev/qat_dev_gen1.c b/dpdk/drivers/common/qat/dev/qat_dev_gen1.c
+index cf480dcba8..dd2e878e90 100644
+--- a/dpdk/drivers/common/qat/dev/qat_dev_gen1.c
++++ b/dpdk/drivers/common/qat/dev/qat_dev_gen1.c
+@@ -242,7 +242,7 @@ qat_dev_get_extra_size_gen1(void)
+ }
+ 
+ static int
+-qat_get_dev_slice_map_gen1(uint16_t *map __rte_unused,
++qat_get_dev_slice_map_gen1(uint32_t *map __rte_unused,
+ 		const struct rte_pci_device *pci_dev __rte_unused)
+ {
+ 	return 0;
+diff --git a/dpdk/drivers/common/qat/dev/qat_dev_gen2.c b/dpdk/drivers/common/qat/dev/qat_dev_gen2.c
+index f51be46eb0..061dfdb698 100644
+--- a/dpdk/drivers/common/qat/dev/qat_dev_gen2.c
++++ b/dpdk/drivers/common/qat/dev/qat_dev_gen2.c
+@@ -22,7 +22,7 @@ static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen2 = {
+ };
+ 
+ static int
+-qat_dev_get_slice_map_gen2(uint16_t *map __rte_unused,
++qat_dev_get_slice_map_gen2(uint32_t *map __rte_unused,
+ 		const struct rte_pci_device *pci_dev __rte_unused)
+ {
+ 	return 0;
+diff --git a/dpdk/drivers/common/qat/dev/qat_dev_gen3.c b/dpdk/drivers/common/qat/dev/qat_dev_gen3.c
+index e4197f3c0f..f01b98ff86 100644
+--- a/dpdk/drivers/common/qat/dev/qat_dev_gen3.c
++++ b/dpdk/drivers/common/qat/dev/qat_dev_gen3.c
+@@ -68,7 +68,7 @@ static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen3 = {
+ };
+ 
+ static int
+-qat_dev_get_slice_map_gen3(uint16_t *map,
++qat_dev_get_slice_map_gen3(uint32_t *map,
+ 		const struct rte_pci_device *pci_dev)
+ {
+ 	if (rte_pci_read_config(pci_dev, map,
+diff --git a/dpdk/drivers/common/qat/dev/qat_dev_gen4.c b/dpdk/drivers/common/qat/dev/qat_dev_gen4.c
+index 1b3a5deabf..1ce262f715 100644
+--- a/dpdk/drivers/common/qat/dev/qat_dev_gen4.c
++++ b/dpdk/drivers/common/qat/dev/qat_dev_gen4.c
+@@ -283,7 +283,7 @@ qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
+ }
+ 
+ static int
+-qat_dev_get_slice_map_gen4(uint16_t *map __rte_unused,
++qat_dev_get_slice_map_gen4(uint32_t *map __rte_unused,
+ 		const struct rte_pci_device *pci_dev __rte_unused)
+ {
+ 	return 0;
+diff --git a/dpdk/drivers/common/qat/meson.build b/dpdk/drivers/common/qat/meson.build
+index b84e5b3c6c..95b52b78c3 100644
+--- a/dpdk/drivers/common/qat/meson.build
++++ b/dpdk/drivers/common/qat/meson.build
+@@ -54,14 +54,6 @@ if libipsecmb.found() and libcrypto_3.found()
+     endif
+ endif
+ 
+-# The driver should not build if both compression and crypto are disabled
+-#FIXME common code depends on compression files so check only compress!
+-if not qat_compress # and not qat_crypto
+-    build = false
+-    reason = '' # rely on reason for compress/crypto above
+-    subdir_done()
+-endif
+-
+ deps += ['bus_pci', 'cryptodev', 'net', 'compressdev']
+ sources += files(
+         'qat_common.c',
+diff --git a/dpdk/drivers/common/qat/qat_device.c b/dpdk/drivers/common/qat/qat_device.c
+index 8bce2ac073..ed75b66041 100644
+--- a/dpdk/drivers/common/qat/qat_device.c
++++ b/dpdk/drivers/common/qat/qat_device.c
+@@ -361,7 +361,7 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ {
+ 	int sym_ret = 0, asym_ret = 0, comp_ret = 0;
+ 	int num_pmds_created = 0;
+-	uint16_t capa = 0;
++	uint32_t capa = 0;
+ 	struct qat_pci_device *qat_pci_dev;
+ 	struct qat_dev_hw_spec_funcs *ops_hw;
+ 	struct qat_dev_cmd_param qat_dev_cmd_param[] = {
+diff --git a/dpdk/drivers/common/qat/qat_device.h b/dpdk/drivers/common/qat/qat_device.h
+index bc3da04238..4a79cdded3 100644
+--- a/dpdk/drivers/common/qat/qat_device.h
++++ b/dpdk/drivers/common/qat/qat_device.h
+@@ -37,7 +37,7 @@ typedef int (*qat_dev_get_misc_bar_t)
+ typedef int (*qat_dev_read_config_t)
+ 		(struct qat_pci_device *);
+ typedef int (*qat_dev_get_extra_size_t)(void);
+-typedef int (*qat_dev_get_slice_map_t)(uint16_t *map,
++typedef int (*qat_dev_get_slice_map_t)(uint32_t *map,
+ 		const struct rte_pci_device *pci_dev);
+ 
+ struct qat_dev_hw_spec_funcs {
+diff --git a/dpdk/drivers/common/qat/qat_qp.c b/dpdk/drivers/common/qat/qat_qp.c
+index 9cbd19a481..e95df292e8 100644
+--- a/dpdk/drivers/common/qat/qat_qp.c
++++ b/dpdk/drivers/common/qat/qat_qp.c
+@@ -449,20 +449,6 @@ adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen)
+ 	return 0;
+ }
+ 
+-static inline void
+-txq_write_tail(enum qat_device_gen qat_dev_gen,
+-		struct qat_qp *qp, struct qat_queue *q)
+-{
+-	struct qat_qp_hw_spec_funcs *ops =
+-		qat_qp_hw_spec[qat_dev_gen];
+-
+-	/*
+-	 * Pointer check should be done during
+-	 * initialization
+-	 */
+-	ops->qat_qp_csr_write_tail(qp, q);
+-}
+-
+ static inline void
+ qat_qp_csr_write_head(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
+ 		struct qat_queue *q, uint32_t new_head)
+@@ -631,179 +617,6 @@ kick_tail:
+ 	return nb_ops_sent;
+ }
+ 
+-/* Use this for compression only - but keep consistent with above common
+- * function as much as possible.
+- */
+-uint16_t
+-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
+-{
+-	register struct qat_queue *queue;
+-	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+-	register uint32_t nb_ops_sent = 0;
+-	register int nb_desc_to_build;
+-	uint16_t nb_ops_possible = nb_ops;
+-	register uint8_t *base_addr;
+-	register uint32_t tail;
+-
+-	int descriptors_built, total_descriptors_built = 0;
+-	int nb_remaining_descriptors;
+-	int overflow = 0;
+-
+-	if (unlikely(nb_ops == 0))
+-		return 0;
+-
+-	/* read params used a lot in main loop into registers */
+-	queue = &(tmp_qp->tx_q);
+-	base_addr = (uint8_t *)queue->base_addr;
+-	tail = queue->tail;
+-
+-	/* Find how many can actually fit on the ring */
+-	{
+-		/* dequeued can only be written by one thread, but it may not
+-		 * be this thread. As it's 4-byte aligned it will be read
+-		 * atomically here by any Intel CPU.
+-		 * enqueued can wrap before dequeued, but cannot
+-		 * lap it as var size of enq/deq (uint32_t) > var size of
+-		 * max_inflights (uint16_t). In reality inflights is never
+-		 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
+-		 * On wrapping, the calculation still returns the correct
+-		 * positive value as all three vars are unsigned.
+-		 */
+-		uint32_t inflights =
+-			tmp_qp->enqueued - tmp_qp->dequeued;
+-
+-		/* Find how many can actually fit on the ring */
+-		overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
+-		if (overflow > 0) {
+-			nb_ops_possible = nb_ops - overflow;
+-			if (nb_ops_possible == 0)
+-				return 0;
+-		}
+-
+-		/* QAT has plenty of work queued already, so don't waste cycles
+-		 * enqueueing, wait til the application has gathered a bigger
+-		 * burst or some completed ops have been dequeued
+-		 */
+-		if (tmp_qp->min_enq_burst_threshold && inflights >
+-				QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
+-				tmp_qp->min_enq_burst_threshold) {
+-			tmp_qp->stats.threshold_hit_count++;
+-			return 0;
+-		}
+-	}
+-
+-	/* At this point nb_ops_possible is assuming a 1:1 mapping
+-	 * between ops and descriptors.
+-	 * Fewer may be sent if some ops have to be split.
+-	 * nb_ops_possible is <= burst size.
+-	 * Find out how many spaces are actually available on the qp in case
+-	 * more are needed.
+-	 */
+-	nb_remaining_descriptors = nb_ops_possible
+-			+ ((overflow >= 0) ? 0 : overflow * (-1));
+-	QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
+-			nb_ops, nb_remaining_descriptors);
+-
+-	while (nb_ops_sent != nb_ops_possible &&
+-			nb_remaining_descriptors > 0) {
+-		struct qat_comp_op_cookie *cookie =
+-				tmp_qp->op_cookies[tail >> queue->trailz];
+-
+-		descriptors_built = 0;
+-
+-		QAT_DP_LOG(DEBUG, "--- data length: %u",
+-				((struct rte_comp_op *)*ops)->src.length);
+-
+-		nb_desc_to_build = qat_comp_build_request(*ops,
+-				base_addr + tail, cookie, tmp_qp->qat_dev_gen);
+-		QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
+-				"%d ops sent, %d descriptors needed",
+-				total_descriptors_built, nb_remaining_descriptors,
+-				nb_ops_sent, nb_desc_to_build);
+-
+-		if (unlikely(nb_desc_to_build < 0)) {
+-			/* this message cannot be enqueued */
+-			tmp_qp->stats.enqueue_err_count++;
+-			if (nb_ops_sent == 0)
+-				return 0;
+-			goto kick_tail;
+-		} else if (unlikely(nb_desc_to_build > 1)) {
+-			/* this op is too big and must be split - get more
+-			 * descriptors and retry
+-			 */
+-
+-			QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
+-					nb_desc_to_build);
+-
+-			nb_remaining_descriptors -= nb_desc_to_build;
+-			if (nb_remaining_descriptors >= 0) {
+-				/* There are enough remaining descriptors
+-				 * so retry
+-				 */
+-				int ret2 = qat_comp_build_multiple_requests(
+-						*ops, tmp_qp, tail,
+-						nb_desc_to_build);
+-
+-				if (unlikely(ret2 < 1)) {
+-					QAT_DP_LOG(DEBUG,
+-							"Failed to build (%d) descriptors, status %d",
+-							nb_desc_to_build, ret2);
+-
+-					qat_comp_free_split_op_memzones(cookie,
+-							nb_desc_to_build - 1);
+-
+-					tmp_qp->stats.enqueue_err_count++;
+-
+-					/* This message cannot be enqueued */
+-					if (nb_ops_sent == 0)
+-						return 0;
+-					goto kick_tail;
+-				} else {
+-					descriptors_built = ret2;
+-					total_descriptors_built +=
+-							descriptors_built;
+-					nb_remaining_descriptors -=
+-							descriptors_built;
+-					QAT_DP_LOG(DEBUG,
+-							"Multiple descriptors (%d) built ok",
+-							descriptors_built);
+-				}
+-			} else {
+-				QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
+-						"exceeds number of available descriptors (%d)",
+-						nb_desc_to_build,
+-						nb_remaining_descriptors +
+-						nb_desc_to_build);
+-
+-				qat_comp_free_split_op_memzones(cookie,
+-						nb_desc_to_build - 1);
+-
+-				/* Not enough extra descriptors */
+-				if (nb_ops_sent == 0)
+-					return 0;
+-				goto kick_tail;
+-			}
+-		} else {
+-			descriptors_built = 1;
+-			total_descriptors_built++;
+-			nb_remaining_descriptors--;
+-			QAT_DP_LOG(DEBUG, "Single descriptor built ok");
+-		}
+-
+-		tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
+-				queue->modulo_mask);
+-		ops++;
+-		nb_ops_sent++;
+-	}
+-
+-kick_tail:
+-	queue->tail = tail;
+-	tmp_qp->enqueued += total_descriptors_built;
+-	tmp_qp->stats.enqueued_count += nb_ops_sent;
+-	txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
+-	return nb_ops_sent;
+-}
+-
+ uint16_t
+ qat_dequeue_op_burst(void *qp, void **ops,
+ 		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
+diff --git a/dpdk/drivers/common/qat/qat_qp.h b/dpdk/drivers/common/qat/qat_qp.h
+index 66f00943a5..f911125e86 100644
+--- a/dpdk/drivers/common/qat/qat_qp.h
++++ b/dpdk/drivers/common/qat/qat_qp.h
+@@ -127,9 +127,6 @@ uint16_t
+ qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+ 		void **ops, uint16_t nb_ops);
+ 
+-uint16_t
+-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
+-
+ uint16_t
+ qat_dequeue_op_burst(void *qp, void **ops,
+ 		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
+@@ -201,6 +198,21 @@ struct qat_qp_hw_spec_funcs {
+ 	qat_qp_get_hw_data_t qat_qp_get_hw_data;
+ };
+ 
+-extern struct qat_qp_hw_spec_funcs *qat_qp_hw_spec[];
++extern struct qat_qp_hw_spec_funcs*
++	qat_qp_hw_spec[];
++
++static inline void
++txq_write_tail(enum qat_device_gen qat_dev_gen,
++		struct qat_qp *qp, struct qat_queue *q)
++{
++	struct qat_qp_hw_spec_funcs *ops =
++		qat_qp_hw_spec[qat_dev_gen];
++
++	/*
++	 * Pointer check should be done during
++	 * initialization
++	 */
++	ops->qat_qp_csr_write_tail(qp, q);
++}
+ 
+ #endif /* _QAT_QP_H_ */
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx.h b/dpdk/drivers/common/sfc_efx/base/efx.h
+index 92ec18761b..49e29dcc1c 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx.h
++++ b/dpdk/drivers/common/sfc_efx/base/efx.h
+@@ -4582,6 +4582,24 @@ efx_mae_action_set_populate_mark(
+ 	__in				efx_mae_actions_t *spec,
+ 	__in				uint32_t mark_value);
+ 
++/*
++ * Whilst efx_mae_action_set_populate_mark() can be used to request setting
++ * a user mark in matching packets and demands that the request come before
++ * setting the final destination (deliver action), this API can be invoked
++ * after deliver action has been added in order to request mark reset if
++ * the user's own mark request has not been added as a result of parsing.
++ *
++ * It is useful when the driver chains an outer rule (OR) with an action
++ * rule (AR) by virtue of a recirculation ID. The OR may set mark from
++ * this ID to help the driver identify packets that hit the OR and do
++ * not hit the AR. But, for packets that do hit the AR, the driver
++ * wants to reset the mark value to avoid confusing recipients.
++ */
++LIBEFX_API
++extern void
++efx_mae_action_set_populate_mark_reset(
++	__in				efx_mae_actions_t *spec);
++
+ LIBEFX_API
+ extern	__checkReturn			efx_rc_t
+ efx_mae_action_set_populate_deliver(
+@@ -4730,6 +4748,20 @@ efx_mae_action_set_fill_in_counter_id(
+ 	__in				efx_mae_actions_t *spec,
+ 	__in				const efx_counter_t *counter_idp);
+ 
++/*
++ * Clears dangling FW object IDs (counter ID, for instance) in
++ * the action set specification. Useful for adapter restarts,
++ * when all MAE objects need to be reallocated by the driver.
++ *
++ * This method only clears the IDs in the specification.
++ * The driver is still responsible for keeping the IDs
++ * separately and freeing them when stopping the port.
++ */
++LIBEFX_API
++extern void
++efx_mae_action_set_clear_fw_rsrc_ids(
++	__in				efx_mae_actions_t *spec);
++
+ /* Action set ID */
+ typedef struct efx_mae_aset_id_s {
+ 	uint32_t id;
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx_impl.h b/dpdk/drivers/common/sfc_efx/base/efx_impl.h
+index 9a5d465fa0..45e99d01c5 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx_impl.h
++++ b/dpdk/drivers/common/sfc_efx/base/efx_impl.h
+@@ -1800,6 +1800,10 @@ typedef struct efx_mae_action_vlan_push_s {
+ 	uint16_t			emavp_tci_be;
+ } efx_mae_action_vlan_push_t;
+ 
++/*
++ * Helper efx_mae_action_set_clear_fw_rsrc_ids() is responsible
++ * to initialise every field in this structure to INVALID value.
++ */
+ typedef struct efx_mae_actions_rsrc_s {
+ 	efx_mae_mac_id_t		emar_dst_mac_id;
+ 	efx_mae_mac_id_t		emar_src_mac_id;
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx_mae.c b/dpdk/drivers/common/sfc_efx/base/efx_mae.c
+index 31f51b5548..4c33471f28 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx_mae.c
++++ b/dpdk/drivers/common/sfc_efx/base/efx_mae.c
+@@ -1394,10 +1394,7 @@ efx_mae_action_set_spec_init(
+ 		goto fail1;
+ 	}
+ 
+-	spec->ema_rsrc.emar_dst_mac_id.id = EFX_MAE_RSRC_ID_INVALID;
+-	spec->ema_rsrc.emar_src_mac_id.id = EFX_MAE_RSRC_ID_INVALID;
+-	spec->ema_rsrc.emar_eh_id.id = EFX_MAE_RSRC_ID_INVALID;
+-	spec->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID;
++	efx_mae_action_set_clear_fw_rsrc_ids(spec);
+ 
+ 	/*
+ 	 * Helpers which populate v2 actions must reject them when v2 is not
+@@ -1916,6 +1913,18 @@ efx_mae_action_set_populate_mark(
+ 	    EFX_MAE_ACTION_MARK, sizeof (mark_value), arg));
+ }
+ 
++	void
++efx_mae_action_set_populate_mark_reset(
++	__in				efx_mae_actions_t *spec)
++{
++	uint32_t action_mask = (1U << EFX_MAE_ACTION_MARK);
++
++	if ((spec->ema_actions & action_mask) == 0) {
++		spec->ema_actions |= action_mask;
++		spec->ema_mark_value = 0;
++	}
++}
++
+ 	__checkReturn			efx_rc_t
+ efx_mae_action_set_populate_deliver(
+ 	__in				efx_mae_actions_t *spec,
+@@ -3015,6 +3024,16 @@ fail1:
+ 	return (rc);
+ }
+ 
++	void
++efx_mae_action_set_clear_fw_rsrc_ids(
++	__in				efx_mae_actions_t *spec)
++{
++	spec->ema_rsrc.emar_dst_mac_id.id = EFX_MAE_RSRC_ID_INVALID;
++	spec->ema_rsrc.emar_src_mac_id.id = EFX_MAE_RSRC_ID_INVALID;
++	spec->ema_rsrc.emar_eh_id.id = EFX_MAE_RSRC_ID_INVALID;
++	spec->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID;
++}
++
+ 	__checkReturn			efx_rc_t
+ efx_mae_counters_alloc(
+ 	__in				efx_nic_t *enp,
+diff --git a/dpdk/drivers/common/sfc_efx/base/efx_rx.c b/dpdk/drivers/common/sfc_efx/base/efx_rx.c
+index 68f42f5cac..61726a9f0b 100644
+--- a/dpdk/drivers/common/sfc_efx/base/efx_rx.c
++++ b/dpdk/drivers/common/sfc_efx/base/efx_rx.c
+@@ -937,8 +937,10 @@ efx_rx_qcreate_internal(
+ 
+ 		rss_hash_field =
+ 		    &erplp->erpl_fields[EFX_RX_PREFIX_FIELD_RSS_HASH];
+-		if (rss_hash_field->erpfi_width_bits == 0)
++		if (rss_hash_field->erpfi_width_bits == 0) {
++			rc = ENOTSUP;
+ 			goto fail5;
++		}
+ 	}
+ 
+ 	enp->en_rx_qcount++;
+diff --git a/dpdk/drivers/common/sfc_efx/version.map b/dpdk/drivers/common/sfc_efx/version.map
+index a54aab0a08..d9b04a611d 100644
+--- a/dpdk/drivers/common/sfc_efx/version.map
++++ b/dpdk/drivers/common/sfc_efx/version.map
+@@ -89,6 +89,7 @@ INTERNAL {
+ 	efx_mae_action_rule_insert;
+ 	efx_mae_action_rule_remove;
+ 	efx_mae_action_set_alloc;
++	efx_mae_action_set_clear_fw_rsrc_ids;
+ 	efx_mae_action_set_fill_in_counter_id;
+ 	efx_mae_action_set_fill_in_dst_mac_id;
+ 	efx_mae_action_set_fill_in_eh_id;
+@@ -103,6 +104,7 @@ INTERNAL {
+ 	efx_mae_action_set_populate_encap;
+ 	efx_mae_action_set_populate_flag;
+ 	efx_mae_action_set_populate_mark;
++	efx_mae_action_set_populate_mark_reset;
+ 	efx_mae_action_set_populate_set_dst_mac;
+ 	efx_mae_action_set_populate_set_src_mac;
+ 	efx_mae_action_set_populate_vlan_pop;
+diff --git a/dpdk/drivers/compress/mlx5/mlx5_compress.c b/dpdk/drivers/compress/mlx5/mlx5_compress.c
+index fb2bda9745..c4bf62ed41 100644
+--- a/dpdk/drivers/compress/mlx5/mlx5_compress.c
++++ b/dpdk/drivers/compress/mlx5/mlx5_compress.c
+@@ -96,9 +96,7 @@ static const struct rte_compressdev_capabilities mlx5_caps[] = {
+ 				      RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ 		.window_size = {.min = 10, .max = 15, .increment = 1},
+ 	},
+-	{
+-		.algo = RTE_COMP_ALGO_LIST_END,
+-	}
++	RTE_COMP_END_OF_CAPABILITIES_LIST()
+ };
+ 
+ static void
+@@ -245,8 +243,8 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ 		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
+ 	qp_attr.num_of_receive_wqes = 0;
+ 	qp_attr.num_of_send_wqbbs = RTE_BIT32(log_ops_n);
+-	qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
+-			&& priv->mmo_dma_qp;
++	qp_attr.mmo = priv->mmo_decomp_qp || priv->mmo_comp_qp ||
++			priv->mmo_dma_qp;
+ 	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp,
+ 					qp_attr.num_of_send_wqbbs *
+ 					MLX5_WQE_SIZE, &qp_attr, socket_id);
+@@ -315,7 +313,7 @@ mlx5_compress_xform_create(struct rte_compressdev *dev,
+ 			DRV_LOG(ERR, "Not enough capabilities to support decompress operation, maybe old FW/OFED version?");
+ 			return -ENOTSUP;
+ 		}
+-		if (xform->compress.hash_algo != RTE_COMP_HASH_ALGO_NONE) {
++		if (xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE) {
+ 			DRV_LOG(ERR, "SHA is not supported.");
+ 			return -ENOTSUP;
+ 		}
+@@ -635,7 +633,7 @@ mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
+ 			break;
+ 		case RTE_COMP_CHECKSUM_ADLER32:
+ 			op->output_chksum = (uint64_t)rte_be_to_cpu_32
+-						(opaq[idx].adler32) << 32;
++						(opaq[idx].adler32);
+ 			break;
+ 		case RTE_COMP_CHECKSUM_CRC32_ADLER32:
+ 			op->output_chksum = (uint64_t)rte_be_to_cpu_32
+diff --git a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c
+index 12d9d89072..3a8484eef1 100644
+--- a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c
++++ b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c
+@@ -26,7 +26,7 @@ const struct rte_compressdev_capabilities qat_gen1_comp_capabilities[] = {
+ 				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ 				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
+ 	 .window_size = {.min = 15, .max = 15, .increment = 0} },
+-	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
++	RTE_COMP_END_OF_CAPABILITIES_LIST() };
+ 
+ static int
+ qat_comp_dev_config_gen1(struct rte_compressdev *dev,
+diff --git a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c
+index 79b2ceb414..05906f13e0 100644
+--- a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c
++++ b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c
+@@ -25,7 +25,7 @@ qat_gen4_comp_capabilities[] = {
+ 			RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ 			RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ 	 .window_size = {.min = 15, .max = 15, .increment = 0} },
+-	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
++	RTE_COMP_END_OF_CAPABILITIES_LIST() };
+ 
+ static int
+ qat_comp_dev_config_gen4(struct rte_compressdev *dev,
+diff --git a/dpdk/drivers/compress/qat/qat_comp.c b/dpdk/drivers/compress/qat/qat_comp.c
+index fe4a4999c6..559948a46a 100644
+--- a/dpdk/drivers/compress/qat/qat_comp.c
++++ b/dpdk/drivers/compress/qat/qat_comp.c
+@@ -1144,3 +1144,185 @@ qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
+ 	}
+ 	return -EINVAL;
+ }
++
++/**
++ * Enqueue packets for processing on queue pair of a device
++ *
++ * @param qp
++ *   qat queue pair
++ * @param ops
++ *   Compressdev operation
++ * @param nb_ops
++ *   number of operations
++ * @return
++ *   - nb_ops_sent if successful
++ */
++uint16_t
++qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
++{
++	register struct qat_queue *queue;
++	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
++	register uint32_t nb_ops_sent = 0;
++	register int nb_desc_to_build;
++	uint16_t nb_ops_possible = nb_ops;
++	register uint8_t *base_addr;
++	register uint32_t tail;
++
++	int descriptors_built, total_descriptors_built = 0;
++	int nb_remaining_descriptors;
++	int overflow = 0;
++
++	if (unlikely(nb_ops == 0))
++		return 0;
++
++	/* read params used a lot in main loop into registers */
++	queue = &(tmp_qp->tx_q);
++	base_addr = (uint8_t *)queue->base_addr;
++	tail = queue->tail;
++
++	/* Find how many can actually fit on the ring */
++	{
++		/* dequeued can only be written by one thread, but it may not
++		 * be this thread. As it's 4-byte aligned it will be read
++		 * atomically here by any Intel CPU.
++		 * enqueued can wrap before dequeued, but cannot
++		 * lap it as var size of enq/deq (uint32_t) > var size of
++		 * max_inflights (uint16_t). In reality inflights is never
++		 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
++		 * On wrapping, the calculation still returns the correct
++		 * positive value as all three vars are unsigned.
++		 */
++		uint32_t inflights =
++			tmp_qp->enqueued - tmp_qp->dequeued;
++
++		/* Find how many can actually fit on the ring */
++		overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
++		if (overflow > 0) {
++			nb_ops_possible = nb_ops - overflow;
++			if (nb_ops_possible == 0)
++				return 0;
++		}
++
++		/* QAT has plenty of work queued already, so don't waste cycles
++		 * enqueueing, wait til the application has gathered a bigger
++		 * burst or some completed ops have been dequeued
++		 */
++		if (tmp_qp->min_enq_burst_threshold && inflights >
++				QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
++				tmp_qp->min_enq_burst_threshold) {
++			tmp_qp->stats.threshold_hit_count++;
++			return 0;
++		}
++	}
++
++	/* At this point nb_ops_possible is assuming a 1:1 mapping
++	 * between ops and descriptors.
++	 * Fewer may be sent if some ops have to be split.
++	 * nb_ops_possible is <= burst size.
++	 * Find out how many spaces are actually available on the qp in case
++	 * more are needed.
++	 */
++	nb_remaining_descriptors = nb_ops_possible
++			+ ((overflow >= 0) ? 0 : overflow * (-1));
++	QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
++			nb_ops, nb_remaining_descriptors);
++
++	while (nb_ops_sent != nb_ops_possible &&
++			nb_remaining_descriptors > 0) {
++		struct qat_comp_op_cookie *cookie =
++				tmp_qp->op_cookies[tail >> queue->trailz];
++
++		descriptors_built = 0;
++
++		QAT_DP_LOG(DEBUG, "--- data length: %u",
++				((struct rte_comp_op *)*ops)->src.length);
++
++		nb_desc_to_build = qat_comp_build_request(*ops,
++				base_addr + tail, cookie, tmp_qp->qat_dev_gen);
++		QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
++				"%d ops sent, %d descriptors needed",
++				total_descriptors_built, nb_remaining_descriptors,
++				nb_ops_sent, nb_desc_to_build);
++
++		if (unlikely(nb_desc_to_build < 0)) {
++			/* this message cannot be enqueued */
++			tmp_qp->stats.enqueue_err_count++;
++			if (nb_ops_sent == 0)
++				return 0;
++			goto kick_tail;
++		} else if (unlikely(nb_desc_to_build > 1)) {
++			/* this op is too big and must be split - get more
++			 * descriptors and retry
++			 */
++
++			QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
++					nb_desc_to_build);
++
++			nb_remaining_descriptors -= nb_desc_to_build;
++			if (nb_remaining_descriptors >= 0) {
++				/* There are enough remaining descriptors
++				 * so retry
++				 */
++				int ret2 = qat_comp_build_multiple_requests(
++						*ops, tmp_qp, tail,
++						nb_desc_to_build);
++
++				if (unlikely(ret2 < 1)) {
++					QAT_DP_LOG(DEBUG,
++							"Failed to build (%d) descriptors, status %d",
++							nb_desc_to_build, ret2);
++
++					qat_comp_free_split_op_memzones(cookie,
++							nb_desc_to_build - 1);
++
++					tmp_qp->stats.enqueue_err_count++;
++
++					/* This message cannot be enqueued */
++					if (nb_ops_sent == 0)
++						return 0;
++					goto kick_tail;
++				} else {
++					descriptors_built = ret2;
++					total_descriptors_built +=
++							descriptors_built;
++					nb_remaining_descriptors -=
++							descriptors_built;
++					QAT_DP_LOG(DEBUG,
++							"Multiple descriptors (%d) built ok",
++							descriptors_built);
++				}
++			} else {
++				QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
++						"exceeds number of available descriptors (%d)",
++						nb_desc_to_build,
++						nb_remaining_descriptors +
++						nb_desc_to_build);
++
++				qat_comp_free_split_op_memzones(cookie,
++						nb_desc_to_build - 1);
++
++				/* Not enough extra descriptors */
++				if (nb_ops_sent == 0)
++					return 0;
++				goto kick_tail;
++			}
++		} else {
++			descriptors_built = 1;
++			total_descriptors_built++;
++			nb_remaining_descriptors--;
++			QAT_DP_LOG(DEBUG, "Single descriptor built ok");
++		}
++
++		tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
++				queue->modulo_mask);
++		ops++;
++		nb_ops_sent++;
++	}
++
++kick_tail:
++	queue->tail = tail;
++	tmp_qp->enqueued += total_descriptors_built;
++	tmp_qp->stats.enqueued_count += nb_ops_sent;
++	txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
++	return nb_ops_sent;
++}
+diff --git a/dpdk/drivers/compress/qat/qat_comp.h b/dpdk/drivers/compress/qat/qat_comp.h
+index da7b9a6eec..dc220cd6e3 100644
+--- a/dpdk/drivers/compress/qat/qat_comp.h
++++ b/dpdk/drivers/compress/qat/qat_comp.h
+@@ -141,5 +141,8 @@ qat_comp_stream_create(struct rte_compressdev *dev,
+ int
+ qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+ 
++uint16_t
++qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
++
+ #endif
+ #endif
+diff --git a/dpdk/drivers/crypto/ccp/ccp_crypto.c b/dpdk/drivers/crypto/ccp/ccp_crypto.c
+index b21b32e507..4b84b3303e 100644
+--- a/dpdk/drivers/crypto/ccp/ccp_crypto.c
++++ b/dpdk/drivers/crypto/ccp/ccp_crypto.c
+@@ -26,15 +26,12 @@
+ 
+ #include "ccp_dev.h"
"ccp_dev.h" + #include "ccp_crypto.h" +-#include "ccp_pci.h" + #include "ccp_pmd_private.h" + + #include + #include + #include + +-extern int iommu_mode; +-void *sha_ctx; + /* SHA initial context values */ + uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA1_H4, SHA1_H3, +@@ -748,13 +745,8 @@ ccp_configure_session_cipher(struct ccp_session *sess, + CCP_LOG_ERR("Invalid CCP Engine"); + return -ENOTSUP; + } +- if (iommu_mode == 2) { +- sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); +- } else { +- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); +- } ++ sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce); ++ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); + return 0; + } + +@@ -793,7 +785,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha1_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA1_HMAC: + if (sess->auth_opt) { +@@ -832,7 +823,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha224_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + if (sess->auth_opt) { +@@ -895,7 +885,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha256_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA256_HMAC: + if (sess->auth_opt) { +@@ -958,7 +947,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha384_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + if (sess->auth_opt) { +@@ -1023,7 +1011,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha512_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA512_HMAC: + if (sess->auth_opt) { +@@ -1173,13 +1160,8 @@ ccp_configure_session_aead(struct ccp_session *sess, + CCP_LOG_ERR("Unsupported aead algo"); + return -ENOTSUP; + } +- if (iommu_mode == 2) { +- sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); +- } else { +- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); +- } ++ sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce); ++ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); + return 0; + } + +@@ -1592,14 +1574,8 @@ ccp_perform_hmac(struct rte_crypto_op *op, + op->sym->auth.data.offset); + append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); +- if (iommu_mode == 2) { +- dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr); +- pst.src_addr = 
+-	} else {
+-		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+-	}
+-	dest_addr_t = dest_addr;
++	dest_addr_t = dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
++	pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+ 
+ 	/** Load PHash1 to LSB*/
+ 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+@@ -1681,10 +1657,7 @@ ccp_perform_hmac(struct rte_crypto_op *op,
+ 
+ 	/** Load PHash2 to LSB*/
+ 	addr += session->auth.ctx_len;
+-	if (iommu_mode == 2)
+-		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+-	else
+-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
++	pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+ 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ 	pst.len = session->auth.ctx_len;
+ 	pst.dir = 1;
+@@ -1770,14 +1743,8 @@ ccp_perform_sha(struct rte_crypto_op *op,
+ 			      op->sym->auth.data.offset);
+ 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ 					session->auth.ctx_len);
+-	if (iommu_mode == 2) {
+-		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+-		pst.src_addr = (phys_addr_t)sha_ctx;
+-	} else {
+-		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+-						session->auth.ctx);
+-	}
++	pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)session->auth.ctx);
++	dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+ 
+ 	/** Passthru sha context*/
+ 
+@@ -1865,15 +1832,8 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+ 		CCP_LOG_ERR("CCP MBUF append failed\n");
+ 		return -1;
+ 	}
+-	if (iommu_mode == 2) {
+-		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+-		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
+-					session->auth.pre_compute);
+-	} else {
+-		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+-		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
+-					session->auth.pre_compute);
+-	}
++	dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
++	ctx_paddr = (phys_addr_t)rte_mem_virt2iova(session->auth.pre_compute);
+ 	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+ 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ 	memset(desc, 0, Q_DESC_SIZE);
+@@ -2009,13 +1969,8 @@ ccp_perform_sha3(struct rte_crypto_op *op,
+ 		CCP_LOG_ERR("CCP MBUF append failed\n");
+ 		return -1;
+ 	}
+-	if (iommu_mode == 2) {
+-		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+-		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+-	} else {
+-		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+-		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+-	}
++	dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
++	ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+ 
+ 	ctx_addr = session->auth.sha3_ctx;
+ 
+@@ -2089,13 +2044,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ 
+ 		ctx_addr = session->auth.pre_compute;
+ 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+-		if (iommu_mode == 2)
+-			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+-							(void *)ctx_addr);
+-		else
+-			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+-							(void *)ctx_addr);
+-
++		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+ 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ 		pst.len = CCP_SB_BYTES;
+ 		pst.dir = 1;
+@@ -2133,12 +2082,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ 	} else {
+ 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+ 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+-		if (iommu_mode == 2)
+-			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+-							(void *)ctx_addr);
+-		else
+-			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+-							(void *)ctx_addr);
++		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+ 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ 		pst.len = CCP_SB_BYTES;
+ 		pst.dir = 1;
+@@ -2328,12 +2272,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
+ 
+ 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+ 			   iv, session->iv.length);
+-		if (iommu_mode == 2)
+-			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+-							(void *) lsb_buf);
+-		else
+-			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+-							(void *) lsb_buf);
++		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *) lsb_buf);
+ 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ 		pst.len = CCP_SB_BYTES;
+ 		pst.dir = 1;
+@@ -2356,11 +2295,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
+ 	else
+ 		dest_addr = src_addr;
+ 
+-	if (iommu_mode == 2)
+-		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+-	else
+-		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+-
++	key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+ 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ 
+ 	memset(desc, 0, Q_DESC_SIZE);
+@@ -2746,12 +2681,7 @@ process_ops_to_enqueue(struct ccp_qp *qp,
+ 	b_info->lsb_buf_idx = 0;
+ 	b_info->desccnt = 0;
+ 	b_info->cmd_q = cmd_q;
+-	if (iommu_mode == 2)
+-		b_info->lsb_buf_phys =
+-			(phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
+-	else
+-		b_info->lsb_buf_phys =
+-			(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
++	b_info->lsb_buf_phys = (phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
+ 
+ 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+ 
+diff --git a/dpdk/drivers/crypto/ccp/ccp_dev.c b/dpdk/drivers/crypto/ccp/ccp_dev.c
+index 424ead82c3..ee30f5ac30 100644
+--- a/dpdk/drivers/crypto/ccp/ccp_dev.c
++++ b/dpdk/drivers/crypto/ccp/ccp_dev.c
+@@ -20,11 +20,9 @@
+ #include 
+ 
+ #include "ccp_dev.h"
+-#include "ccp_pci.h"
+ #include "ccp_pmd_private.h"
+ 
+-int iommu_mode;
+-struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
++static TAILQ_HEAD(, ccp_device) ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+ static int ccp_dev_id;
+ 
+ int
+@@ -69,7 +67,7 @@ ccp_read_hwrng(uint32_t *value)
+ 	struct ccp_device *dev;
+ 
+ 	TAILQ_FOREACH(dev, &ccp_list, next) {
+-		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
++		void *vaddr = (void *)(dev->pci->mem_resource[2].addr);
+ 
+ 		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ 			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+@@ -362,7 +360,7 @@ ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+ 		if (ccp_get_bit(&cmd_q->lsbmask, j))
+ 			weight++;
+ 
+-	printf("Queue %d can access %d LSB regions of mask %lu\n",
++	CCP_LOG_DBG("Queue %d can access %d LSB regions of mask %lu\n",
+ 		    (int)cmd_q->id, weight, cmd_q->lsbmask);
+ 
+ 	return weight ? 0 : -EINVAL;
+@@ -481,7 +479,7 @@ ccp_assign_lsbs(struct ccp_device *ccp)
+ }
+ 
+ static int
+-ccp_add_device(struct ccp_device *dev, int type)
++ccp_add_device(struct ccp_device *dev)
+ {
+ 	int i;
+ 	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+@@ -495,9 +493,9 @@ ccp_add_device(struct ccp_device *dev, int type)
+ 
+ 	dev->id = ccp_dev_id++;
+ 	dev->qidx = 0;
+-	vaddr = (void *)(dev->pci.mem_resource[2].addr);
++	vaddr = (void *)(dev->pci->mem_resource[2].addr);
+ 
+-	if (type == CCP_VERSION_5B) {
++	if (dev->pci->id.device_id == AMD_PCI_CCP_5B) {
+ 		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+ 		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+ 		for (i = 0; i < 12; i++) {
+@@ -616,54 +614,20 @@ ccp_remove_device(struct ccp_device *dev)
+ 	TAILQ_REMOVE(&ccp_list, dev, next);
+ }
+ 
+-static int
+-is_ccp_device(const char *dirname,
+-	      const struct rte_pci_id *ccp_id,
+-	      int *type)
+-{
+-	char filename[PATH_MAX];
+-	const struct rte_pci_id *id;
+-	uint16_t vendor, device_id;
+-	int i;
+-	unsigned long tmp;
+-
+-	/* get vendor id */
+-	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+-	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+-		return 0;
+-	vendor = (uint16_t)tmp;
+-
+-	/* get device id */
+-	snprintf(filename, sizeof(filename), "%s/device", dirname);
+-	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+-		return 0;
+-	device_id = (uint16_t)tmp;
+-
+-	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+-		if (vendor == id->vendor_id &&
+-		    device_id == id->device_id) {
+-			*type = i;
+-			return 1; /* Matched device */
+-		}
+-	}
+-	return 0;
+-}
+-
+-static int
+-ccp_probe_device(int ccp_type, struct rte_pci_device *pci_dev)
++int
++ccp_probe_device(struct rte_pci_device *pci_dev)
+ {
+-	struct ccp_device *ccp_dev = NULL;
+-	int uio_fd = -1;
++	struct ccp_device *ccp_dev;
+ 
+ 	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+ 			      RTE_CACHE_LINE_SIZE);
+ 	if (ccp_dev == NULL)
+ 		goto fail;
+ 
+-	ccp_dev->pci = *pci_dev;
++	ccp_dev->pci = pci_dev;
+ 
+ 	/* device is valid, add in list */
+-	if (ccp_add_device(ccp_dev, ccp_type)) {
++	if (ccp_add_device(ccp_dev)) {
+ 		ccp_remove_device(ccp_dev);
+ 		goto fail;
+ 	}
+@@ -671,51 +635,6 @@ ccp_probe_device(int ccp_type, struct rte_pci_device *pci_dev)
+ 	return 0;
+ fail:
+ 	CCP_LOG_ERR("CCP Device probe failed");
+-	if (uio_fd >= 0)
+-		close(uio_fd);
+ 	rte_free(ccp_dev);
+ 	return -1;
+ }
+-
+-int
+-ccp_probe_devices(struct rte_pci_device *pci_dev,
+-		  const struct rte_pci_id *ccp_id)
+-{
+-	int dev_cnt = 0;
+-	int ccp_type = 0;
+-	struct dirent *d;
+-	DIR *dir;
+-	int ret = 0;
+-	int module_idx = 0;
+-	uint16_t domain;
+-	uint8_t bus, devid, function;
+-	char dirname[PATH_MAX];
+-
+-	module_idx = ccp_check_pci_uio_module();
+-	if (module_idx < 0)
+-		return -1;
+-
+-	iommu_mode = module_idx;
+-	TAILQ_INIT(&ccp_list);
+-	dir = opendir(SYSFS_PCI_DEVICES);
+-	if (dir == NULL)
+-		return -1;
+-	while ((d = readdir(dir)) != NULL) {
+-		if (d->d_name[0] == '.')
+-			continue;
+-		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+-					&domain, &bus, &devid, &function) != 0)
+-			continue;
+-		snprintf(dirname, sizeof(dirname), "%s/%s",
+-			     SYSFS_PCI_DEVICES, d->d_name);
+-		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+-			printf("CCP : Detected CCP device with ID = 0x%x\n",
+-			       ccp_id[ccp_type].device_id);
+-			ret = ccp_probe_device(ccp_type, pci_dev);
+-			if (ret == 0)
+-				dev_cnt++;
+-		}
+-	}
+-	closedir(dir);
+-	return dev_cnt;
+-}
+diff --git a/dpdk/drivers/crypto/ccp/ccp_dev.h b/dpdk/drivers/crypto/ccp/ccp_dev.h
+index 9deaae7980..e3ec481dd3 100644
+--- a/dpdk/drivers/crypto/ccp/ccp_dev.h
++++ b/dpdk/drivers/crypto/ccp/ccp_dev.h
+@@ -19,6 +19,12 @@
+ #include 
+ #include 
+ 
++/* CCP PCI device identifiers */
++#define AMD_PCI_VENDOR_ID	0x1022
++#define AMD_PCI_CCP_5A		0x1456
++#define AMD_PCI_CCP_5B		0x1468
++#define AMD_PCI_CCP_RV		0x15df
++
+ /**< CCP specific */
+ #define MAX_HW_QUEUES 5
+ #define CCP_MAX_TRNG_RETRIES	10
+@@ -169,18 +175,6 @@ static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+ #define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+ 	ccp_pci_reg_write(hw_addr, reg_offset, value)
+ 
+-TAILQ_HEAD(ccp_list, ccp_device);
+-
+-extern struct ccp_list ccp_list;
+-
+-/**
+- * CCP device version
+- */
+-enum ccp_device_version {
+-	CCP_VERSION_5A = 0,
+-	CCP_VERSION_5B,
+-};
+-
+ /**
+  * A structure describing a CCP command queue.
+  */
+@@ -233,8 +227,8 @@ struct ccp_device {
+ 	/**< ccp queue */
+ 	int cmd_q_count;
+ 	/**< no. of ccp Queues */
+-	struct rte_pci_device pci;
+-	/**< ccp pci identifier */
++	struct rte_pci_device *pci;
++	/**< ccp pci device */
+ 	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+ 	/**< shared lsb mask of ccp */
+ 	rte_spinlock_t lsb_lock;
+@@ -468,13 +462,12 @@ high32_value(unsigned long addr)
+ int ccp_dev_start(struct rte_cryptodev *dev);
+ 
+ /**
+- * Detect ccp platform and initialize all ccp devices
++ * Initialize one ccp device
+  *
+- * @param ccp_id rte_pci_id list for supported CCP devices
+- * @return no. of successfully initialized CCP devices
++ * @dev rte pci device
++ * @return 0 on success otherwise -1
+  */
+-int ccp_probe_devices(struct rte_pci_device *pci_dev,
+-		      const struct rte_pci_id *ccp_id);
++int ccp_probe_device(struct rte_pci_device *pci_dev);
+ 
+ /**
+  * allocate a ccp command queue
+diff --git a/dpdk/drivers/crypto/ccp/ccp_pci.c b/dpdk/drivers/crypto/ccp/ccp_pci.c
+deleted file mode 100644
+index 38029a9081..0000000000
+--- a/dpdk/drivers/crypto/ccp/ccp_pci.c
++++ /dev/null
+@@ -1,240 +0,0 @@
+-/* SPDX-License-Identifier: BSD-3-Clause
+- * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+- */
+-
+-#include 
+-#include 
+-#include 
+-#include 
+-#include 
+-
+-#include 
+-
+-#include "ccp_pci.h"
+-
+-static const char * const uio_module_names[] = {
+-	"igb_uio",
+-	"uio_pci_generic",
+-	"vfio_pci"
+-};
+-
+-int
+-ccp_check_pci_uio_module(void)
+-{
+-	FILE *fp;
+-	int i;
+-	char buf[BUFSIZ];
+-
+-	fp = fopen(PROC_MODULES, "r");
+-	if (fp == NULL)
+-		return -1;
+-	i = 0;
+-	while (uio_module_names[i] != NULL) {
+-		while (fgets(buf, sizeof(buf), fp) != NULL) {
+-			if (!strncmp(buf, uio_module_names[i],
+-				     strlen(uio_module_names[i]))) {
+-				fclose(fp);
+-				return i;
+-			}
+-		}
+-		i++;
+-		rewind(fp);
+-	}
+-	fclose(fp);
+-	printf("Insert igb_uio or uio_pci_generic kernel module(s)");
+-	return -1;/* uio not inserted */
+-}
+-
+-/*
+- * split up a pci address into its constituent parts.
+- */
+-int
+-ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+-			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+-{
+-	/* first split on ':' */
+-	union splitaddr {
+-		struct {
+-			char *domain;
+-			char *bus;
+-			char *devid;
+-			char *function;
+-		};
+-		char *str[PCI_FMT_NVAL];
+-		/* last element-separator is "." not ":" */
+-	} splitaddr;
+-
+-	char *buf_copy = strndup(buf, bufsize);
+-
+-	if (buf_copy == NULL)
+-		return -1;
+-
+-	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+-			!= PCI_FMT_NVAL - 1)
+-		goto error;
+-	/* final split is on '.' between devid and function */
between devid and function */ +- splitaddr.function = strchr(splitaddr.devid, '.'); +- if (splitaddr.function == NULL) +- goto error; +- *splitaddr.function++ = '\0'; +- +- /* now convert to int values */ +- errno = 0; +- *domain = (uint8_t)strtoul(splitaddr.domain, NULL, 16); +- *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16); +- *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16); +- *function = (uint8_t)strtoul(splitaddr.function, NULL, 10); +- if (errno != 0) +- goto error; +- +- free(buf_copy); /* free the copy made with strdup */ +- return 0; +-error: +- free(buf_copy); +- return -1; +-} +- +-int +-ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val) +-{ +- FILE *f; +- char buf[BUFSIZ]; +- char *end = NULL; +- +- f = fopen(filename, "r"); +- if (f == NULL) +- return -1; +- if (fgets(buf, sizeof(buf), f) == NULL) { +- fclose(f); +- return -1; +- } +- *val = strtoul(buf, &end, 0); +- if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) { +- fclose(f); +- return -1; +- } +- fclose(f); +- return 0; +-} +- +-/** IO resource type: */ +-#define IORESOURCE_IO 0x00000100 +-#define IORESOURCE_MEM 0x00000200 +- +-/* parse one line of the "resource" sysfs file (note that the 'line' +- * string is modified) +- */ +-static int +-ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr, +- uint64_t *end_addr, uint64_t *flags) +-{ +- union pci_resource_info { +- struct { +- char *phys_addr; +- char *end_addr; +- char *flags; +- }; +- char *ptrs[PCI_RESOURCE_FMT_NVAL]; +- } res_info; +- +- if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) +- return -1; +- errno = 0; +- *phys_addr = strtoull(res_info.phys_addr, NULL, 16); +- *end_addr = strtoull(res_info.end_addr, NULL, 16); +- *flags = strtoull(res_info.flags, NULL, 16); +- if (errno != 0) +- return -1; +- +- return 0; +-} +- +-/* parse the "resource" sysfs file */ +-int +-ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev) +-{ +- FILE *fp; +- char buf[BUFSIZ]; +- int i; +- uint64_t phys_addr, end_addr, flags; +- +- fp = fopen(filename, "r"); +- if (fp == NULL) +- return -1; +- +- for (i = 0; i < PCI_MAX_RESOURCE; i++) { +- if (fgets(buf, sizeof(buf), fp) == NULL) +- goto error; +- if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf), +- &phys_addr, &end_addr, &flags) < 0) +- goto error; +- +- if (flags & IORESOURCE_MEM) { +- dev->mem_resource[i].phys_addr = phys_addr; +- dev->mem_resource[i].len = end_addr - phys_addr + 1; +- /* not mapped for now */ +- dev->mem_resource[i].addr = NULL; +- } +- } +- fclose(fp); +- return 0; +- +-error: +- fclose(fp); +- return -1; +-} +- +-int +-ccp_find_uio_devname(const char *dirname) +-{ +- +- DIR *dir; +- struct dirent *e; +- char dirname_uio[PATH_MAX]; +- unsigned int uio_num; +- int ret = -1; +- +- /* depending on kernel version, uio can be located in uio/uioX +- * or uio:uioX +- */ +- snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname); +- dir = opendir(dirname_uio); +- if (dir == NULL) { +- /* retry with the parent directory might be different kernel version*/ +- dir = opendir(dirname); +- if (dir == NULL) +- return -1; +- } +- +- /* take the first file starting with "uio" */ +- while ((e = readdir(dir)) != NULL) { +- /* format could be uio%d ...*/ +- int shortprefix_len = sizeof("uio") - 1; +- /* ... 
or uio:uio%d */
+- int longprefix_len = sizeof("uio:uio") - 1;
+- char *endptr;
+-
+- if (strncmp(e->d_name, "uio", 3) != 0)
+- continue;
+-
+- /* first try uio%d */
+- errno = 0;
+- uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+- if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+- ret = uio_num;
+- break;
+- }
+-
+- /* then try uio:uio%d */
+- errno = 0;
+- uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+- if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+- ret = uio_num;
+- break;
+- }
+- }
+- closedir(dir);
+- return ret;
+-
+-
+-}
+diff --git a/dpdk/drivers/crypto/ccp/ccp_pci.h b/dpdk/drivers/crypto/ccp/ccp_pci.h
+deleted file mode 100644
+index 6736bf8ad3..0000000000
+--- a/dpdk/drivers/crypto/ccp/ccp_pci.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/* SPDX-License-Identifier: BSD-3-Clause
+- * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+- */
+-
+-#ifndef _CCP_PCI_H_
+-#define _CCP_PCI_H_
+-
+-#include <stdint.h>
+-
+-#include <bus_pci_driver.h>
+-
+-#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+-#define PROC_MODULES "/proc/modules"
+-
+-int ccp_check_pci_uio_module(void);
+-
+-int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+- uint8_t *bus, uint8_t *devid, uint8_t *function);
+-
+-int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+-
+-int ccp_pci_parse_sysfs_resource(const char *filename,
+- struct rte_pci_device *dev);
+-
+-int ccp_find_uio_devname(const char *dirname);
+-
+-#endif /* _CCP_PCI_H_ */
+diff --git a/dpdk/drivers/crypto/ccp/meson.build b/dpdk/drivers/crypto/ccp/meson.build
+index a4f3406009..a9abaa4da0 100644
+--- a/dpdk/drivers/crypto/ccp/meson.build
++++ b/dpdk/drivers/crypto/ccp/meson.build
+@@ -18,7 +18,6 @@ sources = files(
+ 'rte_ccp_pmd.c',
+ 'ccp_crypto.c',
+ 'ccp_dev.c',
+- 'ccp_pci.c',
+ 'ccp_pmd_ops.c',
+ )
+
+diff --git a/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c b/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c
+index 221a0a5235..a5271d7227 100644
+--- a/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c
++++ b/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c
+@@ -22,7 +22,6 @@
+ static unsigned int ccp_pmd_init_done;
+ uint8_t ccp_cryptodev_driver_id;
+ uint8_t cryptodev_cnt;
+-extern void *sha_ctx;
+
+ struct ccp_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+@@ -168,15 +167,9 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ * The set of PCI devices this driver supports
+ */
+ static struct rte_pci_id ccp_pci_id[] = {
+- {
+- RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+- },
+- {
+- RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+- },
+- {
+- RTE_PCI_DEVICE(0x1022, 0x15df), /* AMD CCP RV */
+- },
++ { RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_CCP_5A), },
++ { RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_CCP_5B), },
++ { RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_CCP_RV), },
+ {.device_id = 0},
+ };
+
+@@ -200,7 +193,6 @@ cryptodev_ccp_remove(struct rte_pci_device *pci_dev)
+ return -ENODEV;
+
+ ccp_pmd_init_done = 0;
+- rte_free(sha_ctx);
+
+ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+ name, rte_socket_id());
+@@ -230,14 +222,13 @@ cryptodev_ccp_create(const char *name,
+ goto init_error;
+ }
+
+- cryptodev_cnt = ccp_probe_devices(pci_dev, ccp_pci_id);
+-
+- if (cryptodev_cnt == 0) {
++ if (ccp_probe_device(pci_dev) != 0) {
+ CCP_LOG_ERR("failed to detect CCP crypto device");
+ goto init_error;
+ }
++ cryptodev_cnt++;
+
+- printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
++ CCP_LOG_DBG("CCP : Crypto device count = %d\n", 
cryptodev_cnt); + dev->device = &pci_dev->device; + dev->device->driver = &pci_drv->driver; + dev->driver_id = ccp_cryptodev_driver_id; +@@ -287,7 +278,6 @@ cryptodev_ccp_probe(struct rte_pci_driver *pci_drv __rte_unused, + .auth_opt = CCP_PMD_AUTH_OPT_CCP, + }; + +- sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64); + if (ccp_pmd_init_done) { + RTE_LOG(INFO, PMD, "CCP PMD already initialized\n"); + return -EFAULT; +diff --git a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c +index 6c28f8942e..eb4e6ff966 100644 +--- a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c ++++ b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c +@@ -936,8 +936,8 @@ static const struct rte_cryptodev_capabilities sec_caps_aes[] = { + .increment = 4 + }, + .iv_size = { +- .min = 12, +- .max = 12, ++ .min = 11, ++ .max = 11, + .increment = 0 + } + }, } +diff --git a/dpdk/drivers/crypto/cnxk/cnxk_se.h b/dpdk/drivers/crypto/cnxk/cnxk_se.h +index b07fc22858..32e2b2cd64 100644 +--- a/dpdk/drivers/crypto/cnxk/cnxk_se.h ++++ b/dpdk/drivers/crypto/cnxk/cnxk_se.h +@@ -244,7 +244,7 @@ fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i, + uint32_t extra_len = extra_buf ? extra_buf->size : 0; + uint32_t size = *psize; + +- for (j = 0; (j < from->buf_cnt) && size; j++) { ++ for (j = 0; j < from->buf_cnt; j++) { + struct roc_se_sglist_comp *to = &list[i >> 2]; + uint32_t buf_sz = from->bufs[j].size; + void *vaddr = from->bufs[j].vaddr; +@@ -311,6 +311,9 @@ fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i, + if (extra_offset) + extra_offset -= size; + i++; ++ ++ if (unlikely(!size)) ++ break; + } + + *psize = size; +@@ -367,7 +370,9 @@ fill_sg2_comp_from_iov(struct roc_se_sg2list_comp *list, uint32_t i, struct roc_ + uint32_t extra_len = extra_buf ? 
extra_buf->size : 0; + uint32_t size = *psize; + +- for (j = 0; (j < from->buf_cnt) && size; j++) { ++ rte_prefetch2(psize); ++ ++ for (j = 0; j < from->buf_cnt; j++) { + struct roc_se_sg2list_comp *to = &list[i / 3]; + uint32_t buf_sz = from->bufs[j].size; + void *vaddr = from->bufs[j].vaddr; +@@ -433,6 +438,9 @@ fill_sg2_comp_from_iov(struct roc_se_sg2list_comp *list, uint32_t i, struct roc_ + if (extra_offset) + extra_offset -= size; + i++; ++ ++ if (unlikely(!size)) ++ break; + } + + *psize = size; +@@ -884,20 +892,10 @@ cpt_digest_gen_sg_ver1_prep(uint32_t flags, uint64_t d_lens, struct roc_se_fc_pa + + /* input data */ + size = data_len; +- if (size) { +- i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0, +- &size, NULL, 0); +- if (unlikely(size)) { +- plt_dp_err("Insufficient dst IOV size, short by %dB", +- size); +- return -1; +- } +- } else { +- /* +- * Looks like we need to support zero data +- * gather ptr in case of hash & hmac +- */ +- i++; ++ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0, &size, NULL, 0); ++ if (unlikely(size)) { ++ plt_dp_err("Insufficient dst IOV size, short by %dB", size); ++ return -1; + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp); +@@ -1008,18 +1006,10 @@ cpt_digest_gen_sg_ver2_prep(uint32_t flags, uint64_t d_lens, struct roc_se_fc_pa + + /* input data */ + size = data_len; +- if (size) { +- i = fill_sg2_comp_from_iov(gather_comp, i, params->src_iov, 0, &size, NULL, 0); +- if (unlikely(size)) { +- plt_dp_err("Insufficient dst IOV size, short by %dB", size); +- return -1; +- } +- } else { +- /* +- * Looks like we need to support zero data +- * gather ptr in case of hash & hmac +- */ +- i++; ++ i = fill_sg2_comp_from_iov(gather_comp, i, params->src_iov, 0, &size, NULL, 0); ++ if (unlikely(size)) { ++ plt_dp_err("Insufficient dst IOV size, short by %dB", size); ++ return -1; + } + cpt_inst_w5.s.gather_sz = ((i + 2) / 3); + +diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c +index 71e02cd051..30f919cd40 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c ++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c +@@ -139,15 +139,12 @@ int + ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) + { + struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id]; +- struct rte_ring *r = NULL; + + if (!qp) + return 0; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { +- r = rte_ring_lookup(qp->name); +- if (r) +- rte_ring_free(r); ++ rte_ring_free(rte_ring_lookup(qp->name)); + + #if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM + if (qp->mb_mgr) +@@ -437,15 +434,22 @@ ipsec_mb_sym_session_configure( + struct ipsec_mb_dev_private *internals = dev->data->dev_private; + struct ipsec_mb_internals *pmd_data = + &ipsec_mb_pmds[internals->pmd_type]; +- IMB_MGR *mb_mgr = alloc_init_mb_mgr(); ++ struct ipsec_mb_qp *qp = dev->data->queue_pairs[0]; ++ IMB_MGR *mb_mgr; + int ret = 0; + ++ if (qp != NULL) ++ mb_mgr = qp->mb_mgr; ++ else ++ mb_mgr = alloc_init_mb_mgr(); ++ + if (!mb_mgr) + return -ENOMEM; + + if (unlikely(sess == NULL)) { + IPSEC_MB_LOG(ERR, "invalid session struct"); +- free_mb_mgr(mb_mgr); ++ if (qp == NULL) ++ free_mb_mgr(mb_mgr); + return -EINVAL; + } + +@@ -455,11 +459,13 @@ ipsec_mb_sym_session_configure( + IPSEC_MB_LOG(ERR, "failed configure session parameters"); + + /* Return session to mempool */ +- free_mb_mgr(mb_mgr); ++ if (qp == NULL) ++ free_mb_mgr(mb_mgr); + return ret; + } + +- 
free_mb_mgr(mb_mgr); ++ if (qp == NULL) ++ free_mb_mgr(mb_mgr); + return 0; + } + +diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c +index 50b789a29b..64f2b4b604 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c ++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c +@@ -170,7 +170,7 @@ ipsec_mb_create(struct rte_vdev_device *vdev, + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + retval = ipsec_mb_mp_request_register(); +- if (retval && (rte_errno == EEXIST)) ++ if (retval && ((rte_errno == EEXIST) || (rte_errno == ENOTSUP))) + /* Safe to proceed, return 0 */ + return 0; + +diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +index 147a38932d..ac20d01937 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c ++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +@@ -199,7 +199,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + } + } else if (xform->auth.key.length == 32) { + sess->auth.algo = IMB_AUTH_ZUC256_EIA3_BITLEN; +-#if IMB_VERSION(1, 2, 0) > IMB_VERSION_NUM ++#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + if (sess->auth.req_digest_len != 4 && + sess->auth.req_digest_len != 8 && + sess->auth.req_digest_len != 16) { +diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +index 55fafbbbec..8a7c74f621 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h ++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +@@ -566,7 +566,7 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = { + }, + .digest_size = { + .min = 4, +-#if IMB_VERSION(1, 2, 0) > IMB_VERSION_NUM ++#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + .max = 16, + .increment = 4 + #else +diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c +index 8ed069f428..e64df1a462 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c ++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c +@@ -372,9 +372,10 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, + /** Process a crypto op with length/offset in bits. 
*/ + static int + process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session, +- struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops) ++ struct ipsec_mb_qp *qp) + { +- uint32_t enqueued_op, processed_op; ++ unsigned int processed_op; ++ int ret; + + switch (session->op) { + case IPSEC_MB_OP_ENCRYPT_ONLY: +@@ -421,9 +422,10 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session, + + if (unlikely(processed_op != 1)) + return 0; +- enqueued_op = rte_ring_enqueue(qp->ingress_queue, op); +- qp->stats.enqueued_count += enqueued_op; +- *accumulated_enqueued_ops += enqueued_op; ++ ++ ret = rte_ring_enqueue(qp->ingress_queue, op); ++ if (ret != 0) ++ return ret; + + return 1; + } +@@ -439,7 +441,6 @@ snow3g_pmd_dequeue_burst(void *queue_pair, + struct snow3g_session *prev_sess = NULL, *curr_sess = NULL; + uint32_t i; + uint8_t burst_size = 0; +- uint16_t enqueued_ops = 0; + uint8_t processed_ops; + uint32_t nb_dequeued; + +@@ -479,8 +480,7 @@ snow3g_pmd_dequeue_burst(void *queue_pair, + prev_sess = NULL; + } + +- processed_ops = process_op_bit(curr_c_op, curr_sess, +- qp, &enqueued_ops); ++ processed_ops = process_op_bit(curr_c_op, curr_sess, qp); + if (processed_ops != 1) + break; + +diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +index 05449b6e98..6825b0469e 100644 +--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c ++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +@@ -696,7 +696,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, + algo = digest_name_get(xform->auth.algo); + if (!algo) + return -EINVAL; +- rte_memcpy(algo_name, algo, (sizeof(algo)+1)); ++ strlcpy(algo_name, algo, sizeof(algo_name)); + + mac = EVP_MAC_fetch(NULL, "HMAC", NULL); + sess->auth.hmac.ctx = EVP_MAC_CTX_new(mac); +@@ -1195,8 +1195,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, + int srclen, uint8_t *aad, int aadlen, uint8_t *iv, + uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) + { +- int len = 0, unused = 0; ++ int len = 0; ++#if OPENSSL_VERSION_NUMBER < 0x10100000L ++ int unused = 0; + uint8_t empty[] = {}; ++#endif + + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) + goto process_auth_encryption_gcm_err; +@@ -1210,9 +1213,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, + srclen, ctx, 0)) + goto process_auth_encryption_gcm_err; + ++#if OPENSSL_VERSION_NUMBER < 0x10100000L + /* Workaround open ssl bug in version less then 1.0.1f */ + if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0) + goto process_auth_encryption_gcm_err; ++#endif + + if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0) + goto process_auth_encryption_gcm_err; +@@ -1274,8 +1279,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, + int srclen, uint8_t *aad, int aadlen, uint8_t *iv, + uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) + { +- int len = 0, unused = 0; ++ int len = 0; ++#if OPENSSL_VERSION_NUMBER < 0x10100000L ++ int unused = 0; + uint8_t empty[] = {}; ++#endif + + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0) + goto process_auth_decryption_gcm_err; +@@ -1292,9 +1300,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, + srclen, ctx, 0)) + goto process_auth_decryption_gcm_err; + ++#if OPENSSL_VERSION_NUMBER < 0x10100000L + /* Workaround open ssl bug in version less then 1.0.1f */ + if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0) + goto 
process_auth_decryption_gcm_err; ++#endif + + if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0) + return -EFAULT; +@@ -1797,7 +1807,6 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, + # if OPENSSL_VERSION_NUMBER >= 0x30000000L + EVP_MAC_CTX *ctx_h; + EVP_MAC_CTX *ctx_c; +- EVP_MAC *mac; + # else + HMAC_CTX *ctx_h; + CMAC_CTX *ctx_c; +@@ -1818,10 +1827,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, + break; + case OPENSSL_AUTH_AS_HMAC: + # if OPENSSL_VERSION_NUMBER >= 0x30000000L +- mac = EVP_MAC_fetch(NULL, "HMAC", NULL); +- ctx_h = EVP_MAC_CTX_new(mac); + ctx_h = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); +- EVP_MAC_free(mac); + status = process_openssl_auth_mac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_h); +@@ -1836,10 +1842,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, + break; + case OPENSSL_AUTH_AS_CMAC: + # if OPENSSL_VERSION_NUMBER >= 0x30000000L +- mac = EVP_MAC_fetch(NULL, OSSL_MAC_NAME_CMAC, NULL); +- ctx_c = EVP_MAC_CTX_new(mac); + ctx_c = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); +- EVP_MAC_free(mac); + status = process_openssl_auth_mac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_c); +@@ -1927,7 +1930,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, + + if (EVP_PKEY_sign(dsa_ctx, dsa_sign_data, &outlen, op->message.data, + op->message.length) <= 0) { +- free(dsa_sign_data); ++ OPENSSL_free(dsa_sign_data); + goto err_dsa_sign; + } + +@@ -1935,7 +1938,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, + DSA_SIG *sign = d2i_DSA_SIG(NULL, &dsa_sign_data_p, outlen); + if (!sign) { + OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__); +- free(dsa_sign_data); ++ OPENSSL_free(dsa_sign_data); + goto err_dsa_sign; + } else { + const BIGNUM *r = NULL, *s = NULL; +@@ -1947,7 +1950,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, + } + + DSA_SIG_free(sign); +- free(dsa_sign_data); ++ OPENSSL_free(dsa_sign_data); + return 0; + + err_dsa_sign: +@@ -2633,7 +2636,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, + if (EVP_PKEY_verify_recover(rsa_ctx, tmp, &outlen, + op->rsa.sign.data, + op->rsa.sign.length) <= 0) { +- rte_free(tmp); ++ OPENSSL_free(tmp); + goto err_rsa; + } + +@@ -2645,7 +2648,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, + op->rsa.message.length)) { + OPENSSL_LOG(ERR, "RSA sign Verification failed"); + } +- rte_free(tmp); ++ OPENSSL_free(tmp); + break; + + default: +diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c +index 7f00f6097d..1f6f63c831 100644 +--- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c ++++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c +@@ -140,6 +140,9 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = { + QAT_SYM_CIPHER_CAP(SM4_CTR, + CAP_SET(block_size, 16), + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), ++ QAT_SYM_PLAIN_AUTH_CAP(SM3, ++ CAP_SET(block_size, 64), ++ CAP_RNG(digest_size, 32, 32, 0)), + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() + }; + +@@ -404,7 +407,7 @@ qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx, + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl, +- NULL, &auth_iv, &digest); ++ NULL, &auth_iv, &digest, op_cookie); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; +diff --git 
a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +index b1e5fa9a82..b219a418ba 100644 +--- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c ++++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +@@ -100,6 +100,9 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = { + QAT_SYM_CIPHER_CAP(SM4_CTR, + CAP_SET(block_size, 16), + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), ++ QAT_SYM_PLAIN_AUTH_CAP(SM3, ++ CAP_SET(block_size, 64), ++ CAP_RNG(digest_size, 32, 32, 0)), + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() + }; + +diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +index 524c291340..7972c7cfeb 100644 +--- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h ++++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +@@ -290,7 +290,8 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op, + struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl, + struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused, + struct rte_crypto_va_iova_ptr *auth_iv, +- struct rte_crypto_va_iova_ptr *digest) ++ struct rte_crypto_va_iova_ptr *digest, ++ struct qat_sym_op_cookie *cookie) + { + uint32_t auth_ofs = 0, auth_len = 0; + int n_src, ret; +@@ -355,7 +356,11 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op, + out_sgl->num = 0; + + digest->va = (void *)op->sym->auth.digest.data; +- digest->iova = op->sym->auth.digest.phys_addr; ++ ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) ++ digest->iova = cookie->digest_null_phys_addr; ++ else ++ digest->iova = op->sym->auth.digest.phys_addr; + + return 0; + } +@@ -366,7 +371,8 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *auth_iv_or_aad, +- struct rte_crypto_va_iova_ptr *digest) ++ struct rte_crypto_va_iova_ptr *digest, ++ struct qat_sym_op_cookie *cookie) + { + union rte_crypto_sym_ofs ofs; + uint32_t max_len = 0; +@@ -390,7 +396,11 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op, + ctx->auth_iv.offset); + digest->va = (void *)op->sym->auth.digest.data; +- digest->iova = op->sym->auth.digest.phys_addr; ++ ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) ++ digest->iova = cookie->digest_null_phys_addr; ++ else ++ digest->iova = op->sym->auth.digest.phys_addr; + + ret = qat_cipher_is_len_in_bits(ctx, op); + switch (ret) { +@@ -682,7 +692,8 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx, + while (remaining_off >= cvec->len && i >= 1) { + i--; + remaining_off -= cvec->len; +- cvec++; ++ if (i) ++ cvec++; + } + + auth_iova_end = cvec->iova + remaining_off; +diff --git a/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +index 91d5cfa71d..2709b0ab04 100644 +--- a/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c ++++ b/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +@@ -274,7 +274,7 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx, + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl, +- NULL, &auth_iv, &digest); ++ NULL, &auth_iv, &digest, op_cookie); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; +@@ -368,7 +368,7 @@ 
qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+- &cipher_iv, &auth_iv, &digest);
++ &cipher_iv, &auth_iv, &digest, cookie);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c
+index 08e92191a3..18f99089e8 100644
+--- a/dpdk/drivers/crypto/qat/qat_sym.c
++++ b/dpdk/drivers/crypto/qat/qat_sym.c
+@@ -51,6 +51,11 @@ qat_sym_init_op_cookie(void *op_cookie)
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ opt.spc_gmac.cd_cipher);
++
++ cookie->digest_null_phys_addr =
++ rte_mempool_virt2iova(cookie) +
++ offsetof(struct qat_sym_op_cookie,
++ digest_null);
+ }
+
+ static __rte_always_inline int
+@@ -179,7 +184,7 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+
+ int
+ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+- struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
++ struct qat_dev_cmd_param *qat_dev_cmd_param)
+ {
+ int i = 0, ret = 0;
+ uint16_t slice_map = 0;
+diff --git a/dpdk/drivers/crypto/qat/qat_sym.h b/dpdk/drivers/crypto/qat/qat_sym.h
+index 9a4251e08b..a45bddf848 100644
+--- a/dpdk/drivers/crypto/qat/qat_sym.h
++++ b/dpdk/drivers/crypto/qat/qat_sym.h
+@@ -116,6 +116,8 @@ struct qat_sym_op_cookie {
+ phys_addr_t cd_phys_addr;
+ } spc_gmac;
+ } opt;
++ uint8_t digest_null[4];
++ phys_addr_t digest_null_phys_addr;
+ };
+
+ struct qat_sym_dp_ctx {
+diff --git a/dpdk/drivers/crypto/qat/qat_sym_session.c b/dpdk/drivers/crypto/qat/qat_sym_session.c
+index 0ebc66f89e..52ed921db2 100644
+--- a/dpdk/drivers/crypto/qat/qat_sym_session.c
++++ b/dpdk/drivers/crypto/qat/qat_sym_session.c
+@@ -10,6 +10,7 @@
+ #include <openssl/evp.h> /* Needed for bpi runt block processing */
+
+ #ifdef RTE_QAT_LIBIPSECMB
++#define NO_COMPAT_IMB_API_053
+ #if defined(RTE_ARCH_ARM)
+ #include <ipsec-mb.h>
+ #else
+@@ -696,7 +697,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SM3:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SM3;
+- session->auth_mode = ICP_QAT_HW_AUTH_MODE2;
++ session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+@@ -1331,6 +1332,8 @@ static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
+ /* init ipad and opad from key and xor with fixed values */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
++ RTE_VERIFY(auth_keylen <= sizeof(ipad));
++ RTE_VERIFY(auth_keylen <= sizeof(opad));
+ rte_memcpy(ipad, auth_key, auth_keylen);
+ rte_memcpy(opad, auth_key, auth_keylen);
+
+@@ -1849,9 +1852,10 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+ || cdesc->qat_cipher_alg ==
+- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
++ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+- else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
++ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
++ } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+diff --git 
a/dpdk/drivers/crypto/scheduler/scheduler_pmd.c b/dpdk/drivers/crypto/scheduler/scheduler_pmd.c +index 9d1ce46622..4e8bbf0e09 100644 +--- a/dpdk/drivers/crypto/scheduler/scheduler_pmd.c ++++ b/dpdk/drivers/crypto/scheduler/scheduler_pmd.c +@@ -50,7 +50,8 @@ static const char * const scheduler_valid_params[] = { + RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG, + RTE_CRYPTODEV_VDEV_SOCKET_ID, + RTE_CRYPTODEV_VDEV_COREMASK, +- RTE_CRYPTODEV_VDEV_CORELIST ++ RTE_CRYPTODEV_VDEV_CORELIST, ++ NULL + }; + + struct scheduler_parse_map { +diff --git a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c +index d5a5f08ecc..8968bb853b 100644 +--- a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c ++++ b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c +@@ -117,6 +117,7 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle, + /* source */ + sdd->read_cmd.portid = rbp->sportid; + sdd->rbpcmd_simple.pfid = rbp->spfid; ++ sdd->rbpcmd_simple.vfa = rbp->vfa; + sdd->rbpcmd_simple.vfid = rbp->svfid; + + if (rbp->srbp) { +@@ -129,6 +130,7 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle, + /* destination */ + sdd->write_cmd.portid = rbp->dportid; + sdd->rbpcmd_simple.pfid = rbp->dpfid; ++ sdd->rbpcmd_simple.vfa = rbp->vfa; + sdd->rbpcmd_simple.vfid = rbp->dvfid; + + if (rbp->drbp) { +diff --git a/dpdk/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/dpdk/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h +index dc8acb4aec..5a8da46d12 100644 +--- a/dpdk/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h ++++ b/dpdk/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h +@@ -44,7 +44,9 @@ struct rte_dpaa2_qdma_rbp { + uint32_t svfid:6; + /* using route by port for source */ + uint32_t srbp:1; +- uint32_t rsv:4; ++ /* Virtual Function Active */ ++ uint32_t vfa:1; ++ uint32_t rsv:3; + }; + + /** Determines a QDMA job */ +diff --git a/dpdk/drivers/dma/ioat/ioat_dmadev.c b/dpdk/drivers/dma/ioat/ioat_dmadev.c +index 5906eb45aa..57c18c081d 100644 +--- a/dpdk/drivers/dma/ioat/ioat_dmadev.c ++++ b/dpdk/drivers/dma/ioat/ioat_dmadev.c +@@ -142,10 +142,20 @@ ioat_dev_start(struct rte_dma_dev *dev) + ioat->regs->chainaddr = ioat->ring_addr; + /* Inform hardware of where to write the status/completions. */ + ioat->regs->chancmp = ioat->status_addr; ++ /* Ensure channel control is set to abort on error, so we get status writeback. */ ++ ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN | ++ IOAT_CHANCTRL_ERR_COMPLETION_EN; + + /* Prime the status register to be set to the last element. 
*/ + ioat->status = ioat->ring_addr + ((ioat->qcfg.nb_desc - 1) * DESC_SZ); + ++ /* reset all counters */ ++ ioat->next_read = 0; ++ ioat->next_write = 0; ++ ioat->last_write = 0; ++ ioat->offset = 0; ++ ioat->failure = 0; ++ + printf("IOAT.status: %s [0x%"PRIx64"]\n", + chansts_readable[ioat->status & IOAT_CHANSTS_STATUS], + ioat->status); +@@ -166,17 +176,28 @@ static int + ioat_dev_stop(struct rte_dma_dev *dev) + { + struct ioat_dmadev *ioat = dev->fp_obj->dev_private; ++ unsigned int chansts; + uint32_t retry = 0; + +- ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND; ++ chansts = (unsigned int)(ioat->regs->chansts & IOAT_CHANSTS_STATUS); ++ if (chansts == IOAT_CHANSTS_ACTIVE || chansts == IOAT_CHANSTS_IDLE) ++ ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND; ++ else ++ ioat->regs->chancmd = IOAT_CHANCMD_RESET; + + do { + rte_pause(); + retry++; +- } while ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) != IOAT_CHANSTS_SUSPENDED +- && retry < 200); ++ chansts = (unsigned int)(ioat->regs->chansts & IOAT_CHANSTS_STATUS); ++ } while (chansts != IOAT_CHANSTS_SUSPENDED && ++ chansts != IOAT_CHANSTS_HALTED && retry < 200); + +- return ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED) ? 0 : -1; ++ if (chansts == IOAT_CHANSTS_SUSPENDED || chansts == IOAT_CHANSTS_HALTED) ++ return 0; ++ ++ IOAT_PMD_WARN("Channel could not be suspended on stop. (chansts = %u [%s])", ++ chansts, chansts_readable[chansts]); ++ return -1; + } + + /* Get device information of a device. */ +@@ -664,8 +685,6 @@ ioat_dmadev_create(const char *name, struct rte_pci_device *dev) + return -EIO; + } + } +- ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN | +- IOAT_CHANCTRL_ERR_COMPLETION_EN; + + dmadev->fp_obj->dev_private = ioat; + +diff --git a/dpdk/drivers/dma/skeleton/skeleton_dmadev.c b/dpdk/drivers/dma/skeleton/skeleton_dmadev.c +index 9b6da655fd..daf35eccce 100644 +--- a/dpdk/drivers/dma/skeleton/skeleton_dmadev.c ++++ b/dpdk/drivers/dma/skeleton/skeleton_dmadev.c +@@ -515,9 +515,15 @@ skeldma_parse_lcore(const char *key __rte_unused, + const char *value, + void *opaque) + { +- int lcore_id = atoi(value); ++ int lcore_id; ++ ++ if (value == NULL || opaque == NULL) ++ return -EINVAL; ++ ++ lcore_id = atoi(value); + if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE) + *(int *)opaque = lcore_id; ++ + return 0; + } + +diff --git a/dpdk/drivers/event/cnxk/cn10k_worker.h b/dpdk/drivers/event/cnxk/cn10k_worker.h +index 75a2ff244a..a93d40ed40 100644 +--- a/dpdk/drivers/event/cnxk/cn10k_worker.h ++++ b/dpdk/drivers/event/cnxk/cn10k_worker.h +@@ -100,9 +100,6 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id, + (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 
8 : 0); + struct rte_mbuf *mbuf = (struct rte_mbuf *)__mbuf; + +- /* Mark mempool obj as "get" as it is alloc'ed by NIX */ +- RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1); +- + cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag, + (struct rte_mbuf *)mbuf, lookup_mem, + mbuf_init | ((uint64_t)port_id) << 48, flags); +@@ -239,6 +236,10 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64, + + mbuf = u64[1] - sizeof(struct rte_mbuf); + rte_prefetch0((void *)mbuf); ++ ++ /* Mark mempool obj as "get" as it is alloc'ed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(((struct rte_mbuf *)mbuf)->pool, (void **)&mbuf, 1, 1); ++ + if (flags & NIX_RX_OFFLOAD_SECURITY_F) { + const uint64_t mbuf_init = + 0x100010000ULL | RTE_PKTMBUF_HEADROOM | +@@ -316,6 +317,9 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev, + ws->gw_rdata = gw.u64[0]; + if (gw.u64[1]) + cn10k_sso_hws_post_process(ws, gw.u64, flags); ++ else ++ gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 | ++ (gw.u64[0] & (0x3FFull << 36)) << 4 | (gw.u64[0] & 0xffffffff); + + ev->event = gw.u64[0]; + ev->u64 = gw.u64[1]; +diff --git a/dpdk/drivers/event/cnxk/cn9k_worker.h b/dpdk/drivers/event/cnxk/cn9k_worker.h +index 4c3932da47..0ccdb7baf3 100644 +--- a/dpdk/drivers/event/cnxk/cn9k_worker.h ++++ b/dpdk/drivers/event/cnxk/cn9k_worker.h +@@ -353,8 +353,10 @@ cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev, + #endif + + if (gw.u64[1]) +- cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem, +- tstamp); ++ cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem, tstamp); ++ else ++ gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 | ++ (gw.u64[0] & (0x3FFull << 36)) << 4 | (gw.u64[0] & 0xffffffff); + + ev->event = gw.u64[0]; + ev->u64 = gw.u64[1]; +@@ -730,6 +732,7 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base, + + rte_io_wmb(); + cn9k_sso_txq_fc_wait(txq); ++ cn9k_nix_sec_fc_wait_one(txq); + + /* Write CPT instruction to lmt line */ + vst1q_u64(lmt_addr, cmd01); +diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c +index db62d32a81..93e46e1b9b 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c ++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c +@@ -319,9 +319,9 @@ int + cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns, + uint64_t *tmo_ticks) + { +- RTE_SET_USED(event_dev); +- *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz()); ++ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + ++ *tmo_ticks = dev->deq_tmo_ns ? 
ns / dev->deq_tmo_ns : 0; + return 0; + } + +@@ -613,9 +613,8 @@ cnxk_sso_fini(struct rte_eventdev *event_dev) + + cnxk_tim_fini(); + roc_sso_rsrc_fini(&dev->sso); +- roc_sso_dev_fini(&dev->sso); + +- return 0; ++ return roc_sso_dev_fini(&dev->sso); + } + + int +diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c +index 5ec436382c..e78d215630 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c ++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c +@@ -635,6 +635,7 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused, + if (dev->tx_adptr_data_sz && dev->tx_adptr_active_mask == 0) { + dev->tx_adptr_data_sz = 0; + free(dev->tx_adptr_data); ++ dev->tx_adptr_data = NULL; + } + + return 0; +diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c b/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c +index 5dd79cbd47..c155764b77 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c ++++ b/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c +@@ -381,6 +381,7 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, + cnxk_sso_set_priv_mem_t priv_mem_fn) + { + struct cnxk_tim_evdev *dev = cnxk_tim_priv_get(); ++ struct cnxk_tim_ring *tim_ring; + + RTE_SET_USED(flags); + +@@ -403,6 +404,12 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, + dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev; + *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT | + RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC; ++ ++ tim_ring = ((struct rte_event_timer_adapter_data ++ *)((char *)caps - offsetof(struct rte_event_timer_adapter_data, caps))) ++ ->adapter_priv; ++ if (tim_ring != NULL && rte_eal_process_type() == RTE_PROC_SECONDARY) ++ cnxk_tim_set_fp_ops(tim_ring); + *ops = &cnxk_tim_ops; + + return 0; +diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_worker.h b/dpdk/drivers/event/cnxk/cnxk_tim_worker.h +index eda84c6f31..6be31f6f9d 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_tim_worker.h ++++ b/dpdk/drivers/event/cnxk/cnxk_tim_worker.h +@@ -270,7 +270,8 @@ __retry: + } while (hbt_state & BIT_ULL(33)); + #endif + +- if (!(hbt_state & BIT_ULL(34))) { ++ if (!(hbt_state & BIT_ULL(34)) || ++ !(hbt_state & GENMASK(31, 0))) { + cnxk_tim_bkt_dec_lock(bkt); + goto __retry; + } +@@ -352,7 +353,8 @@ __retry: + } while (hbt_state & BIT_ULL(33)); + #endif + +- if (!(hbt_state & BIT_ULL(34))) { ++ if (!(hbt_state & BIT_ULL(34)) || ++ !(hbt_state & GENMASK(31, 0))) { + cnxk_tim_bkt_dec_lock(bkt); + goto __retry; + } +@@ -449,10 +451,10 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, + struct cnxk_tim_ent *chunk = NULL; + struct cnxk_tim_bkt *mirr_bkt; + struct cnxk_tim_bkt *bkt; +- uint16_t chunk_remainder; ++ int16_t chunk_remainder; + uint16_t index = 0; + uint64_t lock_sema; +- int16_t rem, crem; ++ int16_t rem; + uint8_t lock_cnt; + + __retry: +@@ -460,31 +462,6 @@ __retry: + + /* Only one thread beyond this. 
*/ + lock_sema = cnxk_tim_bkt_inc_lock(bkt); +- lock_cnt = (uint8_t)((lock_sema >> TIM_BUCKET_W1_S_LOCK) & +- TIM_BUCKET_W1_M_LOCK); +- +- if (lock_cnt) { +- cnxk_tim_bkt_dec_lock(bkt); +-#ifdef RTE_ARCH_ARM64 +- asm volatile(PLT_CPU_FEATURE_PREAMBLE +- " ldxrb %w[lock_cnt], [%[lock]] \n" +- " tst %w[lock_cnt], 255 \n" +- " beq dne%= \n" +- " sevl \n" +- "rty%=: wfe \n" +- " ldxrb %w[lock_cnt], [%[lock]] \n" +- " tst %w[lock_cnt], 255 \n" +- " bne rty%= \n" +- "dne%=: \n" +- : [lock_cnt] "=&r"(lock_cnt) +- : [lock] "r"(&bkt->lock) +- : "memory"); +-#else +- while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED)) +- ; +-#endif +- goto __retry; +- } + + /* Bucket related checks. */ + if (unlikely(cnxk_tim_bkt_get_hbt(lock_sema))) { +@@ -509,21 +486,46 @@ __retry: + } while (hbt_state & BIT_ULL(33)); + #endif + +- if (!(hbt_state & BIT_ULL(34))) { ++ if (!(hbt_state & BIT_ULL(34)) || ++ !(hbt_state & GENMASK(31, 0))) { + cnxk_tim_bkt_dec_lock(bkt); + goto __retry; + } + } + } + ++ lock_cnt = (uint8_t)((lock_sema >> TIM_BUCKET_W1_S_LOCK) & ++ TIM_BUCKET_W1_M_LOCK); ++ if (lock_cnt) { ++ cnxk_tim_bkt_dec_lock(bkt); ++#ifdef RTE_ARCH_ARM64 ++ asm volatile(PLT_CPU_FEATURE_PREAMBLE ++ " ldxrb %w[lock_cnt], [%[lock]] \n" ++ " tst %w[lock_cnt], 255 \n" ++ " beq dne%= \n" ++ " sevl \n" ++ "rty%=: wfe \n" ++ " ldxrb %w[lock_cnt], [%[lock]] \n" ++ " tst %w[lock_cnt], 255 \n" ++ " bne rty%= \n" ++ "dne%=: \n" ++ : [lock_cnt] "=&r"(lock_cnt) ++ : [lock] "r"(&bkt->lock) ++ : "memory"); ++#else ++ while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED)) ++ ; ++#endif ++ goto __retry; ++ } ++ + chunk_remainder = cnxk_tim_bkt_fetch_rem(lock_sema); + rem = chunk_remainder - nb_timers; + if (rem < 0) { +- crem = tim_ring->nb_chunk_slots - chunk_remainder; +- if (chunk_remainder && crem) { ++ if (chunk_remainder > 0) { + chunk = ((struct cnxk_tim_ent *) + mirr_bkt->current_chunk) + +- crem; ++ tim_ring->nb_chunk_slots - chunk_remainder; + + index = cnxk_tim_cpy_wrk(index, chunk_remainder, chunk, + tim, ents, bkt); +@@ -537,18 +539,19 @@ __retry: + chunk = cnxk_tim_insert_chunk(bkt, mirr_bkt, tim_ring); + + if (unlikely(chunk == NULL)) { +- cnxk_tim_bkt_dec_lock(bkt); ++ cnxk_tim_bkt_dec_lock_relaxed(bkt); + rte_errno = ENOMEM; + tim[index]->state = RTE_EVENT_TIMER_ERROR; +- return crem; ++ return index; + } + *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0; + mirr_bkt->current_chunk = (uintptr_t)chunk; +- cnxk_tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt); ++ index = cnxk_tim_cpy_wrk(index, nb_timers, chunk, tim, ents, ++ bkt) - ++ index; + +- rem = nb_timers - chunk_remainder; +- cnxk_tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem); +- cnxk_tim_bkt_add_nent(bkt, rem); ++ cnxk_tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - index); ++ cnxk_tim_bkt_add_nent(bkt, index); + } else { + chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk; + chunk += (tim_ring->nb_chunk_slots - chunk_remainder); +diff --git a/dpdk/drivers/event/dsw/dsw_evdev.c b/dpdk/drivers/event/dsw/dsw_evdev.c +index ffabf0d23d..6c5cde2468 100644 +--- a/dpdk/drivers/event/dsw/dsw_evdev.c ++++ b/dpdk/drivers/event/dsw/dsw_evdev.c +@@ -363,6 +363,10 @@ static int + dsw_close(struct rte_eventdev *dev) + { + struct dsw_evdev *dsw = dsw_pmd_priv(dev); ++ uint16_t port_id; ++ ++ for (port_id = 0; port_id < dsw->num_ports; port_id++) ++ dsw_port_release(&dsw->ports[port_id]); + + dsw->num_ports = 0; + dsw->num_queues = 0; +diff --git a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c +index 
ba826f0f01..ff0015d8de 100644 +--- a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c ++++ b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c +@@ -9,6 +9,7 @@ + + #define BATCH_ALLOC_SZ ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS + #define BATCH_OP_DATA_TABLE_MZ_NAME "batch_op_data_table_mz" ++#define BATCH_ALLOC_WAIT_US 5 + + enum batch_op_status { + BATCH_ALLOC_OP_NOT_ISSUED = 0, +@@ -178,7 +179,7 @@ cn10k_mempool_get_count(const struct rte_mempool *mp) + + if (mem->status == BATCH_ALLOC_OP_ISSUED) + count += roc_npa_aura_batch_alloc_count( +- mem->objs, BATCH_ALLOC_SZ, 1); ++ mem->objs, BATCH_ALLOC_SZ, BATCH_ALLOC_WAIT_US); + + if (mem->status == BATCH_ALLOC_OP_DONE) + count += mem->sz; +diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c +index b3de490d36..753e86b4b2 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c ++++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c +@@ -1017,7 +1017,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, + .tx_free_thresh = 32, + .tx_rs_thresh = 32, + }; +- eth_dev->data->dev_conf.intr_conf.lsc = 1; + + dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; +@@ -5859,6 +5858,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) + + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; ++ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + + bp = eth_dev->data->dev_private; + +diff --git a/dpdk/drivers/net/bnxt/bnxt_rxq.c b/dpdk/drivers/net/bnxt/bnxt_rxq.c +index fabbbd4560..99758dd304 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_rxq.c ++++ b/dpdk/drivers/net/bnxt/bnxt_rxq.c +@@ -471,6 +471,12 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -EINVAL; + } + ++ /* reset the previous stats for the rx_queue since the counters ++ * will be cleared when the queue is started. ++ */ ++ memset(&bp->prev_rx_ring_stats[rx_queue_id], 0, ++ sizeof(struct bnxt_ring_stats)); ++ + /* Set the queue state to started here. + * We check the status of the queue while posting buffer. + * If queue is it started, we do not post buffers for Rx. +diff --git a/dpdk/drivers/net/bnxt/bnxt_rxr.c b/dpdk/drivers/net/bnxt/bnxt_rxr.c +index daaf9ffc1e..0eebddb05d 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_rxr.c ++++ b/dpdk/drivers/net/bnxt/bnxt_rxr.c +@@ -813,7 +813,6 @@ bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1, + + skip_mark: + mbuf->hash.fdir.hi = 0; +- mbuf->hash.fdir.id = 0; + + return 0; + } +diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c +index 67e016775c..21c2217092 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txr.c ++++ b/dpdk/drivers/net/bnxt/bnxt_txr.c +@@ -560,6 +560,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) + if (rc) + return rc; + ++ /* reset the previous stats for the tx_queue since the counters ++ * will be cleared when the queue is started. 
++ */ ++ memset(&bp->prev_tx_ring_stats[tx_queue_id], 0, ++ sizeof(struct bnxt_ring_stats)); ++ + bnxt_free_hwrm_tx_ring(bp, tx_queue_id); + rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id); + if (rc) +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_api.c b/dpdk/drivers/net/bonding/rte_eth_bond_api.c +index c0178369b4..85d0528b7c 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_api.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_api.c +@@ -712,6 +712,16 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, + } + } + ++ /* Remove the dedicated queues flow */ ++ if (internals->mode == BONDING_MODE_8023AD && ++ internals->mode4.dedicated_queues.enabled == 1 && ++ internals->mode4.dedicated_queues.flow[slave_port_id] != NULL) { ++ rte_flow_destroy(slave_port_id, ++ internals->mode4.dedicated_queues.flow[slave_port_id], ++ &flow_error); ++ internals->mode4.dedicated_queues.flow[slave_port_id] = NULL; ++ } ++ + slave_eth_dev = &rte_eth_devices[slave_port_id]; + slave_remove(internals, slave_eth_dev); + slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_args.c b/dpdk/drivers/net/bonding/rte_eth_bond_args.c +index 6553166f5c..c137efd55f 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_args.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_args.c +@@ -212,6 +212,12 @@ bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused, + if (*endptr != 0 || errno != 0) + return -1; + ++ /* SOCKET_ID_ANY also consider a valid socket id */ ++ if ((int8_t)socket_id == SOCKET_ID_ANY) { ++ *(int *)extra_args = SOCKET_ID_ANY; ++ return 0; ++ } ++ + /* validate socket id value */ + if (socket_id >= 0 && socket_id < RTE_MAX_NUMA_NODES) { + *(int *)extra_args = (int)socket_id; +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c +index b9bcebc6cb..8cd78ce1ed 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c +@@ -3362,7 +3362,7 @@ static int + bond_alloc(struct rte_vdev_device *dev, uint8_t mode) + { + const char *name = rte_vdev_device_name(dev); +- uint8_t socket_id = dev->device.numa_node; ++ int socket_id = dev->device.numa_node; + struct bond_dev_private *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + uint32_t vlan_filter_bmp_size; +@@ -3564,7 +3564,7 @@ bond_probe(struct rte_vdev_device *dev) + port_id = bond_alloc(dev, bonding_mode); + if (port_id < 0) { + RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on " +- "socket %u.", name, bonding_mode, socket_id); ++ "socket %d.", name, bonding_mode, socket_id); + goto parse_error; + } + internals = rte_eth_devices[port_id].data->dev_private; +@@ -3589,7 +3589,7 @@ bond_probe(struct rte_vdev_device *dev) + + rte_eth_dev_probing_finish(&rte_eth_devices[port_id]); + RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on " +- "socket %u.", name, port_id, bonding_mode, socket_id); ++ "socket %d.", name, port_id, bonding_mode, socket_id); + return 0; + + parse_error: +diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h +index 721127dddd..20384e64c7 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_rx.h ++++ b/dpdk/drivers/net/cnxk/cn10k_rx.h +@@ -1216,6 +1216,12 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, + mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0); + mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1); + ++ /* Mark mempool obj as "get" as it is alloc'ed by NIX */ ++ 
RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1); ++ + if (!(flags & NIX_RX_VWQE_F)) { + /* Mask to get packet len from NIX_RX_SG_S */ + const uint8x16_t shuf_msk = { +@@ -1716,12 +1722,6 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, + vst1q_u64((uint64_t *)&mbufs[packets], mbuf01); + vst1q_u64((uint64_t *)&mbufs[packets + 2], mbuf23); + +- /* Mark mempool obj as "get" as it is alloc'ed by NIX */ +- RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1); +- RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1); +- RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1); +- RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1); +- + nix_mbuf_validate_next(mbuf0); + nix_mbuf_validate_next(mbuf1); + nix_mbuf_validate_next(mbuf2); +diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h +index 815cd2ff1f..63cf6821d2 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_tx.h ++++ b/dpdk/drivers/net/cnxk/cn10k_tx.h +@@ -1696,10 +1696,12 @@ cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr, + vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2); + vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1); + } ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0); + } else { + /* Store the prepared send desc to LMT lines */ + vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0); + vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0); + } + } + +diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c b/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c +index 67966a4e49..327f221e38 100644 +--- a/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c ++++ b/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c +@@ -556,6 +556,7 @@ cn9k_eth_sec_session_create(void *device, + + if (!dev->outb.lf_base) { + plt_err("Could not allocate security session private data"); ++ rte_spinlock_unlock(lock); + return -ENOMEM; + } + +diff --git a/dpdk/drivers/net/cnxk/cn9k_rx.h b/dpdk/drivers/net/cnxk/cn9k_rx.h +index 1a9f920b41..0e23609df5 100644 +--- a/dpdk/drivers/net/cnxk/cn9k_rx.h ++++ b/dpdk/drivers/net/cnxk/cn9k_rx.h +@@ -260,8 +260,8 @@ nix_rx_sec_mbuf_err_update(const union nix_rx_parse_u *rx, uint16_t res, + } + + static __rte_always_inline uint64_t +-nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m, +- uintptr_t sa_base, uint64_t *rearm_val, uint16_t *len) ++nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m, uintptr_t sa_base, ++ uint64_t *rearm_val, uint16_t *len, uint32_t packet_type) + { + uintptr_t res_sg0 = ((uintptr_t)cq + ROC_ONF_IPSEC_INB_RES_OFF - 8); + const union nix_rx_parse_u *rx = +@@ -315,15 +315,18 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m, + ip = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ + + ROC_ONF_IPSEC_INB_MAX_L2_SZ); + ++ packet_type = (packet_type & ~(RTE_PTYPE_L3_MASK | RTE_PTYPE_TUNNEL_MASK)); + if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) == + IPVERSION) { + *len = rte_be_to_cpu_16(ip->total_length) + lcptr; ++ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + } else { + PLT_ASSERT(((ip->version_ihl & 0xf0) >> + RTE_IPV4_IHL_MULTIPLIER) == 6); + ip6 = (struct rte_ipv6_hdr *)ip; + *len = rte_be_to_cpu_16(ip6->payload_len) + + sizeof(struct rte_ipv6_hdr) + lcptr; ++ packet_type |= 
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + } + + /* Update data offset */ +@@ -332,6 +335,7 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m, + *rearm_val = *rearm_val & ~(BIT_ULL(16) - 1); + *rearm_val |= data_off; + ++ m->packet_type = packet_type; + return RTE_MBUF_F_RX_SEC_OFFLOAD; + } + +@@ -363,14 +367,7 @@ cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, + /* Get SA Base from lookup mem */ + sa_base = cnxk_nix_sa_base_get(port, lookup_mem); + +- ol_flags |= nix_rx_sec_mbuf_update(cq, mbuf, sa_base, &val, +- &len); +- +- /* Only Tunnel inner IPv4 is supported */ +- packet_type = (packet_type & +- ~(RTE_PTYPE_L3_MASK | RTE_PTYPE_TUNNEL_MASK)); +- packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; +- mbuf->packet_type = packet_type; ++ ol_flags |= nix_rx_sec_mbuf_update(cq, mbuf, sa_base, &val, &len, packet_type); + goto skip_parse; + } + +diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.h b/dpdk/drivers/net/cnxk/cn9k_tx.h +index 404edd6aed..7362025a34 100644 +--- a/dpdk/drivers/net/cnxk/cn9k_tx.h ++++ b/dpdk/drivers/net/cnxk/cn9k_tx.h +@@ -388,6 +388,16 @@ cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags) + roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags)); + } + ++static __rte_always_inline void ++cn9k_nix_sec_fc_wait_one(const struct cn9k_eth_txq *txq) ++{ ++ uint64_t nb_desc = txq->cpt_desc; ++ uint64_t *fc = txq->cpt_fc; ++ ++ while (nb_desc <= __atomic_load_n(fc, __ATOMIC_RELAXED)) ++ ; ++} ++ + static __rte_always_inline uint64_t + cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr) + { +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c +index bf1585fe67..f9245258cb 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c +@@ -884,6 +884,27 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, + return flowkey_cfg; + } + ++static int ++nix_rxchan_cfg_disable(struct cnxk_eth_dev *dev) ++{ ++ struct roc_nix *nix = &dev->nix; ++ struct roc_nix_fc_cfg fc_cfg; ++ int rc; ++ ++ if (!roc_nix_is_lbk(nix)) ++ return 0; ++ ++ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg)); ++ fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG; ++ fc_cfg.rxchan_cfg.enable = false; ++ rc = roc_nix_fc_config_set(nix, &fc_cfg); ++ if (rc) { ++ plt_err("Failed to setup flow control, rc=%d(%s)", rc, roc_error_msg_get(rc)); ++ return rc; ++ } ++ return 0; ++} ++ + static void + nix_free_queue_mem(struct cnxk_eth_dev *dev) + { +@@ -1202,6 +1223,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) + goto fail_configure; + + roc_nix_tm_fini(nix); ++ nix_rxchan_cfg_disable(dev); + roc_nix_lf_free(nix); + } + +@@ -1431,6 +1453,7 @@ tm_fini: + roc_nix_tm_fini(nix); + free_nix_lf: + nix_free_queue_mem(dev); ++ rc |= nix_rxchan_cfg_disable(dev); + rc |= roc_nix_lf_free(nix); + fail_configure: + dev->configured = 0; +@@ -1980,6 +2003,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) + /* Free ROC RQ's, SQ's and CQ's memory */ + nix_free_queue_mem(dev); + ++ /* free nix bpid */ ++ rc = nix_rxchan_cfg_disable(dev); ++ if (rc) ++ plt_err("Failed to free nix bpid, rc=%d", rc); ++ + /* Free nix lf resources */ + rc = roc_nix_lf_free(nix); + if (rc) +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.h b/dpdk/drivers/net/cnxk/cnxk_ethdev.h +index 651ef45ea8..83d6e9398f 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev.h ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.h +@@ -494,6 +494,7 @@ int cnxk_nix_probe(struct rte_pci_driver *pci_drv, + struct rte_pci_device *pci_dev); + int 
cnxk_nix_remove(struct rte_pci_device *pci_dev); + int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); ++int cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev); + int cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_cman.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_cman.c +index d5e647c64d..a7ccdfb756 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_cman.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_cman.c +@@ -68,6 +68,11 @@ nix_cman_config_validate(struct rte_eth_dev *eth_dev, const struct rte_eth_cman_ + return -EINVAL; + } + ++ if (config->mode_param.red.min_th > config->mode_param.red.max_th) { ++ plt_err("RED minimum threshold must be less or equal to maximum threshold"); ++ return -EINVAL; ++ } ++ + return 0; + } + +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c +index 8f7287161b..7a7478cda8 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c +@@ -463,6 +463,44 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) + dev->dmac_filter_count--; + } + ++int ++cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev) ++{ ++ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); ++ struct rte_eth_dev_data *data = eth_dev->data; ++ int i, rc = 0; ++ ++ /* Flush all tx queues */ ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { ++ struct roc_nix_sq *sq = &dev->sqs[i]; ++ ++ if (eth_dev->data->tx_queues[i] == NULL) ++ continue; ++ ++ rc = roc_nix_tm_sq_aura_fc(sq, false); ++ if (rc) { ++ plt_err("Failed to disable sqb aura fc, rc=%d", rc); ++ goto exit; ++ } ++ ++ /* Wait for sq entries to be flushed */ ++ rc = roc_nix_tm_sq_flush_spin(sq); ++ if (rc) { ++ plt_err("Failed to drain sq, rc=%d\n", rc); ++ goto exit; ++ } ++ if (data->tx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED) { ++ rc = roc_nix_tm_sq_aura_fc(sq, true); ++ if (rc) { ++ plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", i, rc); ++ goto exit; ++ } ++ } ++ } ++exit: ++ return rc; ++} ++ + int + cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + { +@@ -506,6 +544,15 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + goto exit; + } + ++ /* if new MTU was smaller than old one, then flush all SQs before MTU change */ ++ if (old_frame_size > frame_size) { ++ if (data->dev_started) { ++ plt_err("Reducing MTU is not supported when device started"); ++ goto exit; ++ } ++ cnxk_nix_sq_flush(eth_dev); ++ } ++ + frame_size -= RTE_ETHER_CRC_LEN; + + /* Update mtu on Tx */ +diff --git a/dpdk/drivers/net/cnxk/cnxk_flow.c b/dpdk/drivers/net/cnxk/cnxk_flow.c +index 6d155d924c..422c5d74df 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_flow.c ++++ b/dpdk/drivers/net/cnxk/cnxk_flow.c +@@ -115,14 +115,15 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + struct roc_npc_action in_actions[], uint32_t *flowkey_cfg) + { + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); ++ const struct rte_flow_action_queue *act_q = NULL; + const struct rte_flow_action_ethdev *act_ethdev; + const struct rte_flow_action_port_id *port_act; +- const struct rte_flow_action_queue *act_q; + struct roc_npc *roc_npc_src = &dev->npc; + struct rte_eth_dev *portid_eth_dev; + char if_name[RTE_ETH_NAME_MAX_LEN]; + struct cnxk_eth_dev *hw_dst; + struct roc_npc *roc_npc_dst; ++ bool is_vf_action = false; + int i = 0, rc = 0; + int rq; + +@@ -156,6 +157,7 @@ cnxk_map_actions(struct rte_eth_dev 
*eth_dev, const struct rte_flow_attr *attr, + case RTE_FLOW_ACTION_TYPE_VF: + in_actions[i].type = ROC_NPC_ACTION_TYPE_VF; + in_actions[i].conf = actions->conf; ++ is_vf_action = true; + break; + + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: +@@ -193,13 +195,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: +- act_q = (const struct rte_flow_action_queue *) +- actions->conf; +- rq = act_q->index; +- if (rq >= eth_dev->data->nb_rx_queues) { +- plt_npc_dbg("Invalid queue index"); +- goto err_exit; +- } ++ act_q = (const struct rte_flow_action_queue *)actions->conf; + in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE; + in_actions[i].conf = actions->conf; + break; +@@ -245,6 +241,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + } + i++; + } ++ ++ if (!is_vf_action && act_q) { ++ rq = act_q->index; ++ if (rq >= eth_dev->data->nb_rx_queues) { ++ plt_npc_dbg("Invalid queue index"); ++ goto err_exit; ++ } ++ } + in_actions[i].type = ROC_NPC_ACTION_TYPE_END; + return 0; + +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c +index f60e78e1fd..85910bbd8f 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c +@@ -198,8 +198,12 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; +- else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; ++ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT | + L3_IP_1_MORE_FRAGMENT | +@@ -241,8 +245,12 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; +- else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; ++ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + + if (dpaa2_enable_ts[mbuf->port]) { + *dpaa2_timestamp_dynfield(mbuf) = annotation->word2; +diff --git a/dpdk/drivers/net/e1000/em_ethdev.c b/dpdk/drivers/net/e1000/em_ethdev.c +index 8ee9be12ad..18efa78ac3 100644 +--- a/dpdk/drivers/net/e1000/em_ethdev.c ++++ b/dpdk/drivers/net/e1000/em_ethdev.c +@@ -1073,8 +1073,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + * To avoid it we support just one RX queue for now (no RSS). + */ + +- dev_info->max_rx_queues = 1; +- dev_info->max_tx_queues = 1; ++ dev_info->max_rx_queues = 2; ++ dev_info->max_tx_queues = 2; + + dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(); + dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() | +diff --git a/dpdk/drivers/net/e1000/em_rxtx.c b/dpdk/drivers/net/e1000/em_rxtx.c +index d48fd52404..cb5ce2307b 100644 +--- a/dpdk/drivers/net/e1000/em_rxtx.c ++++ b/dpdk/drivers/net/e1000/em_rxtx.c +@@ -1030,6 +1030,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + * - RX port identifier, + * - hardware offload data, if any: + * - IP checksum flag, ++ * - VLAN TCI, if any, + * - error flags. 
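
/*
 * The dpaa2 hunks above turn an "else if" into two independent
 * if/else pairs so every received packet reports both an L3 and an L4
 * checksum status (GOOD or BAD) instead of at most one of the two.
 * A minimal standalone sketch of that pattern; the bit values below
 * are local stand-ins, not the real DPAA2/rte_mbuf constants:
 */
#include <stdint.h>
#include <stdio.h>

#define FAS_L3CE (1ULL << 0)          /* stand-in for DPAA2_ETH_FAS_L3CE */
#define FAS_L4CE (1ULL << 1)          /* stand-in for DPAA2_ETH_FAS_L4CE */

#define RX_IP_CKSUM_GOOD (1ULL << 2)
#define RX_IP_CKSUM_BAD  (1ULL << 3)
#define RX_L4_CKSUM_GOOD (1ULL << 4)
#define RX_L4_CKSUM_BAD  (1ULL << 5)

uint64_t parse_cksum_flags(uint64_t fas)
{
	uint64_t ol = 0;

	/* L3 and L4 statuses are orthogonal: always report both. */
	if (fas & FAS_L3CE)
		ol |= RX_IP_CKSUM_BAD;
	else
		ol |= RX_IP_CKSUM_GOOD;

	if (fas & FAS_L4CE)
		ol |= RX_L4_CKSUM_BAD;
	else
		ol |= RX_L4_CKSUM_GOOD;

	return ol;
}

int main(void)
{
	/* Bad L3, good L4: with the old "else if" only the L3 flag was set. */
	printf("0x%llx\n", (unsigned long long)parse_cksum_flags(FAS_L3CE));
	return 0;
}
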
+ */ + first_seg->port = rxq->port_id; +@@ -1039,7 +1040,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + rx_desc_error_to_pkt_flags(rxd.errors); + + /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */ +- rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); ++ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.special); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + +diff --git a/dpdk/drivers/net/e1000/igb_rxtx.c b/dpdk/drivers/net/e1000/igb_rxtx.c +index f32dee46df..1d23e081b6 100644 +--- a/dpdk/drivers/net/e1000/igb_rxtx.c ++++ b/dpdk/drivers/net/e1000/igb_rxtx.c +@@ -1853,6 +1853,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + igb_tx_queue_release_mbufs(txq); + igb_reset_tx_queue(txq, dev); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1861,6 +1862,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + igb_rx_queue_release_mbufs(rxq); + igb_reset_rx_queue(rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + } +@@ -2441,6 +2443,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) + rxdctl |= ((rxq->hthresh & 0x1F) << 8); + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { +@@ -2605,6 +2608,7 @@ eth_igb_tx_init(struct rte_eth_dev *dev) + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + /* Program the Transmit Control Register. 
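
/*
 * The igb hunks above make the PMD record per-queue state
 * (STARTED/STOPPED) whenever queues are initialised or cleared, so
 * that queue-state queries reflect reality.  A stripped-down sketch
 * of that bookkeeping (types and helpers are hypothetical):
 */
#include <stdio.h>

enum queue_state { QUEUE_STATE_STOPPED = 0, QUEUE_STATE_STARTED = 1 };

struct dev_data {
	enum queue_state rx_queue_state[4];
	unsigned int nb_rx_queues;
};

static void rx_init(struct dev_data *data)
{
	unsigned int i;

	for (i = 0; i < data->nb_rx_queues; i++) {
		/* ... program the queue's RXDCTL register here ... */
		data->rx_queue_state[i] = QUEUE_STATE_STARTED;
	}
}

static void clear_queues(struct dev_data *data)
{
	unsigned int i;

	for (i = 0; i < data->nb_rx_queues; i++) {
		/* ... release mbufs and reset the ring here ... */
		data->rx_queue_state[i] = QUEUE_STATE_STOPPED;
	}
}

int main(void)
{
	struct dev_data data = { .nb_rx_queues = 4 };

	rx_init(&data);
	clear_queues(&data);
	printf("queue 0 state: %d\n", data.rx_queue_state[0]);
	return 0;
}
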
*/ +diff --git a/dpdk/drivers/net/ena/ena_rss.c b/dpdk/drivers/net/ena/ena_rss.c +index b682d01c20..d0ba9d5c0a 100644 +--- a/dpdk/drivers/net/ena/ena_rss.c ++++ b/dpdk/drivers/net/ena/ena_rss.c +@@ -105,6 +105,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev, + if (unlikely(rc != 0)) { + PMD_DRV_LOG(ERR, + "Cannot fill indirection table\n"); ++ rte_spinlock_unlock(&adapter->admin_lock); + return rc; + } + } +diff --git a/dpdk/drivers/net/gve/gve_ethdev.c b/dpdk/drivers/net/gve/gve_ethdev.c +index 97781f0ed3..e357f16e16 100644 +--- a/dpdk/drivers/net/gve/gve_ethdev.c ++++ b/dpdk/drivers/net/gve/gve_ethdev.c +@@ -282,7 +282,6 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->rx_offload_capa = 0; + dev_info->tx_offload_capa = + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | +- RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | +diff --git a/dpdk/drivers/net/hns3/hns3_cmd.h b/dpdk/drivers/net/hns3/hns3_cmd.h +index 994dfc48cc..eb394c9dec 100644 +--- a/dpdk/drivers/net/hns3/hns3_cmd.h ++++ b/dpdk/drivers/net/hns3/hns3_cmd.h +@@ -606,6 +606,7 @@ struct hns3_rss_input_tuple_cmd { + #define HNS3_RSS_CFG_TBL_SIZE_H 4 + #define HNS3_RSS_CFG_TBL_BW_H 2 + #define HNS3_RSS_CFG_TBL_BW_L 8 ++#define HNS3_RSS_CFG_TBL_BW_H_M 0x3 + + /* Configure the indirection table, opcode:0x0D07 */ + struct hns3_rss_indirection_table_cmd { +diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c +index 7adc6a4972..f077ef5057 100644 +--- a/dpdk/drivers/net/hns3/hns3_common.c ++++ b/dpdk/drivers/net/hns3/hns3_common.c +@@ -10,6 +10,7 @@ + #include "hns3_logs.h" + #include "hns3_regs.h" + #include "hns3_rxtx.h" ++#include "hns3_dcb.h" + #include "hns3_common.h" + + int +@@ -90,10 +91,11 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) + info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; + ++ info->dev_capa = RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP | ++ RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP; + if (hns3_dev_get_support(hw, INDEP_TXRX)) +- info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | +- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +- info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; ++ info->dev_capa |= RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | ++ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + if (hns3_dev_get_support(hw, PTP)) + info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; +@@ -128,7 +130,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + }; + + info->reta_size = hw->rss_ind_tbl_size; +- info->hash_key_size = HNS3_RSS_KEY_SIZE; ++ info->hash_key_size = hw->rss_key_size; + info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + + info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; +@@ -161,6 +163,9 @@ hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) + + RTE_SET_USED(key); + ++ if (value == NULL || extra_args == NULL) ++ return 0; ++ + if (strcmp(value, "vec") == 0) + hint = HNS3_IO_FUNC_HINT_VEC; + else if (strcmp(value, "sve") == 0) +@@ -201,6 +206,9 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) + + RTE_SET_USED(key); + ++ if (value == NULL || extra_args == NULL) ++ return 0; ++ + val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL); + *(uint64_t *)extra_args = val; + +@@ -214,6 +222,9 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void 
*extra_args) + + RTE_SET_USED(key); + ++ if (value == NULL || extra_args == NULL) ++ return 0; ++ + val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL); + + /* +@@ -845,3 +856,87 @@ hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id) + + return 0; + } ++ ++void ++hns3_set_default_dev_specifications(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ ++ hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; ++ hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; ++ hw->rss_key_size = HNS3_RSS_KEY_SIZE; ++ hw->intr.int_ql_max = HNS3_INTR_QL_NONE; ++ ++ if (hns->is_vf) ++ return; ++ ++ hw->max_tm_rate = HNS3_ETHER_MAX_RATE; ++} ++ ++static void ++hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ struct hns3_dev_specs_0_cmd *req0; ++ struct hns3_dev_specs_1_cmd *req1; ++ ++ req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; ++ req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data; ++ ++ hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; ++ hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); ++ hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); ++ hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); ++ hw->min_tx_pkt_len = req1->min_tx_pkt_len; ++ ++ if (hns->is_vf) ++ return; ++ ++ hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); ++} ++ ++static int ++hns3_check_dev_specifications(struct hns3_hw *hw) ++{ ++ if (hw->rss_ind_tbl_size == 0 || ++ hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { ++ hns3_err(hw, "the indirection table size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)", ++ hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX); ++ return -EINVAL; ++ } ++ ++ if (hw->rss_key_size == 0 || hw->rss_key_size > HNS3_RSS_KEY_SIZE_MAX) { ++ hns3_err(hw, "the RSS key size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)", ++ hw->rss_key_size, HNS3_RSS_KEY_SIZE_MAX); ++ return -EINVAL; ++ } ++ ++ if (hw->rss_key_size > HNS3_RSS_KEY_SIZE) ++ hns3_warn(hw, "the RSS key size obtained (%u) is greater than the default key size (%u)", ++ hw->rss_key_size, HNS3_RSS_KEY_SIZE); ++ ++ return 0; ++} ++ ++int ++hns3_query_dev_specifications(struct hns3_hw *hw) ++{ ++ struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; ++ int ret; ++ int i; ++ ++ for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { ++ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, ++ true); ++ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); ++ } ++ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); ++ ++ ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); ++ if (ret) ++ return ret; ++ ++ hns3_parse_dev_specifications(hw, desc); ++ ++ return hns3_check_dev_specifications(hw); ++} +diff --git a/dpdk/drivers/net/hns3/hns3_common.h b/dpdk/drivers/net/hns3/hns3_common.h +index 5aa001f0cc..8eaeda26e7 100644 +--- a/dpdk/drivers/net/hns3/hns3_common.h ++++ b/dpdk/drivers/net/hns3/hns3_common.h +@@ -60,5 +60,7 @@ void hns3_unmap_rx_interrupt(struct rte_eth_dev *dev); + int hns3_restore_rx_interrupt(struct hns3_hw *hw); + + int hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id); ++void hns3_set_default_dev_specifications(struct hns3_hw *hw); ++int hns3_query_dev_specifications(struct hns3_hw *hw); + + #endif /* HNS3_COMMON_H */ +diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c +index af045b22f7..07b8c46a81 100644 +--- 
a/dpdk/drivers/net/hns3/hns3_dcb.c ++++ b/dpdk/drivers/net/hns3/hns3_dcb.c +@@ -237,9 +237,9 @@ hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr) + static int + hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw) + { +-#define DEFAULT_TC_WEIGHT 1 + #define DEFAULT_TC_OFFSET 14 + struct hns3_ets_tc_weight_cmd *ets_weight; ++ struct hns3_pg_info *pg_info; + struct hns3_cmd_desc desc; + uint8_t i; + +@@ -247,13 +247,6 @@ hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw) + ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { +- struct hns3_pg_info *pg_info; +- +- ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT; +- +- if (!(hw->hw_tc_map & BIT(i))) +- continue; +- + pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c +index d326f70129..b9a848540b 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev.c ++++ b/dpdk/drivers/net/hns3/hns3_ethdev.c +@@ -15,6 +15,7 @@ + #include "hns3_dcb.h" + #include "hns3_mp.h" + #include "hns3_flow.h" ++#include "hns3_ptp.h" + #include "hns3_ethdev.h" + + #define HNS3_SERVICE_INTERVAL 1000000 /* us */ +@@ -43,6 +44,7 @@ + #define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U + #define HNS3_VECTOR0_IMP_RD_POISON_B 5U + #define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U ++#define HNS3_VECTOR0_TRIGGER_IMP_RESET_B 7U + + #define HNS3_RESET_WAIT_MS 100 + #define HNS3_RESET_WAIT_CNT 200 +@@ -60,6 +62,13 @@ enum hns3_evt_cause { + HNS3_VECTOR0_EVENT_OTHER, + }; + ++#define HNS3_SPEEDS_SUPP_FEC (RTE_ETH_LINK_SPEED_10G | \ ++ RTE_ETH_LINK_SPEED_25G | \ ++ RTE_ETH_LINK_SPEED_40G | \ ++ RTE_ETH_LINK_SPEED_50G | \ ++ RTE_ETH_LINK_SPEED_100G | \ ++ RTE_ETH_LINK_SPEED_200G) ++ + static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { + { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | + RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | +@@ -83,8 +92,7 @@ static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { + RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | + RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, + +- { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | +- RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | ++ { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | + RTE_ETH_FEC_MODE_CAPA_MASK(RS) } + }; + +@@ -286,6 +294,19 @@ hns3_handle_mac_tnl(struct hns3_hw *hw) + } + } + ++static void ++hns3_delay_before_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) ++{ ++#define IMPRESET_WAIT_MS_TIME 5 ++ ++ if (event_type == HNS3_VECTOR0_EVENT_RST && ++ regclr & BIT(HNS3_VECTOR0_IMPRESET_INT_B) && ++ hw->revision >= PCI_REVISION_ID_HIP09_A) { ++ rte_delay_ms(IMPRESET_WAIT_MS_TIME); ++ hns3_dbg(hw, "wait firmware watchdog initialization completed."); ++ } ++} ++ + static void + hns3_interrupt_handler(void *param) + { +@@ -305,6 +326,7 @@ hns3_interrupt_handler(void *param) + vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); + ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); + cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); ++ hns3_delay_before_clear_event_cause(hw, event_cause, clearval); + hns3_clear_event_cause(hw, event_cause, clearval); + /* vector 0 interrupt is shared with reset and mailbox source events. 
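
/*
 * hns3_delay_before_clear_event_cause(), added above, waits a few
 * milliseconds before acking an IMP-reset interrupt so the firmware
 * watchdog can finish initialising, but only on HIP09-or-newer parts.
 * The guard reduced to a sketch (constants and the sleep helper are
 * hypothetical stand-ins):
 */
#include <stdint.h>

#define EVENT_RST         1u
#define IMPRESET_INT_BIT  (1u << 1)
#define REV_HIP09_A       0x30u

static void delay_ms(unsigned int ms) { (void)ms; /* platform sleep stub */ }

void maybe_wait_fw_watchdog(uint32_t event, uint32_t regclr, uint32_t rev)
{
	/* Only the IMP-reset cause on new silicon needs the grace period. */
	if (event == EVENT_RST && (regclr & IMPRESET_INT_BIT) != 0 &&
	    rev >= REV_HIP09_A)
		delay_ms(5);
}
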
*/ + if (event_cause == HNS3_VECTOR0_EVENT_ERR) { +@@ -2257,6 +2279,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) + struct rte_eth_link new_link; + int ret; + ++ memset(&new_link, 0, sizeof(new_link)); + /* When port is stopped, report link down. */ + if (eth_dev->data->dev_started == 0) { + new_link.link_autoneg = mac->link_autoneg; +@@ -2280,7 +2303,6 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) + rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); + } while (retry_cnt--); + +- memset(&new_link, 0, sizeof(new_link)); + hns3_setup_linkstatus(eth_dev, &new_link); + + out: +@@ -2647,69 +2669,6 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) + return 0; + } + +-static void +-hns3_set_default_dev_specifications(struct hns3_hw *hw) +-{ +- hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; +- hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; +- hw->rss_key_size = HNS3_RSS_KEY_SIZE; +- hw->max_tm_rate = HNS3_ETHER_MAX_RATE; +- hw->intr.int_ql_max = HNS3_INTR_QL_NONE; +-} +- +-static void +-hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) +-{ +- struct hns3_dev_specs_0_cmd *req0; +- struct hns3_dev_specs_1_cmd *req1; +- +- req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; +- req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data; +- +- hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; +- hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); +- hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); +- hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); +- hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); +- hw->min_tx_pkt_len = req1->min_tx_pkt_len; +-} +- +-static int +-hns3_check_dev_specifications(struct hns3_hw *hw) +-{ +- if (hw->rss_ind_tbl_size == 0 || +- hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { +- hns3_err(hw, "the size of hash lookup table configured (%u) exceeds the maximum(%u)", +- hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX); +- return -EINVAL; +- } +- +- return 0; +-} +- +-static int +-hns3_query_dev_specifications(struct hns3_hw *hw) +-{ +- struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; +- int ret; +- int i; +- +- for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, +- true); +- desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- } +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); +- +- ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); +- if (ret) +- return ret; +- +- hns3_parse_dev_specifications(hw, desc); +- +- return hns3_check_dev_specifications(hw); +-} +- + static int + hns3_get_capability(struct hns3_hw *hw) + { +@@ -3677,7 +3636,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) + + if (cmdq_resp) { + PMD_INIT_LOG(ERR, +- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", ++ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.", + cmdq_resp); + return -EIO; + } +@@ -4451,6 +4410,12 @@ hns3_init_hardware(struct hns3_adapter *hns) + goto err_mac_init; + } + ++ ret = hns3_ptp_init(hw); ++ if (ret) { ++ PMD_INIT_LOG(ERR, "Failed to init PTP, ret = %d", ret); ++ goto err_mac_init; ++ } ++ + return 0; + + err_mac_init: +@@ -4630,10 +4595,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) + goto err_intr_callback_register; + } + +- ret = hns3_ptp_init(hw); +- if (ret) +- goto err_get_config; +- + /* Enable interrupt */ + rte_intr_enable(pci_dev->intr_handle); + hns3_pf_enable_irq0(hw); +@@ -4690,6 
+4651,7 @@ err_enable_intr: + hns3_fdir_filter_uninit(hns); + err_fdir: + hns3_uninit_umv_space(hw); ++ hns3_ptp_uninit(hw); + err_init_hw: + hns3_stats_uninit(hw); + err_get_config: +@@ -4725,6 +4687,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) + hns3_flow_uninit(eth_dev); + hns3_fdir_filter_uninit(hns); + hns3_uninit_umv_space(hw); ++ hns3_ptp_uninit(hw); + hns3_stats_uninit(hw); + hns3_config_mac_tnl_int(hw, false); + hns3_pf_disable_irq0(hw); +@@ -5115,8 +5078,7 @@ hns3_dev_start(struct rte_eth_dev *dev) + rte_spinlock_unlock(&hw->lock); + + hns3_rx_scattered_calc(dev); +- hns3_set_rxtx_function(dev); +- hns3_mp_req_start_rxtx(dev); ++ hns3_start_rxtx_datapath(dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); +@@ -5194,12 +5156,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) + dev->data->dev_started = 0; + + hw->adapter_state = HNS3_NIC_STOPPING; +- hns3_set_rxtx_function(dev); +- rte_wmb(); +- /* Disable datapath on secondary process. */ +- hns3_mp_req_stop_rxtx(dev); +- /* Prevent crashes when queues are still in use. */ +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(dev); + + rte_spinlock_lock(&hw->lock); + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { +@@ -5373,16 +5330,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) + + if (!pf->support_fc_autoneg) { + if (autoneg != 0) { +- hns3_err(hw, "unsupported fc auto-negotiation setting."); +- return -EOPNOTSUPP; +- } +- +- /* +- * Flow control auto-negotiation of the NIC is not supported, +- * but other auto-negotiation features may be supported. +- */ +- if (autoneg != hw->mac.link_autoneg) { +- hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); ++ hns3_err(hw, "unsupported fc auto-negotiation."); + return -EOPNOTSUPP; + } + +@@ -5662,17 +5610,6 @@ hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) + return hns3_cmd_send(hw, &desc, 1); + } + +-static int +-hns3_imp_reset_cmd(struct hns3_hw *hw) +-{ +- struct hns3_cmd_desc desc; +- +- hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); +- desc.data[0] = 0xeedd; +- +- return hns3_cmd_send(hw, &desc, 1); +-} +- + static void + hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) + { +@@ -5690,7 +5627,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) + + switch (reset_level) { + case HNS3_IMP_RESET: +- hns3_imp_reset_cmd(hw); ++ val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); ++ hns3_set_bit(val, HNS3_VECTOR0_TRIGGER_IMP_RESET_B, 1); ++ hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); + hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + break; +@@ -5815,12 +5754,7 @@ hns3_stop_service(struct hns3_adapter *hns) + rte_eal_alarm_cancel(hns3_service_handler, eth_dev); + hns3_update_linkstatus_and_event(hw, false); + } +- +- hns3_set_rxtx_function(eth_dev); +- rte_wmb(); +- /* Disable datapath on secondary process. 
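
/*
 * Several hunks in this patch collapse a repeated four-step shutdown
 * sequence (install dummy burst functions, write barrier, ask
 * secondary processes to stop, wait out in-flight bursts) into one
 * hns3_stop_rxtx_datapath() helper.  The shape of such a helper,
 * sketched with stub callbacks; __atomic_thread_fence stands in for
 * rte_wmb():
 */
#include <stdint.h>

struct eth_dev { int dummy; };

static void set_dummy_burst_fns(struct eth_dev *dev) { (void)dev; }
static void mp_req_stop_rxtx(struct eth_dev *dev) { (void)dev; }
static void delay_ms(unsigned int ms) { (void)ms; }

void stop_rxtx_datapath(struct eth_dev *dev, unsigned int max_queues)
{
	set_dummy_burst_fns(dev);
	/* Publish the new burst pointers before secondaries re-read them. */
	__atomic_thread_fence(__ATOMIC_RELEASE);
	mp_req_stop_rxtx(dev);
	/* Let lcores still inside the old burst functions drain out. */
	delay_ms(max_queues);
}
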
*/ +- hns3_mp_req_stop_rxtx(eth_dev); +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(eth_dev); + + rte_spinlock_lock(&hw->lock); + if (hns->hw.adapter_state == HNS3_NIC_STARTED || +@@ -5853,8 +5787,7 @@ hns3_start_service(struct hns3_adapter *hns) + hw->reset.level == HNS3_GLOBAL_RESET) + hns3_set_rst_done(hw); + eth_dev = &rte_eth_devices[hw->data->port_id]; +- hns3_set_rxtx_function(eth_dev); +- hns3_mp_req_start_rxtx(eth_dev); ++ hns3_start_rxtx_datapath(eth_dev); + if (hw->adapter_state == HNS3_NIC_STARTED) { + /* + * This API parent function already hold the hns3_hw.lock, the +@@ -6003,56 +5936,27 @@ hns3_reset_service(void *param) + hns3_msix_process(hns, reset_level); + } + +-static unsigned int +-hns3_get_speed_capa_num(uint16_t device_id) ++static uint32_t ++hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, ++ uint32_t speed_capa) + { +- unsigned int num; +- +- switch (device_id) { +- case HNS3_DEV_ID_25GE: +- case HNS3_DEV_ID_25GE_RDMA: +- num = 2; +- break; +- case HNS3_DEV_ID_100G_RDMA_MACSEC: +- case HNS3_DEV_ID_200G_RDMA: +- num = 1; +- break; +- default: +- num = 0; +- break; +- } ++ uint32_t speed_bit; ++ uint32_t num = 0; ++ uint32_t i; + +- return num; +-} ++ for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) { ++ speed_bit = ++ rte_eth_speed_bitflag(speed_fec_capa_tbl[i].speed, ++ RTE_ETH_LINK_FULL_DUPLEX); ++ if ((speed_capa & speed_bit) == 0) ++ continue; + +-static int +-hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, +- uint16_t device_id) +-{ +- switch (device_id) { +- case HNS3_DEV_ID_25GE: +- /* fallthrough */ +- case HNS3_DEV_ID_25GE_RDMA: +- speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; +- speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; +- +- /* In HNS3 device, the 25G NIC is compatible with 10G rate */ +- speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; +- speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; +- break; +- case HNS3_DEV_ID_100G_RDMA_MACSEC: +- speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; +- speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; +- break; +- case HNS3_DEV_ID_200G_RDMA: +- speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; +- speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; +- break; +- default: +- return -ENOTSUP; ++ speed_fec_capa[num].speed = speed_fec_capa_tbl[i].speed; ++ speed_fec_capa[num].capa = speed_fec_capa_tbl[i].capa; ++ num++; + } + +- return 0; ++ return num; + } + + static int +@@ -6061,28 +5965,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, + unsigned int num) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- uint16_t device_id = pci_dev->id.device_id; +- unsigned int capa_num; +- int ret; ++ unsigned int speed_num; ++ uint32_t speed_capa; + +- capa_num = hns3_get_speed_capa_num(device_id); +- if (capa_num == 0) { +- hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", +- device_id); ++ speed_capa = hns3_get_speed_capa(hw); ++ /* speed_num counts number of speed capabilities */ ++ speed_num = __builtin_popcount(speed_capa & HNS3_SPEEDS_SUPP_FEC); ++ if (speed_num == 0) + return -ENOTSUP; +- } + +- if (speed_fec_capa == NULL || num < capa_num) +- return capa_num; ++ if (speed_fec_capa == NULL) ++ return speed_num; + +- ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); +- if (ret) +- return -ENOTSUP; ++ if (num < speed_num) { ++ hns3_err(hw, "not enough array size(%u) to store FEC capabilities, should not be less than %u", ++ 
num, speed_num); ++ return -EINVAL; ++ } + +- return capa_num; ++ return hns3_get_speed_fec_capa(speed_fec_capa, speed_capa); + } + ++ + static int + get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) + { +@@ -6220,61 +6124,27 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) + } + + static uint32_t +-get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) ++hns3_get_current_speed_fec_cap(struct hns3_mac *mac) + { +- struct hns3_mac *mac = &hw->mac; +- uint32_t cur_capa; ++ uint32_t i; + +- switch (mac->link_speed) { +- case RTE_ETH_SPEED_NUM_10G: +- cur_capa = fec_capa[1].capa; +- break; +- case RTE_ETH_SPEED_NUM_25G: +- case RTE_ETH_SPEED_NUM_100G: +- case RTE_ETH_SPEED_NUM_200G: +- cur_capa = fec_capa[0].capa; +- break; +- default: +- cur_capa = 0; +- break; ++ for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) { ++ if (mac->link_speed == speed_fec_capa_tbl[i].speed) ++ return speed_fec_capa_tbl[i].capa; + } + +- return cur_capa; +-} +- +-static bool +-is_fec_mode_one_bit_set(uint32_t mode) +-{ +- int cnt = 0; +- uint8_t i; +- +- for (i = 0; i < sizeof(mode); i++) +- if (mode >> i & 0x1) +- cnt++; +- +- return cnt == 1 ? true : false; ++ return 0; + } + + static int +-hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) ++hns3_fec_mode_valid(struct rte_eth_dev *dev, uint32_t mode) + { +-#define FEC_CAPA_NUM 2 + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); +- struct hns3_pf *pf = &hns->pf; +- struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; + uint32_t cur_capa; +- uint32_t num = FEC_CAPA_NUM; +- int ret; +- +- ret = hns3_fec_get_capability(dev, fec_capa, num); +- if (ret < 0) +- return ret; + +- /* HNS3 PMD only support one bit set mode, e.g. 0x1, 0x4 */ +- if (!is_fec_mode_one_bit_set(mode)) { +- hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, " +- "FEC mode should be only one bit set", mode); ++ if (__builtin_popcount(mode) != 1) { ++ hns3_err(hw, "FEC mode(0x%x) should be only one bit set", mode); + return -EINVAL; + } + +@@ -6282,12 +6152,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) + * Check whether the configured mode is within the FEC capability. + * If not, the configured mode will not be supported. 
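
/*
 * The rewritten hns3_fec_get_capability() above derives the number of
 * reportable FEC entries from the port's speed capability mask rather
 * than from a per-device-ID table: it masks off the speeds that
 * support FEC and counts the set bits.  Reduced to a sketch with an
 * illustrative bit layout:
 */
#include <stdint.h>
#include <stdio.h>

#define SPEED_10G  (1u << 0)
#define SPEED_25G  (1u << 1)
#define SPEED_100G (1u << 2)

#define SPEEDS_SUPP_FEC (SPEED_10G | SPEED_25G | SPEED_100G)

unsigned int fec_capa_count(uint32_t speed_capa)
{
	/* One capability row per FEC-capable speed the port supports. */
	return (unsigned int)__builtin_popcount(speed_capa & SPEEDS_SUPP_FEC);
}

int main(void)
{
	printf("%u\n", fec_capa_count(SPEED_10G | SPEED_25G)); /* prints 2 */
	return 0;
}
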
+ */ +- cur_capa = get_current_speed_fec_cap(hw, fec_capa); +- if (!(cur_capa & mode)) { +- hns3_err(hw, "unsupported FEC mode = 0x%x", mode); ++ cur_capa = hns3_get_current_speed_fec_cap(&hw->mac); ++ if ((cur_capa & mode) == 0) { ++ hns3_err(hw, "unsupported FEC mode(0x%x)", mode); + return -EINVAL; + } + ++ return 0; ++} ++ ++static int ++hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) ++{ ++ struct hns3_adapter *hns = dev->data->dev_private; ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); ++ struct hns3_pf *pf = &hns->pf; ++ int ret; ++ ++ ret = hns3_fec_mode_valid(dev, mode); ++ if (ret != 0) ++ return ret; ++ + rte_spinlock_lock(&hw->lock); + ret = hns3_set_fec_hw(hw, mode); + if (ret) { +@@ -6342,7 +6227,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, +- "fail to get optical module exist state, ret = %d.\n", ++ "fail to get optical module exist state, ret = %d.", + ret); + return false; + } +@@ -6380,7 +6265,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, + + ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); + if (ret) { +- hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", ++ hns3_err(hw, "fail to get module EEPROM info, ret = %d.", + ret); + return ret; + } +@@ -6417,7 +6302,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, + return -ENOTSUP; + + if (!hns3_optical_module_existed(hw)) { +- hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); ++ hns3_err(hw, "fail to read module EEPROM: no module is connected."); + return -EIO; + } + +@@ -6480,7 +6365,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: +- hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", ++ hns3_err(hw, "unknown module, type = %u, extra_type = %u.", + sfp_type.type, sfp_type.ext_type); + return -EINVAL; + } +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h +index 2457754b3d..58572948fe 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev.h ++++ b/dpdk/drivers/net/hns3/hns3_ethdev.h +@@ -871,13 +871,6 @@ struct hns3_adapter { + struct hns3_ptype_table ptype_tbl __rte_cache_aligned; + }; + +-#define HNS3_DEVARG_RX_FUNC_HINT "rx_func_hint" +-#define HNS3_DEVARG_TX_FUNC_HINT "tx_func_hint" +- +-#define HNS3_DEVARG_DEV_CAPS_MASK "dev_caps_mask" +- +-#define HNS3_DEVARG_MBX_TIME_LIMIT_MS "mbx_time_limit_ms" +- + enum hns3_dev_cap { + HNS3_DEV_SUPPORT_DCB_B, + HNS3_DEV_SUPPORT_COPPER_B, +@@ -996,15 +989,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) + #define hns3_read_dev(a, reg) \ + hns3_read_reg((a)->io_base, (reg)) + +-#define NEXT_ITEM_OF_ACTION(act, actions, index) \ +- do { \ +- (act) = (actions) + (index); \ +- while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \ +- (index)++; \ +- (act) = (actions) + (index); \ +- } \ +- } while (0) +- + static inline uint64_t + hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) + { +@@ -1046,21 +1030,6 @@ void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); + +-int hns3_restore_ptp(struct hns3_adapter *hns); +-int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, +- struct rte_eth_conf *conf); +-int hns3_ptp_init(struct hns3_hw *hw); +-int hns3_timesync_enable(struct rte_eth_dev *dev); +-int hns3_timesync_disable(struct rte_eth_dev *dev); +-int 
hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev, +- struct timespec *timestamp, +- uint32_t flags __rte_unused); +-int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, +- struct timespec *timestamp); +-int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts); +-int hns3_timesync_write_time(struct rte_eth_dev *dev, +- const struct timespec *ts); +-int hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); + + static inline bool + is_reset_pending(struct hns3_adapter *hns) +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +index d220522c43..5aac62a41f 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +@@ -688,67 +688,6 @@ hns3vf_interrupt_handler(void *param) + hns3vf_enable_irq0(hw); + } + +-static void +-hns3vf_set_default_dev_specifications(struct hns3_hw *hw) +-{ +- hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; +- hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; +- hw->rss_key_size = HNS3_RSS_KEY_SIZE; +- hw->intr.int_ql_max = HNS3_INTR_QL_NONE; +-} +- +-static void +-hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) +-{ +- struct hns3_dev_specs_0_cmd *req0; +- struct hns3_dev_specs_1_cmd *req1; +- +- req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; +- req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data; +- +- hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; +- hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); +- hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); +- hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); +- hw->min_tx_pkt_len = req1->min_tx_pkt_len; +-} +- +-static int +-hns3vf_check_dev_specifications(struct hns3_hw *hw) +-{ +- if (hw->rss_ind_tbl_size == 0 || +- hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { +- hns3_warn(hw, "the size of hash lookup table configured (%u) exceeds the maximum(%u)", +- hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX); +- return -EINVAL; +- } +- +- return 0; +-} +- +-static int +-hns3vf_query_dev_specifications(struct hns3_hw *hw) +-{ +- struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; +- int ret; +- int i; +- +- for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, +- true); +- desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- } +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); +- +- ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); +- if (ret) +- return ret; +- +- hns3vf_parse_dev_specifications(hw, desc); +- +- return hns3vf_check_dev_specifications(hw); +-} +- + void + hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported) + { +@@ -826,7 +765,7 @@ hns3vf_get_capability(struct hns3_hw *hw) + return ret; + + if (hw->revision < PCI_REVISION_ID_HIP09_A) { +- hns3vf_set_default_dev_specifications(hw); ++ hns3_set_default_dev_specifications(hw); + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; + hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; +@@ -837,7 +776,7 @@ hns3vf_get_capability(struct hns3_hw *hw) + return 0; + } + +- ret = hns3vf_query_dev_specifications(hw); ++ ret = hns3_query_dev_specifications(hw); + if (ret) { + PMD_INIT_LOG(ERR, + "failed to query dev specifications, ret = %d", +@@ -1633,12 +1572,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) + dev->data->dev_started = 0; + + hw->adapter_state = HNS3_NIC_STOPPING; +- hns3_set_rxtx_function(dev); +- 
rte_wmb(); +- /* Disable datapath on secondary process. */ +- hns3_mp_req_stop_rxtx(dev); +- /* Prevent crashes when queues are still in use. */ +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(dev); + + rte_spinlock_lock(&hw->lock); + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { +@@ -1740,8 +1674,10 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) + hns3_enable_rxd_adv_layout(hw); + + ret = hns3_init_queues(hns, reset_queue); +- if (ret) ++ if (ret) { + hns3_err(hw, "failed to init queues, ret = %d.", ret); ++ return ret; ++ } + + return hns3_restore_filter(hns); + } +@@ -1792,8 +1728,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) + rte_spinlock_unlock(&hw->lock); + + hns3_rx_scattered_calc(dev); +- hns3_set_rxtx_function(dev); +- hns3_mp_req_start_rxtx(dev); ++ hns3_start_rxtx_datapath(dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); +@@ -1963,11 +1898,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) + } + hw->mac.link_status = RTE_ETH_LINK_DOWN; + +- hns3_set_rxtx_function(eth_dev); +- rte_wmb(); +- /* Disable datapath on secondary process. */ +- hns3_mp_req_stop_rxtx(eth_dev); +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(eth_dev); + + rte_spinlock_lock(&hw->lock); + if (hw->adapter_state == HNS3_NIC_STARTED || +@@ -1999,8 +1930,7 @@ hns3vf_start_service(struct hns3_adapter *hns) + struct rte_eth_dev *eth_dev; + + eth_dev = &rte_eth_devices[hw->data->port_id]; +- hns3_set_rxtx_function(eth_dev); +- hns3_mp_req_start_rxtx(eth_dev); ++ hns3_start_rxtx_datapath(eth_dev); + + rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler, + eth_dev); +diff --git a/dpdk/drivers/net/hns3/hns3_flow.c b/dpdk/drivers/net/hns3/hns3_flow.c +index a2c1589c39..d5c9c22633 100644 +--- a/dpdk/drivers/net/hns3/hns3_flow.c ++++ b/dpdk/drivers/net/hns3/hns3_flow.c +@@ -10,6 +10,125 @@ + #include "hns3_logs.h" + #include "hns3_flow.h" + ++#define NEXT_ITEM_OF_ACTION(act, actions, index) \ ++ do { \ ++ (act) = (actions) + (index); \ ++ while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \ ++ (index)++; \ ++ (act) = (actions) + (index); \ ++ } \ ++ } while (0) ++ ++#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \ ++ do { \ ++ (item) = (pattern) + (index); \ ++ while ((item)->type == RTE_FLOW_ITEM_TYPE_VOID) { \ ++ (index)++; \ ++ (item) = (pattern) + (index); \ ++ } \ ++ } while (0) ++ ++#define HNS3_HASH_HDR_ETH RTE_BIT64(0) ++#define HNS3_HASH_HDR_IPV4 RTE_BIT64(1) ++#define HNS3_HASH_HDR_IPV6 RTE_BIT64(2) ++#define HNS3_HASH_HDR_TCP RTE_BIT64(3) ++#define HNS3_HASH_HDR_UDP RTE_BIT64(4) ++#define HNS3_HASH_HDR_SCTP RTE_BIT64(5) ++ ++#define HNS3_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH) ++ ++#define HNS3_HASH_ETH_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \ ++ BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6)) ++ ++#define HNS3_HASH_IP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \ ++ BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \ ++ BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP)) ++ ++static const uint64_t hash_pattern_next_allow_items[] = { ++ [RTE_FLOW_ITEM_TYPE_VOID] = HNS3_HASH_VOID_NEXT_ALLOW, ++ [RTE_FLOW_ITEM_TYPE_ETH] = HNS3_HASH_ETH_NEXT_ALLOW, ++ [RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_IP_NEXT_ALLOW, ++ [RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_IP_NEXT_ALLOW, ++}; ++ ++static const uint64_t hash_pattern_item_header[] = { ++ [RTE_FLOW_ITEM_TYPE_ETH] = HNS3_HASH_HDR_ETH, ++ [RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_HDR_IPV4, ++ [RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_HDR_IPV6, ++ 
[RTE_FLOW_ITEM_TYPE_TCP] = HNS3_HASH_HDR_TCP, ++ [RTE_FLOW_ITEM_TYPE_UDP] = HNS3_HASH_HDR_UDP, ++ [RTE_FLOW_ITEM_TYPE_SCTP] = HNS3_HASH_HDR_SCTP, ++}; ++ ++#define HNS3_HASH_IPV4 (HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV4) ++#define HNS3_HASH_IPV4_TCP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV4 | \ ++ HNS3_HASH_HDR_TCP) ++#define HNS3_HASH_IPV4_UDP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV4 | \ ++ HNS3_HASH_HDR_UDP) ++#define HNS3_HASH_IPV4_SCTP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV4 | \ ++ HNS3_HASH_HDR_SCTP) ++#define HNS3_HASH_IPV6 (HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV6) ++#define HNS3_HASH_IPV6_TCP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV6 | \ ++ HNS3_HASH_HDR_TCP) ++#define HNS3_HASH_IPV6_UDP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV6 | \ ++ HNS3_HASH_HDR_UDP) ++#define HNS3_HASH_IPV6_SCTP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV6 | \ ++ HNS3_HASH_HDR_SCTP) ++ ++static const struct hns3_hash_map_info { ++ /* flow type specified, zero means action works for all flow types. */ ++ uint64_t pattern_type; ++ uint64_t rss_pctype; /* packet type with prefix RTE_ETH_RSS_xxx */ ++ uint64_t l3l4_types; /* Supported L3/L4 RSS types for this packet type */ ++ uint64_t hw_pctype; /* packet type in driver */ ++ uint64_t tuple_mask; /* full tuples of the hw_pctype */ ++} hash_map_table[] = { ++ /* IPV4 */ ++ { HNS3_HASH_IPV4, ++ RTE_ETH_RSS_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { HNS3_HASH_IPV4, ++ RTE_ETH_RSS_NONFRAG_IPV4_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { HNS3_HASH_IPV4, ++ RTE_ETH_RSS_FRAG_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV4_FLAG, HNS3_RSS_TUPLE_IPV4_FLAG_M }, ++ { HNS3_HASH_IPV4_TCP, ++ RTE_ETH_RSS_NONFRAG_IPV4_TCP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV4_TCP, HNS3_RSS_TUPLE_IPV4_TCP_M }, ++ { HNS3_HASH_IPV4_UDP, ++ RTE_ETH_RSS_NONFRAG_IPV4_UDP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV4_UDP, HNS3_RSS_TUPLE_IPV4_UDP_M }, ++ { HNS3_HASH_IPV4_SCTP, ++ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV4_SCTP, HNS3_RSS_TUPLE_IPV4_SCTP_M }, ++ /* IPV6 */ ++ { HNS3_HASH_IPV6, ++ RTE_ETH_RSS_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { HNS3_HASH_IPV6, ++ RTE_ETH_RSS_NONFRAG_IPV6_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { HNS3_HASH_IPV6, ++ RTE_ETH_RSS_FRAG_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV6_FLAG, HNS3_RSS_TUPLE_IPV6_FLAG_M }, ++ { HNS3_HASH_IPV6_TCP, ++ RTE_ETH_RSS_NONFRAG_IPV6_TCP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV6_TCP, HNS3_RSS_TUPLE_IPV6_TCP_M }, ++ { HNS3_HASH_IPV6_UDP, ++ RTE_ETH_RSS_NONFRAG_IPV6_UDP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV6_UDP, HNS3_RSS_TUPLE_IPV6_UDP_M }, ++ { HNS3_HASH_IPV6_SCTP, ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV6_SCTP, HNS3_RSS_TUPLE_IPV6_SCTP_M }, ++}; ++ + static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF }; + static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 }; + +@@ -79,7 +198,7 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) + } + + /* +- * This function is used to find rss general action. ++ * This function is used to parse filter type. + * 1. 
As we know RSS is used to spread packets among several queues, the flow + * API provide the struct rte_flow_action_rss, user could config its field + * sush as: func/level/types/key/queue to control RSS function. +@@ -87,16 +206,18 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) + * implemented by FDIR + RSS in hns3 hardware, user can create one FDIR rule + * which action is RSS queues region. + * 3. When action is RSS, we use the following rule to distinguish: +- * Case 1: pattern have ETH and action's queue_num > 0, indicate it is queue +- * region configuration. ++ * Case 1: pattern has ETH and all fields in RSS action except 'queues' are ++ * zero or default, indicate it is queue region configuration. + * Case other: an rss general action. + */ +-static const struct rte_flow_action * +-hns3_find_rss_general_action(const struct rte_flow_item pattern[], +- const struct rte_flow_action actions[]) ++static void ++hns3_parse_filter_type(const struct rte_flow_item pattern[], ++ const struct rte_flow_action actions[], ++ struct hns3_filter_info *filter_info) + { ++ const struct rte_flow_action_rss *rss_act; + const struct rte_flow_action *act = NULL; +- const struct hns3_rss_conf *rss; ++ bool only_has_queues = false; + bool have_eth = false; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { +@@ -105,8 +226,10 @@ hns3_find_rss_general_action(const struct rte_flow_item pattern[], + break; + } + } +- if (!act) +- return NULL; ++ if (act == NULL) { ++ filter_info->type = RTE_ETH_FILTER_FDIR; ++ return; ++ } + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) { +@@ -115,19 +238,21 @@ hns3_find_rss_general_action(const struct rte_flow_item pattern[], + } + } + +- rss = act->conf; +- if (have_eth && rss->conf.queue_num) { ++ rss_act = act->conf; ++ only_has_queues = (rss_act->queue_num > 0) && ++ (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT && ++ rss_act->types == 0 && rss_act->key_len == 0); ++ if (have_eth && only_has_queues) { + /* +- * Pattern have ETH and action's queue_num > 0, indicate this is +- * queue region configuration. +- * Because queue region is implemented by FDIR + RSS in hns3 +- * hardware, it needs to enter FDIR process, so here return NULL +- * to avoid enter RSS process. ++ * Pattern has ETH and all fields in RSS action except 'queues' ++ * are zero or default, which indicates this is queue region ++ * configuration. 
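
/*
 * The comment rewritten above tightens the rule for classifying an
 * RSS action as a queue-region (FDIR) configuration: the pattern must
 * contain ETH, and every RSS field except 'queues' must be zero or
 * default.  As a predicate, with a local struct mirroring the few
 * rte_flow_action_rss fields involved:
 */
#include <stdbool.h>
#include <stdint.h>

struct rss_action {
	uint32_t func;        /* 0 == default hash function */
	uint64_t types;
	uint32_t key_len;
	uint32_t queue_num;
};

bool is_queue_region_cfg(bool pattern_has_eth, const struct rss_action *rss)
{
	bool only_has_queues = rss->queue_num > 0 &&
			       rss->func == 0 &&
			       rss->types == 0 &&
			       rss->key_len == 0;

	return pattern_has_eth && only_has_queues;
}
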
+ */ +- return NULL; ++ filter_info->type = RTE_ETH_FILTER_FDIR; ++ return; + } + +- return act; ++ filter_info->type = RTE_ETH_FILTER_HASH; + } + + static inline struct hns3_flow_counter * +@@ -1246,7 +1371,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_fdir_rule_ele *fdir_rule_ptr; +- struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_flow_mem *flow_node; + + fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); +@@ -1256,13 +1380,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) + fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); + } + +- rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); +- while (rss_filter_ptr) { +- TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); +- rte_free(rss_filter_ptr); +- rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); +- } +- + flow_node = TAILQ_FIRST(&hw->flow_list); + while (flow_node) { + TAILQ_REMOVE(&hw->flow_list, flow_node, entries); +@@ -1273,238 +1390,478 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) + } + + static bool +-hns3_action_rss_same(const struct rte_flow_action_rss *comp, +- const struct rte_flow_action_rss *with) ++hns3_flow_rule_key_same(const struct rte_flow_action_rss *comp, ++ const struct rte_flow_action_rss *with) + { +- bool rss_key_is_same; +- bool func_is_same; ++ if (comp->key_len != with->key_len) ++ return false; + +- /* +- * When user flush all RSS rule, RSS func is set invalid with +- * RTE_ETH_HASH_FUNCTION_MAX. Then the user create a flow after +- * flushed, any validate RSS func is different with it before +- * flushed. Others, when user create an action RSS with RSS func +- * specified RTE_ETH_HASH_FUNCTION_DEFAULT, the func is the same +- * between continuous RSS flow. +- */ +- if (comp->func == RTE_ETH_HASH_FUNCTION_MAX) +- func_is_same = false; +- else +- func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ? 
+- (comp->func == with->func) : true; ++ if (with->key_len == 0) ++ return true; + +- if (with->key_len == 0 || with->key == NULL) +- rss_key_is_same = 1; +- else +- rss_key_is_same = comp->key_len == with->key_len && +- !memcmp(comp->key, with->key, with->key_len); ++ if (comp->key == NULL && with->key == NULL) ++ return true; + +- return (func_is_same && rss_key_is_same && +- comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) && +- comp->level == with->level && +- comp->queue_num == with->queue_num && +- !memcmp(comp->queue, with->queue, +- sizeof(*with->queue) * with->queue_num)); ++ if (!(comp->key != NULL && with->key != NULL)) ++ return false; ++ ++ return !memcmp(comp->key, with->key, with->key_len); + } + +-static int +-hns3_rss_conf_copy(struct hns3_rss_conf *out, +- const struct rte_flow_action_rss *in) ++static bool ++hns3_flow_rule_queues_same(const struct rte_flow_action_rss *comp, ++ const struct rte_flow_action_rss *with) + { +- if (in->key_len > RTE_DIM(out->key) || +- in->queue_num > RTE_DIM(out->queue)) +- return -EINVAL; +- if (in->key == NULL && in->key_len) +- return -EINVAL; +- out->conf = (struct rte_flow_action_rss) { +- .func = in->func, +- .level = in->level, +- .types = in->types, +- .key_len = in->key_len, +- .queue_num = in->queue_num, +- }; +- out->conf.queue = memcpy(out->queue, in->queue, +- sizeof(*in->queue) * in->queue_num); +- if (in->key) +- out->conf.key = memcpy(out->key, in->key, in->key_len); ++ if (comp->queue_num != with->queue_num) ++ return false; + +- return 0; ++ if (with->queue_num == 0) ++ return true; ++ ++ if (comp->queue == NULL && with->queue == NULL) ++ return true; ++ ++ if (!(comp->queue != NULL && with->queue != NULL)) ++ return false; ++ ++ return !memcmp(comp->queue, with->queue, with->queue_num); + } + + static bool +-hns3_rss_input_tuple_supported(struct hns3_hw *hw, +- const struct rte_flow_action_rss *rss) ++hns3_action_rss_same(const struct rte_flow_action_rss *comp, ++ const struct rte_flow_action_rss *with) ++{ ++ bool same_level; ++ bool same_types; ++ bool same_func; ++ ++ same_level = (comp->level == with->level); ++ same_types = (comp->types == with->types); ++ same_func = (comp->func == with->func); ++ ++ return same_level && same_types && same_func && ++ hns3_flow_rule_key_same(comp, with) && ++ hns3_flow_rule_queues_same(comp, with); ++} ++ ++static bool ++hns3_valid_ipv6_sctp_rss_types(struct hns3_hw *hw, uint64_t types) + { + /* +- * For IP packet, it is not supported to use src/dst port fields to RSS +- * hash for the following packet types. +- * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG +- * Besides, for Kunpeng920, the NIC HW is not supported to use src/dst +- * port fields to RSS hash for IPV6 SCTP packet type. However, the +- * Kunpeng930 and future kunpeng series support to use src/dst port +- * fields to RSS hash for IPv6 SCTP packet type. ++ * Some hardware don't support to use src/dst port fields to hash ++ * for IPV6 SCTP packet type. + */ +- if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) && +- (rss->types & RTE_ETH_RSS_IP || +- (!hw->rss_info.ipv6_sctp_offload_supported && +- rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP))) ++ if (types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP && ++ types & HNS3_RSS_SUPPORT_L4_SRC_DST && ++ !hw->rss_info.ipv6_sctp_offload_supported) + return false; + + return true; + } + +-/* +- * This function is used to parse rss action validation. 
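
/*
 * hns3_action_rss_same() above is split into small helpers whose
 * NULL/zero-length handling is easy to audit: unequal lengths never
 * match, zero length always matches, two NULL pointers match, a
 * NULL/non-NULL mix does not, and only then is memcmp() consulted.
 * The key helper, generalised to arbitrary byte blobs:
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

bool blob_same(const void *a, const void *b, size_t a_len, size_t b_len)
{
	if (a_len != b_len)
		return false;
	if (a_len == 0)
		return true;          /* both empty */
	if (a == NULL && b == NULL)
		return true;
	if (a == NULL || b == NULL)
		return false;         /* one side missing */
	return memcmp(a, b, a_len) == 0;
}
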
+- */ + static int +-hns3_parse_rss_filter(struct rte_eth_dev *dev, +- const struct rte_flow_action *actions, +- struct rte_flow_error *error) ++hns3_flow_parse_hash_func(const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) + { +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- struct hns3_rss_conf *rss_conf = &hw->rss_info; +- const struct rte_flow_action_rss *rss; +- const struct rte_flow_action *act; +- uint32_t act_index = 0; +- uint16_t n; ++ if (rss_act->func >= RTE_ETH_HASH_FUNCTION_MAX) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, "RSS hash func are not supported"); + +- NEXT_ITEM_OF_ACTION(act, actions, act_index); +- rss = act->conf; ++ rss_conf->conf.func = rss_act->func; ++ return 0; ++} + +- if (rss == NULL) { ++static int ++hns3_flow_parse_hash_key(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ if (rss_act->key_len != hw->rss_key_size) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, +- act, "no valid queues"); +- } ++ NULL, "invalid RSS key length"); ++ ++ if (rss_act->key != NULL) ++ memcpy(rss_conf->key, rss_act->key, rss_act->key_len); ++ else ++ memcpy(rss_conf->key, hns3_hash_key, ++ RTE_MIN(sizeof(hns3_hash_key), rss_act->key_len)); ++ /* Need to record if user sets hash key. */ ++ rss_conf->conf.key = rss_act->key; ++ rss_conf->conf.key_len = rss_act->key_len; ++ ++ return 0; ++} ++ ++static int ++hns3_flow_parse_queues(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ uint16_t i; + +- if (rss->queue_num > RTE_DIM(rss_conf->queue)) ++ if (rss_act->queue_num > hw->rss_ind_tbl_size) + return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "queue number configured exceeds " +- "queue buffer size driver supported"); ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, ++ "queue number can not exceed RSS indirection table."); + +- for (n = 0; n < rss->queue_num; n++) { +- if (rss->queue[n] < hw->alloc_rss_size) +- continue; +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "queue id must be less than queue number allocated to a TC"); ++ if (rss_act->queue_num > HNS3_RSS_QUEUES_BUFFER_NUM) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, ++ "queue number configured exceeds queue buffer size driver supported"); ++ ++ for (i = 0; i < rss_act->queue_num; i++) { ++ if (rss_act->queue[i] >= hw->alloc_rss_size) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, ++ "queue id must be less than queue number allocated to a TC"); + } + +- if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types) ++ memcpy(rss_conf->queue, rss_act->queue, ++ rss_act->queue_num * sizeof(rss_conf->queue[0])); ++ rss_conf->conf.queue = rss_conf->queue; ++ rss_conf->conf.queue_num = rss_act->queue_num; ++ ++ return 0; ++} ++ ++static int ++hns3_flow_get_hw_pctype(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ const struct hns3_hash_map_info *map, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ uint64_t l3l4_src_dst, l3l4_refine, left_types; ++ ++ if (rss_act->types == 0) { ++ /* Disable RSS hash of this packet type if types is zero. 
*/ ++ rss_conf->hw_pctypes |= map->hw_pctype; ++ return 0; ++ } ++ ++ /* ++ * Can not have extra types except rss_pctype and l3l4_type in this map. ++ */ ++ left_types = ~map->rss_pctype & rss_act->types; ++ if (left_types & ~map->l3l4_types) + return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, +- act, +- "Flow types is unsupported by " +- "hns3's RSS"); +- if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX) +- return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "RSS hash func are not supported"); +- if (rss->level) ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, ++ "cannot set extra types."); ++ ++ l3l4_src_dst = left_types; ++ /* L3/L4 SRC and DST shouldn't be specified at the same time. */ ++ l3l4_refine = rte_eth_rss_hf_refine(l3l4_src_dst); ++ if (l3l4_refine != l3l4_src_dst) + return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "a nonzero RSS encapsulation level is not supported"); +- if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, ++ "cannot specify L3_SRC/DST_ONLY or L4_SRC/DST_ONLY at the same."); ++ ++ if (!hns3_valid_ipv6_sctp_rss_types(hw, rss_act->types)) + return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "RSS hash key must be exactly 40 bytes"); ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, ++ "hardware doesn't support to use L4 src/dst to hash for IPV6-SCTP."); + +- if (!hns3_rss_input_tuple_supported(hw, rss)) +- return rte_flow_error_set(error, EINVAL, ++ rss_conf->hw_pctypes |= map->hw_pctype; ++ ++ return 0; ++} ++ ++static int ++hns3_flow_parse_rss_types_by_ptype(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ uint64_t pattern_type, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ const struct hns3_hash_map_info *map; ++ bool matched = false; ++ uint16_t i; ++ int ret; ++ ++ for (i = 0; i < RTE_DIM(hash_map_table); i++) { ++ map = &hash_map_table[i]; ++ if (map->pattern_type != pattern_type) { ++ /* ++ * If the target pattern type is already matched with ++ * the one before this pattern in the hash map table, ++ * no need to continue walk. ++ */ ++ if (matched) ++ break; ++ continue; ++ } ++ matched = true; ++ ++ /* ++ * If pattern type is matched and the 'types' is zero, all packet flow ++ * types related to this pattern type disable RSS hash. ++ * Otherwise, RSS types must match the pattern type and cannot have no ++ * extra or unsupported types. 
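
/*
 * hns3_flow_get_hw_pctype() above rejects any requested RSS type bits
 * that belong neither to the packet type itself nor to its supported
 * L3/L4 refinements.  The mask algebra in isolation:
 */
#include <stdbool.h>
#include <stdint.h>

bool rss_types_allowed(uint64_t requested, uint64_t pctype_bits,
		       uint64_t l3l4_bits)
{
	/* Bits left over once the packet type itself is accounted for. */
	uint64_t left = requested & ~pctype_bits;

	/* Anything outside the supported L3/L4 set is an error. */
	return (left & ~l3l4_bits) == 0;
}
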
++ */ ++ if (rss_act->types != 0 && !(map->rss_pctype & rss_act->types)) ++ continue; ++ ++ ret = hns3_flow_get_hw_pctype(hw, rss_act, map, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ if (rss_conf->hw_pctypes != 0) ++ return 0; ++ ++ if (matched) ++ return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, +- &rss->types, +- "input RSS types are not supported"); ++ NULL, "RSS types are unsupported"); + +- act_index++; ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, "Pattern specified is unsupported"); ++} + +- /* Check if the next not void action is END */ +- NEXT_ITEM_OF_ACTION(act, actions, act_index); +- if (act->type != RTE_FLOW_ACTION_TYPE_END) { +- memset(rss_conf, 0, sizeof(struct hns3_rss_conf)); +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, +- act, "Not supported action."); ++static uint64_t ++hns3_flow_get_all_hw_pctypes(uint64_t types) ++{ ++ uint64_t hw_pctypes = 0; ++ uint16_t i; ++ ++ for (i = 0; i < RTE_DIM(hash_map_table); i++) { ++ if (types & hash_map_table[i].rss_pctype) ++ hw_pctypes |= hash_map_table[i].hw_pctype; + } + +- return 0; ++ return hw_pctypes; ++} ++ ++static int ++hns3_flow_parse_rss_types(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ uint64_t pattern_type, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ rss_conf->conf.types = rss_act->types; ++ ++ /* no pattern specified to set global RSS types. */ ++ if (pattern_type == 0) { ++ if (!hns3_check_rss_types_valid(hw, rss_act->types)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, "RSS types is invalid."); ++ rss_conf->hw_pctypes = ++ hns3_flow_get_all_hw_pctypes(rss_act->types); ++ return 0; ++ } ++ ++ return hns3_flow_parse_rss_types_by_ptype(hw, rss_act, pattern_type, ++ rss_conf, error); + } + + static int +-hns3_disable_rss(struct hns3_hw *hw) ++hns3_flow_parse_hash_global_conf(struct rte_eth_dev *dev, ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) + { ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + +- ret = hns3_set_rss_tuple_by_rss_hf(hw, 0); +- if (ret) ++ ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error); ++ if (ret != 0) + return ret; + +- return 0; ++ if (rss_act->queue_num > 0) { ++ ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ if (rss_act->key_len > 0) { ++ ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ return hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type, ++ rss_conf, error); + } + +-static void +-hns3_adjust_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf) ++static int ++hns3_flow_parse_pattern_type(const struct rte_flow_item pattern[], ++ uint64_t *ptype, struct rte_flow_error *error) + { +- if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) { +- hns3_warn(hw, "Default RSS hash key to be set"); +- rss_conf->key = hns3_hash_key; +- rss_conf->key_len = HNS3_RSS_KEY_SIZE; ++ enum rte_flow_item_type pre_type = RTE_FLOW_ITEM_TYPE_VOID; ++ const char *message = "Pattern specified isn't supported"; ++ uint64_t item_hdr, pattern_hdrs = 0; ++ enum rte_flow_item_type cur_type; ++ ++ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { ++ if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ++ continue; ++ if (pattern->mask || 
pattern->spec || pattern->last) { ++ message = "Header info shouldn't be specified"; ++ goto unsup; ++ } ++ ++ /* Check the sub-item allowed by the previous item . */ ++ if (pre_type >= RTE_DIM(hash_pattern_next_allow_items) || ++ !(hash_pattern_next_allow_items[pre_type] & ++ BIT_ULL(pattern->type))) ++ goto unsup; ++ ++ cur_type = pattern->type; ++ /* Unsupported for current type being greater than array size. */ ++ if (cur_type >= RTE_DIM(hash_pattern_item_header)) ++ goto unsup; ++ ++ /* The value is zero, which means unsupported current header. */ ++ item_hdr = hash_pattern_item_header[cur_type]; ++ if (item_hdr == 0) ++ goto unsup; ++ ++ /* Have duplicate pattern header. */ ++ if (item_hdr & pattern_hdrs) ++ goto unsup; ++ pre_type = cur_type; ++ pattern_hdrs |= item_hdr; + } ++ ++ if (pattern_hdrs != 0) { ++ *ptype = pattern_hdrs; ++ return 0; ++ } ++ ++unsup: ++ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, ++ pattern, message); + } + + static int +-hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func, +- uint8_t *hash_algo) +-{ +- enum rte_eth_hash_function algo_func = *func; +- switch (algo_func) { +- case RTE_ETH_HASH_FUNCTION_DEFAULT: +- /* Keep *hash_algo as what it used to be */ +- algo_func = hw->rss_info.conf.func; +- break; +- case RTE_ETH_HASH_FUNCTION_TOEPLITZ: +- *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ; +- break; +- case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: +- *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE; +- break; +- case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: +- *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP; +- break; +- default: +- hns3_err(hw, "Invalid RSS algorithm configuration(%d)", +- algo_func); +- return -EINVAL; ++hns3_flow_parse_pattern_act(struct rte_eth_dev *dev, ++ const struct rte_flow_item pattern[], ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ ++ if (rss_act->key_len > 0) { ++ ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; + } +- *func = algo_func; ++ ++ if (rss_act->queue_num > 0) { ++ ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ ret = hns3_flow_parse_pattern_type(pattern, &rss_conf->pattern_type, ++ error); ++ if (ret != 0) ++ return ret; ++ ++ ret = hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type, ++ rss_conf, error); ++ if (ret != 0) ++ return ret; ++ ++ if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT || ++ rss_act->key_len > 0 || rss_act->queue_num > 0) ++ hns3_warn(hw, "hash func, key and queues are global config, which work for all flow types. " ++ "Recommend: don't set them together with pattern."); + + return 0; + } + ++static bool ++hns3_rss_action_is_dup(struct hns3_hw *hw, ++ const struct hns3_flow_rss_conf *conf) ++{ ++ struct hns3_rss_conf_ele *filter; ++ ++ TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) { ++ if (conf->pattern_type != filter->filter_info.pattern_type) ++ continue; ++ ++ if (hns3_action_rss_same(&filter->filter_info.conf, &conf->conf)) ++ return true; ++ } ++ ++ return false; ++} ++ ++/* ++ * This function is used to parse rss action validation. 
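As an aside on the hunk above: hns3_flow_parse_pattern_type() validates the RSS pattern purely by table lookup. For each non-void item it consults a bitmap of item types allowed to follow the previous one, maps the item to a header bit, and rejects duplicates by accumulating those bits. A minimal standalone sketch of the same allow-table idea, with invented item ids and table contents rather than the driver's real hash_pattern_next_allow_items[]:

    #include <stdbool.h>
    #include <stdint.h>

    #define BIT_ULL(n) (1ULL << (n))

    enum item { ITEM_VOID, ITEM_ETH, ITEM_IPV4, ITEM_TCP, ITEM_END, ITEM_MAX };

    /* For each item type, the set of types allowed to follow it.
     * Entries here are illustrative, not the driver's tables. */
    static const uint64_t next_allowed[ITEM_MAX] = {
        [ITEM_VOID] = BIT_ULL(ITEM_ETH) | BIT_ULL(ITEM_IPV4),
        [ITEM_ETH]  = BIT_ULL(ITEM_IPV4),
        [ITEM_IPV4] = BIT_ULL(ITEM_TCP),
    };

    static bool pattern_chain_valid(const enum item *items)
    {
        enum item prev = ITEM_VOID;
        uint64_t seen = 0;

        for (; *items != ITEM_END; items++) {
            if (*items == ITEM_VOID)
                continue;
            /* Item may not follow its predecessor. */
            if (!(next_allowed[prev] & BIT_ULL(*items)))
                return false;
            /* Duplicate header in one pattern. */
            if (seen & BIT_ULL(*items))
                return false;
            seen |= BIT_ULL(*items);
            prev = *items;
        }
        return seen != 0;   /* empty patterns are handled separately */
    }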
++ */ + static int +-hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) ++hns3_parse_rss_filter(struct rte_eth_dev *dev, ++ const struct rte_flow_item pattern[], ++ const struct rte_flow_action *actions, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) + { ++ struct hns3_adapter *hns = dev->data->dev_private; ++ const struct rte_flow_action_rss *rss_act; ++ const struct rte_flow_action *act; ++ const struct rte_flow_item *pat; ++ struct hns3_hw *hw = &hns->hw; ++ uint32_t index = 0; + int ret; + +- hns3_adjust_rss_key(hw, rss_config); ++ NEXT_ITEM_OF_ACTION(act, actions, index); ++ if (actions[1].type != RTE_FLOW_ACTION_TYPE_END) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ &actions[1], ++ "Only support one action for RSS."); + +- ret = hns3_parse_rss_algorithm(hw, &rss_config->func, +- &hw->rss_info.hash_algo); +- if (ret) +- return ret; ++ rss_act = (const struct rte_flow_action_rss *)act->conf; ++ if (rss_act == NULL) { ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ act, "lost RSS action configuration"); ++ } + +- ret = hns3_rss_set_algo_key(hw, rss_config->key); +- if (ret) ++ if (rss_act->level != 0) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ act, ++ "RSS level is not supported"); ++ ++ index = 0; ++ NEXT_ITEM_OF_PATTERN(pat, pattern, index); ++ if (pat[0].type == RTE_FLOW_ITEM_TYPE_END) { ++ rss_conf->pattern_type = 0; ++ ret = hns3_flow_parse_hash_global_conf(dev, rss_act, ++ rss_conf, error); ++ } else { ++ ret = hns3_flow_parse_pattern_act(dev, pat, rss_act, ++ rss_conf, error); ++ } ++ if (ret != 0) + return ret; + +- hw->rss_info.conf.func = rss_config->func; +- +- ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_config->types); +- if (ret) +- hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret); ++ if (hns3_rss_action_is_dup(hw, rss_conf)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ act, "duplicate RSS rule"); + +- return ret; ++ return 0; + } + + static int +@@ -1516,8 +1873,6 @@ hns3_update_indir_table(struct hns3_hw *hw, + uint32_t i; + + /* Fill in redirection table */ +- memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl, +- sizeof(hw->rss_info.rss_indirection_tbl)); + for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) { + j %= num; + if (conf->queue[j] >= hw->alloc_rss_size) { +@@ -1532,82 +1887,106 @@ hns3_update_indir_table(struct hns3_hw *hw, + return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size); + } + ++static uint64_t ++hns3_flow_get_pctype_tuple_mask(uint64_t hw_pctype) ++{ ++ uint64_t tuple_mask = 0; ++ uint16_t i; ++ ++ for (i = 0; i < RTE_DIM(hash_map_table); i++) { ++ if (hw_pctype == hash_map_table[i].hw_pctype) { ++ tuple_mask = hash_map_table[i].tuple_mask; ++ break; ++ } ++ } ++ ++ return tuple_mask; ++} ++ ++static int ++hns3_flow_set_rss_ptype_tuple(struct hns3_hw *hw, ++ struct hns3_flow_rss_conf *rss_conf) ++{ ++ uint64_t old_tuple_fields, new_tuple_fields; ++ uint64_t hw_pctypes, tuples, tuple_mask = 0; ++ bool cfg_global_tuple; ++ int ret; ++ ++ cfg_global_tuple = (rss_conf->pattern_type == 0); ++ if (!cfg_global_tuple) { ++ /* ++ * To ensure that different packets do not affect each other, ++ * we have to first read all tuple fields, and then only modify ++ * the tuples for the specified packet type. 
++ */ ++ ret = hns3_get_rss_tuple_field(hw, &old_tuple_fields); ++ if (ret != 0) ++ return ret; ++ ++ new_tuple_fields = old_tuple_fields; ++ hw_pctypes = rss_conf->hw_pctypes; ++ while (hw_pctypes > 0) { ++ uint32_t idx = rte_bsf64(hw_pctypes); ++ uint64_t pctype = BIT_ULL(idx); ++ ++ tuple_mask = hns3_flow_get_pctype_tuple_mask(pctype); ++ tuples = hns3_rss_calc_tuple_filed(rss_conf->conf.types); ++ new_tuple_fields &= ~tuple_mask; ++ new_tuple_fields |= tuples; ++ hw_pctypes &= ~pctype; ++ } ++ } else { ++ new_tuple_fields = ++ hns3_rss_calc_tuple_filed(rss_conf->conf.types); ++ } ++ ++ ret = hns3_set_rss_tuple_field(hw, new_tuple_fields); ++ if (ret != 0) ++ return ret; ++ ++ if (!cfg_global_tuple) ++ hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64, ++ old_tuple_fields, new_tuple_fields); ++ ++ return 0; ++} ++ + static int + hns3_config_rss_filter(struct hns3_hw *hw, +- const struct hns3_rss_conf *conf, bool add) ++ struct hns3_flow_rss_conf *rss_conf) + { +- struct hns3_rss_conf *rss_info; +- uint64_t flow_types; +- uint16_t num; ++ struct rte_flow_action_rss *rss_act; + int ret; + +- struct rte_flow_action_rss rss_flow_conf = { +- .func = conf->conf.func, +- .level = conf->conf.level, +- .types = conf->conf.types, +- .key_len = conf->conf.key_len, +- .queue_num = conf->conf.queue_num, +- .key = conf->conf.key_len ? +- (void *)(uintptr_t)conf->conf.key : NULL, +- .queue = conf->conf.queue, +- }; +- +- /* Filter the unsupported flow types */ +- flow_types = conf->conf.types ? +- rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT : +- hw->rss_info.conf.types; +- if (flow_types != rss_flow_conf.types) +- hns3_warn(hw, "modified RSS types based on hardware support, " +- "requested:0x%" PRIx64 " configured:0x%" PRIx64, +- rss_flow_conf.types, flow_types); +- /* Update the useful flow types */ +- rss_flow_conf.types = flow_types; +- +- rss_info = &hw->rss_info; +- if (!add) { +- if (!conf->valid) +- return 0; +- +- ret = hns3_disable_rss(hw); ++ rss_act = &rss_conf->conf; ++ if (rss_act->queue_num > 0) { ++ ret = hns3_update_indir_table(hw, rss_act, rss_act->queue_num); + if (ret) { +- hns3_err(hw, "RSS disable failed(%d)", ret); ++ hns3_err(hw, "set queues action failed, ret = %d", ret); + return ret; + } ++ } + +- if (rss_flow_conf.queue_num) { +- /* +- * Due the content of queue pointer have been reset to +- * 0, the rss_info->conf.queue should be set to NULL +- */ +- rss_info->conf.queue = NULL; +- rss_info->conf.queue_num = 0; ++ if (rss_act->key_len > 0 || ++ rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT) { ++ ret = hns3_update_rss_algo_key(hw, rss_act->func, rss_conf->key, ++ rss_act->key_len); ++ if (ret != 0) { ++ hns3_err(hw, "set func or hash key action failed, ret = %d", ++ ret); ++ return ret; + } +- +- return 0; + } + +- /* Set rx queues to use */ +- num = RTE_MIN(hw->data->nb_rx_queues, rss_flow_conf.queue_num); +- if (rss_flow_conf.queue_num > num) +- hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated", +- rss_flow_conf.queue_num); +- hns3_info(hw, "Max of contiguous %u PF queues are configured", num); +- if (num) { +- ret = hns3_update_indir_table(hw, &rss_flow_conf, num); +- if (ret) ++ if (rss_conf->hw_pctypes > 0) { ++ ret = hns3_flow_set_rss_ptype_tuple(hw, rss_conf); ++ if (ret != 0) { ++ hns3_err(hw, "set types action failed, ret = %d", ret); + return ret; ++ } + } + +- /* Set hash algorithm and flow types by the user's config */ +- ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf); +- if (ret) +- return ret; +- +- ret = 
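The tuple programming above is deliberately a read-modify-write: when a pattern is present, hns3_flow_set_rss_ptype_tuple() reads the whole tuple-field word once, then for each selected hardware packet type clears only that type's mask before OR-ing in the new tuples, so rules for other packet types are left intact. A hedged sketch of that loop, with stand-in accessors in place of the real firmware commands:

    #include <stdint.h>
    #include <strings.h>            /* ffsll() */

    /* Stand-ins for the driver's firmware accessors. */
    extern int read_tuple_fields(uint64_t *fields);
    extern int write_tuple_fields(uint64_t fields);
    extern uint64_t pctype_tuple_mask(uint64_t pctype);

    static int set_tuples_for_pctypes(uint64_t hw_pctypes, uint64_t new_tuples)
    {
        uint64_t fields, pctype, mask;
        int ret;

        ret = read_tuple_fields(&fields);
        if (ret != 0)
            return ret;

        while (hw_pctypes != 0) {
            pctype = 1ULL << (ffsll((long long)hw_pctypes) - 1);
            mask = pctype_tuple_mask(pctype);

            fields &= ~mask;                 /* clear this type's group */
            fields |= (new_tuples & mask);   /* then set requested bits */
            hw_pctypes &= ~pctype;
        }
        return write_tuple_fields(fields);
    }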
hns3_rss_conf_copy(rss_info, &rss_flow_conf); +- if (ret) +- hns3_err(hw, "RSS config init fail(%d)", ret); +- +- return ret; ++ return 0; + } + + static int +@@ -1616,51 +1995,44 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev) + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_hw *hw = &hns->hw; +- int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */ +- int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */ +- int ret = 0; + + rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); + while (rss_filter_ptr) { + TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); +- ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info, +- false); +- if (ret) +- rss_rule_fail_cnt++; +- else +- rss_rule_succ_cnt++; + rte_free(rss_filter_ptr); + rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); + } + +- if (rss_rule_fail_cnt) { +- hns3_err(hw, "fail to delete all RSS filters, success num = %d fail num = %d", +- rss_rule_succ_cnt, rss_rule_fail_cnt); +- ret = -EIO; +- } +- +- return ret; ++ return hns3_config_rss(hns); + } + + static int +-hns3_restore_rss_filter(struct hns3_hw *hw) ++hns3_reconfig_all_rss_filter(struct hns3_hw *hw) + { + struct hns3_rss_conf_ele *filter; +- int ret = 0; ++ uint32_t rule_no = 0; ++ int ret; + +- pthread_mutex_lock(&hw->flows_lock); + TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) { +- if (!filter->filter_info.valid) +- continue; +- +- ret = hns3_config_rss_filter(hw, &filter->filter_info, true); ++ ret = hns3_config_rss_filter(hw, &filter->filter_info); + if (ret != 0) { +- hns3_err(hw, "restore RSS filter failed, ret=%d", ret); +- goto out; ++ hns3_err(hw, "config %uth RSS filter failed, ret = %d", ++ rule_no, ret); ++ return ret; + } ++ rule_no++; + } + +-out: ++ return 0; ++} ++ ++static int ++hns3_restore_rss_filter(struct hns3_hw *hw) ++{ ++ int ret; ++ ++ pthread_mutex_lock(&hw->flows_lock); ++ ret = hns3_reconfig_all_rss_filter(hw); + pthread_mutex_unlock(&hw->flows_lock); + + return ret; +@@ -1679,23 +2051,6 @@ hns3_restore_filter(struct hns3_adapter *hns) + return hns3_restore_rss_filter(hw); + } + +-static int +-hns3_flow_parse_rss(struct rte_eth_dev *dev, +- const struct hns3_rss_conf *conf, bool add) +-{ +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- bool ret; +- +- ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf); +- if (ret) { +- hns3_err(hw, "Enter duplicate RSS configuration : %d", ret); +- return -EINVAL; +- } +- +- return hns3_config_rss_filter(hw, conf, add); +-} +- + static int + hns3_flow_args_check(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], +@@ -1729,31 +2084,55 @@ static int + hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], +- struct rte_flow_error *error) ++ struct rte_flow_error *error, ++ struct hns3_filter_info *filter_info) + { +- struct hns3_fdir_rule fdir_rule; ++ union hns3_filter_conf *conf; + int ret; + + ret = hns3_flow_args_check(attr, pattern, actions, error); + if (ret) + return ret; + +- if (hns3_find_rss_general_action(pattern, actions)) +- return hns3_parse_rss_filter(dev, actions, error); ++ hns3_parse_filter_type(pattern, actions, filter_info); ++ conf = &filter_info->conf; ++ if (filter_info->type == RTE_ETH_FILTER_HASH) ++ return hns3_parse_rss_filter(dev, pattern, actions, ++ &conf->rss_conf, error); ++ ++ return 
hns3_parse_fdir_filter(dev, pattern, actions, ++ &conf->fdir_conf, error); ++} ++ ++static int ++hns3_flow_rebuild_all_rss_filter(struct hns3_adapter *hns) ++{ ++ struct hns3_hw *hw = &hns->hw; ++ int ret; + +- memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); +- return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); ++ ret = hns3_config_rss(hns); ++ if (ret != 0) { ++ hns3_err(hw, "restore original RSS configuration failed, ret = %d.", ++ ret); ++ return ret; ++ } ++ ret = hns3_reconfig_all_rss_filter(hw); ++ if (ret != 0) ++ hns3_err(hw, "rebuild all RSS filter failed, ret = %d.", ret); ++ ++ return ret; + } + + static int + hns3_flow_create_rss_rule(struct rte_eth_dev *dev, +- const struct rte_flow_action *act, ++ struct hns3_flow_rss_conf *rss_conf, + struct rte_flow *flow) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_rss_conf_ele *rss_filter_ptr; +- struct hns3_rss_conf_ele *filter_ptr; +- const struct hns3_rss_conf *rss_conf; ++ struct hns3_flow_rss_conf *new_conf; ++ struct rte_flow_action_rss *rss_act; + int ret; + + rss_filter_ptr = rte_zmalloc("hns3 rss filter", +@@ -1763,28 +2142,29 @@ hns3_flow_create_rss_rule(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ new_conf = &rss_filter_ptr->filter_info; ++ memcpy(new_conf, rss_conf, sizeof(*new_conf)); ++ rss_act = &new_conf->conf; ++ if (rss_act->queue_num > 0) ++ new_conf->conf.queue = new_conf->queue; + /* +- * After all the preceding tasks are successfully configured, configure +- * rules to the hardware to simplify the rollback of rules in the +- * hardware. ++ * There are two ways to deliver hash key action: ++ * 1> 'key_len' is greater than zero and 'key' isn't NULL. ++ * 2> 'key_len' is greater than zero, but 'key' is NULL. ++ * For case 2, we need to keep 'key' of the new_conf is NULL so as to ++ * inherit the configuration from user in case of failing to verify ++ * duplicate rule later. + */ +- rss_conf = (const struct hns3_rss_conf *)act->conf; +- ret = hns3_flow_parse_rss(dev, rss_conf, true); ++ if (rss_act->key_len > 0 && rss_act->key != NULL) ++ new_conf->conf.key = new_conf->key; ++ ++ ret = hns3_config_rss_filter(hw, new_conf); + if (ret != 0) { + rte_free(rss_filter_ptr); ++ (void)hns3_flow_rebuild_all_rss_filter(hns); + return ret; + } + +- hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf); +- rss_filter_ptr->filter_info.valid = true; +- +- /* +- * When create a new RSS rule, the old rule will be overlaid and set +- * invalid. 
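hns3_flow_rebuild_all_rss_filter() above gives the create and destroy paths a uniform rollback: whenever hardware state may be stale, restore the baseline RSS configuration and replay every rule still held in the software list. The pattern, sketched over a plain linked list with placeholder apply/restore hooks:

    #include <stdio.h>

    struct rss_filter {
        struct rss_filter *next;
        /* parsed rule state ... */
    };

    /* Placeholders for hns3_config_rss() / hns3_config_rss_filter(). */
    extern int restore_default_rss(void);
    extern int apply_filter(const struct rss_filter *f);

    static int rebuild_all_filters(const struct rss_filter *head)
    {
        unsigned int no = 0;
        int ret;

        ret = restore_default_rss();
        if (ret != 0)
            return ret;

        for (; head != NULL; head = head->next, no++) {
            ret = apply_filter(head);
            if (ret != 0) {
                fprintf(stderr, "filter %u failed: %d\n", no, ret);
                return ret;
            }
        }
        return 0;
    }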
+- */ +- TAILQ_FOREACH(filter_ptr, &hw->flow_rss_list, entries) +- filter_ptr->filter_info.valid = false; +- + TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries); + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; +@@ -1794,31 +2174,24 @@ hns3_flow_create_rss_rule(struct rte_eth_dev *dev, + + static int + hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, +- const struct rte_flow_item pattern[], +- const struct rte_flow_action actions[], ++ struct hns3_fdir_rule *fdir_rule, + struct rte_flow_error *error, + struct rte_flow *flow) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_fdir_rule_ele *fdir_rule_ptr; +- struct hns3_fdir_rule fdir_rule; + bool indir; + int ret; + +- memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); +- ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); +- if (ret != 0) +- return ret; +- +- indir = !!(fdir_rule.flags & HNS3_RULE_FLAG_COUNTER_INDIR); +- if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) { +- ret = hns3_counter_new(dev, indir, fdir_rule.act_cnt.id, ++ indir = !!(fdir_rule->flags & HNS3_RULE_FLAG_COUNTER_INDIR); ++ if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) { ++ ret = hns3_counter_new(dev, indir, fdir_rule->act_cnt.id, + error); + if (ret != 0) + return ret; + +- flow->counter_id = fdir_rule.act_cnt.id; ++ flow->counter_id = fdir_rule->act_cnt.id; + } + + fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", +@@ -1834,11 +2207,11 @@ hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, + * rules to the hardware to simplify the rollback of rules in the + * hardware. + */ +- ret = hns3_fdir_filter_program(hns, &fdir_rule, false); ++ ret = hns3_fdir_filter_program(hns, fdir_rule, false); + if (ret != 0) + goto err_fdir_filter; + +- memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, ++ memcpy(&fdir_rule_ptr->fdir_conf, fdir_rule, + sizeof(struct hns3_fdir_rule)); + TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; +@@ -1849,8 +2222,8 @@ hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, + err_fdir_filter: + rte_free(fdir_rule_ptr); + err_malloc: +- if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) +- hns3_counter_release(dev, fdir_rule.act_cnt.id); ++ if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) ++ hns3_counter_release(dev, fdir_rule->act_cnt.id); + + return ret; + } +@@ -1868,13 +2241,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + struct rte_flow_error *error) + { + struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; ++ struct hns3_filter_info filter_info = {0}; + struct hns3_flow_mem *flow_node; +- const struct rte_flow_action *act; ++ struct hns3_hw *hw = &hns->hw; ++ union hns3_filter_conf *conf; + struct rte_flow *flow; + int ret; + +- ret = hns3_flow_validate(dev, attr, pattern, actions, error); ++ ret = hns3_flow_validate(dev, attr, pattern, actions, error, ++ &filter_info); + if (ret) + return NULL; + +@@ -1894,13 +2269,12 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + } + + flow_node->flow = flow; ++ conf = &filter_info.conf; + TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries); +- +- act = hns3_find_rss_general_action(pattern, actions); +- if (act) +- ret = hns3_flow_create_rss_rule(dev, act, flow); ++ if (filter_info.type == RTE_ETH_FILTER_HASH) ++ ret = hns3_flow_create_rss_rule(dev, &conf->rss_conf, flow); + else +- ret = hns3_flow_create_fdir_rule(dev, 
pattern, actions, ++ ret = hns3_flow_create_fdir_rule(dev, &conf->fdir_conf, + error, flow); + if (ret == 0) + return flow; +@@ -1954,16 +2328,10 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule; +- ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info, +- false); +- if (ret) +- return rte_flow_error_set(error, EIO, +- RTE_FLOW_ERROR_TYPE_HANDLE, +- flow, +- "Destroy RSS fail.Try again"); + TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); + rte_free(rss_filter_ptr); + rss_filter_ptr = NULL; ++ (void)hns3_flow_rebuild_all_rss_filter(hns); + break; + default: + return rte_flow_error_set(error, EINVAL, +@@ -2069,10 +2437,12 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev, + struct rte_flow_error *error) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct hns3_filter_info filter_info = {0}; + int ret; + + pthread_mutex_lock(&hw->flows_lock); +- ret = hns3_flow_validate(dev, attr, pattern, actions, error); ++ ret = hns3_flow_validate(dev, attr, pattern, actions, error, ++ &filter_info); + pthread_mutex_unlock(&hw->flows_lock); + + return ret; +diff --git a/dpdk/drivers/net/hns3/hns3_flow.h b/dpdk/drivers/net/hns3/hns3_flow.h +index e4b2fdf2e6..1b49673f11 100644 +--- a/dpdk/drivers/net/hns3/hns3_flow.h ++++ b/dpdk/drivers/net/hns3/hns3_flow.h +@@ -9,6 +9,7 @@ + #include + + #include "hns3_rss.h" ++#include "hns3_fdir.h" + + struct hns3_flow_counter { + LIST_ENTRY(hns3_flow_counter) next; /* Pointer to the next counter. */ +@@ -24,10 +25,18 @@ struct rte_flow { + uint32_t counter_id; + }; + ++struct hns3_flow_rss_conf { ++ struct rte_flow_action_rss conf; ++ uint8_t key[HNS3_RSS_KEY_SIZE_MAX]; /* Hash key */ ++ uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ ++ uint64_t pattern_type; ++ uint64_t hw_pctypes; /* packet types in driver */ ++}; ++ + /* rss filter list structure */ + struct hns3_rss_conf_ele { + TAILQ_ENTRY(hns3_rss_conf_ele) entries; +- struct hns3_rss_conf filter_info; ++ struct hns3_flow_rss_conf filter_info; + }; + + /* hns3_flow memory list structure */ +@@ -45,6 +54,16 @@ struct rte_flow_action_handle { + uint32_t counter_id; + }; + ++union hns3_filter_conf { ++ struct hns3_fdir_rule fdir_conf; ++ struct hns3_flow_rss_conf rss_conf; ++}; ++ ++struct hns3_filter_info { ++ enum rte_filter_type type; ++ union hns3_filter_conf conf; ++}; ++ + TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele); + TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); + +diff --git a/dpdk/drivers/net/hns3/hns3_mp.c b/dpdk/drivers/net/hns3/hns3_mp.c +index 7184f9ad58..556f1941c6 100644 +--- a/dpdk/drivers/net/hns3/hns3_mp.c ++++ b/dpdk/drivers/net/hns3/hns3_mp.c +@@ -89,12 +89,12 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + case HNS3_MP_REQ_START_RXTX: + PMD_INIT_LOG(INFO, "port %u starting datapath", + dev->data->port_id); +- hns3_set_rxtx_function(dev); ++ hns3_start_rxtx_datapath(dev); + break; + case HNS3_MP_REQ_STOP_RXTX: + PMD_INIT_LOG(INFO, "port %u stopping datapath", + dev->data->port_id); +- hns3_set_rxtx_function(dev); ++ hns3_stop_rxtx_datapath(dev); + break; + case HNS3_MP_REQ_START_TX: + PMD_INIT_LOG(INFO, "port %u starting Tx datapath", +diff --git a/dpdk/drivers/net/hns3/hns3_ptp.c b/dpdk/drivers/net/hns3/hns3_ptp.c +index 6bbd85ba23..894ac6dd71 100644 +--- a/dpdk/drivers/net/hns3/hns3_ptp.c ++++ b/dpdk/drivers/net/hns3/hns3_ptp.c +@@ -7,7 +7,7 @@ + #include + + #include 
"hns3_ethdev.h" +-#include "hns3_regs.h" ++#include "hns3_ptp.h" + #include "hns3_logs.h" + + uint64_t hns3_timestamp_rx_dynflag; +@@ -56,9 +56,23 @@ hns3_ptp_int_en(struct hns3_hw *hw, bool en) + return ret; + } + ++static void ++hns3_ptp_timesync_write_time(struct hns3_hw *hw, const struct timespec *ts) ++{ ++ uint64_t sec = ts->tv_sec; ++ uint64_t ns = ts->tv_nsec; ++ ++ /* Set the timecounters to a new value. */ ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_H, upper_32_bits(sec)); ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_M, lower_32_bits(sec)); ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_L, lower_32_bits(ns)); ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_RDY, 1); ++} ++ + int + hns3_ptp_init(struct hns3_hw *hw) + { ++ struct timespec sys_time; + int ret; + + if (!hns3_dev_get_support(hw, PTP)) +@@ -71,6 +85,10 @@ hns3_ptp_init(struct hns3_hw *hw) + /* Start PTP timer */ + hns3_write_dev(hw, HNS3_CFG_TIME_CYC_EN, 1); + ++ /* Initializing the RTC. */ ++ clock_gettime(CLOCK_REALTIME, &sys_time); ++ hns3_ptp_timesync_write_time(hw, &sys_time); ++ + return 0; + } + +@@ -216,17 +234,21 @@ hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + int + hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) + { ++#define HNS3_PTP_SEC_H_OFFSET 32 ++#define HNS3_PTP_SEC_H_MASK 0xFFFF ++ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ uint32_t sec_hi, sec_lo; + uint64_t ns, sec; + + if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + +- sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L); +- sec |= (uint64_t)(hns3_read_dev(hw, HNS3_CURR_TIME_OUT_H) & 0xFFFF) +- << 32; +- + ns = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_NS); ++ sec_hi = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_H) & HNS3_PTP_SEC_H_MASK; ++ sec_lo = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L); ++ sec = ((uint64_t)sec_hi << HNS3_PTP_SEC_H_OFFSET) | sec_lo; ++ + ns += sec * NSEC_PER_SEC; + *ts = rte_ns_to_timespec(ns); + +@@ -237,17 +259,11 @@ int + hns3_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint64_t sec = ts->tv_sec; +- uint64_t ns = ts->tv_nsec; + + if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + +- /* Set the timecounters to a new value. */ +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_H, upper_32_bits(sec)); +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_M, lower_32_bits(sec)); +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_L, lower_32_bits(ns)); +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_RDY, 1); ++ hns3_ptp_timesync_write_time(hw, ts); + + return 0; + } +@@ -290,3 +306,21 @@ hns3_restore_ptp(struct hns3_adapter *hns) + + return ret; + } ++ ++void ++hns3_ptp_uninit(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ int ret; ++ ++ if (!hns3_dev_get_support(hw, PTP)) ++ return; ++ ++ ret = hns3_ptp_int_en(hw, false); ++ if (ret != 0) ++ hns3_err(hw, "disable PTP interrupt failed, ret = %d.", ret); ++ ++ ret = hns3_timesync_configure(hns, false); ++ if (ret != 0) ++ hns3_err(hw, "disable timesync failed, ret = %d.", ret); ++} +diff --git a/dpdk/drivers/net/hns3/hns3_ptp.h b/dpdk/drivers/net/hns3/hns3_ptp.h +new file mode 100644 +index 0000000000..2b8717fa3c +--- /dev/null ++++ b/dpdk/drivers/net/hns3/hns3_ptp.h +@@ -0,0 +1,48 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2023 HiSilicon Limited. 
++ */ ++ ++#ifndef HNS3_PTP_H ++#define HNS3_PTP_H ++ ++/* Register bit for 1588 event */ ++#define HNS3_VECTOR0_1588_INT_B 0 ++ ++#define HNS3_PTP_BASE_ADDRESS 0x29000 ++ ++#define HNS3_TX_1588_SEQID_BACK (HNS3_PTP_BASE_ADDRESS + 0x0) ++#define HNS3_TX_1588_TSP_BACK_0 (HNS3_PTP_BASE_ADDRESS + 0x4) ++#define HNS3_TX_1588_TSP_BACK_1 (HNS3_PTP_BASE_ADDRESS + 0x8) ++#define HNS3_TX_1588_TSP_BACK_2 (HNS3_PTP_BASE_ADDRESS + 0xc) ++ ++#define HNS3_TX_1588_BACK_TSP_CNT (HNS3_PTP_BASE_ADDRESS + 0x30) ++ ++#define HNS3_CFG_TIME_SYNC_H (HNS3_PTP_BASE_ADDRESS + 0x50) ++#define HNS3_CFG_TIME_SYNC_M (HNS3_PTP_BASE_ADDRESS + 0x54) ++#define HNS3_CFG_TIME_SYNC_L (HNS3_PTP_BASE_ADDRESS + 0x58) ++#define HNS3_CFG_TIME_SYNC_RDY (HNS3_PTP_BASE_ADDRESS + 0x5c) ++ ++#define HNS3_CFG_TIME_CYC_EN (HNS3_PTP_BASE_ADDRESS + 0x70) ++ ++#define HNS3_CURR_TIME_OUT_H (HNS3_PTP_BASE_ADDRESS + 0x74) ++#define HNS3_CURR_TIME_OUT_L (HNS3_PTP_BASE_ADDRESS + 0x78) ++#define HNS3_CURR_TIME_OUT_NS (HNS3_PTP_BASE_ADDRESS + 0x7c) ++ ++int hns3_restore_ptp(struct hns3_adapter *hns); ++int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, ++ struct rte_eth_conf *conf); ++int hns3_ptp_init(struct hns3_hw *hw); ++void hns3_ptp_uninit(struct hns3_hw *hw); ++int hns3_timesync_enable(struct rte_eth_dev *dev); ++int hns3_timesync_disable(struct rte_eth_dev *dev); ++int hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev, ++ struct timespec *timestamp, ++ uint32_t flags __rte_unused); ++int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, ++ struct timespec *timestamp); ++int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts); ++int hns3_timesync_write_time(struct rte_eth_dev *dev, ++ const struct timespec *ts); ++int hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); ++ ++#endif /* HNS3_PTP_H */ +diff --git a/dpdk/drivers/net/hns3/hns3_regs.c b/dpdk/drivers/net/hns3/hns3_regs.c +index 33392fd1f0..5d6f92e4bb 100644 +--- a/dpdk/drivers/net/hns3/hns3_regs.c ++++ b/dpdk/drivers/net/hns3/hns3_regs.c +@@ -294,8 +294,9 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data) + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + uint32_t *origin_data_ptr = data; + uint32_t reg_offset; +- uint16_t i, j; + size_t reg_num; ++ uint16_t j; ++ size_t i; + + /* fetching per-PF registers values from PF PCIe register space */ + reg_num = sizeof(cmdq_reg_addrs) / sizeof(uint32_t); +diff --git a/dpdk/drivers/net/hns3/hns3_regs.h b/dpdk/drivers/net/hns3/hns3_regs.h +index 459bbaf773..6b037f81c1 100644 +--- a/dpdk/drivers/net/hns3/hns3_regs.h ++++ b/dpdk/drivers/net/hns3/hns3_regs.h +@@ -124,29 +124,6 @@ + #define HNS3_TQP_INTR_RL_DEFAULT 0 + #define HNS3_TQP_INTR_QL_DEFAULT 0 + +-/* Register bit for 1588 event */ +-#define HNS3_VECTOR0_1588_INT_B 0 +- +-#define HNS3_PTP_BASE_ADDRESS 0x29000 +- +-#define HNS3_TX_1588_SEQID_BACK (HNS3_PTP_BASE_ADDRESS + 0x0) +-#define HNS3_TX_1588_TSP_BACK_0 (HNS3_PTP_BASE_ADDRESS + 0x4) +-#define HNS3_TX_1588_TSP_BACK_1 (HNS3_PTP_BASE_ADDRESS + 0x8) +-#define HNS3_TX_1588_TSP_BACK_2 (HNS3_PTP_BASE_ADDRESS + 0xc) +- +-#define HNS3_TX_1588_BACK_TSP_CNT (HNS3_PTP_BASE_ADDRESS + 0x30) +- +-#define HNS3_CFG_TIME_SYNC_H (HNS3_PTP_BASE_ADDRESS + 0x50) +-#define HNS3_CFG_TIME_SYNC_M (HNS3_PTP_BASE_ADDRESS + 0x54) +-#define HNS3_CFG_TIME_SYNC_L (HNS3_PTP_BASE_ADDRESS + 0x58) +-#define HNS3_CFG_TIME_SYNC_RDY (HNS3_PTP_BASE_ADDRESS + 0x5c) +- +-#define HNS3_CFG_TIME_CYC_EN (HNS3_PTP_BASE_ADDRESS + 0x70) +- +-#define HNS3_CURR_TIME_OUT_H (HNS3_PTP_BASE_ADDRESS + 0x74) 
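The PTP rework above centralizes one piece of arithmetic: the hardware keeps a 48-bit seconds counter split into a 16-bit high word and a 32-bit low word, so hns3_timesync_read_time() composes the two registers and hns3_ptp_timesync_write_time() splits a timespec back out before setting the ready latch. The composition, as a tiny self-contained sketch:

    #include <stdint.h>

    #define SEC_H_MASK  0xFFFFu   /* high register holds seconds bits 32..47 */
    #define SEC_H_SHIFT 32

    /* Read path: build the 48-bit seconds value from two registers. */
    static uint64_t compose_sec(uint32_t sec_hi, uint32_t sec_lo)
    {
        return ((uint64_t)(sec_hi & SEC_H_MASK) << SEC_H_SHIFT) | sec_lo;
    }

    /* Write path: split it again for the H/M (plus nanosecond L) registers. */
    static void split_sec(uint64_t sec, uint32_t *hi, uint32_t *lo)
    {
        *hi = (uint32_t)(sec >> SEC_H_SHIFT) & SEC_H_MASK;
        *lo = (uint32_t)sec;
    }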
+-#define HNS3_CURR_TIME_OUT_L (HNS3_PTP_BASE_ADDRESS + 0x78) +-#define HNS3_CURR_TIME_OUT_NS (HNS3_PTP_BASE_ADDRESS + 0x7c) +- + /* gl_usec convert to hardware count, as writing each 1 represents 2us */ + #define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1) + /* rl_usec convert to hardware count, as writing each 1 represents 4us */ +diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c +index ca5a129234..6126512bd7 100644 +--- a/dpdk/drivers/net/hns3/hns3_rss.c ++++ b/dpdk/drivers/net/hns3/hns3_rss.c +@@ -18,56 +18,11 @@ const uint8_t hns3_hash_key[HNS3_RSS_KEY_SIZE] = { + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA + }; + +-enum hns3_tuple_field { +- /* IPV4_TCP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D = 0, +- HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S, +- HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D, +- HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S, +- +- /* IPV4_UDP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D = 8, +- HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S, +- HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D, +- HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S, +- +- /* IPV4_SCTP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D = 16, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, +- +- /* IPV4 ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, +- HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S, +- HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D, +- HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S, +- +- /* IPV6_TCP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D = 32, +- HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S, +- HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D, +- HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S, +- +- /* IPV6_UDP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D = 40, +- HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S, +- HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D, +- HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S, +- +- /* IPV6_SCTP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D = 48, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, +- +- /* IPV6 ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, +- HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S, +- HNS3_RSS_FIELD_IPV6_FRAG_IP_D, +- HNS3_RSS_FIELD_IPV6_FRAG_IP_S ++const uint8_t hns3_hash_func_map[] = { ++ [RTE_ETH_HASH_FUNCTION_DEFAULT] = HNS3_RSS_HASH_ALGO_TOEPLITZ, ++ [RTE_ETH_HASH_FUNCTION_TOEPLITZ] = HNS3_RSS_HASH_ALGO_TOEPLITZ, ++ [RTE_ETH_HASH_FUNCTION_SIMPLE_XOR] = HNS3_RSS_HASH_ALGO_SIMPLE, ++ [RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ] = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP, + }; + + enum hns3_rss_tuple_type { +@@ -79,243 +34,285 @@ static const struct { + uint64_t rss_types; + uint16_t tuple_type; + uint64_t rss_field; ++ uint64_t tuple_mask; + } hns3_set_tuple_table[] = { + /* IPV4-FRAG */ + { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV4_FLAG_M }, + { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_FLAG_M }, + { RTE_ETH_RSS_FRAG_IPV4, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_FLAG_M }, + + /* IPV4 */ + { RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) 
}, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, + { RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, + { RTE_ETH_RSS_IPV4, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, + + /* IPV4-OTHER */ + { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_OTHER, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, + + /* IPV4-TCP */ + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_TCP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + + /* IPV4-UDP */ + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_UDP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, 
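Each hns3_set_tuple_table[] entry above gains a tuple_mask naming every enable bit of its packet type, which is what lets a rule wipe one type's whole group before applying a partial selection and, later, lets tuple bits be mapped back to RTE_ETH_RSS_* flags. The mask values themselves are not shown in this hunk; as a hedged illustration from the enum removed earlier (IPv4-TCP enable bits at positions 0..3), the corresponding group mask would plausibly be:

    #include <stdint.h>

    #define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    /* IPv4-TCP enable bits: TCP_D=0, TCP_S=1, IP_D=2, IP_S=3, so a
     * whole-group mask covering them is 0x0F. Assumed layout only;
     * the real HNS3_RSS_TUPLE_IPV4_TCP_M is defined elsewhere. */
    #define RSS_TUPLE_IPV4_TCP_GROUP GENMASK_ULL(3, 0)

    /* A partial "L3 source only" update then touches just this group:
     * fields = (fields & ~RSS_TUPLE_IPV4_TCP_GROUP) | (1ULL << 3);   */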
+ + /* IPV4-SCTP */ + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + + /* IPV6-FRAG */ + { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV6_FLAG_M }, + { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_FLAG_M }, + { RTE_ETH_RSS_FRAG_IPV6, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_FLAG_M }, + + /* IPV6 */ + { RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, + { RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, + { RTE_ETH_RSS_IPV6, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, + + /* IPV6-OTHER */ + { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_OTHER, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, + + /* IPV6-TCP */ + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) }, ++ 
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_TCP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + + /* IPV6-UDP */ + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_UDP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + + /* IPV6-SCTP */ + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) }, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + }; + + /* + * rss_generic_config command function, opcode:0x0D01. +- * Used to set algorithm, key_offset and hash key of rss. ++ * Used to set algorithm and hash key of RSS. 
+ */ + int +-hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) ++hns3_rss_set_algo_key(struct hns3_hw *hw, uint8_t hash_algo, ++ const uint8_t *key, uint8_t key_len) + { +-#define HNS3_KEY_OFFSET_MAX 3 +-#define HNS3_SET_HASH_KEY_BYTE_FOUR 2 +- + struct hns3_rss_generic_config_cmd *req; + struct hns3_cmd_desc desc; +- uint32_t key_offset, key_size; +- const uint8_t *key_cur; +- uint8_t cur_offset; ++ const uint8_t *cur_key; ++ uint16_t cur_key_size; ++ uint16_t max_bd_num; ++ uint16_t idx; + int ret; + + req = (struct hns3_rss_generic_config_cmd *)desc.data; + +- /* +- * key_offset=0, hash key byte0~15 is set to hardware. +- * key_offset=1, hash key byte16~31 is set to hardware. +- * key_offset=2, hash key byte32~39 is set to hardware. +- */ +- for (key_offset = 0; key_offset < HNS3_KEY_OFFSET_MAX; key_offset++) { ++ max_bd_num = DIV_ROUND_UP(key_len, HNS3_RSS_HASH_KEY_NUM); ++ for (idx = 0; idx < max_bd_num; idx++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG, + false); + +- req->hash_config |= +- (hw->rss_info.hash_algo & HNS3_RSS_HASH_ALGO_MASK); +- req->hash_config |= (key_offset << HNS3_RSS_HASH_KEY_OFFSET_B); ++ req->hash_config |= (hash_algo & HNS3_RSS_HASH_ALGO_MASK); ++ req->hash_config |= (idx << HNS3_RSS_HASH_KEY_OFFSET_B); + +- if (key_offset == HNS3_SET_HASH_KEY_BYTE_FOUR) +- key_size = HNS3_RSS_KEY_SIZE - HNS3_RSS_HASH_KEY_NUM * +- HNS3_SET_HASH_KEY_BYTE_FOUR; ++ if (idx == max_bd_num - 1 && ++ (key_len % HNS3_RSS_HASH_KEY_NUM) != 0) ++ cur_key_size = key_len % HNS3_RSS_HASH_KEY_NUM; + else +- key_size = HNS3_RSS_HASH_KEY_NUM; ++ cur_key_size = HNS3_RSS_HASH_KEY_NUM; + +- cur_offset = key_offset * HNS3_RSS_HASH_KEY_NUM; +- key_cur = key + cur_offset; +- memcpy(req->hash_key, key_cur, key_size); ++ cur_key = key + idx * HNS3_RSS_HASH_KEY_NUM; ++ memcpy(req->hash_key, cur_key, cur_key_size); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { +@@ -323,8 +320,49 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) + return ret; + } + } +- /* Update the shadow RSS key with user specified */ +- memcpy(hw->rss_info.key, key, HNS3_RSS_KEY_SIZE); ++ ++ return 0; ++} ++ ++int ++hns3_rss_get_algo_key(struct hns3_hw *hw, uint8_t *hash_algo, ++ uint8_t *key, uint8_t key_len) ++{ ++ struct hns3_rss_generic_config_cmd *req; ++ struct hns3_cmd_desc desc; ++ uint16_t cur_key_size; ++ uint16_t max_bd_num; ++ uint8_t *cur_key; ++ uint16_t idx; ++ int ret; ++ ++ req = (struct hns3_rss_generic_config_cmd *)desc.data; ++ max_bd_num = DIV_ROUND_UP(key_len, HNS3_RSS_HASH_KEY_NUM); ++ for (idx = 0; idx < max_bd_num; idx++) { ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG, ++ true); ++ ++ req->hash_config |= (idx << HNS3_RSS_HASH_KEY_OFFSET_B); ++ ret = hns3_cmd_send(hw, &desc, 1); ++ if (ret) { ++ hns3_err(hw, "fail to obtain RSS algo and key from firmware, ret = %d", ++ ret); ++ return ret; ++ } ++ ++ if (idx == 0) ++ *hash_algo = req->hash_config & HNS3_RSS_HASH_ALGO_MASK; ++ ++ if (idx == max_bd_num - 1 && ++ (key_len % HNS3_RSS_HASH_KEY_NUM) != 0) ++ cur_key_size = key_len % HNS3_RSS_HASH_KEY_NUM; ++ else ++ cur_key_size = HNS3_RSS_HASH_KEY_NUM; ++ ++ cur_key = key + idx * HNS3_RSS_HASH_KEY_NUM; ++ memcpy(cur_key, req->hash_key, cur_key_size); ++ } ++ + return 0; + } + +@@ -336,6 +374,7 @@ int + hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) + { + struct hns3_rss_indirection_table_cmd *req; ++ uint16_t max_bd_num, cfg_tbl_size; + struct hns3_cmd_desc desc; + uint8_t qid_msb_off; + uint8_t qid_msb_val; +@@ 
-344,14 +383,20 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) + int ret; + + req = (struct hns3_rss_indirection_table_cmd *)desc.data; +- +- for (i = 0; i < size / HNS3_RSS_CFG_TBL_SIZE; i++) { ++ max_bd_num = DIV_ROUND_UP(size, HNS3_RSS_CFG_TBL_SIZE); ++ for (i = 0; i < max_bd_num; i++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INDIR_TABLE, + false); + req->start_table_index = + rte_cpu_to_le_16(i * HNS3_RSS_CFG_TBL_SIZE); + req->rss_set_bitmap = rte_cpu_to_le_16(HNS3_RSS_SET_BITMAP_MSK); +- for (j = 0; j < HNS3_RSS_CFG_TBL_SIZE; j++) { ++ ++ if (i == max_bd_num - 1 && (size % HNS3_RSS_CFG_TBL_SIZE) != 0) ++ cfg_tbl_size = size % HNS3_RSS_CFG_TBL_SIZE; ++ else ++ cfg_tbl_size = HNS3_RSS_CFG_TBL_SIZE; ++ ++ for (j = 0; j < cfg_tbl_size; j++) { + q_id = indir[i * HNS3_RSS_CFG_TBL_SIZE + j]; + req->rss_result_l[j] = q_id & 0xff; + +@@ -372,9 +417,53 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) + } + } + +- /* Update redirection table of hw */ +- memcpy(hw->rss_info.rss_indirection_tbl, indir, +- sizeof(uint16_t) * size); ++ return 0; ++} ++ ++static int ++hns3_get_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) ++{ ++ struct hns3_rss_indirection_table_cmd *req; ++ uint16_t max_bd_num, cfg_tbl_size; ++ uint8_t qid_msb_off, qid_msb_idx; ++ struct hns3_cmd_desc desc; ++ uint16_t q_id, q_hi, q_lo; ++ uint8_t rss_result_h; ++ uint16_t i, j; ++ int ret; ++ ++ req = (struct hns3_rss_indirection_table_cmd *)desc.data; ++ max_bd_num = DIV_ROUND_UP(size, HNS3_RSS_CFG_TBL_SIZE); ++ for (i = 0; i < max_bd_num; i++) { ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INDIR_TABLE, ++ true); ++ req->start_table_index = ++ rte_cpu_to_le_16(i * HNS3_RSS_CFG_TBL_SIZE); ++ ret = hns3_cmd_send(hw, &desc, 1); ++ if (ret) { ++ hns3_err(hw, "fail to get RSS indirection table from firmware, ret = %d", ++ ret); ++ return ret; ++ } ++ ++ if (i == max_bd_num - 1 && (size % HNS3_RSS_CFG_TBL_SIZE) != 0) ++ cfg_tbl_size = size % HNS3_RSS_CFG_TBL_SIZE; ++ else ++ cfg_tbl_size = HNS3_RSS_CFG_TBL_SIZE; ++ ++ for (j = 0; j < cfg_tbl_size; j++) { ++ qid_msb_idx = ++ j * HNS3_RSS_CFG_TBL_BW_H / HNS3_BITS_PER_BYTE; ++ rss_result_h = req->rss_result_h[qid_msb_idx]; ++ qid_msb_off = ++ j * HNS3_RSS_CFG_TBL_BW_H % HNS3_BITS_PER_BYTE; ++ q_hi = (rss_result_h >> qid_msb_off) & ++ HNS3_RSS_CFG_TBL_BW_H_M; ++ q_lo = req->rss_result_l[j]; ++ q_id = (q_hi << HNS3_RSS_CFG_TBL_BW_L) | q_lo; ++ indir[i * HNS3_RSS_CFG_TBL_SIZE + j] = q_id; ++ } ++ } + + return 0; + } +@@ -393,41 +482,72 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) + } + + ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size); +- if (ret) +- hns3_err(hw, "RSS uninit indir table failed: %d", ret); ++ if (ret != 0) ++ hns3_err(hw, "RSS uninit indir table failed, ret = %d.", ret); ++ else ++ memcpy(hw->rss_info.rss_indirection_tbl, lut, ++ sizeof(uint16_t) * hw->rss_ind_tbl_size); + rte_free(lut); + + return ret; + } + +-static void +-hns3_rss_check_l3l4_types(struct hns3_hw *hw, uint64_t rss_hf) ++bool ++hns3_check_rss_types_valid(struct hns3_hw *hw, uint64_t types) + { + uint64_t ip_mask = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | + RTE_ETH_RSS_NONFRAG_IPV4_OTHER | + RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | + RTE_ETH_RSS_NONFRAG_IPV6_OTHER; +- uint64_t l4_mask = RTE_ETH_RSS_NONFRAG_IPV4_TCP | +- RTE_ETH_RSS_NONFRAG_IPV4_UDP | +- RTE_ETH_RSS_NONFRAG_IPV4_SCTP | +- RTE_ETH_RSS_NONFRAG_IPV6_TCP | +- RTE_ETH_RSS_NONFRAG_IPV6_UDP | +- RTE_ETH_RSS_NONFRAG_IPV6_SCTP; +- 
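Both the hash-key commands and the indirection-table commands above follow one chunking discipline: issue DIV_ROUND_UP(len, per-descriptor capacity) descriptors and let only the last one carry a short remainder, instead of hard-coding three fixed key offsets. A generic sketch of that loop, with send_desc() standing in for descriptor setup plus hns3_cmd_send():

    #include <stddef.h>
    #include <stdint.h>

    #define CHUNK 16u   /* bytes per descriptor (HNS3_RSS_HASH_KEY_NUM) */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    extern int send_desc(uint16_t idx, const uint8_t *data, uint16_t len);

    static int write_key_chunked(const uint8_t *key, uint16_t key_len)
    {
        uint16_t bd_num = DIV_ROUND_UP(key_len, CHUNK);
        uint16_t idx, len;
        int ret;

        for (idx = 0; idx < bd_num; idx++) {
            /* Only the final descriptor may be short. */
            len = (idx == bd_num - 1 && key_len % CHUNK != 0) ?
                  key_len % CHUNK : CHUNK;
            ret = send_desc(idx, key + (size_t)idx * CHUNK, len);
            if (ret != 0)
                return ret;
        }
        return 0;
    }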
uint64_t l3_src_dst_mask = RTE_ETH_RSS_L3_SRC_ONLY | +- RTE_ETH_RSS_L3_DST_ONLY; +- uint64_t l4_src_dst_mask = RTE_ETH_RSS_L4_SRC_ONLY | +- RTE_ETH_RSS_L4_DST_ONLY; +- +- if (rss_hf & l3_src_dst_mask && +- !(rss_hf & ip_mask || rss_hf & l4_mask)) +- hns3_warn(hw, "packet type isn't specified, L3_SRC/DST_ONLY is ignored."); +- +- if (rss_hf & l4_src_dst_mask && !(rss_hf & l4_mask)) +- hns3_warn(hw, "packet type isn't specified, L4_SRC/DST_ONLY is ignored."); ++ uint64_t ip_l4_mask = RTE_ETH_RSS_NONFRAG_IPV4_TCP | ++ RTE_ETH_RSS_NONFRAG_IPV4_UDP | ++ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | ++ RTE_ETH_RSS_NONFRAG_IPV6_TCP | ++ RTE_ETH_RSS_NONFRAG_IPV6_UDP | ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP; ++ bool has_l4_src_dst = !!(types & HNS3_RSS_SUPPORT_L4_SRC_DST); ++ bool has_ip_pkt = !!(types & ip_mask); ++ uint64_t final_types; ++ ++ if (types == 0) ++ return true; ++ ++ if ((types & HNS3_ETH_RSS_SUPPORT) == 0) { ++ hns3_err(hw, "specified types(0x%" PRIx64 ") are unsupported.", ++ types); ++ return false; ++ } ++ ++ if ((types & HNS3_RSS_SUPPORT_L3_SRC_DST) != 0 && ++ (types & HNS3_RSS_SUPPORT_FLOW_TYPE) == 0) { ++ hns3_err(hw, "IP or IP-TCP/UDP/SCTP packet type isn't specified, L3_SRC/DST_ONLY cannot be set."); ++ return false; ++ } ++ ++ if (has_l4_src_dst && (types & ip_l4_mask) == 0) { ++ if (!has_ip_pkt) { ++ hns3_err(hw, "IP-TCP/UDP/SCTP packet type isn't specified, L4_SRC/DST_ONLY cannot be set."); ++ return false; ++ } ++ /* ++ * For the case that the types has L4_SRC/DST_ONLY but hasn't ++ * IP-TCP/UDP/SCTP packet type, this types is considered valid ++ * if it also has IP packet type. ++ */ ++ hns3_warn(hw, "L4_SRC/DST_ONLY is ignored because of no including L4 packet."); ++ } ++ ++ if ((types & ~HNS3_ETH_RSS_SUPPORT) != 0) { ++ final_types = types & HNS3_ETH_RSS_SUPPORT; ++ hns3_warn(hw, "set RSS types based on hardware support, requested:0x%" PRIx64 " configured:0x%" PRIx64 "", ++ types, final_types); ++ } ++ ++ return true; + } + +-static uint64_t +-hns3_rss_calc_tuple_filed(struct hns3_hw *hw, uint64_t rss_hf) ++uint64_t ++hns3_rss_calc_tuple_filed(uint64_t rss_hf) + { + uint64_t l3_only_mask = RTE_ETH_RSS_L3_SRC_ONLY | + RTE_ETH_RSS_L3_DST_ONLY; +@@ -456,34 +576,40 @@ hns3_rss_calc_tuple_filed(struct hns3_hw *hw, uint64_t rss_hf) + !has_l3_l4_only) + tuple |= hns3_set_tuple_table[i].rss_field; + } +- hns3_rss_check_l3l4_types(hw, rss_hf); + + return tuple; + } + + int +-hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf) ++hns3_set_rss_tuple_field(struct hns3_hw *hw, uint64_t tuple_fields) + { + struct hns3_rss_input_tuple_cmd *req; + struct hns3_cmd_desc desc; +- uint64_t tuple_field; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false); + req = (struct hns3_rss_input_tuple_cmd *)desc.data; +- +- tuple_field = hns3_rss_calc_tuple_filed(hw, rss_hf); +- req->tuple_field = rte_cpu_to_le_64(tuple_field); ++ req->tuple_field = rte_cpu_to_le_64(tuple_fields); + ret = hns3_cmd_send(hw, &desc, 1); +- if (ret) { +- hns3_err(hw, "Update RSS flow types tuples failed %d", ret); +- return ret; +- } ++ if (ret != 0) ++ hns3_err(hw, "set RSS hash tuple fields failed ret = %d", ret); + +- /* Update supported flow types when set tuple success */ +- hw->rss_info.conf.types = rss_hf; ++ return ret; ++} + +- return 0; ++int ++hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf) ++{ ++ uint64_t tuple_fields; ++ int ret; ++ ++ tuple_fields = hns3_rss_calc_tuple_filed(rss_hf); ++ ret = hns3_set_rss_tuple_field(hw, tuple_fields); ++ if (ret != 0) ++ 
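hns3_check_rss_types_valid() above tightens what used to be two warnings into hard errors with one documented exception: the L3/L4 SRC/DST_ONLY modifiers must come with a packet type to act on, and an L4 modifier paired only with plain IP types is accepted but ignored. A condensed predicate capturing those rules, with simplified flag values standing in for the full RTE_ETH_RSS_* set:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative flags; the real RTE_ETH_RSS_* masks are wider. */
    #define RSS_IP      (1ULL << 0)   /* any plain IP type           */
    #define RSS_IP_L4   (1ULL << 1)   /* any IP-TCP/UDP/SCTP type    */
    #define RSS_L3_ONLY (1ULL << 2)   /* L3_SRC_ONLY | L3_DST_ONLY   */
    #define RSS_L4_ONLY (1ULL << 3)   /* L4_SRC_ONLY | L4_DST_ONLY   */
    #define RSS_SUPPORTED (RSS_IP | RSS_IP_L4 | RSS_L3_ONLY | RSS_L4_ONLY)

    static bool rss_types_valid(uint64_t types)
    {
        if (types == 0)
            return true;                    /* nothing requested */
        if ((types & RSS_SUPPORTED) == 0)
            return false;                   /* nothing usable    */
        /* Modifiers need a packet type to act on. */
        if ((types & RSS_L3_ONLY) && !(types & (RSS_IP | RSS_IP_L4)))
            return false;
        if ((types & RSS_L4_ONLY) && !(types & RSS_IP_L4))
            return (types & RSS_IP) != 0;   /* IP only: warn and ignore */
        return true;
    }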
hns3_err(hw, "Update RSS flow types tuples failed, ret = %d", ++ ret); ++ ++ return ret; + } + + /* +@@ -500,28 +626,35 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint64_t rss_hf_bk = hw->rss_info.conf.types; ++ uint64_t rss_hf_bk = hw->rss_info.rss_hf; + uint8_t key_len = rss_conf->rss_key_len; + uint64_t rss_hf = rss_conf->rss_hf; + uint8_t *key = rss_conf->rss_key; + int ret; + +- if (key && key_len != HNS3_RSS_KEY_SIZE) { ++ if (key && key_len != hw->rss_key_size) { + hns3_err(hw, "the hash key len(%u) is invalid, must be %u", +- key_len, HNS3_RSS_KEY_SIZE); ++ key_len, hw->rss_key_size); + return -EINVAL; + } + ++ if (!hns3_check_rss_types_valid(hw, rss_hf)) ++ return -EINVAL; ++ + rte_spinlock_lock(&hw->lock); + ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_hf); + if (ret) + goto set_tuple_fail; + + if (key) { +- ret = hns3_rss_set_algo_key(hw, key); ++ ret = hns3_rss_set_algo_key(hw, hw->rss_info.hash_algo, ++ key, hw->rss_key_size); + if (ret) + goto set_algo_key_fail; ++ /* Update the shadow RSS key with user specified */ ++ memcpy(hw->rss_info.key, key, hw->rss_key_size); + } ++ hw->rss_info.rss_hf = rss_hf; + rte_spinlock_unlock(&hw->lock); + + return 0; +@@ -533,6 +666,96 @@ set_tuple_fail: + return ret; + } + ++int ++hns3_get_rss_tuple_field(struct hns3_hw *hw, uint64_t *tuple_fields) ++{ ++ struct hns3_rss_input_tuple_cmd *req; ++ struct hns3_cmd_desc desc; ++ int ret; ++ ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, true); ++ req = (struct hns3_rss_input_tuple_cmd *)desc.data; ++ ret = hns3_cmd_send(hw, &desc, 1); ++ if (ret != 0) { ++ hns3_err(hw, "fail to get RSS hash tuple fields from firmware, ret = %d", ++ ret); ++ return ret; ++ } ++ ++ *tuple_fields = rte_le_to_cpu_64(req->tuple_field); ++ ++ return 0; ++} ++ ++static uint64_t ++hns3_rss_tuple_fields_to_rss_hf(struct hns3_hw *hw, uint64_t tuple_fields) ++{ ++ uint64_t ipv6_sctp_l4_mask = ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S); ++ uint64_t rss_hf = 0; ++ uint64_t tuple_mask; ++ uint32_t i; ++ ++ for (i = 0; i < RTE_DIM(hns3_set_tuple_table); i++) { ++ tuple_mask = hns3_set_tuple_table[i].tuple_mask; ++ /* ++ * The RSS hash of the packet type is disabled if its tuples is ++ * zero. ++ */ ++ if ((tuple_fields & tuple_mask) == 0) ++ continue; ++ ++ /* ++ * Some hardware don't support to use src/dst port fields to ++ * hash for IPV6-SCTP packet. ++ */ ++ if ((hns3_set_tuple_table[i].rss_types & ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP) && ++ !hw->rss_info.ipv6_sctp_offload_supported) ++ tuple_mask &= ~ipv6_sctp_l4_mask; ++ ++ /* ++ * The framework (ethdev ops) or driver (rte flow API) ensure ++ * that both L3_SRC/DST_ONLY and L4_SRC/DST_ONLY cannot be set ++ * to driver at the same time. But if user doesn't specify ++ * anything L3/L4_SRC/DST_ONLY, driver enables all tuple fields. ++ * In this case, driver should not report L3/L4_SRC/DST_ONLY. ++ */ ++ if ((tuple_fields & tuple_mask) == tuple_mask) { ++ /* Skip the item enabled part tuples. 
*/ ++ if ((tuple_fields & hns3_set_tuple_table[i].rss_field) != ++ tuple_mask) ++ continue; ++ ++ rss_hf |= hns3_set_tuple_table[i].rss_types; ++ continue; ++ } ++ ++ /* Match the item enabled part tuples.*/ ++ if ((tuple_fields & hns3_set_tuple_table[i].rss_field) == ++ hns3_set_tuple_table[i].rss_field) ++ rss_hf |= hns3_set_tuple_table[i].rss_types; ++ } ++ ++ return rss_hf; ++} ++ ++static int ++hns3_rss_hash_get_rss_hf(struct hns3_hw *hw, uint64_t *rss_hf) ++{ ++ uint64_t tuple_fields; ++ int ret; ++ ++ ret = hns3_get_rss_tuple_field(hw, &tuple_fields); ++ if (ret != 0) ++ return ret; ++ ++ *rss_hf = hns3_rss_tuple_fields_to_rss_hf(hw, tuple_fields); ++ ++ return 0; ++} ++ + /* + * Get rss key and rss_hf types set of RSS hash configuration. + * @param dev +@@ -548,19 +771,32 @@ hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + { + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; +- struct hns3_rss_conf *rss_cfg = &hw->rss_info; ++ uint8_t hash_algo; ++ int ret; + + rte_spinlock_lock(&hw->lock); +- rss_conf->rss_hf = rss_cfg->conf.types; ++ ret = hns3_rss_hash_get_rss_hf(hw, &rss_conf->rss_hf); ++ if (ret != 0) { ++ hns3_err(hw, "obtain hash tuples failed, ret = %d", ret); ++ goto out; ++ } + + /* Get the RSS Key required by the user */ +- if (rss_conf->rss_key && rss_conf->rss_key_len >= HNS3_RSS_KEY_SIZE) { +- memcpy(rss_conf->rss_key, rss_cfg->key, HNS3_RSS_KEY_SIZE); +- rss_conf->rss_key_len = HNS3_RSS_KEY_SIZE; ++ if (rss_conf->rss_key && rss_conf->rss_key_len >= hw->rss_key_size) { ++ ret = hns3_rss_get_algo_key(hw, &hash_algo, rss_conf->rss_key, ++ hw->rss_key_size); ++ if (ret != 0) { ++ hns3_err(hw, "obtain hash algo and key failed, ret = %d", ++ ret); ++ goto out; ++ } ++ rss_conf->rss_key_len = hw->rss_key_size; + } ++ ++out: + rte_spinlock_unlock(&hw->lock); + +- return 0; ++ return ret; + } + + /* +@@ -600,12 +836,12 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) { +- rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "queue id(%u) set to redirection table " + "exceeds queue number(%u) allocated to a TC", + reta_conf[idx].reta[shift], + hw->alloc_rss_size); +- return -EINVAL; ++ ret = -EINVAL; ++ goto out; + } + + if (reta_conf[idx].mask & (1ULL << shift)) +@@ -614,7 +850,13 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + + ret = hns3_set_rss_indir_table(hw, indirection_tbl, + hw->rss_ind_tbl_size); ++ if (ret != 0) ++ goto out; + ++ memcpy(rss_cfg->rss_indirection_tbl, indirection_tbl, ++ sizeof(uint16_t) * hw->rss_ind_tbl_size); ++ ++out: + rte_spinlock_unlock(&hw->lock); + return ret; + } +@@ -636,10 +878,11 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + uint16_t reta_size) + { + struct hns3_adapter *hns = dev->data->dev_private; ++ uint16_t reta_table[HNS3_RSS_IND_TBL_SIZE_MAX]; + struct hns3_hw *hw = &hns->hw; +- struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t idx, shift; + uint16_t i; ++ int ret; + + if (reta_size != hw->rss_ind_tbl_size) { + hns3_err(hw, "The size of hash lookup table configured (%u)" +@@ -648,14 +891,22 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + return -EINVAL; + } + rte_spinlock_lock(&hw->lock); ++ ret = hns3_get_rss_indir_table(hw, reta_table, reta_size); ++ if (ret != 0) { ++ rte_spinlock_unlock(&hw->lock); ++ hns3_err(hw, "query RSS redirection table failed, ret = %d.", ++ ret); ++ return ret; ++ } ++ rte_spinlock_unlock(&hw->lock); ++ + 
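An aside on the hunk above: hns3_rss_tuple_fields_to_rss_hf() is table-driven. Each entry pairs the complete enable-bit mask of one packet type with the subset of bits a given RTE_ETH_RSS_* request would set, and the loop ORs the reported flags back in only when the hardware bits line up. A minimal standalone sketch of that reverse lookup follows; the struct and function names are illustrative, and it omits the driver's partial-enable and IPv6-SCTP special cases.

#include <stdint.h>
#include <stddef.h>

struct tuple_map {
    uint64_t tuple_mask;  /* every enable bit belonging to this packet type */
    uint64_t rss_field;   /* bits this rss_types request turns on */
    uint64_t rss_types;   /* RTE_ETH_RSS_* flags to report on a match */
};

static uint64_t tuple_fields_to_rss_hf(const struct tuple_map *tbl,
                                       size_t n, uint64_t fields)
{
    uint64_t rss_hf = 0;

    for (size_t i = 0; i < n; i++) {
        if ((fields & tbl[i].tuple_mask) == 0)
            continue;   /* hashing fully disabled for this packet type */
        if ((fields & tbl[i].rss_field) == tbl[i].rss_field)
            rss_hf |= tbl[i].rss_types;
    }
    return rss_hf;
}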
for (i = 0; i < reta_size; i++) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) +- reta_conf[idx].reta[shift] = +- rss_cfg->rss_indirection_tbl[i]; ++ reta_conf[idx].reta[shift] = reta_table[i]; + } +- rte_spinlock_unlock(&hw->lock); ++ + return 0; + } + +@@ -733,6 +984,52 @@ hns3_set_rss_tc_mode(struct hns3_hw *hw) + return ret; + } + ++/* ++ * Note: the 'hash_algo' is defined by enum rte_eth_hash_function. ++ */ ++int ++hns3_update_rss_algo_key(struct hns3_hw *hw, uint8_t hash_func, ++ uint8_t *key, uint8_t key_len) ++{ ++ uint8_t rss_key[HNS3_RSS_KEY_SIZE_MAX] = {0}; ++ bool modify_key, modify_algo; ++ uint8_t hash_algo; ++ int ret; ++ ++ modify_key = (key != NULL && key_len > 0); ++ modify_algo = hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT; ++ if (!modify_key && !modify_algo) ++ return 0; ++ ++ if (modify_algo && hash_func >= RTE_DIM(hns3_hash_func_map)) { ++ hns3_err(hw, "hash func (%u) is unsupported.", hash_func); ++ return -ENOTSUP; ++ } ++ if (modify_key && key_len != hw->rss_key_size) { ++ hns3_err(hw, "hash key length (%u) is invalid.", key_len); ++ return -EINVAL; ++ } ++ ++ ret = hns3_rss_get_algo_key(hw, &hash_algo, rss_key, hw->rss_key_size); ++ if (ret != 0) { ++ hns3_err(hw, "fail to get RSS hash algorithm and key, ret = %d", ++ ret); ++ return ret; ++ } ++ ++ if (modify_algo) ++ hash_algo = hns3_hash_func_map[hash_func]; ++ if (modify_key) ++ memcpy(rss_key, key, key_len); ++ ++ ret = hns3_rss_set_algo_key(hw, hash_algo, rss_key, hw->rss_key_size); ++ if (ret != 0) ++ hns3_err(hw, "fail to set RSS hash algorithm and key, ret = %d", ++ ret); ++ ++ return ret; ++} ++ + static void + hns3_rss_tuple_uninit(struct hns3_hw *hw) + { +@@ -759,10 +1056,11 @@ hns3_rss_set_default_args(struct hns3_hw *hw) + uint16_t i; + + /* Default hash algorithm */ +- rss_cfg->conf.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; ++ rss_cfg->hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ; + +- /* Default RSS key */ +- memcpy(rss_cfg->key, hns3_hash_key, HNS3_RSS_KEY_SIZE); ++ hw->rss_info.rss_hf = 0; ++ memcpy(rss_cfg->key, hns3_hash_key, ++ RTE_MIN(sizeof(hns3_hash_key), hw->rss_key_size)); + + /* Initialize RSS indirection table */ + for (i = 0; i < hw->rss_ind_tbl_size; i++) +@@ -783,20 +1081,8 @@ hns3_config_rss(struct hns3_adapter *hns) + + enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode; + +- switch (hw->rss_info.conf.func) { +- case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: +- hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE; +- break; +- case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: +- hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP; +- break; +- default: +- hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ; +- break; +- } +- +- /* Configure RSS hash algorithm and hash key offset */ +- ret = hns3_rss_set_algo_key(hw, hash_key); ++ ret = hns3_rss_set_algo_key(hw, rss_cfg->hash_algo, ++ hash_key, hw->rss_key_size); + if (ret) + return ret; + +@@ -810,15 +1096,22 @@ hns3_config_rss(struct hns3_adapter *hns) + return ret; + + /* +- * When muli-queue RSS mode flag is not set or unsupported tuples are ++ * When multi-queue RSS mode flag is not set or unsupported tuples are + * set, disable all tuples. 
+ */ +- rss_hf = hw->rss_info.conf.types; ++ rss_hf = hw->rss_info.rss_hf; + if (!((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) || + !(rss_hf & HNS3_ETH_RSS_SUPPORT)) + rss_hf = 0; + +- return hns3_set_rss_tuple_by_rss_hf(hw, rss_hf); ++ ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_hf); ++ if (ret != 0) { ++ hns3_err(hw, "set RSS tuples failed, ret = %d.", ret); ++ return ret; ++ } ++ hw->rss_info.rss_hf = rss_hf; ++ ++ return 0; + } + + /* +@@ -836,5 +1129,5 @@ hns3_rss_uninit(struct hns3_adapter *hns) + return; + + /* Disable RSS */ +- hw->rss_info.conf.types = 0; ++ hw->rss_info.rss_hf = 0; + } +diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h +index 8e8b056f4e..415430a399 100644 +--- a/dpdk/drivers/net/hns3/hns3_rss.h ++++ b/dpdk/drivers/net/hns3/hns3_rss.h +@@ -8,27 +8,107 @@ + #include + #include + +-#define HNS3_ETH_RSS_SUPPORT ( \ +- RTE_ETH_RSS_IPV4 | \ +- RTE_ETH_RSS_FRAG_IPV4 | \ +- RTE_ETH_RSS_NONFRAG_IPV4_TCP | \ +- RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ +- RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \ +- RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \ +- RTE_ETH_RSS_IPV6 | \ +- RTE_ETH_RSS_FRAG_IPV6 | \ +- RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ +- RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ +- RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \ +- RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \ +- RTE_ETH_RSS_L3_SRC_ONLY | \ +- RTE_ETH_RSS_L3_DST_ONLY | \ +- RTE_ETH_RSS_L4_SRC_ONLY | \ +- RTE_ETH_RSS_L4_DST_ONLY) ++#define HNS3_RSS_SUPPORT_L3_SRC_DST (RTE_ETH_RSS_L3_SRC_ONLY | \ ++ RTE_ETH_RSS_L3_DST_ONLY) ++#define HNS3_RSS_SUPPORT_L4_SRC_DST (RTE_ETH_RSS_L4_SRC_ONLY | \ ++ RTE_ETH_RSS_L4_DST_ONLY) ++#define HNS3_RSS_SUPPORT_L3L4 (HNS3_RSS_SUPPORT_L3_SRC_DST | \ ++ HNS3_RSS_SUPPORT_L4_SRC_DST) ++ ++#define HNS3_RSS_SUPPORT_FLOW_TYPE (RTE_ETH_RSS_IPV4 | \ ++ RTE_ETH_RSS_FRAG_IPV4 | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \ ++ RTE_ETH_RSS_IPV6 | \ ++ RTE_ETH_RSS_FRAG_IPV6 | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_OTHER) ++ ++#define HNS3_ETH_RSS_SUPPORT (HNS3_RSS_SUPPORT_FLOW_TYPE | \ ++ HNS3_RSS_SUPPORT_L3L4) ++ ++enum hns3_tuple_field { ++ /* IPV4_TCP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D = 0, ++ HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S, ++ HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S, ++ ++ /* IPV4_UDP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D = 8, ++ HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S, ++ HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S, ++ ++ /* IPV4_SCTP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D = 16, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, ++ ++ /* IPV4 ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, ++ HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S, ++ HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D, ++ HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S, ++ ++ /* IPV6_TCP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D = 32, ++ HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S, ++ HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S, ++ ++ /* IPV6_UDP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D = 40, ++ HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S, ++ HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S, ++ ++ /* IPV6_SCTP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D = 48, ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, ++ 
HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, ++ ++ /* IPV6 ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, ++ HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S, ++ HNS3_RSS_FIELD_IPV6_FRAG_IP_D, ++ HNS3_RSS_FIELD_IPV6_FRAG_IP_S ++}; ++ ++#define HNS3_RSS_PCTYPE_IPV4_TCP BIT_ULL(0) ++#define HNS3_RSS_PCTYPE_IPV4_UDP BIT_ULL(8) ++#define HNS3_RSS_PCTYPE_IPV4_SCTP BIT_ULL(16) ++#define HNS3_RSS_PCTYPE_IPV4_NONF BIT_ULL(24) ++#define HNS3_RSS_PCTYPE_IPV4_FLAG BIT_ULL(26) ++#define HNS3_RSS_PCTYPE_IPV6_TCP BIT_ULL(32) ++#define HNS3_RSS_PCTYPE_IPV6_UDP BIT_ULL(40) ++#define HNS3_RSS_PCTYPE_IPV6_SCTP BIT_ULL(48) ++#define HNS3_RSS_PCTYPE_IPV6_NONF BIT_ULL(56) ++#define HNS3_RSS_PCTYPE_IPV6_FLAG BIT_ULL(58) ++ ++#define HNS3_RSS_TUPLE_IPV4_TCP_M GENMASK(3, 0) ++#define HNS3_RSS_TUPLE_IPV4_UDP_M GENMASK(11, 8) ++#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(20, 16) ++#define HNS3_RSS_TUPLE_IPV4_NONF_M GENMASK(25, 24) ++#define HNS3_RSS_TUPLE_IPV4_FLAG_M GENMASK(27, 26) ++#define HNS3_RSS_TUPLE_IPV6_TCP_M GENMASK(35, 32) ++#define HNS3_RSS_TUPLE_IPV6_UDP_M GENMASK(43, 40) ++#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(52, 48) ++#define HNS3_RSS_TUPLE_IPV6_NONF_M GENMASK(57, 56) ++#define HNS3_RSS_TUPLE_IPV6_FLAG_M GENMASK(59, 58) + + #define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */ + #define HNS3_RSS_IND_TBL_SIZE_MAX 2048 + #define HNS3_RSS_KEY_SIZE 40 ++#define HNS3_RSS_KEY_SIZE_MAX 128 + #define HNS3_RSS_SET_BITMAP_MSK 0xffff + + #define HNS3_RSS_HASH_ALGO_TOEPLITZ 0 +@@ -36,15 +116,13 @@ + #define HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP 2 + #define HNS3_RSS_HASH_ALGO_MASK 0xf + +-#define HNS3_RSS_QUEUES_BUFFER_NUM 64 /* Same as the Max rx/tx queue num */ ++/* Same as the Max queue num under TC */ ++#define HNS3_RSS_QUEUES_BUFFER_NUM 512 + struct hns3_rss_conf { +- /* RSS parameters :algorithm, flow_types, key, queue */ +- struct rte_flow_action_rss conf; ++ uint64_t rss_hf; + uint8_t hash_algo; /* hash function type defined by hardware */ +- uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */ ++ uint8_t key[HNS3_RSS_KEY_SIZE_MAX]; /* Hash key */ + uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; +- uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ +- bool valid; /* check if RSS rule is valid */ + /* + * For IPv6 SCTP packets type, check whether the NIC hardware support + * RSS hash using the src/dst port as the input tuple. 
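A note on the register layout defined above: each flow type occupies its own byte of the 64-bit tuple word, so BIT_ULL() names the first enable bit of a byte and GENMASK() spans the whole field. A self-contained sketch of the same encoding, with kernel-style helper macros written out and purely illustrative field positions:

#include <stdint.h>

#define BIT_ULL(n)    (1ULL << (n))
#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Illustrative layout: IPv4-TCP occupies bits 0..3, IPv4-UDP bits 8..11. */
#define PCTYPE_IPV4_TCP   BIT_ULL(0)
#define TUPLE_IPV4_TCP_M  GENMASK(3, 0)
#define PCTYPE_IPV4_UDP   BIT_ULL(8)
#define TUPLE_IPV4_UDP_M  GENMASK(11, 8)

/* Enable all IPv4-TCP tuples, leaving every other flow type untouched. */
static uint64_t enable_ipv4_tcp(uint64_t tuple_word)
{
    return (tuple_word & ~TUPLE_IPV4_TCP_M) | TUPLE_IPV4_TCP_M;
}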
For Kunpeng920 +@@ -108,7 +186,16 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, + int hns3_rss_reset_indir_table(struct hns3_hw *hw); + int hns3_config_rss(struct hns3_adapter *hns); + void hns3_rss_uninit(struct hns3_adapter *hns); ++bool hns3_check_rss_types_valid(struct hns3_hw *hw, uint64_t types); + int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf); +-int hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key); ++int hns3_set_rss_tuple_field(struct hns3_hw *hw, uint64_t tuple_fields); ++int hns3_get_rss_tuple_field(struct hns3_hw *hw, uint64_t *tuple_fields); ++int hns3_rss_set_algo_key(struct hns3_hw *hw, uint8_t hash_algo, ++ const uint8_t *key, uint8_t key_len); ++int hns3_rss_get_algo_key(struct hns3_hw *hw, uint8_t *hash_algo, ++ uint8_t *key, uint8_t key_len); ++uint64_t hns3_rss_calc_tuple_filed(uint64_t rss_hf); ++int hns3_update_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo, ++ uint8_t *key, uint8_t key_len); + + #endif /* HNS3_RSS_H */ +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c +index f1163ce8a9..296aba8b35 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx.c ++++ b/dpdk/drivers/net/hns3/hns3_rxtx.c +@@ -50,6 +50,8 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) + rxq->sw_ring[i].mbuf = NULL; + } + } ++ for (i = 0; i < rxq->rx_rearm_nb; i++) ++ rxq->sw_ring[rxq->rx_rearm_start + i].mbuf = NULL; + } + + for (i = 0; i < rxq->bulk_mbuf_num; i++) +@@ -584,7 +586,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) +- hns3_err(hw, "TQP enable fail, ret = %d", ret); ++ hns3_err(hw, "TQP %s fail, ret = %d", enable ? "enable" : "disable", ret); + + return ret; + } +@@ -1637,7 +1639,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, + + ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q); + if (ret) { +- hns3_err(hw, "Fail to configure fake rx queues: %d", ret); ++ hns3_err(hw, "Fail to configure fake tx queues: %d", ret); + goto cfg_fake_tx_q_fail; + } + +@@ -2786,6 +2788,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + { hns3_recv_scattered_pkts, "Scalar Scattered" }, + { hns3_recv_pkts_vec, "Vector Neon" }, + { hns3_recv_pkts_vec_sve, "Vector Sve" }, ++ { rte_eth_pkt_burst_dummy, "Dummy" }, + }; + + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; +@@ -4272,24 +4275,31 @@ int + hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) + { ++ static const struct { ++ eth_tx_burst_t pkt_burst; ++ const char *info; ++ } burst_infos[] = { ++ { hns3_xmit_pkts_simple, "Scalar Simple" }, ++ { hns3_xmit_pkts, "Scalar" }, ++ { hns3_xmit_pkts_vec, "Vector Neon" }, ++ { hns3_xmit_pkts_vec_sve, "Vector Sve" }, ++ { rte_eth_pkt_burst_dummy, "Dummy" }, ++ }; ++ + eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; +- const char *info = NULL; +- +- if (pkt_burst == hns3_xmit_pkts_simple) +- info = "Scalar Simple"; +- else if (pkt_burst == hns3_xmit_pkts) +- info = "Scalar"; +- else if (pkt_burst == hns3_xmit_pkts_vec) +- info = "Vector Neon"; +- else if (pkt_burst == hns3_xmit_pkts_vec_sve) +- info = "Vector Sve"; +- +- if (info == NULL) +- return -EINVAL; ++ int ret = -EINVAL; ++ unsigned int i; + +- snprintf(mode->info, sizeof(mode->info), "%s", info); ++ for (i = 0; i < RTE_DIM(burst_infos); i++) { ++ if (pkt_burst == burst_infos[i].pkt_burst) { ++ snprintf(mode->info, sizeof(mode->info), "%s", ++ 
burst_infos[i].info); ++ ret = 0; ++ break; ++ } ++ } + +- return 0; ++ return ret; + } + + static bool +@@ -4303,11 +4313,6 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) + static bool + hns3_get_tx_prep_needed(struct rte_eth_dev *dev) + { +-#ifdef RTE_LIBRTE_ETHDEV_DEBUG +- RTE_SET_USED(dev); +- /* always perform tx_prepare when debug */ +- return true; +-#else + #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\ + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \ +@@ -4321,27 +4326,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) + + uint64_t tx_offload = dev->data->dev_conf.txmode.offloads; ++ + if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK) + return true; + + return false; +-#endif + } + +-eth_tx_burst_t +-hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) ++static eth_tx_prep_t ++hns3_get_tx_prepare(struct rte_eth_dev *dev) ++{ ++ return hns3_get_tx_prep_needed(dev) ? hns3_prep_pkts : NULL; ++} ++ ++static eth_tx_burst_t ++hns3_get_tx_function(struct rte_eth_dev *dev) + { + struct hns3_adapter *hns = dev->data->dev_private; + bool vec_allowed, sve_allowed, simple_allowed; +- bool vec_support, tx_prepare_needed; ++ bool vec_support; + + vec_support = hns3_tx_check_vec_support(dev) == 0; + vec_allowed = vec_support && hns3_get_default_vec_support(); + sve_allowed = vec_support && hns3_get_sve_support(); + simple_allowed = hns3_tx_check_simple_support(dev); +- tx_prepare_needed = hns3_get_tx_prep_needed(dev); +- +- *prep = NULL; + + if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) + return hns3_xmit_pkts_vec; +@@ -4349,19 +4357,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) + return hns3_xmit_pkts_vec_sve; + if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed) + return hns3_xmit_pkts_simple; +- if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) { +- if (tx_prepare_needed) +- *prep = hns3_prep_pkts; ++ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) + return hns3_xmit_pkts; +- } + + if (vec_allowed) + return hns3_xmit_pkts_vec; + if (simple_allowed) + return hns3_xmit_pkts_simple; + +- if (tx_prepare_needed) +- *prep = hns3_prep_pkts; + return hns3_xmit_pkts; + } + +@@ -4401,7 +4404,6 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct hns3_adapter *hns = eth_dev->data->dev_private; +- eth_tx_prep_t prep = NULL; + + if (hns->hw.adapter_state == HNS3_NIC_STARTED && + __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) { +@@ -4409,16 +4411,16 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) + eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; + eth_dev->tx_pkt_burst = hw->set_link_down ? 
+ rte_eth_pkt_burst_dummy : +- hns3_get_tx_function(eth_dev, &prep); +- eth_dev->tx_pkt_prepare = prep; ++ hns3_get_tx_function(eth_dev); ++ eth_dev->tx_pkt_prepare = hns3_get_tx_prepare(eth_dev); + eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status; +- hns3_trace_rxtx_function(eth_dev); + } else { + eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; + eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; + eth_dev->tx_pkt_prepare = NULL; + } + ++ hns3_trace_rxtx_function(eth_dev); + hns3_eth_dev_fp_ops_config(eth_dev); + } + +@@ -4469,6 +4471,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to start Rx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); + if (ret) { + hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", +@@ -4477,6 +4486,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return ret; + } + ++ if (rxq->sw_ring[0].mbuf != NULL) ++ hns3_rx_queue_release_mbufs(rxq); ++ + ret = hns3_init_rxq(hns, rx_queue_id); + if (ret) { + hns3_err(hw, "fail to init Rx queue %u, ret = %d.", +@@ -4515,6 +4527,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to stop Rx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + hns3_enable_rxq(rxq, false); + + hns3_rx_queue_release_mbufs(rxq); +@@ -4537,6 +4556,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to start Tx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX); + if (ret) { + hns3_err(hw, "fail to reset Tx queue %u, ret = %d.", +@@ -4563,6 +4589,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to stop Tx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + hns3_enable_txq(txq, false); + hns3_tx_queue_release_mbufs(txq); + /* +@@ -4756,10 +4789,8 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) + void + hns3_start_tx_datapath(struct rte_eth_dev *dev) + { +- eth_tx_prep_t prep = NULL; +- +- dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep); +- dev->tx_pkt_prepare = prep; ++ dev->tx_pkt_burst = hns3_get_tx_function(dev); ++ dev->tx_pkt_prepare = hns3_get_tx_prepare(dev); + hns3_eth_dev_fp_ops_config(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) +@@ -4767,3 +4798,31 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) + + hns3_mp_req_start_tx(dev); + } ++ ++void ++hns3_stop_rxtx_datapath(struct rte_eth_dev *dev) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ ++ hns3_set_rxtx_function(dev); ++ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ return; ++ ++ rte_wmb(); ++ /* Disable datapath on secondary process. */ ++ hns3_mp_req_stop_rxtx(dev); ++ /* Prevent crashes when queues are still in use. 
*/ ++ rte_delay_ms(hw->cfg_max_queues); ++} ++ ++void ++hns3_start_rxtx_datapath(struct rte_eth_dev *dev) ++{ ++ hns3_set_rxtx_function(dev); ++ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ return; ++ ++ hns3_mp_req_start_rxtx(dev); ++} +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.h b/dpdk/drivers/net/hns3/hns3_rxtx.h +index ea1a805491..fa39f6481a 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx.h ++++ b/dpdk/drivers/net/hns3/hns3_rxtx.h +@@ -740,9 +740,6 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, + const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev); + void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev); + void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev); +-eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev, +- eth_tx_prep_t *prep); +- + uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id); + void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, + uint8_t gl_idx, uint16_t gl_value); +@@ -776,5 +773,7 @@ int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); + void hns3_tx_push_init(struct rte_eth_dev *dev); + void hns3_stop_tx_datapath(struct rte_eth_dev *dev); + void hns3_start_tx_datapath(struct rte_eth_dev *dev); ++void hns3_stop_rxtx_datapath(struct rte_eth_dev *dev); ++void hns3_start_rxtx_datapath(struct rte_eth_dev *dev); + + #endif /* HNS3_RXTX_H */ +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h b/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h +index 55d9bf817d..a20a6b6acb 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h ++++ b/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h +@@ -142,8 +142,8 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + /* mask to shuffle from desc to mbuf's rx_descriptor_fields1 */ + uint8x16_t shuf_desc_fields_msk = { + 0xff, 0xff, 0xff, 0xff, /* packet type init zero */ +- 22, 23, 0xff, 0xff, /* rx.pkt_len to rte_mbuf.pkt_len */ +- 20, 21, /* size to rte_mbuf.data_len */ ++ 20, 21, 0xff, 0xff, /* rx.pkt_len to rte_mbuf.pkt_len */ ++ 22, 23, /* size to rte_mbuf.data_len */ + 0xff, 0xff, /* rte_mbuf.vlan_tci init zero */ + 8, 9, 10, 11, /* rx.rss_hash to rte_mbuf.hash.rss */ + }; +diff --git a/dpdk/drivers/net/hns3/hns3_stats.c b/dpdk/drivers/net/hns3/hns3_stats.c +index bad65fcbed..c2e692a2c5 100644 +--- a/dpdk/drivers/net/hns3/hns3_stats.c ++++ b/dpdk/drivers/net/hns3/hns3_stats.c +@@ -317,7 +317,7 @@ hns3_update_mac_stats(struct hns3_hw *hw) + uint32_t stats_iterms; + uint64_t *desc_data; + uint32_t desc_num; +- uint16_t i; ++ uint32_t i; + int ret; + + /* The first desc has a 64-bit header, so need to consider it. 
*/ +diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c +index 7726a89d99..cb0070f94b 100644 +--- a/dpdk/drivers/net/i40e/i40e_ethdev.c ++++ b/dpdk/drivers/net/i40e/i40e_ethdev.c +@@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); + + static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +-static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size); + + static int i40e_ethertype_filter_convert( + const struct rte_eth_ethertype_filter *input, +@@ -2412,10 +2411,21 @@ i40e_dev_start(struct rte_eth_dev *dev) + } + } + ++ /* Disable mac loopback mode */ ++ if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE) { ++ ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MODE_NONE, NULL); ++ if (ret != I40E_SUCCESS) { ++ PMD_DRV_LOG(ERR, "fail to set loopback link"); ++ goto tx_err; ++ } ++ } ++ + /* Enable mac loopback mode */ +- if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE || +- dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) { +- ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL); ++ if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_EN) { ++ if (hw->mac.type == I40E_MAC_X722) ++ ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC_LOCAL_X722, NULL); ++ else ++ ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "fail to set loopback link"); + goto tx_err; +@@ -2449,7 +2459,7 @@ i40e_dev_start(struct rte_eth_dev *dev) + PMD_DRV_LOG(WARNING, "Fail to set phy mask"); + + /* Call get_link_info aq command to enable/disable LSE */ +- i40e_dev_link_update(dev, 0); ++ i40e_dev_link_update(dev, 1); + } + + if (dev->data->dev_conf.intr_conf.rxq == 0) { +@@ -2467,8 +2477,12 @@ i40e_dev_start(struct rte_eth_dev *dev) + "please call hierarchy_commit() " + "before starting the port"); + +- max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD; +- i40e_set_mac_max_frame(dev, max_frame_size); ++ max_frame_size = dev->data->mtu ? 
++ dev->data->mtu + I40E_ETH_OVERHEAD : ++ I40E_FRAME_SIZE_MAX; ++ ++ /* Set the max frame size to HW*/ ++ i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL); + + return I40E_SUCCESS; + +@@ -2809,9 +2823,6 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) + return i40e_phy_conf_link(hw, abilities, speed, false); + } + +-#define CHECK_INTERVAL 100 /* 100ms */ +-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ +- + static __rte_always_inline void + update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) + { +@@ -2878,6 +2889,8 @@ static __rte_always_inline void + update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, + bool enable_lse, int wait_to_complete) + { ++#define CHECK_INTERVAL 100 /* 100ms */ ++#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + uint32_t rep_cnt = MAX_REPEAT_TIME; + struct i40e_link_status link_status; + int status; +@@ -6738,7 +6751,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) + if (!ret) + rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, NULL); +- + break; + default: + PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", +@@ -12123,40 +12135,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) + return ret; + } + +-static void +-i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size) +-{ +- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint32_t rep_cnt = MAX_REPEAT_TIME; +- struct rte_eth_link link; +- enum i40e_status_code status; +- bool can_be_set = true; +- +- /* +- * I40E_MEDIA_TYPE_BASET link up can be ignored +- * I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type +- * is I40E_MEDIA_TYPE_UNKNOWN +- */ +- if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && +- hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) { +- do { +- update_link_reg(hw, &link); +- if (link.link_status) +- break; +- rte_delay_ms(CHECK_INTERVAL); - } while (--rep_cnt); - can_be_set = !!link.link_status; - } - -- if (can_be_set) { -- status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL); -- if (status != I40E_SUCCESS) -- PMD_DRV_LOG(ERR, "Failed to set max frame size at port level"); -- } else { -- PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down"); +- if (can_be_set) { +- status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL); +- if (status != I40E_SUCCESS) +- PMD_DRV_LOG(ERR, "Failed to set max frame size at port level"); +- } else { +- PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down"); +- } +-} +- + RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE); + RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE); + #ifdef RTE_ETHDEV_DEBUG_RX +diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.h b/dpdk/drivers/net/i40e/i40e_ethdev.h +index fe943a45ff..9b806d130e 100644 +--- a/dpdk/drivers/net/i40e/i40e_ethdev.h ++++ b/dpdk/drivers/net/i40e/i40e_ethdev.h +@@ -48,6 +48,9 @@ + #define I40E_MAX_VF 128 + /*flag of no loopback*/ + #define I40E_AQ_LB_MODE_NONE 0x0 ++#define I40E_AQ_LB_MODE_EN 0x01 ++#define I40E_AQ_LB_MAC 0x01 ++#define I40E_AQ_LB_MAC_LOCAL_X722 0x04 + /* + * vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. 
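The context comment above describes the usual bitmap layout for VLAN filtering: a 12-bit vlan_id indexes a 4096-bit table stored as 128 32-bit words. A minimal sketch of the indexing that layout implies; the helper name and storage are hypothetical, not the driver's API:

#include <stdbool.h>
#include <stdint.h>

#define VFTA_WORDS 128   /* 128 x 32 bits = 4096 entries, one per vlan_id */

static void vfta_set(uint32_t vfta[VFTA_WORDS], uint16_t vlan_id, bool on)
{
    uint32_t word = (vlan_id >> 5) & (VFTA_WORDS - 1); /* vlan_id / 32 */
    uint32_t bit  = 1u << (vlan_id & 0x1F);            /* vlan_id % 32 */

    if (on)
        vfta[word] |= bit;
    else
        vfta[word] &= ~bit;
}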
+@@ -1497,7 +1500,7 @@ i40e_calc_itr_interval(bool is_pf, bool is_multi_drv) + uint16_t interval = 0; + + if (is_multi_drv) { +- interval = I40E_QUEUE_ITR_INTERVAL_MAX; ++ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + } else { + if (is_pf) + interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; +diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c +index 65a826d51c..67df77890a 100644 +--- a/dpdk/drivers/net/i40e/i40e_flow.c ++++ b/dpdk/drivers/net/i40e/i40e_flow.c +@@ -1236,6 +1236,14 @@ i40e_flow_parse_attr(const struct rte_flow_attr *attr, + return -rte_errno; + } + ++ /* Not supported */ ++ if (attr->transfer) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, ++ attr, "Not support transfer."); ++ return -rte_errno; ++ } ++ + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c +index 788ffb51c2..b4f65b58fa 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx.c +@@ -304,10 +304,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags, + union i40e_tx_offload tx_offload) + { + /* Set MACLEN */ +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) +- *td_offset |= (tx_offload.outer_l2_len >> 1) +- << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; +- else ++ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) + *td_offset |= (tx_offload.l2_len >> 1) + << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + +@@ -1171,9 +1168,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + + /* Fill in tunneling parameters if necessary */ + cd_tunneling_params = 0; +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ++ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { ++ td_offset |= (tx_offload.outer_l2_len >> 1) ++ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + i40e_parse_tunneling_params(ol_flags, tx_offload, + &cd_tunneling_params); ++ } + /* Enable checksum offloading */ + if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) + i40e_txd_enable_checksum(ol_flags, &td_cmd, +@@ -2904,6 +2904,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ++ I40E_RX_MAX_DATA_BUF_SIZE); + rxq->hs_mode = i40e_header_split_none; + break; + } +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.h b/dpdk/drivers/net/i40e/i40e_rxtx.h +index 5e6eecc501..a8686224e5 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx.h ++++ b/dpdk/drivers/net/i40e/i40e_rxtx.h +@@ -21,6 +21,9 @@ + /* In none-PXE mode QLEN must be whole number of 32 descriptors. 
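The i40e_rx_queue_config() fix above caps the Rx data buffer at what the descriptor length field can encode: align the mbuf data room down to the hardware granularity, then clamp to the 16K minus 128 byte ceiling. A compact sketch of that derivation, using a stand-in 128-byte alignment in place of the RXQ context shift:

#include <stdint.h>

#define BUF_ALIGN         128                /* stand-in for 1 << DBUFF_SHIFT */
#define MAX_DATA_BUF_SIZE (16 * 1024 - 128)  /* descriptor length field limit */

static uint16_t rx_buf_len(uint32_t mempool_room, uint32_t headroom)
{
    uint32_t len = mempool_room - headroom;

    len -= len % BUF_ALIGN;          /* align down to the hardware unit */
    if (len > MAX_DATA_BUF_SIZE)
        len = MAX_DATA_BUF_SIZE;     /* never exceed what HW can encode */
    return (uint16_t)len;
}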
*/ + #define I40E_ALIGN_RING_DESC 32 + ++/* Max data buffer size must be 16K - 128 bytes */ ++#define I40E_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128) ++ + #define I40E_MIN_RING_DESC 64 + #define I40E_MAX_RING_DESC 4096 + +@@ -166,7 +169,7 @@ struct i40e_tx_queue { + bool q_set; /**< indicate if tx queue has been configured */ + bool tx_deferred_start; /**< don't start this queue in dev start */ + uint8_t dcb_tc; /**< Traffic class of tx queue */ +- uint64_t offloads; /**< Tx offload flags of RTE_ETH_RX_OFFLOAD_* */ ++ uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */ + const struct rte_memzone *mz; + }; + +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c +index 2dfa04599c..da4a1bc03b 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c +@@ -448,8 +448,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + + /* Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST +- * numbers of DD bits + */ + uint16_t + i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c +index 60c97d5331..74ff54c653 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c +@@ -906,16 +906,13 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + struct rte_mempool_cache *cache = rte_mempool_default_cache(mp, + rte_lcore_id()); + +- if (!cache || cache->len == 0) +- goto normal; +- +- cache_objs = &cache->objs[cache->len]; +- +- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) { +- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n); ++ if (!cache || n > RTE_MEMPOOL_CACHE_MAX_SIZE) { ++ rte_mempool_generic_put(mp, (void *)txep, n, cache); + goto done; + } + ++ cache_objs = &cache->objs[cache->len]; ++ + /* The cache follows the following algorithm + * 1. Add the objects to the cache + * 2. 
Anything greater than the cache min value (if it +@@ -947,7 +944,6 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + goto done; + } + +-normal: + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m)) { + free[0] = m; +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c +index 12e6f1cbcb..90e388ae27 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c +@@ -573,8 +573,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq, + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST +- * numbers of DD bits + */ + uint16_t + i40e_recv_pkts_vec(void *__rte_restrict rx_queue, +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c +index bdc979a839..79029ab433 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c +@@ -595,8 +595,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST +- * numbers of DD bits + */ + uint16_t + i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +diff --git a/dpdk/drivers/net/iavf/iavf.h b/dpdk/drivers/net/iavf/iavf.h +index 1edebab8dc..aa18650ffa 100644 +--- a/dpdk/drivers/net/iavf/iavf.h ++++ b/dpdk/drivers/net/iavf/iavf.h +@@ -262,6 +262,7 @@ struct iavf_info { + struct iavf_qv_map *qv_map; /* queue vector mapping */ + struct iavf_flow_list flow_list; + rte_spinlock_t flow_ops_lock; ++ rte_spinlock_t aq_lock; + struct iavf_parser_list rss_parser_list; + struct iavf_parser_list dist_parser_list; + struct iavf_parser_list ipsec_crypto_parser_list; +diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c +index 3196210f2c..51a8f24973 100644 +--- a/dpdk/drivers/net/iavf/iavf_ethdev.c ++++ b/dpdk/drivers/net/iavf/iavf_ethdev.c +@@ -1065,6 +1065,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + ++ if (vf->vf_reset) ++ return 0; ++ + if (adapter->closed) + return -1; + +@@ -1075,8 +1078,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) + if (adapter->stopped == 1) + return 0; + +- iavf_stop_queues(dev); +- + /* Disable the interrupt for Rx */ + rte_intr_efd_disable(intr_handle); + /* Rx interrupt vector mapping free */ +@@ -1089,6 +1090,8 @@ iavf_dev_stop(struct rte_eth_dev *dev) + iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, + false); + ++ iavf_stop_queues(dev); ++ + adapter->stopped = 1; + dev->data->dev_started = 0; + +@@ -2607,6 +2610,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) + adapter->dev_data = eth_dev->data; + adapter->stopped = 1; + ++ if (iavf_dev_event_handler_init()) ++ goto init_vf_err; ++ + if (iavf_init_vf(eth_dev) != 0) { + PMD_INIT_LOG(ERR, "Init vf failed"); + return -1; +@@ -2634,8 +2640,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + ð_dev->data->mac_addrs[0]); + +- if (iavf_dev_event_handler_init()) +- goto init_vf_err; + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { + /* register callback func to eal lib */ +@@ -2732,6 +2736,18 @@ iavf_dev_close(struct rte_eth_dev *dev) + if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) + iavf_config_promisc(adapter, false, false); + ++ /* ++ * 
Release redundant queue resource when close the dev ++ * so that other vfs can re-use the queues. ++ */ ++ if (vf->lv_enabled) { ++ ret = iavf_request_queues(dev, IAVF_MAX_NUM_QUEUES_DFLT); ++ if (ret) ++ PMD_DRV_LOG(ERR, "Reset the num of queues failed"); ++ ++ vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; ++ } ++ + iavf_shutdown_adminq(hw); + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { + /* disable uio intr before callback unregister */ +diff --git a/dpdk/drivers/net/iavf/iavf_generic_flow.c b/dpdk/drivers/net/iavf/iavf_generic_flow.c +index f33c764764..6f6e95fc45 100644 +--- a/dpdk/drivers/net/iavf/iavf_generic_flow.c ++++ b/dpdk/drivers/net/iavf/iavf_generic_flow.c +@@ -2278,11 +2278,12 @@ iavf_flow_create(struct rte_eth_dev *dev, + } + + flow->engine = engine; ++ rte_spinlock_lock(&vf->flow_ops_lock); + TAILQ_INSERT_TAIL(&vf->flow_list, flow, node); ++ rte_spinlock_unlock(&vf->flow_ops_lock); + PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type); + + free_flow: +- rte_spinlock_unlock(&vf->flow_ops_lock); + return flow; + } + +diff --git a/dpdk/drivers/net/iavf/iavf_hash.c b/dpdk/drivers/net/iavf/iavf_hash.c +index ae6fb38594..cf4d677101 100644 +--- a/dpdk/drivers/net/iavf/iavf_hash.c ++++ b/dpdk/drivers/net/iavf/iavf_hash.c +@@ -886,8 +886,8 @@ iavf_hash_parse_raw_pattern(const struct rte_flow_item *item, + struct iavf_rss_meta *meta) + { + const struct rte_flow_item_raw *raw_spec, *raw_mask; ++ uint16_t spec_len, pkt_len; + uint8_t *pkt_buf, *msk_buf; +- uint8_t spec_len, pkt_len; + uint8_t tmp_val = 0; + uint8_t tmp_c = 0; + int i, j; +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.c b/dpdk/drivers/net/iavf/iavf_rxtx.c +index cf87a6beda..4a38a7b985 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx.c ++++ b/dpdk/drivers/net/iavf/iavf_rxtx.c +@@ -654,6 +654,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + + len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; + rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, IAVF_RX_MAX_DATA_BUF_SIZE); + + /* Allocate the software ring. */ + len = nb_desc + IAVF_RX_MAX_BURST; +@@ -2453,8 +2454,11 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0, + * Calculate the tunneling UDP checksum. 
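The iavf_generic_flow.c change above narrows the critical section to exactly the shared-list splice: validation and allocation stay outside the lock, and only the TAILQ insertion is serialized. A minimal sketch of the pattern, with a POSIX mutex standing in for rte_spinlock_t:

#include <pthread.h>
#include <sys/queue.h>

struct flow {
    TAILQ_ENTRY(flow) node;
};
TAILQ_HEAD(flow_list, flow);

static struct flow_list flows = TAILQ_HEAD_INITIALIZER(flows);
static pthread_mutex_t flows_lock = PTHREAD_MUTEX_INITIALIZER;

/* Only the list splice needs the lock; everything else happens outside
 * the critical section, as in the reordered driver code above. */
static void flow_publish(struct flow *f)
{
    pthread_mutex_lock(&flows_lock);
    TAILQ_INSERT_TAIL(&flows, f, node);
    pthread_mutex_unlock(&flows_lock);
}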
+ * Shall be set only if L4TUNT = 01b and EIPT is not zero + */ +- if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) && +- (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING)) ++ if ((eip_typ & (IAVF_TX_CTX_EXT_IP_IPV6 | ++ IAVF_TX_CTX_EXT_IP_IPV4 | ++ IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM)) && ++ (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING) && ++ (m->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) + eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; + } + +@@ -2616,10 +2620,21 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, + offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } + +- if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { +- command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; ++ if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) { ++ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ++ command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; ++ else ++ command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (m->l4_len >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; ++ ++ *qw1 = rte_cpu_to_le_64((((uint64_t)command << ++ IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) | ++ (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) & ++ IAVF_TXD_DATA_QW1_OFFSET_MASK) | ++ ((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)); ++ ++ return; + } + + /* Enable L4 checksum offloads */ +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.h b/dpdk/drivers/net/iavf/iavf_rxtx.h +index a6ad88885b..354326c235 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx.h ++++ b/dpdk/drivers/net/iavf/iavf_rxtx.h +@@ -16,6 +16,9 @@ + /* used for Rx Bulk Allocate */ + #define IAVF_RX_MAX_BURST 32 + ++/* Max data buffer size must be 16K - 128 bytes */ ++#define IAVF_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128) ++ + /* used for Vector PMD */ + #define IAVF_VPMD_RX_MAX_BURST 32 + #define IAVF_VPMD_TX_MAX_BURST 32 +@@ -24,6 +27,8 @@ + #define IAVF_VPMD_TX_MAX_FREE_BUF 64 + + #define IAVF_TX_NO_VECTOR_FLAGS ( \ ++ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ ++ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \ + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ + RTE_ETH_TX_OFFLOAD_TCP_TSO | \ + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ +@@ -31,8 +36,6 @@ + RTE_ETH_TX_OFFLOAD_SECURITY) + + #define IAVF_TX_VECTOR_OFFLOAD ( \ +- RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ +- RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \ + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \ +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +index 862f6eb0c0..b4ebac9d34 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +@@ -1074,7 +1074,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, + _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, +- 0, 0, 0, 0, ++ 0, 0, ++ RTE_MBUF_F_RX_VLAN | ++ RTE_MBUF_F_RX_VLAN_STRIPPED, ++ 0, + /* end up 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c +index b416a716cf..b0546a14c6 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c +@@ -1338,7 +1338,10 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq, + (0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, +- 0, 0, 0, 0, ++ 0, 0, ++ RTE_MBUF_F_RX_VLAN | ++ RTE_MBUF_F_RX_VLAN_STRIPPED, ++ 0, + /* end up 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, +diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c +index f92daf97f2..aeffb07cca 100644 +--- a/dpdk/drivers/net/iavf/iavf_vchnl.c ++++ 
b/dpdk/drivers/net/iavf/iavf_vchnl.c +@@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, + vf->link_speed = iavf_convert_link_speed(speed); + } + iavf_dev_link_update(vf->eth_dev, 0); ++ iavf_dev_event_post(vf->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0); + PMD_DRV_LOG(INFO, "Link status update:%s", + vf->link_up ? "up" : "down"); + break; +@@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args, + _clear_cmd(vf); + break; + default: +- /* For other virtchnl ops in running time, +- * wait for the cmd done flag. +- */ +- do { +- if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) +- break; +- iavf_msec_delay(ASQ_DELAY_MS); +- /* If don't read msg or read sys event, continue */ +- } while (i++ < MAX_TRY_TIMES); +- +- if (i >= MAX_TRY_TIMES) { +- PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); ++ if (rte_thread_is_intr()) { ++ /* For virtchnl ops were executed in eal_intr_thread, ++ * need to poll the response. ++ */ ++ do { ++ result = iavf_read_msg_from_pf(adapter, args->out_size, ++ args->out_buffer); ++ if (result == IAVF_MSG_CMD) ++ break; ++ iavf_msec_delay(ASQ_DELAY_MS); ++ } while (i++ < MAX_TRY_TIMES); ++ if (i >= MAX_TRY_TIMES || ++ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { ++ err = -1; ++ PMD_DRV_LOG(ERR, "No response or return failure (%d)" ++ " for cmd %d", vf->cmd_retval, args->ops); ++ } + _clear_cmd(vf); +- err = -EIO; +- } else if (vf->cmd_retval == +- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { +- PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); +- err = -ENOTSUP; +- } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { +- PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", +- vf->cmd_retval, args->ops); +- err = -EINVAL; ++ } else { ++ /* For other virtchnl ops in running time, ++ * wait for the cmd done flag. 
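The new branch above exists because a virtchnl command issued from the EAL interrupt thread cannot sleep-wait for that same thread to deliver the completion, so it must poll the admin queue itself. A stripped-down sketch of the two wait strategies; all of the helpers below are hypothetical stubs for the driver's admin-queue plumbing:

#include <errno.h>
#include <stdbool.h>

#define ASQ_DELAY_MS  1
#define MAX_TRY_TIMES 200
enum { MSG_NON, MSG_CMD };

/* Hypothetical stubs standing in for the real admin-queue calls. */
static int  read_msg_from_pf(void) { return MSG_CMD; }
static bool command_pending(void)  { return false;  }
static void msec_delay(int ms)     { (void)ms;      }

/* Poll for the reply ourselves when running on the interrupt thread;
 * otherwise sleep-wait for the handler to clear the pending flag. */
static int wait_for_reply(bool on_intr_thread)
{
    int tries = 0;

    if (on_intr_thread) {
        do {
            if (read_msg_from_pf() == MSG_CMD)  /* consume reply in place */
                return 0;
            msec_delay(ASQ_DELAY_MS);
        } while (++tries < MAX_TRY_TIMES);
    } else {
        do {
            if (!command_pending())             /* handler did the read */
                return 0;
            msec_delay(ASQ_DELAY_MS);
        } while (++tries < MAX_TRY_TIMES);
    }
    return -EIO;                                /* timed out */
}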
++ */ ++ do { ++ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) ++ break; ++ iavf_msec_delay(ASQ_DELAY_MS); ++ /* If don't read msg or read sys event, continue */ ++ } while (i++ < MAX_TRY_TIMES); ++ ++ if (i >= MAX_TRY_TIMES) { ++ PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); ++ _clear_cmd(vf); ++ err = -EIO; ++ } else if (vf->cmd_retval == ++ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { ++ PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); ++ err = -ENOTSUP; ++ } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { ++ PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", ++ vf->cmd_retval, args->ops); ++ err = -EINVAL; ++ } + } + break; + } +@@ -397,6 +418,26 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args, + return err; + } + ++static int ++iavf_execute_vf_cmd_safe(struct iavf_adapter *adapter, ++ struct iavf_cmd_info *args, int async) ++{ ++ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); ++ int ret; ++ int is_intr_thread = rte_thread_is_intr(); ++ ++ if (is_intr_thread) { ++ if (!rte_spinlock_trylock(&vf->aq_lock)) ++ return -EIO; ++ } else { ++ rte_spinlock_lock(&vf->aq_lock); ++ } ++ ret = iavf_execute_vf_cmd(adapter, args, async); ++ rte_spinlock_unlock(&vf->aq_lock); ++ ++ return ret; ++} ++ + static void + iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, + uint16_t msglen) +@@ -554,7 +595,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter) + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " OP_ENABLE_VLAN_STRIPPING"); +@@ -575,7 +616,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter) + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " OP_DISABLE_VLAN_STRIPPING"); +@@ -604,7 +645,7 @@ iavf_check_api_version(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION"); + return err; +@@ -665,7 +706,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) + args.in_args = (uint8_t *)∩︀ + args.in_args_size = sizeof(caps); + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, +@@ -710,7 +751,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_GET_SUPPORTED_RXDIDS"); +@@ -754,7 +795,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable) + args.in_args_size = sizeof(vlan_strip); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "fail to execute command %s", + enable ? 
"VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" : +@@ -794,7 +835,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable) + args.in_args_size = sizeof(vlan_insert); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "fail to execute command %s", + enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" : +@@ -837,7 +878,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add) + args.in_args_size = sizeof(vlan_filter); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2"); +@@ -858,7 +899,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"); +@@ -889,7 +930,7 @@ iavf_enable_queues(struct iavf_adapter *adapter) + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_QUEUES"); +@@ -917,7 +958,7 @@ iavf_disable_queues(struct iavf_adapter *adapter) + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES"); +@@ -953,7 +994,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES"); +@@ -995,7 +1036,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_QUEUES_V2"); +@@ -1039,7 +1080,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES_V2"); +@@ -1085,7 +1126,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? 
"OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2"); +@@ -1117,7 +1158,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_CONFIG_RSS_LUT"); +@@ -1149,7 +1190,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_CONFIG_RSS_KEY"); +@@ -1247,7 +1288,7 @@ iavf_configure_queues(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " VIRTCHNL_OP_CONFIG_VSI_QUEUES"); +@@ -1288,7 +1329,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP"); + +@@ -1329,7 +1370,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num, + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR"); + +@@ -1389,7 +1430,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_ETHER_ADDRESS" : +@@ -1419,7 +1460,7 @@ iavf_query_stats(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); + *pstats = NULL; +@@ -1457,7 +1498,7 @@ iavf_config_promisc(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, +@@ -1500,7 +1541,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); +@@ -1527,7 +1568,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add) + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_VLAN" : "OP_DEL_VLAN"); +@@ -1554,7 +1595,7 @@ iavf_fdir_add(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER"); + return err; +@@ -1614,7 +1655,7 @@ iavf_fdir_del(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER"); + return err; +@@ -1661,7 +1702,7 @@ iavf_fdir_check(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "fail to check flow director rule"); + return err; +@@ -1704,7 +1745,7 @@ iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to execute command of " + "OP_FLOW_SUBSCRIBE"); +@@ -1755,7 +1796,7 @@ iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to execute command of " + "OP_FLOW_UNSUBSCRIBE"); +@@ -1798,7 +1839,7 @@ iavf_flow_sub_check(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to check flow subscription rule"); + return err; +@@ -1838,7 +1879,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of %s", +@@ -1861,7 +1902,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_GET_RSS_HENA_CAPS"); +@@ -1887,7 +1928,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_SET_RSS_HENA"); +@@ -1908,7 +1949,7 @@ iavf_get_qos_cap(struct iavf_adapter 
*adapter) + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, +@@ -1941,7 +1982,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " VIRTCHNL_OP_CONFIG_TC_MAP"); +@@ -1964,7 +2005,7 @@ int iavf_set_q_bw(struct rte_eth_dev *dev, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " VIRTCHNL_OP_CONFIG_QUEUE_BW"); +@@ -2009,7 +2050,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, + i * sizeof(struct virtchnl_ether_addr); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", +@@ -2053,13 +2094,17 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num) + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { + /* disable interrupt to avoid the admin queue message to be read + * before iavf_read_msg_from_pf. ++ * ++ * don't disable interrupt handler until ready to execute vf cmd. + */ ++ rte_spinlock_lock(&vf->aq_lock); + rte_intr_disable(pci_dev->intr_handle); + err = iavf_execute_vf_cmd(adapter, &args, 0); + rte_intr_enable(pci_dev->intr_handle); ++ rte_spinlock_unlock(&vf->aq_lock); + } else { + rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev); +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + rte_eal_alarm_set(IAVF_ALARM_INTERVAL, + iavf_dev_alarm_handler, dev); + } +@@ -2098,7 +2143,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION"); + return err; +@@ -2129,7 +2174,7 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 1); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 1); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", + "OP_INLINE_IPSEC_CRYPTO"); +@@ -2163,7 +2208,7 @@ iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 nu + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL_OP_CONFIG_QUANTA"); + return err; +@@ -2189,7 +2234,7 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_1588_PTP_GET_CAPS"); +@@ -2217,7 +2262,7 @@ iavf_get_phc_time(struct iavf_rx_queue *rxq) + args.out_size = 
IAVF_AQ_BUF_SZ; + + rte_spinlock_lock(&vf->phc_time_aq_lock); +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME"); +diff --git a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +index 5a817982b4..93a3a6007f 100644 +--- a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h ++++ b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +@@ -2002,8 +2002,8 @@ struct ice_aqc_lldp_get_mib { + #define ICE_AQ_LLDP_DCBX_S 6 + #define ICE_AQ_LLDP_DCBX_M (0x3 << ICE_AQ_LLDP_DCBX_S) + #define ICE_AQ_LLDP_DCBX_NA 0 +-#define ICE_AQ_LLDP_DCBX_IEEE 1 +-#define ICE_AQ_LLDP_DCBX_CEE 2 ++#define ICE_AQ_LLDP_DCBX_CEE 1 ++#define ICE_AQ_LLDP_DCBX_IEEE 2 + /* The following bytes are reserved for the Get LLDP MIB command (0x0A00) + * and in the LLDP MIB Change Event (0x0A01). They are valid for the + * Get LLDP MIB (0x0A00) response only. +diff --git a/dpdk/drivers/net/ice/base/ice_common.c b/dpdk/drivers/net/ice/base/ice_common.c +index 5391bd666b..a327a4b374 100644 +--- a/dpdk/drivers/net/ice/base/ice_common.c ++++ b/dpdk/drivers/net/ice/base/ice_common.c +@@ -167,6 +167,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: ++ hw->mac_type = ICE_MAC_GENERIC; ++ break; + case ICE_DEV_ID_E824S: + case ICE_DEV_ID_E825C_BACKPLANE: + case ICE_DEV_ID_E825C_QSFP: +@@ -2554,9 +2556,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, + struct ice_aqc_list_caps_elem *cap) + { + struct ice_ts_func_info *info = &func_p->ts_func_info; +- u32 number = ICE_TS_FUNC_ENA_M | ICE_TS_SRC_TMR_OWND_M | +- ICE_TS_TMR_ENA_M | ICE_TS_TMR_IDX_OWND_M | +- ICE_TS_TMR_IDX_ASSOC_M; ++ u32 number = LE32_TO_CPU(cap->number); + u8 clk_freq; + + ice_debug(hw, ICE_DBG_INIT, "1588 func caps: raw value %x\n", number); +diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c +index a526c8f32c..b16b27dcbf 100644 +--- a/dpdk/drivers/net/ice/base/ice_sched.c ++++ b/dpdk/drivers/net/ice/base/ice_sched.c +@@ -1417,11 +1417,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) + clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> + GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; + +-#define PSM_CLK_SRC_367_MHZ 0x0 +-#define PSM_CLK_SRC_416_MHZ 0x1 +-#define PSM_CLK_SRC_446_MHZ 0x2 +-#define PSM_CLK_SRC_390_MHZ 0x3 +- + switch (clk_src) { + case PSM_CLK_SRC_367_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; +@@ -1435,11 +1430,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) + case PSM_CLK_SRC_390_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; + break; +- default: +- ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n", +- clk_src); +- /* fall back to a safe default */ +- hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; ++ ++ /* default condition is not required as clk_src is restricted ++ * to a 2-bit value from GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask. ++ * The above switch statements cover the possible values of ++ * this variable. 
++ */ + } + } + +diff --git a/dpdk/drivers/net/ice/base/ice_sched.h b/dpdk/drivers/net/ice/base/ice_sched.h +index 3724ef33a8..64ed5e0f9b 100644 +--- a/dpdk/drivers/net/ice/base/ice_sched.h ++++ b/dpdk/drivers/net/ice/base/ice_sched.h +@@ -38,6 +38,11 @@ + #define ICE_PSM_CLK_446MHZ_IN_HZ 446428571 + #define ICE_PSM_CLK_390MHZ_IN_HZ 390625000 + ++#define PSM_CLK_SRC_367_MHZ 0x0 ++#define PSM_CLK_SRC_416_MHZ 0x1 ++#define PSM_CLK_SRC_446_MHZ 0x2 ++#define PSM_CLK_SRC_390_MHZ 0x3 ++ + struct rl_profile_params { + u32 bw; /* in Kbps */ + u16 rl_multiplier; +diff --git a/dpdk/drivers/net/ice/ice_dcf.c b/dpdk/drivers/net/ice/ice_dcf.c +index 1c3d22ae0f..6f7e103c3b 100644 +--- a/dpdk/drivers/net/ice/ice_dcf.c ++++ b/dpdk/drivers/net/ice/ice_dcf.c +@@ -32,6 +32,8 @@ + #define ICE_DCF_ARQ_MAX_RETRIES 200 + #define ICE_DCF_ARQ_CHECK_TIME 2 /* msecs */ + ++#define ICE_DCF_CHECK_INTERVAL 100 /* 100ms */ ++ + #define ICE_DCF_VF_RES_BUF_SZ \ + (sizeof(struct virtchnl_vf_resource) + \ + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)) +@@ -639,6 +641,8 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) + rte_spinlock_init(&hw->vc_cmd_queue_lock); + TAILQ_INIT(&hw->vc_cmd_queue); + ++ __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED); ++ + hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0); + if (hw->arq_buf == NULL) { + PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory"); +@@ -760,6 +764,11 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) + rte_intr_callback_unregister(intr_handle, + ice_dcf_dev_interrupt_handler, hw); + ++ /* Wait for all `ice-thread` threads to exit. */ ++ while (__atomic_load_n(&hw->vsi_update_thread_num, ++ __ATOMIC_ACQUIRE) != 0) ++ rte_delay_ms(ICE_DCF_CHECK_INTERVAL); ++ + ice_dcf_mode_disable(hw); + iavf_shutdown_adminq(&hw->avf); + +@@ -854,7 +863,8 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw) + { + struct rte_eth_dev *dev = hw->eth_dev; + struct rte_eth_rss_conf *rss_conf; +- uint8_t i, j, nb_q; ++ uint8_t j, nb_q; ++ size_t i; + int ret; + + rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf; +diff --git a/dpdk/drivers/net/ice/ice_dcf.h b/dpdk/drivers/net/ice/ice_dcf.h +index 7f42ebabe9..7becf6d187 100644 +--- a/dpdk/drivers/net/ice/ice_dcf.h ++++ b/dpdk/drivers/net/ice/ice_dcf.h +@@ -105,6 +105,8 @@ struct ice_dcf_hw { + void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw, + uint8_t *msg, uint16_t msglen); + ++ int vsi_update_thread_num; ++ + uint8_t *arq_buf; + + uint16_t num_vfs; +diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.c b/dpdk/drivers/net/ice/ice_dcf_ethdev.c +index dcbf2af5b0..7304ea721c 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_ethdev.c ++++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.c +@@ -115,7 +115,8 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq) + + buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; + rxq->rx_hdr_len = 0; +- rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE); + max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, + dev->data->mtu + ICE_ETH_OVERHEAD); + +diff --git a/dpdk/drivers/net/ice/ice_dcf_parent.c b/dpdk/drivers/net/ice/ice_dcf_parent.c +index 01e390ddda..0563edb0b2 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_parent.c ++++ b/dpdk/drivers/net/ice/ice_dcf_parent.c +@@ -124,6 +124,9 @@ ice_dcf_vsi_update_service_handler(void *param) + 
container_of(hw, struct ice_dcf_adapter, real_hw); + struct ice_adapter *parent_adapter = &adapter->parent; + ++ __atomic_fetch_add(&hw->vsi_update_thread_num, 1, ++ __ATOMIC_RELAXED); ++ + pthread_detach(pthread_self()); + + rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL); +@@ -154,6 +157,9 @@ ice_dcf_vsi_update_service_handler(void *param) + + free(param); + ++ __atomic_fetch_sub(&hw->vsi_update_thread_num, 1, ++ __ATOMIC_RELEASE); ++ + return NULL; + } + +diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c +index 0bc739daf0..bba2715b1d 100644 +--- a/dpdk/drivers/net/ice/ice_ethdev.c ++++ b/dpdk/drivers/net/ice/ice_ethdev.c +@@ -2399,6 +2399,17 @@ ice_dev_init(struct rte_eth_dev *dev) + /* Initialize TM configuration */ + ice_tm_conf_init(dev); + ++ if (ice_is_e810(hw)) ++ hw->phy_cfg = ICE_PHY_E810; ++ else ++ hw->phy_cfg = ICE_PHY_E822; ++ ++ if (hw->phy_cfg == ICE_PHY_E822) { ++ ret = ice_start_phy_timer_e822(hw, hw->pf_id, true); ++ if (ret) ++ PMD_INIT_LOG(ERR, "Failed to start phy timer\n"); ++ } ++ + if (!ad->is_safe_mode) { + ret = ice_flow_init(ad); + if (ret) { +@@ -2415,6 +2426,9 @@ ice_dev_init(struct rte_eth_dev *dev) + + pf->supported_rxdid = ice_get_supported_rxdid(hw); + ++ /* reset all stats of the device, including pf and main vsi */ ++ ice_stats_reset(dev); ++ + return 0; + + err_flow_init: +@@ -3298,7 +3312,8 @@ static int ice_init_rss(struct ice_pf *pf) + + rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf; + nb_q = dev_data->nb_rx_queues; +- vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE; ++ vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + ++ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE; + vsi->rss_lut_size = pf->hash_lut_size; + + if (nb_q == 0) { +@@ -3339,7 +3354,10 @@ static int ice_init_rss(struct ice_pf *pf) + vsi->rss_key_size)); + + rte_memcpy(key.standard_rss_key, vsi->rss_key, +- RTE_MIN(sizeof(key.standard_rss_key), vsi->rss_key_size)); ++ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); ++ rte_memcpy(key.extended_hash_key, ++ &vsi->rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE], ++ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE); + ret = ice_aq_set_rss_key(hw, vsi->idx, &key); + if (ret) + goto out; +@@ -3660,6 +3678,16 @@ ice_dev_start(struct rte_eth_dev *dev) + } + } + ++ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { ++ /* Register mbuf field and flag for Rx timestamp */ ++ ret = rte_mbuf_dyn_rx_timestamp_register(&ice_timestamp_dynfield_offset, ++ &ice_timestamp_dynflag); ++ if (ret) { ++ PMD_DRV_LOG(ERR, "Cannot register mbuf field/flag for timestamp"); ++ goto tx_err; ++ } ++ } ++ + /* program Rx queues' context in hardware*/ + for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { + ret = ice_rx_queue_start(dev, nb_rxq); +@@ -5800,11 +5828,6 @@ ice_timesync_enable(struct rte_eth_dev *dev) + return -1; + } + +- if (ice_is_e810(hw)) +- hw->phy_cfg = ICE_PHY_E810; +- else +- hw->phy_cfg = ICE_PHY_E822; +- + if (hw->func_caps.ts_func_info.src_tmr_owned) { + ret = ice_ptp_init_phc(hw); + if (ret) { +@@ -5925,16 +5948,17 @@ ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); ++ uint8_t tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; + uint32_t hi, lo, lo2; + uint64_t time, ns; + +- lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); +- hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0)); +- lo2 = ICE_READ_REG(hw, 
GLTSYN_TIME_L(0)); ++ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(tmr_idx)); ++ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(tmr_idx)); ++ lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(tmr_idx)); + + if (lo2 < lo) { +- lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); +- hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0)); ++ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(tmr_idx)); ++ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(tmr_idx)); + } + + time = ((uint64_t)hi << 32) | lo; +@@ -5950,6 +5974,7 @@ ice_timesync_disable(struct rte_eth_dev *dev) + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); ++ uint8_t tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; + uint64_t val; + uint8_t lport; + +@@ -5957,12 +5982,12 @@ ice_timesync_disable(struct rte_eth_dev *dev) + + ice_clear_phy_tstamp(hw, lport, 0); + +- val = ICE_READ_REG(hw, GLTSYN_ENA(0)); ++ val = ICE_READ_REG(hw, GLTSYN_ENA(tmr_idx)); + val &= ~GLTSYN_ENA_TSYN_ENA_M; +- ICE_WRITE_REG(hw, GLTSYN_ENA(0), val); ++ ICE_WRITE_REG(hw, GLTSYN_ENA(tmr_idx), val); + +- ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0); +- ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0); ++ ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(tmr_idx), 0); ++ ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(tmr_idx), 0); + + ad->ptp_ena = 0; + +diff --git a/dpdk/drivers/net/ice/ice_fdir_filter.c b/dpdk/drivers/net/ice/ice_fdir_filter.c +index 7914ba9407..81e88c1dd8 100644 +--- a/dpdk/drivers/net/ice/ice_fdir_filter.c ++++ b/dpdk/drivers/net/ice/ice_fdir_filter.c +@@ -1876,7 +1876,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, + (uint8_t *)(uintptr_t)raw_mask->pattern; + uint8_t *tmp_spec, *tmp_mask; + uint16_t tmp_val = 0; +- uint8_t pkt_len = 0; ++ uint16_t pkt_len = 0; + uint8_t tmp = 0; + int i, j; + +diff --git a/dpdk/drivers/net/ice/ice_generic_flow.c b/dpdk/drivers/net/ice/ice_generic_flow.c +index d496c28dec..91bf1d6fcb 100644 +--- a/dpdk/drivers/net/ice/ice_generic_flow.c ++++ b/dpdk/drivers/net/ice/ice_generic_flow.c +@@ -1836,6 +1836,13 @@ ice_flow_init(struct ice_adapter *ad) + if (ice_parser_create(&ad->hw, &ad->psr) != ICE_SUCCESS) + PMD_INIT_LOG(WARNING, "Failed to initialize DDP parser, raw packet filter will not be supported"); + ++ if (ad->psr) { ++ if (ice_is_dvm_ena(&ad->hw)) ++ ice_parser_dvm_set(ad->psr, true); ++ else ++ ice_parser_dvm_set(ad->psr, false); ++ } ++ + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->init == NULL) { + PMD_INIT_LOG(ERR, "Invalid engine type (%d)", +@@ -2030,6 +2037,14 @@ ice_flow_valid_attr(struct ice_adapter *ad, + return -rte_errno; + } + ++ /* Not supported */ ++ if (attr->transfer) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, ++ attr, "Not support transfer."); ++ return -rte_errno; ++ } ++ + /* Check pipeline mode support to set classification stage */ + if (ad->devargs.pipe_mode_support) { + if (attr->priority == 0) +diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c +index f35727856e..52646e9408 100644 +--- a/dpdk/drivers/net/ice/ice_hash.c ++++ b/dpdk/drivers/net/ice/ice_hash.c +@@ -653,8 +653,8 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + const struct rte_flow_item_raw *raw_spec, *raw_mask; + struct ice_parser_profile prof; + struct ice_parser_result rslt; ++ uint16_t spec_len, pkt_len; + uint8_t *pkt_buf, *msk_buf; +- uint8_t spec_len, pkt_len; + uint8_t tmp_val = 0; + uint8_t tmp_c = 0; + int i, j; +diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c +index 
0ea0045836..598edb29c9 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx.c ++++ b/dpdk/drivers/net/ice/ice_rxtx.c +@@ -259,7 +259,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) + /* Set buffer size as the head split is disabled. */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); +- rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE); + rxq->max_pkt_len = + RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, + frame_size); +@@ -273,7 +274,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) + return -EINVAL; + } + +- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { ++ if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + /* Register mbuf field and flag for Rx timestamp */ + err = rte_mbuf_dyn_rx_timestamp_register( + &ice_timestamp_dynfield_offset, +@@ -283,6 +284,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) + "Cannot register mbuf field/flag for timestamp"); + return -EINVAL; + } ++ rxq->ts_enable = true; + } + + memset(&rx_ctx, 0, sizeof(rx_ctx)); +@@ -670,6 +672,8 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -EINVAL; + } + ++ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ++ rxq->ts_enable = true; + err = ice_program_hw_rx_queue(rxq); + if (err) { + PMD_DRV_LOG(ERR, "fail to program RX queue %u", +@@ -1761,7 +1765,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) + ice_rxd_to_vlan_tci(mb, &rxdp[j]); + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]); + #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC +- if (ice_timestamp_dynflag > 0) { ++ if (ice_timestamp_dynflag > 0 && ++ (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + rxq->time_high = + rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { +@@ -2127,7 +2132,8 @@ ice_recv_scattered_pkts(void *rx_queue, + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd); + pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); + #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC +- if (ice_timestamp_dynflag > 0) { ++ if (ice_timestamp_dynflag > 0 && ++ (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + rxq->time_high = + rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { +@@ -2617,7 +2623,8 @@ ice_recv_pkts(void *rx_queue, + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); + pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); + #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC +- if (ice_timestamp_dynflag > 0) { ++ if (ice_timestamp_dynflag > 0 && ++ (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + rxq->time_high = + rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { +@@ -2727,7 +2734,8 @@ ice_parse_tunneling_params(uint64_t ol_flags, + * Shall be set only if L4TUNT = 01b and EIPT is not zero + */ + if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && +- (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING)) ++ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) + *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; + } + +@@ -2738,10 +2746,7 @@ ice_txd_enable_checksum(uint64_t ol_flags, + union ice_tx_offload tx_offload) + { + /* Set MACLEN */ +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) +- *td_offset |= (tx_offload.outer_l2_len >> 1) +- << ICE_TX_DESC_LEN_MACLEN_S; +- else ++ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) + *td_offset |= (tx_offload.l2_len >> 1) + << 
ICE_TX_DESC_LEN_MACLEN_S; + +@@ -3002,9 +3007,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + + /* Fill in tunneling parameters if necessary */ + cd_tunneling_params = 0; +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ++ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { ++ td_offset |= (tx_offload.outer_l2_len >> 1) ++ << ICE_TX_DESC_LEN_MACLEN_S; + ice_parse_tunneling_params(ol_flags, tx_offload, + &cd_tunneling_params); ++ } + + /* Enable checksum offloading */ + if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) +diff --git a/dpdk/drivers/net/ice/ice_rxtx.h b/dpdk/drivers/net/ice/ice_rxtx.h +index 4947d5c25f..268289716e 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx.h ++++ b/dpdk/drivers/net/ice/ice_rxtx.h +@@ -51,6 +51,9 @@ extern int ice_timestamp_dynfield_offset; + /* Max header size can be 2K - 64 bytes */ + #define ICE_RX_HDR_BUF_SIZE (2048 - 64) + ++/* Max data buffer size must be 16K - 128 bytes */ ++#define ICE_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128) ++ + #define ICE_HEADER_SPLIT_ENA BIT(0) + + typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq); +@@ -117,6 +120,7 @@ struct ice_rx_queue { + uint64_t hw_time_update; /* SW time of HW record updating */ + struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG]; + uint32_t rxseg_nb; ++ bool ts_enable; /* if rxq timestamp is enabled */ + }; + + struct ice_tx_entry { +@@ -349,26 +353,27 @@ static inline + uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, struct ice_adapter *ad, + uint32_t flag, uint32_t in_timestamp) + { ++ uint8_t tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; + const uint64_t mask = 0xFFFFFFFF; + uint32_t hi, lo, lo2, delta; + uint64_t ns; + + if (flag) { +- lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); +- hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0)); ++ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(tmr_idx)); ++ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(tmr_idx)); + + /* + * On typical system, the delta between lo and lo2 is ~1000ns, + * so 10000 seems a large-enough but not overly-big guard band. 
+ */ + if (lo > (UINT32_MAX - ICE_TIMESYNC_REG_WRAP_GUARD_BAND)) +- lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); ++ lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(tmr_idx)); + else + lo2 = lo; + + if (lo2 < lo) { +- lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0)); +- hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0)); ++ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(tmr_idx)); ++ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(tmr_idx)); + } + + ad->time_hw = ((uint64_t)hi << 32) | lo; +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +index eec6ea2134..55840cf170 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +@@ -72,7 +72,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs, + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; +- rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); ++ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; + } + +diff --git a/dpdk/drivers/net/idpf/idpf_ethdev.c b/dpdk/drivers/net/idpf/idpf_ethdev.c +index 8b347631ce..b31cb47e90 100644 +--- a/dpdk/drivers/net/idpf/idpf_ethdev.c ++++ b/dpdk/drivers/net/idpf/idpf_ethdev.c +@@ -563,8 +563,6 @@ idpf_dev_start(struct rte_eth_dev *dev) + uint16_t req_vecs_num; + int ret; + +- vport->stopped = 0; +- + if (dev->data->mtu > vport->max_mtu) { + PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu); + ret = -EINVAL; +@@ -622,7 +620,7 @@ idpf_dev_stop(struct rte_eth_dev *dev) + { + struct idpf_vport *vport = dev->data->dev_private; + +- if (vport->stopped == 1) ++ if (dev->data->dev_started == 0) + return 0; + + idpf_vc_ena_dis_vport(vport, false); +@@ -634,8 +632,6 @@ idpf_dev_stop(struct rte_eth_dev *dev) + if (vport->recv_vectors != NULL) + idpf_vc_dealloc_vectors(vport); + +- vport->stopped = 1; +- + return 0; + } + +@@ -1313,7 +1309,11 @@ static struct rte_pci_driver rte_idpf_pmd = { + */ + RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd); + RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map); +-RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci"); ++RTE_PMD_REGISTER_KMOD_DEP(net_idpf, "* igb_uio | vfio-pci"); ++RTE_PMD_REGISTER_PARAM_STRING(net_idpf, ++ IDPF_TX_SINGLE_Q "=<0|1> " ++ IDPF_RX_SINGLE_Q "=<0|1> " ++ IDPF_VPORT "=[vport_set0,[vport_set1],...]"); + + RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE); + RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE); +diff --git a/dpdk/drivers/net/idpf/idpf_ethdev.h b/dpdk/drivers/net/idpf/idpf_ethdev.h +index 991677c3bc..5633057a81 100644 +--- a/dpdk/drivers/net/idpf/idpf_ethdev.h ++++ b/dpdk/drivers/net/idpf/idpf_ethdev.h +@@ -128,7 +128,6 @@ struct idpf_vport { + + uint16_t devarg_id; + +- bool stopped; + }; + + struct idpf_adapter { +@@ -164,7 +163,6 @@ struct idpf_adapter { + uint32_t max_txq_per_msg; + + uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned; +- + bool rx_vec_allowed; + bool tx_vec_allowed; + bool rx_use_avx512; +diff --git a/dpdk/drivers/net/idpf/idpf_rxtx.c b/dpdk/drivers/net/idpf/idpf_rxtx.c +index b4a396c3f5..ceb34d4d32 100644 +--- a/dpdk/drivers/net/idpf/idpf_rxtx.c ++++ b/dpdk/drivers/net/idpf/idpf_rxtx.c +@@ -374,7 +374,8 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq, + bufq->adapter = adapter; + + len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM; +- bufq->rx_buf_len = len; ++ bufq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IDPF_RLAN_CTX_DBUF_S)); ++ bufq->rx_buf_len = 
RTE_MIN(bufq->rx_buf_len, IDPF_RX_MAX_DATA_BUF_SIZE); + + /* Allocate the software ring. */ + len = nb_desc + IDPF_RX_MAX_BURST; +@@ -473,7 +474,8 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + rxq->offloads = offloads; + + len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; +- rxq->rx_buf_len = len; ++ rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IDPF_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, IDPF_RX_MAX_DATA_BUF_SIZE); + + len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST; + ring_size = RTE_ALIGN(len * +@@ -1162,6 +1164,7 @@ idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + } + + rxq = dev->data->rx_queues[rx_queue_id]; ++ rxq->q_started = false; + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { + rxq->ops->release_mbufs(rxq); + reset_single_rx_queue(rxq); +@@ -1193,6 +1196,7 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + } + + txq = dev->data->tx_queues[tx_queue_id]; ++ txq->q_started = false; + txq->ops->release_mbufs(txq); + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { + reset_single_tx_queue(txq); +@@ -1508,6 +1512,7 @@ idpf_split_tx_free(struct idpf_tx_queue *cq) + struct idpf_tx_entry *txe; + struct idpf_tx_queue *txq; + uint16_t gen, qid, q_head; ++ uint16_t nb_desc_clean; + uint8_t ctype; + + txd = &compl_ring[next]; +@@ -1525,20 +1530,24 @@ idpf_split_tx_free(struct idpf_tx_queue *cq) + + switch (ctype) { + case IDPF_TXD_COMPLT_RE: +- if (q_head == 0) +- txq->last_desc_cleaned = txq->nb_tx_desc - 1; +- else +- txq->last_desc_cleaned = q_head - 1; +- if (unlikely((txq->last_desc_cleaned % 32) == 0)) { ++ /* clean to q_head which indicates be fetched txq desc id + 1. ++ * TODO: need to refine and remove the if condition. ++ */ ++ if (unlikely(q_head % 32)) { + PMD_DRV_LOG(ERR, "unexpected desc (head = %u) completion.", + q_head); + return; + } +- ++ if (txq->last_desc_cleaned > q_head) ++ nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) + ++ q_head; ++ else ++ nb_desc_clean = q_head - txq->last_desc_cleaned; ++ txq->nb_free += nb_desc_clean; ++ txq->last_desc_cleaned = q_head; + break; + case IDPF_TXD_COMPLT_RS: +- txq->nb_free++; +- txq->nb_used--; ++ /* q_head indicates sw_id when ctype is 2 */ + txe = &txq->sw_ring[q_head]; + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); +@@ -1693,12 +1702,16 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + /* fill the last descriptor with End of Packet (EOP) bit */ + txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP; + +- if (unlikely((tx_id % 32) == 0)) +- txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE; + if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK) + txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN; + txq->nb_free = (uint16_t)(txq->nb_free - nb_used); + txq->nb_used = (uint16_t)(txq->nb_used + nb_used); ++ ++ if (txq->nb_used >= 32) { ++ txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE; ++ /* Update txq RE bit counters */ ++ txq->nb_used = 0; ++ } + } + + /* update the tail pointer if any packets were processed */ +diff --git a/dpdk/drivers/net/idpf/idpf_rxtx.h b/dpdk/drivers/net/idpf/idpf_rxtx.h +index 730dc64ebc..1c5b5b7c38 100644 +--- a/dpdk/drivers/net/idpf/idpf_rxtx.h ++++ b/dpdk/drivers/net/idpf/idpf_rxtx.h +@@ -6,6 +6,9 @@ + #define _IDPF_RXTX_H_ + + #include "idpf_ethdev.h" ++#define IDPF_RLAN_CTX_DBUF_S 7 ++#define IDPF_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128) ++ + + /* MTS */ + #define GLTSYN_CMD_SYNC_0_0 (PF_TIMESYNC_BASE + 0x0) +diff --git 
a/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c b/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c +index fb2b6bb53c..f31582f5fd 100644 +--- a/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c +@@ -843,6 +843,10 @@ idpf_singleq_tx_release_mbufs_avx512(struct idpf_tx_queue *txq) + } + i = 0; + } ++ for (; i < txq->tx_tail; i++) { ++ rte_pktmbuf_free_seg(swr[i].mbuf); ++ swr[i].mbuf = NULL; ++ } + } + + static const struct idpf_txq_ops avx512_singleq_tx_vec_ops = { +diff --git a/dpdk/drivers/net/igc/igc_txrx.c b/dpdk/drivers/net/igc/igc_txrx.c +index ffd219b0df..160865e911 100644 +--- a/dpdk/drivers/net/igc/igc_txrx.c ++++ b/dpdk/drivers/net/igc/igc_txrx.c +@@ -1291,6 +1291,7 @@ igc_rx_init(struct rte_eth_dev *dev) + dvmolr |= IGC_DVMOLR_STRCRC; + + IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return 0; +@@ -1934,6 +1935,7 @@ igc_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + igc_tx_queue_release_mbufs(txq); + igc_reset_tx_queue(txq); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1942,6 +1944,7 @@ igc_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + igc_rx_queue_release_mbufs(rxq); + igc_reset_rx_queue(rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + } +@@ -2187,6 +2190,7 @@ igc_tx_init(struct rte_eth_dev *dev) + IGC_TXDCTL_WTHRESH_MSK; + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + IGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + igc_config_collision_dist(hw); +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c +index 70a06a3b15..2c15611a23 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c +@@ -558,7 +558,7 @@ static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev) + snprintf(name, sizeof(name), "net_%s_representor_%d", + afu_dev->device.name, i); + +- ethdev = rte_eth_dev_allocated(afu_dev->device.name); ++ ethdev = rte_eth_dev_allocated(name); + if (ethdev != NULL) + rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit); + } +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +index 2ef96a984a..e50fc73f43 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +@@ -2579,7 +2579,7 @@ ipn3ke_rpst_scan_handle_request(__rte_unused void *param) + } + rte_delay_us(50 * MS); + +- if (num == 0xffffff) ++ if (num == 0 || num == 0xffffff) + return NULL; + } + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +index ae9f65b334..65655b9212 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +@@ -3852,23 +3852,32 @@ static int + ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) + { + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- u16 eeprom_verh, eeprom_verl; +- u32 etrack_id; ++ struct ixgbe_nvm_version nvm_ver; + int ret; + +- ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); +- ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); ++ ixgbe_get_oem_prod_version(hw, &nvm_ver); ++ if (nvm_ver.oem_valid) { ++ snprintf(fw_version, fw_size, "%x.%x.%x", ++ nvm_ver.oem_major, nvm_ver.oem_minor, ++ nvm_ver.oem_release); ++ return 0; ++ } ++ ++ ixgbe_get_etk_id(hw, &nvm_ver); ++ 
ixgbe_get_orom_version(hw, &nvm_ver); + +- etrack_id = (eeprom_verh << 16) | eeprom_verl; +- ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); ++ if (nvm_ver.or_valid) { ++ snprintf(fw_version, fw_size, "0x%08x, %d.%d.%d", ++ nvm_ver.etk_id, nvm_ver.or_major, ++ nvm_ver.or_build, nvm_ver.or_patch); ++ return 0; ++ } ++ ++ ret = snprintf(fw_version, fw_size, "0x%08x", nvm_ver.etk_id); + if (ret < 0) + return -EINVAL; + +- ret += 1; /* add the size of '\0' */ +- if (fw_size < (size_t)ret) +- return ret; +- else +- return 0; ++ return (fw_size < (size_t)ret++) ? ret : 0; + } + + static int +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/dpdk/drivers/net/ixgbe/ixgbe_flow.c +index 110ff34fcc..7cccbfddb3 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_flow.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_flow.c +@@ -1918,9 +1918,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + + /* check src addr mask */ + for (j = 0; j < 16; j++) { +- if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) { +- rule->mask.src_ipv6_mask |= 1 << j; +- } else if (ipv6_mask->hdr.src_addr[j] != 0) { ++ if (ipv6_mask->hdr.src_addr[j] == 0) { ++ rule->mask.src_ipv6_mask &= ~(1 << j); ++ } else if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, +@@ -1931,9 +1931,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + + /* check dst addr mask */ + for (j = 0; j < 16; j++) { +- if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) { +- rule->mask.dst_ipv6_mask |= 1 << j; +- } else if (ipv6_mask->hdr.dst_addr[j] != 0) { ++ if (ipv6_mask->hdr.dst_addr[j] == 0) { ++ rule->mask.dst_ipv6_mask &= ~(1 << j); ++ } else if (ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +index c9d6ca9efe..a3a7c68806 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +@@ -1818,11 +1818,22 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. ++ * ++ * Meanwhile, to prevent the CPU from executing out of order, we ++ * need to use a proper memory barrier to ensure the memory ++ * ordering below. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + break; ++ ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. ++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + /* +@@ -2089,32 +2100,10 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, + + next_desc: + /* +- * The code in this whole file uses the volatile pointer to +- * ensure the read ordering of the status and the rest of the +- * descriptor fields (on the compiler level only!!!). This is so +- * UGLY - why not to just use the compiler barrier instead? DPDK +- * even has the rte_compiler_barrier() for that. +- * +- * But most importantly this is just wrong because this doesn't +- * ensure memory ordering in a general case at all. For +- * instance, DPDK is supposed to work on Power CPUs where +- * compiler barrier may just not be enough! 
+- * +- * I tried to write only this function properly to have a +- * starting point (as a part of an LRO/RSC series) but the +- * compiler cursed at me when I tried to cast away the +- * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm +- * keeping it the way it is for now. +- * +- * The code in this file is broken in so many other places and +- * will just not work on a big endian CPU anyway therefore the +- * lines below will have to be revisited together with the rest +- * of the ixgbe PMD. +- * +- * TODO: +- * - Get rid of "volatile" and let the compiler do its job. +- * - Use the proper memory barrier (rte_rmb()) to ensure the +- * memory ordering below. ++ * "Volatile" only prevents caching of the variable marked ++ * volatile. Most important, "volatile" cannot prevent the CPU ++ * from executing out of order. So, it is necessary to use a ++ * proper memory barrier to ensure the memory ordering below. + */ + rxdp = &rx_ring[rx_id]; + staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error); +@@ -2122,6 +2111,12 @@ next_desc: + if (!(staterr & IXGBE_RXDADV_STAT_DD)) + break; + ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. ++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " +@@ -3384,6 +3379,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -3393,6 +3389,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + /* If loopback mode was enabled, reconfigure the link accordingly */ +@@ -5830,6 +5827,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); ++ else ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + +@@ -5847,6 +5846,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); ++ else ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1); + +diff --git a/dpdk/drivers/net/mana/gdma.c b/dpdk/drivers/net/mana/gdma.c +index 3d4039014f..65685fe236 100644 +--- a/dpdk/drivers/net/mana/gdma.c ++++ b/dpdk/drivers/net/mana/gdma.c +@@ -14,12 +14,12 @@ gdma_get_wqe_pointer(struct mana_gdma_queue *queue) + (queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) & + (queue->size - 1); + +- DRV_LOG(DEBUG, "txq sq_head %u sq_size %u offset_in_bytes %u", +- queue->head, queue->size, offset_in_bytes); ++ DP_LOG(DEBUG, "txq sq_head %u sq_size %u offset_in_bytes %u", ++ queue->head, queue->size, offset_in_bytes); + + if (offset_in_bytes + GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue->size) +- DRV_LOG(ERR, "fatal error: offset_in_bytes %u too big", +- offset_in_bytes); ++ DP_LOG(ERR, "fatal error: offset_in_bytes %u too big", ++ offset_in_bytes); + + return ((uint8_t *)queue->buffer) + offset_in_bytes; + } +@@ -39,11 +39,11 @@ write_dma_client_oob(uint8_t *work_queue_buffer_pointer, + client_oob_size / sizeof(uint32_t); + 
header->client_data_unit = work_request->client_data_unit; + +- DRV_LOG(DEBUG, "queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u", +- work_queue_buffer_pointer, header->num_sgl_entries, +- header->inline_client_oob_size_in_dwords, +- header->client_data_unit, work_request->inline_oob_data, +- work_request->inline_oob_size_in_bytes); ++ DP_LOG(DEBUG, "queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u", ++ work_queue_buffer_pointer, header->num_sgl_entries, ++ header->inline_client_oob_size_in_dwords, ++ header->client_data_unit, work_request->inline_oob_data, ++ work_request->inline_oob_size_in_bytes); + + p += sizeof(struct gdma_wqe_dma_oob); + if (work_request->inline_oob_data && +@@ -73,8 +73,8 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer, + uint32_t size_to_queue_end; + uint32_t sge_list_size; + +- DRV_LOG(DEBUG, "work_queue_cur_pointer %p work_request->flags %x", +- work_queue_cur_pointer, work_request->flags); ++ DP_LOG(DEBUG, "work_queue_cur_pointer %p work_request->flags %x", ++ work_queue_cur_pointer, work_request->flags); + + num_sge = work_request->num_sgl_elements; + sge_list = work_request->sgl; +@@ -110,9 +110,9 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer, + sge_list_size = size; + } + +- DRV_LOG(DEBUG, "sge %u address 0x%" PRIx64 " size %u key %u list_s %u", +- num_sge, sge_list->address, sge_list->size, +- sge_list->memory_key, sge_list_size); ++ DP_LOG(DEBUG, "sge %u address 0x%" PRIx64 " size %u key %u list_s %u", ++ num_sge, sge_list->address, sge_list->size, ++ sge_list->memory_key, sge_list_size); + + return sge_list_size; + } +@@ -123,7 +123,7 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer, + int + gdma_post_work_request(struct mana_gdma_queue *queue, + struct gdma_work_request *work_req, +- struct gdma_posted_wqe_info *wqe_info) ++ uint32_t *wqe_size_in_bu) + { + uint32_t client_oob_size = + work_req->inline_oob_size_in_bytes > +@@ -141,22 +141,15 @@ gdma_post_work_request(struct mana_gdma_queue *queue, + uint32_t queue_free_units = queue->count - (queue->head - queue->tail); + + if (wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue_free_units) { +- DRV_LOG(DEBUG, "WQE size %u queue count %u head %u tail %u", +- wqe_size, queue->count, queue->head, queue->tail); ++ DP_LOG(DEBUG, "WQE size %u queue count %u head %u tail %u", ++ wqe_size, queue->count, queue->head, queue->tail); + return -EBUSY; + } + +- DRV_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u", +- client_oob_size, sgl_data_size, wqe_size); ++ DP_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u", ++ client_oob_size, sgl_data_size, wqe_size); + +- if (wqe_info) { +- wqe_info->wqe_index = +- ((queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) & +- (queue->size - 1)) / GDMA_WQE_ALIGNMENT_UNIT_SIZE; +- wqe_info->unmasked_queue_offset = queue->head; +- wqe_info->wqe_size_in_bu = +- wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE; +- } ++ *wqe_size_in_bu = wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE; + + wq_buffer_pointer = gdma_get_wqe_pointer(queue); + wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req, +@@ -242,15 +235,15 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, + break; + + default: +- DRV_LOG(ERR, "Unsupported queue type %d", queue_type); ++ DP_LOG(ERR, "Unsupported queue type %d", queue_type); + return -1; + } + + /* Ensure all writes are done before ringing doorbell */ + rte_wmb(); + +- DRV_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u", +- db_page, addr, queue_id, queue_type, 
tail, arm); ++ DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u", ++ db_page, addr, queue_id, queue_type, tail, arm); + + rte_write64(e.as_uint64, addr); + return 0; +@@ -259,45 +252,51 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, + /* + * Poll completion queue for completions. + */ +-int +-gdma_poll_completion_queue(struct mana_gdma_queue *cq, struct gdma_comp *comp) ++uint32_t ++gdma_poll_completion_queue(struct mana_gdma_queue *cq, ++ struct gdma_comp *gdma_comp, uint32_t max_comp) + { + struct gdma_hardware_completion_entry *cqe; +- uint32_t head = cq->head % cq->count; + uint32_t new_owner_bits, old_owner_bits; + uint32_t cqe_owner_bits; ++ uint32_t num_comp = 0; + struct gdma_hardware_completion_entry *buffer = cq->buffer; + +- cqe = &buffer[head]; +- new_owner_bits = (cq->head / cq->count) & COMPLETION_QUEUE_OWNER_MASK; +- old_owner_bits = (cq->head / cq->count - 1) & +- COMPLETION_QUEUE_OWNER_MASK; +- cqe_owner_bits = cqe->owner_bits; ++ while (num_comp < max_comp) { ++ cqe = &buffer[cq->head % cq->count]; ++ new_owner_bits = (cq->head / cq->count) & ++ COMPLETION_QUEUE_OWNER_MASK; ++ old_owner_bits = (cq->head / cq->count - 1) & ++ COMPLETION_QUEUE_OWNER_MASK; ++ cqe_owner_bits = cqe->owner_bits; ++ ++ DP_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x", ++ cqe_owner_bits, old_owner_bits); ++ ++ /* No new entry */ ++ if (cqe_owner_bits == old_owner_bits) ++ break; ++ ++ if (cqe_owner_bits != new_owner_bits) { ++ DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x", ++ cq->id, cqe_owner_bits, new_owner_bits); ++ break; ++ } + +- DRV_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x", +- cqe_owner_bits, old_owner_bits); ++ gdma_comp[num_comp].cqe_data = cqe->dma_client_data; ++ num_comp++; + +- if (cqe_owner_bits == old_owner_bits) +- return 0; /* No new entry */ ++ cq->head++; + +- if (cqe_owner_bits != new_owner_bits) { +- DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x", +- cq->id, cqe_owner_bits, new_owner_bits); +- return -1; ++ DP_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u", ++ new_owner_bits, old_owner_bits, cqe_owner_bits, ++ cqe->wq_num, cqe->is_sq, cq->head); + } + +- /* Ensure checking owner bits happens before reading from CQE */ ++ /* Make sure the CQE owner bits are checked before we access the data ++ * in CQE ++ */ + rte_rmb(); + +- comp->work_queue_number = cqe->wq_num; +- comp->send_work_queue = cqe->is_sq; +- +- memcpy(comp->completion_data, cqe->dma_client_data, GDMA_COMP_DATA_SIZE); +- +- cq->head++; +- +- DRV_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u", +- new_owner_bits, old_owner_bits, cqe_owner_bits, +- comp->work_queue_number, comp->send_work_queue, cq->head); +- return 1; ++ return num_comp; + } +diff --git a/dpdk/drivers/net/mana/mana.c b/dpdk/drivers/net/mana/mana.c +index 43221e743e..7630118d4f 100644 +--- a/dpdk/drivers/net/mana/mana.c ++++ b/dpdk/drivers/net/mana/mana.c +@@ -487,6 +487,15 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + goto fail; + } + ++ txq->gdma_comp_buf = rte_malloc_socket("mana_txq_comp", ++ sizeof(*txq->gdma_comp_buf) * nb_desc, ++ RTE_CACHE_LINE_SIZE, socket_id); ++ if (!txq->gdma_comp_buf) { ++ DRV_LOG(ERR, "failed to allocate txq comp"); ++ ret = -ENOMEM; ++ goto fail; ++ } ++ + ret = mana_mr_btree_init(&txq->mr_btree, + MANA_MR_BTREE_PER_QUEUE_N, socket_id); + if (ret) { +@@ -506,6 +515,7 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + return 0; + + fail: ++ 
rte_free(txq->gdma_comp_buf); + rte_free(txq->desc_ring); + rte_free(txq); + return ret; +@@ -518,6 +528,7 @@ mana_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + + mana_mr_btree_free(&txq->mr_btree); + ++ rte_free(txq->gdma_comp_buf); + rte_free(txq->desc_ring); + rte_free(txq); + } +@@ -557,6 +568,15 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + rxq->desc_ring_head = 0; + rxq->desc_ring_tail = 0; + ++ rxq->gdma_comp_buf = rte_malloc_socket("mana_rxq_comp", ++ sizeof(*rxq->gdma_comp_buf) * nb_desc, ++ RTE_CACHE_LINE_SIZE, socket_id); ++ if (!rxq->gdma_comp_buf) { ++ DRV_LOG(ERR, "failed to allocate rxq comp"); ++ ret = -ENOMEM; ++ goto fail; ++ } ++ + ret = mana_mr_btree_init(&rxq->mr_btree, + MANA_MR_BTREE_PER_QUEUE_N, socket_id); + if (ret) { +@@ -572,6 +592,7 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + return 0; + + fail: ++ rte_free(rxq->gdma_comp_buf); + rte_free(rxq->desc_ring); + rte_free(rxq); + return ret; +@@ -584,6 +605,7 @@ mana_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + + mana_mr_btree_free(&rxq->mr_btree); + ++ rte_free(rxq->gdma_comp_buf); + rte_free(rxq->desc_ring); + rte_free(rxq); + } +@@ -616,9 +638,9 @@ mana_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + if (!txq) + continue; + +- stats->opackets = txq->stats.packets; +- stats->obytes = txq->stats.bytes; +- stats->oerrors = txq->stats.errors; ++ stats->opackets += txq->stats.packets; ++ stats->obytes += txq->stats.bytes; ++ stats->oerrors += txq->stats.errors; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = txq->stats.packets; +@@ -633,9 +655,9 @@ mana_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + if (!rxq) + continue; + +- stats->ipackets = rxq->stats.packets; +- stats->ibytes = rxq->stats.bytes; +- stats->ierrors = rxq->stats.errors; ++ stats->ipackets += rxq->stats.packets; ++ stats->ibytes += rxq->stats.bytes; ++ stats->ierrors += rxq->stats.errors; + + /* There is no good way to get stats->imissed, not setting it */ + +@@ -1321,6 +1343,7 @@ failed: + /* + * Goes through the IB device list to look for the IB port matching the + * mac_addr. If found, create a rte_eth_dev for it. 
++ * Return value: number of successfully probed devices + */ + static int + mana_pci_probe_mac(struct rte_pci_device *pci_dev, +@@ -1330,8 +1353,9 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, + int ibv_idx; + struct ibv_context *ctx; + int num_devices; +- int ret = 0; ++ int ret; + uint8_t port; ++ int count = 0; + + ibv_list = ibv_get_device_list(&num_devices); + for (ibv_idx = 0; ibv_idx < num_devices; ibv_idx++) { +@@ -1361,6 +1385,12 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, + ret = ibv_query_device_ex(ctx, NULL, &dev_attr); + ibv_close_device(ctx); + ++ if (ret) { ++ DRV_LOG(ERR, "Failed to query IB device %s", ++ ibdev->name); ++ continue; ++ } ++ + for (port = 1; port <= dev_attr.orig_attr.phys_port_cnt; + port++) { + struct rte_ether_addr addr; +@@ -1372,15 +1402,17 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, + continue; + + ret = mana_probe_port(ibdev, &dev_attr, port, pci_dev, &addr); +- if (ret) ++ if (ret) { + DRV_LOG(ERR, "Probe on IB port %u failed %d", port, ret); +- else ++ } else { ++ count++; + DRV_LOG(INFO, "Successfully probed on IB port %u", port); ++ } + } + } + + ibv_free_device_list(ibv_list); +- return ret; ++ return count; + } + + /* +@@ -1394,6 +1426,7 @@ mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct mana_conf conf = {0}; + unsigned int i; + int ret; ++ int count = 0; + + if (args && args->drv_str) { + ret = mana_parse_args(args, &conf); +@@ -1411,16 +1444,21 @@ mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + } + + /* If there are no driver parameters, probe on all ports */ +- if (!conf.index) +- return mana_pci_probe_mac(pci_dev, NULL); ++ if (conf.index) { ++ for (i = 0; i < conf.index; i++) ++ count += mana_pci_probe_mac(pci_dev, ++ &conf.mac_array[i]); ++ } else { ++ count = mana_pci_probe_mac(pci_dev, NULL); ++ } + +- for (i = 0; i < conf.index; i++) { +- ret = mana_pci_probe_mac(pci_dev, &conf.mac_array[i]); +- if (ret) +- return ret; ++ if (!count) { ++ rte_memzone_free(mana_shared_mz); ++ mana_shared_mz = NULL; ++ ret = -ENODEV; + } + +- return 0; ++ return ret; + } + + static int +@@ -1453,6 +1491,7 @@ mana_pci_remove(struct rte_pci_device *pci_dev) + if (!mana_shared_data->primary_cnt) { + DRV_LOG(DEBUG, "free shared memezone data"); + rte_memzone_free(mana_shared_mz); ++ mana_shared_mz = NULL; + } + + rte_spinlock_unlock(&mana_shared_data_lock); +diff --git a/dpdk/drivers/net/mana/mana.h b/dpdk/drivers/net/mana/mana.h +index 4a05238a96..7dfacd57f3 100644 +--- a/dpdk/drivers/net/mana/mana.h ++++ b/dpdk/drivers/net/mana/mana.h +@@ -142,19 +142,6 @@ struct gdma_header { + #define COMPLETION_QUEUE_OWNER_MASK \ + ((1 << (COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE)) - 1) + +-struct gdma_comp { +- struct gdma_header gdma_header; +- +- /* Filled by GDMA core */ +- uint32_t completion_data[GDMA_COMP_DATA_SIZE_IN_UINT32]; +- +- /* Filled by GDMA core */ +- uint32_t work_queue_number; +- +- /* Filled by GDMA core */ +- bool send_work_queue; +-}; +- + struct gdma_hardware_completion_entry { + char dma_client_data[GDMA_COMP_DATA_SIZE]; + union { +@@ -391,6 +378,11 @@ struct mana_gdma_queue { + + #define MANA_MR_BTREE_PER_QUEUE_N 64 + ++struct gdma_comp { ++ /* Filled by GDMA core */ ++ char *cqe_data; ++}; ++ + struct mana_txq { + struct mana_priv *priv; + uint32_t num_desc; +@@ -399,6 +391,7 @@ struct mana_txq { + + struct mana_gdma_queue gdma_sq; + struct mana_gdma_queue gdma_cq; ++ struct gdma_comp *gdma_comp_buf; + + uint32_t tx_vp_offset; + +@@ -433,6 +426,7 @@ struct mana_rxq { + + struct 
mana_gdma_queue gdma_rq; + struct mana_gdma_queue gdma_cq; ++ struct gdma_comp *gdma_comp_buf; + + struct mana_stats stats; + struct mana_mr_btree mr_btree; +@@ -447,6 +441,9 @@ extern int mana_logtype_init; + rte_log(RTE_LOG_ ## level, mana_logtype_driver, "%s(): " fmt "\n", \ + __func__, ## args) + ++#define DP_LOG(level, fmt, args...) \ ++ RTE_LOG_DP(level, PMD, fmt, ## args) ++ + #define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, mana_logtype_init, "%s(): " fmt "\n",\ + __func__, ## args) +@@ -455,11 +452,11 @@ extern int mana_logtype_init; + + int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, + uint32_t queue_id, uint32_t tail, uint8_t arm); +-int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm); ++int mana_rq_ring_doorbell(struct mana_rxq *rxq); + + int gdma_post_work_request(struct mana_gdma_queue *queue, + struct gdma_work_request *work_req, +- struct gdma_posted_wqe_info *wqe_info); ++ uint32_t *wqe_size_in_bu); + uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue); + + uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts, +@@ -473,8 +470,9 @@ uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); + +-int gdma_poll_completion_queue(struct mana_gdma_queue *cq, +- struct gdma_comp *comp); ++uint32_t gdma_poll_completion_queue(struct mana_gdma_queue *cq, ++ struct gdma_comp *gdma_comp, ++ uint32_t max_comp); + + int mana_start_rx_queues(struct rte_eth_dev *dev); + int mana_start_tx_queues(struct rte_eth_dev *dev); +diff --git a/dpdk/drivers/net/mana/mp.c b/dpdk/drivers/net/mana/mp.c +index 92432c431d..738487f65a 100644 +--- a/dpdk/drivers/net/mana/mp.c ++++ b/dpdk/drivers/net/mana/mp.c +@@ -28,8 +28,8 @@ mana_mp_mr_create(struct mana_priv *priv, uintptr_t addr, uint32_t len) + if (!ibv_mr) + return -errno; + +- DRV_LOG(DEBUG, "MR (2nd) lkey %u addr %p len %zu", +- ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); ++ DP_LOG(DEBUG, "MR (2nd) lkey %u addr %p len %zu", ++ ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); + + mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); + if (!mr) { +diff --git a/dpdk/drivers/net/mana/mr.c b/dpdk/drivers/net/mana/mr.c +index 22df0917bb..fec0dc961c 100644 +--- a/dpdk/drivers/net/mana/mr.c ++++ b/dpdk/drivers/net/mana/mr.c +@@ -47,23 +47,23 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + + for (i = 0; i < pool->nb_mem_chunks; i++) { + if (ranges[i].len > priv->max_mr_size) { +- DRV_LOG(ERR, "memory chunk size %u exceeding max MR", +- ranges[i].len); ++ DP_LOG(ERR, "memory chunk size %u exceeding max MR", ++ ranges[i].len); + return -ENOMEM; + } + +- DRV_LOG(DEBUG, +- "registering memory chunk start 0x%" PRIx64 " len %u", +- ranges[i].start, ranges[i].len); ++ DP_LOG(DEBUG, ++ "registering memory chunk start 0x%" PRIx64 " len %u", ++ ranges[i].start, ranges[i].len); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + /* Send a message to the primary to do MR */ + ret = mana_mp_req_mr_create(priv, ranges[i].start, + ranges[i].len); + if (ret) { +- DRV_LOG(ERR, +- "MR failed start 0x%" PRIx64 " len %u", +- ranges[i].start, ranges[i].len); ++ DP_LOG(ERR, ++ "MR failed start 0x%" PRIx64 " len %u", ++ ranges[i].start, ranges[i].len); + return ret; + } + continue; +@@ -72,8 +72,8 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + ibv_mr = ibv_reg_mr(priv->ib_pd, (void *)ranges[i].start, + ranges[i].len, IBV_ACCESS_LOCAL_WRITE); + 
if (ibv_mr) { +- DRV_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64, +- ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); ++ DP_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64, ++ ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); + + mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); + mr->lkey = ibv_mr->lkey; +@@ -86,7 +86,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + rte_spinlock_unlock(&priv->mr_btree_lock); + if (ret) { + ibv_dereg_mr(ibv_mr); +- DRV_LOG(ERR, "Failed to add to global MR btree"); ++ DP_LOG(ERR, "Failed to add to global MR btree"); + return ret; + } + +@@ -95,12 +95,12 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + /* Don't need to clean up MR as it's already + * in the global tree + */ +- DRV_LOG(ERR, "Failed to add to local MR btree"); ++ DP_LOG(ERR, "Failed to add to local MR btree"); + return ret; + } + } else { +- DRV_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u", +- ranges[i].start, ranges[i].len); ++ DP_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u", ++ ranges[i].start, ranges[i].len); + return -errno; + } + } +@@ -118,7 +118,7 @@ mana_del_pmd_mr(struct mana_mr_cache *mr) + + ret = ibv_dereg_mr(ibv_mr); + if (ret) +- DRV_LOG(ERR, "dereg MR failed ret %d", ret); ++ DP_LOG(ERR, "dereg MR failed ret %d", ret); + } + + /* +@@ -133,17 +133,16 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv, + struct mana_mr_cache *mr; + uint16_t idx; + +- DRV_LOG(DEBUG, "finding mr for mbuf addr %p len %d", +- mbuf->buf_addr, mbuf->buf_len); ++ DP_LOG(DEBUG, "finding mr for mbuf addr %p len %d", ++ mbuf->buf_addr, mbuf->buf_len); + + try_again: + /* First try to find the MR in local queue tree */ + mr = mana_mr_btree_lookup(local_mr_btree, &idx, + (uintptr_t)mbuf->buf_addr, mbuf->buf_len); + if (mr) { +- DRV_LOG(DEBUG, +- "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64, +- mr->lkey, mr->addr, mr->len); ++ DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64, ++ mr->lkey, mr->addr, mr->len); + return mr; + } + +@@ -158,25 +157,25 @@ try_again: + if (mr) { + ret = mana_mr_btree_insert(local_mr_btree, mr); + if (ret) { +- DRV_LOG(DEBUG, "Failed to add MR to local tree."); ++ DP_LOG(ERR, "Failed to add MR to local tree."); + return NULL; + } + +- DRV_LOG(DEBUG, +- "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64, +- mr->lkey, mr->addr, mr->len); ++ DP_LOG(DEBUG, ++ "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64, ++ mr->lkey, mr->addr, mr->len); + return mr; + } + + if (second_try) { +- DRV_LOG(ERR, "Internal error second try failed"); ++ DP_LOG(ERR, "Internal error second try failed"); + return NULL; + } + + ret = mana_new_pmd_mr(local_mr_btree, priv, pool); + if (ret) { +- DRV_LOG(ERR, "Failed to allocate MR ret %d addr %p len %d", +- ret, mbuf->buf_addr, mbuf->buf_len); ++ DP_LOG(ERR, "Failed to allocate MR ret %d addr %p len %d", ++ ret, mbuf->buf_addr, mbuf->buf_len); + return NULL; + } + +@@ -215,11 +214,11 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n) + mem = rte_realloc_socket(bt->table, n * sizeof(struct mana_mr_cache), + 0, bt->socket); + if (!mem) { +- DRV_LOG(ERR, "Failed to expand btree size %d", n); ++ DP_LOG(ERR, "Failed to expand btree size %d", n); + return -1; + } + +- DRV_LOG(ERR, "Expanded btree to size %d", n); ++ DP_LOG(ERR, "Expanded btree to size %d", n); + bt->table = mem; + bt->size = n; + +@@ -266,9 +265,9 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, + if (addr + len <= table[base].addr + table[base].len) + return 
&table[base]; + +- DRV_LOG(DEBUG, +- "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found", +- addr, len, *idx, addr + len); ++ DP_LOG(DEBUG, ++ "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found", ++ addr, len, *idx, addr + len); + + return NULL; + } +@@ -317,8 +316,8 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) + uint16_t shift; + + if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { +- DRV_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree", +- entry->addr, entry->len); ++ DP_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree", ++ entry->addr, entry->len); + return 0; + } + +@@ -332,17 +331,17 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) + idx++; + shift = (bt->len - idx) * sizeof(struct mana_mr_cache); + if (shift) { +- DRV_LOG(DEBUG, "Moving %u bytes from idx %u to %u", +- shift, idx, idx + 1); ++ DP_LOG(DEBUG, "Moving %u bytes from idx %u to %u", ++ shift, idx, idx + 1); + memmove(&table[idx + 1], &table[idx], shift); + } + + table[idx] = *entry; + bt->len++; + +- DRV_LOG(DEBUG, +- "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu", +- table, idx, entry->addr, entry->len); ++ DP_LOG(DEBUG, ++ "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu", ++ table, idx, entry->addr, entry->len); + + return 0; + } +diff --git a/dpdk/drivers/net/mana/rx.c b/dpdk/drivers/net/mana/rx.c +index 55247889c1..fdb56ce05d 100644 +--- a/dpdk/drivers/net/mana/rx.c ++++ b/dpdk/drivers/net/mana/rx.c +@@ -22,7 +22,7 @@ static uint8_t mana_rss_hash_key_default[TOEPLITZ_HASH_KEY_SIZE_IN_BYTES] = { + }; + + int +-mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm) ++mana_rq_ring_doorbell(struct mana_rxq *rxq) + { + struct mana_priv *priv = rxq->priv; + int ret; +@@ -36,13 +36,16 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm) + db_page = process_priv->db_page; + } + ++ /* Hardware Spec specifies that software client should set 0 for ++ * wqe_cnt for Receive Queues. 
++ */ + ret = mana_ring_doorbell(db_page, GDMA_QUEUE_RECEIVE, + rxq->gdma_rq.id, + rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE, +- arm); ++ 0); + + if (ret) +- DRV_LOG(ERR, "failed to ring RX doorbell ret %d", ret); ++ DP_LOG(ERR, "failed to ring RX doorbell ret %d", ret); + + return ret; + } +@@ -52,8 +55,8 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) + { + struct rte_mbuf *mbuf = NULL; + struct gdma_sgl_element sgl[1]; +- struct gdma_work_request request = {0}; +- struct gdma_posted_wqe_info wqe_info = {0}; ++ struct gdma_work_request request; ++ uint32_t wqe_size_in_bu; + struct mana_priv *priv = rxq->priv; + int ret; + struct mana_mr_cache *mr; +@@ -66,13 +69,12 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) + + mr = mana_find_pmd_mr(&rxq->mr_btree, priv, mbuf); + if (!mr) { +- DRV_LOG(ERR, "failed to register RX MR"); ++ DP_LOG(ERR, "failed to register RX MR"); + rte_pktmbuf_free(mbuf); + return -ENOMEM; + } + + request.gdma_header.struct_size = sizeof(request); +- wqe_info.gdma_header.struct_size = sizeof(wqe_info); + + sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t)); + sgl[0].memory_key = mr->lkey; +@@ -87,17 +89,17 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) + request.flags = 0; + request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT; + +- ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_info); ++ ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_size_in_bu); + if (!ret) { + struct mana_rxq_desc *desc = + &rxq->desc_ring[rxq->desc_ring_head]; + + /* update queue for tracking pending packets */ + desc->pkt = mbuf; +- desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu; ++ desc->wqe_size_in_bu = wqe_size_in_bu; + rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc; + } else { +- DRV_LOG(ERR, "failed to post recv ret %d", ret); ++ DP_LOG(DEBUG, "failed to post recv ret %d", ret); + return ret; + } + +@@ -116,12 +118,12 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq) + for (i = 0; i < rxq->num_desc; i++) { + ret = mana_alloc_and_post_rx_wqe(rxq); + if (ret) { +- DRV_LOG(ERR, "failed to post RX ret = %d", ret); ++ DP_LOG(ERR, "failed to post RX ret = %d", ret); + return ret; + } + } + +- mana_rq_ring_doorbell(rxq, rxq->num_desc); ++ mana_rq_ring_doorbell(rxq); + + return ret; + } +@@ -381,27 +383,20 @@ uint16_t + mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + { + uint16_t pkt_received = 0; +- uint8_t wqe_posted = 0; ++ uint16_t wqe_posted = 0; + struct mana_rxq *rxq = dpdk_rxq; + struct mana_priv *priv = rxq->priv; +- struct gdma_comp comp; + struct rte_mbuf *mbuf; + int ret; ++ uint32_t num_pkts; + +- while (pkt_received < pkts_n && +- gdma_poll_completion_queue(&rxq->gdma_cq, &comp) == 1) { +- struct mana_rxq_desc *desc; +- struct mana_rx_comp_oob *oob = +- (struct mana_rx_comp_oob *)&comp.completion_data[0]; +- +- if (comp.work_queue_number != rxq->gdma_rq.id) { +- DRV_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x", +- comp.work_queue_number, rxq->gdma_rq.id); +- rxq->stats.errors++; +- break; +- } ++ num_pkts = gdma_poll_completion_queue(&rxq->gdma_cq, rxq->gdma_comp_buf, pkts_n); ++ for (uint32_t i = 0; i < num_pkts; i++) { ++ struct mana_rx_comp_oob *oob = (struct mana_rx_comp_oob *) ++ rxq->gdma_comp_buf[i].cqe_data; ++ struct mana_rxq_desc *desc = ++ &rxq->desc_ring[rxq->desc_ring_tail]; + +- desc = &rxq->desc_ring[rxq->desc_ring_tail]; + rxq->gdma_rq.tail += desc->wqe_size_in_bu; + mbuf = desc->pkt; + +@@ -411,22 +406,22 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf 
**pkts, uint16_t pkts_n) + break; + + case CQE_RX_TRUNCATED: +- DRV_LOG(ERR, "Drop a truncated packet"); ++ DP_LOG(DEBUG, "Drop a truncated packet"); + rxq->stats.errors++; + rte_pktmbuf_free(mbuf); + goto drop; + + case CQE_RX_COALESCED_4: +- DRV_LOG(ERR, "RX coalescing is not supported"); ++ DP_LOG(ERR, "RX coalescing is not supported"); + continue; + + default: +- DRV_LOG(ERR, "Unknown RX CQE type %d", +- oob->cqe_hdr.cqe_type); ++ DP_LOG(ERR, "Unknown RX CQE type %d", ++ oob->cqe_hdr.cqe_type); + continue; + } + +- DRV_LOG(DEBUG, "mana_rx_comp_oob CQE_RX_OKAY rxq %p", rxq); ++ DP_LOG(DEBUG, "mana_rx_comp_oob CQE_RX_OKAY rxq %p", rxq); + + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; +@@ -470,7 +465,7 @@ drop: + /* Post another request */ + ret = mana_alloc_and_post_rx_wqe(rxq); + if (ret) { +- DRV_LOG(ERR, "failed to post rx wqe ret=%d", ret); ++ DP_LOG(ERR, "failed to post rx wqe ret=%d", ret); + break; + } + +@@ -478,7 +473,7 @@ drop: + } + + if (wqe_posted) +- mana_rq_ring_doorbell(rxq, wqe_posted); ++ mana_rq_ring_doorbell(rxq); + + return pkt_received; + } +@@ -490,8 +485,8 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm) + uint32_t head = rxq->gdma_cq.head % + (rxq->gdma_cq.count << COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE); + +- DRV_LOG(ERR, "Ringing completion queue ID %u head %u arm %d", +- rxq->gdma_cq.id, head, arm); ++ DP_LOG(DEBUG, "Ringing completion queue ID %u head %u arm %d", ++ rxq->gdma_cq.id, head, arm); + + return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION, + rxq->gdma_cq.id, head, arm); +@@ -521,8 +516,8 @@ mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) + + if (ret) { + if (ret != EAGAIN) +- DRV_LOG(ERR, "Can't disable RX intr queue %d", +- rx_queue_id); ++ DP_LOG(ERR, "Can't disable RX intr queue %d", ++ rx_queue_id); + } else { + ibv_ack_cq_events(rxq->cq, 1); + } +diff --git a/dpdk/drivers/net/mana/tx.c b/dpdk/drivers/net/mana/tx.c +index 300bf27cc1..39cc59550e 100644 +--- a/dpdk/drivers/net/mana/tx.c ++++ b/dpdk/drivers/net/mana/tx.c +@@ -170,32 +170,35 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + { + struct mana_txq *txq = dpdk_txq; + struct mana_priv *priv = txq->priv; +- struct gdma_comp comp; + int ret; + void *db_page; + uint16_t pkt_sent = 0; ++ uint32_t num_comp; + + /* Process send completions from GDMA */ +- while (gdma_poll_completion_queue(&txq->gdma_cq, &comp) == 1) { ++ num_comp = gdma_poll_completion_queue(&txq->gdma_cq, ++ txq->gdma_comp_buf, txq->num_desc); ++ ++ for (uint32_t i = 0; i < num_comp; i++) { + struct mana_txq_desc *desc = + &txq->desc_ring[txq->desc_ring_tail]; +- struct mana_tx_comp_oob *oob = +- (struct mana_tx_comp_oob *)&comp.completion_data[0]; ++ struct mana_tx_comp_oob *oob = (struct mana_tx_comp_oob *) ++ txq->gdma_comp_buf[i].cqe_data; + + if (oob->cqe_hdr.cqe_type != CQE_TX_OKAY) { +- DRV_LOG(ERR, +- "mana_tx_comp_oob cqe_type %u vendor_err %u", +- oob->cqe_hdr.cqe_type, oob->cqe_hdr.vendor_err); ++ DP_LOG(ERR, ++ "mana_tx_comp_oob cqe_type %u vendor_err %u", ++ oob->cqe_hdr.cqe_type, oob->cqe_hdr.vendor_err); + txq->stats.errors++; + } else { +- DRV_LOG(DEBUG, "mana_tx_comp_oob CQE_TX_OKAY"); ++ DP_LOG(DEBUG, "mana_tx_comp_oob CQE_TX_OKAY"); + txq->stats.packets++; + } + + if (!desc->pkt) { +- DRV_LOG(ERR, "mana_txq_desc has a NULL pkt"); ++ DP_LOG(ERR, "mana_txq_desc has a NULL pkt"); + } else { +- txq->stats.bytes += desc->pkt->data_len; ++ txq->stats.bytes += desc->pkt->pkt_len; + rte_pktmbuf_free(desc->pkt); + } + +@@ -208,14 +211,14 
@@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + for (uint16_t pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { + struct rte_mbuf *m_pkt = tx_pkts[pkt_idx]; + struct rte_mbuf *m_seg = m_pkt; +- struct transmit_oob_v2 tx_oob = {0}; +- struct one_sgl sgl = {0}; ++ struct transmit_oob_v2 tx_oob; ++ struct one_sgl sgl; + uint16_t seg_idx; + + /* Drop the packet if it exceeds max segments */ + if (m_pkt->nb_segs > priv->max_send_sge) { +- DRV_LOG(ERR, "send packet segments %d exceeding max", +- m_pkt->nb_segs); ++ DP_LOG(ERR, "send packet segments %d exceeding max", ++ m_pkt->nb_segs); + continue; + } + +@@ -257,12 +260,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + tcp_hdr->cksum = rte_ipv6_phdr_cksum(ip_hdr, + m_pkt->ol_flags); + } else { +- DRV_LOG(ERR, "Invalid input for TCP CKSUM"); ++ DP_LOG(ERR, "Invalid input for TCP CKSUM"); + } + + tx_oob.short_oob.tx_compute_TCP_checksum = 1; + tx_oob.short_oob.tx_transport_header_offset = + m_pkt->l2_len + m_pkt->l3_len; ++ } else { ++ tx_oob.short_oob.tx_compute_TCP_checksum = 0; + } + + if ((m_pkt->ol_flags & RTE_MBUF_F_TX_L4_MASK) == +@@ -297,10 +302,12 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + m_pkt->ol_flags); + + } else { +- DRV_LOG(ERR, "Invalid input for UDP CKSUM"); ++ DP_LOG(ERR, "Invalid input for UDP CKSUM"); + } + + tx_oob.short_oob.tx_compute_UDP_checksum = 1; ++ } else { ++ tx_oob.short_oob.tx_compute_UDP_checksum = 0; + } + + tx_oob.short_oob.suppress_tx_CQE_generation = 0; +@@ -310,20 +317,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + get_vsq_frame_num(txq->gdma_sq.id); + tx_oob.short_oob.short_vport_offset = txq->tx_vp_offset; + +- DRV_LOG(DEBUG, "tx_oob packet_format %u ipv4 %u ipv6 %u", +- tx_oob.short_oob.packet_format, +- tx_oob.short_oob.tx_is_outer_ipv4, +- tx_oob.short_oob.tx_is_outer_ipv6); ++ DP_LOG(DEBUG, "tx_oob packet_format %u ipv4 %u ipv6 %u", ++ tx_oob.short_oob.packet_format, ++ tx_oob.short_oob.tx_is_outer_ipv4, ++ tx_oob.short_oob.tx_is_outer_ipv6); + +- DRV_LOG(DEBUG, "tx_oob checksum ip %u tcp %u udp %u offset %u", +- tx_oob.short_oob.tx_compute_IP_header_checksum, +- tx_oob.short_oob.tx_compute_TCP_checksum, +- tx_oob.short_oob.tx_compute_UDP_checksum, +- tx_oob.short_oob.tx_transport_header_offset); ++ DP_LOG(DEBUG, "tx_oob checksum ip %u tcp %u udp %u offset %u", ++ tx_oob.short_oob.tx_compute_IP_header_checksum, ++ tx_oob.short_oob.tx_compute_TCP_checksum, ++ tx_oob.short_oob.tx_compute_UDP_checksum, ++ tx_oob.short_oob.tx_transport_header_offset); + +- DRV_LOG(DEBUG, "pkt[%d]: buf_addr 0x%p, nb_segs %d, pkt_len %d", +- pkt_idx, m_pkt->buf_addr, m_pkt->nb_segs, +- m_pkt->pkt_len); ++ DP_LOG(DEBUG, "pkt[%d]: buf_addr 0x%p, nb_segs %d, pkt_len %d", ++ pkt_idx, m_pkt->buf_addr, m_pkt->nb_segs, ++ m_pkt->pkt_len); + + /* Create SGL for packet data buffers */ + for (seg_idx = 0; seg_idx < m_pkt->nb_segs; seg_idx++) { +@@ -331,8 +338,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + mana_find_pmd_mr(&txq->mr_btree, priv, m_seg); + + if (!mr) { +- DRV_LOG(ERR, "failed to get MR, pkt_idx %u", +- pkt_idx); ++ DP_LOG(ERR, "failed to get MR, pkt_idx %u", ++ pkt_idx); + break; + } + +@@ -342,11 +349,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + sgl.gdma_sgl[seg_idx].size = m_seg->data_len; + sgl.gdma_sgl[seg_idx].memory_key = mr->lkey; + +- DRV_LOG(DEBUG, +- "seg idx %u addr 0x%" PRIx64 " size %x key %x", +- seg_idx, 
sgl.gdma_sgl[seg_idx].address, +- sgl.gdma_sgl[seg_idx].size, +- sgl.gdma_sgl[seg_idx].memory_key); ++ DP_LOG(DEBUG, ++ "seg idx %u addr 0x%" PRIx64 " size %x key %x", ++ seg_idx, sgl.gdma_sgl[seg_idx].address, ++ sgl.gdma_sgl[seg_idx].size, ++ sgl.gdma_sgl[seg_idx].memory_key); + + m_seg = m_seg->next; + } +@@ -355,11 +362,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + if (seg_idx != m_pkt->nb_segs) + continue; + +- struct gdma_work_request work_req = {0}; +- struct gdma_posted_wqe_info wqe_info = {0}; ++ struct gdma_work_request work_req; ++ uint32_t wqe_size_in_bu; + + work_req.gdma_header.struct_size = sizeof(work_req); +- wqe_info.gdma_header.struct_size = sizeof(wqe_info); + + work_req.sgl = sgl.gdma_sgl; + work_req.num_sgl_elements = m_pkt->nb_segs; +@@ -370,24 +376,24 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT; + + ret = gdma_post_work_request(&txq->gdma_sq, &work_req, +- &wqe_info); ++ &wqe_size_in_bu); + if (!ret) { + struct mana_txq_desc *desc = + &txq->desc_ring[txq->desc_ring_head]; + + /* Update queue for tracking pending requests */ + desc->pkt = m_pkt; +- desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu; ++ desc->wqe_size_in_bu = wqe_size_in_bu; + txq->desc_ring_head = + (txq->desc_ring_head + 1) % txq->num_desc; + + pkt_sent++; + +- DRV_LOG(DEBUG, "nb_pkts %u pkt[%d] sent", +- nb_pkts, pkt_idx); ++ DP_LOG(DEBUG, "nb_pkts %u pkt[%d] sent", ++ nb_pkts, pkt_idx); + } else { +- DRV_LOG(INFO, "pkt[%d] failed to post send ret %d", +- pkt_idx, ret); ++ DP_LOG(DEBUG, "pkt[%d] failed to post send ret %d", ++ pkt_idx, ret); + break; + } + } +@@ -409,7 +415,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + GDMA_WQE_ALIGNMENT_UNIT_SIZE, + 0); + if (ret) +- DRV_LOG(ERR, "mana_ring_doorbell failed ret %d", ret); ++ DP_LOG(ERR, "mana_ring_doorbell failed ret %d", ret); + } + + return pkt_sent; +diff --git a/dpdk/drivers/net/meson.build b/dpdk/drivers/net/meson.build +index 6470bf3636..f83a6de117 100644 +--- a/dpdk/drivers/net/meson.build ++++ b/dpdk/drivers/net/meson.build +@@ -36,6 +36,7 @@ drivers = [ + 'ixgbe', + 'kni', + 'liquidio', ++ 'mana', + 'memif', + 'mlx4', + 'mlx5', +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c +index 721376b8da..acad42e12e 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c +@@ -507,9 +507,11 @@ mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, + uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0}; + uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + struct mlx5dr_devx_obj *devx_obj; +- void *pattern_data; ++ uint64_t *pattern_data; ++ int num_of_actions; + void *pattern; + void *attr; ++ int i; + + if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) { + DR_LOG(ERR, "Pattern length %d exceeds limit %d", +@@ -535,9 +537,19 @@ mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, + /* Pattern_length is in ddwords */ + MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE)); + +- pattern_data = MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data); ++ pattern_data = (uint64_t *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data); + memcpy(pattern_data, actions, pattern_length); + ++ num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE; ++ for (i = 0; i < num_of_actions; i++) { ++ int type; ++ ++ 
type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
++		if (type != MLX5_MODIFICATION_TYPE_COPY)
++			/* Action type copy uses all bytes for control */
++			MLX5_SET(set_action_in, &pattern_data[i], data, 0);
++	}
++
+ 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
+ 	if (!devx_obj->obj) {
+ 		DR_LOG(ERR, "Failed to create header_modify_pattern");
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c
+index 6b98eb8c96..e61ca7b9b9 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c
+@@ -123,6 +123,7 @@ struct mlx5dr_definer_conv_data {
+ 	X(SET,		ipv4_next_proto,	v->next_proto_id,	rte_ipv4_hdr) \
+ 	X(SET,		ipv4_version,		STE_IPV4,		rte_ipv4_hdr) \
+ 	X(SET_BE16,	ipv4_frag,		v->fragment_offset,	rte_ipv4_hdr) \
++	X(SET,		ip_fragmented,		!!v->fragment_offset,	rte_ipv4_hdr) \
+ 	X(SET_BE16,	ipv6_payload_len,	v->hdr.payload_len,	rte_flow_item_ipv6) \
+ 	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
+ 	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
+@@ -542,8 +543,13 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
+ 	if (m->fragment_offset) {
+ 		fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
+ 		fc->item_idx = item_idx;
+-		fc->tag_set = &mlx5dr_definer_ipv4_frag_set;
+-		DR_CALC_SET(fc, eth_l3, fragment_offset, inner);
++		if (rte_be_to_cpu_16(m->fragment_offset) == 0x3fff) {
++			fc->tag_set = &mlx5dr_definer_ip_fragmented_set;
++			DR_CALC_SET(fc, eth_l2, ip_fragmented, inner);
++		} else {
++			fc->tag_set = &mlx5dr_definer_ipv4_frag_set;
++			DR_CALC_SET(fc, eth_l3, ipv4_frag, inner);
++		}
+ 	}
+ 
+ 	if (m->next_proto_id) {
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h
+index d52c6b0627..5b38a54e6b 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h
+@@ -196,8 +196,13 @@ struct mlx5_ifc_definer_hl_eth_l3_bits {
+ 	u8 time_to_live_hop_limit[0x8];
+ 	u8 protocol_next_header[0x8];
+ 	u8 identification[0x10];
+-	u8 flags[0x3];
+-	u8 fragment_offset[0xd];
++	union {
++		u8 ipv4_frag[0x10];
++		struct {
++			u8 flags[0x3];
++			u8 fragment_offset[0xd];
++		};
++	};
+ 	u8 ipv4_total_length[0x10];
+ 	u8 checksum[0x10];
+ 	u8 reserved_at_60[0xc];
+diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_pool.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_pool.c
+index fdbd3d438d..af6a5c743b 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_pool.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_pool.c
+@@ -464,13 +464,6 @@ static void mlx5dr_pool_general_element_db_uninit(struct mlx5dr_pool *pool)
+  */
+ static int mlx5dr_pool_general_element_db_init(struct mlx5dr_pool *pool)
+ {
+-	pool->db.element_manager = simple_calloc(1, sizeof(*pool->db.element_manager));
+-	if (!pool->db.element_manager) {
+-		DR_LOG(ERR, "No mem for general elemnt_manager");
+-		rte_errno = ENOMEM;
+-		return rte_errno;
+-	}
+-
+ 	pool->p_db_uninit = &mlx5dr_pool_general_element_db_uninit;
+ 	pool->p_get_chunk = &mlx5dr_pool_general_element_db_get_chunk;
+ 	pool->p_put_chunk = &mlx5dr_pool_general_element_db_put_chunk;
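The mlx5dr_send.c hunk below corrects an inverted errno convention: DPDK expects rte_errno to carry a positive errno code while the function itself returns a negative status. A minimal sketch of that convention, assuming only <errno.h> and <rte_errno.h>; example_queue_action is an illustrative stand-in, not part of the patch:

#include <errno.h>
#include <rte_errno.h>

/* DPDK error convention: store the positive errno code in rte_errno
 * and return a negative value. Writing "rte_errno = -EINVAL" stores a
 * negative code, which callers would then misreport.
 */
static int
example_queue_action(unsigned int actions)
{
	if (actions == 0) {
		rte_errno = EINVAL;	/* positive code for callers to read */
		return -rte_errno;	/* negative return status */
	}
	return 0;
}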
diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c
+index 5c8bbe6fc6..a8aba31cbe 100644
+--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c
++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c
+@@ -842,8 +842,8 @@ int mlx5dr_send_queue_action(struct mlx5dr_context *ctx,
+ 		/* Signal on the last posted WQE */
+ 		mlx5dr_send_engine_flush_queue(queue);
+ 	} else {
+-		rte_errno = -EINVAL;
+-		return rte_errno;
++		rte_errno = EINVAL;
++		return -rte_errno;
+ 	}
+ 
+ 	return 0;
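The mlx5_ethdev_os.c changes below switch the phys_port_name read from a fixed IF_NAMESIZE buffer to getline(), so longer sysfs values are no longer truncated, and strip the newline before parsing. A self-contained sketch of that pattern, assuming a POSIX.1-2008 getline(); read_sysfs_line is an illustrative stand-in, not PMD code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Read one line from a sysfs attribute into a getline()-sized heap
 * buffer and strip the trailing newline. Returns 0 on success; the
 * caller frees *out.
 */
static int
read_sysfs_line(const char *path, char **out)
{
	char *line = NULL;
	size_t size = 0;
	ssize_t len;
	char *nl;
	FILE *file = fopen(path, "rb");

	if (file == NULL)
		return -1;
	len = getline(&line, &size, file);
	fclose(file);
	if (len < 0) {
		free(line);
		return -1;
	}
	nl = strchr(line, '\n');
	if (nl != NULL)
		*nl = '\0';
	*out = line;
	return 0;
}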
+diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+index 72268c0c8a..639e629fe4 100644
+--- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
++++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+@@ -28,6 +28,7 @@
+ #include 
+ #include 
+ #include 
++#include 
+ #include 
+ #include 
+ #include 
+@@ -745,6 +746,7 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
+ 
+ 	for (i = 0; i < sh->max_port; ++i) {
+ 		struct rte_eth_dev *dev;
++		struct mlx5_priv *priv;
+ 
+ 		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
+ 			/*
+@@ -755,9 +757,14 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
+ 		}
+ 		dev = &rte_eth_devices[sh->port[i].ih_port_id];
+ 		MLX5_ASSERT(dev);
+-		if (dev->data->dev_conf.intr_conf.rmv)
++		priv = dev->data->dev_private;
++		MLX5_ASSERT(priv);
++		if (!priv->rmv_notified && dev->data->dev_conf.intr_conf.rmv) {
++			/* Notify driver about removal only once. */
++			priv->rmv_notified = 1;
+ 			rte_eth_dev_callback_process
+ 				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
++		}
+ 	}
+ }
+ 
+@@ -829,21 +836,29 @@ mlx5_dev_interrupt_handler(void *cb_arg)
+ 		struct rte_eth_dev *dev;
+ 		uint32_t tmp;
+ 
+-		if (mlx5_glue->get_async_event(sh->cdev->ctx, &event))
++		if (mlx5_glue->get_async_event(sh->cdev->ctx, &event)) {
++			if (errno == EIO) {
++				DRV_LOG(DEBUG,
++					"IBV async event queue closed on: %s",
++					sh->ibdev_name);
++				mlx5_dev_interrupt_device_fatal(sh);
++			}
+ 			break;
+-		/* Retrieve and check IB port index. */
+-		tmp = (uint32_t)event.element.port_num;
+-		if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
++		}
++		if (event.event_type == IBV_EVENT_DEVICE_FATAL) {
+ 			/*
+-			 * The DEVICE_FATAL event is called once for
+-			 * entire device without port specifying.
+-			 * We should notify all existing ports.
++			 * The DEVICE_FATAL event can be raised by the kernel
++			 * twice - from the mlx5 and uverbs layers, and the
++			 * port index is not applicable. We should notify all
++			 * existing ports.
+ 			 */
+-			mlx5_glue->ack_async_event(&event);
+ 			mlx5_dev_interrupt_device_fatal(sh);
++			mlx5_glue->ack_async_event(&event);
+ 			continue;
+ 		}
+-		MLX5_ASSERT(tmp && (tmp <= sh->max_port));
++		/* Retrieve and check IB port index. */
++		tmp = (uint32_t)event.element.port_num;
++		MLX5_ASSERT(tmp <= sh->max_port);
+ 		if (!tmp) {
+ 			/* Unsupported device level event. */
+ 			mlx5_glue->ack_async_event(&event);
+@@ -1034,7 +1049,8 @@ int
+ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+ {
+ 	char ifname[IF_NAMESIZE];
+-	char port_name[IF_NAMESIZE];
++	char *port_name = NULL;
++	size_t port_name_size = 0;
+ 	FILE *file;
+ 	struct mlx5_switch_info data = {
+ 		.master = 0,
+@@ -1047,6 +1063,7 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+ 	bool port_switch_id_set = false;
+ 	bool device_dir = false;
+ 	char c;
++	ssize_t line_size;
+ 
+ 	if (!if_indextoname(ifindex, ifname)) {
+ 		rte_errno = errno;
+@@ -1062,8 +1079,21 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+ 
+ 	file = fopen(phys_port_name, "rb");
+ 	if (file != NULL) {
+-		if (fgets(port_name, IF_NAMESIZE, file) != NULL)
++		char *tail_nl;
++
++		line_size = getline(&port_name, &port_name_size, file);
++		if (line_size < 0) {
++			fclose(file);
++			rte_errno = errno;
++			return -rte_errno;
++		} else if (line_size > 0) {
++			/* Remove trailing newline character. */
++			tail_nl = strchr(port_name, '\n');
++			if (tail_nl)
++				*tail_nl = '\0';
+ 			mlx5_translate_port_name(port_name, &data);
++		}
++		free(port_name);
+ 		fclose(file);
+ 	}
+ 	file = fopen(phys_switch_id, "rb");
+@@ -1776,3 +1806,70 @@ exit:
+ 	mlx5_free(sset_info);
+ 	return ret;
+ }
++
++/**
++ * Unmaps HCA PCI BAR from the current process address space.
++ *
++ * @param dev
++ *   Pointer to Ethernet device structure.
++ */
++void mlx5_txpp_unmap_hca_bar(struct rte_eth_dev *dev)
++{
++	struct mlx5_proc_priv *ppriv = dev->process_private;
++
++	if (ppriv && ppriv->hca_bar) {
++		rte_mem_unmap(ppriv->hca_bar, MLX5_ST_SZ_BYTES(initial_seg));
++		ppriv->hca_bar = NULL;
++	}
++}
++
++/**
++ * Maps HCA PCI BAR to the current process address space.
++ * Stores the pointer in the process private structure, allowing the
++ * internal and real time counters to be read directly from the HW.
++ *
++ * @param dev
++ *   Pointer to Ethernet device structure.
++ *
++ * @return
++ *   0 on success, with a non-NULL pointer to the mapped area stored in the
++ *   process private structure; negative value otherwise, with a NULL pointer.
++ */
++int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
++{
++	struct mlx5_proc_priv *ppriv = dev->process_private;
++	char pci_addr[PCI_PRI_STR_SIZE] = { 0 };
++	void *base, *expected = NULL;
++	int fd, ret;
++
++	if (!ppriv) {
++		rte_errno = ENOMEM;
++		return -rte_errno;
++	}
++	if (ppriv->hca_bar)
++		return 0;
++	ret = mlx5_dev_to_pci_str(dev->device, pci_addr, sizeof(pci_addr));
++	if (ret < 0)
++		return -rte_errno;
++	/* Open PCI device resource 0 - HCA initialize segment */
++	MKSTR(name, "/sys/bus/pci/devices/%s/resource0", pci_addr);
++	fd = open(name, O_RDWR | O_SYNC);
++	if (fd == -1) {
++		rte_errno = ENOTSUP;
++		return -ENOTSUP;
++	}
++	base = rte_mem_map(NULL, MLX5_ST_SZ_BYTES(initial_seg),
++			   RTE_PROT_READ, RTE_MAP_SHARED, fd, 0);
++	close(fd);
++	if (!base) {
++		rte_errno = ENOTSUP;
++		return -ENOTSUP;
++	}
++	/* Check there is no concurrent mapping in another thread.
*/ ++ if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected, ++ base, false, ++ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) ++ rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg)); ++ return 0; ++} ++ +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c +index 3c9a823edf..b139bb75b9 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c +@@ -51,7 +51,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item, + int + mlx5_flow_os_init_workspace_once(void) + { +- if (rte_thread_key_create(&key_workspace, flow_release_workspace)) { ++ if (rte_thread_key_create(&key_workspace, NULL)) { + DRV_LOG(ERR, "Can't create flow workspace data thread key."); + rte_errno = ENOMEM; + return -rte_errno; +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c +index a71474c90a..6fdade7dab 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c +@@ -873,10 +873,10 @@ mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused) + */ + if (!priv->sh->drop_action_check_flag) { + if (!mlx5_flow_discover_dr_action_support(dev)) +- priv->sh->dr_drop_action_en = 1; ++ priv->sh->dr_root_drop_action_en = 1; + priv->sh->drop_action_check_flag = 1; + } +- if (priv->sh->dr_drop_action_en) ++ if (priv->sh->dr_root_drop_action_en) + priv->root_drop_action = priv->sh->dr_drop_action; + else + priv->root_drop_action = priv->drop_queue.hrxq->action; +@@ -1613,6 +1613,22 @@ err_secondary: + err = EINVAL; + goto error; + } ++ /* ++ * If representor matching is disabled, PMD cannot create default flow rules ++ * to receive traffic for all ports, since implicit source port match is not added. ++ * Isolated mode is forced. 
++ */ ++ if (priv->sh->config.dv_esw_en && !priv->sh->config.repr_matching) { ++ err = mlx5_flow_isolate(eth_dev, 1, NULL); ++ if (err < 0) { ++ err = -err; ++ goto error; ++ } ++ DRV_LOG(WARNING, "port %u ingress traffic is restricted to defined " ++ "flow rules (isolated mode) since representor " ++ "matching is disabled", ++ eth_dev->data->port_id); ++ } + return eth_dev; + #else + DRV_LOG(ERR, "DV support is missing for HWS."); +diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c +index e55be8720e..8250c94803 100644 +--- a/dpdk/drivers/net/mlx5/mlx5.c ++++ b/dpdk/drivers/net/mlx5/mlx5.c +@@ -1401,7 +1401,8 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh, + rte_errno = ENODEV; + return -rte_errno; + } +- if (!config->tx_pp && config->tx_skew) { ++ if (!config->tx_pp && config->tx_skew && ++ !sh->cdev->config.hca_attr.wait_on_time) { + DRV_LOG(WARNING, + "\"tx_skew\" doesn't affect without \"tx_pp\"."); + } +@@ -1729,6 +1730,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) + if (LIST_EMPTY(&mlx5_dev_ctx_list)) { + mlx5_os_net_cleanup(); + mlx5_flow_os_release_workspace(); ++ mlx5_flow_workspace_gc_release(); + } + pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); + if (sh->flex_parsers_dv) { +@@ -1976,8 +1978,12 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + void + mlx5_proc_priv_uninit(struct rte_eth_dev *dev) + { +- if (!dev->process_private) ++ struct mlx5_proc_priv *ppriv = dev->process_private; ++ ++ if (!ppriv) + return; ++ if (ppriv->hca_bar) ++ mlx5_txpp_unmap_hca_bar(dev); + mlx5_free(dev->process_private); + dev->process_private = NULL; + } +@@ -2473,6 +2479,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, + config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; + config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; + config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM; ++ config->mprq.log_stride_size = MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE; + config->log_hp_size = MLX5_ARG_UNSET; + config->std_delay_drop = 0; + config->hp_delay_drop = 0; +diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h +index 31982002ee..5f8361c52b 100644 +--- a/dpdk/drivers/net/mlx5/mlx5.h ++++ b/dpdk/drivers/net/mlx5/mlx5.h +@@ -1367,7 +1367,7 @@ struct mlx5_dev_ctx_shared { + uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */ + uint32_t tunnel_header_2_3:1; /* tunnel_header_2_3 is supported. */ + uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */ +- uint32_t dr_drop_action_en:1; /* Use DR drop action. */ ++ uint32_t dr_root_drop_action_en:1; /* DR drop action is usable on root tables. */ + uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */ + uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */ + uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */ +@@ -1463,6 +1463,8 @@ struct mlx5_dev_ctx_shared { + * Caution, secondary process may rebuild the struct during port start. + */ + struct mlx5_proc_priv { ++ void *hca_bar; ++ /* Mapped HCA PCI BAR area. */ + size_t uar_table_sz; + /* Size of UAR register table. */ + struct mlx5_uar_data uar_table[]; +@@ -1663,6 +1665,7 @@ struct mlx5_priv { + unsigned int mtr_en:1; /* Whether support meter. */ + unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */ + unsigned int lb_used:1; /* Loopback queue is referred to. 
*/ ++ unsigned int rmv_notified:1; /* Notified about removal event */ + uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */ + uint16_t domain_id; /* Switch domain identifier. */ + uint16_t vport_id; /* Associated VF vport index (if any). */ +@@ -2163,6 +2166,8 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int n, unsigned int n_used); + void mlx5_txpp_interrupt_handler(void *cb_arg); ++int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev); ++void mlx5_txpp_unmap_hca_bar(struct rte_eth_dev *dev); + + /* mlx5_rxtx.c */ + +diff --git a/dpdk/drivers/net/mlx5/mlx5_devx.c b/dpdk/drivers/net/mlx5/mlx5_devx.c +index 02deaac612..7e0ec91328 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_devx.c ++++ b/dpdk/drivers/net/mlx5/mlx5_devx.c +@@ -1127,6 +1127,10 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; + ++#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) ++ if (hrxq->action != NULL) ++ mlx5_flow_os_destroy_flow_action(hrxq->action); ++#endif + if (hrxq->tir != NULL) + mlx5_devx_tir_destroy(hrxq); + if (hrxq->ind_table->ind_table != NULL) +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c +index a0cf677fb0..942dccf518 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow.c +@@ -364,7 +364,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[], + return next; + } + +-#define MLX5_RSS_EXP_ELT_N 16 ++#define MLX5_RSS_EXP_ELT_N 32 + + /** + * Expand RSS flows into several possible flows according to the RSS hash +@@ -529,6 +529,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, + if (lsize > size) + return -EINVAL; + n = elt * sizeof(*item); ++ MLX5_ASSERT((buf->entries) < MLX5_RSS_EXP_ELT_N); + buf->entry[buf->entries].priority = + stack_pos + 1 + missed; + buf->entry[buf->entries].pattern = addr; +@@ -1903,8 +1904,10 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, + /* + * Validate the drop action. + * +- * @param[in] action_flags +- * Bit-fields that holds the actions detected until now. ++ * @param[in] dev ++ * Pointer to the Ethernet device structure. ++ * @param[in] is_root ++ * True if flow is validated for root table. False otherwise. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error +@@ -1914,15 +1917,25 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ + int +-mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, ++mlx5_flow_validate_action_drop(struct rte_eth_dev *dev, ++ bool is_root, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) + { +- if (attr->egress) ++ struct mlx5_priv *priv = dev->data->dev_private; ++ ++ if (priv->sh->config.dv_flow_en == 0 && attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "drop action not supported for " + "egress"); ++ if (priv->sh->config.dv_flow_en == 1 && is_root && (attr->egress || attr->transfer) && ++ !priv->sh->dr_root_drop_action_en) { ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ATTR, NULL, ++ "drop action not supported for " ++ "egress and transfer on group 0"); ++ } + return 0; + } + +@@ -6125,13 +6138,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, + /* Prepare the prefix tag action. */ + append_index++; + set_tag = (void *)(actions_pre + actions_n + append_index); +- ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); + /* Trust VF/SF on CX5 not supported meter so that the reserved + * metadata regC is REG_NON, back to use application tag + * index 0. + */ +- if (unlikely(ret == REG_NON)) ++ if (unlikely(priv->mtr_color_reg == REG_NON)) + ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); ++ else ++ ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); + if (ret < 0) + return ret; + mlx5_ipool_malloc(priv->sh->ipool +@@ -6918,36 +6932,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) + return tunnel; + } + +-/** +- * Adjust flow RSS workspace if needed. +- * +- * @param wks +- * Pointer to thread flow work space. +- * @param rss_desc +- * Pointer to RSS descriptor. +- * @param[in] nrssq_num +- * New RSS queue number. +- * +- * @return +- * 0 on success, -1 otherwise and rte_errno is set. +- */ +-static int +-flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks, +- struct mlx5_flow_rss_desc *rss_desc, +- uint32_t nrssq_num) +-{ +- if (likely(nrssq_num <= wks->rssq_num)) +- return 0; +- rss_desc->queue = realloc(rss_desc->queue, +- sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2)); +- if (!rss_desc->queue) { +- rte_errno = ENOMEM; +- return -1; +- } +- wks->rssq_num = RTE_ALIGN(nrssq_num, 2); +- return 0; +-} +- + /** + * Create a flow and add it to @p list. + * +@@ -7066,8 +7050,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, + if (attr->ingress) + rss = flow_get_rss_action(dev, p_actions_rx); + if (rss) { +- if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) +- return 0; ++ MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512); + /* + * The following information is required by + * mlx5_flow_hashfields_adjust() in advance. +@@ -7555,12 +7538,34 @@ flow_release_workspace(void *data) + + while (wks) { + next = wks->next; +- free(wks->rss_desc.queue); + free(wks); + wks = next; + } + } + ++static struct mlx5_flow_workspace *gc_head; ++static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER; ++ ++static void ++mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws) ++{ ++ rte_spinlock_lock(&mlx5_flow_workspace_lock); ++ ws->gc = gc_head; ++ gc_head = ws; ++ rte_spinlock_unlock(&mlx5_flow_workspace_lock); ++} ++ ++void ++mlx5_flow_workspace_gc_release(void) ++{ ++ while (gc_head) { ++ struct mlx5_flow_workspace *wks = gc_head; ++ ++ gc_head = wks->gc; ++ flow_release_workspace(wks); ++ } ++} ++ + /** + * Get thread specific current flow workspace. 
+ * +@@ -7586,23 +7591,17 @@ mlx5_flow_get_thread_workspace(void) + static struct mlx5_flow_workspace* + flow_alloc_thread_workspace(void) + { +- struct mlx5_flow_workspace *data = calloc(1, sizeof(*data)); ++ size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long)); ++ size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512; ++ struct mlx5_flow_workspace *data = calloc(1, data_size + ++ rss_queue_array_size); + + if (!data) { +- DRV_LOG(ERR, "Failed to allocate flow workspace " +- "memory."); ++ DRV_LOG(ERR, "Failed to allocate flow workspace memory."); + return NULL; + } +- data->rss_desc.queue = calloc(1, +- sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM); +- if (!data->rss_desc.queue) +- goto err; +- data->rssq_num = MLX5_RSSQ_DEFAULT_NUM; ++ data->rss_desc.queue = RTE_PTR_ADD(data, data_size); + return data; +-err: +- free(data->rss_desc.queue); +- free(data); +- return NULL; + } + + /** +@@ -7623,6 +7622,7 @@ mlx5_flow_push_thread_workspace(void) + data = flow_alloc_thread_workspace(); + if (!data) + return NULL; ++ mlx5_flow_workspace_gc_add(data); + } else if (!curr->inuse) { + data = curr; + } else if (curr->next) { +@@ -7971,6 +7971,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, + "port must be stopped first"); + return -rte_errno; + } ++ if (!enable && !priv->sh->config.repr_matching) ++ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "isolated mode cannot be disabled when " ++ "representor matching is disabled"); + priv->isolated = !!enable; + if (enable) + dev->dev_ops = &mlx5_dev_ops_isolate; +@@ -9758,23 +9762,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, + } + i = lcore_index; + +- for (j = 0; j <= h->mask; j++) { +- l_inconst = &h->buckets[j].l; +- if (!l_inconst || !l_inconst->cache[i]) +- continue; +- +- e = LIST_FIRST(&l_inconst->cache[i]->h); +- while (e) { +- modify_hdr = +- (struct mlx5_flow_dv_modify_hdr_resource *)e; +- data = (const uint8_t *)modify_hdr->actions; +- size = (size_t)(modify_hdr->actions_num) * 8; +- actions_num = modify_hdr->actions_num; +- id = (uint64_t)(uintptr_t)modify_hdr->action; +- type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; +- save_dump_file(data, size, type, id, +- (void *)(&actions_num), file); +- e = LIST_NEXT(e, next); ++ if (lcore_index == MLX5_LIST_NLCORE) { ++ for (i = 0; i <= (uint32_t)lcore_index; i++) { ++ for (j = 0; j <= h->mask; j++) { ++ l_inconst = &h->buckets[j].l; ++ if (!l_inconst || !l_inconst->cache[i]) ++ continue; ++ ++ e = LIST_FIRST(&l_inconst->cache[i]->h); ++ while (e) { ++ modify_hdr = ++ (struct mlx5_flow_dv_modify_hdr_resource *)e; ++ data = (const uint8_t *)modify_hdr->actions; ++ size = (size_t)(modify_hdr->actions_num) * 8; ++ actions_num = modify_hdr->actions_num; ++ id = (uint64_t)(uintptr_t)modify_hdr->action; ++ type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; ++ save_dump_file(data, size, type, id, ++ (void *)(&actions_num), file); ++ e = LIST_NEXT(e, next); ++ } ++ } ++ } ++ } else { ++ for (j = 0; j <= h->mask; j++) { ++ l_inconst = &h->buckets[j].l; ++ if (!l_inconst || !l_inconst->cache[i]) ++ continue; ++ ++ e = LIST_FIRST(&l_inconst->cache[i]->h); ++ while (e) { ++ modify_hdr = ++ (struct mlx5_flow_dv_modify_hdr_resource *)e; ++ data = (const uint8_t *)modify_hdr->actions; ++ size = (size_t)(modify_hdr->actions_num) * 8; ++ actions_num = modify_hdr->actions_num; ++ id = (uint64_t)(uintptr_t)modify_hdr->action; ++ type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; ++ save_dump_file(data, size, type, id, ++ (void *)(&actions_num), file); ++ e 
= LIST_NEXT(e, next); ++ } + } + } + +@@ -10104,9 +10132,19 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + int ret; ++ uint32_t act_idx = (uint32_t)(uintptr_t)handle; ++ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; + +- ret = flow_drv_action_validate(dev, NULL, +- (const struct rte_flow_action *)update, fops, error); ++ switch (type) { ++ case MLX5_INDIRECT_ACTION_TYPE_CT: ++ case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: ++ ret = 0; ++ break; ++ default: ++ ret = flow_drv_action_validate(dev, NULL, ++ (const struct rte_flow_action *)update, ++ fops, error); ++ } + if (ret) + return ret; + return flow_drv_action_update(dev, handle, update, fops, +@@ -10841,7 +10879,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, + if (!is_tunnel_offload_active(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, +- "tunnel offload was not activated"); ++ "tunnel offload was not activated, consider setting dv_xmeta_en=3"); + if (!tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h +index 1f57ecd6e1..9724b88996 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow.h ++++ b/dpdk/drivers/net/mlx5/mlx5_flow.h +@@ -1437,10 +1437,10 @@ struct mlx5_flow_workspace { + /* If creating another flow in same thread, push new as stack. */ + struct mlx5_flow_workspace *prev; + struct mlx5_flow_workspace *next; ++ struct mlx5_flow_workspace *gc; + uint32_t inuse; /* can't create new flow with current. */ + struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS]; + struct mlx5_flow_rss_desc rss_desc; +- uint32_t rssq_num; /* Allocated queue num in rss_desc. */ + uint32_t flow_idx; /* Intermediate device flow index. */ + struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */ + struct mlx5_flow_meter_policy *policy; +@@ -1926,6 +1926,8 @@ struct mlx5_flow_driver_ops { + struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); + void mlx5_flow_pop_thread_workspace(void); + struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); ++void mlx5_flow_workspace_gc_release(void); ++ + __extension__ + struct flow_grp_info { + uint64_t external:1; +@@ -2226,7 +2228,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, + int mlx5_flow_validate_action_count(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +-int mlx5_flow_validate_action_drop(uint64_t action_flags, ++int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev, ++ bool is_root, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); + int mlx5_flow_validate_action_flag(uint64_t action_flags, +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +index 62c38b87a1..22058ed980 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +@@ -2129,6 +2129,8 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. ++ * @param[in] tag_bitmap ++ * Tag index bitmap. + * @param[in] attr + * Attributes of flow that includes this item. 
+ * @param[out] error +@@ -2140,6 +2142,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, + static int + flow_dv_validate_item_tag(struct rte_eth_dev *dev, + const struct rte_flow_item *item, ++ uint32_t *tag_bitmap, + const struct rte_flow_attr *attr __rte_unused, + struct rte_flow_error *error) + { +@@ -2183,6 +2186,12 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, + if (ret < 0) + return ret; + MLX5_ASSERT(ret != REG_NON); ++ if (*tag_bitmap & (1 << ret)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, ++ item->spec, ++ "Duplicated tag index"); ++ *tag_bitmap |= 1 << ret; + return 0; + } + +@@ -7051,9 +7060,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + bool def_policy = false; + bool shared_count = false; + uint16_t udp_dport = 0; +- uint32_t tag_id = 0; ++ uint32_t tag_id = 0, tag_bitmap = 0; + const struct rte_flow_action_age *non_shared_age = NULL; + const struct rte_flow_action_count *count = NULL; ++ const struct mlx5_rte_flow_item_tag *mlx5_tag; + struct mlx5_priv *act_priv = NULL; + int aso_after_sample = 0; + +@@ -7371,7 +7381,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + last_item = MLX5_FLOW_LAYER_ICMP6; + break; + case RTE_FLOW_ITEM_TYPE_TAG: +- ret = flow_dv_validate_item_tag(dev, items, ++ ret = flow_dv_validate_item_tag(dev, items, &tag_bitmap, + attr, error); + if (ret < 0) + return ret; +@@ -7381,6 +7391,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + last_item = MLX5_FLOW_ITEM_SQ; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TAG: ++ mlx5_tag = (const struct mlx5_rte_flow_item_tag *)items->spec; ++ if (tag_bitmap & (1 << mlx5_tag->id)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, ++ items->spec, ++ "Duplicated tag index"); ++ tag_bitmap |= 1 << mlx5_tag->id; + break; + case RTE_FLOW_ITEM_TYPE_GTP: + ret = flow_dv_validate_item_gtp(dev, items, item_flags, +@@ -7562,7 +7579,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + rw_act_num += MLX5_ACT_NUM_SET_TAG; + break; + case RTE_FLOW_ACTION_TYPE_DROP: +- ret = mlx5_flow_validate_action_drop(action_flags, ++ ret = mlx5_flow_validate_action_drop(dev, is_root, + attr, error); + if (ret < 0) + return ret; +@@ -9223,12 +9240,10 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, + { + const struct rte_flow_item_vxlan *vxlan_m; + const struct rte_flow_item_vxlan *vxlan_v; +- const struct rte_flow_item_vxlan *vxlan_vv = item->spec; + void *headers_v; + void *misc_v; + void *misc5_v; + uint32_t tunnel_v; +- uint32_t *tunnel_header_v; + char *vni_v; + uint16_t dport; + int size; +@@ -9280,24 +9295,11 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, + vni_v[i] = vxlan_m->vni[i] & vxlan_v->vni[i]; + return; + } +- tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5, +- misc5_v, +- tunnel_header_1); + tunnel_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) | + (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 | + (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16; +- *tunnel_header_v = tunnel_v; +- if (key_type == MLX5_SET_MATCHER_SW_M) { +- tunnel_v = (vxlan_vv->vni[0] & vxlan_m->vni[0]) | +- (vxlan_vv->vni[1] & vxlan_m->vni[1]) << 8 | +- (vxlan_vv->vni[2] & vxlan_m->vni[2]) << 16; +- if (!tunnel_v) +- *tunnel_header_v = 0x0; +- if (vxlan_vv->rsvd1 & vxlan_m->rsvd1) +- *tunnel_header_v |= vxlan_v->rsvd1 << 24; +- } else { +- *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24; +- } ++ tunnel_v |= 
(vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24; ++ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1, RTE_BE32(tunnel_v)); + } + + /** +@@ -13717,7 +13719,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, + * is the suffix flow. + */ + dev_flow->handle->layers |= wks.item_flags; +- dev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option; ++ /* ++ * Update geneve_tlv_option flag only it is set in workspace. ++ * Avoid be overwritten by other sub mlx5_flows. ++ */ ++ if (wks.geneve_tlv_option) ++ dev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option; + return 0; + } + +@@ -14820,7 +14827,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + } + dv->actions[n++] = priv->sh->default_miss_action; + } +- misc_mask = flow_dv_matcher_enable(dv->value.buf); ++ misc_mask = flow_dv_matcher_enable(dv_h->matcher->mask.buf); + __flow_dv_adjust_buf_size(&dv->value.size, misc_mask); + err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, + (void *)&dv->value, n, +@@ -17020,7 +17027,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) + static int + __flow_dv_create_policy_flow(struct rte_eth_dev *dev, + uint32_t color_reg_c_idx, +- enum rte_color color, void *matcher_object, ++ enum rte_color color, struct mlx5_flow_dv_matcher *matcher, + int actions_n, void *actions, + bool match_src_port, const struct rte_flow_item *item, + void **rule, const struct rte_flow_attr *attr) +@@ -17050,9 +17057,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, + } + flow_dv_match_meta_reg(value.buf, (enum modify_reg)color_reg_c_idx, + rte_col_2_mlx5_col(color), UINT32_MAX); +- misc_mask = flow_dv_matcher_enable(value.buf); ++ misc_mask = flow_dv_matcher_enable(matcher->mask.buf); + __flow_dv_adjust_buf_size(&value.size, misc_mask); +- ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value, ++ ret = mlx5_flow_os_create_flow(matcher->matcher_object, (void *)&value, + actions_n, actions, rule); + if (ret) { + DRV_LOG(ERR, "Failed to create meter policy%d flow.", color); +@@ -17206,7 +17213,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + /* Create flow, matching color. 
*/ + if (__flow_dv_create_policy_flow(dev, + color_reg_c_idx, (enum rte_color)i, +- color_rule->matcher->matcher_object, ++ color_rule->matcher, + acts[i].actions_n, acts[i].dv_actions, + svport_match, NULL, &color_rule->rule, + &attr)) { +@@ -17674,7 +17681,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, + actions[i++] = priv->sh->dr_drop_action; + flow_dv_match_meta_reg_all(matcher_para.buf, value.buf, + (enum modify_reg)mtr_id_reg_c, 0, 0); +- misc_mask = flow_dv_matcher_enable(value.buf); ++ misc_mask = flow_dv_matcher_enable(mtrmng->def_matcher[domain]->mask.buf); + __flow_dv_adjust_buf_size(&value.size, misc_mask); + ret = mlx5_flow_os_create_flow + (mtrmng->def_matcher[domain]->matcher_object, +@@ -17719,7 +17726,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, + fm->drop_cnt, NULL); + actions[i++] = cnt->action; + actions[i++] = priv->sh->dr_drop_action; +- misc_mask = flow_dv_matcher_enable(value.buf); ++ misc_mask = flow_dv_matcher_enable(drop_matcher->mask.buf); + __flow_dv_adjust_buf_size(&value.size, misc_mask); + ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object, + (void *)&value, i, actions, +@@ -18199,7 +18206,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + goto err_exit; + } + if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)j, +- color_rule->matcher->matcher_object, ++ color_rule->matcher, + acts.actions_n, acts.dv_actions, + true, item, &color_rule->rule, &attr)) { + rte_spinlock_unlock(&mtr_policy->sl); +@@ -18909,7 +18916,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_validate_action_drop +- (action_flags[i], attr, &flow_err); ++ (dev, false, attr, &flow_err); + if (ret < 0) + return -rte_mtr_error_set(error, + ENOTSUP, +@@ -19243,7 +19250,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, + break; + } + /* Try to apply the flow to HW. */ +- misc_mask = flow_dv_matcher_enable(flow.dv.value.buf); ++ misc_mask = flow_dv_matcher_enable(flow.handle->dvh.matcher->mask.buf); + __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask); + err = mlx5_flow_os_create_flow + (flow.handle->dvh.matcher->matcher_object, +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c +index a3c8056515..102f67a925 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c +@@ -1243,6 +1243,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, + struct mlx5_flow_meter_info *fm; + uint32_t mtr_id; + ++ if (meter_mark->profile == NULL) ++ return NULL; + aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id); + if (!aso_mtr) + return NULL; +@@ -3252,14 +3254,18 @@ flow_hw_translate_group(struct rte_eth_dev *dev, + "group index not supported"); + *table_group = group + 1; + } else if (config->dv_esw_en && +- !(config->repr_matching && config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) && ++ (config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) && + cfg->external && + flow_attr->egress) { + /* +- * On E-Switch setups, egress group translation is not done if and only if +- * representor matching is disabled and legacy metadata mode is selected. +- * In all other cases, egree group 0 is reserved for representor tagging flows +- * and metadata copy flows. ++ * On E-Switch setups, default egress flow rules are inserted to allow ++ * representor matching and/or preserving metadata across steering domains. 
++ * These flow rules are inserted in group 0 and this group is reserved by PMD ++ * for these purposes. ++ * ++ * As a result, if representor matching or extended metadata mode is enabled, ++ * group provided by the user must be incremented to avoid inserting flow rules ++ * in group 0. + */ + if (group > MLX5_HW_MAX_EGRESS_GROUP) + return rte_flow_error_set(error, EINVAL, +@@ -4534,6 +4540,9 @@ error: + mlx5dr_action_template_destroy(at->tmpl); + mlx5_free(at); + } ++ rte_flow_error_set(error, rte_errno, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "Failed to create action template"); + return NULL; + } + +@@ -4614,8 +4623,9 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, + struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; +- int i; ++ int i, tag_idx; + bool items_end = false; ++ uint32_t tag_bitmap = 0; + + if (!attr->ingress && !attr->egress && !attr->transfer) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL, +@@ -4657,16 +4667,26 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, + switch (type) { + case RTE_FLOW_ITEM_TYPE_TAG: + { +- int reg; + const struct rte_flow_item_tag *tag = + (const struct rte_flow_item_tag *)items[i].spec; + +- reg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG, tag->index); +- if (reg == REG_NON) ++ if (tag == NULL) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, ++ "Tag spec is NULL"); ++ tag_idx = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG, tag->index); ++ if (tag_idx == REG_NON) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Unsupported tag index"); ++ if (tag_bitmap & (1 << tag_idx)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ NULL, ++ "Duplicated tag index"); ++ tag_bitmap |= 1 << tag_idx; + break; + } + case MLX5_RTE_FLOW_ITEM_TYPE_TAG: +@@ -4680,6 +4700,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Unsupported internal tag index"); ++ if (tag_bitmap & (1 << tag->index)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ NULL, ++ "Duplicated tag index"); ++ tag_bitmap |= 1 << tag->index; + break; + } + case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: +@@ -4790,7 +4816,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *it; + struct rte_flow_item *copied_items = NULL; + const struct rte_flow_item *tmpl_items; +- uint64_t orig_item_nb; ++ uint32_t orig_item_nb; + struct rte_flow_item port = { + .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, + .mask = &rte_flow_item_ethdev_mask, +@@ -5271,12 +5297,14 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv) + * + * @param dev + * Pointer to Ethernet device. ++ * @param[out] error ++ * Pointer to error structure. + * + * @return + * Pointer to pattern template on success. NULL otherwise, and rte_errno is set. 
+ */ + static struct rte_flow_pattern_template * +-flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) ++flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error) + { + struct rte_flow_pattern_template_attr attr = { + .relaxed_matching = 0, +@@ -5295,7 +5323,7 @@ flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) + }, + }; + +- return flow_hw_pattern_template_create(dev, &attr, items, NULL); ++ return flow_hw_pattern_template_create(dev, &attr, items, error); + } + + static __rte_always_inline uint32_t +@@ -5353,12 +5381,15 @@ flow_hw_update_action_mask(struct rte_flow_action *action, + * + * @param dev + * Pointer to Ethernet device. ++ * @param[out] error ++ * Pointer to error structure. + * + * @return + * Pointer to actions template on success. NULL otherwise, and rte_errno is set. + */ + static struct rte_flow_actions_template * +-flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) ++flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev); + uint32_t tag_value = flow_hw_tx_tag_regc_value(dev); +@@ -5444,7 +5475,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) + NULL, NULL); + idx++; + MLX5_ASSERT(idx <= RTE_DIM(actions_v)); +- return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, NULL); ++ return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error); + } + + static void +@@ -5473,12 +5504,14 @@ flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param[out] error ++ * Pointer to error structure. + * + * @return + * 0 on success, negative errno value otherwise. + */ + static int +-flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) ++flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_template_table_attr attr = { +@@ -5496,20 +5529,22 @@ flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) + + MLX5_ASSERT(priv->sh->config.dv_esw_en); + MLX5_ASSERT(priv->sh->config.repr_matching); +- priv->hw_tx_repr_tagging_pt = flow_hw_create_tx_repr_sq_pattern_tmpl(dev); ++ priv->hw_tx_repr_tagging_pt = ++ flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error); + if (!priv->hw_tx_repr_tagging_pt) +- goto error; +- priv->hw_tx_repr_tagging_at = flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev); ++ goto err; ++ priv->hw_tx_repr_tagging_at = ++ flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error); + if (!priv->hw_tx_repr_tagging_at) +- goto error; ++ goto err; + priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg, + &priv->hw_tx_repr_tagging_pt, 1, + &priv->hw_tx_repr_tagging_at, 1, +- NULL); ++ error); + if (!priv->hw_tx_repr_tagging_tbl) +- goto error; ++ goto err; + return 0; +-error: ++err: + flow_hw_cleanup_tx_repr_tagging(dev); + return -rte_errno; + } +@@ -5540,12 +5575,15 @@ flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow pattern template on success, NULL otherwise. 
+ */ + static struct rte_flow_pattern_template * +-flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) ++flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + struct rte_flow_pattern_template_attr attr = { + .relaxed_matching = 0, +@@ -5575,7 +5613,7 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) + }, + }; + +- return flow_hw_pattern_template_create(dev, &attr, items, NULL); ++ return flow_hw_pattern_template_create(dev, &attr, items, error); + } + + /** +@@ -5588,12 +5626,15 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow pattern template on success, NULL otherwise. + */ + static struct rte_flow_pattern_template * +-flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) ++flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + struct rte_flow_pattern_template_attr attr = { + .relaxed_matching = 0, +@@ -5626,7 +5667,7 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) + }, + }; + +- return flow_hw_pattern_template_create(dev, &attr, items, NULL); ++ return flow_hw_pattern_template_create(dev, &attr, items, error); + } + + /** +@@ -5636,12 +5677,15 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow pattern template on success, NULL otherwise. + */ + static struct rte_flow_pattern_template * +-flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) ++flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + struct rte_flow_pattern_template_attr attr = { + .relaxed_matching = 0, +@@ -5660,7 +5704,7 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) + }, + }; + +- return flow_hw_pattern_template_create(dev, &attr, items, NULL); ++ return flow_hw_pattern_template_create(dev, &attr, items, error); + } + + /* +@@ -5670,12 +5714,15 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow pattern template on success, NULL otherwise. + */ + static struct rte_flow_pattern_template * +-flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) ++flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + struct rte_flow_pattern_template_attr tx_pa_attr = { + .relaxed_matching = 0, +@@ -5696,10 +5743,8 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; +- struct rte_flow_error drop_err; + +- RTE_SET_USED(drop_err); +- return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, &drop_err); ++ return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error); + } + + /** +@@ -5710,12 +5755,15 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow actions template on success, NULL otherwise. 
+ */ + static struct rte_flow_actions_template * +-flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) ++flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev); + uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev); +@@ -5781,7 +5829,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) + set_reg_v.dst.offset = rte_bsf32(marker_mask); + rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits)); + rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask)); +- return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, NULL); ++ return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error); + } + + /** +@@ -5793,13 +5841,16 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) + * Pointer to Ethernet device. + * @param group + * Destination group for this action template. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow actions template on success, NULL otherwise. + */ + static struct rte_flow_actions_template * + flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, +- uint32_t group) ++ uint32_t group, ++ struct rte_flow_error *error) + { + struct rte_flow_actions_template_attr attr = { + .transfer = 1, +@@ -5829,8 +5880,8 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, + } + }; + +- return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, +- NULL); ++ return flow_hw_actions_template_create(dev, &attr, actions_v, ++ actions_m, error); + } + + /** +@@ -5839,12 +5890,15 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow action template on success, NULL otherwise. + */ + static struct rte_flow_actions_template * +-flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) ++flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + struct rte_flow_actions_template_attr attr = { + .transfer = 1, +@@ -5874,8 +5928,7 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) + } + }; + +- return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, +- NULL); ++ return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error); + } + + /* +@@ -5884,12 +5937,15 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow actions template on success, NULL otherwise. 
+ */ + static struct rte_flow_actions_template * +-flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) ++flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev, ++ struct rte_flow_error *error) + { + struct rte_flow_actions_template_attr tx_act_attr = { + .egress = 1, +@@ -5952,11 +6008,9 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; +- struct rte_flow_error drop_err; + +- RTE_SET_USED(drop_err); + return flow_hw_actions_template_create(dev, &tx_act_attr, actions, +- masks, &drop_err); ++ masks, error); + } + + /** +@@ -5969,6 +6023,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) + * Pointer to flow pattern template. + * @param at + * Pointer to flow actions template. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow table on success, NULL otherwise. +@@ -5976,7 +6032,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) + static struct rte_flow_template_table* + flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *it, +- struct rte_flow_actions_template *at) ++ struct rte_flow_actions_template *at, ++ struct rte_flow_error *error) + { + struct rte_flow_template_table_attr attr = { + .flow_attr = { +@@ -5993,7 +6050,7 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, + .external = false, + }; + +- return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL); ++ return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); + } + + +@@ -6007,6 +6064,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, + * Pointer to flow pattern template. + * @param at + * Pointer to flow actions template. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow table on success, NULL otherwise. +@@ -6014,7 +6073,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, + static struct rte_flow_template_table* + flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *it, +- struct rte_flow_actions_template *at) ++ struct rte_flow_actions_template *at, ++ struct rte_flow_error *error) + { + struct rte_flow_template_table_attr attr = { + .flow_attr = { +@@ -6031,7 +6091,7 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, + .external = false, + }; + +- return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL); ++ return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); + } + + /* +@@ -6043,6 +6103,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, + * Pointer to flow pattern template. + * @param at + * Pointer to flow actions template. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow table on success, NULL otherwise. 
+@@ -6050,7 +6112,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, + static struct rte_flow_template_table* + flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *pt, +- struct rte_flow_actions_template *at) ++ struct rte_flow_actions_template *at, ++ struct rte_flow_error *error) + { + struct rte_flow_template_table_attr tx_tbl_attr = { + .flow_attr = { +@@ -6064,14 +6127,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, + .attr = tx_tbl_attr, + .external = false, + }; +- struct rte_flow_error drop_err = { +- .type = RTE_FLOW_ERROR_TYPE_NONE, +- .cause = NULL, +- .message = NULL, +- }; + +- RTE_SET_USED(drop_err); +- return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, &drop_err); ++ return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error); + } + + /** +@@ -6084,6 +6141,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, + * Pointer to flow pattern template. + * @param at + * Pointer to flow actions template. ++ * @param error ++ * Pointer to error structure. + * + * @return + * Pointer to flow table on success, NULL otherwise. +@@ -6091,7 +6150,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, + static struct rte_flow_template_table * + flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *it, +- struct rte_flow_actions_template *at) ++ struct rte_flow_actions_template *at, ++ struct rte_flow_error *error) + { + struct rte_flow_template_table_attr attr = { + .flow_attr = { +@@ -6108,7 +6168,7 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, + .external = false, + }; + +- return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL); ++ return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); + } + + /** +@@ -6117,12 +6177,14 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, + * + * @param dev + * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. + * + * @return +- * 0 on success, EINVAL otherwise ++ * 0 on success, negative values otherwise + */ + static __rte_unused int +-flow_hw_create_ctrl_tables(struct rte_eth_dev *dev) ++flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL; +@@ -6135,96 +6197,107 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev) + struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL; + uint32_t xmeta = priv->sh->config.dv_xmeta_en; + uint32_t repr_matching = priv->sh->config.repr_matching; ++ int ret; + + /* Create templates and table for default SQ miss flow rules - root table. 
*/ +- esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev); ++ esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); + if (!esw_mgr_items_tmpl) { + DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" + " template for control flows", dev->data->port_id); +- goto error; ++ goto err; + } +- regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev); ++ regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error); + if (!regc_jump_actions_tmpl) { + DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" + " for control flows", dev->data->port_id); +- goto error; ++ goto err; + } + MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL); + priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table +- (dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl); ++ (dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error); + if (!priv->hw_esw_sq_miss_root_tbl) { + DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" + " for control flows", dev->data->port_id); +- goto error; ++ goto err; + } + /* Create templates and table for default SQ miss flow rules - non-root table. */ +- regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev); ++ regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); + if (!regc_sq_items_tmpl) { + DRV_LOG(ERR, "port %u failed to create SQ item template for" + " control flows", dev->data->port_id); +- goto error; ++ goto err; + } +- port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev); ++ port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error); + if (!port_actions_tmpl) { + DRV_LOG(ERR, "port %u failed to create port action template" + " for control flows", dev->data->port_id); +- goto error; ++ goto err; + } + MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL); + priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl, +- port_actions_tmpl); ++ port_actions_tmpl, error); + if (!priv->hw_esw_sq_miss_tbl) { + DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" + " for control flows", dev->data->port_id); +- goto error; ++ goto err; + } + /* Create templates and table for default FDB jump flow rules. */ +- port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev); ++ port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error); + if (!port_items_tmpl) { + DRV_LOG(ERR, "port %u failed to create SQ item template for" + " control flows", dev->data->port_id); +- goto error; ++ goto err; + } + jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template +- (dev, MLX5_HW_LOWEST_USABLE_GROUP); ++ (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); + if (!jump_one_actions_tmpl) { + DRV_LOG(ERR, "port %u failed to create jump action template" + " for control flows", dev->data->port_id); +- goto error; ++ goto err; + } + MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL); + priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl, +- jump_one_actions_tmpl); ++ jump_one_actions_tmpl, ++ error); + if (!priv->hw_esw_zero_tbl) { + DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" + " for control flows", dev->data->port_id); +- goto error; ++ goto err; + } + /* Create templates and table for default Tx metadata copy flow rule. 
*/ + if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) { +- tx_meta_items_tmpl = flow_hw_create_tx_default_mreg_copy_pattern_template(dev); ++ tx_meta_items_tmpl = ++ flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error); + if (!tx_meta_items_tmpl) { + DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern" + " template for control flows", dev->data->port_id); +- goto error; ++ goto err; + } +- tx_meta_actions_tmpl = flow_hw_create_tx_default_mreg_copy_actions_template(dev); ++ tx_meta_actions_tmpl = ++ flow_hw_create_tx_default_mreg_copy_actions_template(dev, error); + if (!tx_meta_actions_tmpl) { + DRV_LOG(ERR, "port %u failed to Tx metadata copy actions" + " template for control flows", dev->data->port_id); +- goto error; ++ goto err; + } + MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL); +- priv->hw_tx_meta_cpy_tbl = flow_hw_create_tx_default_mreg_copy_table(dev, +- tx_meta_items_tmpl, tx_meta_actions_tmpl); ++ priv->hw_tx_meta_cpy_tbl = ++ flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl, ++ tx_meta_actions_tmpl, error); + if (!priv->hw_tx_meta_cpy_tbl) { + DRV_LOG(ERR, "port %u failed to create table for default" + " Tx metadata copy flow rule", dev->data->port_id); +- goto error; ++ goto err; + } + } + return 0; +-error: ++err: ++ /* Do not overwrite the rte_errno. */ ++ ret = -rte_errno; ++ if (ret == 0) ++ ret = rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "Failed to create control tables."); + if (priv->hw_esw_zero_tbl) { + flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL); + priv->hw_esw_zero_tbl = NULL; +@@ -6253,7 +6326,7 @@ error: + flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL); + if (esw_mgr_items_tmpl) + flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL); +- return -EINVAL; ++ return ret; + } + + static void +@@ -6376,27 +6449,28 @@ flow_hw_create_vlan(struct rte_eth_dev *dev) + MLX5DR_ACTION_FLAG_HWS_FDB + }; + ++ /* rte_errno is set in the mlx5dr_action* functions. 
*/ + for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) { + priv->hw_pop_vlan[i] = + mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]); + if (!priv->hw_pop_vlan[i]) +- return -ENOENT; ++ return -rte_errno; + priv->hw_push_vlan[i] = + mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]); + if (!priv->hw_pop_vlan[i]) +- return -ENOENT; ++ return -rte_errno; + } + if (priv->sh->config.dv_esw_en && priv->master) { + priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] = + mlx5dr_action_create_pop_vlan + (priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB); + if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB]) +- return -ENOENT; ++ return -rte_errno; + priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] = + mlx5dr_action_create_push_vlan + (priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB); + if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB]) +- return -ENOENT; ++ return -rte_errno; + } + return 0; + } +@@ -6836,8 +6910,7 @@ flow_hw_configure(struct rte_eth_dev *dev, + goto err; + } + +- memcpy(_queue_attr, queue_attr, +- sizeof(void *) * nb_queue); ++ memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue); + _queue_attr[nb_queue] = &ctrl_queue_attr; + priv->acts_ipool = mlx5_ipool_create(&cfg); + if (!priv->acts_ipool) +@@ -6952,23 +7025,20 @@ flow_hw_configure(struct rte_eth_dev *dev, + goto err; + } + if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) { +- ret = flow_hw_setup_tx_repr_tagging(dev); +- if (ret) { +- rte_errno = -ret; ++ ret = flow_hw_setup_tx_repr_tagging(dev, error); ++ if (ret) + goto err; +- } + } + if (is_proxy) { + ret = flow_hw_create_vport_actions(priv); + if (ret) { +- rte_errno = -ret; ++ rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, "Failed to create vport actions."); + goto err; + } +- ret = flow_hw_create_ctrl_tables(dev); +- if (ret) { +- rte_errno = -ret; ++ ret = flow_hw_create_ctrl_tables(dev, error); ++ if (ret) + goto err; +- } + } + if (port_attr->nb_conn_tracks) { + mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated + +@@ -7005,12 +7075,18 @@ flow_hw_configure(struct rte_eth_dev *dev, + goto err; + } + ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue); +- if (ret < 0) ++ if (ret < 0) { ++ rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, "Failed to init age pool."); + goto err; ++ } + } + ret = flow_hw_create_vlan(dev); +- if (ret) ++ if (ret) { ++ rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, "Failed to VLAN actions."); + goto err; ++ } + if (_queue_attr) + mlx5_free(_queue_attr); + if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE) +@@ -7178,9 +7254,9 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) + uint32_t meta_mode = priv->sh->config.dv_xmeta_en; + uint8_t masks = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c; + uint32_t i, j; +- enum modify_reg copy[MLX5_FLOW_HW_TAGS_MAX] = {REG_NON}; ++ uint8_t reg_off; + uint8_t unset = 0; +- uint8_t copy_masks = 0; ++ uint8_t common_masks = 0; + + /* + * The CAPA is global for common device but only used in net. +@@ -7195,29 +7271,35 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) + if (meta_mode == MLX5_XMETA_MODE_META32_HWS) + unset |= 1 << (REG_C_1 - REG_C_0); + masks &= ~unset; ++ /* ++ * If available tag registers were previously calculated, ++ * calculate a bitmask with an intersection of sets of: ++ * - registers supported by current port, ++ * - previously calculated available tag registers. 
++ */ + if (mlx5_flow_hw_avl_tags_init_cnt) { + MLX5_ASSERT(mlx5_flow_hw_aso_tag == priv->mtr_color_reg); + for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) { +- if (mlx5_flow_hw_avl_tags[i] != REG_NON && !!((1 << i) & masks)) { +- copy[mlx5_flow_hw_avl_tags[i] - REG_C_0] = +- mlx5_flow_hw_avl_tags[i]; +- copy_masks |= (1 << (mlx5_flow_hw_avl_tags[i] - REG_C_0)); +- } +- } +- if (copy_masks != masks) { +- j = 0; +- for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) +- if (!!((1 << i) & copy_masks)) +- mlx5_flow_hw_avl_tags[j++] = copy[i]; +- } +- } else { +- j = 0; +- for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) { +- if (!!((1 << i) & masks)) +- mlx5_flow_hw_avl_tags[j++] = +- (enum modify_reg)(i + (uint32_t)REG_C_0); ++ if (mlx5_flow_hw_avl_tags[i] == REG_NON) ++ continue; ++ reg_off = mlx5_flow_hw_avl_tags[i] - REG_C_0; ++ if ((1 << reg_off) & masks) ++ common_masks |= (1 << reg_off); + } ++ if (common_masks != masks) ++ masks = common_masks; ++ else ++ goto after_avl_tags; ++ } ++ j = 0; ++ for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) { ++ if ((1 << i) & masks) ++ mlx5_flow_hw_avl_tags[j++] = (enum modify_reg)(i + (uint32_t)REG_C_0); + } ++ /* Clear the rest of unusable tag indexes. */ ++ for (; j < MLX5_FLOW_HW_TAGS_MAX; j++) ++ mlx5_flow_hw_avl_tags[j] = REG_NON; ++after_avl_tags: + priv->sh->hws_tags = 1; + mlx5_flow_hw_aso_tag = (enum modify_reg)priv->mtr_color_reg; + mlx5_flow_hw_avl_tags_init_cnt++; +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c +index 28ea28bfbe..1e9c7cf7c5 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c +@@ -1280,14 +1280,14 @@ flow_verbs_validate(struct rte_eth_dev *dev, + uint16_t ether_type = 0; + bool is_empty_vlan = false; + uint16_t udp_dport = 0; +- bool is_root; ++ /* Verbs interface does not support groups higher than 0. 
*/ ++ bool is_root = true; + + if (items == NULL) + return -1; + ret = flow_verbs_validate_attributes(dev, attr, error); + if (ret < 0) + return ret; +- is_root = ret; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret = 0; +@@ -1484,7 +1484,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, + action_flags |= MLX5_FLOW_ACTION_MARK; + break; + case RTE_FLOW_ACTION_TYPE_DROP: +- ret = mlx5_flow_validate_action_drop(action_flags, ++ ret = mlx5_flow_validate_action_drop(dev, ++ is_root, + attr, + error); + if (ret < 0) +diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c +index 51704ef754..8ccc6ab1f8 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c ++++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c +@@ -410,8 +410,7 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + goto error; + } + for (qidx = 0; qidx < ccfg->q_num; qidx++) { +- snprintf(mz_name, sizeof(mz_name), "%s_cache/%u", pcfg->name, +- qidx); ++ snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); + cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, + SOCKET_ID_ANY, + RING_F_SP_ENQ | RING_F_SC_DEQ | +@@ -634,7 +633,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, + SOCKET_ID_ANY); + if (mp_name == NULL) + goto error; +- snprintf(mp_name, RTE_MEMZONE_NAMESIZE, "MLX5_HWS_CNT_POOL_%u", ++ snprintf(mp_name, RTE_MEMZONE_NAMESIZE, "MLX5_HWS_CNT_P_%x", + dev->data->port_id); + pcfg.name = mp_name; + pcfg.request_num = pattr->nb_counters; +diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c +index 917c517b83..b41f7a51f5 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rx.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rx.c +@@ -39,7 +39,8 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + + static __rte_always_inline int + mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +- uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe); ++ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe, ++ uint16_t *skip_cnt, bool mprq); + + static __rte_always_inline uint32_t + rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); +@@ -408,10 +409,14 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); + } + ++#define MLX5_ERROR_CQE_MASK 0x40000000 + /* Must be negative. */ +-#define MLX5_ERROR_CQE_RET (-1) ++#define MLX5_REGULAR_ERROR_CQE_RET (-5) ++#define MLX5_CRITICAL_ERROR_CQE_RET (-4) + /* Must not be negative. */ + #define MLX5_RECOVERY_ERROR_RET 0 ++#define MLX5_RECOVERY_IGNORE_RET 1 ++#define MLX5_RECOVERY_COMPLETED_RET 2 + + /** + * Handle a Rx error. +@@ -425,12 +430,18 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) + * @param[in] vec + * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ. + * 0 when called from non-vectorized Rx burst. ++ * @param[in] err_n ++ * Number of CQEs to check for an error. + * + * @return +- * MLX5_RECOVERY_ERROR_RET in case of recovery error, otherwise the CQE status. ++ * MLX5_RECOVERY_ERROR_RET in case of recovery error, ++ * MLX5_RECOVERY_IGNORE_RET in case of non-critical error syndrome, ++ * MLX5_RECOVERY_COMPLETED_RET in case of recovery is completed, ++ * otherwise the CQE status after ignored error syndrome or queue reset. 
+ */ + int +-mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) ++mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec, ++ uint16_t err_n, uint16_t *skip_cnt) + { + const uint16_t cqe_n = 1 << rxq->cqe_n; + const uint16_t cqe_mask = cqe_n - 1; +@@ -442,13 +453,39 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + volatile struct mlx5_cqe *cqe; + volatile struct mlx5_err_cqe *err_cqe; + } u = { +- .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask], ++ .cqe = &(*rxq->cqes)[(rxq->cq_ci - vec) & cqe_mask], + }; + struct mlx5_mp_arg_queue_state_modify sm; +- int ret; ++ bool critical_syndrome = false; ++ int ret, i; + + switch (rxq->err_state) { ++ case MLX5_RXQ_ERR_STATE_IGNORE: ++ ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci - vec); ++ if (ret != MLX5_CQE_STATUS_ERR) { ++ rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; ++ return ret; ++ } ++ /* Fall-through */ + case MLX5_RXQ_ERR_STATE_NO_ERROR: ++ for (i = 0; i < (int)err_n; i++) { ++ u.cqe = &(*rxq->cqes)[(rxq->cq_ci - vec - i) & cqe_mask]; ++ if (MLX5_CQE_OPCODE(u.cqe->op_own) == MLX5_CQE_RESP_ERR) { ++ if (u.err_cqe->syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || ++ u.err_cqe->syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || ++ u.err_cqe->syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR) ++ critical_syndrome = true; ++ break; ++ } ++ } ++ if (!critical_syndrome) { ++ if (rxq->err_state == MLX5_RXQ_ERR_STATE_NO_ERROR) { ++ *skip_cnt = 0; ++ if (i == err_n) ++ rxq->err_state = MLX5_RXQ_ERR_STATE_IGNORE; ++ } ++ return MLX5_RECOVERY_IGNORE_RET; ++ } + rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET; + /* Fall-through */ + case MLX5_RXQ_ERR_STATE_NEED_RESET: +@@ -507,7 +544,6 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + rxq->elts_ci : rxq->rq_ci; + uint32_t elt_idx; + struct rte_mbuf **elt; +- int i; + unsigned int n = elts_n - (elts_ci - + rxq->rq_pi); + +@@ -540,6 +576,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + } + mlx5_rxq_initialize(rxq); + rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; ++ return MLX5_RECOVERY_COMPLETED_RET; + } + return ret; + default: +@@ -559,19 +596,24 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + * @param[out] mcqe + * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not + * written. +- * ++ * @param[out] skip_cnt ++ * Number of packets skipped due to recoverable errors. ++ * @param mprq ++ * Indication if it is called from MPRQ. + * @return +- * 0 in case of empty CQE, MLX5_ERROR_CQE_RET in case of error CQE, +- * otherwise the packet size in regular RxQ, and striding byte +- * count format in mprq case. ++ * 0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, ++ * MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset, ++ * otherwise the packet size in regular RxQ, ++ * and striding byte count format in mprq case. + */ + static inline int + mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +- uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe) ++ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe, ++ uint16_t *skip_cnt, bool mprq) + { + struct rxq_zip *zip = &rxq->zip; + uint16_t cqe_n = cqe_cnt + 1; +- int len; ++ int len = 0, ret = 0; + uint16_t idx, end; + + do { +@@ -620,7 +662,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + * compressed. 
+ */ + } else { +- int ret; + int8_t op_own; + uint32_t cq_ci; + +@@ -628,10 +669,12 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { + if (unlikely(ret == MLX5_CQE_STATUS_ERR || + rxq->err_state)) { +- ret = mlx5_rx_err_handle(rxq, 0); +- if (ret == MLX5_CQE_STATUS_HW_OWN || +- ret == MLX5_RECOVERY_ERROR_RET) +- return MLX5_ERROR_CQE_RET; ++ ret = mlx5_rx_err_handle(rxq, 0, 1, skip_cnt); ++ if (ret == MLX5_CQE_STATUS_HW_OWN) ++ return MLX5_ERROR_CQE_MASK; ++ if (ret == MLX5_RECOVERY_ERROR_RET || ++ ret == MLX5_RECOVERY_COMPLETED_RET) ++ return MLX5_CRITICAL_ERROR_CQE_RET; + } else { + return 0; + } +@@ -684,8 +727,15 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + } + } + if (unlikely(rxq->err_state)) { ++ if (rxq->err_state == MLX5_RXQ_ERR_STATE_IGNORE && ++ ret == MLX5_CQE_STATUS_SW_OWN) { ++ rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; ++ return len & MLX5_ERROR_CQE_MASK; ++ } + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; + ++rxq->stats.idropped; ++ (*skip_cnt) += mprq ? (len & MLX5_MPRQ_STRIDE_NUM_MASK) >> ++ MLX5_MPRQ_STRIDE_NUM_SHIFT : 1; + } else { + return len; + } +@@ -837,6 +887,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + int len = 0; /* keep its value across iterations. */ + + while (pkts_n) { ++ uint16_t skip_cnt; + unsigned int idx = rq_ci & wqe_cnt; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; +@@ -875,11 +926,24 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + } + if (!pkt) { + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; +- len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); +- if (len <= 0) { +- rte_mbuf_raw_free(rep); +- if (unlikely(len == MLX5_ERROR_CQE_RET)) ++ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe, &skip_cnt, false); ++ if (unlikely(len & MLX5_ERROR_CQE_MASK)) { ++ if (len == MLX5_CRITICAL_ERROR_CQE_RET) { ++ rte_mbuf_raw_free(rep); + rq_ci = rxq->rq_ci << sges_n; ++ break; ++ } ++ rq_ci >>= sges_n; ++ rq_ci += skip_cnt; ++ rq_ci <<= sges_n; ++ idx = rq_ci & wqe_cnt; ++ wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; ++ seg = (*rxq->elts)[idx]; ++ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; ++ len = len & ~MLX5_ERROR_CQE_MASK; ++ } ++ if (len == 0) { ++ rte_mbuf_raw_free(rep); + break; + } + pkt = seg; +@@ -981,6 +1045,7 @@ mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, + tcp->cksum = 0; + csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4); + csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); ++ csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); + csum = (~csum) & 0xffff; + if (csum == 0) + csum = 0xffff; +@@ -1089,6 +1154,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + uint16_t strd_cnt; + uint16_t strd_idx; + uint32_t byte_cnt; ++ uint16_t skip_cnt; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; + enum mlx5_rqx_code rxq_code; + +@@ -1101,14 +1167,26 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; + } + cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; +- ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); ++ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe, &skip_cnt, true); ++ if (unlikely(ret & MLX5_ERROR_CQE_MASK)) { ++ if (ret == MLX5_CRITICAL_ERROR_CQE_RET) { ++ rq_ci = rxq->rq_ci; ++ consumed_strd = rxq->consumed_strd; ++ break; ++ } ++ consumed_strd += skip_cnt; ++ while (consumed_strd >= 
strd_n) { ++ /* Replace WQE if the buffer is still in use. */ ++ mprq_buf_replace(rxq, rq_ci & wq_mask); ++ /* Advance to the next WQE. */ ++ consumed_strd -= strd_n; ++ ++rq_ci; ++ buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; ++ } ++ cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; ++ } + if (ret == 0) + break; +- if (unlikely(ret == MLX5_ERROR_CQE_RET)) { +- rq_ci = rxq->rq_ci; +- consumed_strd = rxq->consumed_strd; +- break; +- } + byte_cnt = ret; + len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; + MLX5_ASSERT((int)len >= (rxq->crc_present << 2)); +diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.h b/dpdk/drivers/net/mlx5/mlx5_rx.h +index e078aaf3dc..6b42e27c89 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rx.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rx.h +@@ -62,6 +62,7 @@ enum mlx5_rxq_err_state { + MLX5_RXQ_ERR_STATE_NO_ERROR = 0, + MLX5_RXQ_ERR_STATE_NEED_RESET, + MLX5_RXQ_ERR_STATE_NEED_READY, ++ MLX5_RXQ_ERR_STATE_IGNORE, + }; + + enum mlx5_rqx_code { +@@ -286,7 +287,8 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx, + + uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); + void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq); +-__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec); ++__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec, ++ uint16_t err_n, uint16_t *skip_cnt); + void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf); + uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c +index 81aa3f074a..6b2af87cd2 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxq.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c +@@ -528,12 +528,12 @@ mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx) + * synchronized, that might be broken on RQ restart + * and cause Rx malfunction, so queue stopping is + * not supported if vectorized Rx burst is engaged. +- * The routine pointer depends on the process +- * type, should perform check there. ++ * The routine pointer depends on the process type, ++ * should perform check there. MPRQ is not supported as well. + */ +- if (pkt_burst == mlx5_rx_burst_vec) { +- DRV_LOG(ERR, "Rx queue stop is not supported " +- "for vectorized Rx"); ++ if (pkt_burst != mlx5_rx_burst) { ++ DRV_LOG(ERR, "Rx queue stop is only supported " ++ "for non-vectorized single-packet Rx"); + rte_errno = EINVAL; + return -EINVAL; + } +@@ -1601,23 +1601,38 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + } else { + *actual_log_stride_num = config->mprq.log_stride_num; + } +- if (config->mprq.log_stride_size) { +- /* Checks if chosen size of stride is in supported range. */ +- if (config->mprq.log_stride_size > log_max_stride_size || +- config->mprq.log_stride_size < log_min_stride_size) { +- *actual_log_stride_size = log_def_stride_size; ++ /* Checks if chosen size of stride is in supported range. */ ++ if (config->mprq.log_stride_size > log_max_stride_size || ++ config->mprq.log_stride_size < log_min_stride_size) { ++ *actual_log_stride_size = log_def_stride_size; ++ DRV_LOG(WARNING, ++ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)", ++ dev->data->port_id, idx, ++ RTE_BIT32(log_def_stride_size)); ++ } else { ++ *actual_log_stride_size = config->mprq.log_stride_size; ++ } ++ /* Make the stride fit the mbuf size by default. 
*/ ++ if (*actual_log_stride_size == MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) { ++ if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) { + DRV_LOG(WARNING, +- "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)", +- dev->data->port_id, idx, +- RTE_BIT32(log_def_stride_size)); ++ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to match the mbuf size (%u)", ++ dev->data->port_id, idx, min_mbuf_size); ++ *actual_log_stride_size = log2above(min_mbuf_size); + } else { +- *actual_log_stride_size = config->mprq.log_stride_size; ++ goto unsupport; + } +- } else { +- if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) +- *actual_log_stride_size = log2above(min_mbuf_size); +- else ++ } ++ /* Make sure the stride size is greater than the headroom. */ ++ if (RTE_BIT32(*actual_log_stride_size) < RTE_PKTMBUF_HEADROOM) { ++ if (RTE_BIT32(log_max_stride_size) > RTE_PKTMBUF_HEADROOM) { ++ DRV_LOG(WARNING, ++ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to accommodate the headroom (%u)", ++ dev->data->port_id, idx, RTE_PKTMBUF_HEADROOM); ++ *actual_log_stride_size = log2above(RTE_PKTMBUF_HEADROOM); ++ } else { + goto unsupport; ++ } + } + log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size; + /* Check if WQE buffer size is supported by hardware. */ +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c +index 0e2eab068a..667475a93e 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c +@@ -51,6 +51,7 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, + uint16_t pkts_n) + { + uint16_t n = 0; ++ uint16_t skip_cnt; + unsigned int i; + #ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t err_bytes = 0; +@@ -74,7 +75,7 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, + rxq->stats.ipackets -= (pkts_n - n); + rxq->stats.ibytes -= err_bytes; + #endif +- mlx5_rx_err_handle(rxq, 1); ++ mlx5_rx_err_handle(rxq, 1, pkts_n, &skip_cnt); + return n; + } + +@@ -253,8 +254,6 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq, + } + rxq->rq_pi += i; + rxq->cq_ci += i; +- rte_io_wmb(); +- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + if (rq_ci != rxq->rq_ci) { + rxq->rq_ci = rq_ci; + rte_io_wmb(); +@@ -361,8 +360,6 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, + rxq->decompressed -= n; + } + } +- rte_io_wmb(); +- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + *no_cq = !rcvd_pkt; + return rcvd_pkt; + } +@@ -390,6 +387,7 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + bool no_cq = false; + + do { ++ err = 0; + nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, + &err, &no_cq); + if (unlikely(err | rxq->err_state)) +@@ -397,6 +395,8 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + tn += nb_rx; + if (unlikely(no_cq)) + break; ++ rte_io_wmb(); ++ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + } while (tn != pkts_n); + return tn; + } +@@ -524,6 +524,7 @@ mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + bool no_cq = false; + + do { ++ err = 0; + nb_rx = rxq_burst_mprq_v(rxq, pkts + tn, pkts_n - tn, + &err, &no_cq); + if (unlikely(err | rxq->err_state)) +@@ -531,6 +532,8 @@ mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + tn += nb_rx; + if (unlikely(no_cq)) + break; ++ rte_io_wmb(); ++ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + } while (tn != pkts_n); + 
return tn; + } +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +index 683a8f9a6c..204d17a8f2 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +@@ -783,7 +783,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +- unsigned int pos; ++ unsigned int pos, adj; + uint64_t n = 0; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; +@@ -866,7 +866,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + __vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + __vector unsigned char op_own, op_own_tmp1, op_own_tmp2; + __vector unsigned char opcode, owner_mask, invalid_mask; +- __vector unsigned char comp_mask; ++ __vector unsigned char comp_mask, mini_mask; + __vector unsigned char mask; + #ifdef MLX5_PMD_SOFT_COUNTERS + const __vector unsigned char lower_half = { +@@ -1174,6 +1174,16 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + (__vector unsigned long)mask); + + /* D.3 check error in opcode. */ ++ adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n); ++ mask = (__vector unsigned char)(__vector unsigned long){ ++ (adj * sizeof(uint16_t) * 8), 0}; ++ lshift = vec_splat((__vector unsigned long)mask, 0); ++ shmask = vec_cmpgt(shmax, lshift); ++ mini_mask = (__vector unsigned char) ++ vec_sl((__vector unsigned long)invalid_mask, lshift); ++ mini_mask = (__vector unsigned char) ++ vec_sel((__vector unsigned long)shmask, ++ (__vector unsigned long)mini_mask, shmask); + opcode = (__vector unsigned char) + vec_cmpeq((__vector unsigned int)resp_err_check, + (__vector unsigned int)opcode); +@@ -1182,7 +1192,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + (__vector unsigned int)zero); + opcode = (__vector unsigned char) + vec_andc((__vector unsigned long)opcode, +- (__vector unsigned long)invalid_mask); ++ (__vector unsigned long)mini_mask); + + /* D.4 mark if any error is set */ + *err |= ((__vector unsigned long)opcode)[0]; +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +index f7bbde4e0e..6d3c594e56 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +@@ -524,7 +524,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +- unsigned int pos; ++ unsigned int pos, adj; + uint64_t n = 0; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; +@@ -616,7 +616,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + pos += MLX5_VPMD_DESCS_PER_LOOP) { + uint16x4_t op_own; + uint16x4_t opcode, owner_mask, invalid_mask; +- uint16x4_t comp_mask; ++ uint16x4_t comp_mask, mini_mask; + uint16x4_t mask; + uint16x4_t byte_cnt; + uint32x4_t ptype_info, flow_tag; +@@ -647,6 +647,14 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + c0 = vld1q_u64((uint64_t *)(p0 + 48)); + /* Synchronize for loading the rest of blocks. */ + rte_io_rmb(); ++ /* B.0 (CQE 3) reload lower half of the block. */ ++ c3 = vld1q_lane_u64((uint64_t *)(p3 + 48), c3, 0); ++ /* B.0 (CQE 2) reload lower half of the block. */ ++ c2 = vld1q_lane_u64((uint64_t *)(p2 + 48), c2, 0); ++ /* B.0 (CQE 1) reload lower half of the block. 
*/ ++ c1 = vld1q_lane_u64((uint64_t *)(p1 + 48), c1, 0); ++ /* B.0 (CQE 0) reload lower half of the block. */ ++ c0 = vld1q_lane_u64((uint64_t *)(p0 + 48), c0, 0); + /* Prefetch next 4 CQEs. */ + if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) { + unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP; +@@ -780,8 +788,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + -1UL >> (n * sizeof(uint16_t) * 8) : 0); + invalid_mask = vorr_u16(invalid_mask, mask); + /* D.3 check error in opcode. */ ++ adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n); ++ mask = vcreate_u16(adj ? ++ -1UL >> ((n + 1) * sizeof(uint16_t) * 8) : -1UL); ++ mini_mask = vand_u16(invalid_mask, mask); + opcode = vceq_u16(resp_err_check, opcode); +- opcode = vbic_u16(opcode, invalid_mask); ++ opcode = vbic_u16(opcode, mini_mask); + /* D.4 mark if any error is set */ + *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0); + /* C.4 fill in mbuf - rearm_data and packet_type. */ +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +index 185d2695db..ab69af0c55 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +@@ -523,7 +523,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +- unsigned int pos; ++ unsigned int pos, adj; + uint64_t n = 0; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; +@@ -591,7 +591,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + __m128i op_own, op_own_tmp1, op_own_tmp2; + __m128i opcode, owner_mask, invalid_mask; +- __m128i comp_mask; ++ __m128i comp_mask, mini_mask; + __m128i mask; + #ifdef MLX5_PMD_SOFT_COUNTERS + __m128i byte_cnt; +@@ -729,9 +729,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + mask = _mm_sll_epi64(ones, mask); + invalid_mask = _mm_or_si128(invalid_mask, mask); + /* D.3 check error in opcode. */ ++ adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n); ++ mask = _mm_set_epi64x(0, adj * sizeof(uint16_t) * 8); ++ mini_mask = _mm_sll_epi64(invalid_mask, mask); + opcode = _mm_cmpeq_epi32(resp_err_check, opcode); + opcode = _mm_packs_epi32(opcode, zero); +- opcode = _mm_andnot_si128(invalid_mask, opcode); ++ opcode = _mm_andnot_si128(mini_mask, opcode); + /* D.4 mark if any error is set */ + *err |= _mm_cvtsi128_si64(opcode); + /* D.5 fill in mbuf - rearm_data and packet_type. 
*/ +diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c +index f64fa3587b..615e1d073d 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_stats.c ++++ b/dpdk/drivers/net/mlx5/mlx5_stats.c +@@ -288,10 +288,9 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev, + + if (n >= mlx5_xstats_n && xstats_names) { + for (i = 0; i != mlx5_xstats_n; ++i) { +- strncpy(xstats_names[i].name, ++ strlcpy(xstats_names[i].name, + xstats_ctrl->info[i].dpdk_name, + RTE_ETH_XSTATS_NAME_SIZE); +- xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; + } + } + mlx5_xstats_n = mlx5_txpp_xstats_get_names(dev, xstats_names, +diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c +index f54443ed1a..6479e44a94 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_trigger.c ++++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c +@@ -896,11 +896,11 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) + } + /* Indeed, only the first used queue needs to be checked. */ + if (txq_ctrl->hairpin_conf.manual_bind == 0) { ++ mlx5_txq_release(dev, i); + if (cur_port != rx_port) { + rte_errno = EINVAL; + DRV_LOG(ERR, "port %u and port %u are in" + " auto-bind mode", cur_port, rx_port); +- mlx5_txq_release(dev, i); + return -rte_errno; + } else { + return 0; +diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.c b/dpdk/drivers/net/mlx5/mlx5_tx.c +index a13c7e937c..14e1487e59 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_tx.c ++++ b/dpdk/drivers/net/mlx5/mlx5_tx.c +@@ -107,7 +107,7 @@ mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq, + mlx5_dump_debug_information(name, "MLX5 Error CQ:", + (const void *)((uintptr_t) + txq->cqes), +- sizeof(*err_cqe) * ++ sizeof(struct mlx5_cqe) * + (1 << txq->cqe_n)); + mlx5_dump_debug_information(name, "MLX5 Error SQ:", + (const void *)((uintptr_t) +diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.h b/dpdk/drivers/net/mlx5/mlx5_tx.h +index a44050a1ce..a056be7ca8 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_tx.h ++++ b/dpdk/drivers/net/mlx5/mlx5_tx.h +@@ -817,7 +817,7 @@ mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq, + struct mlx5_wqe_wseg *ws; + + ws = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE); +- ws->operation = rte_cpu_to_be_32(MLX5_WAIT_COND_CYCLIC_BIGGER); ++ ws->operation = rte_cpu_to_be_32(MLX5_WAIT_COND_CYCLIC_SMALLER); + ws->lkey = RTE_BE32(0); + ws->va_high = RTE_BE32(0); + ws->va_low = RTE_BE32(0); +diff --git a/dpdk/drivers/net/mlx5/mlx5_txpp.c b/dpdk/drivers/net/mlx5/mlx5_txpp.c +index f853a67f58..0e1da1d5f5 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_txpp.c ++++ b/dpdk/drivers/net/mlx5/mlx5_txpp.c +@@ -969,6 +969,8 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; ++ struct mlx5_proc_priv *ppriv; ++ uint64_t ts; + int ret; + + if (sh->txpp.refcnt) { +@@ -979,7 +981,6 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) + rte_int128_t u128; + struct mlx5_cqe_ts cts; + } to; +- uint64_t ts; + + mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128); + if (to.cts.op_own >> 4) { +@@ -994,6 +995,18 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) + *timestamp = ts; + return 0; + } ++ /* Check and try to map HCA PIC BAR to allow reading real time. */ ++ ppriv = dev->process_private; ++ if (ppriv && !ppriv->hca_bar && ++ sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device)) ++ mlx5_txpp_map_hca_bar(dev); ++ /* Check if we can read timestamp directly from hardware. 
*/ ++ if (ppriv && ppriv->hca_bar) { ++ ts = MLX5_GET64(initial_seg, ppriv->hca_bar, real_time); ++ ts = mlx5_txpp_convert_rx_ts(sh, ts); ++ *timestamp = ts; ++ return 0; ++ } + /* Not supported in isolated mode - kernel does not see the CQEs. */ + if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY) + return -ENOTSUP; +@@ -1050,11 +1063,9 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + + if (n >= n_used + n_txpp && xstats_names) { + for (i = 0; i < n_txpp; ++i) { +- strncpy(xstats_names[i + n_used].name, ++ strlcpy(xstats_names[i + n_used].name, + mlx5_txpp_stat_names[i], + RTE_ETH_XSTATS_NAME_SIZE); +- xstats_names[i + n_used].name +- [RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; + } + } + return n_used + n_txpp; +diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c +index 88d8213f55..a31e1b5494 100644 +--- a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c ++++ b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c +@@ -416,3 +416,33 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) + RTE_SET_USED(dev); + return -ENOTSUP; + } ++ ++/** ++ * Unmaps HCA PCI BAR from the current process address space. ++ * ++ * @param dev ++ * Pointer to Ethernet device structure. ++ */ ++void mlx5_txpp_unmap_hca_bar(struct rte_eth_dev *dev) ++{ ++ RTE_SET_USED(dev); ++} ++ ++/** ++ * Maps HCA PCI BAR to the current process address space. ++ * Stores pointer in the process private structure allowing ++ * to read internal and real time counter directly from the HW. ++ * ++ * @param dev ++ * Pointer to Ethernet device structure. ++ * ++ * @return ++ * 0 on success and not NULL pointer to mapped area in process structure. ++ * negative otherwise and NULL pointer ++ */ ++int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev) ++{ ++ RTE_SET_USED(dev); ++ rte_errno = ENOTSUP; ++ return -ENOTSUP; ++} +diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_os.c +index 77f04cc931..f401264b61 100644 +--- a/dpdk/drivers/net/mlx5/windows/mlx5_os.c ++++ b/dpdk/drivers/net/mlx5/windows/mlx5_os.c +@@ -193,8 +193,8 @@ mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh) + * Once DPDK supports it, take max size from device attr. 
+ */ + sh->dev_cap.ind_table_max_size = +- RTE_MIN(1 << hca_attr->rss_ind_tbl_cap, +- (unsigned int)RTE_ETH_RSS_RETA_SIZE_512); ++ RTE_MIN((uint32_t)1 << hca_attr->rss_ind_tbl_cap, ++ (uint32_t)RTE_ETH_RSS_RETA_SIZE_512); + DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u", + sh->dev_cap.ind_table_max_size); + } +diff --git a/dpdk/drivers/net/netvsc/hn_rndis.c b/dpdk/drivers/net/netvsc/hn_rndis.c +index e6f1f28768..29c6009b2c 100644 +--- a/dpdk/drivers/net/netvsc/hn_rndis.c ++++ b/dpdk/drivers/net/netvsc/hn_rndis.c +@@ -329,7 +329,8 @@ void hn_rndis_receive_response(struct hn_data *hv, + + hn_rndis_dump(data); + +- if (len < sizeof(3 * sizeof(uint32_t))) { ++ /* Check we can read first three data fields from RNDIS header */ ++ if (len < 3 * sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, + "missing RNDIS header %u", len); + return; +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.c b/dpdk/drivers/net/nfp/flower/nfp_flower.c +index e447258d97..0661c38f08 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower.c ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower.c +@@ -25,7 +25,6 @@ + #include "nfp_flower_cmsg.h" + + #define CTRL_VNIC_NB_DESC 512 +-#define DEFAULT_FLBUF_SIZE 9216 + + static void + nfp_pf_repr_enable_queues(struct rte_eth_dev *dev) +@@ -451,7 +450,7 @@ nfp_flower_pf_recv_pkts(void *rx_queue, + rxds->vals[1] = 0; + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); + rxds->fld.dd = 0; +- rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; ++ rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff; + rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; + nb_hold++; + +@@ -631,13 +630,6 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) + pf_dev = hw->pf_dev; + pci_dev = hw->pf_dev->pci_dev; + +- /* NFP can not handle DMA addresses requiring more than 40 bits */ +- if (rte_mem_check_dma_mask(40)) { +- PMD_INIT_LOG(ERR, "Device %s can not be used: restricted dma mask to 40 bits!\n", +- pci_dev->device.name); +- return -ENODEV; +- }; +- + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; +@@ -666,6 +658,9 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) + hw->mtu = hw->max_mtu; + hw->flbufsz = DEFAULT_FLBUF_SIZE; + ++ if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0) ++ return -ENODEV; ++ + /* read the Rx offset configured from firmware */ + if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) + hw->rx_offset = NFP_NET_RX_OFFSET; +@@ -703,6 +698,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + struct rte_eth_dev *eth_dev; + const struct rte_memzone *tz; + struct nfp_app_fw_flower *app_fw_flower; ++ char ctrl_pktmbuf_pool_name[RTE_MEMZONE_NAMESIZE]; + + /* Set up some pointers here for ease of use */ + pf_dev = hw->pf_dev; +@@ -736,7 +732,10 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + + /* Create a mbuf pool for the ctrl vNIC */ + numa_node = rte_socket_id(); +- app_fw_flower->ctrl_pktmbuf_pool = rte_pktmbuf_pool_create("ctrl_mbuf_pool", ++ snprintf(ctrl_pktmbuf_pool_name, sizeof(ctrl_pktmbuf_pool_name), ++ "%s_ctrlmp", pf_dev->pci_dev->device.name); ++ app_fw_flower->ctrl_pktmbuf_pool = ++ rte_pktmbuf_pool_create(ctrl_pktmbuf_pool_name, + 4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node); + if (app_fw_flower->ctrl_pktmbuf_pool == NULL) { + PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vnic failed"); +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c +index 3631e764fe..1c6340f3d7 100644 
+--- a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c +@@ -123,7 +123,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, + rxds->vals[1] = 0; + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); + rxds->fld.dd = 0; +- rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; ++ rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff; + rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; + nb_hold++; + +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c +index 5809c838b3..d319aefb08 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c +@@ -528,7 +528,7 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { + .stats_reset = nfp_flower_repr_stats_reset, + + .promiscuous_enable = nfp_net_promisc_enable, +- .promiscuous_disable = nfp_net_promisc_enable, ++ .promiscuous_disable = nfp_net_promisc_disable, + + .mac_addr_set = nfp_flower_repr_mac_addr_set, + }; +@@ -549,7 +549,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { + .stats_reset = nfp_flower_repr_stats_reset, + + .promiscuous_enable = nfp_net_promisc_enable, +- .promiscuous_disable = nfp_net_promisc_enable, ++ .promiscuous_disable = nfp_net_promisc_disable, + + .mac_addr_set = nfp_flower_repr_mac_addr_set, + +@@ -730,7 +730,9 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + { + int i; + int ret; ++ const char *pci_name; + struct rte_eth_dev *eth_dev; ++ struct rte_pci_device *pci_dev; + struct nfp_eth_table *nfp_eth_table; + struct nfp_eth_table_port *eth_port; + struct nfp_flower_representor flower_repr = { +@@ -753,7 +755,13 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + + /* PF vNIC reprs get a random MAC address */ + rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); +- sprintf(flower_repr.name, "flower_repr_pf"); ++ ++ pci_dev = app_fw_flower->pf_hw->pf_dev->pci_dev; ++ ++ pci_name = strchr(pci_dev->name, ':') + 1; ++ ++ snprintf(flower_repr.name, sizeof(flower_repr.name), ++ "%s_repr_pf", pci_name); + + /* Create a eth_dev for this representor */ + ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, +@@ -775,7 +783,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + /* Copy the real mac of the interface to the representor struct */ + rte_ether_addr_copy((struct rte_ether_addr *)eth_port->mac_addr, + &flower_repr.mac_addr); +- sprintf(flower_repr.name, "flower_repr_p%d", i); ++ snprintf(flower_repr.name, sizeof(flower_repr.name), ++ "%s_repr_p%d", pci_name, i); + + /* + * Create a eth_dev for this representor +@@ -806,7 +815,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + + /* VF reprs get a random MAC address */ + rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); +- sprintf(flower_repr.name, "flower_repr_vf%d", i); ++ snprintf(flower_repr.name, sizeof(flower_repr.name), ++ "%s_repr_vf%d", pci_name, i); + + /* This will also allocate private memory for the device*/ + ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, +diff --git a/dpdk/drivers/net/nfp/nfp_common.c b/dpdk/drivers/net/nfp/nfp_common.c +index 71711bfa22..b673370f20 100644 +--- a/dpdk/drivers/net/nfp/nfp_common.c ++++ b/dpdk/drivers/net/nfp/nfp_common.c +@@ -977,9 +977,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EBUSY; + } + +- /* MTU larger then current mbufsize not supported */ ++ /* MTU larger than current mbufsize not 
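/*
 * Illustrative sketch (not part of the imported patch): the representor
 * renaming above replaces sprintf() into a fixed-size name field with a
 * bounded snprintf(), and prefixes the name with the PCI bus address so
 * two NFP cards no longer collide on "flower_repr_pf". Same pattern in
 * isolation; the NULL check is added here for self-containment, the
 * patch assumes a well-formed device name:
 */
#include <stdio.h>
#include <string.h>

static void repr_name(char *dst, size_t len, const char *pci_name, int port)
{
    const char *p = strchr(pci_name, ':');       /* skip the PCI domain part */

    p = (p != NULL) ? p + 1 : pci_name;
    snprintf(dst, len, "%s_repr_p%d", p, port);  /* e.g. "01:00.0_repr_p0" */
}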
supported */ + if (mtu > hw->flbufsz) { +- PMD_DRV_LOG(ERR, "MTU (%u) larger then current mbufsize (%u) not supported", ++ PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported", + mtu, hw->flbufsz); + return -ERANGE; + } +@@ -1256,7 +1256,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, + cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL); + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) +- rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP; ++ rss_hf |= RTE_ETH_RSS_IPV4; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; +@@ -1271,7 +1271,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) +- rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP; ++ rss_hf |= RTE_ETH_RSS_IPV6; + + /* Propagate current RSS hash functions to caller */ + rss_conf->rss_hf = rss_hf; +@@ -1413,6 +1413,24 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *hw, + RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE); + RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE); + RTE_LOG_REGISTER_SUFFIX(nfp_logtype_cpp, cpp, NOTICE); ++/* ++ * The firmware with NFD3 can not handle DMA address requiring more ++ * than 40 bits ++ */ ++int ++nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name) ++{ ++ if (NFD_CFG_CLASS_VER_of(hw->ver) == NFP_NET_CFG_VERSION_DP_NFD3 && ++ rte_mem_check_dma_mask(40) != 0) { ++ PMD_DRV_LOG(ERR, ++ "The device %s can't be used: restricted dma mask to 40 bits!", ++ name); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + /* + * Local variables: + * c-file-style: "Linux" +diff --git a/dpdk/drivers/net/nfp/nfp_common.h b/dpdk/drivers/net/nfp/nfp_common.h +index 36c19b47e4..67c8dc33d8 100644 +--- a/dpdk/drivers/net/nfp/nfp_common.h ++++ b/dpdk/drivers/net/nfp/nfp_common.h +@@ -111,6 +111,7 @@ struct nfp_net_adapter; + + /* Maximum supported NFP frame size (MTU + layer 2 headers) */ + #define NFP_FRAME_SIZE_MAX 10048 ++#define DEFAULT_FLBUF_SIZE 9216 + + #include + #include +@@ -447,6 +448,7 @@ void nfp_net_close_rx_queue(struct rte_eth_dev *dev); + void nfp_net_stop_tx_queue(struct rte_eth_dev *dev); + void nfp_net_close_tx_queue(struct rte_eth_dev *dev); + int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, size_t idx, uint16_t port); ++int nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name); + + #define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\ + (&((struct nfp_net_adapter *)adapter)->hw) +diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c +index 0956ea81df..29491f6e6d 100644 +--- a/dpdk/drivers/net/nfp/nfp_ethdev.c ++++ b/dpdk/drivers/net/nfp/nfp_ethdev.c +@@ -517,14 +517,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + /* Use backpointer to the CoreNIC app struct */ + app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); + +- /* NFP can not handle DMA addresses requiring more than 40 bits */ +- if (rte_mem_check_dma_mask(40)) { +- RTE_LOG(ERR, PMD, +- "device %s can not be used: restricted dma mask to 40 bits!\n", +- pci_dev->device.name); +- return -ENODEV; +- } +- + port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; + if (port < 0 || port > 7) { + PMD_DRV_LOG(ERR, "Port value is wrong"); +@@ -572,6 +564,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + + hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); + ++ if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0) ++ return -ENODEV; ++ + if (nfp_net_ethdev_ops_mount(hw, eth_dev)) + return -EINVAL; + +@@ 
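/*
 * Illustrative sketch (not part of the imported patch):
 * nfp_net_check_dma_mask() above narrows the old blanket 40-bit DMA
 * restriction; only firmware using the NFD3 datapath is now rejected
 * when IOVAs may exceed 40 bits. The shape of the check as a pure
 * function, where iova_fits_40_bits stands in for
 * rte_mem_check_dma_mask(40) returning 0:
 */
#include <errno.h>
#include <stdbool.h>

enum dp_class { DP_NFD3, DP_NFDK };  /* stand-ins for the config-BAR version field */

static int check_dma_mask(enum dp_class dp, bool iova_fits_40_bits)
{
    if (dp == DP_NFD3 && !iova_fits_40_bits)
        return -ENODEV;  /* NFD3 firmware cannot address more than 40 bits */
    return 0;
}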
-609,6 +604,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); + hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); + hw->mtu = RTE_ETHER_MTU; ++ hw->flbufsz = DEFAULT_FLBUF_SIZE; + + /* VLAN insertion is incompatible with LSOv2 */ + if (hw->cap & NFP_NET_CFG_CTRL_LSO2) +@@ -724,7 +720,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) + goto load_fw; + /* Then try the PCI name */ + snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, +- dev->device.name); ++ dev->name); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) +@@ -933,6 +929,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + int ret; + int err = 0; + uint64_t addr; ++ uint32_t cpp_id; + struct nfp_cpp *cpp; + enum nfp_app_fw_id app_fw_id; + struct nfp_pf_dev *pf_dev; +@@ -1032,7 +1029,8 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + goto pf_cleanup; + } + +- pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0, ++ cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); ++ pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, cpp_id, + addr, NFP_QCP_QUEUE_AREA_SZ, + &pf_dev->hwqueues_area); + if (pf_dev->hw_queues == NULL) { +diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +index d1427b63bc..1877d6b76b 100644 +--- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c ++++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +@@ -291,14 +291,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + +- /* NFP can not handle DMA addresses requiring more than 40 bits */ +- if (rte_mem_check_dma_mask(40)) { +- RTE_LOG(ERR, PMD, +- "device %s can not be used: restricted dma mask to 40 bits!\n", +- pci_dev->device.name); +- return -ENODEV; +- } +- + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; +@@ -312,6 +304,9 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + + hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); + ++ if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0) ++ return -ENODEV; ++ + if (nfp_netvf_ethdev_ops_mount(hw, eth_dev)) + return -EINVAL; + +@@ -366,6 +361,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); + hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); + hw->mtu = RTE_ETHER_MTU; ++ hw->flbufsz = DEFAULT_FLBUF_SIZE; + + /* VLAN insertion is incompatible with LSOv2 */ + if (hw->cap & NFP_NET_CFG_CTRL_LSO2) +diff --git a/dpdk/drivers/net/nfp/nfp_flow.c b/dpdk/drivers/net/nfp/nfp_flow.c +index 6f79d950db..faa0eda325 100644 +--- a/dpdk/drivers/net/nfp/nfp_flow.c ++++ b/dpdk/drivers/net/nfp/nfp_flow.c +@@ -285,7 +285,7 @@ nfp_check_mask_remove(struct nfp_flow_priv *priv, + + rte_free(mask_entry); + if (meta_flags) +- *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; ++ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; + } + + return true; +@@ -339,7 +339,7 @@ nfp_flow_table_search(struct nfp_flow_priv *priv, + } + + static struct rte_flow * +-nfp_flow_alloc(struct nfp_fl_key_ls *key_layer) ++nfp_flow_alloc(struct nfp_fl_key_ls *key_layer, uint32_t port_id) + { + char *tmp; + size_t len; +@@ -357,6 +357,7 @@ nfp_flow_alloc(struct nfp_fl_key_ls *key_layer) + + nfp_flow->length = len; + ++ nfp_flow->port_id = port_id; + payload = &nfp_flow->payload; + payload->meta = (struct nfp_fl_rule_metadata *)tmp; + payload->unmasked_data = tmp + sizeof(struct nfp_fl_rule_metadata); +@@ -727,7 +728,7 @@ 
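/*
 * Illustrative sketch (not part of the imported patch):
 * nfp_cpp_map_area() now takes one pre-composed 32-bit CPP ID instead of
 * separate domain/target arguments, so callers such as nfp_pf_init()
 * build it once with NFP_CPP_ISLAND_ID(). A packed-ID scheme of this
 * general shape -- the field layout below is illustrative, not the
 * driver's exact encoding:
 */
#include <stdint.h>

static uint32_t make_cpp_id(uint8_t target, uint8_t action,
                            uint8_t token, uint8_t island)
{
    return ((uint32_t)target << 24) | ((uint32_t)action << 16) |
           ((uint32_t)token << 8) | (uint32_t)island;
}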
nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], + ethdev = &rte_eth_devices[port_id->id]; + representor = (struct nfp_flower_representor *) + ethdev->data->dev_private; +- key_ls->port = rte_cpu_to_be_32(representor->port_id); ++ key_ls->port = representor->port_id; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_VLAN detected"); +@@ -1220,6 +1221,7 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower, + bool is_mask, + bool is_outer_layer) + { ++ uint32_t vtc_flow; + struct nfp_flower_ipv6 *ipv6; + const struct rte_ipv6_hdr *hdr; + struct nfp_flower_meta_tci *meta_tci; +@@ -1243,12 +1245,12 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower, + + hdr = is_mask ? &mask->hdr : &spec->hdr; + ++ vtc_flow = rte_be_to_cpu_32(hdr->vtc_flow); + if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_GRE)) { + ipv6_gre_tun = (struct nfp_flower_ipv6_gre_tun *)*mbuf_off; + +- ipv6_gre_tun->ip_ext.tos = (hdr->vtc_flow & +- RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT; ++ ipv6_gre_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT; + ipv6_gre_tun->ip_ext.ttl = hdr->hop_limits; + memcpy(ipv6_gre_tun->ipv6.ipv6_src, hdr->src_addr, + sizeof(ipv6_gre_tun->ipv6.ipv6_src)); +@@ -1257,8 +1259,7 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower, + } else { + ipv6_udp_tun = (struct nfp_flower_ipv6_udp_tun *)*mbuf_off; + +- ipv6_udp_tun->ip_ext.tos = (hdr->vtc_flow & +- RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT; ++ ipv6_udp_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT; + ipv6_udp_tun->ip_ext.ttl = hdr->hop_limits; + memcpy(ipv6_udp_tun->ipv6.ipv6_src, hdr->src_addr, + sizeof(ipv6_udp_tun->ipv6.ipv6_src)); +@@ -1279,10 +1280,10 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower, + *mbuf_off += sizeof(struct nfp_flower_tp_ports); + + hdr = is_mask ? 
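/*
 * Illustrative sketch (not part of the imported patch): the IPv6 hunks
 * here fix an endianness bug. hdr->vtc_flow is a big-endian word
 * (version:4 | traffic class:8 | flow label:20), and the old code
 * shifted the raw network-order value, which only worked on big-endian
 * hosts. Converting first, shifting by RTE_IPV6_HDR_TC_SHIFT (20), and
 * letting the 8-bit store drop the version nibble yields the traffic
 * class portably:
 */
#include <stdint.h>
#include <arpa/inet.h>  /* ntohl() standing in for rte_be_to_cpu_32() */

static uint8_t ipv6_tos(uint32_t vtc_flow_be)
{
    uint32_t vtc_flow = ntohl(vtc_flow_be);

    return (uint8_t)(vtc_flow >> 20);  /* low 8 of the 12 shifted bits = TC */
}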
&mask->hdr : &spec->hdr; ++ vtc_flow = rte_be_to_cpu_32(hdr->vtc_flow); + ipv6 = (struct nfp_flower_ipv6 *)*mbuf_off; + +- ipv6->ip_ext.tos = (hdr->vtc_flow & RTE_IPV6_HDR_TC_MASK) >> +- RTE_IPV6_HDR_TC_SHIFT; ++ ipv6->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT; + ipv6->ip_ext.proto = hdr->proto; + ipv6->ip_ext.ttl = hdr->hop_limits; + memcpy(ipv6->ipv6_src, hdr->src_addr, sizeof(ipv6->ipv6_src)); +@@ -1897,6 +1898,19 @@ nfp_flow_inner_item_get(const struct rte_flow_item items[], + return false; + } + ++static bool ++nfp_flow_tcp_flag_check(const struct rte_flow_item items[]) ++{ ++ const struct rte_flow_item *item; ++ ++ for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) { ++ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) ++ return true; ++ } ++ ++ return false; ++} ++ + static int + nfp_flow_compile_item_proc(struct nfp_flower_representor *repr, + const struct rte_flow_item items[], +@@ -1992,6 +2006,9 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor, + mbuf_off_mask += sizeof(struct nfp_flower_ext_meta); + } + ++ if (nfp_flow_tcp_flag_check(items)) ++ nfp_flow->tcp_flag = true; ++ + /* Check if this is a tunnel flow and get the inner item*/ + is_tun_flow = nfp_flow_inner_item_get(items, &loop_item); + if (is_tun_flow) +@@ -2021,7 +2038,8 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor, + static int + nfp_flow_action_output(char *act_data, + const struct rte_flow_action *action, +- struct nfp_fl_rule_metadata *nfp_flow_meta) ++ struct nfp_fl_rule_metadata *nfp_flow_meta, ++ uint32_t output_cnt) + { + size_t act_size; + struct rte_eth_dev *ethdev; +@@ -2040,8 +2058,9 @@ nfp_flow_action_output(char *act_data, + output = (struct nfp_fl_act_output *)act_data; + output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT; + output->head.len_lw = act_size >> NFP_FL_LW_SIZ; +- output->flags = rte_cpu_to_be_16(NFP_FL_OUT_FLAGS_LAST); + output->port = rte_cpu_to_be_32(representor->port_id); ++ if (output_cnt == 0) ++ output->flags = rte_cpu_to_be_16(NFP_FL_OUT_FLAGS_LAST); + + nfp_flow_meta->shortcut = rte_cpu_to_be_32(representor->port_id); + +@@ -2054,6 +2073,7 @@ nfp_flow_action_set_mac(char *act_data, + bool mac_src_flag, + bool mac_set_flag) + { ++ uint8_t i; + size_t act_size; + struct nfp_fl_act_set_eth *set_eth; + const struct rte_flow_action_set_mac *set_mac; +@@ -2072,9 +2092,13 @@ nfp_flow_action_set_mac(char *act_data, + if (mac_src_flag) { + rte_memcpy(&set_eth->eth_addr[RTE_ETHER_ADDR_LEN], + set_mac->mac_addr, RTE_ETHER_ADDR_LEN); ++ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) ++ set_eth->eth_addr_mask[RTE_ETHER_ADDR_LEN + i] = 0xff; + } else { + rte_memcpy(&set_eth->eth_addr[0], + set_mac->mac_addr, RTE_ETHER_ADDR_LEN); ++ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) ++ set_eth->eth_addr_mask[i] = 0xff; + } + } + +@@ -2115,10 +2139,13 @@ nfp_flow_action_set_ip(char *act_data, + set_ip->reserved = 0; + + set_ipv4 = (const struct rte_flow_action_set_ipv4 *)action->conf; +- if (ip_src_flag) ++ if (ip_src_flag) { + set_ip->ipv4_src = set_ipv4->ipv4_addr; +- else ++ set_ip->ipv4_src_mask = RTE_BE32(0xffffffff); ++ } else { + set_ip->ipv4_dst = set_ipv4->ipv4_addr; ++ set_ip->ipv4_dst_mask = RTE_BE32(0xffffffff); ++ } + } + + static void +@@ -2127,6 +2154,7 @@ nfp_flow_action_set_ipv6(char *act_data, + bool ip_src_flag) + { + int i; ++ rte_be32_t tmp; + size_t act_size; + struct nfp_fl_act_set_ipv6_addr *set_ip; + const struct rte_flow_action_set_ipv6 *set_ipv6; +@@ -2143,15 +2171,19 @@ nfp_flow_action_set_ipv6(char *act_data, + set_ip->head.len_lw 
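/*
 * Illustrative sketch (not part of the imported patch): with several
 * PORT_ID actions in one flow, only the final output may carry
 * NFP_FL_OUT_FLAGS_LAST, so the action compiler above first counts the
 * outputs, decrements before emitting each one, and flags the
 * descriptor when the count reaches zero. The pattern in isolation:
 */
#include <stdbool.h>

static void emit_outputs(unsigned int output_cnt)
{
    while (output_cnt > 0) {
        output_cnt--;                   /* count-- before the emit call */
        bool last = (output_cnt == 0);  /* -> NFP_FL_OUT_FLAGS_LAST */

        (void)last;                     /* build one output descriptor here */
    }
}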
= act_size >> NFP_FL_LW_SIZ; + set_ip->reserved = 0; + +- for (i = 0; i < 4; i++) +- set_ip->ipv6[i].exact = set_ipv6->ipv6_addr[i]; ++ for (i = 0; i < 4; i++) { ++ rte_memcpy(&tmp, &set_ipv6->ipv6_addr[i * 4], 4); ++ set_ip->ipv6[i].exact = tmp; ++ set_ip->ipv6[i].mask = RTE_BE32(0xffffffff); ++ } + } + + static void + nfp_flow_action_set_tp(char *act_data, + const struct rte_flow_action *action, + bool tp_src_flag, +- bool tp_set_flag) ++ bool tp_set_flag, ++ bool tcp_flag) + { + size_t act_size; + struct nfp_fl_act_set_tport *set_tp; +@@ -2163,21 +2195,29 @@ nfp_flow_action_set_tp(char *act_data, + set_tp = (struct nfp_fl_act_set_tport *)act_data; + + act_size = sizeof(struct nfp_fl_act_set_tport); +- set_tp->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TCP; ++ if (tcp_flag) ++ set_tp->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TCP; ++ else ++ set_tp->head.jump_id = NFP_FL_ACTION_OPCODE_SET_UDP; + set_tp->head.len_lw = act_size >> NFP_FL_LW_SIZ; + set_tp->reserved = 0; + + set_tp_conf = (const struct rte_flow_action_set_tp *)action->conf; +- if (tp_src_flag) ++ if (tp_src_flag) { + set_tp->src_port = set_tp_conf->port; +- else ++ set_tp->src_port_mask = RTE_BE16(0xffff); ++ } else { + set_tp->dst_port = set_tp_conf->port; ++ set_tp->dst_port_mask = RTE_BE16(0xffff); ++ } + } + + static int + nfp_flow_action_push_vlan(char *act_data, + const struct rte_flow_action *action) + { ++ uint8_t pcp; ++ uint16_t vid; + size_t act_size; + struct nfp_fl_act_push_vlan *push_vlan; + const struct rte_flow_action_of_push_vlan *push_vlan_conf; +@@ -2200,9 +2240,11 @@ nfp_flow_action_push_vlan(char *act_data, + (action + 1)->conf; + vlan_vid_conf = (const struct rte_flow_action_of_set_vlan_vid *) + (action + 2)->conf; ++ ++ vid = rte_be_to_cpu_16(vlan_vid_conf->vlan_vid) & 0x0fff; ++ pcp = vlan_pcp_conf->vlan_pcp & 0x07; + push_vlan->vlan_tpid = push_vlan_conf->ethertype; +- push_vlan->vlan_tci = ((vlan_pcp_conf->vlan_pcp & 0x07) << 13) | +- (vlan_vid_conf->vlan_vid & 0x0fff); ++ push_vlan->vlan_tci = rte_cpu_to_be_16(vid | (pcp << 13)); + + return 0; + } +@@ -2227,6 +2269,7 @@ nfp_flow_action_set_ttl(char *act_data, + + ttl_conf = (const struct rte_flow_action_set_ttl *)action->conf; + ttl_tos->ipv4_ttl = ttl_conf->ttl_value; ++ ttl_tos->ipv4_ttl_mask = 0xff; + ttl_tos->reserved = 0; + } + +@@ -2250,6 +2293,7 @@ nfp_flow_action_set_hl(char *act_data, + + ttl_conf = (const struct rte_flow_action_set_ttl *)action->conf; + tc_hl->ipv6_hop_limit = ttl_conf->ttl_value; ++ tc_hl->ipv6_hop_limit_mask = 0xff; + tc_hl->reserved = 0; + } + +@@ -2273,6 +2317,7 @@ nfp_flow_action_set_tos(char *act_data, + + tos_conf = (const struct rte_flow_action_set_dscp *)action->conf; + ttl_tos->ipv4_tos = tos_conf->dscp; ++ ttl_tos->ipv4_tos_mask = 0xff; + ttl_tos->reserved = 0; + } + +@@ -2296,6 +2341,7 @@ nfp_flow_action_set_tc(char *act_data, + + tos_conf = (const struct rte_flow_action_set_dscp *)action->conf; + tc_hl->ipv6_tc = tos_conf->dscp; ++ tc_hl->ipv6_tc_mask = 0xff; + tc_hl->reserved = 0; + } + +@@ -2443,10 +2489,10 @@ nfp_flower_add_tun_neigh_v4_decap(struct nfp_app_fw_flower *app_fw_flower, + if (nfp_flower_support_decap_v2(app_fw_flower)) { + if (meta_tci->tci != 0) { + payload.ext.vlan_tci = meta_tci->tci; +- payload.ext.vlan_tpid = 0x88a8; ++ payload.ext.vlan_tpid = RTE_BE16(0x88a8); + } else { +- payload.ext.vlan_tci = 0xffff; +- payload.ext.vlan_tpid = 0xffff; ++ payload.ext.vlan_tci = RTE_BE16(0xffff); ++ payload.ext.vlan_tpid = RTE_BE16(0xffff); + } + payload.ext.host_ctx = nfp_flow_meta->host_ctx_id; + } +@@ 
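/*
 * Illustrative sketch (not part of the imported patch): the push_vlan
 * fix above assembles the TCI in host byte order (PCP in bits 15..13,
 * VID in bits 11..0) and converts once at the end, rather than OR-ing a
 * host-order PCP into a big-endian VID. Equivalent standalone helper:
 */
#include <stdint.h>
#include <arpa/inet.h>  /* htons()/ntohs() ~ rte_cpu_to_be_16()/rte_be_to_cpu_16() */

static uint16_t make_vlan_tci_be(uint8_t pcp, uint16_t vid_be)
{
    uint16_t vid = ntohs(vid_be) & 0x0fff;   /* 12-bit VLAN ID */
    uint16_t prio = (uint16_t)(pcp & 0x07);  /* 3-bit priority */

    return htons((uint16_t)(prio << 13) | vid);
}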
-2570,10 +2616,10 @@ nfp_flower_add_tun_neigh_v6_decap(struct nfp_app_fw_flower *app_fw_flower, + if (nfp_flower_support_decap_v2(app_fw_flower)) { + if (meta_tci->tci != 0) { + payload.ext.vlan_tci = meta_tci->tci; +- payload.ext.vlan_tpid = 0x88a8; ++ payload.ext.vlan_tpid = RTE_BE16(0x88a8); + } else { +- payload.ext.vlan_tci = 0xffff; +- payload.ext.vlan_tpid = 0xffff; ++ payload.ext.vlan_tci = RTE_BE16(0xffff); ++ payload.ext.vlan_tpid = RTE_BE16(0xffff); + } + payload.ext.host_ctx = nfp_flow_meta->host_ctx_id; + } +@@ -2675,6 +2721,7 @@ nfp_flow_action_vxlan_encap_v4(struct nfp_app_fw_flower *app_fw_flower, + struct nfp_fl_rule_metadata *nfp_flow_meta, + struct nfp_fl_tun *tun) + { ++ uint64_t tun_id; + struct nfp_fl_act_pre_tun *pre_tun; + struct nfp_fl_act_set_tun *set_tun; + const struct rte_flow_item_eth *eth; +@@ -2693,7 +2740,8 @@ nfp_flow_action_vxlan_encap_v4(struct nfp_app_fw_flower *app_fw_flower, + + set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); + memset(set_tun, 0, act_set_size); +- nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_VXLAN, vxlan->hdr.vx_vni, ++ tun_id = rte_be_to_cpu_32(vxlan->hdr.vx_vni) >> 8; ++ nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_VXLAN, tun_id, + ipv4->hdr.time_to_live, ipv4->hdr.type_of_service); + set_tun->tun_flags = vxlan->hdr.vx_flags; + +@@ -2710,6 +2758,8 @@ nfp_flow_action_vxlan_encap_v6(struct nfp_app_fw_flower *app_fw_flower, + struct nfp_fl_rule_metadata *nfp_flow_meta, + struct nfp_fl_tun *tun) + { ++ uint8_t tos; ++ uint64_t tun_id; + struct nfp_fl_act_pre_tun *pre_tun; + struct nfp_fl_act_set_tun *set_tun; + const struct rte_flow_item_eth *eth; +@@ -2728,9 +2778,10 @@ nfp_flow_action_vxlan_encap_v6(struct nfp_app_fw_flower *app_fw_flower, + + set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); + memset(set_tun, 0, act_set_size); +- nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_VXLAN, vxlan->hdr.vx_vni, +- ipv6->hdr.hop_limits, +- (ipv6->hdr.vtc_flow >> RTE_IPV6_HDR_TC_SHIFT) & 0xff); ++ tun_id = rte_be_to_cpu_32(vxlan->hdr.vx_vni) >> 8; ++ tos = rte_be_to_cpu_32(ipv6->hdr.vtc_flow) >> RTE_IPV6_HDR_TC_SHIFT; ++ nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_VXLAN, tun_id, ++ ipv6->hdr.hop_limits, tos); + set_tun->tun_flags = vxlan->hdr.vx_flags; + + /* Send the tunnel neighbor cmsg to fw */ +@@ -3094,7 +3145,7 @@ nfp_flow_action_geneve_encap_v6(struct nfp_app_fw_flower *app_fw_flower, + + set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); + memset(set_tun, 0, act_set_size); +- tos = (ipv6->hdr.vtc_flow >> RTE_IPV6_HDR_TC_SHIFT) & 0xff; ++ tos = rte_be_to_cpu_32(ipv6->hdr.vtc_flow) >> RTE_IPV6_HDR_TC_SHIFT; + tun_id = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2]; + nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_GENEVE, tun_id, + ipv6->hdr.hop_limits, tos); +@@ -3113,6 +3164,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, + struct nfp_fl_rule_metadata *nfp_flow_meta, + struct nfp_fl_tun *tun) + { ++ uint64_t tun_id; + const struct rte_ether_hdr *eth; + const struct rte_flow_item_ipv4 *ipv4; + const struct rte_flow_item_gre *gre; +@@ -3124,6 +3176,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, + eth = (const struct rte_ether_hdr *)raw_encap->data; + ipv4 = (const struct rte_flow_item_ipv4 *)(eth + 1); + gre = (const struct rte_flow_item_gre *)(ipv4 + 1); ++ tun_id = rte_be_to_cpu_32(*(const rte_be32_t *)(gre + 1)); + + pre_tun = (struct nfp_fl_act_pre_tun *)actions; + memset(pre_tun, 0, act_pre_size); +@@ -3131,7 
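/*
 * Illustrative sketch (not part of the imported patch): a VXLAN VNI
 * occupies the upper 24 bits of the big-endian vx_vni word. The encap
 * fixes in this area byte-swap first and shift off the reserved low
 * byte instead of handing the raw big-endian value to the firmware:
 */
#include <stdint.h>
#include <arpa/inet.h>  /* ntohl() standing in for rte_be_to_cpu_32() */

static uint32_t vxlan_vni(uint32_t vx_vni_be)
{
    return ntohl(vx_vni_be) >> 8;  /* 24-bit VNI, reserved byte dropped */
}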
+3184,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, + + set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); + memset(set_tun, 0, act_set_size); +- nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_GRE, 0, ++ nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_GRE, tun_id, + ipv4->hdr.time_to_live, ipv4->hdr.type_of_service); + set_tun->tun_proto = gre->protocol; + +@@ -3149,6 +3202,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, + struct nfp_fl_tun *tun) + { + uint8_t tos; ++ uint64_t tun_id; + const struct rte_ether_hdr *eth; + const struct rte_flow_item_ipv6 *ipv6; + const struct rte_flow_item_gre *gre; +@@ -3160,6 +3214,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, + eth = (const struct rte_ether_hdr *)raw_encap->data; + ipv6 = (const struct rte_flow_item_ipv6 *)(eth + 1); + gre = (const struct rte_flow_item_gre *)(ipv6 + 1); ++ tun_id = rte_be_to_cpu_32(*(const rte_be32_t *)(gre + 1)); + + pre_tun = (struct nfp_fl_act_pre_tun *)actions; + memset(pre_tun, 0, act_pre_size); +@@ -3167,8 +3222,8 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, + + set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); + memset(set_tun, 0, act_set_size); +- tos = (ipv6->hdr.vtc_flow >> RTE_IPV6_HDR_TC_SHIFT) & 0xff; +- nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_GRE, 0, ++ tos = rte_be_to_cpu_32(ipv6->hdr.vtc_flow) >> RTE_IPV6_HDR_TC_SHIFT; ++ nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_GRE, tun_id, + ipv6->hdr.hop_limits, tos); + set_tun->tun_proto = gre->protocol; + +@@ -3232,12 +3287,27 @@ nfp_flow_action_raw_encap(struct nfp_app_fw_flower *app_fw_flower, + return ret; + } + ++static uint32_t ++nfp_flow_count_output(const struct rte_flow_action actions[]) ++{ ++ uint32_t count = 0; ++ const struct rte_flow_action *action; ++ ++ for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { ++ if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) ++ count++; ++ } ++ ++ return count; ++} ++ + static int + nfp_flow_compile_action(struct nfp_flower_representor *representor, + const struct rte_flow_action actions[], + struct rte_flow *nfp_flow) + { + int ret = 0; ++ uint32_t count; + char *position; + char *action_data; + bool ttl_tos_flag = false; +@@ -3256,6 +3326,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + position = action_data; + meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; + ++ count = nfp_flow_count_output(actions); ++ + for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_VOID: +@@ -3272,7 +3344,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_PORT_ID"); +- ret = nfp_flow_action_output(position, action, nfp_flow_meta); ++ count--; ++ ret = nfp_flow_action_output(position, action, nfp_flow_meta, count); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed when process" + " RTE_FLOW_ACTION_TYPE_PORT_ID"); +@@ -3347,7 +3420,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_TP_SRC"); +- nfp_flow_action_set_tp(position, action, true, tp_set_flag); ++ nfp_flow_action_set_tp(position, action, true, ++ tp_set_flag, nfp_flow->tcp_flag); + if (!tp_set_flag) { + position += sizeof(struct 
nfp_fl_act_set_tport); + tp_set_flag = true; +@@ -3355,7 +3429,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_TP_DST"); +- nfp_flow_action_set_tp(position, action, false, tp_set_flag); ++ nfp_flow_action_set_tp(position, action, false, ++ tp_set_flag, nfp_flow->tcp_flag); + if (!tp_set_flag) { + position += sizeof(struct nfp_fl_act_set_tport); + tp_set_flag = true; +@@ -3484,7 +3559,7 @@ nfp_flow_process(struct nfp_flower_representor *representor, + return NULL; + } + +- nfp_flow = nfp_flow_alloc(&key_layer); ++ nfp_flow = nfp_flow_alloc(&key_layer, representor->port_id); + if (nfp_flow == NULL) { + PMD_DRV_LOG(ERR, "Alloc nfp flow failed."); + goto free_stats; +@@ -3592,6 +3667,7 @@ nfp_flow_teardown(struct nfp_flow_priv *priv, + nfp_flow_meta = nfp_flow->payload.meta; + mask_data = nfp_flow->payload.mask_data; + mask_len = nfp_flow_meta->mask_len << NFP_FL_LW_SIZ; ++ nfp_flow_meta->flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; + if (!nfp_check_mask_remove(priv, mask_data, mask_len, + &nfp_flow_meta->flags)) { + PMD_DRV_LOG(ERR, "nfp mask del check failed."); +@@ -3791,14 +3867,21 @@ nfp_flow_flush(struct rte_eth_dev *dev, + void *next_data; + uint32_t iter = 0; + const void *next_key; ++ struct rte_flow *nfp_flow; + struct nfp_flow_priv *priv; ++ struct nfp_flower_representor *representor; ++ ++ representor = dev->data->dev_private; + + priv = nfp_flow_dev_to_priv(dev); + + while (rte_hash_iterate(priv->flow_table, &next_key, &next_data, &iter) >= 0) { +- ret = nfp_flow_destroy(dev, (struct rte_flow *)next_data, error); +- if (ret != 0) +- break; ++ nfp_flow = next_data; ++ if (nfp_flow->port_id == representor->port_id) { ++ ret = nfp_flow_destroy(dev, nfp_flow, error); ++ if (ret != 0) ++ break; ++ } + } + + return ret; +@@ -3809,6 +3892,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, + struct rte_flow *nfp_flow, + void *data) + { ++ bool reset; + uint32_t ctx_id; + struct rte_flow *flow; + struct nfp_flow_priv *priv; +@@ -3823,6 +3907,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, + } + + query = (struct rte_flow_query_count *)data; ++ reset = query->reset; + memset(query, 0, sizeof(*query)); + + ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id); +@@ -3834,7 +3919,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, + query->bytes = stats->bytes; + query->hits_set = 1; + query->bytes_set = 1; +- if (query->reset != 0) { ++ if (reset) { + stats->pkts = 0; + stats->bytes = 0; + } +@@ -3981,11 +4066,21 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) + size_t stats_size; + uint64_t ctx_count; + uint64_t ctx_split; ++ char mask_name[RTE_HASH_NAMESIZE]; ++ char flow_name[RTE_HASH_NAMESIZE]; ++ char pretun_name[RTE_HASH_NAMESIZE]; + struct nfp_flow_priv *priv; + struct nfp_app_fw_flower *app_fw_flower; + ++ snprintf(mask_name, sizeof(mask_name), "%s_mask", ++ pf_dev->pci_dev->device.name); ++ snprintf(flow_name, sizeof(flow_name), "%s_flow", ++ pf_dev->pci_dev->device.name); ++ snprintf(pretun_name, sizeof(pretun_name), "%s_pretun", ++ pf_dev->pci_dev->device.name); ++ + struct rte_hash_parameters mask_hash_params = { +- .name = "mask_hash_table", ++ .name = mask_name, + .entries = NFP_MASK_TABLE_ENTRIES, + .hash_func = rte_jhash, + .socket_id = rte_socket_id(), +@@ -3994,7 +4089,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) + }; + + struct rte_hash_parameters flow_hash_params = { +- .name = "flow_hash_table", ++ .name = flow_name, + .hash_func = 
rte_jhash, + .socket_id = rte_socket_id(), + .key_len = sizeof(uint32_t), +@@ -4002,7 +4097,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) + }; + + struct rte_hash_parameters pre_tun_hash_params = { +- .name = "pre_tunnel_table", ++ .name = pretun_name, + .entries = 32, + .hash_func = rte_jhash, + .socket_id = rte_socket_id(), +diff --git a/dpdk/drivers/net/nfp/nfp_flow.h b/dpdk/drivers/net/nfp/nfp_flow.h +index b0c2aaf6d8..314ff2083a 100644 +--- a/dpdk/drivers/net/nfp/nfp_flow.h ++++ b/dpdk/drivers/net/nfp/nfp_flow.h +@@ -222,7 +222,9 @@ struct rte_flow { + struct nfp_fl_tun tun; + size_t length; + uint32_t hash_key; ++ uint32_t port_id; + bool install_flag; ++ bool tcp_flag; /**< Used in the SET_TP_* action */ + enum nfp_flow_type type; + }; + +diff --git a/dpdk/drivers/net/nfp/nfp_rxtx.c b/dpdk/drivers/net/nfp/nfp_rxtx.c +index 01cffdfde0..ac0fa80d4c 100644 +--- a/dpdk/drivers/net/nfp/nfp_rxtx.c ++++ b/dpdk/drivers/net/nfp/nfp_rxtx.c +@@ -48,7 +48,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) + + rxd = &rxq->rxds[i]; + rxd->fld.dd = 0; +- rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; ++ rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff; + rxd->fld.dma_addr_lo = dma_addr & 0xffffffff; + rxe[i].mbuf = mbuf; + PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr); +@@ -361,7 +361,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + rxds->vals[1] = 0; + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); + rxds->fld.dd = 0; +- rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; ++ rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff; + rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; + nb_hold++; + +@@ -1063,6 +1063,7 @@ nfp_net_nfdk_tx_maybe_close_block(struct nfp_net_txq *txq, struct rte_mbuf *pkt) + if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) + return -EINVAL; + ++ /* Under count by 1 (don't count meta) for the round down to work out */ + n_descs += !!(pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG); + + if (round_down(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) != +@@ -1214,13 +1215,24 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk + if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) && + (pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { + type = NFDK_DESC_TX_TYPE_TSO; +- } else if (!pkt->next && dma_len < NFDK_TX_MAX_DATA_PER_HEAD) { ++ } else if (pkt->next == NULL && dma_len <= NFDK_TX_MAX_DATA_PER_HEAD) { + type = NFDK_DESC_TX_TYPE_SIMPLE; + } else { + type = NFDK_DESC_TX_TYPE_GATHER; + } ++ ++ /* Implicitly truncates to chunk in below logic */ + dma_len -= 1; +- dlen_type = (NFDK_DESC_TX_DMA_LEN_HEAD & dma_len) | ++ ++ /* ++ * We will do our best to pass as much data as we can in descriptor ++ * and we need to make sure the first descriptor includes whole ++ * head since there is limitation in firmware side. Sometimes the ++ * value of 'dma_len & NFDK_DESC_TX_DMA_LEN_HEAD' will be less ++ * than packet head len. ++ */ ++ dlen_type = (dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ? ++ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) | + (NFDK_DESC_TX_TYPE_HEAD & (type << 12)); + ktxds->dma_len_type = rte_cpu_to_le_16(dlen_type); + dma_addr = rte_mbuf_data_iova(pkt); +@@ -1230,10 +1242,18 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk + ktxds->dma_addr_lo = rte_cpu_to_le_32(dma_addr & 0xffffffff); + ktxds++; + ++ /* ++ * Preserve the original dlen_type, this way below the EOP logic ++ * can use dlen_type. 
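/*
 * Illustrative sketch (not part of the imported patch):
 * nfp_flow_stats_get() above reads query->reset *before* the memset(),
 * because the reset request lives in the same structure the function is
 * about to zero and reuse as its output. The pattern in isolation;
 * struct and field names are stand-ins for rte_flow_query_count:
 */
#include <stdbool.h>
#include <string.h>

struct query_count { bool reset; unsigned long hits, bytes; };

static void stats_get(struct query_count *q, unsigned long hits,
                      unsigned long bytes, unsigned long *hw_hits,
                      unsigned long *hw_bytes)
{
    bool reset = q->reset;  /* save before zeroing the struct */

    memset(q, 0, sizeof(*q));
    q->hits = hits;
    q->bytes = bytes;
    if (reset) {            /* clear driver-side counters last */
        *hw_hits = 0;
        *hw_bytes = 0;
    }
}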
++ */ + tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD; + dma_len -= tmp_dlen; + dma_addr += tmp_dlen + 1; + ++ /* ++ * The rest of the data (if any) will be in larger DMA descriptors ++ * and is handled with the dma_len loop. ++ */ + while (pkt) { + if (*lmbuf) + rte_pktmbuf_free_seg(*lmbuf); +diff --git a/dpdk/drivers/net/nfp/nfp_rxtx.h b/dpdk/drivers/net/nfp/nfp_rxtx.h +index ced05fde90..1c2a00e300 100644 +--- a/dpdk/drivers/net/nfp/nfp_rxtx.h ++++ b/dpdk/drivers/net/nfp/nfp_rxtx.h +@@ -227,8 +227,8 @@ struct nfp_net_rx_desc { + union { + /* Freelist descriptor */ + struct { +- uint8_t dma_addr_hi; +- __le16 spare; ++ __le16 dma_addr_hi; ++ uint8_t spare; + uint8_t dd; + + __le32 dma_addr_lo; +@@ -340,10 +340,14 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq) + static inline uint32_t + nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq) + { ++ uint32_t free_desc; ++ + if (txq->wr_p >= txq->rd_p) +- return txq->tx_count - (txq->wr_p - txq->rd_p) - 8; ++ free_desc = txq->tx_count - (txq->wr_p - txq->rd_p); + else +- return txq->rd_p - txq->wr_p - 8; ++ free_desc = txq->rd_p - txq->wr_p; ++ ++ return (free_desc > 8) ? (free_desc - 8) : 0; + } + + /* +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +index a04a68f546..68851b22e4 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +@@ -34,6 +34,9 @@ struct nfp_cpp { + */ + uint32_t imb_cat_table[16]; + ++ /* MU access type bit offset */ ++ uint32_t mu_locality_lsb; ++ + int driver_lock_needed; + }; + +@@ -363,7 +366,7 @@ struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, + */ + void nfp_cpp_area_release_free(struct nfp_cpp_area *area); + +-uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, ++uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id, + uint64_t addr, unsigned long size, + struct nfp_cpp_area **area); + /* +@@ -778,4 +781,6 @@ int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); + */ + int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + ++uint32_t nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp); ++ + #endif /* !__NFP_CPP_H__ */ +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c +index 22c8bc4b14..e597315498 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c +@@ -66,8 +66,8 @@ + #define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) + #define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +-#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \ +- (NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4) ++#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(id, bar, slot) \ ++ (NFP_PCIE_BAR(id) + ((bar) * 8 + (slot)) * 4) + + #define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \ + (((bar) * 8 + (slot)) * 4) +@@ -117,6 +117,7 @@ struct nfp_pcie_user { + int secondary_lock; + char busdev[BUSDEV_SZ]; + int barsz; ++ int dev_id; + char *cfg; + }; + +@@ -258,7 +259,7 @@ nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar, + return (-ENOMEM); + + bar->csr = nfp->cfg + +- NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot); ++ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id, base, slot); + + *(uint32_t *)(bar->csr) = newcfg; + +@@ -332,10 +333,8 @@ nfp_enable_bars(struct nfp_pcie_user *nfp) + bar->base = 0; + bar->iomem = NULL; + bar->lock = 0; +- bar->csr = nfp->cfg + +- NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3, +- bar->index & 7); +- ++ 
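/*
 * Illustrative sketch (not part of the imported patch): the freelist
 * descriptor carries a 48-bit DMA address, 32 low bits plus a 16-bit
 * high word. Masking the high part with 0xff (and declaring it 8 bits
 * wide) silently truncated any IOVA above 40 bits; the hunks above
 * widen both the mask and the field. The split in isolation, with plain
 * integers standing in for the __le16/__le32 types:
 */
#include <stdint.h>

struct rx_fld {
    uint16_t dma_addr_hi;  /* was a uint8_t plus a 16-bit spare */
    uint8_t spare;
    uint8_t dd;
    uint32_t dma_addr_lo;
};

static void fld_set_addr(struct rx_fld *fld, uint64_t dma_addr)
{
    fld->dma_addr_hi = (uint16_t)((dma_addr >> 32) & 0xffff);
    fld->dma_addr_lo = (uint32_t)(dma_addr & 0xffffffff);
}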
bar->csr = nfp->cfg + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id, ++ bar->index >> 3, bar->index & 7); + bar->iomem = nfp->cfg + (bar->index << bar->bitsize); + } + return 0; +@@ -850,6 +849,7 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev) + goto error; + + desc->cfg = (char *)dev->mem_resource[0].addr; ++ desc->dev_id = dev->addr.function & 0x7; + + nfp_enable_bars(desc); + +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c +index 37799af558..014f6c9df8 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c +@@ -19,6 +19,7 @@ + #include "nfp6000/nfp6000.h" + #include "nfp6000/nfp_xpb.h" + #include "nfp_nffw.h" ++#include "../nfp_logs.h" + + #define NFP_PL_DEVICE_ID 0x00000004 + #define NFP_PL_DEVICE_ID_MASK 0xff +@@ -118,6 +119,36 @@ nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) + return cpp_area->name; + } + ++#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) ++#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE RTE_BIT32(12) ++ ++static int ++nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp) ++{ ++ int ret; ++ int mode; ++ int addr40; ++ uint32_t imbcppat; ++ ++ imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU]; ++ mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); ++ addr40 = imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE; ++ ++ ret = nfp_cppat_mu_locality_lsb(mode, addr40); ++ if (ret < 0) ++ return ret; ++ ++ cpp->mu_locality_lsb = ret; ++ ++ return 0; ++} ++ ++uint32_t ++nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp) ++{ ++ return cpp->mu_locality_lsb; ++} ++ + /* + * nfp_cpp_area_alloc - allocate a new CPP area + * @cpp: CPP handle +@@ -142,10 +173,6 @@ nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest, + if (!cpp) + return NULL; + +- /* CPP bus uses only a 40-bit address */ +- if ((address + size) > (1ULL << 40)) +- return NFP_ERRPTR(EFAULT); +- + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table); + if (err < 0) +@@ -588,6 +615,13 @@ nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed) + } + } + ++ err = nfp_cpp_set_mu_locality_lsb(cpp); ++ if (err < 0) { ++ PMD_DRV_LOG(ERR, "Can't calculate MU locality bit offset"); ++ free(cpp); ++ return NULL; ++ } ++ + return cpp; + } + +@@ -819,8 +853,7 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model) + /* + * nfp_cpp_map_area() - Helper function to map an area + * @cpp: NFP CPP handler +- * @domain: CPP domain +- * @target: CPP target ++ * @cpp_id: CPP ID + * @addr: CPP address + * @size: Size of the area + * @area: Area handle (output) +@@ -828,18 +861,15 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model) + * Map an area of IOMEM access. To undo the effect of this function call + * @nfp_cpp_area_release_free(*area). 
+ * +- * Return: Pointer to memory mapped area or ERR_PTR ++ * Return: Pointer to memory mapped area or NULL + */ + uint8_t * +-nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr, ++nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, + unsigned long size, struct nfp_cpp_area **area) + { + uint8_t *res; +- uint32_t dest; +- +- dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain); + +- *area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size); ++ *area = nfp_cpp_area_alloc_acquire(cpp, cpp_id, addr, size); + if (!*area) + goto err_eio; + +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c b/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c +index 56bbf05cd8..21879f7eb6 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c +@@ -14,6 +14,7 @@ + #include "nfp_mip.h" + #include "nfp_rtsym.h" + #include "nfp6000/nfp6000.h" ++#include "../nfp_logs.h" + + /* These need to match the linker */ + #define SYM_TGT_LMEM 0 +@@ -213,6 +214,113 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) + return NULL; + } + ++static uint64_t ++nfp_rtsym_size(const struct nfp_rtsym *sym) ++{ ++ switch (sym->type) { ++ case NFP_RTSYM_TYPE_NONE: ++ PMD_DRV_LOG(ERR, "rtsym '%s': type NONE", sym->name); ++ return 0; ++ case NFP_RTSYM_TYPE_OBJECT: /* Fall through */ ++ case NFP_RTSYM_TYPE_FUNCTION: ++ return sym->size; ++ case NFP_RTSYM_TYPE_ABS: ++ return sizeof(uint64_t); ++ default: ++ PMD_DRV_LOG(ERR, "rtsym '%s': unknown type: %d", sym->name, sym->type); ++ return 0; ++ } ++} ++ ++static int ++nfp_rtsym_to_dest(struct nfp_cpp *cpp, ++ const struct nfp_rtsym *sym, ++ uint8_t action, ++ uint8_t token, ++ uint64_t offset, ++ uint32_t *cpp_id, ++ uint64_t *addr) ++{ ++ if (sym->type != NFP_RTSYM_TYPE_OBJECT) { ++ PMD_DRV_LOG(ERR, "rtsym '%s': direct access to non-object rtsym", ++ sym->name); ++ return -EINVAL; ++ } ++ ++ *addr = sym->addr + offset; ++ ++ if (sym->target >= 0) { ++ *cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token, sym->domain); ++ } else if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) { ++ int locality_off = nfp_cpp_mu_locality_lsb(cpp); ++ ++ *addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); ++ *addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; ++ ++ *cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token, ++ sym->domain); ++ } else { ++ PMD_DRV_LOG(ERR, "rtsym '%s': unhandled target encoding: %d", ++ sym->name, sym->target); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++nfp_rtsym_readl(struct nfp_cpp *cpp, ++ const struct nfp_rtsym *sym, ++ uint8_t action, ++ uint8_t token, ++ uint64_t offset, ++ uint32_t *value) ++{ ++ int ret; ++ uint64_t addr; ++ uint32_t cpp_id; ++ ++ if (offset + 4 > nfp_rtsym_size(sym)) { ++ PMD_DRV_LOG(ERR, "rtsym '%s': readl out of bounds", sym->name); ++ return -ENXIO; ++ } ++ ++ ret = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr); ++ if (ret != 0) ++ return ret; ++ ++ return nfp_cpp_readl(cpp, cpp_id, addr, value); ++} ++ ++static int ++nfp_rtsym_readq(struct nfp_cpp *cpp, ++ const struct nfp_rtsym *sym, ++ uint8_t action, ++ uint8_t token, ++ uint64_t offset, ++ uint64_t *value) ++{ ++ int ret; ++ uint64_t addr; ++ uint32_t cpp_id; ++ ++ if (offset + 8 > nfp_rtsym_size(sym)) { ++ PMD_DRV_LOG(ERR, "rtsym '%s': readq out of bounds", sym->name); ++ return -ENXIO; ++ } ++ ++ if (sym->type == NFP_RTSYM_TYPE_ABS) { ++ *value = sym->addr; ++ return 0; ++ } ++ ++ ret = nfp_rtsym_to_dest(cpp, sym, action, token, 
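/*
 * Illustrative sketch (not part of the imported patch): the new
 * nfp_rtsym_readl()/nfp_rtsym_readq() helpers validate that the access
 * fits inside the symbol before touching the bus, returning -ENXIO
 * instead of reading past the end. The guard on its own; width is 4 for
 * readl and 8 for readq:
 */
#include <stdint.h>
#include <errno.h>

static int check_sym_bounds(uint64_t sym_size, uint64_t offset, uint64_t width)
{
    if (offset + width > sym_size)
        return -ENXIO;  /* access would run past the symbol */
    return 0;
}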
offset, &cpp_id, &addr); ++ if (ret != 0) ++ return ret; ++ ++ return nfp_cpp_readq(cpp, cpp_id, addr, value); ++} ++ + /* + * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol + * @rtbl: NFP RTsym table +@@ -229,7 +337,7 @@ uint64_t + nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) + { + const struct nfp_rtsym *sym; +- uint32_t val32, id; ++ uint32_t val32; + uint64_t val; + int err; + +@@ -239,19 +347,13 @@ nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) + goto exit; + } + +- id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); +- +-#ifdef DEBUG +- printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n", +- name, sym->size, sym->addr); +-#endif + switch (sym->size) { + case 4: +- err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32); ++ err = nfp_rtsym_readl(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val32); + val = val32; + break; + case 8: +- err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val); ++ err = nfp_rtsym_readq(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val); + break; + default: + printf("rtsym '%s' unsupported size: %" PRId64 "\n", +@@ -276,8 +378,11 @@ uint8_t * + nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + unsigned int min_size, struct nfp_cpp_area **area) + { +- const struct nfp_rtsym *sym; ++ int ret; + uint8_t *mem; ++ uint64_t addr; ++ uint32_t cpp_id; ++ const struct nfp_rtsym *sym; + + #ifdef DEBUG + printf("mapping symbol %s\n", name); +@@ -288,14 +393,20 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + return NULL; + } + ++ ret = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, ++ &cpp_id, &addr); ++ if (ret != 0) { ++ PMD_DRV_LOG(ERR, "rtsym '%s': mapping failed", name); ++ return NULL; ++ } ++ + if (sym->size < min_size) { + printf("Symbol %s too small (%" PRIu64 " < %u)\n", name, + sym->size, min_size); + return NULL; + } + +- mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr, +- sym->size, area); ++ mem = nfp_cpp_map_area(rtbl->cpp, cpp_id, addr, sym->size, area); + if (!mem) { + printf("Failed to map symbol %s\n", name); + return NULL; +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.c b/dpdk/drivers/net/ngbe/base/ngbe_hw.c +index 283cdca367..27243d85c8 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_hw.c ++++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.c +@@ -1541,11 +1541,15 @@ s32 ngbe_clear_vfta(struct ngbe_hw *hw) + s32 ngbe_check_mac_link_em(struct ngbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) + { +- u32 i, reg; ++ u32 i; + s32 status = 0; + +- reg = rd32(hw, NGBE_GPIOINTSTAT); +- wr32(hw, NGBE_GPIOEOI, reg); ++ if (hw->lsc) { ++ u32 reg; ++ ++ reg = rd32(hw, NGBE_GPIOINTSTAT); ++ wr32(hw, NGBE_GPIOEOI, reg); ++ } + + if (link_up_wait_to_complete) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c +index 9b323624ec..b0eb6c97c0 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c ++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c +@@ -120,6 +120,8 @@ s32 ngbe_init_phy_rtl(struct ngbe_hw *hw) + hw->init_phy = true; + msec_delay(1); + ++ hw->phy.set_phy_power(hw, true); ++ + for (i = 0; i < 15; i++) { + if (!rd32m(hw, NGBE_STAT, + NGBE_STAT_GPHY_IN_RST(hw->bus.lan_id))) +@@ -390,6 +392,26 @@ s32 ngbe_check_phy_link_rtl(struct ngbe_hw *hw, u32 *speed, bool *link_up) + *speed = NGBE_LINK_SPEED_10M_FULL; + } + ++ if (hw->lsc) ++ return status; ++ ++ /* ++ * Because of the 
slow speed of getting link state, RTL_PHYSR ++ * may still be up while the actual link state is down. ++ * So we read RTL_GBSR to get accurate state when speed is 1G ++ * in polling mode. ++ */ ++ if (*speed == NGBE_LINK_SPEED_1GB_FULL) { ++ status = hw->phy.read_reg(hw, RTL_GBSR, ++ RTL_DEV_ZERO, &phy_data); ++ phy_link = phy_data & RTL_GBSR_LRS; ++ ++ /* Only need to detect link down */ ++ if (!phy_link) { ++ *link_up = false; ++ *speed = NGBE_LINK_SPEED_UNKNOWN; ++ } ++ } + return status; + } + +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.h b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.h +index b2fbc4f74d..6093ee7d5c 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.h +@@ -35,6 +35,8 @@ + #define RTL_ANLPAR_LP MS16(10, 0x3) + #define RTL_GBCR 0x9 + #define RTL_GBCR_1000F MS16(9, 0x1) ++#define RTL_GBSR 0xA ++#define RTL_GBSR_LRS MS16(13, 0x1) + /* Page 0xa42*/ + #define RTL_GSR 0x10 + #define RTL_GSR_ST MS16(0, 0x7) +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c +index c88946f7c3..754faadd6a 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c ++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c +@@ -100,11 +100,17 @@ s32 ngbe_write_phy_reg_sds_ext_yt(struct ngbe_hw *hw, + + s32 ngbe_init_phy_yt(struct ngbe_hw *hw) + { +- /* close sds area register */ +- ngbe_write_phy_reg_ext_yt(hw, YT_SMI_PHY, 0, 0); +- /* enable interrupts */ +- ngbe_write_phy_reg_mdi(hw, YT_INTR, 0, +- YT_INTR_ENA_MASK | YT_SDS_INTR_ENA_MASK); ++ rte_spinlock_init(&hw->phy_lock); ++ ++ if (hw->lsc) { ++ rte_spinlock_lock(&hw->phy_lock); ++ /* close sds area register */ ++ ngbe_write_phy_reg_ext_yt(hw, YT_SMI_PHY, 0, 0); ++ /* enable interrupts */ ++ ngbe_write_phy_reg_mdi(hw, YT_INTR, 0, ++ YT_INTR_ENA_MASK | YT_SDS_INTR_ENA_MASK); ++ rte_spinlock_unlock(&hw->phy_lock); ++ } + + hw->phy.set_phy_power(hw, false); + +@@ -123,7 +129,9 @@ s32 ngbe_setup_phy_link_yt(struct ngbe_hw *hw, u32 speed, + hw->phy.autoneg_advertised = 0; + + /* check chip_mode first */ ++ rte_spinlock_lock(&hw->phy_lock); + ngbe_read_phy_reg_ext_yt(hw, YT_CHIP, 0, &value); ++ rte_spinlock_unlock(&hw->phy_lock); + if ((value & YT_CHIP_MODE_MASK) == YT_CHIP_MODE_SEL(0)) { + /* UTP to rgmii */ + if (!hw->mac.autoneg) { +@@ -146,11 +154,14 @@ s32 ngbe_setup_phy_link_yt(struct ngbe_hw *hw, u32 speed, + } + /* duplex full */ + value |= YT_BCR_DUPLEX | YT_BCR_RESET; ++ rte_spinlock_lock(&hw->phy_lock); + ngbe_write_phy_reg_mdi(hw, YT_BCR, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + + goto skip_an; + } + ++ rte_spinlock_lock(&hw->phy_lock); + /*disable 100/10base-T Self-negotiation ability*/ + ngbe_read_phy_reg_mdi(hw, YT_ANA, 0, &value); + value &= ~(YT_ANA_100BASET_FULL | YT_ANA_100BASET_HALF | +@@ -189,6 +200,7 @@ s32 ngbe_setup_phy_link_yt(struct ngbe_hw *hw, u32 speed, + ngbe_read_phy_reg_mdi(hw, YT_BCR, 0, &value); + value |= YT_BCR_RESET | YT_BCR_ANE | YT_BCR_RESTART_AN; + ngbe_write_phy_reg_mdi(hw, YT_BCR, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + skip_an: + hw->phy.set_phy_power(hw, true); + } else if ((value & YT_CHIP_MODE_MASK) == YT_CHIP_MODE_SEL(1)) { +@@ -199,6 +211,7 @@ skip_an: + value = YT_RGMII_CONF1_RXDELAY | + YT_RGMII_CONF1_TXDELAY_FE | + YT_RGMII_CONF1_TXDELAY; ++ rte_spinlock_lock(&hw->phy_lock); + ngbe_write_phy_reg_ext_yt(hw, YT_RGMII_CONF1, 0, value); + value = YT_CHIP_MODE_SEL(1) | + YT_CHIP_SW_LDO_EN | +@@ -225,17 +238,21 @@ skip_an: + value = YT_BCR_RESET | YT_BCR_DUPLEX | + YT_BCR_SPEED_SELECT1; + 
hw->phy.write_reg(hw, YT_BCR, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + + hw->phy.set_phy_power(hw, true); + } else if ((value & YT_CHIP_MODE_MASK) == YT_CHIP_MODE_SEL(2)) { + hw->phy.set_phy_power(hw, true); + ++ rte_spinlock_lock(&hw->phy_lock); + hw->phy.read_reg(hw, YT_SPST, 0, &value); ++ rte_spinlock_unlock(&hw->phy_lock); + if (value & YT_SPST_LINK) { + /* fiber up */ + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + } else { + /* utp up */ ++ rte_spinlock_lock(&hw->phy_lock); + /*disable 100/10base-T Self-negotiation ability*/ + ngbe_read_phy_reg_mdi(hw, YT_ANA, 0, &value); + value &= ~(YT_ANA_100BASET_FULL | YT_ANA_100BASET_HALF | +@@ -279,10 +296,12 @@ skip_an: + ngbe_read_phy_reg_mdi(hw, YT_BCR, 0, &value); + value |= YT_BCR_RESET; + ngbe_write_phy_reg_mdi(hw, YT_BCR, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + } + } else if ((value & YT_CHIP_MODE_MASK) == YT_CHIP_MODE_SEL(4)) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + ++ rte_spinlock_lock(&hw->phy_lock); + ngbe_read_phy_reg_ext_yt(hw, YT_RGMII_CONF1, 0, &value); + value |= YT_RGMII_CONF1_MODE; + ngbe_write_phy_reg_ext_yt(hw, YT_RGMII_CONF1, 0, value); +@@ -297,6 +316,7 @@ skip_an: + ngbe_read_phy_reg_ext_yt(hw, YT_CHIP, 0, &value); + value &= ~YT_SMI_PHY_SW_RST; + ngbe_write_phy_reg_ext_yt(hw, YT_CHIP, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + + hw->phy.set_phy_power(hw, true); + } else if ((value & YT_CHIP_MODE_MASK) == YT_CHIP_MODE_SEL(5)) { +@@ -320,7 +340,9 @@ skip_an: + } + /* duplex full */ + value |= YT_BCR_DUPLEX | YT_BCR_RESET; ++ rte_spinlock_lock(&hw->phy_lock); + hw->phy.write_reg(hw, YT_BCR, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + + goto skip_an_sr; + } +@@ -339,19 +361,23 @@ skip_an: + + /* duplex full */ + value |= YT_BCR_DUPLEX | YT_BCR_RESET; ++ rte_spinlock_lock(&hw->phy_lock); + hw->phy.write_reg(hw, YT_BCR, 0, value); + + /* software reset to make the above configuration take effect */ + hw->phy.read_reg(hw, YT_BCR, 0, &value); + value |= YT_BCR_RESET | YT_BCR_ANE | YT_BCR_RESTART_AN; + hw->phy.write_reg(hw, 0x0, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + + skip_an_sr: + hw->phy.set_phy_power(hw, true); + } + ++ rte_spinlock_lock(&hw->phy_lock); + ngbe_write_phy_reg_ext_yt(hw, YT_SMI_PHY, 0, 0); + ngbe_read_phy_reg_mdi(hw, YT_INTR_STATUS, 0, &value); ++ rte_spinlock_unlock(&hw->phy_lock); + + return 0; + } +@@ -366,6 +392,7 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw) + hw->phy.type != ngbe_phy_yt8521s_sfi) + return NGBE_ERR_PHY_TYPE; + ++ rte_spinlock_lock(&hw->phy_lock); + /* check chip_mode first */ + ngbe_read_phy_reg_ext_yt(hw, YT_CHIP, 0, &ctrl); + if (ctrl & YT_CHIP_MODE_MASK) { +@@ -395,6 +422,7 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw) + msleep(1); + } + } ++ rte_spinlock_unlock(&hw->phy_lock); + + if (i == YT_PHY_RST_WAIT_PERIOD) { + DEBUGOUT("PHY reset polling failed to complete."); +@@ -409,7 +437,9 @@ s32 ngbe_get_phy_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) + u16 value; + s32 status = 0; + ++ rte_spinlock_lock(&hw->phy_lock); + status = hw->phy.read_reg(hw, YT_ANA, 0, &value); ++ rte_spinlock_unlock(&hw->phy_lock); + value &= YT_FANA_PAUSE_MASK; + *pause_bit = (u8)(value >> 7); + +@@ -421,7 +451,9 @@ s32 ngbe_get_phy_lp_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) + u16 value; + s32 status = 0; + ++ rte_spinlock_lock(&hw->phy_lock); + status = hw->phy.read_reg(hw, YT_LPAR, 0, &value); ++ rte_spinlock_unlock(&hw->phy_lock); + value &= YT_FLPAR_PAUSE_MASK; + *pause_bit = (u8)(value >> 7); + 
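/*
 * Illustrative sketch (not part of the imported patch): the YT PHY is
 * reached through a shared page/area selector, so an unserialized
 * reader can be redirected mid-sequence by another thread switching
 * pages; the hunks above therefore bracket each select-then-access
 * sequence with hw->phy_lock. Shape of the pattern, with a pthread
 * mutex standing in for rte_spinlock_t and the hypothetical
 * select_page()/mdio_read() helpers reduced to a comment:
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t phy_lock = PTHREAD_MUTEX_INITIALIZER;

static uint16_t phy_read(int page, int reg)
{
    uint16_t val = 0;

    pthread_mutex_lock(&phy_lock);
    /* select_page(page); val = mdio_read(reg); */
    (void)page; (void)reg;
    pthread_mutex_unlock(&phy_lock);
    return val;
}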
+@@ -433,10 +465,12 @@ s32 ngbe_set_phy_pause_adv_yt(struct ngbe_hw *hw, u16 pause_bit) + u16 value; + s32 status = 0; + ++ rte_spinlock_lock(&hw->phy_lock); + status = hw->phy.read_reg(hw, YT_ANA, 0, &value); + value &= ~YT_FANA_PAUSE_MASK; + value |= pause_bit; + status = hw->phy.write_reg(hw, YT_ANA, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + + return status; + } +@@ -453,6 +487,7 @@ s32 ngbe_check_phy_link_yt(struct ngbe_hw *hw, + /* Initialize speed and link to default case */ + *link_up = false; + *speed = NGBE_LINK_SPEED_UNKNOWN; ++ rte_spinlock_lock(&hw->phy_lock); + + ngbe_write_phy_reg_ext_yt(hw, YT_SMI_PHY, 0, 0); + ngbe_read_phy_reg_mdi(hw, YT_INTR_STATUS, 0, &insr); +@@ -472,6 +507,7 @@ s32 ngbe_check_phy_link_yt(struct ngbe_hw *hw, + *link_up = true; + } + ++ rte_spinlock_unlock(&hw->phy_lock); + if (*link_up) { + if (phy_speed == YT_SPST_SPEED_1000M) + *speed = NGBE_LINK_SPEED_1GB_FULL; +@@ -488,6 +524,7 @@ s32 ngbe_set_phy_power_yt(struct ngbe_hw *hw, bool on) + { + u16 value = 0; + ++ rte_spinlock_lock(&hw->phy_lock); + /* power down/up in fiber mode */ + hw->phy.read_reg(hw, YT_BCR, 0, &value); + if (on) +@@ -504,6 +541,7 @@ s32 ngbe_set_phy_power_yt(struct ngbe_hw *hw, bool on) + else + value |= YT_BCR_PWDN; + ngbe_write_phy_reg_mdi(hw, YT_BCR, 0, value); ++ rte_spinlock_unlock(&hw->phy_lock); + + return 0; + } +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_type.h b/dpdk/drivers/net/ngbe/base/ngbe_type.h +index aa5c41146c..37be288a74 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_type.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_type.h +@@ -431,8 +431,10 @@ struct ngbe_hw { + bool offset_loaded; + bool is_pf; + bool gpio_ctl; ++ bool lsc; + u32 led_conf; + bool init_phy; ++ rte_spinlock_t phy_lock; + struct { + u64 rx_qp_packets; + u64 tx_qp_packets; +diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c +index afdb3ad41f..cb643c6eba 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c ++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c +@@ -160,7 +160,9 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { + HW_XSTAT(tx_total_packets), + HW_XSTAT(rx_total_missed_packets), + HW_XSTAT(rx_broadcast_packets), ++ HW_XSTAT(tx_broadcast_packets), + HW_XSTAT(rx_multicast_packets), ++ HW_XSTAT(tx_multicast_packets), + HW_XSTAT(rx_management_packets), + HW_XSTAT(tx_management_packets), + HW_XSTAT(rx_management_dropped), +@@ -972,9 +974,6 @@ ngbe_dev_start(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + +- /* Stop the link setup handler before resetting the HW. 
*/ +- rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev); +- + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + +@@ -1050,6 +1049,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) + if (hw->is_pf && dev->data->dev_conf.lpbk_mode) + goto skip_link_setup; + ++ hw->lsc = dev->data->dev_conf.intr_conf.lsc; ++ + err = hw->mac.check_link(hw, &speed, &link_up, 0); + if (err != 0) + goto error; +@@ -1168,8 +1169,6 @@ ngbe_dev_stop(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + +- rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev); +- + if (hw->gpio_ctl) { + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0); +@@ -1869,24 +1868,6 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) + return NULL; + } + +-void +-ngbe_dev_setup_link_alarm_handler(void *param) +-{ +- struct rte_eth_dev *dev = (struct rte_eth_dev *)param; +- struct ngbe_hw *hw = ngbe_dev_hw(dev); +- struct ngbe_interrupt *intr = ngbe_dev_intr(dev); +- u32 speed; +- bool autoneg = false; +- +- speed = hw->phy.autoneg_advertised; +- if (!speed) +- hw->mac.get_link_capabilities(hw, &speed, &autoneg); +- +- hw->mac.setup_link(hw, speed, true); +- +- intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG; +-} +- + /* return 0 means link status changed, -1 means not changed */ + int + ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1896,7 +1877,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, + struct rte_eth_link link; + u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; + u32 lan_speed = 0; +- struct ngbe_interrupt *intr = ngbe_dev_intr(dev); + bool link_up; + int err; + int wait = 1; +@@ -1910,9 +1890,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, + + hw->mac.get_link_status = true; + +- if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG) +- return rte_eth_linkstatus_set(dev, &link); +- + /* check if it needs to wait to complete, if lsc interrupt is enabled */ + if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) + wait = 0; +@@ -1927,7 +1904,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, + if (!link_up) + return rte_eth_linkstatus_set(dev, &link); + +- intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG; + link.link_status = RTE_ETH_LINK_UP; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + +diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h +index 8d500fd38c..bb96f6a5e7 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h ++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.h +@@ -341,7 +341,6 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, + uint16_t queue, bool on); + void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, + int mask); +-void ngbe_dev_setup_link_alarm_handler(void *param); + void ngbe_read_stats_registers(struct ngbe_hw *hw, + struct ngbe_hw_stats *hw_stats); + +diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c +index 9fd24fa444..f31906cc2f 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c ++++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.c +@@ -24,15 +24,11 @@ + + /* Bit Mask to indicate what bits required for building Tx context */ + static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM | +- RTE_MBUF_F_TX_OUTER_IPV6 | +- RTE_MBUF_F_TX_OUTER_IPV4 | + RTE_MBUF_F_TX_IPV6 | + RTE_MBUF_F_TX_IPV4 | + RTE_MBUF_F_TX_VLAN | + RTE_MBUF_F_TX_L4_MASK | + RTE_MBUF_F_TX_TCP_SEG | +- RTE_MBUF_F_TX_TUNNEL_MASK | +- RTE_MBUF_F_TX_OUTER_IP_CKSUM | + NGBE_TX_IEEE1588_TMST); + + #define NGBE_TX_OFFLOAD_NOTSUP_MASK \ +@@ -333,34 +329,15 @@ 
ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq, + } + + vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1); +- +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { +- tx_offload_mask.outer_tun_len |= ~0; +- tx_offload_mask.outer_l2_len |= ~0; +- tx_offload_mask.outer_l3_len |= ~0; +- tx_offload_mask.l2_len |= ~0; +- tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1); +- tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2); +- +- switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { +- case RTE_MBUF_F_TX_TUNNEL_IPIP: +- /* for non UDP / GRE tunneling, set to 0b */ +- break; +- default: +- PMD_TX_LOG(ERR, "Tunnel type not supported"); +- return; +- } +- vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len); +- } else { +- tunnel_seed = 0; +- vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len); +- } ++ vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len); + + if (ol_flags & RTE_MBUF_F_TX_VLAN) { + tx_offload_mask.vlan_tci |= ~0; + vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci); + } + ++ tunnel_seed = 0; ++ + txq->ctx_cache[ctx_idx].flags = ol_flags; + txq->ctx_cache[ctx_idx].tx_offload.data[0] = + tx_offload_mask.data[0] & tx_offload.data[0]; +@@ -449,16 +426,10 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) + return cmdtype; + } + +-static inline uint8_t +-tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) ++static inline uint32_t ++tx_desc_ol_flags_to_ptype(uint64_t oflags) + { +- bool tun; +- +- if (ptype) +- return ngbe_encode_ptype(ptype); +- +- /* Only support flags in NGBE_TX_OFFLOAD_MASK */ +- tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK); ++ uint32_t ptype; + + /* L2 level */ + ptype = RTE_PTYPE_L2_ETHER; +@@ -466,41 +437,36 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + ptype |= RTE_PTYPE_L2_ETHER_VLAN; + + /* L3 level */ +- if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM)) +- ptype |= RTE_PTYPE_L3_IPV4; +- else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6)) +- ptype |= RTE_PTYPE_L3_IPV6; +- + if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM)) +- ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4); ++ ptype |= RTE_PTYPE_L3_IPV4; + else if (oflags & (RTE_MBUF_F_TX_IPV6)) +- ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6); ++ ptype |= RTE_PTYPE_L3_IPV6; + + /* L4 level */ + switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) { + case RTE_MBUF_F_TX_TCP_CKSUM: +- ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP); ++ ptype |= RTE_PTYPE_L4_TCP; + break; + case RTE_MBUF_F_TX_UDP_CKSUM: +- ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP); ++ ptype |= RTE_PTYPE_L4_UDP; + break; + case RTE_MBUF_F_TX_SCTP_CKSUM: +- ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP); ++ ptype |= RTE_PTYPE_L4_SCTP; + break; + } + + if (oflags & RTE_MBUF_F_TX_TCP_SEG) +- ptype |= (tun ? 
RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP); +- +- /* Tunnel */ +- switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) { +- case RTE_MBUF_F_TX_TUNNEL_IPIP: +- case RTE_MBUF_F_TX_TUNNEL_IP: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_IP; +- break; +- } ++ ptype |= RTE_PTYPE_L4_TCP; ++ ++ return ptype; ++} ++ ++static inline uint8_t ++tx_desc_ol_flags_to_ptid(uint64_t oflags) ++{ ++ uint32_t ptype; ++ ++ ptype = tx_desc_ol_flags_to_ptype(oflags); + + return ngbe_encode_ptype(ptype); + } +@@ -622,16 +588,12 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + /* If hardware offload required */ + tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK; + if (tx_ol_req) { +- tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req, +- tx_pkt->packet_type); ++ tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req); + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; +- tx_offload.outer_l2_len = tx_pkt->outer_l2_len; +- tx_offload.outer_l3_len = tx_pkt->outer_l3_len; +- tx_offload.outer_tun_len = 0; + + /* If new context need be built or reuse the exist ctx*/ + ctx = what_ctx_update(txq, tx_ol_req, tx_offload); +@@ -752,10 +714,6 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + pkt_len -= (tx_offload.l2_len + + tx_offload.l3_len + tx_offload.l4_len); +- pkt_len -= +- (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) +- ? tx_offload.outer_l2_len + +- tx_offload.outer_l3_len : 0; + } + + /* +@@ -1939,12 +1897,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_UDP_TSO | +- RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS; + + if (hw->is_pf) +@@ -2237,6 +2191,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_KEEP_CRC | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | ++ RTE_ETH_RX_OFFLOAD_RSS_HASH | + RTE_ETH_RX_OFFLOAD_SCATTER; + + if (hw->is_pf) +diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c +index a4923670d6..22cd470646 100644 +--- a/dpdk/drivers/net/qede/qede_ethdev.c ++++ b/dpdk/drivers/net/qede/qede_ethdev.c +@@ -2142,6 +2142,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev, + rss_params.rss_enable = 1; + } + ++ rss_params.update_rss_ind_table = 1; + rss_params.update_rss_config = 1; + /* tbl_size has to be set with capabilities */ + rss_params.rss_table_size_log = 7; +diff --git a/dpdk/drivers/net/sfc/sfc_dp_rx.h b/dpdk/drivers/net/sfc/sfc_dp_rx.h +index 246adbd87c..8a504bdcf1 100644 +--- a/dpdk/drivers/net/sfc/sfc_dp_rx.h ++++ b/dpdk/drivers/net/sfc/sfc_dp_rx.h +@@ -69,6 +69,7 @@ struct sfc_dp_rx_qcreate_info { + /** Receive queue flags initializer */ + unsigned int flags; + #define SFC_RXQ_FLAG_RSS_HASH 0x1 ++#define SFC_RXQ_FLAG_INGRESS_MPORT 0x2 + + /** Rx queue size */ + unsigned int rxq_entries; +diff --git a/dpdk/drivers/net/sfc/sfc_ef100_rx.c b/dpdk/drivers/net/sfc/sfc_ef100_rx.c +index 16cd8524d3..37b754fa33 100644 +--- a/dpdk/drivers/net/sfc/sfc_ef100_rx.c ++++ b/dpdk/drivers/net/sfc/sfc_ef100_rx.c +@@ -810,6 +810,9 @@ sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id, + if (rxq->nic_dma_info->nb_regions > 0) + rxq->flags |= 
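With the tunnel offloads removed from ngbe above, the flag-to-packet-type helper reduces to a plain mapping of outer-header flags. A compact standalone version of that mapping, kept deliberately close to the rework above and using only public mbuf definitions:

#include <stdint.h>
#include <rte_mbuf_core.h>   /* RTE_MBUF_F_TX_* offload flags */
#include <rte_mbuf_ptype.h>  /* RTE_PTYPE_* values */

/* Simplified mapping in the spirit of the rework above: with tunnel
 * offloads dropped, only the outer headers are described. */
static uint32_t
ol_flags_to_ptype(uint64_t oflags)
{
	uint32_t ptype = RTE_PTYPE_L2_ETHER;

	if (oflags & RTE_MBUF_F_TX_VLAN)
		ptype |= RTE_PTYPE_L2_ETHER_VLAN;

	if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
		ptype |= RTE_PTYPE_L3_IPV4;
	else if (oflags & RTE_MBUF_F_TX_IPV6)
		ptype |= RTE_PTYPE_L3_IPV6;

	switch (oflags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		ptype |= RTE_PTYPE_L4_TCP;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		ptype |= RTE_PTYPE_L4_UDP;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		ptype |= RTE_PTYPE_L4_SCTP;
		break;
	}

	return ptype;
}

ngbe_encode_ptype() then compresses this 32-bit ptype into the 8-bit ptid the Tx descriptor expects.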
SFC_EF100_RXQ_NIC_DMA_MAP; + ++ if (info->flags & SFC_RXQ_FLAG_INGRESS_MPORT) ++ rxq->flags |= SFC_EF100_RXQ_INGRESS_MPORT; ++ + sfc_ef100_rx_debug(rxq, "RxQ doorbell is %p", rxq->doorbell); + + *dp_rxqp = &rxq->dp; +@@ -876,11 +879,18 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr, + else + rxq->flags &= ~SFC_EF100_RXQ_USER_MARK; + ++ ++ /* ++ * At the moment, this feature is used only ++ * by the representor proxy Rx queue and is ++ * essential for representor support, so if ++ * it has been requested but is unsupported, ++ * point this inconsistency out to the user. ++ */ + if ((unsup_rx_prefix_fields & +- (1U << EFX_RX_PREFIX_FIELD_INGRESS_MPORT)) == 0) +- rxq->flags |= SFC_EF100_RXQ_INGRESS_MPORT; +- else +- rxq->flags &= ~SFC_EF100_RXQ_INGRESS_MPORT; ++ (1U << EFX_RX_PREFIX_FIELD_INGRESS_MPORT)) && ++ (rxq->flags & SFC_EF100_RXQ_INGRESS_MPORT)) ++ return ENOTSUP; + + rxq->prefix_size = pinfo->erpl_length; + rxq->rearm_data = sfc_ef100_mk_mbuf_rearm_data(rxq->dp.dpq.port_id, +diff --git a/dpdk/drivers/net/sfc/sfc_mae.c b/dpdk/drivers/net/sfc/sfc_mae.c +index 421bb6da95..c7d28eae71 100644 +--- a/dpdk/drivers/net/sfc/sfc_mae.c ++++ b/dpdk/drivers/net/sfc/sfc_mae.c +@@ -1180,6 +1180,8 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa, + } + + if (fw_rsrc->refcnt == 1) { ++ efx_mae_action_set_clear_fw_rsrc_ids(action_set->spec); ++ + rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id); + if (rc == 0) { + sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x", +@@ -3896,12 +3898,10 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, + break; + case SFC_FT_RULE_SWITCH: + /* +- * Packets that go to the rule's AR have FT mark set (from the +- * TUNNEL rule OR's RECIRC_ID). Remove this mark in matching +- * packets. The user may have provided their own action +- * MARK above, so don't check the return value here. ++ * Packets that go to the rule's AR have FT mark set (from ++ * the TUNNEL rule OR's RECIRC_ID). Reset the mark to zero. + */ +- (void)efx_mae_action_set_populate_mark(ctx.spec, 0); ++ efx_mae_action_set_populate_mark_reset(ctx.spec); + + ctx.ft_switch_hit_counter = + &spec_mae->ft_ctx->switch_hit_counter; +@@ -3910,8 +3910,25 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, + SFC_ASSERT(B_FALSE); + } + ++ /* ++ * A DPDK flow entry must specify a fate action, which the parser ++ * converts into a DELIVER action in a libefx action set. An ++ * attempt to replace the action in the action set should ++ * fail. If it succeeds then report an error, as the ++ * parsed flow entry did not contain a fate action. 
++ */ ++ rc = efx_mae_action_set_populate_drop(ctx.spec); ++ if (rc == 0) { ++ rc = rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "no fate action found"); ++ goto fail_check_fate_action; ++ } ++ + spec_mae->action_set = sfc_mae_action_set_attach(sa, &ctx); + if (spec_mae->action_set != NULL) { ++ sfc_mae_mac_addr_del(sa, ctx.src_mac); ++ sfc_mae_mac_addr_del(sa, ctx.dst_mac); + sfc_mae_encap_header_del(sa, ctx.encap_header); + efx_mae_action_set_spec_fini(sa->nic, ctx.spec); + return 0; +@@ -3924,6 +3941,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, + return 0; + + fail_action_set_add: ++fail_check_fate_action: + fail_workaround_tunnel_delivery: + fail_nb_count: + sfc_mae_encap_header_del(sa, ctx.encap_header); +diff --git a/dpdk/drivers/net/sfc/sfc_repr.c b/dpdk/drivers/net/sfc/sfc_repr.c +index 417d0073cb..919048e278 100644 +--- a/dpdk/drivers/net/sfc/sfc_repr.c ++++ b/dpdk/drivers/net/sfc/sfc_repr.c +@@ -9,6 +9,8 @@ + + #include + ++#include ++#include + #include + #include + #include +@@ -834,6 +836,8 @@ sfc_repr_dev_close(struct rte_eth_dev *dev) + + (void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id); + ++ sfc_mae_clear_switch_port(srs->switch_domain_id, srs->switch_port_id); ++ + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + dev->dev_ops = NULL; +@@ -888,6 +892,29 @@ sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + return 0; + } + ++static int ++sfc_repr_flow_pick_transfer_proxy(struct rte_eth_dev *dev, ++ uint16_t *transfer_proxy_port, ++ struct rte_flow_error *error) ++{ ++ struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev); ++ ++ return rte_flow_pick_transfer_proxy(srs->pf_port_id, ++ transfer_proxy_port, error); ++} ++ ++const struct rte_flow_ops sfc_repr_flow_ops = { ++ .pick_transfer_proxy = sfc_repr_flow_pick_transfer_proxy, ++}; ++ ++static int ++sfc_repr_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused, ++ const struct rte_flow_ops **ops) ++{ ++ *ops = &sfc_repr_flow_ops; ++ return 0; ++} ++ + static const struct eth_dev_ops sfc_repr_dev_ops = { + .dev_configure = sfc_repr_dev_configure, + .dev_start = sfc_repr_dev_start, +@@ -901,6 +928,7 @@ static const struct eth_dev_ops sfc_repr_dev_ops = { + .rx_queue_release = sfc_repr_rx_queue_release, + .tx_queue_setup = sfc_repr_tx_queue_setup, + .tx_queue_release = sfc_repr_tx_queue_release, ++ .flow_ops_get = sfc_repr_dev_flow_ops_get, + }; + + +diff --git a/dpdk/drivers/net/sfc/sfc_rx.c b/dpdk/drivers/net/sfc/sfc_rx.c +index 5ea98187c3..edd0f0c038 100644 +--- a/dpdk/drivers/net/sfc/sfc_rx.c ++++ b/dpdk/drivers/net/sfc/sfc_rx.c +@@ -1225,6 +1225,9 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index, + else + rxq_info->rxq_flags = 0; + ++ if (rxq_info->type_flags & EFX_RXQ_FLAG_INGRESS_MPORT) ++ rxq_info->rxq_flags |= SFC_RXQ_FLAG_INGRESS_MPORT; ++ + rxq->buf_size = buf_size; + + rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_NIC_DMA_ADDR_RX_RING, +diff --git a/dpdk/drivers/net/sfc/sfc_switch.c b/dpdk/drivers/net/sfc/sfc_switch.c +index 5c10e8fc74..8f1ee97fa8 100644 +--- a/dpdk/drivers/net/sfc/sfc_switch.c ++++ b/dpdk/drivers/net/sfc/sfc_switch.c +@@ -489,6 +489,7 @@ sfc_mae_clear_switch_port(uint16_t switch_domain_id, + uint16_t switch_port_id) + { + struct sfc_mae_switch_domain *domain; ++ struct sfc_mae_switch_port *port; + + rte_spinlock_lock(&sfc_mae_switch.lock); + +@@ -504,6 +505,17 @@ sfc_mae_clear_switch_port(uint16_t switch_domain_id, + domain->mae_admin_port = NULL; + } + ++ TAILQ_FOREACH(port, &domain->ports, 
switch_domain_ports) { ++ if (port->id == switch_port_id) { ++ /* ++ * Invalidate the field to prevent wrong ++ * look-ups from flow rule handling path. ++ */ ++ port->ethdev_port_id = RTE_MAX_ETHPORTS; ++ break; ++ } ++ } ++ + rte_spinlock_unlock(&sfc_mae_switch.lock); + return 0; + } +diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c +index f2a6c33a19..66595f8312 100644 +--- a/dpdk/drivers/net/tap/rte_eth_tap.c ++++ b/dpdk/drivers/net/tap/rte_eth_tap.c +@@ -2303,8 +2303,8 @@ set_mac_type(const char *key __rte_unused, + if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) { + static int iface_idx; + +- /* fixed mac = 00:64:74:61:70: */ +- memcpy((char *)user_mac->addr_bytes, "\0dtap", ++ /* fixed mac = 02:64:74:61:70: */ ++ memcpy((char *)user_mac->addr_bytes, "\002dtap", + RTE_ETHER_ADDR_LEN); + user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] = + iface_idx++ + '0'; +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.c b/dpdk/drivers/net/txgbe/base/txgbe_hw.c +index 8966453a03..de96549ae8 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_hw.c ++++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.c +@@ -2273,10 +2273,24 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + } + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) { ++ u32 curr_autoneg; ++ + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL; + ++ status = hw->mac.check_link(hw, &link_speed, &link_up, false); ++ if (status != 0) ++ return status; ++ ++ /* If we already have link at this speed, just jump out */ ++ if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { ++ curr_autoneg = rd32_epcs(hw, SR_MII_MMD_CTL); ++ if (link_up && (hw->autoneg == ++ !!(curr_autoneg & SR_MII_MMD_CTL_AN_EN))) ++ goto out; ++ } ++ + /* Set the module link speed */ + switch (hw->phy.media_type) { + case txgbe_media_type_fiber: +@@ -2987,10 +3001,6 @@ void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) + { + u32 esdp_reg = rd32(hw, TXGBE_GPIODATA); + +- /* Blocked by MNG FW so bail */ +- if (txgbe_check_reset_blocked(hw)) +- return; +- + if (txgbe_close_notify(hw)) + txgbe_led_off(hw, TXGBE_LEDCTL_UP | TXGBE_LEDCTL_10G | + TXGBE_LEDCTL_1G | TXGBE_LEDCTL_ACTIVE); +@@ -3038,10 +3048,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) + **/ + void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw) + { +- /* Blocked by MNG FW so bail */ +- if (txgbe_check_reset_blocked(hw)) +- return; +- + if (hw->mac.autotry_restart) { + txgbe_disable_tx_laser_multispeed_fiber(hw); + txgbe_enable_tx_laser_multispeed_fiber(hw); +@@ -3432,18 +3438,9 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) + autoc = hw->mac.autoc_read(hw); + + mac_reset_top: +- /* +- * Issue global reset to the MAC. Needs to be SW reset if link is up. +- * If link reset is used when link is up, it might reset the PHY when +- * mng is using it. If link is down or the flag to force full link +- * reset is set, then perform link reset. +- */ +- if (txgbe_mng_present(hw)) { +- txgbe_hic_reset(hw); +- } else { +- wr32(hw, TXGBE_RST, TXGBE_RST_LAN(hw->bus.lan_id)); +- txgbe_flush(hw); +- } ++ /* Do LAN reset, the MNG domain will not be reset. 
*/ ++ wr32(hw, TXGBE_RST, TXGBE_RST_LAN(hw->bus.lan_id)); ++ txgbe_flush(hw); + usec_delay(10); + + txgbe_reset_misc(hw); +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_phy.c b/dpdk/drivers/net/txgbe/base/txgbe_phy.c +index 9f46d5bdb0..a7c11c50df 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_phy.c ++++ b/dpdk/drivers/net/txgbe/base/txgbe_phy.c +@@ -1380,7 +1380,9 @@ txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) + wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x0105); + wr32_epcs(hw, SR_MII_MMD_DIGI_CTL, 0x0200); + value = rd32_epcs(hw, SR_MII_MMD_CTL); +- value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9); ++ value = (value & ~0x1200) | (0x1 << 9); ++ if (hw->autoneg) ++ value |= SR_MII_MMD_CTL_AN_EN; + wr32_epcs(hw, SR_MII_MMD_CTL, value); + } + +@@ -1519,8 +1521,9 @@ txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) + goto out; + } + +- wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, +- ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA); ++ hw->mac.disable_sec_tx_path(hw); + + /* 2. Disable xpcs AN-73 */ + if (!autoneg) +@@ -1693,9 +1696,10 @@ txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } else if (hw->fw_version <= TXGBE_FW_N_TXEQ) { + value = (0x1804 & ~0x3F3F); ++ value |= 40 << 8; + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + +- value = (0x50 & ~0x7F) | 40 | (1 << 6); ++ value = (0x50 & ~0x7F) | (1 << 6); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + out: +@@ -1733,8 +1737,9 @@ txgbe_set_link_to_kx(struct txgbe_hw *hw, + goto out; + } + +- wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, +- ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA); ++ hw->mac.disable_sec_tx_path(hw); + + /* 2. Disable xpcs AN-73 */ + if (!autoneg) +@@ -1907,10 +1912,10 @@ txgbe_set_link_to_kx(struct txgbe_hw *hw, + value |= hw->phy.ffe_post | (1 << 6); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } else if (hw->fw_version <= TXGBE_FW_N_TXEQ) { +- value = (0x1804 & ~0x3F3F) | (24 << 8) | 4; ++ value = (0x1804 & ~0x3F3F) | (40 << 8); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + +- value = (0x50 & ~0x7F) | 16 | (1 << 6); ++ value = (0x50 & ~0x7F) | (1 << 6); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + out: +@@ -1940,8 +1945,9 @@ txgbe_set_link_to_sfi(struct txgbe_hw *hw, + goto out; + } + +- wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, +- ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA); ++ hw->mac.disable_sec_tx_path(hw); + + /* 2. 
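The txgbe_regs.h hunk above turns txgbe_flush() from a macro that always read one fixed offset into an inline that picks a register valid for the present function. The underlying idea, sketched here with stand-in names (dev, REG_STATUS_PF, REG_STATUS_VF and mmio_base are assumptions, not the txgbe register map):

#include <stdint.h>

enum mac_kind { MAC_PF, MAC_VF };

struct dev {
	enum mac_kind kind;
	volatile uint32_t *mmio_base;   /* BAR mapping */
};

#define REG_STATUS_PF 0x100C   /* illustrative offsets only */
#define REG_STATUS_VF 0x0008

static inline uint32_t reg_read(struct dev *d, uint32_t off)
{
	return d->mmio_base[off / 4];
}

/* Reading any register on the device forces earlier posted PCIe writes
 * to complete; the register just has to be side-effect free and
 * accessible from the current (PF or VF) register space. */
static inline void flush_writes(struct dev *d)
{
	switch (d->kind) {
	case MAC_PF:
		(void)reg_read(d, REG_STATUS_PF);
		break;
	case MAC_VF:
		(void)reg_read(d, REG_STATUS_VF);
		break;
	}
}

The switch is the whole point of the change: a VF must not read a PF-only offset, which the old one-offset macro did unconditionally.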
Disable xpcs AN-73 */ + wr32_epcs(hw, SR_AN_CTRL, 0x0); +@@ -2292,6 +2298,8 @@ void txgbe_autoc_write(struct txgbe_hw *hw, u64 autoc) + txgbe_set_sgmii_an37_ability(hw); + } + ++ hw->mac.enable_sec_tx_path(hw); ++ + if (speed == TXGBE_LINK_SPEED_10GB_FULL) + mactxcfg = TXGBE_MACTXCFG_SPEED_10G; + else if (speed == TXGBE_LINK_SPEED_1GB_FULL) +@@ -2301,6 +2309,7 @@ void txgbe_autoc_write(struct txgbe_hw *hw, u64 autoc) + wr32m(hw, TXGBE_MACTXCFG, + TXGBE_MACTXCFG_SPEED_MASK | TXGBE_MACTXCFG_TXE, + mactxcfg | TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, TXGBE_MACRXCFG_ENA); + } + + void txgbe_bp_down_event(struct txgbe_hw *hw) +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_regs.h b/dpdk/drivers/net/txgbe/base/txgbe_regs.h +index 911bb6e04e..79290a7afe 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_regs.h ++++ b/dpdk/drivers/net/txgbe/base/txgbe_regs.h +@@ -1579,6 +1579,7 @@ enum txgbe_5tuple_protocol { + #define TXGBE_GPIOINTMASK 0x014834 + #define TXGBE_GPIOINTTYPE 0x014838 + #define TXGBE_GPIOINTSTAT 0x014840 ++#define TXGBE_GPIORAWINTSTAT 0x014844 + #define TXGBE_GPIOEOI 0x01484C + + +@@ -1884,7 +1885,19 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual, + } + + /* flush all write operations */ +-#define txgbe_flush(hw) rd32(hw, 0x00100C) ++static inline void txgbe_flush(struct txgbe_hw *hw) ++{ ++ switch (hw->mac.type) { ++ case txgbe_mac_raptor: ++ rd32(hw, TXGBE_PWR); ++ break; ++ case txgbe_mac_raptor_vf: ++ rd32(hw, TXGBE_VFSTATUS); ++ break; ++ default: ++ break; ++ } ++} + + #define rd32a(hw, reg, idx) ( \ + rd32((hw), (reg) + ((idx) << 2))) +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_type.h b/dpdk/drivers/net/txgbe/base/txgbe_type.h +index c3486b472f..75e839b7de 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_type.h ++++ b/dpdk/drivers/net/txgbe/base/txgbe_type.h +@@ -783,6 +783,7 @@ struct txgbe_hw { + bool allow_unsupported_sfp; + bool need_crosstalk_fix; + bool dev_start; ++ bool autoneg; + struct txgbe_devargs devarg; + + uint64_t isb_dma; +diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c +index 86ef979b29..2c7d71c0db 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c ++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c +@@ -179,7 +179,9 @@ static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = { + HW_XSTAT(tx_total_packets), + HW_XSTAT(rx_total_missed_packets), + HW_XSTAT(rx_broadcast_packets), ++ HW_XSTAT(tx_broadcast_packets), + HW_XSTAT(rx_multicast_packets), ++ HW_XSTAT(tx_multicast_packets), + HW_XSTAT(rx_management_packets), + HW_XSTAT(tx_management_packets), + HW_XSTAT(rx_management_dropped), +@@ -543,6 +545,7 @@ null: + static int + eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + { ++ struct txgbe_adapter *ad = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev); + struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev); +@@ -591,6 +594,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + return 0; + } + ++ __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST); + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; +@@ -1530,6 +1534,25 @@ txgbe_dev_configure(struct rte_eth_dev *dev) + return 0; + } + ++static void txgbe_reinit_gpio_intr(struct txgbe_hw *hw) ++{ ++ u32 reg; ++ ++ wr32(hw, TXGBE_GPIOINTMASK, 0xFF); ++ reg = rd32(hw, TXGBE_GPIORAWINTSTAT); ++ ++ if 
(reg & TXGBE_GPIOBIT_2) ++ wr32(hw, TXGBE_GPIOEOI, TXGBE_GPIOBIT_2); ++ ++ if (reg & TXGBE_GPIOBIT_3) ++ wr32(hw, TXGBE_GPIOEOI, TXGBE_GPIOBIT_3); ++ ++ if (reg & TXGBE_GPIOBIT_6) ++ wr32(hw, TXGBE_GPIOEOI, TXGBE_GPIOBIT_6); ++ ++ wr32(hw, TXGBE_GPIOINTMASK, 0); ++} ++ + static void + txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) + { +@@ -1647,7 +1670,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) + PMD_INIT_FUNC_TRACE(); + + /* Stop the link setup handler before resetting the HW. */ +- rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev); ++ txgbe_dev_wait_setup_link_complete(dev, 0); + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); +@@ -1668,6 +1691,10 @@ txgbe_dev_start(struct rte_eth_dev *dev) + hw->mac.get_link_status = true; + hw->dev_start = true; + ++ /* workaround for GPIO intr lost when mng_veto bit is set */ ++ if (txgbe_check_reset_blocked(hw)) ++ txgbe_reinit_gpio_intr(hw); ++ + /* configure PF module if SRIOV enabled */ + txgbe_pf_host_configure(dev); + +@@ -1786,6 +1813,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) + speed = (TXGBE_LINK_SPEED_100M_FULL | + TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_10GB_FULL); ++ hw->autoneg = true; + } else { + if (*link_speeds & RTE_ETH_LINK_SPEED_10G) + speed |= TXGBE_LINK_SPEED_10GB_FULL; +@@ -1797,6 +1825,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) + speed |= TXGBE_LINK_SPEED_1GB_FULL; + if (*link_speeds & RTE_ETH_LINK_SPEED_100M) + speed |= TXGBE_LINK_SPEED_100M_FULL; ++ hw->autoneg = false; + } + + err = hw->mac.setup_link(hw, speed, link_up); +@@ -1879,11 +1908,15 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + +- rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev); ++ txgbe_dev_wait_setup_link_complete(dev, 0); + + /* disable interrupts */ + txgbe_disable_intr(hw); + ++ /* workaround for GPIO intr lost when mng_veto bit is set */ ++ if (txgbe_check_reset_blocked(hw)) ++ txgbe_reinit_gpio_intr(hw); ++ + /* reset the NIC */ + txgbe_pf_reset_hw(hw); + hw->adapter_stopped = 0; +@@ -2019,8 +2052,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) + rte_delay_ms(100); + } while (retries++ < (10 + TXGBE_LINK_UP_TIME)); + +- /* cancel the delay handler before remove dev */ ++ /* cancel all alarm handler before remove dev */ + rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev); ++ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev); + + /* uninitialize PF if max_vfs not zero */ + txgbe_pf_host_uninit(dev); +@@ -2690,11 +2724,52 @@ txgbe_dev_setup_link_alarm_handler(void *param) + intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + } + ++/* ++ * If @timeout_ms was 0, it means that it will not return until link complete. ++ * It returns 1 on complete, return 0 on timeout. ++ */ ++int ++txgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) ++{ ++#define WARNING_TIMEOUT 9000 /* 9s in total */ ++ struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev); ++ uint32_t timeout = timeout_ms ? 
timeout_ms : WARNING_TIMEOUT; ++ ++ while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) { ++ msec_delay(1); ++ timeout--; ++ ++ if (timeout_ms) { ++ if (!timeout) ++ return 0; ++ } else if (!timeout) { ++ /* It will not return until link complete */ ++ timeout = WARNING_TIMEOUT; ++ PMD_DRV_LOG(ERR, "TXGBE link thread not complete too long time!"); ++ } ++ } ++ ++ return 1; ++} ++ ++static uint32_t ++txgbe_dev_setup_link_thread_handler(void *param) ++{ ++ struct rte_eth_dev *dev = (struct rte_eth_dev *)param; ++ struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev); ++ ++ rte_thread_detach(rte_thread_self()); ++ txgbe_dev_setup_link_alarm_handler(dev); ++ __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST); ++ return 0; ++} ++ + /* return 0 means link status changed, -1 means not changed */ + int + txgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete) + { ++ struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev); + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct rte_eth_link link; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; +@@ -2731,10 +2806,24 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, + if ((hw->subsystem_device_id & 0xFF) == + TXGBE_DEV_ID_KR_KX_KX4) { + hw->mac.bp_down_event(hw); +- } else if (hw->phy.media_type == txgbe_media_type_fiber) { +- intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; +- rte_eal_alarm_set(10, +- txgbe_dev_setup_link_alarm_handler, dev); ++ } else if (hw->phy.media_type == txgbe_media_type_fiber && ++ dev->data->dev_conf.intr_conf.lsc != 0) { ++ txgbe_dev_wait_setup_link_complete(dev, 0); ++ if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) { ++ /* To avoid race condition between threads, set ++ * the TXGBE_FLAG_NEED_LINK_CONFIG flag only ++ * when there is no link thread running. 
++ */ ++ intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; ++ if (rte_thread_create(&ad->link_thread_tid, NULL, ++ txgbe_dev_setup_link_thread_handler, dev) < 0) { ++ PMD_DRV_LOG(ERR, "Create link thread failed!"); ++ __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST); ++ } ++ } else { ++ PMD_DRV_LOG(ERR, ++ "Other link thread is running now!"); ++ } + } + return rte_eth_linkstatus_set(dev, &link); + } else if (!hw->dev_start) { +@@ -2949,9 +3038,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, + rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX) + wr32(hw, TXGBE_PX_INTA, 1); + +- /* clear all cause mask */ +- txgbe_disable_intr(hw); +- + /* read-on-clear nic registers here */ + eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; + PMD_DRV_LOG(DEBUG, "eicr %x", eicr); +@@ -2974,6 +3060,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, + if (eicr & TXGBE_ICRMISC_GPIO) + intr->flags |= TXGBE_FLAG_PHY_INTERRUPT; + ++ ++ ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC] = 0; + return 0; + } + +@@ -3143,7 +3231,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) + } + + /* restore original mask */ +- intr->mask_misc |= TXGBE_ICRMISC_LSC; ++ if (dev->data->dev_conf.intr_conf.lsc == 1) ++ intr->mask_misc |= TXGBE_ICRMISC_LSC; + + intr->mask = intr->mask_orig; + intr->mask_orig = 0; +diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.h b/dpdk/drivers/net/txgbe/txgbe_ethdev.h +index 6a18865e23..b8a39204e2 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ethdev.h ++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.h +@@ -369,6 +369,9 @@ struct txgbe_adapter { + + /* For RSS reta table update */ + uint8_t rss_reta_updated; ++ ++ uint32_t link_thread_running; ++ rte_thread_t link_thread_tid; + }; + + #define TXGBE_DEV_ADAPTER(dev) \ +@@ -560,6 +563,9 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev); + int + txgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete); ++int ++txgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, ++ uint32_t timeout_ms); + int txgbe_pf_host_init(struct rte_eth_dev *eth_dev); + + void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev); +diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c +index 3b1f7c913b..f1341fbf7e 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c ++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c +@@ -165,6 +165,7 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev) + { + int err; + uint32_t tc, tcs; ++ struct txgbe_adapter *ad = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev); +@@ -205,6 +206,7 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev) + return 0; + } + ++ __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST); + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->device_id = pci_dev->id.device_id; +@@ -618,7 +620,7 @@ txgbevf_dev_start(struct rte_eth_dev *dev) + PMD_INIT_FUNC_TRACE(); + + /* Stop the link setup handler before resetting the HW. 
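The txgbe link-thread changes above replace the old alarm with a detached thread guarded by an atomic flag. The claim/wait protocol is easy to get wrong, so here is a self-contained sketch of the same shape; worker, kick_worker and wait_worker are illustrative stand-ins:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative flag; the driver keeps an equivalent per-adapter field. */
static unsigned char running;

static void *worker(void *arg)
{
	(void)arg;
	usleep(10 * 1000);   /* stand-in for the link setup work */
	__atomic_clear(&running, __ATOMIC_SEQ_CST);
	return NULL;
}

/* Claim the flag before spawning so at most one worker ever exists. */
static void kick_worker(void)
{
	pthread_t tid;

	if (__atomic_test_and_set(&running, __ATOMIC_SEQ_CST))
		return;   /* another worker is already running, skip */

	if (pthread_create(&tid, NULL, worker, NULL) != 0)
		__atomic_clear(&running, __ATOMIC_SEQ_CST);   /* roll back the claim */
	else
		pthread_detach(tid);
}

/* Bounded wait: returns 1 on completion, 0 on timeout. (The driver's
 * variant above additionally treats a timeout of 0 as "wait forever".) */
static int wait_worker(unsigned int timeout_ms)
{
	while (__atomic_load_n(&running, __ATOMIC_SEQ_CST)) {
		if (timeout_ms-- == 0)
			return 0;
		usleep(1000);
	}
	return 1;
}

int main(void)
{
	kick_worker();
	printf("%s\n", wait_worker(100) ? "done" : "timeout");
	return 0;
}

Claiming the flag with __atomic_test_and_set() before pthread_create(), and rolling it back on failure, closes the window in which two callers could both decide to spawn.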
*/ +- rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev); ++ txgbe_dev_wait_setup_link_complete(dev, 0); + + err = hw->mac.reset_hw(hw); + if (err) { +@@ -720,7 +722,7 @@ txgbevf_dev_stop(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + +- rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev); ++ txgbe_dev_wait_setup_link_complete(dev, 0); + + txgbevf_intr_disable(dev); + +diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c +index ac1bba08a3..427f8b82ac 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c ++++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c +@@ -516,20 +516,21 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) + return cmdtype; + } + +-static inline uint8_t +-tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) ++static inline uint32_t ++tx_desc_ol_flags_to_ptype(uint64_t oflags) + { ++ uint32_t ptype; + bool tun; + +- if (ptype) +- return txgbe_encode_ptype(ptype); +- + /* Only support flags in TXGBE_TX_OFFLOAD_MASK */ + tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK); + + /* L2 level */ + ptype = RTE_PTYPE_L2_ETHER; + if (oflags & RTE_MBUF_F_TX_VLAN) ++ ptype |= (tun ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN); ++ ++ if (oflags & RTE_MBUF_F_TX_QINQ) /* tunnel + QINQ is not supported */ + ptype |= RTE_PTYPE_L2_ETHER_VLAN; + + /* L3 level */ +@@ -587,6 +588,16 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + break; + } + ++ return ptype; ++} ++ ++static inline uint8_t ++tx_desc_ol_flags_to_ptid(uint64_t oflags) ++{ ++ uint32_t ptype; ++ ++ ptype = tx_desc_ol_flags_to_ptype(oflags); ++ + return txgbe_encode_ptype(ptype); + } + +@@ -776,8 +787,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + /* If hardware offload required */ + tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK; + if (tx_ol_req) { +- tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req, +- tx_pkt->packet_type); ++ tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req); + if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) + tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt); + tx_offload.l2_len = tx_pkt->l2_len; +@@ -4382,7 +4392,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); +- buf_size = ROUND_UP(buf_size, 0x1 << 10); ++ buf_size = ROUND_DOWN(buf_size, 0x1 << 10); + srrctl |= TXGBE_RXCFG_PKTLEN(buf_size); + + wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl); +diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c +index b152279fac..f7e1b268ed 100644 +--- a/dpdk/drivers/net/vhost/rte_eth_vhost.c ++++ b/dpdk/drivers/net/vhost/rte_eth_vhost.c +@@ -78,8 +78,9 @@ struct vhost_queue { + uint16_t port; + uint16_t virtqueue_id; + struct vhost_stats stats; +- int intr_enable; + rte_spinlock_t intr_lock; ++ struct epoll_event ev; ++ int kickfd; + }; + + struct pmd_internal { +@@ -297,7 +298,7 @@ vhost_dev_csum_configure(struct rte_eth_dev *eth_dev) + if (internal->features & (1ULL << VIRTIO_NET_F_CSUM)) { + if (!(rxmode->offloads & + (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))) { +- VHOST_LOG(NOTICE, "Rx csum will be done in SW, may impact performance."); ++ VHOST_LOG(NOTICE, "Rx csum will be done in SW, may impact performance.\n"); + internal->rx_sw_csum = true; + } + } +@@ -305,7 +306,7 @@ vhost_dev_csum_configure(struct rte_eth_dev *eth_dev) + if (!(internal->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM))) { + if (txmode->offloads & + (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 
RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) { +- VHOST_LOG(NOTICE, "Tx csum will be done in SW, may impact performance."); ++ VHOST_LOG(NOTICE, "Tx csum will be done in SW, may impact performance.\n"); + internal->tx_sw_csum = true; + } + } +@@ -545,115 +546,68 @@ find_internal_resource(char *ifname) + return list; + } + +-static int ++static void + eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx) + { +- struct rte_intr_handle *handle = eth_dev->intr_handle; +- struct rte_epoll_event rev, *elist; +- int epfd, ret; +- +- if (handle == NULL) +- return 0; +- +- elist = rte_intr_elist_index_get(handle, rxq_idx); +- if (rte_intr_efds_index_get(handle, rxq_idx) == elist->fd) +- return 0; +- +- VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n", +- rxq_idx); ++ struct rte_vhost_vring vring; ++ struct vhost_queue *vq; + +- if (elist->fd != -1) +- VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n", +- elist->fd); ++ vq = eth_dev->data->rx_queues[rxq_idx]; ++ if (vq == NULL || vq->vid < 0) ++ return; + +- /* +- * First remove invalid epoll event, and then install +- * the new one. May be solved with a proper API in the +- * future. +- */ +- epfd = elist->epfd; +- rev = *elist; +- ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd, +- elist); +- if (ret) { +- VHOST_LOG(ERR, "Delete epoll event failed.\n"); +- return ret; ++ if (rte_vhost_get_vhost_vring(vq->vid, (rxq_idx << 1) + 1, &vring) < 0) { ++ VHOST_LOG(DEBUG, "Failed to get rxq-%d's vring, skip!\n", rxq_idx); ++ return; + } + +- rev.fd = rte_intr_efds_index_get(handle, rxq_idx); +- if (rte_intr_elist_index_set(handle, rxq_idx, rev)) +- return -rte_errno; ++ rte_spinlock_lock(&vq->intr_lock); + +- elist = rte_intr_elist_index_get(handle, rxq_idx); +- ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd, elist); +- if (ret) { +- VHOST_LOG(ERR, "Add epoll event failed.\n"); +- return ret; ++ /* Remove previous kickfd from proxy epoll */ ++ if (vq->kickfd >= 0 && vq->kickfd != vring.kickfd) { ++ if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) { ++ VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n", ++ vq->kickfd, rxq_idx, strerror(errno)); ++ } else { ++ VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n", ++ vq->kickfd, rxq_idx); ++ } ++ vq->kickfd = -1; ++ } ++ ++ /* Add new one, if valid */ ++ if (vq->kickfd != vring.kickfd && vring.kickfd >= 0) { ++ if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_ADD, vring.kickfd, &vq->ev) < 0) { ++ VHOST_LOG(ERR, "Failed to register %d in rxq-%d epoll: %s\n", ++ vring.kickfd, rxq_idx, strerror(errno)); ++ } else { ++ vq->kickfd = vring.kickfd; ++ VHOST_LOG(DEBUG, "Registered %d in rxq-%d epoll\n", ++ vq->kickfd, rxq_idx); ++ } + } + +- return 0; ++ rte_spinlock_unlock(&vq->intr_lock); + } + + static int + eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid) + { +- struct rte_vhost_vring vring; +- struct vhost_queue *vq; +- int old_intr_enable, ret = 0; ++ struct vhost_queue *vq = dev->data->rx_queues[qid]; + +- vq = dev->data->rx_queues[qid]; +- if (!vq) { +- VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid); +- return -1; +- } ++ if (vq->vid >= 0) ++ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1); + +- rte_spinlock_lock(&vq->intr_lock); +- old_intr_enable = vq->intr_enable; +- vq->intr_enable = 1; +- ret = eth_vhost_update_intr(dev, qid); +- rte_spinlock_unlock(&vq->intr_lock); +- +- if (ret < 0) { +- VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid); +- vq->intr_enable = old_intr_enable; +- return ret; +- } 
+- +- ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring); +- if (ret < 0) { +- VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid); +- return ret; +- } +- VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid); +- rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1); +- rte_wmb(); +- +- return ret; ++ return 0; + } + + static int + eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid) + { +- struct rte_vhost_vring vring; +- struct vhost_queue *vq; +- int ret = 0; ++ struct vhost_queue *vq = dev->data->rx_queues[qid]; + +- vq = dev->data->rx_queues[qid]; +- if (!vq) { +- VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid); +- return -1; +- } +- +- ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring); +- if (ret < 0) { +- VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid); +- return ret; +- } +- VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid); +- rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0); +- rte_wmb(); +- +- vq->intr_enable = 0; ++ if (vq->vid >= 0) ++ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0); + + return 0; + } +@@ -664,6 +618,14 @@ eth_vhost_uninstall_intr(struct rte_eth_dev *dev) + struct rte_intr_handle *intr_handle = dev->intr_handle; + + if (intr_handle != NULL) { ++ int i; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) { ++ int epoll_fd = rte_intr_efds_index_get(dev->intr_handle, i); ++ ++ if (epoll_fd >= 0) ++ close(epoll_fd); ++ } + rte_intr_vec_list_free(intr_handle); + rte_intr_instance_free(intr_handle); + } +@@ -673,72 +635,111 @@ eth_vhost_uninstall_intr(struct rte_eth_dev *dev) + static int + eth_vhost_install_intr(struct rte_eth_dev *dev) + { +- struct rte_vhost_vring vring; +- struct vhost_queue *vq; + int nb_rxq = dev->data->nb_rx_queues; +- int i; +- int ret; ++ struct vhost_queue *vq; + +- /* uninstall firstly if we are reconnecting */ +- if (dev->intr_handle != NULL) +- eth_vhost_uninstall_intr(dev); ++ int ret; ++ int i; + + dev->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE); + if (dev->intr_handle == NULL) { + VHOST_LOG(ERR, "Fail to allocate intr_handle\n"); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto error; ++ } ++ if (rte_intr_efd_counter_size_set(dev->intr_handle, 0)) { ++ ret = -rte_errno; ++ goto error; + } +- if (rte_intr_efd_counter_size_set(dev->intr_handle, sizeof(uint64_t))) +- return -rte_errno; + + if (rte_intr_vec_list_alloc(dev->intr_handle, NULL, nb_rxq)) { +- VHOST_LOG(ERR, +- "Failed to allocate memory for interrupt vector\n"); +- rte_intr_instance_free(dev->intr_handle); +- return -ENOMEM; ++ VHOST_LOG(ERR, "Failed to allocate memory for interrupt vector\n"); ++ ret = -ENOMEM; ++ goto error; + } + +- +- VHOST_LOG(INFO, "Prepare intr vec\n"); ++ VHOST_LOG(DEBUG, "Prepare intr vec\n"); + for (i = 0; i < nb_rxq; i++) { +- if (rte_intr_vec_list_index_set(dev->intr_handle, i, RTE_INTR_VEC_RXTX_OFFSET + i)) +- return -rte_errno; +- if (rte_intr_efds_index_set(dev->intr_handle, i, -1)) +- return -rte_errno; +- vq = dev->data->rx_queues[i]; +- if (!vq) { +- VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i); +- continue; +- } ++ int epoll_fd = epoll_create1(0); + +- ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring); +- if (ret < 0) { +- VHOST_LOG(INFO, +- "Failed to get rxq-%d's vring, skip!\n", i); +- continue; ++ if (epoll_fd < 0) { ++ VHOST_LOG(ERR, "Failed to create proxy epoll fd for rxq-%d\n", i); ++ ret = -errno; ++ goto error; + } + +- if (vring.kickfd < 0) { +- VHOST_LOG(INFO, +- "rxq-%d's kickfd is invalid, skip!\n", i); 
+- continue; ++ if (rte_intr_vec_list_index_set(dev->intr_handle, i, ++ RTE_INTR_VEC_RXTX_OFFSET + i) || ++ rte_intr_efds_index_set(dev->intr_handle, i, epoll_fd)) { ++ ret = -rte_errno; ++ close(epoll_fd); ++ goto error; + } + +- if (rte_intr_efds_index_set(dev->intr_handle, i, vring.kickfd)) +- continue; +- VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i); ++ vq = dev->data->rx_queues[i]; ++ memset(&vq->ev, 0, sizeof(vq->ev)); ++ vq->ev.events = EPOLLIN; ++ vq->ev.data.fd = epoll_fd; + } + +- if (rte_intr_nb_efd_set(dev->intr_handle, nb_rxq)) +- return -rte_errno; ++ if (rte_intr_nb_efd_set(dev->intr_handle, nb_rxq)) { ++ ret = -rte_errno; ++ goto error; ++ } ++ if (rte_intr_max_intr_set(dev->intr_handle, nb_rxq + 1)) { ++ ret = -rte_errno; ++ goto error; ++ } ++ if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_VDEV)) { ++ ret = -rte_errno; ++ goto error; ++ } + +- if (rte_intr_max_intr_set(dev->intr_handle, nb_rxq + 1)) +- return -rte_errno; ++ return 0; + +- if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_VDEV)) +- return -rte_errno; ++error: ++ eth_vhost_uninstall_intr(dev); ++ return ret; ++} + +- return 0; ++static void ++eth_vhost_configure_intr(struct rte_eth_dev *dev) ++{ ++ int i; ++ ++ VHOST_LOG(DEBUG, "Configure intr vec\n"); ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ eth_vhost_update_intr(dev, i); ++} ++ ++static void ++eth_vhost_unconfigure_intr(struct rte_eth_dev *eth_dev) ++{ ++ struct vhost_queue *vq; ++ int i; ++ ++ VHOST_LOG(DEBUG, "Unconfigure intr vec\n"); ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { ++ vq = eth_dev->data->rx_queues[i]; ++ if (vq == NULL || vq->vid < 0) ++ continue; ++ ++ rte_spinlock_lock(&vq->intr_lock); ++ ++ /* Remove previous kickfd from proxy epoll */ ++ if (vq->kickfd >= 0) { ++ if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) { ++ VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n", ++ vq->kickfd, i, strerror(errno)); ++ } else { ++ VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n", ++ vq->kickfd, i); ++ } ++ vq->kickfd = -1; ++ } ++ ++ rte_spinlock_unlock(&vq->intr_lock); ++ } + } + + static void +@@ -847,16 +848,8 @@ new_device(int vid) + internal->vid = vid; + if (rte_atomic32_read(&internal->started) == 1) { + queue_setup(eth_dev, internal); +- +- if (dev_conf->intr_conf.rxq) { +- if (eth_vhost_install_intr(eth_dev) < 0) { +- VHOST_LOG(INFO, +- "Failed to install interrupt handler."); +- return -1; +- } +- } +- } else { +- VHOST_LOG(INFO, "RX/TX queues not exist yet\n"); ++ if (dev_conf->intr_conf.rxq) ++ eth_vhost_configure_intr(eth_dev); + } + + for (i = 0; i < rte_vhost_get_vring_num(vid); i++) +@@ -900,6 +893,7 @@ destroy_device(int vid) + + rte_atomic32_set(&internal->dev_attached, 0); + update_queuing_status(eth_dev, true); ++ eth_vhost_unconfigure_intr(eth_dev); + + eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + +@@ -928,55 +922,10 @@ destroy_device(int vid) + rte_spinlock_unlock(&state->lock); + + VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid); +- eth_vhost_uninstall_intr(eth_dev); + + rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); + } + +-static int +-vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id) +-{ +- struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; +- struct pmd_internal *internal = eth_dev->data->dev_private; +- struct vhost_queue *vq; +- struct rte_vhost_vring vring; +- int rx_idx = vring_id % 2 ? 
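The net/vhost rework above stops handing the raw kickfd to the interrupt framework and instead gives each Rx queue a stable proxy epoll fd, into which the current kickfd is registered or unregistered as the backend reconnects. A minimal standalone demonstration of the proxy idea, using an eventfd as a stand-in for the vhost kickfd:

#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* The proxy fd is what the application polls; it stays stable even
	 * when the backend descriptor behind it is swapped at runtime. */
	int proxy = epoll_create1(0);
	int kickfd = eventfd(0, 0);   /* stand-in for a vhost kickfd */
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = kickfd };
	struct epoll_event out;

	if (proxy < 0 || kickfd < 0)
		return 1;
	if (epoll_ctl(proxy, EPOLL_CTL_ADD, kickfd, &ev) < 0)
		return 1;

	eventfd_write(kickfd, 1);   /* simulate a guest kick */

	printf("events: %d\n", epoll_wait(proxy, &out, 1, 0));   /* prints 1 */

	/* On reconnect, the old backend fd is removed and a new one added;
	 * the application keeps polling the same proxy fd throughout. */
	epoll_ctl(proxy, EPOLL_CTL_DEL, kickfd, NULL);
	close(kickfd);
	close(proxy);
	return 0;
}

Because the application only ever polls the proxy fd, a kickfd change is invisible to it; only the EPOLL_CTL_DEL/EPOLL_CTL_ADD pair, done under the queue's intr_lock, has to be updated.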
(vring_id - 1) >> 1 : -1; +- int ret = 0; +- +- /* +- * The vring kickfd may be changed after the new device notification. +- * Update it when the vring state is updated. +- */ +- if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues && +- rte_atomic32_read(&internal->dev_attached) && +- rte_atomic32_read(&internal->started) && +- dev_conf->intr_conf.rxq) { +- ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring); +- if (ret) { +- VHOST_LOG(ERR, "Failed to get vring %d information.\n", +- vring_id); +- return ret; +- } +- +- if (rte_intr_efds_index_set(eth_dev->intr_handle, rx_idx, +- vring.kickfd)) +- return -rte_errno; +- +- vq = eth_dev->data->rx_queues[rx_idx]; +- if (!vq) { +- VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx); +- return -1; +- } +- +- rte_spinlock_lock(&vq->intr_lock); +- if (vq->intr_enable) +- ret = eth_vhost_update_intr(eth_dev, rx_idx); +- rte_spinlock_unlock(&vq->intr_lock); +- } +- +- return ret; +-} +- + static int + vring_state_changed(int vid, uint16_t vring, int enable) + { +@@ -996,9 +945,8 @@ vring_state_changed(int vid, uint16_t vring, int enable) + /* won't be NULL */ + state = vring_states[eth_dev->data->port_id]; + +- if (enable && vring_conf_update(vid, eth_dev, vring)) +- VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n", +- (int)vring); ++ if (eth_dev->data->dev_conf.intr_conf.rxq && vring % 2) ++ eth_vhost_update_intr(eth_dev, (vring - 1) >> 1); + + rte_spinlock_lock(&state->lock); + if (state->cur[vring] == enable) { +@@ -1185,18 +1133,17 @@ eth_dev_start(struct rte_eth_dev *eth_dev) + struct pmd_internal *internal = eth_dev->data->dev_private; + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + +- queue_setup(eth_dev, internal); +- +- if (rte_atomic32_read(&internal->dev_attached) == 1) { +- if (dev_conf->intr_conf.rxq) { +- if (eth_vhost_install_intr(eth_dev) < 0) { +- VHOST_LOG(INFO, +- "Failed to install interrupt handler."); +- return -1; +- } +- } ++ eth_vhost_uninstall_intr(eth_dev); ++ if (dev_conf->intr_conf.rxq && eth_vhost_install_intr(eth_dev) < 0) { ++ VHOST_LOG(ERR, "Failed to install interrupt handler.\n"); ++ return -1; + } + ++ queue_setup(eth_dev, internal); ++ if (rte_atomic32_read(&internal->dev_attached) == 1 && ++ dev_conf->intr_conf.rxq) ++ eth_vhost_configure_intr(eth_dev); ++ + rte_atomic32_set(&internal->started, 1); + update_queuing_status(eth_dev, false); + +@@ -1251,6 +1198,8 @@ eth_dev_close(struct rte_eth_dev *dev) + rte_free(internal->iface_name); + rte_free(internal); + ++ eth_vhost_uninstall_intr(dev); ++ + dev->data->dev_private = NULL; + + rte_free(vring_states[dev->data->port_id]); +@@ -1278,6 +1227,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + vq->mb_pool = mb_pool; + vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ; + rte_spinlock_init(&vq->intr_lock); ++ vq->kickfd = -1; + dev->data->rx_queues[rx_queue_id] = vq; + + return 0; +@@ -1300,6 +1250,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + + vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ; + rte_spinlock_init(&vq->intr_lock); ++ vq->kickfd = -1; + dev->data->tx_queues[tx_queue_id] = vq; + + return 0; +diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c +index 760ba4e368..c4f6fa55b3 100644 +--- a/dpdk/drivers/net/virtio/virtio_ethdev.c ++++ b/dpdk/drivers/net/virtio/virtio_ethdev.c +@@ -1797,22 +1797,25 @@ static int + virtio_configure_intr(struct rte_eth_dev *dev) + { + struct virtio_hw *hw = dev->data->dev_private; ++ int 
ret; + + if (!rte_intr_cap_multiple(dev->intr_handle)) { + PMD_INIT_LOG(ERR, "Multiple intr vector not supported"); + return -ENOTSUP; + } + +- if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) { ++ ret = rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "Fail to create eventfd"); +- return -1; ++ return ret; + } + +- if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", +- hw->max_queue_pairs)) { ++ ret = rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", ++ hw->max_queue_pairs); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors", + hw->max_queue_pairs); +- return -ENOMEM; ++ return ret; + } + + if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { +@@ -1833,12 +1836,13 @@ virtio_configure_intr(struct rte_eth_dev *dev) + */ + if (virtio_intr_enable(dev) < 0) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); +- return -1; ++ return -EINVAL; + } + +- if (virtio_queues_bind_intr(dev) < 0) { ++ ret = virtio_queues_bind_intr(dev); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt"); +- return -1; ++ return ret; + } + + return 0; +@@ -2161,7 +2165,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) + eth_dev->device->numa_node); + if (!hw->rss_key) { + PMD_INIT_LOG(ERR, "Failed to allocate RSS key"); +- return -1; ++ return -ENOMEM; + } + } + +@@ -2183,7 +2187,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) + eth_dev->device->numa_node); + if (!hw->rss_reta) { + PMD_INIT_LOG(ERR, "Failed to allocate RSS reta"); +- return -1; ++ return -ENOMEM; + } + + hw->rss_rx_queues = 0; +@@ -2223,7 +2227,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + /* Tell the host we've known how to drive the device. */ + virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); + if (virtio_ethdev_negotiate_features(hw, req_features) < 0) +- return -1; ++ return -EINVAL; + + hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM); + +@@ -2305,7 +2309,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + if (config->mtu < RTE_ETHER_MIN_MTU) { + PMD_INIT_LOG(ERR, "invalid max MTU value (%u)", + config->mtu); +- return -1; ++ return -EINVAL; + } + + hw->max_mtu = config->mtu; +@@ -2318,9 +2322,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + } + + hw->rss_hash_types = 0; +- if (virtio_with_feature(hw, VIRTIO_NET_F_RSS)) +- if (virtio_dev_rss_init(eth_dev)) +- return -1; ++ if (virtio_with_feature(hw, VIRTIO_NET_F_RSS)) { ++ ret = virtio_dev_rss_init(eth_dev); ++ if (ret < 0) ++ return ret; ++ } + + PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d", + config->max_virtqueue_pairs); +@@ -2342,10 +2348,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + return ret; + + if (eth_dev->data->dev_conf.intr_conf.rxq) { +- if (virtio_configure_intr(eth_dev) < 0) { ++ ret = virtio_configure_intr(eth_dev); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "failed to configure interrupt"); + virtio_free_queues(hw); +- return -1; ++ return ret; + } + } + +@@ -2457,6 +2464,9 @@ virtio_dev_speed_capa_get(uint32_t speed) + static int vectorized_check_handler(__rte_unused const char *key, + const char *value, void *ret_val) + { ++ if (value == NULL || ret_val == NULL) ++ return -EINVAL; ++ + if (strcmp(value, "1") == 0) + *(int *)ret_val = 1; + else +diff --git a/dpdk/drivers/net/virtio/virtio_pci.c b/dpdk/drivers/net/virtio/virtio_pci.c +index 9cf4d760b4..29eb739b04 100644 +--- 
a/dpdk/drivers/net/virtio/virtio_pci.c ++++ b/dpdk/drivers/net/virtio/virtio_pci.c +@@ -33,22 +33,6 @@ + + struct virtio_pci_internal virtio_pci_internal[RTE_MAX_ETHPORTS]; + +-static inline int +-check_vq_phys_addr_ok(struct virtqueue *vq) +-{ +- /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, +- * and only accepts 32 bit page frame number. +- * Check if the allocated physical memory exceeds 16TB. +- */ +- if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> +- (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { +- PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); +- return 0; +- } +- +- return 1; +-} +- + #define PCI_MSIX_ENABLE 0x8000 + + static enum virtio_msix_status +@@ -273,8 +257,15 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) + { + uint32_t src; + +- if (!check_vq_phys_addr_ok(vq)) ++ /* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit, ++ * and only accepts 32 bit page frame number. ++ * Check if the allocated physical memory exceeds 16TB. ++ */ ++ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> ++ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { ++ PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); + return -1; ++ } + + rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2, + VIRTIO_PCI_QUEUE_SEL); +@@ -476,9 +467,6 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) + uint64_t desc_addr, avail_addr, used_addr; + uint16_t notify_off; + +- if (!check_vq_phys_addr_ok(vq)) +- return -1; +- + desc_addr = vq->vq_ring_mem; + avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, +diff --git a/dpdk/drivers/net/virtio/virtio_pci_ethdev.c b/dpdk/drivers/net/virtio/virtio_pci_ethdev.c +index abc63b0935..9b4b846f8a 100644 +--- a/dpdk/drivers/net/virtio/virtio_pci_ethdev.c ++++ b/dpdk/drivers/net/virtio/virtio_pci_ethdev.c +@@ -148,6 +148,9 @@ eth_virtio_pci_uninit(struct rte_eth_dev *eth_dev) + static int vdpa_check_handler(__rte_unused const char *key, + const char *value, void *ret_val) + { ++ if (value == NULL || ret_val == NULL) ++ return -EINVAL; ++ + if (strcmp(value, "1") == 0) + *(int *)ret_val = 1; + else +diff --git a/dpdk/drivers/net/virtio/virtio_rxtx.c b/dpdk/drivers/net/virtio/virtio_rxtx.c +index d9d40832e0..c3e686cf0c 100644 +--- a/dpdk/drivers/net/virtio/virtio_rxtx.c ++++ b/dpdk/drivers/net/virtio/virtio_rxtx.c +@@ -404,29 +404,36 @@ virtio_tso_fix_cksum(struct rte_mbuf *m) + if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len + + m->l4_len)) { + struct rte_ipv4_hdr *iph; +- struct rte_ipv6_hdr *ip6h; + struct rte_tcp_hdr *th; +- uint16_t prev_cksum, new_cksum, ip_len, ip_paylen; ++ uint16_t prev_cksum, new_cksum; ++ uint32_t ip_paylen; + uint32_t tmp; + + iph = rte_pktmbuf_mtod_offset(m, + struct rte_ipv4_hdr *, m->l2_len); + th = RTE_PTR_ADD(iph, m->l3_len); ++ ++ /* ++ * Calculate IPv4 header checksum with current total length value ++ * (whatever it is) to have correct checksum after update on edits ++ * done by TSO. ++ */ + if ((iph->version_ihl >> 4) == 4) { + iph->hdr_checksum = 0; + iph->hdr_checksum = rte_ipv4_cksum(iph); +- ip_len = iph->total_length; +- ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) - +- m->l3_len); +- } else { +- ip6h = (struct rte_ipv6_hdr *)iph; +- ip_paylen = ip6h->payload_len; + } + ++ /* ++ * Do not use IPv4 total length and IPv6 payload length fields to get ++ * TSO payload length since it could not fit into 16 bits. 
++ */ ++ ip_paylen = rte_cpu_to_be_32(rte_pktmbuf_pkt_len(m) - m->l2_len - ++ m->l3_len); ++ + /* calculate the new phdr checksum not including ip_paylen */ + prev_cksum = th->cksum; + tmp = prev_cksum; +- tmp += ip_paylen; ++ tmp += (ip_paylen & 0xffff) + (ip_paylen >> 16); + tmp = (tmp & 0xffff) + (tmp >> 16); + new_cksum = tmp; + +diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c +index 19599aa3f6..697a8dcd6b 100644 +--- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -586,11 +586,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + dev->frontend_features = 0; + dev->unsupported_features = 0; + dev->backend_type = backend_type; +- +- if (*ifname) { +- dev->ifname = *ifname; +- *ifname = NULL; +- } ++ dev->ifname = *ifname; + + if (virtio_user_dev_setup(dev) < 0) { + PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path); +@@ -663,6 +659,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + } + } + ++ *ifname = NULL; + return 0; + } + +diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c +index d32abec327..78b1ed9ace 100644 +--- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c ++++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c +@@ -90,10 +90,15 @@ virtio_user_set_status(struct virtio_hw *hw, uint8_t status) + if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK && + ~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK) + virtio_user_dev_set_features(dev); +- if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) +- virtio_user_start_device(dev); +- else if (status == VIRTIO_CONFIG_STATUS_RESET) ++ ++ if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) { ++ if (virtio_user_start_device(dev)) { ++ virtio_user_dev_update_status(dev); ++ return; ++ } ++ } else if (status == VIRTIO_CONFIG_STATUS_RESET) { + virtio_user_reset(hw); ++ } + + virtio_user_dev_set_status(dev, status); + } +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c +index a875ffec07..14c6504505 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c +@@ -412,8 +412,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + + nb_tx = 0; + while (nb_tx < nb_pkts) { +- Vmxnet3_GenericDesc *gdesc; +- vmxnet3_buf_info_t *tbi; ++ Vmxnet3_GenericDesc *gdesc = NULL; ++ vmxnet3_buf_info_t *tbi = NULL; + uint32_t first2fill, avail, dw2; + struct rte_mbuf *txm = tx_pkts[nb_tx]; + struct rte_mbuf *m_seg = txm; +@@ -457,18 +457,18 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + continue; + } + ++ /* Skip empty packets */ ++ if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) { ++ txq->stats.drop_total++; ++ rte_pktmbuf_free(txm); ++ nb_tx++; ++ continue; ++ } ++ + if (txm->nb_segs == 1 && + rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) { + struct Vmxnet3_TxDataDesc *tdd; + +- /* Skip empty packets */ +- if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) { +- txq->stats.drop_total++; +- rte_pktmbuf_free(txm); +- nb_tx++; +- continue; +- } +- + tdd = (struct Vmxnet3_TxDataDesc *) + ((uint8 *)txq->data_ring.base + + txq->cmd_ring.next2fill * +@@ -481,6 +481,10 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; + first2fill = txq->cmd_ring.next2fill; + do { ++ /* Skip empty segments */ ++ if (unlikely(m_seg->data_len == 0)) ++ continue; ++ + 
/* Remember the transmit buffer for cleanup */ + tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill; + +@@ -490,10 +494,6 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill; + +- /* Skip empty segments */ +- if (unlikely(m_seg->data_len == 0)) +- continue; +- + if (copy_size) { + uint64 offset = + (uint64)txq->cmd_ring.next2fill * +@@ -514,6 +514,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + /* use the right gen for non-SOP desc */ + dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT; + } while ((m_seg = m_seg->next) != NULL); ++ /* We must have executed the complete preceding loop at least ++ * once without skipping an empty segment, as we can't have ++ * a packet with only empty segments. ++ * Thus, tbi and gdesc have been initialized. ++ */ + + /* set the last buf_info for the pkt */ + tbi->m = txm; +@@ -1311,11 +1316,18 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev) + for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) { + /* Passing 0 as alloc_num will allocate full ring */ + ret = vmxnet3_post_rx_bufs(rxq, j); +- if (ret <= 0) { ++ ++ /* Zero number of descriptors in the configuration of the RX queue */ ++ if (ret == 0) { + PMD_INIT_LOG(ERR, +- "ERROR: Posting Rxq: %d buffers ring: %d", +- i, j); +- return -ret; ++ "Invalid configuration in Rx queue: %d, buffers ring: %d\n", ++ i, j); ++ return -EINVAL; ++ } ++ /* Return the error number */ ++ if (ret < 0) { ++ PMD_INIT_LOG(ERR, "Posting Rxq: %d buffers ring: %d", i, j); ++ return ret; + } + /* + * Updating device with the index:next2fill to fill the +diff --git a/dpdk/drivers/raw/ifpga/base/opae_hw_api.c b/dpdk/drivers/raw/ifpga/base/opae_hw_api.c +index 1117c3e160..6d48d227d6 100644 +--- a/dpdk/drivers/raw/ifpga/base/opae_hw_api.c ++++ b/dpdk/drivers/raw/ifpga/base/opae_hw_api.c +@@ -380,7 +380,7 @@ static pthread_mutex_t *opae_adapter_mutex_open(struct opae_adapter *adapter) + PROT_READ | PROT_WRITE, MAP_SHARED, + shm_id, 0); + adapter->lock = (pthread_mutex_t *)ptr; +- if (ptr) { ++ if (ptr != MAP_FAILED) { + dev_info(NULL, + "shared memory %s address is %p\n", + shm_name, ptr); +@@ -499,7 +499,7 @@ static void *opae_adapter_shm_alloc(struct opae_adapter *adapter) + adapter->shm.size = size; + adapter->shm.ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_SHARED, shm_id, 0); +- if (adapter->shm.ptr) { ++ if (adapter->shm.ptr != MAP_FAILED) { + dev_info(NULL, + "shared memory %s address is %p\n", + shm_name, adapter->shm.ptr); +diff --git a/dpdk/drivers/raw/ntb/ntb.c b/dpdk/drivers/raw/ntb/ntb.c +index 76e98fe515..0ed4c14592 100644 +--- a/dpdk/drivers/raw/ntb/ntb.c ++++ b/dpdk/drivers/raw/ntb/ntb.c +@@ -1045,6 +1045,11 @@ ntb_dev_close(struct rte_rawdev *dev) + hw->queue_pairs = 0; + + intr_handle = hw->pci_dev->intr_handle; ++ /* Disable interrupt only once */ ++ if (!rte_intr_nb_efd_get(intr_handle) && ++ !rte_intr_max_intr_get(intr_handle)) ++ return 0; ++ + /* Clean datapath event and vec mapping */ + rte_intr_efd_disable(intr_handle); + rte_intr_vec_list_free(intr_handle); +diff --git a/dpdk/drivers/raw/skeleton/skeleton_rawdev.c b/dpdk/drivers/raw/skeleton/skeleton_rawdev.c +index b2ca1cc5cd..6e99d35536 100644 +--- a/dpdk/drivers/raw/skeleton/skeleton_rawdev.c ++++ b/dpdk/drivers/raw/skeleton/skeleton_rawdev.c +@@ -428,7 +428,7 @@ static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev, + * help in complex implementation which require more information than + * just an integer - for example, a queue-pair. 
+ */ +- q_id = *((int *)context); ++ q_id = *((uint16_t *)context); + + for (i = 0; i < count; i++) + queue_buf[q_id].bufs[i] = buffers[i]->buf_addr; +@@ -450,7 +450,7 @@ static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev, + * help in complex implementation which require more information than + * just an integer - for example, a queue-pair. + */ +- q_id = *((int *)context); ++ q_id = *((uint16_t *)context); + + for (i = 0; i < count; i++) + buffers[i]->buf_addr = queue_buf[q_id].bufs[i]; +@@ -664,6 +664,8 @@ skeldev_get_selftest(const char *key __rte_unused, + void *opaque) + { + int *flag = opaque; ++ if (value == NULL || opaque == NULL) ++ return -EINVAL; + *flag = atoi(value); + return 0; + } +diff --git a/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c b/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c +index ca15c49990..b7a7f623aa 100644 +--- a/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c ++++ b/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c +@@ -370,40 +370,34 @@ static int + test_rawdev_enqdeq(void) + { + int ret; +- unsigned int count = 1; + uint16_t queue_id = 0; +- struct rte_rawdev_buf buffers[1]; +- struct rte_rawdev_buf *deq_buffers = NULL; +- +- buffers[0].buf_addr = malloc(strlen(TEST_DEV_NAME) + 3); +- if (!buffers[0].buf_addr) +- goto cleanup; +- snprintf(buffers[0].buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d", ++ struct rte_rawdev_buf buffer; ++ struct rte_rawdev_buf *buffers[1]; ++ struct rte_rawdev_buf deq_buffer; ++ struct rte_rawdev_buf *deq_buffers[1]; ++ ++ buffers[0] = &buffer; ++ buffer.buf_addr = malloc(strlen(TEST_DEV_NAME) + 3); ++ if (!buffer.buf_addr) ++ return TEST_FAILED; ++ snprintf(buffer.buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d", + TEST_DEV_NAME, 0); + +- ret = rte_rawdev_enqueue_buffers(test_dev_id, +- (struct rte_rawdev_buf **)&buffers, +- count, &queue_id); +- RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count, ++ ret = rte_rawdev_enqueue_buffers(test_dev_id, buffers, ++ RTE_DIM(buffers), &queue_id); ++ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, RTE_DIM(buffers), + "Unable to enqueue buffers"); + +- deq_buffers = malloc(sizeof(struct rte_rawdev_buf) * count); +- if (!deq_buffers) +- goto cleanup; +- +- ret = rte_rawdev_dequeue_buffers(test_dev_id, +- (struct rte_rawdev_buf **)&deq_buffers, +- count, &queue_id); +- RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count, ++ deq_buffers[0] = &deq_buffer; ++ ret = rte_rawdev_dequeue_buffers(test_dev_id, deq_buffers, ++ RTE_DIM(deq_buffers), &queue_id); ++ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, RTE_DIM(buffers), + "Unable to dequeue buffers"); ++ RTE_TEST_ASSERT_EQUAL(deq_buffers[0]->buf_addr, buffers[0]->buf_addr, ++ "Did not retrieve expected object"); + +- free(deq_buffers); +- ++ free(buffer.buf_addr); + return TEST_SUCCESS; +-cleanup: +- free(buffers[0].buf_addr); +- +- return TEST_FAILED; + } + + static void skeldev_test_run(int (*setup)(void), +diff --git a/dpdk/drivers/regex/cn9k/meson.build b/dpdk/drivers/regex/cn9k/meson.build +index 06c906710c..19b2e70111 100644 +--- a/dpdk/drivers/regex/cn9k/meson.build ++++ b/dpdk/drivers/regex/cn9k/meson.build +@@ -8,10 +8,10 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + subdir_done() + endif + +-lib = cc.find_library('librxp_compiler', required: false) ++lib = cc.find_library('rxp_compiler', required: false) + if lib.found() + ext_deps += lib +- ext_deps += cc.find_library('libstdc++', required: true) ++ ext_deps += cc.find_library('stdc++', required: true) + cflags += ['-DREE_COMPILER_SDK'] + endif + +diff --git 
a/dpdk/drivers/regex/mlx5/mlx5_regex.h b/dpdk/drivers/regex/mlx5/mlx5_regex.h +index b8554fd1cf..481f6fc59f 100644 +--- a/dpdk/drivers/regex/mlx5/mlx5_regex.h ++++ b/dpdk/drivers/regex/mlx5/mlx5_regex.h +@@ -37,7 +37,7 @@ struct mlx5_regex_qp { + struct mlx5_regex_hw_qp *qps; /* Pointer to qp array. */ + uint16_t nb_obj; /* Number of qp objects. */ + struct mlx5_regex_cq cq; /* CQ struct. */ +- uint32_t free_qps; ++ uint64_t free_qps; + struct mlx5_regex_job *jobs; + struct ibv_mr *metadata; + struct ibv_mr *outputs; +diff --git a/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c b/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c +index 143c7d7cdf..8e5f8c9c95 100644 +--- a/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c ++++ b/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c +@@ -211,8 +211,8 @@ send_doorbell(struct mlx5_regex_priv *priv, struct mlx5_regex_hw_qp *qp) + (MLX5_SEND_WQE_BB << (priv->has_umr ? 2 : 0)) + + (priv->has_umr ? MLX5_REGEX_UMR_WQE_SIZE : 0); + uint8_t *wqe = (uint8_t *)(uintptr_t)qp->qp_obj.wqes + wqe_offset; +- uint32_t actual_pi = (priv->has_umr ? (qp->db_pi * 4 + 3) : qp->db_pi) & +- MLX5_REGEX_MAX_WQE_INDEX; ++ uint32_t actual_pi = (priv->has_umr ? ((1 + qp->db_pi) * 4) : qp->db_pi) ++ & MLX5_REGEX_MAX_WQE_INDEX; + + /* Or the fm_ce_se instead of set, avoid the fence be cleared. */ + ((struct mlx5_wqe_ctrl_seg *)wqe)->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE; +@@ -417,7 +417,7 @@ mlx5_regexdev_enqueue_gga(struct rte_regexdev *dev, uint16_t qp_id, + return 0; + #endif + +- while ((hw_qpid = ffs(queue->free_qps))) { ++ while ((hw_qpid = ffsll(queue->free_qps))) { + hw_qpid--; /* ffs returns 1 for bit 0 */ + qp_obj = &queue->qps[hw_qpid]; + nb_desc = get_free(qp_obj, priv->has_umr); +@@ -426,7 +426,7 @@ mlx5_regexdev_enqueue_gga(struct rte_regexdev *dev, uint16_t qp_id, + if (nb_desc > nb_left) + nb_desc = nb_left; + else +- queue->free_qps &= ~(1 << hw_qpid); ++ queue->free_qps &= ~(1ULL << hw_qpid); + prep_regex_umr_wqe_set(priv, queue, qp_obj, ops, + nb_desc); + send_doorbell(priv, qp_obj); +@@ -456,7 +456,7 @@ mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id, + return 0; + #endif + +- while ((hw_qpid = ffs(queue->free_qps))) { ++ while ((hw_qpid = ffsll(queue->free_qps))) { + hw_qpid--; /* ffs returns 1 for bit 0 */ + qp_obj = &queue->qps[hw_qpid]; + while (get_free(qp_obj, priv->has_umr)) { +@@ -470,7 +470,7 @@ mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id, + goto out; + } + } +- queue->free_qps &= ~(1 << hw_qpid); ++ queue->free_qps &= ~(1ULL << hw_qpid); + send_doorbell(priv, qp_obj); + } + +@@ -603,7 +603,7 @@ mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id, + cq->ci = (cq->ci + 1) & 0xffffff; + rte_wmb(); + cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->ci); +- queue->free_qps |= (1 << hw_qpid); ++ queue->free_qps |= (1ULL << hw_qpid); + } + + out: +@@ -642,7 +642,7 @@ setup_qps(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *queue) + (uintptr_t)job->output); + wqe += 64; + } +- queue->free_qps |= 1 << hw_qpid; ++ queue->free_qps |= 1ULL << hw_qpid; + } + } + +diff --git a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c +index 49d68ad1b1..35520ea3ae 100644 +--- a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c ++++ b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c +@@ -1044,6 +1044,8 @@ ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal) + + vdpa_disable_vfio_intr(internal); + ++ rte_atomic32_set(&internal->running, 0); ++ + ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false); + if (ret && ret != 
-ENOTSUP) + goto error; +@@ -1746,6 +1748,11 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + goto error; + } + internal->sw_lm = sw_fallback_lm; ++ if (!internal->sw_lm && !internal->hw.lm_cfg) { ++ DRV_LOG(ERR, "Device %s does not support HW assist live migration, please enable sw-live-migration!", ++ pci_dev->name); ++ goto error; ++ } + + pthread_mutex_lock(&internal_list_lock); + TAILQ_INSERT_TAIL(&internal_list, list, next); +diff --git a/dpdk/examples/cmdline/parse_obj_list.h b/dpdk/examples/cmdline/parse_obj_list.h +index 6516d3e2c2..1223ac1e8b 100644 +--- a/dpdk/examples/cmdline/parse_obj_list.h ++++ b/dpdk/examples/cmdline/parse_obj_list.h +@@ -12,8 +12,9 @@ + + #include + #include ++#include + +-#define OBJ_NAME_LEN_MAX 64 ++#define OBJ_NAME_LEN_MAX sizeof(cmdline_fixed_string_t) + + struct object { + SLIST_ENTRY(object) next; +diff --git a/dpdk/examples/fips_validation/Makefile b/dpdk/examples/fips_validation/Makefile +index bca6647f55..fbb778d57a 100644 +--- a/dpdk/examples/fips_validation/Makefile ++++ b/dpdk/examples/fips_validation/Makefile +@@ -15,6 +15,8 @@ SRCS-y += fips_validation_ccm.c + SRCS-y += fips_validation_sha.c + SRCS-y += fips_dev_self_test.c + SRCS-y += fips_validation_xts.c ++SRCS-y += fips_validation_rsa.c ++SRCS-y += fips_validation_ecdsa.c + SRCS-y += main.c + + PKGCONF ?= pkg-config +diff --git a/dpdk/examples/fips_validation/fips_validation.c b/dpdk/examples/fips_validation/fips_validation.c +index f7a6d821ea..d3b6099d73 100644 +--- a/dpdk/examples/fips_validation/fips_validation.c ++++ b/dpdk/examples/fips_validation/fips_validation.c +@@ -543,15 +543,28 @@ fips_test_parse_one_json_case(void) + + for (i = 0; info.callbacks[i].key != NULL; i++) { + param = json_object_get(json_info.json_test_case, info.callbacks[i].key); +- if (param) { +- strcpy(info.one_line_text, json_string_value(param)); +- ret = info.callbacks[i].cb( +- info.callbacks[i].key, info.one_line_text, +- info.callbacks[i].val +- ); +- if (ret < 0) +- return ret; ++ if (!param) ++ continue; ++ ++ switch (json_typeof(param)) { ++ case JSON_STRING: ++ snprintf(info.one_line_text, MAX_LINE_CHAR, "%s", ++ json_string_value(param)); ++ break; ++ ++ case JSON_INTEGER: ++ snprintf(info.one_line_text, MAX_LINE_CHAR, "%"JSON_INTEGER_FORMAT, ++ json_integer_value(param)); ++ break; ++ ++ default: ++ return -EINVAL; + } ++ ++ ret = info.callbacks[i].cb(info.callbacks[i].key, info.one_line_text, ++ info.callbacks[i].val); ++ if (ret < 0) ++ return ret; + } + + return 0; +diff --git a/dpdk/examples/fips_validation/fips_validation.h b/dpdk/examples/fips_validation/fips_validation.h +index 565a5cd36e..c4bb041785 100644 +--- a/dpdk/examples/fips_validation/fips_validation.h ++++ b/dpdk/examples/fips_validation/fips_validation.h +@@ -244,7 +244,7 @@ struct ecdsa_interim_data { + * Esp, in asym op, modulo bits decide char buffer size. 
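+ * (one hex character encodes 4 bits, hence the division by 4; the extra
+ * byte in the new define presumably holds the terminating NUL)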
+ * max = (modulo / 4) + */ +-#define FIPS_TEST_JSON_BUF_LEN (4096 / 4) ++#define FIPS_TEST_JSON_BUF_LEN ((4096 / 4) + 1) + + struct fips_test_json_info { + /* Information used for reading from json */ +diff --git a/dpdk/examples/fips_validation/fips_validation_gcm.c b/dpdk/examples/fips_validation/fips_validation_gcm.c +index a80d8b3e4d..bf08d1b995 100644 +--- a/dpdk/examples/fips_validation/fips_validation_gcm.c ++++ b/dpdk/examples/fips_validation/fips_validation_gcm.c +@@ -79,7 +79,7 @@ parser_read_gcm_pt_len(const char *key, char *src, + if (ret < 0) + return ret; + +- if (vec.pt.len == 0) { ++ if (info.algo == FIPS_TEST_ALGO_AES_GMAC && vec.pt.len == 0) { + info.interim_info.gcm_data.is_gmac = 1; + test_ops.prepare_sym_op = prepare_auth_op; + test_ops.prepare_sym_xform = prepare_gmac_xform; +@@ -296,6 +296,7 @@ parse_test_gcm_json_writeback(struct fips_val *val) + tmp_val.val = val->val; + tmp_val.len = vec.pt.len; + ++ info.one_line_text[0] = '\0'; + writeback_hex_str("", info.one_line_text, &tmp_val); + ct = json_string(info.one_line_text); + json_object_set_new(json_info.json_write_case, CT_JSON_STR, ct); +@@ -326,6 +327,7 @@ parse_test_gcm_json_writeback(struct fips_val *val) + tmp_val.val = val->val; + tmp_val.len = vec.pt.len; + ++ info.one_line_text[0] = '\0'; + writeback_hex_str("", info.one_line_text, &tmp_val); + json_object_set_new(json_info.json_write_case, PT_JSON_STR, + json_string(info.one_line_text)); +@@ -334,12 +336,8 @@ parse_test_gcm_json_writeback(struct fips_val *val) + json_true()); + } + } else { +- if (!info.interim_info.gcm_data.is_gmac) +- json_object_set_new(json_info.json_write_case, PT_JSON_STR, +- json_string("")); +- else +- json_object_set_new(json_info.json_write_case, "testPassed", +- json_false()); ++ json_object_set_new(json_info.json_write_case, "testPassed", ++ json_false()); + } + } + +diff --git a/dpdk/examples/fips_validation/fips_validation_sha.c b/dpdk/examples/fips_validation/fips_validation_sha.c +index c5da2cc623..178ea492d3 100644 +--- a/dpdk/examples/fips_validation/fips_validation_sha.c ++++ b/dpdk/examples/fips_validation/fips_validation_sha.c +@@ -182,7 +182,7 @@ parse_test_sha_json_writeback(struct fips_val *val) + static int + parse_test_sha_mct_json_writeback(struct fips_val *val) + { +- json_t *tcId, *msg, *md, *resArr, *res; ++ json_t *tcId, *md, *resArr, *res; + struct fips_val val_local; + + tcId = json_object_get(json_info.json_test_case, "tcId"); +@@ -208,11 +208,7 @@ parse_test_sha_mct_json_writeback(struct fips_val *val) + + res = json_object(); + +- writeback_hex_str("", info.one_line_text, &val[1]); +- msg = json_string(info.one_line_text); +- json_object_set_new(res, "msg", msg); +- +- val_local.val = val[0].val + vec.pt.len; ++ val_local.val = val->val + vec.pt.len; + val_local.len = vec.cipher_auth.digest.len; + + writeback_hex_str("", info.one_line_text, &val_local); +diff --git a/dpdk/examples/fips_validation/fips_validation_xts.c b/dpdk/examples/fips_validation/fips_validation_xts.c +index 531e3c688e..530df78ab4 100644 +--- a/dpdk/examples/fips_validation/fips_validation_xts.c ++++ b/dpdk/examples/fips_validation/fips_validation_xts.c +@@ -34,6 +34,7 @@ + #define DATAUNITLEN_JSON_STR "dataUnitLen" + #define PAYLOADLEN_JSON_STR "payloadLen" + #define TWEAKVALUE_JSON_STR "tweakValue" ++#define SEQNUMBER_JSON_STR "sequenceNumber" + #define PT_JSON_STR "pt" + #define CT_JSON_STR "ct" + +@@ -95,14 +96,17 @@ parser_xts_read_keylen(const char *key, char *src, struct fips_val *val) + static int + 
parser_xts_read_tweakval(const char *key, char *src, struct fips_val *val) + { ++ char num_str[4] = {0}; + int ret; + +- if (info.interim_info.xts_data.tweak_mode == XTS_TWEAK_MODE_HEX) ++ if (info.interim_info.xts_data.tweak_mode == XTS_TWEAK_MODE_HEX) { + ret = parse_uint8_hex_str(key, src, val); +- else if (info.interim_info.xts_data.tweak_mode == XTS_TWEAK_MODE_NUMBER) +- ret = parser_read_uint32_bit_val(key, src, val); +- else ++ } else if (info.interim_info.xts_data.tweak_mode == XTS_TWEAK_MODE_NUMBER) { ++ snprintf(num_str, RTE_DIM(num_str), "%x", atoi(src)); ++ ret = parse_uint8_hex_str(key, num_str, val); ++ } else { + ret = -1; ++ } + + return ret; + } +@@ -122,6 +126,7 @@ struct fips_test_callback xts_interim_json_vectors[] = { + struct fips_test_callback xts_enc_json_vectors[] = { + {KEY_JSON_STR, parse_uint8_known_len_hex_str, &vec.cipher_auth.key}, + {TWEAKVALUE_JSON_STR, parser_xts_read_tweakval, &vec.iv}, ++ {SEQNUMBER_JSON_STR, parser_xts_read_tweakval, &vec.iv}, + {PT_JSON_STR, parse_uint8_hex_str, &vec.pt}, + {NULL, NULL, NULL} /**< end pointer */ + }; +diff --git a/dpdk/examples/fips_validation/main.c b/dpdk/examples/fips_validation/main.c +index 622f8b5a6e..cc68a1620b 100644 +--- a/dpdk/examples/fips_validation/main.c ++++ b/dpdk/examples/fips_validation/main.c +@@ -834,7 +834,7 @@ prepare_aead_op(void) + RTE_LOG(ERR, USER1, "Not enough memory\n"); + return -ENOMEM; + } +- env.digest_len = vec.cipher_auth.digest.len; ++ env.digest_len = vec.aead.digest.len; + + sym->aead.data.length = vec.pt.len; + sym->aead.digest.data = env.digest; +@@ -843,7 +843,7 @@ prepare_aead_op(void) + ret = prepare_data_mbufs(&vec.ct); + if (ret < 0) + return ret; +- ++ env.digest_len = vec.aead.digest.len; + sym->aead.data.length = vec.ct.len; + sym->aead.digest.data = vec.aead.digest.val; + sym->aead.digest.phys_addr = rte_malloc_virt2iova( +@@ -2268,8 +2268,7 @@ fips_mct_sha_test(void) + #define SHA_EXTERN_ITER 100 + #define SHA_INTERN_ITER 1000 + #define SHA_MD_BLOCK 3 +- /* val[0] is op result and other value is for parse_writeback callback */ +- struct fips_val val[2] = {{NULL, 0},}; ++ struct fips_val val = {NULL, 0}; + struct fips_val md[SHA_MD_BLOCK], msg; + int ret; + uint32_t i, j; +@@ -2328,7 +2327,7 @@ fips_mct_sha_test(void) + return ret; + } + +- ret = get_writeback_data(&val[0]); ++ ret = get_writeback_data(&val); + if (ret < 0) + return ret; + +@@ -2337,7 +2336,7 @@ fips_mct_sha_test(void) + memcpy(md[1].val, md[2].val, md[2].len); + md[1].len = md[2].len; + +- memcpy(md[2].val, (val[0].val + vec.pt.len), ++ memcpy(md[2].val, (val.val + vec.pt.len), + vec.cipher_auth.digest.len); + md[2].len = vec.cipher_auth.digest.len; + } +@@ -2348,9 +2347,7 @@ fips_mct_sha_test(void) + if (info.file_type != FIPS_TYPE_JSON) + fprintf(info.fp_wr, "COUNT = %u\n", j); + +- val[1].val = msg.val; +- val[1].len = msg.len; +- info.parse_writeback(val); ++ info.parse_writeback(&val); + + if (info.file_type != FIPS_TYPE_JSON) + fprintf(info.fp_wr, "\n"); +@@ -2361,7 +2358,7 @@ fips_mct_sha_test(void) + + rte_free(vec.pt.val); + +- free(val[0].val); ++ free(val.val); + free(msg.val); + + return 0; +@@ -2528,6 +2525,7 @@ error_one_case: + if (env.digest) { + rte_free(env.digest); + env.digest = NULL; ++ env.digest_len = 0; + } + rte_pktmbuf_free(env.mbuf); + +diff --git a/dpdk/examples/ip_pipeline/thread.c b/dpdk/examples/ip_pipeline/thread.c +index 82d5f87c38..9817657ca9 100644 +--- a/dpdk/examples/ip_pipeline/thread.c ++++ b/dpdk/examples/ip_pipeline/thread.c +@@ -430,7 +430,7 @@ 
thread_pipeline_disable(uint32_t thread_id, + static inline struct thread_msg_req * + thread_msg_recv(struct rte_ring *msgq_req) + { +- struct thread_msg_req *req; ++ struct thread_msg_req *req = NULL; + + int status = rte_ring_sc_dequeue(msgq_req, (void **) &req); + +diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c +index a64a26c992..82a4916fb2 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c ++++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.c +@@ -99,10 +99,10 @@ uint32_t qp_desc_nb = 2048; + #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + + struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { +- { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, +- { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, +- { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, +- { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } ++ { {{0}}, {{0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a}} }, ++ { {{0}}, {{0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9}} }, ++ { {{0}}, {{0x00, 0x16, 0x3e, 0x08, 0x69, 0x26}} }, ++ { {{0}}, {{0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd}} } + }; + + struct offloads tx_offloads; +@@ -1427,9 +1427,8 @@ add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr) + if (port >= RTE_DIM(ethaddr_tbl)) + return -EINVAL; + +- ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr); +- rte_ether_addr_copy((struct rte_ether_addr *)ðaddr_tbl[port].dst, +- (struct rte_ether_addr *)(val_eth + port)); ++ rte_ether_addr_copy(addr, ðaddr_tbl[port].dst); ++ rte_ether_addr_copy(addr, (struct rte_ether_addr *)(val_eth + port)); + return 0; + } + +@@ -1700,6 +1699,9 @@ cryptodevs_init(enum eh_pkt_transfer_mode mode) + + total_nb_qps += qp; + dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); ++ /* Use the first socket if SOCKET_ID_ANY is returned. 
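SOCKET_ID_ANY is -1, which is not a valid NUMA node index.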
*/ ++ if (dev_conf.socket_id == SOCKET_ID_ANY) ++ dev_conf.socket_id = 0; + dev_conf.nb_queue_pairs = qp; + dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; + +@@ -1907,11 +1909,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, + "Error getting MAC address (port %u): %s\n", + portid, rte_strerror(-ret)); + +- ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr); ++ rte_ether_addr_copy(ðaddr, ðaddr_tbl[portid].src); + +- rte_ether_addr_copy((struct rte_ether_addr *)ðaddr_tbl[portid].dst, ++ rte_ether_addr_copy(ðaddr_tbl[portid].dst, + (struct rte_ether_addr *)(val_eth + portid)); +- rte_ether_addr_copy((struct rte_ether_addr *)ðaddr_tbl[portid].src, ++ ++ rte_ether_addr_copy(ðaddr_tbl[portid].src, + (struct rte_ether_addr *)(val_eth + portid) + 1); + + print_ethaddr("Address: ", ðaddr); +diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.h b/dpdk/examples/ipsec-secgw/ipsec-secgw.h +index 0e0012d058..53665adf03 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec-secgw.h ++++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.h +@@ -84,7 +84,7 @@ struct ipsec_traffic_nb { + + /* port/source ethernet addr and destination ethernet addr */ + struct ethaddr_info { +- uint64_t src, dst; ++ struct rte_ether_addr src, dst; + }; + + struct ipsec_spd_stats { +diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c +index 7da9444a7b..45cd29f18b 100644 +--- a/dpdk/examples/ipsec-secgw/sa.c ++++ b/dpdk/examples/ipsec-secgw/sa.c +@@ -1247,6 +1247,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + struct ipsec_sa *sa; + uint32_t i, idx; + uint16_t iv_length, aad_length; ++ uint16_t auth_iv_length = 0; + int inline_status; + int32_t rc; + struct rte_ipsec_session *ips; +@@ -1340,7 +1341,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + + /* AES_GMAC uses salt like AEAD algorithms */ + if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) +- iv_length = 12; ++ auth_iv_length = 12; + + if (inbound) { + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +@@ -1364,7 +1365,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + sa_ctx->xf[idx].a.auth.op = + RTE_CRYPTO_AUTH_OP_VERIFY; + sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET; +- sa_ctx->xf[idx].a.auth.iv.length = iv_length; ++ sa_ctx->xf[idx].a.auth.iv.length = auth_iv_length; + + } else { /* outbound */ + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +@@ -1388,7 +1389,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + sa_ctx->xf[idx].b.auth.op = + RTE_CRYPTO_AUTH_OP_GENERATE; + sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET; +- sa_ctx->xf[idx].b.auth.iv.length = iv_length; ++ sa_ctx->xf[idx].b.auth.iv.length = auth_iv_length; + + } + +@@ -1828,6 +1829,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads, + + *rx_offloads = 0; + *tx_offloads = 0; ++ *hw_reassembly = 0; + + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) +diff --git a/dpdk/examples/ipsec-secgw/test/common_defs.sh b/dpdk/examples/ipsec-secgw/test/common_defs.sh +index 3ef06bc761..6e04ffc1a6 100644 +--- a/dpdk/examples/ipsec-secgw/test/common_defs.sh ++++ b/dpdk/examples/ipsec-secgw/test/common_defs.sh +@@ -26,7 +26,7 @@ fi + + LOCAL_IFACE=dtap0 + +-LOCAL_MAC="00:64:74:61:70:30" ++LOCAL_MAC="02:64:74:61:70:30" + + REMOTE_IPV4=192.168.31.14 + LOCAL_IPV4=192.168.31.92 +diff --git a/dpdk/examples/l2fwd-cat/Makefile b/dpdk/examples/l2fwd-cat/Makefile +index 23a09550a4..d06053451a 100644 +--- a/dpdk/examples/l2fwd-cat/Makefile ++++ 
b/dpdk/examples/l2fwd-cat/Makefile +@@ -35,6 +35,7 @@ endif + endif + + CFLAGS += -DALLOW_EXPERIMENTAL_API ++CFLAGS += -D_GNU_SOURCE + + LDFLAGS += -lpqos + +diff --git a/dpdk/examples/l2fwd-event/l2fwd_event.c b/dpdk/examples/l2fwd-event/l2fwd_event.c +index 63450537fe..4b5a032e35 100644 +--- a/dpdk/examples/l2fwd-event/l2fwd_event.c ++++ b/dpdk/examples/l2fwd-event/l2fwd_event.c +@@ -284,7 +284,7 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc, + } + } + +- l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 0); ++ l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_tx, nb_rx, 0); + } + + static __rte_always_inline void +@@ -468,7 +468,7 @@ l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags) + } + } + +- l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 1); ++ l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_tx, nb_rx, 1); + } + + static void __rte_noinline +diff --git a/dpdk/examples/l3fwd/l3fwd.h b/dpdk/examples/l3fwd/l3fwd.h +index ca1426a687..b55855c932 100644 +--- a/dpdk/examples/l3fwd/l3fwd.h ++++ b/dpdk/examples/l3fwd/l3fwd.h +@@ -55,7 +55,6 @@ + /* 32-bit has less address-space for hugepage memory, limit to 1M entries */ + #define L3FWD_HASH_ENTRIES (1024*1024*1) + #endif +-#define HASH_ENTRY_NUMBER_DEFAULT 16 + + struct parm_cfg { + const char *rule_ipv4_name; +diff --git a/dpdk/examples/l3fwd/l3fwd_fib.c b/dpdk/examples/l3fwd/l3fwd_fib.c +index edc0dd69b9..18398492ae 100644 +--- a/dpdk/examples/l3fwd/l3fwd_fib.c ++++ b/dpdk/examples/l3fwd/l3fwd_fib.c +@@ -359,10 +359,10 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc, + nh = (uint16_t)hopsv4[ipv4_arr_assem++]; + else + nh = (uint16_t)hopsv6[ipv6_arr_assem++]; +- if (nh != FIB_DEFAULT_HOP) +- hops[i] = nh != FIB_DEFAULT_HOP ? +- nh : +- events[i].mbuf->port; ++ ++ hops[i] = nh != FIB_DEFAULT_HOP ? ++ nh : ++ events[i].mbuf->port; + process_packet(events[i].mbuf, &hops[i]); + events[i].mbuf->port = hops[i] != BAD_PORT ? + hops[i] : +diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c +index 5198ff30dd..a4f061537e 100644 +--- a/dpdk/examples/l3fwd/main.c ++++ b/dpdk/examples/l3fwd/main.c +@@ -89,7 +89,6 @@ uint32_t enabled_port_mask; + + /* Used only in exact match mode. */ + int ipv6; /**< ipv6 is false by default. 
*/ +-uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT; + + struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +@@ -395,7 +394,6 @@ print_usage(const char *prgname) + " [--eth-dest=X,MM:MM:MM:MM:MM:MM]" + " [--max-pkt-len PKTLEN]" + " [--no-numa]" +- " [--hash-entry-num]" + " [--ipv6]" + " [--parse-ptype]" + " [--per-port-pool]" +@@ -419,7 +417,6 @@ print_usage(const char *prgname) + " --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n" + " --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n" + " --no-numa: Disable numa awareness\n" +- " --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n" + " --ipv6: Set if running ipv6 packets\n" + " --parse-ptype: Set to use software to analyze packet type\n" + " --per-port-pool: Use separate buffer pool per port\n" +@@ -479,22 +476,6 @@ parse_portmask(const char *portmask) + return pm; + } + +-static int +-parse_hash_entry_number(const char *hash_entry_num) +-{ +- char *end = NULL; +- unsigned long hash_en; +- /* parse hexadecimal string */ +- hash_en = strtoul(hash_entry_num, &end, 16); +- if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0')) +- return -1; +- +- if (hash_en == 0) +- return -1; +- +- return hash_en; +-} +- + static int + parse_config(const char *q_arg) + { +@@ -852,14 +833,7 @@ parse_args(int argc, char **argv) + break; + + case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM: +- ret = parse_hash_entry_number(optarg); +- if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) { +- hash_entry_number = ret; +- } else { +- fprintf(stderr, "invalid hash entry number\n"); +- print_usage(prgname); +- return -1; +- } ++ fprintf(stderr, "Hash entry number will be ignored\n"); + break; + + case CMD_LINE_OPT_PARSE_PTYPE_NUM: +@@ -963,16 +937,6 @@ parse_args(int argc, char **argv) + lookup_mode = L3FWD_LOOKUP_LPM; + } + +- /* +- * ipv6 and hash flags are valid only for +- * exact match, reset them to default for +- * longest-prefix match. 
+- */ +- if (lookup_mode == L3FWD_LOOKUP_LPM) { +- ipv6 = 0; +- hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT; +- } +- + /* For ACL, update port config rss hash filter */ + if (lookup_mode == L3FWD_LOOKUP_ACL) { + port_conf.rx_adv_conf.rss_conf.rss_hf |= +diff --git a/dpdk/examples/ntb/ntb_fwd.c b/dpdk/examples/ntb/ntb_fwd.c +index f9abed28e4..585aad9d70 100644 +--- a/dpdk/examples/ntb/ntb_fwd.c ++++ b/dpdk/examples/ntb/ntb_fwd.c +@@ -865,7 +865,7 @@ ntb_stats_clear(void) + + /* Clear NTB dev stats */ + nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0); +- if (nb_ids < 0) { ++ if (nb_ids <= 0) { + printf("Error: Cannot get count of xstats\n"); + return; + } +@@ -923,7 +923,7 @@ ntb_stats_display(void) + + /* Get NTB dev stats and stats names */ + nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0); +- if (nb_ids < 0) { ++ if (nb_ids <= 0) { + printf("Error: Cannot get count of xstats\n"); + return; + } +diff --git a/dpdk/examples/qos_sched/init.c b/dpdk/examples/qos_sched/init.c +index 0709aec10c..7a27c03b64 100644 +--- a/dpdk/examples/qos_sched/init.c ++++ b/dpdk/examples/qos_sched/init.c +@@ -79,6 +79,7 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) + if (app_inited_port_mask & (1u << portid)) + return 0; + ++ memset(&rx_conf, 0, sizeof(struct rte_eth_rxconf)); + rx_conf.rx_thresh.pthresh = rx_thresh.pthresh; + rx_conf.rx_thresh.hthresh = rx_thresh.hthresh; + rx_conf.rx_thresh.wthresh = rx_thresh.wthresh; +@@ -86,6 +87,7 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) + rx_conf.rx_drop_en = 0; + rx_conf.rx_deferred_start = 0; + ++ memset(&tx_conf, 0, sizeof(struct rte_eth_txconf)); + tx_conf.tx_thresh.pthresh = tx_thresh.pthresh; + tx_conf.tx_thresh.hthresh = tx_thresh.hthresh; + tx_conf.tx_thresh.wthresh = tx_thresh.wthresh; +@@ -326,6 +328,8 @@ int app_init(void) + for(i = 0; i < nb_pfc; i++) { + uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core); + struct rte_ring *ring; ++ struct rte_eth_link link = {0}; ++ int retry_count = 100, retry_delay = 100; /* try every 100ms for 10 sec */ + + snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core); + ring = rte_ring_lookup(ring_name); +@@ -356,6 +360,14 @@ int app_init(void) + app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool); + app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool); + ++ rte_eth_link_get(qos_conf[i].tx_port, &link); ++ if (link.link_status == 0) ++ printf("Waiting for link on port %u\n", qos_conf[i].tx_port); ++ while (link.link_status == 0 && retry_count--) { ++ rte_delay_ms(retry_delay); ++ rte_eth_link_get(qos_conf[i].tx_port, &link); ++ } ++ + qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket); + } + +diff --git a/dpdk/examples/qos_sched/profile.cfg b/dpdk/examples/qos_sched/profile.cfg +index e8de101b6c..00d4c7c1a5 100644 +--- a/dpdk/examples/qos_sched/profile.cfg ++++ b/dpdk/examples/qos_sched/profile.cfg +@@ -26,6 +26,8 @@ number of subports per port = 1 + number of pipes per subport = 4096 + queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 + ++pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 ++ + [subport profile 0] + tb rate = 1250000000 ; Bytes per second + tb size = 1000000 ; Bytes +@@ -46,8 +48,6 @@ tc 12 rate = 1250000000 ; Bytes per second + + tc period = 10 ; Milliseconds + +-pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 +- + ; Pipe configuration + [pipe profile 0] + tb rate = 305175 ; Bytes per second +@@ -71,4 +71,4 @@ tc period = 40 ; Milliseconds + + tc 12 oversubscription weight 
= 1 + +-tc 12 wrr weights = 1 1 1 1 +\ No newline at end of file ++tc 12 wrr weights = 1 1 1 1 +diff --git a/dpdk/examples/qos_sched/profile_ov.cfg b/dpdk/examples/qos_sched/profile_ov.cfg +index 14c89ae340..b6fe21ee1e 100644 +--- a/dpdk/examples/qos_sched/profile_ov.cfg ++++ b/dpdk/examples/qos_sched/profile_ov.cfg +@@ -6,12 +6,14 @@ + frame overhead = 24 + number of subports per port = 1 + ++subport 0-8 = 0 ++ + ; Subport configuration + [subport 0] + number of pipes per subport = 32 + queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 + +-subport 0-8 = 0 ++pipe 0-31 = 0 ; These pipes are configured with pipe profile 0 + + [subport profile 0] + tb rate = 8400000 ; Bytes per second +@@ -32,8 +34,6 @@ tc 11 rate = 8400000 ; Bytes per second + tc 12 rate = 8400000 ; Bytes per second + tc period = 10 ; Milliseconds + +-pipe 0-31 = 0 ; These pipes are configured with pipe profile 0 +- + ; Pipe configuration + [pipe profile 0] + tb rate = 16800000 ; Bytes per second +diff --git a/dpdk/examples/qos_sched/profile_pie.cfg b/dpdk/examples/qos_sched/profile_pie.cfg +index 241f748b33..bbc09d912b 100644 +--- a/dpdk/examples/qos_sched/profile_pie.cfg ++++ b/dpdk/examples/qos_sched/profile_pie.cfg +@@ -21,12 +21,14 @@ + frame overhead = 24 + number of subports per port = 1 + ++subport 0-8 = 0 ; These subports are configured with subport profile 0 ++ + ; Subport configuration + [subport 0] + number of pipes per subport = 4096 + queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 + +-subport 0-8 = 0 ; These subports are configured with subport profile 0 ++pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 + + [subport profile 0] + tb rate = 1250000000 ; Bytes per second +@@ -48,8 +50,6 @@ tc 12 rate = 1250000000 ; Bytes per second + + tc period = 10 ; Milliseconds + +-pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 +- + ; Pipe configuration + [pipe profile 0] + tb rate = 305175 ; Bytes per second +diff --git a/dpdk/examples/qos_sched/profile_red.cfg b/dpdk/examples/qos_sched/profile_red.cfg +index 4486d2799e..cee1470fd7 100644 +--- a/dpdk/examples/qos_sched/profile_red.cfg ++++ b/dpdk/examples/qos_sched/profile_red.cfg +@@ -21,12 +21,14 @@ + frame overhead = 24 + number of subports per port = 1 + ++subport 0-8 = 0 ; These subports are configured with subport profile 0 ++ + ; Subport configuration + [subport 0] + number of pipes per subport = 4096 + queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 + +-subport 0-8 = 0 ; These subports are configured with subport profile 0 ++pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 + + [subport profile 0] + tb rate = 1250000000 ; Bytes per second +@@ -48,8 +50,6 @@ tc 12 rate = 1250000000 ; Bytes per second + + tc period = 10 ; Milliseconds + +-pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 +- + ; Pipe configuration + [pipe profile 0] + tb rate = 305175 ; Bytes per second +diff --git a/dpdk/kernel/freebsd/contigmem/contigmem.c b/dpdk/kernel/freebsd/contigmem/contigmem.c +index bd72f4d620..7dd87599d9 100644 +--- a/dpdk/kernel/freebsd/contigmem/contigmem.c ++++ b/dpdk/kernel/freebsd/contigmem/contigmem.c +@@ -111,7 +111,7 @@ static struct cdevsw contigmem_ops = { + }; + + static int +-contigmem_load() ++contigmem_load(void) + { + char index_string[8], description[32]; + int i, error = 0; +@@ -178,7 +178,7 @@ error: + } + + static int +-contigmem_unload() ++contigmem_unload(void) + { + int i; + +diff --git a/dpdk/kernel/linux/kni/compat.h b/dpdk/kernel/linux/kni/compat.h +index 
3a86d12bbc..8beb670465 100644 +--- a/dpdk/kernel/linux/kni/compat.h ++++ b/dpdk/kernel/linux/kni/compat.h +@@ -146,6 +146,12 @@ + #define HAVE_ETH_HW_ADDR_SET + #endif + +-#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE ++#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE && \ ++ (!(defined(RHEL_RELEASE_CODE) && \ ++ RHEL_RELEASE_VERSION(9, 1) <= RHEL_RELEASE_CODE)) + #define HAVE_NETIF_RX_NI + #endif ++ ++#if KERNEL_VERSION(6, 5, 0) > LINUX_VERSION_CODE ++#define HAVE_VMA_IN_GUP ++#endif +diff --git a/dpdk/kernel/linux/kni/kni_dev.h b/dpdk/kernel/linux/kni/kni_dev.h +index a2c6d9fc1a..975379825b 100644 +--- a/dpdk/kernel/linux/kni/kni_dev.h ++++ b/dpdk/kernel/linux/kni/kni_dev.h +@@ -105,11 +105,13 @@ static inline phys_addr_t iova_to_phys(struct task_struct *tsk, + + /* Read one page struct info */ + #ifdef HAVE_TSK_IN_GUP +- ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, +- FOLL_TOUCH, &page, NULL, NULL); ++ ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, 0, &page, NULL, NULL); + #else +- ret = get_user_pages_remote(tsk->mm, iova, 1, +- FOLL_TOUCH, &page, NULL, NULL); ++ #ifdef HAVE_VMA_IN_GUP ++ ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL, NULL); ++ #else ++ ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL); ++ #endif + #endif + if (ret < 0) + return 0; +diff --git a/dpdk/lib/acl/acl_run_altivec.h b/dpdk/lib/acl/acl_run_altivec.h +index 4dfe7a14b4..4556e1503b 100644 +--- a/dpdk/lib/acl/acl_run_altivec.h ++++ b/dpdk/lib/acl/acl_run_altivec.h +@@ -102,7 +102,7 @@ acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms, + /* + * Process 4 transitions (in 2 XMM registers) in parallel + */ +-static inline __attribute__((optimize("O2"))) xmm_t ++static __rte_always_inline xmm_t + transition4(xmm_t next_input, const uint64_t *trans, + xmm_t *indices1, xmm_t *indices2) + { +diff --git a/dpdk/lib/cmdline/cmdline.c b/dpdk/lib/cmdline/cmdline.c +index e1009ba4c4..355c7d8ca6 100644 +--- a/dpdk/lib/cmdline/cmdline.c ++++ b/dpdk/lib/cmdline/cmdline.c +@@ -173,6 +173,7 @@ cmdline_quit(struct cmdline *cl) + { + if (!cl) + return; ++ cmdline_cancel(cl); + rdline_quit(&cl->rdl); + } + +@@ -197,9 +198,14 @@ cmdline_poll(struct cmdline *cl) + if (read_status < 0) + return read_status; + +- status = cmdline_in(cl, &c, 1); +- if (status < 0 && cl->rdl.status != RDLINE_EXITED) +- return status; ++ if (read_status == 0) { ++ /* end of file is implicit quit */ ++ cmdline_quit(cl); ++ } else { ++ status = cmdline_in(cl, &c, 1); ++ if (status < 0 && cl->rdl.status != RDLINE_EXITED) ++ return status; ++ } + } + + return cl->rdl.status; +diff --git a/dpdk/lib/cmdline/cmdline.h b/dpdk/lib/cmdline/cmdline.h +index 96674dfda2..b14355ef51 100644 +--- a/dpdk/lib/cmdline/cmdline.h ++++ b/dpdk/lib/cmdline/cmdline.h +@@ -23,6 +23,12 @@ + extern "C" { + #endif + ++enum rdline_status { ++ RDLINE_INIT, ++ RDLINE_RUNNING, ++ RDLINE_EXITED ++}; ++ + struct cmdline; + + struct cmdline *cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out); +diff --git a/dpdk/lib/cmdline/cmdline_os_unix.c b/dpdk/lib/cmdline/cmdline_os_unix.c +index 64a945a34f..9a4ec4e334 100644 +--- a/dpdk/lib/cmdline/cmdline_os_unix.c ++++ b/dpdk/lib/cmdline/cmdline_os_unix.c +@@ -51,3 +51,9 @@ cmdline_vdprintf(int fd, const char *format, va_list op) + { + return vdprintf(fd, format, op); + } ++ ++/* This function is not needed on Linux, instead use sigaction() */ ++void ++cmdline_cancel(__rte_unused struct cmdline *cl) ++{ ++} +diff --git 
a/dpdk/lib/cmdline/cmdline_os_windows.c b/dpdk/lib/cmdline/cmdline_os_windows.c +index 73ed9ba290..80863bfc8a 100644 +--- a/dpdk/lib/cmdline/cmdline_os_windows.c ++++ b/dpdk/lib/cmdline/cmdline_os_windows.c +@@ -203,3 +203,17 @@ cmdline_vdprintf(int fd, const char *format, va_list op) + + return ret; + } ++ ++void ++cmdline_cancel(struct cmdline *cl) ++{ ++ if (!cl) ++ return; ++ ++ /* force the outstanding read on console to exit */ ++ if (cl->oldterm.is_console_input) { ++ HANDLE handle = (HANDLE)_get_osfhandle(cl->s_in); ++ ++ CancelIoEx(handle, NULL); ++ } ++} +diff --git a/dpdk/lib/cmdline/cmdline_private.h b/dpdk/lib/cmdline/cmdline_private.h +index c2e906d8de..86a46cdea6 100644 +--- a/dpdk/lib/cmdline/cmdline_private.h ++++ b/dpdk/lib/cmdline/cmdline_private.h +@@ -23,14 +23,8 @@ + #define RDLINE_HISTORY_BUF_SIZE BUFSIZ + #define RDLINE_HISTORY_MAX_LINE 64 + +-enum rdline_status { +- RDLINE_INIT, +- RDLINE_RUNNING, +- RDLINE_EXITED +-}; +- + struct rdline { +- enum rdline_status status; ++ volatile enum rdline_status status; + /* rdline bufs */ + struct cirbuf left; + struct cirbuf right; +@@ -96,6 +90,9 @@ int cmdline_poll_char(struct cmdline *cl); + /* Read one character from input. */ + ssize_t cmdline_read_char(struct cmdline *cl, char *c); + ++/* Force current cmdline read to unblock. */ ++void cmdline_cancel(struct cmdline *cl); ++ + /* vdprintf(3) */ + __rte_format_printf(2, 0) + int cmdline_vdprintf(int fd, const char *format, va_list op); +diff --git a/dpdk/lib/compressdev/rte_compressdev.h b/dpdk/lib/compressdev/rte_compressdev.h +index 42bda9fc79..7eb5c58798 100644 +--- a/dpdk/lib/compressdev/rte_compressdev.h ++++ b/dpdk/lib/compressdev/rte_compressdev.h +@@ -353,7 +353,7 @@ rte_compressdev_stats_reset(uint8_t dev_id); + * @note The capabilities field of dev_info is set to point to the first + * element of an array of struct rte_compressdev_capabilities. + * The element after the last valid element has it's op field set to +- * RTE_COMP_ALGO_LIST_END. ++ * RTE_COMP_ALGO_UNSPECIFIED. 
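++ * (RTE_COMP_ALGO_UNSPECIFIED is the zero value of enum rte_comp_algorithm,
++ * so a zero-initialized terminating entry qualifies.)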
+ */ + __rte_experimental + void +diff --git a/dpdk/lib/compressdev/rte_compressdev_pmd.c b/dpdk/lib/compressdev/rte_compressdev_pmd.c +index e139bc86e7..156bccd972 100644 +--- a/dpdk/lib/compressdev/rte_compressdev_pmd.c ++++ b/dpdk/lib/compressdev/rte_compressdev_pmd.c +@@ -23,6 +23,9 @@ rte_compressdev_pmd_parse_name_arg(const char *key __rte_unused, + struct rte_compressdev_pmd_init_params *params = extra_args; + int n; + ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + n = strlcpy(params->name, value, RTE_COMPRESSDEV_NAME_MAX_LEN); + if (n >= RTE_COMPRESSDEV_NAME_MAX_LEN) + return -EINVAL; +@@ -40,6 +43,9 @@ rte_compressdev_pmd_parse_uint_arg(const char *key __rte_unused, + int i; + char *end; + ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + errno = 0; + i = strtol(value, &end, 10); + if (*end != 0 || errno != 0 || i < 0) +diff --git a/dpdk/lib/cryptodev/cryptodev_pmd.c b/dpdk/lib/cryptodev/cryptodev_pmd.c +index 77b269f312..d8073a601d 100644 +--- a/dpdk/lib/cryptodev/cryptodev_pmd.c ++++ b/dpdk/lib/cryptodev/cryptodev_pmd.c +@@ -22,6 +22,9 @@ rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused, + struct rte_cryptodev_pmd_init_params *params = extra_args; + int n; + ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN); + if (n >= RTE_CRYPTODEV_NAME_MAX_LEN) + return -EINVAL; +@@ -38,6 +41,10 @@ rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused, + { + int i; + char *end; ++ ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + errno = 0; + + i = strtol(value, &end, 10); +diff --git a/dpdk/lib/cryptodev/cryptodev_pmd.h b/dpdk/lib/cryptodev/cryptodev_pmd.h +index 0020102eb7..8c1467037e 100644 +--- a/dpdk/lib/cryptodev/cryptodev_pmd.h ++++ b/dpdk/lib/cryptodev/cryptodev_pmd.h +@@ -65,7 +65,7 @@ struct rte_cryptodev_data { + /** Device ID for this instance */ + uint8_t dev_id; + /** Socket ID where memory is allocated */ +- uint8_t socket_id; ++ int socket_id; + /** Unique identifier name */ + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + +diff --git a/dpdk/lib/cryptodev/rte_crypto_asym.h b/dpdk/lib/cryptodev/rte_crypto_asym.h +index 38c8b60779..70ef69b97b 100644 +--- a/dpdk/lib/cryptodev/rte_crypto_asym.h ++++ b/dpdk/lib/cryptodev/rte_crypto_asym.h +@@ -388,9 +388,9 @@ struct rte_crypto_ec_xform { + */ + struct rte_crypto_mod_op_param { + rte_crypto_uint base; +- /** Base of modular exponentiation/multiplicative inverse */ ++ /**< Base of modular exponentiation/multiplicative inverse. */ + rte_crypto_uint result; +- /** Result of modular exponentiation/multiplicative inverse */ ++ /**< Result of modular exponentiation/multiplicative inverse. */ + }; + + /** +diff --git a/dpdk/lib/cryptodev/rte_crypto_sym.h b/dpdk/lib/cryptodev/rte_crypto_sym.h +index 33b4966e16..0d625ec103 100644 +--- a/dpdk/lib/cryptodev/rte_crypto_sym.h ++++ b/dpdk/lib/cryptodev/rte_crypto_sym.h +@@ -574,6 +574,7 @@ enum rte_crypto_sym_xform_type { + * hold a single transform, the type field is used to specify which transform + * is contained within the union + */ ++/* Structure rte_crypto_sym_xform 8< */ + struct rte_crypto_sym_xform { + struct rte_crypto_sym_xform *next; + /**< next xform in chain */ +@@ -589,6 +590,7 @@ struct rte_crypto_sym_xform { + /**< AEAD xform */ + }; + }; ++/* >8 End of structure rte_crypto_sym_xform. */ + + /** + * Symmetric Cryptographic Operation. 
+@@ -620,6 +622,7 @@ struct rte_crypto_sym_xform { + * destination buffer being at a different alignment, relative to buffer start, + * to the data in the source buffer. + */ ++/* Structure rte_crypto_sym_op 8< */ + struct rte_crypto_sym_op { + struct rte_mbuf *m_src; /**< source mbuf */ + struct rte_mbuf *m_dst; /**< destination mbuf */ +@@ -881,6 +884,7 @@ struct rte_crypto_sym_op { + }; + }; + }; ++/* >8 End of structure rte_crypto_sym_op. */ + + + /** +diff --git a/dpdk/lib/cryptodev/rte_cryptodev.c b/dpdk/lib/cryptodev/rte_cryptodev.c +index 2165a0688c..515d0df5ce 100644 +--- a/dpdk/lib/cryptodev/rte_cryptodev.c ++++ b/dpdk/lib/cryptodev/rte_cryptodev.c +@@ -2692,7 +2692,7 @@ cryptodev_handle_dev_info(const char *cmd __rte_unused, + rte_tel_data_start_dict(d); + rte_tel_data_add_dict_string(d, "device_name", + cryptodev_info.device->name); +- rte_tel_data_add_dict_int(d, "max_nb_queue_pairs", ++ rte_tel_data_add_dict_u64(d, "max_nb_queue_pairs", + cryptodev_info.max_nb_queue_pairs); + + return 0; +diff --git a/dpdk/lib/cryptodev/rte_cryptodev.h b/dpdk/lib/cryptodev/rte_cryptodev.h +index 86d792e2e7..4c210b876c 100644 +--- a/dpdk/lib/cryptodev/rte_cryptodev.h ++++ b/dpdk/lib/cryptodev/rte_cryptodev.h +@@ -501,6 +501,7 @@ extern const char * + rte_cryptodev_get_feature_name(uint64_t flag); + + /** Crypto device information */ ++/* Structure rte_cryptodev_info 8< */ + struct rte_cryptodev_info { + const char *driver_name; /**< Driver name. */ + uint8_t driver_id; /**< Driver identifier */ +@@ -529,6 +530,7 @@ struct rte_cryptodev_info { + */ + } sym; + }; ++/* >8 End of structure rte_cryptodev_info. */ + + #define RTE_CRYPTODEV_DETACHED (0) + #define RTE_CRYPTODEV_ATTACHED (1) +@@ -541,11 +543,13 @@ enum rte_cryptodev_event_type { + }; + + /** Crypto device queue pair configuration structure. */ ++/* Structure rte_cryptodev_qp_conf 8<*/ + struct rte_cryptodev_qp_conf { + uint32_t nb_descriptors; /**< Number of descriptors per queue pair */ + struct rte_mempool *mp_session; + /**< The mempool for creating session in sessionless mode */ + }; ++/* >8 End of structure rte_cryptodev_qp_conf. */ + + /** + * Function type used for processing crypto ops when enqueue/dequeue burst is +@@ -674,6 +678,7 @@ extern int + rte_cryptodev_socket_id(uint8_t dev_id); + + /** Crypto device configuration structure */ ++/* Structure rte_cryptodev_config 8< */ + struct rte_cryptodev_config { + int socket_id; /**< Socket to allocate resources on */ + uint16_t nb_queue_pairs; +@@ -686,6 +691,7 @@ struct rte_cryptodev_config { + * - RTE_CRYTPODEV_FF_SECURITY + */ + }; ++/* >8 End of structure rte_cryptodev_config. */ + + /** + * Configure a device. +@@ -911,11 +917,14 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); + * @param nb_elts + * The number of elements in the mempool. + * @param elt_size +- * The size of the element. This value will be ignored if it is smaller than +- * the minimum session header size required for the system. For the user who +- * want to use the same mempool for sym session and session private data it +- * can be the maximum value of all existing devices' private data and session +- * header sizes. ++ * The size of the element. This should be the size of the cryptodev PMD ++ * session private data obtained through ++ * rte_cryptodev_sym_get_private_session_size() function call. ++ * For the user who wants to use the same mempool for heterogeneous PMDs ++ * this value should be the maximum value of their private session sizes. 
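++ * For example, with two PMDs whose private session sizes are 512 and
++ * 640 bytes, an elt_size of 640 covers both (sizes purely illustrative).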
++ * Please note the created mempool will have bigger elt size than this ++ * value as necessary session header and the possible padding are filled ++ * into each elt. + * @param cache_size + * The number of per-lcore cache elements + * @param priv_size +@@ -926,8 +935,8 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); + * constraint for the reserved zone. + * + * @return +- * - On success return size of the session +- * - On failure returns 0 ++ * - On success returns the created session mempool pointer ++ * - On failure returns NULL + */ + __rte_experimental + struct rte_mempool * +@@ -968,11 +977,14 @@ rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, + * @param dev_id ID of device that we want the session to be used on + * @param xforms Symmetric crypto transform operations to apply on flow + * processed with this session +- * @param mp Mempool where the private data is allocated. ++ * @param mp Mempool to allocate symmetric session objects from + * + * @return + * - On success return pointer to sym-session. +- * - On failure returns NULL. ++ * - On failure returns NULL and rte_errno is set to the error code: ++ * - EINVAL on invalid arguments. ++ * - ENOMEM on memory error for session allocation. ++ * - ENOTSUP if device doesn't support session configuration. + */ + void * + rte_cryptodev_sym_session_create(uint8_t dev_id, +diff --git a/dpdk/lib/eal/common/eal_common_debug.c b/dpdk/lib/eal/common/eal_common_debug.c +index dcb554af1e..9cac9c6390 100644 +--- a/dpdk/lib/eal/common/eal_common_debug.c ++++ b/dpdk/lib/eal/common/eal_common_debug.c +@@ -4,10 +4,12 @@ + + #include + #include ++#include + + #include + #include + #include ++#include + + void + __rte_panic(const char *funcname, const char *format, ...) +@@ -39,7 +41,7 @@ rte_exit(int exit_code, const char *format, ...) 
+ rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap); + va_end(ap); + +- if (rte_eal_cleanup() != 0) ++ if (rte_eal_cleanup() != 0 && rte_errno != EALREADY) + RTE_LOG(CRIT, EAL, + "EAL could not release all resources\n"); + exit(exit_code); +diff --git a/dpdk/lib/eal/common/eal_common_dynmem.c b/dpdk/lib/eal/common/eal_common_dynmem.c +index 52e52e5986..bdbbe233a0 100644 +--- a/dpdk/lib/eal/common/eal_common_dynmem.c ++++ b/dpdk/lib/eal/common/eal_common_dynmem.c +@@ -120,8 +120,7 @@ eal_dynmem_memseg_lists_init(void) + max_seglists_per_type = RTE_MAX_MEMSEG_LISTS / n_memtypes; + + if (max_seglists_per_type == 0) { +- RTE_LOG(ERR, EAL, "Cannot accommodate all memory types, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ RTE_LOG(ERR, EAL, "Cannot accommodate all memory types, please increase RTE_MAX_MEMSEG_LISTS\n"); + goto out; + } + +@@ -180,8 +179,7 @@ eal_dynmem_memseg_lists_init(void) + for (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) { + if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, +- "No more space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ "No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + goto out; + } + msl = &mcfg->memsegs[msl_idx++]; +diff --git a/dpdk/lib/eal/common/eal_common_fbarray.c b/dpdk/lib/eal/common/eal_common_fbarray.c +index f11f87979f..169e66e04b 100644 +--- a/dpdk/lib/eal/common/eal_common_fbarray.c ++++ b/dpdk/lib/eal/common/eal_common_fbarray.c +@@ -1482,7 +1482,7 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) + + if (fully_validate(arr->name, arr->elt_sz, arr->len)) { + fprintf(f, "Invalid file-backed array\n"); +- goto out; ++ return; + } + + /* prevent array from changing under us */ +@@ -1496,6 +1496,5 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) + + for (i = 0; i < msk->n_masks; i++) + fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]); +-out: + rte_rwlock_read_unlock(&arr->rwlock); + } +diff --git a/dpdk/lib/eal/common/eal_common_memory.c b/dpdk/lib/eal/common/eal_common_memory.c +index 688dc615d7..da6711d129 100644 +--- a/dpdk/lib/eal/common/eal_common_memory.c ++++ b/dpdk/lib/eal/common/eal_common_memory.c +@@ -1139,7 +1139,7 @@ handle_eal_heap_info_request(const char *cmd __rte_unused, const char *params, + malloc_heap_get_stats(heap, &sock_stats); + + rte_tel_data_start_dict(d); +- rte_tel_data_add_dict_int(d, "Head id", heap_id); ++ rte_tel_data_add_dict_u64(d, "Head_id", heap_id); + rte_tel_data_add_dict_string(d, "Name", heap->name); + rte_tel_data_add_dict_u64(d, "Heap_size", + sock_stats.heap_totalsz_bytes); +@@ -1201,13 +1201,13 @@ handle_eal_memzone_info_request(const char *cmd __rte_unused, + mz = rte_fbarray_get(&mcfg->memzones, mz_idx); + + rte_tel_data_start_dict(d); +- rte_tel_data_add_dict_int(d, "Zone", mz_idx); ++ rte_tel_data_add_dict_u64(d, "Zone", mz_idx); + rte_tel_data_add_dict_string(d, "Name", mz->name); +- rte_tel_data_add_dict_int(d, "Length", mz->len); ++ rte_tel_data_add_dict_u64(d, "Length", mz->len); + snprintf(addr, ADDR_STR, "%p", mz->addr); + rte_tel_data_add_dict_string(d, "Address", addr); + rte_tel_data_add_dict_int(d, "Socket", mz->socket_id); +- rte_tel_data_add_dict_int(d, "Flags", mz->flags); ++ rte_tel_data_add_dict_u64(d, "Flags", mz->flags); + + /* go through each page occupied by this memzone */ + msl = rte_mem_virt2memseg_list(mz->addr); +@@ -1222,7 +1222,7 @@ handle_eal_memzone_info_request(const char *cmd __rte_unused, + ms_idx = RTE_PTR_DIFF(mz->addr, 
msl->base_va) / page_sz; + ms = rte_fbarray_get(&msl->memseg_arr, ms_idx); + +- rte_tel_data_add_dict_int(d, "Hugepage_size", page_sz); ++ rte_tel_data_add_dict_u64(d, "Hugepage_size", page_sz); + snprintf(addr, ADDR_STR, "%p", ms->addr); + rte_tel_data_add_dict_string(d, "Hugepage_base", addr); + +diff --git a/dpdk/lib/eal/common/eal_common_proc.c b/dpdk/lib/eal/common/eal_common_proc.c +index 1fc1d6c53b..9676dd73c5 100644 +--- a/dpdk/lib/eal/common/eal_common_proc.c ++++ b/dpdk/lib/eal/common/eal_common_proc.c +@@ -321,6 +321,15 @@ retry: + return msglen; + } + ++static void ++cleanup_msg_fds(const struct rte_mp_msg *msg) ++{ ++ int i; ++ ++ for (i = 0; i < msg->num_fds; i++) ++ close(msg->fds[i]); ++} ++ + static void + process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) + { +@@ -349,8 +358,10 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) + else if (pending_req->type == REQUEST_TYPE_ASYNC) + req = async_reply_handle_thread_unsafe( + pending_req); +- } else ++ } else { + RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name); ++ cleanup_msg_fds(msg); ++ } + pthread_mutex_unlock(&pending_requests.lock); + + if (req != NULL) +@@ -380,6 +391,7 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) + RTE_LOG(ERR, EAL, "Cannot find action: %s\n", + msg->name); + } ++ cleanup_msg_fds(msg); + } else if (action(msg, s->sun_path) < 0) { + RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name); + } +diff --git a/dpdk/lib/eal/freebsd/eal.c b/dpdk/lib/eal/freebsd/eal.c +index 607684c1a3..122daf6c1f 100644 +--- a/dpdk/lib/eal/freebsd/eal.c ++++ b/dpdk/lib/eal/freebsd/eal.c +@@ -889,6 +889,16 @@ rte_eal_init(int argc, char **argv) + int + rte_eal_cleanup(void) + { ++ static uint32_t run_once; ++ uint32_t has_run = 0; ++ ++ if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0, ++ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { ++ RTE_LOG(WARNING, EAL, "Already called cleanup\n"); ++ rte_errno = EALREADY; ++ return -1; ++ } ++ + struct internal_config *internal_conf = + eal_get_internal_configuration(); + rte_service_finalize(); +@@ -896,9 +906,9 @@ rte_eal_cleanup(void) + eal_bus_cleanup(); + rte_trace_save(); + eal_trace_fini(); ++ rte_eal_alarm_cleanup(); + /* after this point, any DPDK pointers will become dangling */ + rte_eal_memory_detach(); +- rte_eal_alarm_cleanup(); + eal_cleanup_config(internal_conf); + return 0; + } +diff --git a/dpdk/lib/eal/freebsd/eal_alarm.c b/dpdk/lib/eal/freebsd/eal_alarm.c +index 1023c32937..1a3e6c0aad 100644 +--- a/dpdk/lib/eal/freebsd/eal_alarm.c ++++ b/dpdk/lib/eal/freebsd/eal_alarm.c +@@ -171,12 +171,12 @@ eal_alarm_callback(void *arg __rte_unused) + struct timespec now; + struct alarm_entry *ap; + +- rte_spinlock_lock(&alarm_list_lk); +- ap = LIST_FIRST(&alarm_list); +- + if (clock_gettime(CLOCK_TYPE_ID, &now) < 0) + return; + ++ rte_spinlock_lock(&alarm_list_lk); ++ ap = LIST_FIRST(&alarm_list); ++ + while (ap != NULL && timespec_cmp(&now, &ap->time) >= 0) { + ap->executing = 1; + ap->executing_id = pthread_self(); +diff --git a/dpdk/lib/eal/freebsd/eal_hugepage_info.c b/dpdk/lib/eal/freebsd/eal_hugepage_info.c +index 9dbe375bd3..e58e618469 100644 +--- a/dpdk/lib/eal/freebsd/eal_hugepage_info.c ++++ b/dpdk/lib/eal/freebsd/eal_hugepage_info.c +@@ -33,7 +33,7 @@ map_shared_memory(const char *filename, const size_t mem_size, int flags) + } + retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + close(fd); +- return retval; ++ return retval == MAP_FAILED ? 
NULL : retval; + } + + static void * +diff --git a/dpdk/lib/eal/freebsd/eal_memory.c b/dpdk/lib/eal/freebsd/eal_memory.c +index 17ab10e0ca..5c6165c580 100644 +--- a/dpdk/lib/eal/freebsd/eal_memory.c ++++ b/dpdk/lib/eal/freebsd/eal_memory.c +@@ -172,9 +172,8 @@ rte_eal_hugepage_init(void) + break; + } + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { +- RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n", +- RTE_STR(RTE_MAX_MEMSEG_PER_TYPE), +- RTE_STR(RTE_MAX_MEM_MB_PER_TYPE)); ++ RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase RTE_MAX_MEMSEG_PER_LIST " ++ "RTE_MAX_MEMSEG_PER_TYPE and/or RTE_MAX_MEM_MB_PER_TYPE in configuration.\n"); + return -1; + } + arr = &msl->memseg_arr; +@@ -404,8 +403,7 @@ memseg_primary_init(void) + + if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, +- "No more space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ "No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + return -1; + } + +diff --git a/dpdk/lib/eal/include/generic/rte_atomic.h b/dpdk/lib/eal/include/generic/rte_atomic.h +index f5c49a9870..234b268b91 100644 +--- a/dpdk/lib/eal/include/generic/rte_atomic.h ++++ b/dpdk/lib/eal/include/generic/rte_atomic.h +@@ -176,11 +176,7 @@ rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val); + static inline uint16_t + rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) + { +-#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +-#else +- return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST); +-#endif + } + #endif + +@@ -459,11 +455,7 @@ rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val); + static inline uint32_t + rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) + { +-#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +-#else +- return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST); +-#endif + } + #endif + +@@ -741,11 +733,7 @@ rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val); + static inline uint64_t + rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val) + { +-#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +-#else +- return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST); +-#endif + } + #endif + +diff --git a/dpdk/lib/eal/linux/eal.c b/dpdk/lib/eal/linux/eal.c +index 8c118d0d9f..336698379f 100644 +--- a/dpdk/lib/eal/linux/eal.c ++++ b/dpdk/lib/eal/linux/eal.c +@@ -1056,12 +1056,6 @@ rte_eal_init(int argc, char **argv) + } + } + +- /* register multi-process action callbacks for hotplug */ +- if (eal_mp_dev_hotplug_init() < 0) { +- rte_eal_init_alert("failed to register mp callback for hotplug"); +- return -1; +- } +- + if (rte_bus_scan()) { + rte_eal_init_alert("Cannot scan the buses for devices"); + rte_errno = ENODEV; +@@ -1206,6 +1200,12 @@ rte_eal_init(int argc, char **argv) + return -1; + } + ++ /* register multi-process action callbacks for hotplug after memory init */ ++ if (eal_mp_dev_hotplug_init() < 0) { ++ rte_eal_init_alert("failed to register mp callback for hotplug"); ++ return -1; ++ } ++ + if (rte_eal_tailqs_init() < 0) { + rte_eal_init_alert("Cannot init tail queues for objects"); + rte_errno = EFAULT; +@@ -1354,6 +1354,16 @@ mark_freeable(const struct rte_memseg_list *msl, const struct rte_memseg *ms, + int + rte_eal_cleanup(void) + { ++ static uint32_t run_once; ++ uint32_t has_run = 0; ++ ++ if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0, ++ __ATOMIC_RELAXED, 
__ATOMIC_RELAXED)) { ++ RTE_LOG(WARNING, EAL, "Already called cleanup\n"); ++ rte_errno = EALREADY; ++ return -1; ++ } ++ + /* if we're in a primary process, we need to mark hugepages as freeable + * so that finalization can release them back to the system. + */ +@@ -1372,11 +1382,11 @@ rte_eal_cleanup(void) + eal_bus_cleanup(); + rte_trace_save(); + eal_trace_fini(); ++ eal_mp_dev_hotplug_cleanup(); ++ rte_eal_alarm_cleanup(); + /* after this point, any DPDK pointers will become dangling */ + rte_eal_memory_detach(); +- eal_mp_dev_hotplug_cleanup(); + rte_eal_malloc_heap_cleanup(); +- rte_eal_alarm_cleanup(); + eal_cleanup_config(internal_conf); + rte_eal_log_cleanup(); + return 0; +diff --git a/dpdk/lib/eal/linux/eal_hugepage_info.c b/dpdk/lib/eal/linux/eal_hugepage_info.c +index a1b6cb31ff..581d9dfc91 100644 +--- a/dpdk/lib/eal/linux/eal_hugepage_info.c ++++ b/dpdk/lib/eal/linux/eal_hugepage_info.c +@@ -50,7 +50,7 @@ map_shared_memory(const char *filename, const size_t mem_size, int flags) + retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, 0); + close(fd); +- return retval; ++ return retval == MAP_FAILED ? NULL : retval; + } + + static void * +@@ -214,6 +214,8 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + char buf[BUFSIZ]; + const struct internal_config *internal_conf = + eal_get_internal_configuration(); ++ const size_t hugepage_dir_len = (internal_conf->hugepage_dir != NULL) ? ++ strlen(internal_conf->hugepage_dir) : 0; + struct stat st; + + /* +@@ -233,6 +235,7 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + + while (fgets(buf, sizeof(buf), fd)){ + const char *pagesz_str; ++ size_t mountpt_len = 0; + + if (rte_strsplit(buf, sizeof(buf), splitstr, _FIELDNAME_MAX, + split_tok) != _FIELDNAME_MAX) { +@@ -265,12 +268,16 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + break; + } + ++ mountpt_len = strlen(splitstr[MOUNTPT]); ++ + /* +- * Ignore any mount that doesn't contain the --huge-dir +- * directory. ++ * Ignore any mount that doesn't contain the --huge-dir directory ++ * or where mount point is not a parent path of --huge-dir + */ + if (strncmp(internal_conf->hugepage_dir, splitstr[MOUNTPT], +- strlen(splitstr[MOUNTPT])) != 0) { ++ mountpt_len) != 0 || ++ (hugepage_dir_len > mountpt_len && ++ internal_conf->hugepage_dir[mountpt_len] != '/')) { + continue; + } + +@@ -278,7 +285,7 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + * We found a match, but only prefer it if it's a longer match + * (so /mnt/1 is preferred over /mnt for matching /mnt/1/2)). + */ +- if (strlen(splitstr[MOUNTPT]) > strlen(found)) ++ if (mountpt_len > strlen(found)) + strlcpy(found, splitstr[MOUNTPT], len); + } /* end while fgets */ + +diff --git a/dpdk/lib/eal/linux/eal_memory.c b/dpdk/lib/eal/linux/eal_memory.c +index 60fc8cc6ca..9b6f08fba8 100644 +--- a/dpdk/lib/eal/linux/eal_memory.c ++++ b/dpdk/lib/eal/linux/eal_memory.c +@@ -681,6 +681,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end) + + /* find free space in memseg lists */ + for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) { ++ int free_len; + bool empty; + msl = &mcfg->memsegs[msl_idx]; + arr = &msl->memseg_arr; +@@ -692,24 +693,31 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end) + + /* leave space for a hole if array is not empty */ + empty = arr->count == 0; +- ms_idx = rte_fbarray_find_next_n_free(arr, 0, +- seg_len + (empty ? 0 : 1)); +- +- /* memseg list is full? 
*/ ++ /* find start of the biggest contiguous block and its size */ ++ ms_idx = rte_fbarray_find_biggest_free(arr, 0); + if (ms_idx < 0) + continue; +- ++ /* hole is 1 segment long, so at least two segments long. */ ++ free_len = rte_fbarray_find_contig_free(arr, ms_idx); ++ if (free_len < 2) ++ continue; + /* leave some space between memsegs, they are not IOVA + * contiguous, so they shouldn't be VA contiguous either. + */ +- if (!empty) ++ if (!empty) { + ms_idx++; ++ free_len--; ++ } ++ ++ /* we might not get all of the space we wanted */ ++ free_len = RTE_MIN(seg_len, free_len); ++ seg_end = seg_start + free_len; ++ seg_len = seg_end - seg_start; + break; + } + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { +- RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n", +- RTE_STR(RTE_MAX_MEMSEG_PER_TYPE), +- RTE_STR(RTE_MAX_MEM_MB_PER_TYPE)); ++ RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase RTE_MAX_MEMSEG_PER_LIST " ++ "RTE_MAX_MEMSEG_PER_TYPE and/or RTE_MAX_MEM_MB_PER_TYPE in configuration.\n"); + return -1; + } + +@@ -787,7 +795,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end) + } + RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n", + (seg_len * page_sz) >> 20, socket_id); +- return 0; ++ return seg_len; + } + + static uint64_t +@@ -957,8 +965,7 @@ prealloc_segments(struct hugepage_file *hugepages, int n_pages) + break; + } + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { +- RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + return -1; + } + +@@ -1022,10 +1029,16 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages) + if (new_memseg) { + /* if this isn't the first time, remap segment */ + if (cur_page != 0) { +- ret = remap_segment(hugepages, seg_start_page, +- cur_page); +- if (ret != 0) +- return -1; ++ int n_remapped = 0; ++ int n_needed = cur_page - seg_start_page; ++ while (n_remapped < n_needed) { ++ ret = remap_segment(hugepages, seg_start_page, ++ cur_page); ++ if (ret < 0) ++ return -1; ++ n_remapped += ret; ++ seg_start_page += ret; ++ } + } + /* remember where we started */ + seg_start_page = cur_page; +@@ -1034,10 +1047,16 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages) + } + /* we were stopped, but we didn't remap the last segment, do it now */ + if (cur_page != 0) { +- ret = remap_segment(hugepages, seg_start_page, +- cur_page); +- if (ret != 0) +- return -1; ++ int n_remapped = 0; ++ int n_needed = cur_page - seg_start_page; ++ while (n_remapped < n_needed) { ++ ret = remap_segment(hugepages, seg_start_page, ++ cur_page); ++ if (ret < 0) ++ return -1; ++ n_remapped += ret; ++ seg_start_page += ret; ++ } + } + return 0; + } +@@ -1812,8 +1831,7 @@ memseg_primary_init_32(void) + + if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, +- "No more space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ "No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + return -1; + } + +diff --git a/dpdk/lib/eal/unix/rte_thread.c b/dpdk/lib/eal/unix/rte_thread.c +index 37ebfcfca1..f4076122a4 100644 +--- a/dpdk/lib/eal/unix/rte_thread.c ++++ b/dpdk/lib/eal/unix/rte_thread.c +@@ -5,6 +5,7 @@ + + #include + #include ++#include + #include + #include + +@@ -16,9 +17,14 @@ struct eal_tls_key { + pthread_key_t thread_index; + }; + +-struct 
thread_routine_ctx { ++struct thread_start_context { + rte_thread_func thread_func; +- void *routine_args; ++ void *thread_args; ++ const rte_thread_attr_t *thread_attr; ++ pthread_mutex_t wrapper_mutex; ++ pthread_cond_t wrapper_cond; ++ int wrapper_ret; ++ bool wrapper_done; + }; + + static int +@@ -81,13 +87,29 @@ thread_map_os_priority_to_eal_priority(int policy, int os_pri, + } + + static void * +-thread_func_wrapper(void *arg) ++thread_start_wrapper(void *arg) + { +- struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg; ++ struct thread_start_context *ctx = (struct thread_start_context *)arg; ++ rte_thread_func thread_func = ctx->thread_func; ++ void *thread_args = ctx->thread_args; ++ int ret = 0; ++ ++ if (ctx->thread_attr != NULL && CPU_COUNT(&ctx->thread_attr->cpuset) > 0) { ++ ret = rte_thread_set_affinity_by_id(rte_thread_self(), &ctx->thread_attr->cpuset); ++ if (ret != 0) ++ RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n"); ++ } + +- free(arg); ++ pthread_mutex_lock(&ctx->wrapper_mutex); ++ ctx->wrapper_ret = ret; ++ ctx->wrapper_done = true; ++ pthread_cond_signal(&ctx->wrapper_cond); ++ pthread_mutex_unlock(&ctx->wrapper_mutex); + +- return (void *)(uintptr_t)ctx.thread_func(ctx.routine_args); ++ if (ret != 0) ++ return NULL; ++ ++ return (void *)(uintptr_t)thread_func(thread_args); + } + + int +@@ -98,20 +120,18 @@ rte_thread_create(rte_thread_t *thread_id, + int ret = 0; + pthread_attr_t attr; + pthread_attr_t *attrp = NULL; +- struct thread_routine_ctx *ctx; + struct sched_param param = { + .sched_priority = 0, + }; + int policy = SCHED_OTHER; +- +- ctx = calloc(1, sizeof(*ctx)); +- if (ctx == NULL) { +- RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n"); +- ret = ENOMEM; +- goto cleanup; +- } +- ctx->routine_args = args; +- ctx->thread_func = thread_func; ++ struct thread_start_context ctx = { ++ .thread_func = thread_func, ++ .thread_args = args, ++ .thread_attr = thread_attr, ++ .wrapper_done = false, ++ .wrapper_mutex = PTHREAD_MUTEX_INITIALIZER, ++ .wrapper_cond = PTHREAD_COND_INITIALIZER, ++ }; + + if (thread_attr != NULL) { + ret = pthread_attr_init(&attr); +@@ -133,7 +153,6 @@ rte_thread_create(rte_thread_t *thread_id, + goto cleanup; + } + +- + if (thread_attr->priority == + RTE_THREAD_PRIORITY_REALTIME_CRITICAL) { + ret = ENOTSUP; +@@ -158,24 +177,22 @@ rte_thread_create(rte_thread_t *thread_id, + } + + ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp, +- thread_func_wrapper, ctx); ++ thread_start_wrapper, &ctx); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_create failed\n"); + goto cleanup; + } + +- if (thread_attr != NULL && CPU_COUNT(&thread_attr->cpuset) > 0) { +- ret = rte_thread_set_affinity_by_id(*thread_id, +- &thread_attr->cpuset); +- if (ret != 0) { +- RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n"); +- goto cleanup; +- } +- } ++ pthread_mutex_lock(&ctx.wrapper_mutex); ++ while (!ctx.wrapper_done) ++ pthread_cond_wait(&ctx.wrapper_cond, &ctx.wrapper_mutex); ++ ret = ctx.wrapper_ret; ++ pthread_mutex_unlock(&ctx.wrapper_mutex); ++ ++ if (ret != 0) ++ pthread_join((pthread_t)thread_id->opaque_id, NULL); + +- ctx = NULL; + cleanup: +- free(ctx); + if (attrp != NULL) + pthread_attr_destroy(&attr); + +diff --git a/dpdk/lib/eal/windows/eal.c b/dpdk/lib/eal/windows/eal.c +index adb929a014..56fadc7afe 100644 +--- a/dpdk/lib/eal/windows/eal.c ++++ b/dpdk/lib/eal/windows/eal.c +@@ -462,6 +462,9 @@ rte_eal_init(int argc, char **argv) + */ + rte_eal_mp_remote_launch(sync_func, 
NULL, SKIP_MAIN); + rte_eal_mp_wait_lcore(); ++ ++ eal_mcfg_complete(); ++ + return fctret; + } + +diff --git a/dpdk/lib/eal/windows/include/pthread.h b/dpdk/lib/eal/windows/include/pthread.h +index 27fd2cca52..f7cf0e9ddf 100644 +--- a/dpdk/lib/eal/windows/include/pthread.h ++++ b/dpdk/lib/eal/windows/include/pthread.h +@@ -134,7 +134,8 @@ pthread_create(void *threadid, const void *threadattr, void *threadfunc, + { + RTE_SET_USED(threadattr); + HANDLE hThread; +- hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)threadfunc, ++ hThread = CreateThread(NULL, 0, ++ (LPTHREAD_START_ROUTINE)(uintptr_t)threadfunc, + args, 0, (LPDWORD)threadid); + if (hThread) { + SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS); +diff --git a/dpdk/lib/eal/windows/rte_thread.c b/dpdk/lib/eal/windows/rte_thread.c +index 1c1e9d01e3..3538633816 100644 +--- a/dpdk/lib/eal/windows/rte_thread.c ++++ b/dpdk/lib/eal/windows/rte_thread.c +@@ -17,6 +17,7 @@ struct eal_tls_key { + + struct thread_routine_ctx { + rte_thread_func thread_func; ++ bool thread_init_failed; + void *routine_args; + }; + +@@ -165,9 +166,13 @@ static DWORD + thread_func_wrapper(void *arg) + { + struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg; ++ const bool thread_exit = __atomic_load_n(&ctx.thread_init_failed, __ATOMIC_ACQUIRE); + + free(arg); + ++ if (thread_exit) ++ return 0; ++ + return (DWORD)ctx.thread_func(ctx.routine_args); + } + +@@ -181,6 +186,7 @@ rte_thread_create(rte_thread_t *thread_id, + HANDLE thread_handle = NULL; + GROUP_AFFINITY thread_affinity; + struct thread_routine_ctx *ctx; ++ bool thread_exit = false; + + ctx = calloc(1, sizeof(*ctx)); + if (ctx == NULL) { +@@ -190,6 +196,7 @@ rte_thread_create(rte_thread_t *thread_id, + } + ctx->routine_args = args; + ctx->thread_func = thread_func; ++ ctx->thread_init_failed = false; + + thread_handle = CreateThread(NULL, 0, thread_func_wrapper, ctx, + CREATE_SUSPENDED, &tid); +@@ -207,23 +214,29 @@ rte_thread_create(rte_thread_t *thread_id, + ); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n"); +- goto cleanup; ++ thread_exit = true; ++ goto resume_thread; + } + + if (!SetThreadGroupAffinity(thread_handle, + &thread_affinity, NULL)) { + ret = thread_log_last_error("SetThreadGroupAffinity()"); +- goto cleanup; ++ thread_exit = true; ++ goto resume_thread; + } + } + ret = rte_thread_set_priority(*thread_id, + thread_attr->priority); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to set thread priority\n"); +- goto cleanup; ++ thread_exit = true; ++ goto resume_thread; + } + } + ++resume_thread: ++ __atomic_store_n(&ctx->thread_init_failed, thread_exit, __ATOMIC_RELEASE); ++ + if (ResumeThread(thread_handle) == (DWORD)-1) { + ret = thread_log_last_error("ResumeThread()"); + goto cleanup; +diff --git a/dpdk/lib/eal/x86/include/rte_memcpy.h b/dpdk/lib/eal/x86/include/rte_memcpy.h +index d4d7a5cfc8..fd151be708 100644 +--- a/dpdk/lib/eal/x86/include/rte_memcpy.h ++++ b/dpdk/lib/eal/x86/include/rte_memcpy.h +@@ -846,7 +846,7 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n) + } + + /* Copy 64 bytes blocks */ +- for (; n >= 64; n -= 64) { ++ for (; n > 64; n -= 64) { + rte_mov64((uint8_t *)dst, (const uint8_t *)src); + dst = (uint8_t *)dst + 64; + src = (const uint8_t *)src + 64; +diff --git a/dpdk/lib/ethdev/ethdev_driver.h b/dpdk/lib/ethdev/ethdev_driver.h +index 6a550cfc83..cd2cd89649 100644 +--- a/dpdk/lib/ethdev/ethdev_driver.h ++++ b/dpdk/lib/ethdev/ethdev_driver.h +@@ -117,7 +117,11 @@ struct 
rte_eth_dev_data { + + uint64_t rx_mbuf_alloc_failed; /**< Rx ring mbuf allocation failures */ + +- /** Device Ethernet link address. @see rte_eth_dev_release_port() */ ++ /** ++ * Device Ethernet link addresses. ++ * All entries are unique. ++ * The first entry (index zero) is the default address. ++ */ + struct rte_ether_addr *mac_addrs; + /** Bitmap associating MAC addresses to pools */ + uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR]; +diff --git a/dpdk/lib/ethdev/ethdev_pci.h b/dpdk/lib/ethdev/ethdev_pci.h +index 94b8fba5d7..320e3e0093 100644 +--- a/dpdk/lib/ethdev/ethdev_pci.h ++++ b/dpdk/lib/ethdev/ethdev_pci.h +@@ -126,12 +126,13 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev, + struct rte_eth_dev *eth_dev; + int ret; + ++ if (*dev_init == NULL) ++ return -EINVAL; ++ + eth_dev = rte_eth_dev_pci_allocate(pci_dev, private_data_size); + if (!eth_dev) + return -ENOMEM; + +- if (*dev_init == NULL) +- return -EINVAL; + ret = dev_init(eth_dev); + if (ret) + rte_eth_dev_release_port(eth_dev); +diff --git a/dpdk/lib/ethdev/rte_class_eth.c b/dpdk/lib/ethdev/rte_class_eth.c +index 838b3a8f9f..b61dae849d 100644 +--- a/dpdk/lib/ethdev/rte_class_eth.c ++++ b/dpdk/lib/ethdev/rte_class_eth.c +@@ -67,7 +67,7 @@ eth_representor_cmp(const char *key __rte_unused, + const struct rte_eth_dev *edev = opaque; + const struct rte_eth_dev_data *data = edev->data; + struct rte_eth_devargs eth_da; +- uint16_t id, nc, np, nf, i, c, p, f; ++ uint16_t id = 0, nc, np, nf, i, c, p, f; + + if ((data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) + return -1; /* not a representor port */ +diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c +index 5d5e18db1e..437d04b34e 100644 +--- a/dpdk/lib/ethdev/rte_ethdev.c ++++ b/dpdk/lib/ethdev/rte_ethdev.c +@@ -4362,6 +4362,11 @@ rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + ++ if (fec_capa == 0) { ++ RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n"); ++ return -EINVAL; ++ } ++ + if (*dev->dev_ops->fec_set == NULL) + return -ENOTSUP; + return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); +@@ -4499,6 +4504,7 @@ int + rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) + { + struct rte_eth_dev *dev; ++ int index; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); +@@ -4517,6 +4523,15 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) + if (*dev->dev_ops->mac_addr_set == NULL) + return -ENOTSUP; + ++ /* Keep address unique in dev->data->mac_addrs[]. */ ++ index = eth_dev_get_mac_addr_index(port_id, addr); ++ if (index > 0) { ++ RTE_ETHDEV_LOG(ERR, ++ "New default address for port %u was already in the address list. 
Please remove it first.\n", ++ port_id); ++ return -EEXIST; ++ } ++ + ret = (*dev->dev_ops->mac_addr_set)(dev, addr); + if (ret < 0) + return ret; +@@ -5935,7 +5950,7 @@ eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, + if (!rte_eth_dev_is_valid_port(port_id)) + return -EINVAL; + +- buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); ++ buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char)); + if (buf == NULL) + return -ENOMEM; + +@@ -6037,10 +6052,8 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, + eth_dev->data->nb_tx_queues); + rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); + rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); +- rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", ++ rte_tel_data_add_dict_u64(d, "rx_mbuf_size_min", + eth_dev->data->min_rx_buf_size); +- rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", +- eth_dev->data->rx_mbuf_alloc_failed); + rte_ether_format_addr(mac_addr, sizeof(mac_addr), + eth_dev->data->mac_addrs); + rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); +@@ -6068,12 +6081,12 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, + rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); + rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); + rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); +- rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); +- rte_tel_data_add_dict_int(d, "rx_offloads", ++ rte_tel_data_add_dict_u64(d, "dev_flags", eth_dev->data->dev_flags); ++ rte_tel_data_add_dict_u64(d, "rx_offloads", + eth_dev->data->dev_conf.rxmode.offloads); +- rte_tel_data_add_dict_int(d, "tx_offloads", ++ rte_tel_data_add_dict_u64(d, "tx_offloads", + eth_dev->data->dev_conf.txmode.offloads); +- rte_tel_data_add_dict_int(d, "ethdev_rss_hf", ++ rte_tel_data_add_dict_u64(d, "ethdev_rss_hf", + eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); + + return 0; +diff --git a/dpdk/lib/ethdev/rte_ethdev.h b/dpdk/lib/ethdev/rte_ethdev.h +index c129ca1eaf..5f187131e2 100644 +--- a/dpdk/lib/ethdev/rte_ethdev.h ++++ b/dpdk/lib/ethdev/rte_ethdev.h +@@ -4177,10 +4177,7 @@ int rte_eth_fec_get_capability(uint16_t port_id, + * @param port_id + * The port identifier of the Ethernet device. + * @param fec_capa +- * A bitmask of enabled FEC modes. If AUTO bit is set, other +- * bits specify FEC modes which may be negotiated. If AUTO +- * bit is clear, specify FEC modes to be used (only one valid +- * mode per speed may be set). ++ * A bitmask with the current FEC mode. + * @return + * - (0) if successful. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support. +@@ -4200,10 +4197,13 @@ int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa); + * @param port_id + * The port identifier of the Ethernet device. + * @param fec_capa +- * A bitmask of allowed FEC modes. If AUTO bit is set, other +- * bits specify FEC modes which may be negotiated. If AUTO +- * bit is clear, specify FEC modes to be used (only one valid +- * mode per speed may be set). ++ * A bitmask of allowed FEC modes. ++ * If only the AUTO bit is set, the decision on which FEC ++ * mode to use will be made by HW/FW or driver. ++ * If the AUTO bit is set with some FEC modes, only specified ++ * FEC modes can be set. ++ * If AUTO bit is clear, specify FEC mode to be used ++ * (only one valid mode per speed may be set). + * @return + * - (0) if successful. + * - (-EINVAL) if the FEC mode is not valid. 
+@@ -4354,6 +4354,9 @@ int rte_eth_dev_mac_addr_remove(uint16_t port_id,
+
+ /**
+ * Set the default MAC address.
++ * It replaces the address at index 0 of the MAC address list.
++ * If the address was already in the MAC address list,
++ * please remove it first.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+@@ -4364,6 +4367,7 @@
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if MAC address is invalid.
++ * - (-EEXIST) if MAC address was already in the address list.
+ */
+ int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
+ struct rte_ether_addr *mac_addr);
+diff --git a/dpdk/lib/ethdev/rte_flow.c b/dpdk/lib/ethdev/rte_flow.c
+index 7d0c24366c..1a67a987f5 100644
+--- a/dpdk/lib/ethdev/rte_flow.c
++++ b/dpdk/lib/ethdev/rte_flow.c
+@@ -855,7 +855,15 @@ rte_flow_conv_actions(struct rte_flow_action *dst,
+ src -= num;
+ dst -= num;
+ do {
+- if (src->conf) {
++ if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
++ /*
++ * Indirect action conf fills the indirect action
++ * handler. Copy the action handle directly instead
++ * of duplicating the pointer memory.
++ */
++ if (size)
++ dst->conf = src->conf;
++ } else if (src->conf) {
+ off = RTE_ALIGN_CEIL(off, sizeof(double));
+ ret = rte_flow_conv_action_conf
+ ((void *)(data + off),
+diff --git a/dpdk/lib/eventdev/rte_event_crypto_adapter.c b/dpdk/lib/eventdev/rte_event_crypto_adapter.c
+index 3c585d7b0d..4e1dbefb8e 100644
+--- a/dpdk/lib/eventdev/rte_event_crypto_adapter.c
++++ b/dpdk/lib/eventdev/rte_event_crypto_adapter.c
+@@ -497,6 +497,9 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+ cdev_id,
+ qp_id,
+ &nb_enqueued);
++ stats->crypto_enq_count += nb_enqueued;
++ n += nb_enqueued;
++
+ /**
+ * If some crypto ops failed to flush to cdev and
+ * space for another batch is not available, stop
+@@ -507,9 +510,6 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+ &qp_info->cbuf)))
+ adapter->stop_enq_to_cryptodev = true;
+ }
+-
+- stats->crypto_enq_count += nb_enqueued;
+- n += nb_enqueued;
+ }
+
+ return n;
+@@ -585,14 +585,15 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+ if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ return 0;
+
+- if (unlikely(adapter->stop_enq_to_cryptodev)) {
+- nb_enqueued += eca_crypto_enq_flush(adapter);
++ for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
+
+- if (unlikely(adapter->stop_enq_to_cryptodev))
+- goto skip_event_dequeue_burst;
+- }
++ if (unlikely(adapter->stop_enq_to_cryptodev)) {
++ nb_enqueued += eca_crypto_enq_flush(adapter);
++
++ if (unlikely(adapter->stop_enq_to_cryptodev))
++ break;
++ }
+
+- for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
+ stats->event_poll_count++;
+ n = rte_event_dequeue_burst(event_dev_id,
+ event_port_id, ev, BATCH_SIZE, 0);
+@@ -603,8 +604,6 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+ nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
+ }
+
+-skip_event_dequeue_burst:
+-
+ if ((++adapter->transmit_loop_count &
+ (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
+ nb_enqueued += eca_crypto_enq_flush(adapter);
+@@ -681,7 +680,7 @@ eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
+ else
+ return 0; /* buffer empty */
+
+- nb_ops_flushed = eca_ops_enqueue_burst(adapter, ops, n);
++ nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
+ bufp->count -= nb_ops_flushed;
+ if (!bufp->count) {
+ 
*headp = 0; +@@ -766,7 +765,7 @@ eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter, + for (i = nb_enqueued; i < n; i++) + eca_circular_buffer_add( + &adapter->ebuf, +- ops[nb_enqueued]); ++ ops[i]); + + check: + nb_deq += n; +diff --git a/dpdk/lib/eventdev/rte_event_crypto_adapter.h b/dpdk/lib/eventdev/rte_event_crypto_adapter.h +index 83d154a6ce..2a69290097 100644 +--- a/dpdk/lib/eventdev/rte_event_crypto_adapter.h ++++ b/dpdk/lib/eventdev/rte_event_crypto_adapter.h +@@ -595,6 +595,9 @@ int + rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id); + + /** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice ++ * + * Retrieve vector limits for a given event dev and crypto dev pair. + * @see rte_event_crypto_adapter_vector_limits + * +@@ -610,6 +613,7 @@ rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id); + * - 0: Success. + * - <0: Error code on failure. + */ ++__rte_experimental + int rte_event_crypto_adapter_vector_limits_get( + uint8_t dev_id, uint16_t cdev_id, + struct rte_event_crypto_adapter_vector_limits *limits); +diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +index cf7bbd4d69..170823a03c 100644 +--- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c ++++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +@@ -3415,14 +3415,10 @@ rte_event_eth_rx_adapter_instance_get(uint16_t eth_dev_id, + if (!rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id, + eth_dev_id, + &caps)) { +- if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { +- ret = rxa_dev_instance_get(rx_adapter) ? +- rxa_dev_instance_get(rx_adapter) +- (eth_dev_id, +- rx_queue_id, +- rxa_inst_id) +- : -EINVAL; +- } ++ if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT && ++ rxa_dev_instance_get(rx_adapter)) ++ ret = rxa_dev_instance_get(rx_adapter)(eth_dev_id, rx_queue_id, ++ rxa_inst_id); + } + + /* return if entry found */ +diff --git a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c +index 88309d2aaa..ba7a1c7f1b 100644 +--- a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c ++++ b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c +@@ -676,7 +676,7 @@ txa_service_func(void *args) + RTE_ETH_FOREACH_DEV(i) { + uint16_t q; + +- if (i == txa->dev_count) ++ if (i >= txa->dev_count) + break; + + dev = tdi[i].dev; +diff --git a/dpdk/lib/eventdev/rte_event_timer_adapter.c b/dpdk/lib/eventdev/rte_event_timer_adapter.c +index a0f14bf861..a13ddce627 100644 +--- a/dpdk/lib/eventdev/rte_event_timer_adapter.c ++++ b/dpdk/lib/eventdev/rte_event_timer_adapter.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include "event_timer_adapter_pmd.h" + #include "eventdev_pmd.h" +@@ -699,13 +700,51 @@ swtim_callback(struct rte_timer *tim) + } + } + +-static __rte_always_inline uint64_t ++static __rte_always_inline int + get_timeout_cycles(struct rte_event_timer *evtim, +- const struct rte_event_timer_adapter *adapter) ++ const struct rte_event_timer_adapter *adapter, ++ uint64_t *timeout_cycles) + { +- struct swtim *sw = swtim_pmd_priv(adapter); +- uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns; +- return timeout_ns * rte_get_timer_hz() / NSECPERSEC; ++ static struct rte_reciprocal_u64 nsecpersec_inverse; ++ static uint64_t timer_hz; ++ uint64_t rem_cycles, secs_cycles = 0; ++ uint64_t secs, timeout_nsecs; ++ uint64_t nsecpersec; ++ struct swtim *sw; ++ ++ sw = swtim_pmd_priv(adapter); ++ nsecpersec = (uint64_t)NSECPERSEC; ++ ++ 
timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
++ if (timeout_nsecs > sw->max_tmo_ns)
++ return -1;
++ if (timeout_nsecs < sw->timer_tick_ns)
++ return -2;
++
++ /* Set these values in the first invocation */
++ if (!timer_hz) {
++ timer_hz = rte_get_timer_hz();
++ nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
++ }
++
++ /* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
++ * of whole seconds it contains and convert that value to a number
++ * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
++ * in order to avoid overflow when we later multiply by timer_hz.
++ */
++ if (timeout_nsecs > nsecpersec) {
++ secs = rte_reciprocal_divide_u64(timeout_nsecs,
++ &nsecpersec_inverse);
++ secs_cycles = secs * timer_hz;
++ timeout_nsecs -= secs * nsecpersec;
++ }
++
++ rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
++ &nsecpersec_inverse);
++
++ *timeout_cycles = secs_cycles + rem_cycles;
++
++ return 0;
+ }
+
+ /* This function returns true if one or more (adapter) ticks have occurred since
+@@ -739,23 +778,6 @@ swtim_did_tick(struct swtim *sw)
+ return false;
+ }
+
+-/* Check that event timer timeout value is in range */
+-static __rte_always_inline int
+-check_timeout(struct rte_event_timer *evtim,
+- const struct rte_event_timer_adapter *adapter)
+-{
+- uint64_t tmo_nsec;
+- struct swtim *sw = swtim_pmd_priv(adapter);
+-
+- tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
+- if (tmo_nsec > sw->max_tmo_ns)
+- return -1;
+- if (tmo_nsec < sw->timer_tick_ns)
+- return -2;
+-
+- return 0;
+-}
+-
+ /* Check that event timer event queue sched type matches destination event queue
+ * sched type
+ */
+@@ -798,17 +820,18 @@ swtim_service_func(void *arg)
+ sw->n_expired_timers);
+ sw->n_expired_timers = 0;
+
+- event_buffer_flush(&sw->buffer,
+- adapter->data->event_dev_id,
+- adapter->data->event_port_id,
+- &nb_evs_flushed,
+- &nb_evs_invalid);
+-
+- sw->stats.ev_enq_count += nb_evs_flushed;
+- sw->stats.ev_inv_count += nb_evs_invalid;
+ sw->stats.adapter_tick_count++;
+ }
+
++ event_buffer_flush(&sw->buffer,
++ adapter->data->event_dev_id,
++ adapter->data->event_port_id,
++ &nb_evs_flushed,
++ &nb_evs_invalid);
++
++ sw->stats.ev_enq_count += nb_evs_flushed;
++ sw->stats.ev_inv_count += nb_evs_invalid;
++
+ rte_event_maintain(adapter->data->event_dev_id,
+ adapter->data->event_port_id, 0);
+
+@@ -1140,21 +1163,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
+ break;
+ }
+
+- ret = check_timeout(evtims[i], adapter);
+- if (unlikely(ret == -1)) {
+- __atomic_store_n(&evtims[i]->state,
+- RTE_EVENT_TIMER_ERROR_TOOLATE,
+- __ATOMIC_RELAXED);
+- rte_errno = EINVAL;
+- break;
+- } else if (unlikely(ret == -2)) {
+- __atomic_store_n(&evtims[i]->state,
+- RTE_EVENT_TIMER_ERROR_TOOEARLY,
+- __ATOMIC_RELAXED);
+- rte_errno = EINVAL;
+- break;
+- }
+-
+ if (unlikely(check_destination_event_queue(evtims[i],
+ adapter) < 0)) {
+ __atomic_store_n(&evtims[i]->state,
+@@ -1170,7 +1178,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
+ evtims[i]->impl_opaque[0] = (uintptr_t)tim;
+ evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
+
+- cycles = get_timeout_cycles(evtims[i], adapter);
++ ret = get_timeout_cycles(evtims[i], adapter, &cycles);
++ if (unlikely(ret == -1)) {
++ __atomic_store_n(&evtims[i]->state,
++ RTE_EVENT_TIMER_ERROR_TOOLATE,
++ 
__ATOMIC_RELAXED); ++ rte_errno = EINVAL; ++ break; ++ } else if (unlikely(ret == -2)) { ++ __atomic_store_n(&evtims[i]->state, ++ RTE_EVENT_TIMER_ERROR_TOOEARLY, ++ __ATOMIC_RELAXED); ++ rte_errno = EINVAL; ++ break; ++ } ++ + ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles, + type, lcore_id, NULL, evtims[i]); + if (ret < 0) { +diff --git a/dpdk/lib/eventdev/rte_eventdev.c b/dpdk/lib/eventdev/rte_eventdev.c +index b0414206d9..78336faa6a 100644 +--- a/dpdk/lib/eventdev/rte_eventdev.c ++++ b/dpdk/lib/eventdev/rte_eventdev.c +@@ -1678,7 +1678,7 @@ eventdev_build_telemetry_data(int dev_id, + if (xstat_names == NULL) + return -1; + +- ids = malloc((sizeof(unsigned int)) * num_xstats); ++ ids = malloc((sizeof(uint64_t)) * num_xstats); + if (ids == NULL) { + free(xstat_names); + return -1; +diff --git a/dpdk/lib/eventdev/version.map b/dpdk/lib/eventdev/version.map +index dd63ec6f68..c155af6d50 100644 +--- a/dpdk/lib/eventdev/version.map ++++ b/dpdk/lib/eventdev/version.map +@@ -110,6 +110,7 @@ EXPERIMENTAL { + rte_event_eth_rx_adapter_event_port_get; + + # added in 22.07 ++ rte_event_crypto_adapter_vector_limits_get; + rte_event_port_quiesce; + rte_event_queue_attr_set; + +diff --git a/dpdk/lib/fib/dir24_8.c b/dpdk/lib/fib/dir24_8.c +index a8ba4f64ca..3efdcb533c 100644 +--- a/dpdk/lib/fib/dir24_8.c ++++ b/dpdk/lib/fib/dir24_8.c +@@ -390,7 +390,7 @@ modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip, + (uint32_t)(1ULL << (32 - tmp_depth)); + } else { + redge = ip + (uint32_t)(1ULL << (32 - depth)); +- if (ledge == redge) ++ if (ledge == redge && ledge != 0) + break; + ret = install_to_fib(dp, ledge, redge, + next_hop); +diff --git a/dpdk/lib/gpudev/gpudev.c b/dpdk/lib/gpudev/gpudev.c +index 805719d00c..8f12abef23 100644 +--- a/dpdk/lib/gpudev/gpudev.c ++++ b/dpdk/lib/gpudev/gpudev.c +@@ -408,6 +408,7 @@ rte_gpu_callback_register(int16_t dev_id, enum rte_gpu_event event, + callback->function == function && + callback->user_data == user_data) { + GPU_LOG(INFO, "callback already registered"); ++ rte_rwlock_write_unlock(&gpu_callback_lock); + return 0; + } + } +@@ -415,7 +416,9 @@ rte_gpu_callback_register(int16_t dev_id, enum rte_gpu_event event, + callback = malloc(sizeof(*callback)); + if (callback == NULL) { + GPU_LOG(ERR, "cannot allocate callback"); +- return -ENOMEM; ++ rte_rwlock_write_unlock(&gpu_callback_lock); ++ rte_errno = ENOMEM; ++ return -rte_errno; + } + callback->function = function; + callback->user_data = user_data; +diff --git a/dpdk/lib/gpudev/gpudev_driver.h b/dpdk/lib/gpudev/gpudev_driver.h +index d5e2c8e1ef..42898c7c8b 100644 +--- a/dpdk/lib/gpudev/gpudev_driver.h ++++ b/dpdk/lib/gpudev/gpudev_driver.h +@@ -19,6 +19,10 @@ + #include + #include "rte_gpudev.h" + ++#ifdef __cplusplus ++extern "C" { ++#endif ++ + /* Flags indicate current state of device. 
*/ + enum rte_gpu_state { + RTE_GPU_STATE_UNUSED, /* not initialized */ +@@ -106,4 +110,8 @@ int rte_gpu_release(struct rte_gpu *dev); + __rte_internal + void rte_gpu_notify(struct rte_gpu *dev, enum rte_gpu_event); + ++#ifdef __cplusplus ++} ++#endif ++ + #endif /* RTE_GPUDEV_DRIVER_H */ +diff --git a/dpdk/lib/gpudev/meson.build b/dpdk/lib/gpudev/meson.build +index 89a118f357..d21fadc052 100644 +--- a/dpdk/lib/gpudev/meson.build ++++ b/dpdk/lib/gpudev/meson.build +@@ -5,6 +5,10 @@ headers = files( + 'rte_gpudev.h', + ) + ++driver_sdk_headers = files( ++ 'gpudev_driver.h', ++) ++ + sources = files( + 'gpudev.c', + ) +diff --git a/dpdk/lib/graph/node.c b/dpdk/lib/graph/node.c +index fc6345de07..149414dcd9 100644 +--- a/dpdk/lib/graph/node.c ++++ b/dpdk/lib/graph/node.c +@@ -300,16 +300,16 @@ rte_node_edge_shrink(rte_node_t id, rte_edge_t size) + if (node->id == id) { + if (node->nb_edges < size) { + rte_errno = E2BIG; +- goto fail; ++ } else { ++ node->nb_edges = size; ++ rc = size; + } +- node->nb_edges = size; +- rc = size; + break; + } + } + +-fail: + graph_spinlock_unlock(); ++fail: + return rc; + } + +diff --git a/dpdk/lib/hash/rte_thash.c b/dpdk/lib/hash/rte_thash.c +index 0249883b8d..2228af576b 100644 +--- a/dpdk/lib/hash/rte_thash.c ++++ b/dpdk/lib/hash/rte_thash.c +@@ -670,7 +670,7 @@ rte_thash_get_gfni_matrices(struct rte_thash_ctx *ctx) + } + + static inline uint8_t +-read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset) ++read_unaligned_byte(uint8_t *ptr, unsigned int offset) + { + uint8_t ret = 0; + +@@ -681,13 +681,14 @@ read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset) + (CHAR_BIT - (offset % CHAR_BIT)); + } + +- return ret >> (CHAR_BIT - len); ++ return ret; + } + + static inline uint32_t + read_unaligned_bits(uint8_t *ptr, int len, int offset) + { + uint32_t ret = 0; ++ int shift; + + len = RTE_MAX(len, 0); + len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT)); +@@ -695,13 +696,14 @@ read_unaligned_bits(uint8_t *ptr, int len, int offset) + while (len > 0) { + ret <<= CHAR_BIT; + +- ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT), +- offset); ++ ret |= read_unaligned_byte(ptr, offset); + offset += CHAR_BIT; + len -= CHAR_BIT; + } + +- return ret; ++ shift = (len == 0) ? 
0 : ++ (CHAR_BIT - ((len + CHAR_BIT) % CHAR_BIT)); ++ return ret >> shift; + } + + /* returns mask for len bits with given offset inside byte */ +diff --git a/dpdk/lib/hash/rte_thash_x86_gfni.h b/dpdk/lib/hash/rte_thash_x86_gfni.h +index 880739b710..7bb76ac1bb 100644 +--- a/dpdk/lib/hash/rte_thash_x86_gfni.h ++++ b/dpdk/lib/hash/rte_thash_x86_gfni.h +@@ -88,8 +88,10 @@ __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple, + const __m512i shift_8 = _mm512_set1_epi8(8); + __m512i xor_acc = _mm512_setzero_si512(); + __m512i perm_bytes = _mm512_setzero_si512(); +- __m512i vals, matrixes, tuple_bytes, tuple_bytes_2; +- __mmask64 load_mask, permute_mask, permute_mask_2; ++ __m512i vals, matrixes, tuple_bytes_2; ++ __m512i tuple_bytes = _mm512_setzero_si512(); ++ __mmask64 load_mask, permute_mask_2; ++ __mmask64 permute_mask = 0; + int chunk_len = 0, i = 0; + uint8_t mtrx_msk; + const int prepend = 3; +diff --git a/dpdk/lib/ipsec/esp_outb.c b/dpdk/lib/ipsec/esp_outb.c +index 9cbd9202f6..ec87b1dce2 100644 +--- a/dpdk/lib/ipsec/esp_outb.c ++++ b/dpdk/lib/ipsec/esp_outb.c +@@ -198,7 +198,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, + struct rte_udp_hdr *udph = (struct rte_udp_hdr *) + (ph + sa->hdr_len - sizeof(struct rte_udp_hdr)); + udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len - +- sa->hdr_l3_off - sa->hdr_len); ++ sa->hdr_len + sizeof(struct rte_udp_hdr)); + } + + /* update original and new ip header fields */ +diff --git a/dpdk/lib/ipsec/sa.c b/dpdk/lib/ipsec/sa.c +index 59a547637d..2297bd6d72 100644 +--- a/dpdk/lib/ipsec/sa.c ++++ b/dpdk/lib/ipsec/sa.c +@@ -371,7 +371,7 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm) + + /* update l2_len and l3_len fields for outbound mbuf */ + sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off, +- sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0); ++ prm->tun.hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0); + + esp_outb_init(sa, sa->hdr_len, prm->ipsec_xform.esn.value); + } +diff --git a/dpdk/lib/kni/rte_kni.c b/dpdk/lib/kni/rte_kni.c +index 8ab6c47153..bfa6a001ff 100644 +--- a/dpdk/lib/kni/rte_kni.c ++++ b/dpdk/lib/kni/rte_kni.c +@@ -634,8 +634,8 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) + { + unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); + +- /* If buffers removed, allocate mbufs and then put them into alloc_q */ +- if (ret) ++ /* If buffers removed or alloc_q is empty, allocate mbufs and then put them into alloc_q */ ++ if (ret || (kni_fifo_count(kni->alloc_q) == 0)) + kni_allocate_mbufs(kni); + + return ret; +diff --git a/dpdk/lib/kvargs/rte_kvargs.h b/dpdk/lib/kvargs/rte_kvargs.h +index 359a9f5b09..4900b750bc 100644 +--- a/dpdk/lib/kvargs/rte_kvargs.h ++++ b/dpdk/lib/kvargs/rte_kvargs.h +@@ -36,7 +36,19 @@ extern "C" { + /** separator character used between key and value */ + #define RTE_KVARGS_KV_DELIM "=" + +-/** Type of callback function used by rte_kvargs_process() */ ++/** ++ * Callback prototype used by rte_kvargs_process(). ++ * ++ * @param key ++ * The key to consider, it will not be NULL. ++ * @param value ++ * The value corresponding to the key, it may be NULL (e.g. only with key) ++ * @param opaque ++ * An opaque pointer coming from the caller. ++ * @return ++ * - >=0 handle key success. ++ * - <0 on error. 
++ */ + typedef int (*arg_handler_t)(const char *key, const char *value, void *opaque); + + /** A key/value association */ +diff --git a/dpdk/lib/mbuf/rte_mbuf_core.h b/dpdk/lib/mbuf/rte_mbuf_core.h +index a30e1e0eaf..3ab7be49fa 100644 +--- a/dpdk/lib/mbuf/rte_mbuf_core.h ++++ b/dpdk/lib/mbuf/rte_mbuf_core.h +@@ -584,8 +584,8 @@ struct rte_mbuf { + * @see rte_event_eth_tx_adapter_txq_set() + */ + } txadapter; /**< Eventdev ethdev Tx adapter */ +- /**< User defined tags. See rte_distributor_process() */ + uint32_t usr; ++ /**< User defined tags. See rte_distributor_process() */ + } hash; /**< hash information */ + }; + +diff --git a/dpdk/lib/member/rte_member.h b/dpdk/lib/member/rte_member.h +index 072a253c89..d08b143e51 100644 +--- a/dpdk/lib/member/rte_member.h ++++ b/dpdk/lib/member/rte_member.h +@@ -314,6 +314,7 @@ struct rte_member_parameters { + * for bucket location. + * For vBF type, these two hashes and their combinations are used as + * hash locations to index the bit array. ++ * For Sketch type, these seeds are not used. + */ + uint32_t prim_hash_seed; + +diff --git a/dpdk/lib/member/rte_member_sketch.c b/dpdk/lib/member/rte_member_sketch.c +index 524ba77620..d5f35aabe9 100644 +--- a/dpdk/lib/member/rte_member_sketch.c ++++ b/dpdk/lib/member/rte_member_sketch.c +@@ -227,7 +227,6 @@ rte_member_create_sketch(struct rte_member_setsum *ss, + goto error_runtime; + } + +- rte_srand(ss->prim_hash_seed); + for (i = 0; i < ss->num_row; i++) + ss->hash_seeds[i] = rte_rand(); + +diff --git a/dpdk/lib/mempool/rte_mempool.c b/dpdk/lib/mempool/rte_mempool.c +index f33f455790..950d01ffac 100644 +--- a/dpdk/lib/mempool/rte_mempool.c ++++ b/dpdk/lib/mempool/rte_mempool.c +@@ -1500,27 +1500,27 @@ mempool_info_cb(struct rte_mempool *mp, void *arg) + return; + + rte_tel_data_add_dict_string(info->d, "name", mp->name); +- rte_tel_data_add_dict_int(info->d, "pool_id", mp->pool_id); +- rte_tel_data_add_dict_int(info->d, "flags", mp->flags); ++ rte_tel_data_add_dict_u64(info->d, "pool_id", mp->pool_id); ++ rte_tel_data_add_dict_u64(info->d, "flags", mp->flags); + rte_tel_data_add_dict_int(info->d, "socket_id", mp->socket_id); +- rte_tel_data_add_dict_int(info->d, "size", mp->size); +- rte_tel_data_add_dict_int(info->d, "cache_size", mp->cache_size); +- rte_tel_data_add_dict_int(info->d, "elt_size", mp->elt_size); +- rte_tel_data_add_dict_int(info->d, "header_size", mp->header_size); +- rte_tel_data_add_dict_int(info->d, "trailer_size", mp->trailer_size); +- rte_tel_data_add_dict_int(info->d, "private_data_size", ++ rte_tel_data_add_dict_u64(info->d, "size", mp->size); ++ rte_tel_data_add_dict_u64(info->d, "cache_size", mp->cache_size); ++ rte_tel_data_add_dict_u64(info->d, "elt_size", mp->elt_size); ++ rte_tel_data_add_dict_u64(info->d, "header_size", mp->header_size); ++ rte_tel_data_add_dict_u64(info->d, "trailer_size", mp->trailer_size); ++ rte_tel_data_add_dict_u64(info->d, "private_data_size", + mp->private_data_size); + rte_tel_data_add_dict_int(info->d, "ops_index", mp->ops_index); +- rte_tel_data_add_dict_int(info->d, "populated_size", ++ rte_tel_data_add_dict_u64(info->d, "populated_size", + mp->populated_size); + + mz = mp->mz; + rte_tel_data_add_dict_string(info->d, "mz_name", mz->name); +- rte_tel_data_add_dict_int(info->d, "mz_len", mz->len); +- rte_tel_data_add_dict_int(info->d, "mz_hugepage_sz", ++ rte_tel_data_add_dict_u64(info->d, "mz_len", mz->len); ++ rte_tel_data_add_dict_u64(info->d, "mz_hugepage_sz", + mz->hugepage_sz); + rte_tel_data_add_dict_int(info->d, "mz_socket_id", 
mz->socket_id); +- rte_tel_data_add_dict_int(info->d, "mz_flags", mz->flags); ++ rte_tel_data_add_dict_u64(info->d, "mz_flags", mz->flags); + } + + static int +diff --git a/dpdk/lib/net/rte_ip.h b/dpdk/lib/net/rte_ip.h +index 9c8e8206f0..0cafb980ef 100644 +--- a/dpdk/lib/net/rte_ip.h ++++ b/dpdk/lib/net/rte_ip.h +@@ -514,7 +514,7 @@ rte_ipv4_udptcp_cksum_verify(const struct rte_ipv4_hdr *ipv4_hdr, + * Return 0 if the checksum is correct, else -1. + */ + __rte_experimental +-static inline uint16_t ++static inline int + rte_ipv4_udptcp_cksum_mbuf_verify(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) +diff --git a/dpdk/lib/pci/rte_pci.h b/dpdk/lib/pci/rte_pci.h +index 5088157e74..aab761b918 100644 +--- a/dpdk/lib/pci/rte_pci.h ++++ b/dpdk/lib/pci/rte_pci.h +@@ -104,8 +104,7 @@ struct rte_pci_addr { + + /** + * Utility function to write a pci device name, this device name can later be +- * used to retrieve the corresponding rte_pci_addr using eal_parse_pci_* +- * BDF helpers. ++ * used to retrieve the corresponding rte_pci_addr using rte_pci_addr_parse(). + * + * @param addr + * The PCI Bus-Device-Function address +diff --git a/dpdk/lib/pdump/rte_pdump.c b/dpdk/lib/pdump/rte_pdump.c +index a81544cb57..4b7a4b3483 100644 +--- a/dpdk/lib/pdump/rte_pdump.c ++++ b/dpdk/lib/pdump/rte_pdump.c +@@ -134,7 +134,7 @@ pdump_copy(uint16_t port_id, uint16_t queue, + + __atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED); + +- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL); ++ ring_enq = rte_ring_enqueue_burst(ring, (void *)&dup_bufs[0], d_pkts, NULL); + if (unlikely(ring_enq < d_pkts)) { + unsigned int drops = d_pkts - ring_enq; + +diff --git a/dpdk/lib/pipeline/rte_swx_pipeline.c b/dpdk/lib/pipeline/rte_swx_pipeline.c +index 0e631dea2b..084c614639 100644 +--- a/dpdk/lib/pipeline/rte_swx_pipeline.c ++++ b/dpdk/lib/pipeline/rte_swx_pipeline.c +@@ -8450,6 +8450,7 @@ table_build_free(struct rte_swx_pipeline *p) + free(p->table_stats[i].n_pkts_action); + + free(p->table_stats); ++ p->table_stats = NULL; + } + } + +@@ -9364,6 +9365,7 @@ learner_build_free(struct rte_swx_pipeline *p) + free(p->learner_stats[i].n_pkts_action); + + free(p->learner_stats); ++ p->learner_stats = NULL; + } + } + +diff --git a/dpdk/lib/reorder/rte_reorder.c b/dpdk/lib/reorder/rte_reorder.c +index 385ee479da..bc85b83b14 100644 +--- a/dpdk/lib/reorder/rte_reorder.c ++++ b/dpdk/lib/reorder/rte_reorder.c +@@ -60,6 +60,11 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, + { + const unsigned int min_bufsize = sizeof(*b) + + (2 * size * sizeof(struct rte_mbuf *)); ++ static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = { ++ .name = RTE_REORDER_SEQN_DYNFIELD_NAME, ++ .size = sizeof(rte_reorder_seqn_t), ++ .align = __alignof__(rte_reorder_seqn_t), ++ }; + + if (b == NULL) { + RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:" +@@ -86,6 +91,15 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, + return NULL; + } + ++ rte_reorder_seqn_dynfield_offset = rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc); ++ if (rte_reorder_seqn_dynfield_offset < 0) { ++ RTE_LOG(ERR, REORDER, ++ "Failed to register mbuf field for reorder sequence number, rte_errno: %i\n", ++ rte_errno); ++ rte_errno = ENOMEM; ++ return NULL; ++ } ++ + memset(b, 0, bufsize); + strlcpy(b->name, name, sizeof(b->name)); + b->memsize = bufsize; +@@ -98,21 +112,45 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, + return b; + 
} + ++/* ++ * Insert new entry into global list. ++ * Returns pointer to already inserted entry if such exists, or to newly inserted one. ++ */ ++static struct rte_tailq_entry * ++rte_reorder_entry_insert(struct rte_tailq_entry *new_te) ++{ ++ struct rte_reorder_list *reorder_list; ++ struct rte_reorder_buffer *b, *nb; ++ struct rte_tailq_entry *te; ++ ++ rte_mcfg_tailq_write_lock(); ++ ++ reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list); ++ /* guarantee there's no existing */ ++ TAILQ_FOREACH(te, reorder_list, next) { ++ b = (struct rte_reorder_buffer *) te->data; ++ nb = (struct rte_reorder_buffer *) new_te->data; ++ if (strncmp(nb->name, b->name, RTE_REORDER_NAMESIZE) == 0) ++ break; ++ } ++ ++ if (te == NULL) { ++ TAILQ_INSERT_TAIL(reorder_list, new_te, next); ++ te = new_te; ++ } ++ ++ rte_mcfg_tailq_write_unlock(); ++ ++ return te; ++} ++ + struct rte_reorder_buffer* + rte_reorder_create(const char *name, unsigned socket_id, unsigned int size) + { + struct rte_reorder_buffer *b = NULL; +- struct rte_tailq_entry *te; +- struct rte_reorder_list *reorder_list; ++ struct rte_tailq_entry *te, *te_inserted; + const unsigned int bufsize = sizeof(struct rte_reorder_buffer) + + (2 * size * sizeof(struct rte_mbuf *)); +- static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = { +- .name = RTE_REORDER_SEQN_DYNFIELD_NAME, +- .size = sizeof(rte_reorder_seqn_t), +- .align = __alignof__(rte_reorder_seqn_t), +- }; +- +- reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list); + + /* Check user arguments. */ + if (!rte_is_power_of_2(size)) { +@@ -128,32 +166,12 @@ rte_reorder_create(const char *name, unsigned socket_id, unsigned int size) + return NULL; + } + +- rte_reorder_seqn_dynfield_offset = +- rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc); +- if (rte_reorder_seqn_dynfield_offset < 0) { +- RTE_LOG(ERR, REORDER, "Failed to register mbuf field for reorder sequence number\n"); +- rte_errno = ENOMEM; +- return NULL; +- } +- +- rte_mcfg_tailq_write_lock(); +- +- /* guarantee there's no existing */ +- TAILQ_FOREACH(te, reorder_list, next) { +- b = (struct rte_reorder_buffer *) te->data; +- if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0) +- break; +- } +- if (te != NULL) +- goto exit; +- + /* allocate tailq entry */ + te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n"); + rte_errno = ENOMEM; +- b = NULL; +- goto exit; ++ return NULL; + } + + /* Allocate memory to store the reorder buffer structure. 
*/ +@@ -162,14 +180,23 @@ rte_reorder_create(const char *name, unsigned socket_id, unsigned int size) + RTE_LOG(ERR, REORDER, "Memzone allocation failed\n"); + rte_errno = ENOMEM; + rte_free(te); ++ return NULL; + } else { +- rte_reorder_init(b, bufsize, name, size); ++ if (rte_reorder_init(b, bufsize, name, size) == NULL) { ++ rte_free(b); ++ rte_free(te); ++ return NULL; ++ } + te->data = (void *)b; +- TAILQ_INSERT_TAIL(reorder_list, te, next); + } + +-exit: +- rte_mcfg_tailq_write_unlock(); ++ te_inserted = rte_reorder_entry_insert(te); ++ if (te_inserted != te) { ++ rte_free(b); ++ rte_free(te); ++ return te_inserted->data; ++ } ++ + return b; + } + +@@ -389,6 +416,7 @@ rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs, + /* Try to fetch requested number of mbufs from ready buffer */ + while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) { + mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail]; ++ ready_buf->entries[ready_buf->tail] = NULL; + ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask; + } + +diff --git a/dpdk/lib/reorder/rte_reorder.h b/dpdk/lib/reorder/rte_reorder.h +index 5abdb258e2..79c758f450 100644 +--- a/dpdk/lib/reorder/rte_reorder.h ++++ b/dpdk/lib/reorder/rte_reorder.h +@@ -82,6 +82,7 @@ rte_reorder_create(const char *name, unsigned socket_id, unsigned int size); + * The initialized reorder buffer instance, or NULL on error + * On error case, rte_errno will be set appropriately: + * - EINVAL - invalid parameters ++ * - ENOMEM - not enough memory to register dynamic field + */ + struct rte_reorder_buffer * + rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, +diff --git a/dpdk/lib/ring/rte_ring.c b/dpdk/lib/ring/rte_ring.c +index cddaf6b287..d068ca4d37 100644 +--- a/dpdk/lib/ring/rte_ring.c ++++ b/dpdk/lib/ring/rte_ring.c +@@ -332,11 +332,6 @@ rte_ring_free(struct rte_ring *r) + return; + } + +- if (rte_memzone_free(r->memzone) != 0) { +- RTE_LOG(ERR, RING, "Cannot free memory\n"); +- return; +- } +- + ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list); + rte_mcfg_tailq_write_lock(); + +@@ -355,6 +350,9 @@ rte_ring_free(struct rte_ring *r) + + rte_mcfg_tailq_write_unlock(); + ++ if (rte_memzone_free(r->memzone) != 0) ++ RTE_LOG(ERR, RING, "Cannot free memory\n"); ++ + rte_free(te); + } + +diff --git a/dpdk/lib/ring/rte_ring_elem_pvt.h b/dpdk/lib/ring/rte_ring_elem_pvt.h +index 83788c56e6..4b80f58980 100644 +--- a/dpdk/lib/ring/rte_ring_elem_pvt.h ++++ b/dpdk/lib/ring/rte_ring_elem_pvt.h +@@ -10,6 +10,12 @@ + #ifndef _RTE_RING_ELEM_PVT_H_ + #define _RTE_RING_ELEM_PVT_H_ + ++#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000) ++#pragma GCC diagnostic push ++#pragma GCC diagnostic ignored "-Wstringop-overflow" ++#pragma GCC diagnostic ignored "-Wstringop-overread" ++#endif ++ + static __rte_always_inline void + __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size, + uint32_t idx, const void *obj_table, uint32_t n) +@@ -188,12 +194,12 @@ __rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size, + } + + static __rte_always_inline void +-__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head, ++__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t cons_head, + void *obj_table, uint32_t n) + { + unsigned int i; + const uint32_t size = r->size; +- uint32_t idx = prod_head & r->mask; ++ uint32_t idx = cons_head & r->mask; + uint64_t *ring = (uint64_t *)&r[1]; + unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table; + if (likely(idx + n <= size)) { 
+@@ -221,12 +227,12 @@ __rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
+ }
+ 
+ static __rte_always_inline void
+-__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
++__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t cons_head,
+ void *obj_table, uint32_t n)
+ {
+ unsigned int i;
+ const uint32_t size = r->size;
+- uint32_t idx = prod_head & r->mask;
++ uint32_t idx = cons_head & r->mask;
+ rte_int128_t *ring = (rte_int128_t *)&r[1];
+ rte_int128_t *obj = (rte_int128_t *)obj_table;
+ if (likely(idx + n <= size)) {
+@@ -382,4 +388,8 @@ end:
+ return n;
+ }
+ 
++#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
++#pragma GCC diagnostic pop
++#endif
++
+ #endif /* _RTE_RING_ELEM_PVT_H_ */
+diff --git a/dpdk/lib/sched/rte_sched.c b/dpdk/lib/sched/rte_sched.c
+index c91697131d..19768d8c38 100644
+--- a/dpdk/lib/sched/rte_sched.c
++++ b/dpdk/lib/sched/rte_sched.c
+@@ -202,6 +202,9 @@ struct rte_sched_subport {
+ uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
+ uint32_t qsize_sum;
+ 
++ /* TC oversubscription activation */
++ int tc_ov_enabled;
++
+ struct rte_sched_pipe *pipe;
+ struct rte_sched_queue *queue;
+ struct rte_sched_queue_extra *queue_extra;
+@@ -209,9 +212,6 @@ struct rte_sched_subport {
+ uint8_t *bmp_array;
+ struct rte_mbuf **queue_array;
+ uint8_t memory[0] __rte_cache_aligned;
+-
+- /* TC oversubscription activation */
+- int tc_ov_enabled;
+ } __rte_cache_aligned;
+ 
+ struct rte_sched_port {
+diff --git a/dpdk/lib/table/rte_swx_table_selector.c b/dpdk/lib/table/rte_swx_table_selector.c
+index ad99f18453..18e021fe6f 100644
+--- a/dpdk/lib/table/rte_swx_table_selector.c
++++ b/dpdk/lib/table/rte_swx_table_selector.c
+@@ -232,7 +232,7 @@ table_params_copy(struct table *t, struct rte_swx_table_selector_params *params)
+ t->params.n_members_per_group_max = rte_align32pow2(params->n_members_per_group_max);
+ 
+ for (i = 0; i < 32; i++)
+- if (params->n_members_per_group_max == 1U << i)
++ if (t->params.n_members_per_group_max == 1U << i)
+ t->n_members_per_group_max_log2 = i;
+ 
+ /* t->params.selector_mask */
+diff --git a/dpdk/lib/telemetry/rte_telemetry.h b/dpdk/lib/telemetry/rte_telemetry.h
+index d9918c4e96..40e9a3bf9d 100644
+--- a/dpdk/lib/telemetry/rte_telemetry.h
++++ b/dpdk/lib/telemetry/rte_telemetry.h
+@@ -2,9 +2,6 @@
+ * Copyright(c) 2018 Intel Corporation
+ */
+ 
+-#include <stdint.h>
+-
+-
+ #ifndef _RTE_TELEMETRY_H_
+ #define _RTE_TELEMETRY_H_
+ 
+@@ -12,6 +9,8 @@
+ extern "C" {
+ #endif
+ 
++#include <stdint.h>
++
+ /** Maximum length for string used in object. */
+ #define RTE_TEL_MAX_STRING_LEN 128
+ /** Maximum length of string. 
*/ +diff --git a/dpdk/lib/telemetry/telemetry.c b/dpdk/lib/telemetry/telemetry.c +index 8fbb4f3060..9c3c346ff5 100644 +--- a/dpdk/lib/telemetry/telemetry.c ++++ b/dpdk/lib/telemetry/telemetry.c +@@ -208,7 +208,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) + break; + case RTE_TEL_CONTAINER: + { +- char temp[buf_len]; ++ char *temp = malloc(buf_len); ++ if (temp == NULL) ++ break; ++ *temp = '\0'; /* ensure valid string */ ++ + const struct container *cont = + &v->value.container; + if (container_to_json(cont->data, +@@ -219,6 +223,7 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) + v->name, temp); + if (!cont->keep) + rte_tel_data_free(cont->data); ++ free(temp); + break; + } + } +@@ -275,7 +280,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + break; + case RTE_TEL_CONTAINER: + { +- char temp[buf_len]; ++ char *temp = malloc(buf_len); ++ if (temp == NULL) ++ break; ++ *temp = '\0'; /* ensure valid string */ ++ + const struct container *cont = + &v->value.container; + if (container_to_json(cont->data, +@@ -286,6 +295,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + v->name, temp); + if (!cont->keep) + rte_tel_data_free(cont->data); ++ free(temp); + } + } + } +@@ -311,7 +321,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + buf_len, used, + d->data.array[i].u64val); + else if (d->type == RTE_TEL_ARRAY_CONTAINER) { +- char temp[buf_len]; ++ char *temp = malloc(buf_len); ++ if (temp == NULL) ++ break; ++ *temp = '\0'; /* ensure valid string */ ++ + const struct container *rec_data = + &d->data.array[i].container; + if (container_to_json(rec_data->data, +@@ -321,6 +335,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + buf_len, used, temp); + if (!rec_data->keep) + rte_tel_data_free(rec_data->data); ++ free(temp); + } + break; + } +@@ -333,7 +348,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + static void + perform_command(telemetry_cb fn, const char *cmd, const char *param, int s) + { +- struct rte_tel_data data; ++ struct rte_tel_data data = {0}; + + int ret = fn(cmd, param, &data); + if (ret < 0) { +diff --git a/dpdk/lib/vhost/socket.c b/dpdk/lib/vhost/socket.c +index 863a6f6d52..669c322e12 100644 +--- a/dpdk/lib/vhost/socket.c ++++ b/dpdk/lib/vhost/socket.c +@@ -129,10 +129,12 @@ read_fd_message(char *ifname, int sockfd, char *buf, int buflen, int *fds, int m + return ret; + } + +- if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) { ++ if (msgh.msg_flags & MSG_TRUNC) + VHOST_LOG_CONFIG(ifname, ERR, "truncated msg (fd %d)\n", sockfd); +- return -1; +- } ++ ++ /* MSG_CTRUNC may be caused by LSM misconfiguration */ ++ if (msgh.msg_flags & MSG_CTRUNC) ++ VHOST_LOG_CONFIG(ifname, ERR, "truncated control data (fd %d)\n", sockfd); + + for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL; + cmsg = CMSG_NXTHDR(&msgh, cmsg)) { +diff --git a/dpdk/lib/vhost/vhost.h b/dpdk/lib/vhost/vhost.h +index ef211ed519..63e2f3f577 100644 +--- a/dpdk/lib/vhost/vhost.h ++++ b/dpdk/lib/vhost/vhost.h +@@ -782,7 +782,10 @@ hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len) + static __rte_always_inline struct virtio_net * + get_device(int vid) + { +- struct virtio_net *dev = vhost_devices[vid]; ++ struct virtio_net *dev = NULL; ++ ++ if (likely(vid >= 0 && vid < RTE_MAX_VHOST_DEVICE)) ++ dev = vhost_devices[vid]; + + if (unlikely(!dev)) { + VHOST_LOG_CONFIG("device", ERR, "(%d) device not found.\n", vid); +@@ -878,9 +881,9 @@ 
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq) + "%s: used_event_idx=%d, old=%d, new=%d\n", + __func__, vhost_used_event(vq), old, new); + +- if ((vhost_need_event(vhost_used_event(vq), new, old) && +- (vq->callfd >= 0)) || +- unlikely(!signalled_used_valid)) { ++ if ((vhost_need_event(vhost_used_event(vq), new, old) || ++ unlikely(!signalled_used_valid)) && ++ vq->callfd >= 0) { + eventfd_write(vq->callfd, (eventfd_t) 1); + if (dev->flags & VIRTIO_DEV_STATS_ENABLED) + vq->stats.guest_notifications++; +@@ -947,8 +950,10 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) + if (vhost_need_event(off, new, old)) + kick = true; + kick: +- if (kick) { ++ if (kick && vq->callfd >= 0) { + eventfd_write(vq->callfd, (eventfd_t)1); ++ if (dev->flags & VIRTIO_DEV_STATS_ENABLED) ++ vq->stats.guest_notifications++; + if (dev->notify_ops->guest_notified) + dev->notify_ops->guest_notified(dev->vid); + } +diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c +index 9902ae9944..8df66e68b3 100644 +--- a/dpdk/lib/vhost/vhost_user.c ++++ b/dpdk/lib/vhost/vhost_user.c +@@ -1809,7 +1809,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev, + + if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) + close(ctx->fds[0]); +- VHOST_LOG_CONFIG(dev->ifname, INFO, "not implemented\n"); ++ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "not implemented\n"); + + return RTE_VHOST_MSG_RESULT_OK; + } +@@ -2326,7 +2326,7 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev, + return RTE_VHOST_MSG_RESULT_ERR; + + close(ctx->fds[0]); +- VHOST_LOG_CONFIG(dev->ifname, INFO, "not implemented.\n"); ++ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "not implemented.\n"); + + return RTE_VHOST_MSG_RESULT_OK; + } +@@ -2817,29 +2817,36 @@ read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context * + + ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE, + ctx->fds, VHOST_MEMORY_MAX_NREGIONS, &ctx->fd_num); +- if (ret <= 0) { +- return ret; +- } else if (ret != VHOST_USER_HDR_SIZE) { ++ if (ret <= 0) ++ goto out; ++ ++ if (ret != VHOST_USER_HDR_SIZE) { + VHOST_LOG_CONFIG(dev->ifname, ERR, "Unexpected header size read\n"); +- close_msg_fds(ctx); +- return -1; ++ ret = -1; ++ goto out; + } + + if (ctx->msg.size) { + if (ctx->msg.size > sizeof(ctx->msg.payload)) { + VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid msg size: %d\n", + ctx->msg.size); +- return -1; ++ ret = -1; ++ goto out; + } + ret = read(sockfd, &ctx->msg.payload, ctx->msg.size); + if (ret <= 0) +- return ret; ++ goto out; + if (ret != (int)ctx->msg.size) { + VHOST_LOG_CONFIG(dev->ifname, ERR, "read control message failed\n"); +- return -1; ++ ret = -1; ++ goto out; + } + } + ++out: ++ if (ret <= 0) ++ close_msg_fds(ctx); ++ + return ret; + } + +@@ -2987,13 +2994,10 @@ vhost_user_msg_handler(int vid, int fd) + } + } + ++ ctx.msg.request.master = VHOST_USER_NONE; + ret = read_vhost_message(dev, fd, &ctx); +- if (ret <= 0) { +- if (ret < 0) +- VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost read message failed\n"); +- else +- VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n"); +- ++ if (ret == 0) { ++ VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n"); + return -1; + } + +@@ -3003,6 +3007,14 @@ vhost_user_msg_handler(int vid, int fd) + else + msg_handler = NULL; + ++ if (ret < 0) { ++ VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost read message %s%s%sfailed\n", ++ msg_handler != NULL ? "for " : "", ++ msg_handler != NULL ? 
msg_handler->description : "", ++ msg_handler != NULL ? " " : ""); ++ return -1; ++ } ++ + if (msg_handler != NULL && msg_handler->description != NULL) { + if (request != VHOST_USER_IOTLB_MSG) + VHOST_LOG_CONFIG(dev->ifname, INFO, +diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c +index 9abf752f30..26f184f8b2 100644 +--- a/dpdk/lib/vhost/virtio_net.c ++++ b/dpdk/lib/vhost/virtio_net.c +@@ -1453,6 +1453,12 @@ virtio_dev_rx_batch_packed_copy(struct virtio_net *dev, + sizeof(struct virtio_net_hdr_mrg_rxbuf); + } + ++ if (rxvq_is_mergeable(dev)) { ++ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { ++ ASSIGN_UNLESS_EQUAL(hdrs[i]->num_buffers, 1); ++ } ++ } ++ + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr); + +@@ -3470,6 +3476,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + allocerr_warned = true; + } + dropped = true; ++ slot_idx--; + break; + } + diff --git a/SPECS/openvswitch3.2.spec b/SPECS/openvswitch3.2.spec index 38516aa..12aa803 100644 --- a/SPECS/openvswitch3.2.spec +++ b/SPECS/openvswitch3.2.spec @@ -57,7 +57,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 3.2.0 -Release: 32%{?dist} +Release: 56%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -761,6 +761,732 @@ exit 0 %endif %changelog +* Fri Feb 09 2024 Open vSwitch CI - 3.2.0-56 +- Merging upstream branch-3.2 [RH git: 45ac6932dd] + Commit list: + 59403c48ec Prepare for 3.2.3. + 7614e02283 Set release date for 3.2.2. + 2cfbcd5247 netdev-offload-tc: Check geneve metadata length. + 7570744c5a odp: ND: Follow Open Flow spec converting from OF to DP. + + +* Thu Feb 08 2024 Open vSwitch CI - 3.2.0-55 +- Merging upstream branch-3.2 [RH git: 286a4ee527] + Commit list: + e3bfabd257 dpdk: Use DPDK 22.11.4 release for OVS 3.2. + + +* Wed Jan 31 2024 Open vSwitch CI - 3.2.0-54 +- Merging upstream branch-3.2 [RH git: fa23958c51] + Commit list: + 61cf7110d2 github: Bump Fedora version to 39. + + +* Mon Jan 29 2024 Open vSwitch CI - 3.2.0-53 +- Merging upstream branch-3.2 [RH git: d0d062524a] + Commit list: + 49e64f13b2 github: Update versions of action dependencies (Node.js 20). + + +* Fri Jan 19 2024 Open vSwitch CI - 3.2.0-52 +- Merging upstream branch-3.2 [RH git: a792ba9ab7] + Commit list: + f6757eb214 python: ovs: flow: Add meter_id to controller. + dae1ffc17b python: ovs: flow: Make check_pkt_len action a list. + 699d26d425 python: ovs: flow: Add idle_age to openflow flows. + 5bef199078 python: tests: Refactor test_odp section testing. + e3f2eca15d python: ovs: flow: Add dp hash and meter actions. + 1072d0d221 python: ovs: flow: Add sample to nested actions. + 065bdb3e15 python: tests: Add info and key tests for OFPFlows. + 4374b1e64e python: ovs: flow: Fix typo in n_packets. + + +* Fri Jan 19 2024 Open vSwitch CI - 3.2.0-51 +- Merging upstream branch-3.2 [RH git: 081d35df71] + Commit list: + cd8ffc956c ovs-atomic: Fix inclusion of Clang header by GCC 14. + b3d094b4fb ovsdb-idl.at: Test IDL behavior during database conversion. + a1935e9628 python: idl: Handle monitor_canceled. 
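(Illustrative aside, not part of the patch above: the read_vhost_message() hunk earlier in this import funnels every error path through a single out: label so that descriptors received with a message are closed exactly once. A minimal, self-contained C sketch of that cleanup pattern; msg_ctx, read_header, read_payload, and close_fds are hypothetical names, not DPDK APIs.)

#include <stddef.h>

struct msg_ctx {
    int fds[8];      /* descriptors received with the message */
    int fd_num;
    size_t size;
};

/* Hypothetical stand-ins for the real socket reads and fd cleanup. */
static int read_header(struct msg_ctx *ctx) { (void)ctx; return 1; }
static int read_payload(struct msg_ctx *ctx) { (void)ctx; return 1; }
static void close_fds(struct msg_ctx *ctx) { (void)ctx; }

static int
read_message(struct msg_ctx *ctx)
{
    int ret = read_header(ctx);
    if (ret <= 0)
        goto out;

    ret = read_payload(ctx);
    if (ret <= 0)
        goto out;

out:
    if (ret <= 0)        /* one cleanup point instead of per-branch closes */
        close_fds(ctx);
    return ret;
}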
+ + +* Wed Jan 17 2024 Timothy Redaelli - 3.2.0-50 +- redhat: Fix SyntaxWarnings with Python 3.12 [RH git: c0b26fd2ea] + + +* Mon Jan 15 2024 Timothy Redaelli - 3.2.0-49 +- redhat: fix pkgtool for subtree [RH git: ee128f19c0] + + +* Mon Jan 15 2024 Timothy Redaelli - 3.2.0-48 +- Fix README.rst to use subtree for dpdk [RH git: bdacdc3289] + + +* Mon Jan 15 2024 Timothy Redaelli - 3.2.0-47 +- Use subtree for dpdk in redhat/merge [RH git: fd97fec96d] + + +* Mon Jan 15 2024 Timothy Redaelli - 3.2.0-46 +- Merge commit '42ed2ce550ca14cf939c2e6cc383a54c15878b74' into private-tredaell-subtree [RH git: aa4e3a2c4c] + + +* Mon Jan 15 2024 Timothy Redaelli - 3.2.0-45 +- Add 'dpdk/' from commit '9dae7a15aea76313c592c11db44e1386e85f86fe' [RH git: 6e27925dcd] + git-subtree-dir: dpdk + git-subtree-mainline: fd8549da9d66b42c117d8eec6af5768507501c01 + git-subtree-split: 9dae7a15aea76313c592c11db44e1386e85f86fe + + +* Mon Jan 15 2024 Timothy Redaelli - 3.2.0-44 +- Remove dpdk submodule [RH git: fd8549da9d] + You probably also wants to do: + + git submodule deinit -f dpdk + rm -rf .git/modules/dpdk + + +* Tue Jan 09 2024 Open vSwitch CI - 3.2.0-43 +- Merging upstream branch-3.2 [RH git: cdde3a7096] + Commit list: + 946d5ef01e vconn: Count vconn_sent regardless of log level. + 7a307b3563 backtrace: Fix error in log_backtrace() documentation. + ab08bffa32 ovsdb: trigger: Do not allow conversion in read-only mode. + 4e74ac6da1 ovsdb: jsonrpc-server: Fix the DSCP value in default options. + 04fec86f7a jsonrpc: Sort JSON objects while printing debug messages. + c4d655bc12 tests: ovsdb: Use diff -up format for replay test. + 4279901afe ovsdb-server.at: Enbale debug logs in active-backup tests. + 8583cdb990 ovsdb: transaction: Don't try to diff unchanged columns. + 7d9662345d ovsdb: transaction: Avoid diffs for different type references. + abf5ec4109 ovsdb: Fix incorrect sharing of UUID and _version columns. + + +* Tue Jan 09 2024 Open vSwitch CI - 3.2.0-42 +- Merging upstream branch-3.2 [RH git: 37c4409b08] + Commit list: + a3a05b7109 ci: Update the GitHub Ubuntu runner image to Ubuntu 22.04. + + +* Thu Jan 04 2024 Open vSwitch CI - 3.2.0-41 +- Merging upstream branch-3.2 [RH git: 1304925041] + Commit list: + ec1d730163 ovsdb-idl: Preserve change_seqno when deleting rows. + + +* Tue Dec 19 2023 Open vSwitch CI - 3.2.0-40 +- Merging dpdk submodule [RH git: 3a3bf9bf69] + Commit list: + 42ed2ce550 Revert "net/iavf: fix abnormal disable HW interrupt" + + +* Thu Dec 07 2023 Open vSwitch CI - 3.2.0-39 +- Merging upstream branch-3.2 [RH git: 86d14e32a7] + Commit list: + 36022655de system-dpdk: Wait for MTU changes to be applied. + + +* Tue Dec 05 2023 Open vSwitch CI - 3.2.0-38 +- Merging upstream branch-3.2 [RH git: e156ad8e70] + Commit list: + c1ee47eb68 tunnel: Do not carry source port from a previous tunnel. + 74626ad6bb netdev-offload-tc: Fix offload of tunnel key tp_src. + a49ebed1c4 ofp-ct: Return error for unknown property in CT flush. + + +* Fri Dec 01 2023 Open vSwitch CI - 3.2.0-37 +- Merging upstream branch-3.2 [RH git: 32acf50ac9] + Commit list: + 57916a78a8 cirrus: Update from FreeBSD 12 to 14. 
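(Another illustrative aside: the container_to_json() and output_json() hunks earlier in this patch replace a stack VLA, char temp[buf_len], with a heap buffer, since those functions recurse once per container nesting level with a caller-sized buffer. A minimal sketch of the same pattern under that assumption; render_level() is a hypothetical helper, not the DPDK function.)

#include <stdio.h>
#include <stdlib.h>

static int
render_level(char *out, size_t buf_len, int depth)
{
    if (depth == 0)
        return snprintf(out, buf_len, "{}");

    char *temp = malloc(buf_len);  /* heap, not a VLA: stack use stays bounded */
    if (temp == NULL)
        return -1;
    *temp = '\0';                  /* keep temp a valid string on early return */

    int ret = render_level(temp, buf_len, depth - 1);
    if (ret >= 0)
        ret = snprintf(out, buf_len, "{\"inner\":%s}", temp);
    free(temp);
    return ret;
}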
+ + +* Fri Dec 01 2023 Open vSwitch CI - 3.2.0-36 +- Merging dpdk submodule [RH git: 92d950c6f8] + Commit list: + db913c25d3 Merge tag 'v22.11.3' into 22.11 + b721cba875 Revert "net/i40e: revert link status check on device start" + 7849366693 version: 22.11.3 + 774466c6dc Revert "net/iavf: fix tunnel TSO path selection" + 5e87ad0cf8 version: 22.11.3-rc1 + 9fb53245ea common/cnxk: fix CPT backpressure disable on LBK + 17fef613a1 net/nfp: fix offloading flows + 338d0d589a net/nfp: fix Tx descriptor free logic of NFD3 + 6c779801be test/bonding: fix include of standard header + 22c3a016bb net/ngbe: fix RSS offload capability + fc56ce5a01 net/mana: fix Tx queue statistics + 50dc477c2e doc: fix link to flow capabilities from bnxt guide + 56e6c7ef88 doc: update BIOS settings and supported HW for NTB + 89a839ade5 doc: fix description of runtime directories + b5513f8e84 doc: improve wording of cuda guide + 1a4aa1d333 doc: add flow template API requirements for mlx5 + 75a8d9b6d9 app/testpmd: fix meter mark handle update + 4b42c698a2 net/mlx5: fix handle validation for meter mark + 6b686c0912 net/mlx5: fix validation for conntrack indirect action + 5cf1399e93 net/mlx5: fix MPRQ stride size for headroom + 2e3420f544 net/mlx5: fix LRO TCP checksum + 8397c9086a net/mlx5: fix drop action memory leak + e6365bd3d2 net/e1000: fix Rx and Tx queue status + 4e200ede54 net/igc: fix Rx and Tx queue status + 4cae890867 net/ixgbe: fix Rx and Tx queue status + 912704eee5 common/iavf: fix MAC type for 710 NIC + ca8c2ad458 net/iavf: fix stop ordering + 0a1fa750bc net/i40e: fix comments + cb30cddab7 doc: fix typos and wording in flow API guide + c12b2943c9 devtools: fix bashism in mailmap check + 0192d0ac53 kni: fix build with Linux 6.5 + 44632490d9 examples/l3fwd: fix duplicate expression for default nexthop + d385831f49 ipsec: fix NAT-T header length + 93d999a062 examples/ipsec-secgw: fix TAP default MAC address + 1fad66d244 examples/ipsec-secgw: fix socket ID default value + 76422de830 app/crypto-perf: fix socket ID default value + 1a98004f93 cryptodev: fix device socket ID type + 900b4ca132 examples/fips_validation: fix digest length in AES-GCM + 31daa7382f test/crypto: fix PDCP-SDAP test vectors + 440e528ae5 common/qat: detach crypto from compress build + 2e8f88824d crypto/qat: fix null algorithm digest placement + 64db6b40e9 baseband/fpga_5gnr_fec: fix starting unconfigured queue + 4325aff0d4 baseband/fpga_5gnr_fec: fix possible division by zero + 5ff397c56f net/ice: fix RSS hash key generation + 9b7215f150 net/iavf: fix tunnel TSO path selection + 35e69229eb net/ice: fix 32-bit build + 90a0f328b8 net/iavf: fix VLAN insertion in vector path + c14d373562 net/ice: fix VLAN mode parser + 16ce632452 net/mlx5: fix query for NIC flow capability + d165bff7dc net/mlx5: fix RSS expansion inner buffer overflow + 6a567b41ed net/mlx5: forbid MPRQ restart + 03e279ee8b net/mlx5: fix flow workspace destruction + b5075ab2f9 net/mlx5: reduce counter pool name length + b74fa83cf2 net/mlx5: fix profile check of meter mark + db32b39aad common/mlx5: fix obtaining IB device in LAG mode + fcf46d9c1d net/mlx5: fix error in VLAN actions creation + 0c50d16325 net/mlx5: fix error set for age pool initialization + 77159a5063 net/mlx5: fix error set in control tables create + aa9f4b4c58 net/mlx5: fix return value of vport action + d8ec0d83dd net/mlx5: fix error set in Tx representor tagging + d3457cd43d net/mlx5: fix flow dump for modify field + a944f7e99c ethdev: fix potential leak in PCI probing helper + b6025f7a95 net/hns3: fix index to 
look up table in NEON Rx + a2dc813bc4 net/mana: fix WQE count for ringing RQ doorbell + 9d30178ddd net/mana: fix counter overflow for posted WQE + 92d3673001 app/testpmd: fix flow rule number parsing + 36791bbc4b net/hns3: fix non-zero weight for disabled TC + 56b89656df app/testpmd: revert primary process polling all queues fix + 09d8d1fe4f net/txgbe: fix blocking system events + 7a44cac46c doc: fix number of leading spaces in hns3 guide + 8b5633f1e2 doc: fix syntax in hns3 guide + ece673c7a1 doc: fix kernel patch link in hns3 guide + 9b9ae1aff1 net/hns3: delete duplicate macro definition + 3a935374f2 app/testpmd: fix checksum engine with GTP on 32-bit + 67912c46f4 net/netvsc: fix sizeof calculation + d5309394d1 member: fix PRNG seed reset in NitroSketch mode + 1db71d413e hash: fix reading unaligned bits in Toeplitz hash + 7e6b4e9d63 mem: fix memsegs exhausted message + 7a57c9edd6 fib: fix adding default route + fc4be70ded devtools: fix mailmap check for parentheses + 90aa091a20 ipc: fix file descriptor leakage with unhandled messages + 008fdd0e07 raw/ntb: avoid disabling interrupt twice + 54182cce72 cryptodev: fix comments of modular operation parameters + a60ddd6954 net/cnxk: fix flow queue index validation + cd88b5ff39 net/cnxk: fix cookies check with security offload + c98cf071f8 net/cnxk: flush SQ before configuring MTU + dcd21e65bd common/mlx5: adjust fork call with new kernel API + 79310b1b61 net/mlx5: fix device removal event handling + ce6b9b3772 net/mlx5: fix risk in NEON Rx descriptor read + e149ea19c2 net/iavf: fix protocol agnostic offloading with big packets + 04de2b4a56 net/ice: fix protocol agnostic offloading with big packets + 15b67727ca net/ice: fix MAC type of E822 and E823 + a45102d324 net/e1000: fix queue number initialization + 4f26624221 net/i40e: fix tunnel packet Tx descriptor + 2bdeeb53da net/iavf: fix abnormal disable HW interrupt + 520330cbb1 common/idpf: fix memory leak on AVX512 Tx queue close + 00f8c02c41 net/ixgbe: add proper memory barriers in Rx + c8b6f57dc4 net/ice: fix tunnel packet Tx descriptor + 8a0712739d net/iavf: fix VLAN offload with AVX512 + 64c315b9e9 common/sfc_efx/base: fix Rx queue without RSS hash prefix + 47326eff66 net/nfp: fix address always related with PF ID 0 + 715143b51e net/nfp: fix representor name too long + 16a82d0d1a app/testpmd: fix primary process not polling all queues + 368138b61b net/ngbe: remove redundant codes + e66f9e3fdf net/ngbe: fix link status in no LSC mode + 1c7e19d4a0 net/ngbe: adapt to MNG veto bit setting + c523de9115 net/ngbe: fix extended statistics + 409cd85a16 net/txgbe: fix extended statistics + 19c8a701a7 net/txgbe: fix to set autoneg for 1G speed + 57b3a57dfa net/txgbe: adapt to MNG veto bit setting + a62297801c net/txgbe: fix interrupt enable mask + d96c0259e7 net/txgbe/base: fix Tx with fiber hotplug + ff476852e7 net/bonding: fix destroy dedicated queues flow + c193423238 net/bonding: fix startup when NUMA is not supported + 00d2dc7c42 net/nfp: fix VNI of IPv6 NVGRE encap action + 7797d05cd9 net/nfp: fix VNI of IPv4 NVGRE encap action + dc6adc599c net/nfp: fix VNI of VXLAN encap action + fa5f6cff44 ethdev: update documentation for API to get FEC + 7594b6e44d ethdev: check that at least one FEC mode is specified + ff56bf6f3a ethdev: update documentation for API to set FEC + c25e954265 net/nfp: fix endian conversion for tunnel decap action + 502acb0f83 net/nfp: fix unneeded endian conversion + a526063092 net/nfp: fix IPv6 address for set flow action + 64839553ee test/security: fix event inline IPsec 
reassembly tests + 643423f0b7 crypto/openssl: skip workaround at compilation time + 91085d3b58 crypto/openssl: fix memory leak in auth processing + 981792ebc4 crypto/qat: fix sym device prototype + 791a0227d2 common/qat: fix command parameter corruption + 0d5c3e03c5 ci: fix libabigail cache in GHA + ef98a4da59 mbuf: fix Doxygen comment of distributor metadata + 5887a7d14d test: add graph tests + 2bdfd0d7ba examples/fips_validation: fix external build + 72c87f820d examples/l2fwd-cat: fix external build + 11757d12ee dma/dpaa2: set VFA bit for route-by-port with VF + 9047d5c9bf doc: remove warning with Doxygen 1.9.7 + d8e35e7efe doc: fix typo in graph guide + 55fbb9de8d test/mbuf: fix crash in a forked process + 4d116ff1ac net/iavf: fix virtchnl command called in interrupt + bd5c63549a net/ice: fix outer UDP checksum offload + bae61d2305 net/ice: initialize parser for double VLAN + 1f35681b6e net/ice: fix timestamp enabling + 3b032bbf78 net/ice: adjust timestamp mbuf register + 1d71b68237 net/ice/base: remove unreachable code + 7691c220b6 net/ice/base: fix incorrect defines for DCBx + 4ec6da600e net/ice: fix DCF control thread crash + c4d749115d net/iavf: release large VF when closing device + 21ec365e75 net/ice: fix DCF RSS initialization + 83ad87933a net/ice: fix statistics + b9f3b81e9e common/idpf/base: fix memory leaks on control queue + 1b04a2f618 common/idpf/base: fix parameters when send msg to cp + e5868f6648 common/idpf/base: fix ITR register definitions for AVF + 4cc85337b3 common/idpf/base: fix control queue send and receive + 1bfcca5b65 common/idpf: remove device stop flag + c86c1efd2f net/idpf: fix Rx data buffer size + 78c374bf41 net/iavf: fix Rx data buffer size + 1f9af08714 net/ice: fix Rx data buffer size + 7a72db6add net/i40e: fix Rx data buffer size + 65303f7c26 doc: fix typo in cnxk platform guide + d9ba8a4251 net/qede: fix RSS indirection table initialization + df11a52e33 common/cnxk: fix receive queue with multiple mask + 6cbb5fc911 common/cnxk: fix inline device VF identification + d0f357fb6e common/cnxk: fix uninitialized pointer read + 103a33d02d common/cnxk: fix setting channel mask for SDP interfaces + c1e167a078 event/cnxk: fix mempool cookies check + 338514e558 event/cnxk: fix Tx adapter data pointer + 46d6c05770 common/cnxk: fix IPsec IPv6 tunnel address byte swap + 9d1dbc6a01 mempool/cnxk: avoid hang when counting batch allocs + 255fcff79e net/mlx5: fix drop action attribute validation + b53417d63b net/mlx5: fix duplicated tag index matching in SWS + 124a919b4e net/mlx5: forbid duplicated tag index in pattern template + eb02902423 net/mlx5: fix VXLAN matching with zero value + 2e6f71cb51 net/mlx5: fix matcher layout size calculation + 9015baea29 net/mlx5: enhance error log for tunnel offloading + 2a45186cf4 net/virtio-user: fix leak when initialisation fails + d228fa562b net/virtio: fix initialization to return negative errno + a43f6d459a net/virtio: propagate interrupt configuration error values + 94bed3c2fa vhost: fix invalid call FD handling + fe3e1fc9d8 vhost: fix notification stats for packed ring + 3a5561e8b5 crypto/ipsec_mb: optimize allocation in session + a659f5f18c test/crypto: fix IPsec AES CCM vector + 7e0080ff4f crypto/cnxk: fix IPsec CCM capabilities + c53467f356 crypto/ipsec_mb: fix enqueue counter for SNOW3G + d2e09fc0fc test/crypto: fix session creation check + e3456f2478 crypto/openssl: fix memory free + 48b48d2ba8 cryptodev: clarify error codes for symmetric session + 400a3302de examples/ipsec-secgw: fix zero address in ethernet header + 
6217afd84c test/crypto: fix return value for SNOW3G + 228857a34c crypto/scheduler: fix last element for valid args + 4413744138 crypto/qat: fix stack buffer overflow in SGL loop + 0af5332dff doc: fix auth algos in cryptoperf app + 3a8502c73d net/vmxnet3: fix return code in initializing + c40b9a9873 net/nfp: fix TP flow action for UDP + 74870817fb net/nfp: fix flow hash table creation + 18493825d1 net/nfp: fix representor creation + de78bd45d6 net/nfp: fix control mempool creation + caa96e94b0 net/nfp: fix TOS of IPv6 NVGRE encap flow action + 67544106ac net/nfp: fix TOS of IPv6 GENEVE encap flow action + c81816d07f net/nfp: fix TOS of IPv6 VXLAN encap flow action + a1d864c39b net/nfp: fix IPv6 flow item + aae5fcb267 net/nfp: fix disabling promiscuous mode + e659a163e3 ethdev: fix calloc arguments + 175e7a7bac net/hns3: fix IMP reset trigger + 219305d227 net/hns3: fix redundant line break in log + 4292ebcef4 net/hns3: fix inaccurate log + 1c81e1a0a8 net/hns3: fix uninitialized variable + f99fa19b60 net/hns3: fix device start return value + 19c20cef27 net/hns3: fix mbuf leakage when RxQ started after reset + e9ade95ac4 net/hns3: fix mbuf leakage when RxQ started during reset + 422a5e09d2 net/hns3: extract PTP to its own header file + a8ad010661 net/hns3: uninitialize PTP + 3999b58cd3 net/hns3: fix RTC time after reset + 69bff6ea1c net/hns3: fix RTC time on initialization + bdf2131156 doc: fix format in flow API guide + 9ca1814402 net/hns3: fix missing FEC capability + 340cb03d7f net/hns3: fix FEC mode check + b048cdca82 net/hns3: fix FEC mode for 200G ports + a678d7da2b ethdev: fix indirect action conversion + d859368e8f net/hns3: fix Rx multiple firmware reset interrupts + f2dd43a6e9 net/hns3: fix variable type mismatch + 2a7aef1dd0 net/hns3: fix never set MAC flow control + 88cf99ed01 net/sfc: invalidate dangling MAE flow action FW resource IDs + 3254062488 net/mana: return probing failure if no device found + 631f57f5ef ethdev: fix MAC address occupies two entries + 32e0eaa22b net/txgbe: fix use-after-free on remove + 799cc0612c net/vmxnet3: fix drop of empty segments in Tx + acbaa6bb5d net/nfp: fix VLAN push flow action + 9d1fbdb584 app/testpmd: fix GTP L2 length in checksum engine + e7e0590338 net/dpaa2: fix checksum good flags + ecd3e1f354 net/mana: optimize completion queue by batch processing + 5b1a78987f net/mana: avoid unnecessary assignments in data path + 753a735ea0 net/mana: use datapath logging + 8e27036b63 net/tap: set locally administered bit for fixed MAC address + bd14912ab4 net/sfc: stop misuse of Rx ingress m-port metadata on EF100 + a1dfa52406 net/hns3: fix build warning + 10a2ee0333 eal/linux: fix legacy mem init with many segments + f4d5f30fe4 eal/linux: fix secondary process crash for mp hotplug requests + 53e6b8c3f0 event/cnxk: fix nanoseconds to ticks conversion + cf79458fad eventdev/timer: fix buffer flush + d3f784ad5f event/cnxk: fix setting attributes in empty get work + d0257dc168 event/dsw: free rings on close + c12daa6d2d doc: fix event timer adapter guide + ca72f7fdba eventdev/timer: fix timeout event wait behavior + 5ecf2e459d eal/x86: improve multiple of 64 bytes memcpy performance + f4ccd39056 pci: fix comment referencing renamed function + c3b5322fc3 build: fix warning when getting NUMA nodes + 44a9b3b4f7 ci: fix build for Arm cross compilation in GHA + 94babf61c5 eal: avoid calling cleanup twice + 6413085d5f test/malloc: fix statistics checks + 37e859deb8 test/malloc: fix missing free + 2d3c4df8f5 pipeline: fix double free for table stats + cada66aff2 
ring: fix dequeue parameter name + 57dd0e53b1 telemetry: fix autotest on Alpine + f14b25d925 kernel/freebsd: fix function parameter list + 47da400295 vfio: fix include with musl runtime + 8742732344 ring: fix use after free + f9a20a28ec examples/ntb: fix build with GCC 13 + 28aa181578 examples/ip_pipeline: fix build with GCC 13 + 156b423f54 kni: fix build with Linux 6.3 + 403d133d7f net: fix return type of IPv4 L4 packet checksum + d75b66ad1c version: 22.11.2 + 8edef444c7 build: detect backtrace availability + aa96e66c9a version: 22.11.2-rc1 + 9dcf799d88 common/mlx5: use just sufficient barrier for Arm + 6940159325 net/mlx5/hws: fix IPv4 fragment matching + f567b1ee98 common/cnxk: fix IPv6 extension header parsing + f9f94070e0 doc: fix DCF instructions in ice guide + d4dc7b1d1b doc: add Linux capability to access physical addresses + 7ac4d1cebf doc: fix pipeline example path in user guide + abf5150780 devtools: move mailmap check after patch applied + 0dce56b788 acl: fix crash on PPC64 with GCC 11 + ac849cac65 pdump: fix build with GCC 12 + fcf7e0e6f7 test/crypto: fix statistics error messages + 95c2df95d2 doc: fix code blocks in cryptodev guide + 2fe7fcc1a3 net/mlx5: fix sysfs port name translation + 80ec04827f net/mlx5: fix CQE dump for Tx + 1f4de71406 net/mlx5/hws: fix error code of send queue action + 644bcdb856 net/mlx5: fix build with GCC 12 and ASan + d4335766d4 net/mlx5/hws: fix pattern creation + 1cbb884f35 app/testpmd: fix encap/decap size calculation + 6a3a2809eb examples/qos_sched: fix config entries in wrong sections + 782eda8807 net/ipn3ke: fix representor name + abe24f4bd0 net/ipn3ke: fix thread exit + 3bdf07e682 bus/ifpga: fix devargs handling + 0680a33bc7 net/mlx5: fix isolated mode if no representor matching + e10c220053 net/mlx5: fix egress group translation in HWS + 3f7e967684 doc: fix LPM support in l3forward guide + 003a860c86 examples/l3fwd: remove hash entry number + e528b4f96c net/mlx5: fix hairpin Tx queue reference count + b836bb43cf net/iavf: fix device stop during reset + 158c05028f net/idpf: reset queue flag when queue is stopped + 8cb2ee860c net/i40e: fix MAC loopback on X722 + 8cc24b8aab net/e1000: fix saving of stripped VLAN TCI + 91a60dfeee net/i40e: fix AVX512 fast-free path + 8a6d064801 net/sfc: invalidate switch port entry on representor unplug + d59d4fbffa net/virtio: remove address width limit for modern devices + 7be8e75277 net/vhost: fix Rx interrupt + 1ecf04df54 net/vhost: fix leak in interrupt handle setup + 9736b58d62 net/vhost: add missing newline in logs + b80c949b77 app/bbdev: check statistics failure + 39ca0f7397 crypto/ipsec_mb: relax multi-process requirement + 6919a8d874 app/compress-perf: fix remaining data for ops + dc6e5405ca test/crypto: fix skip condition for CPU crypto SGL + 58dff4134c test/crypto: fix capability check for ZUC cipher-auth + 0217458d5e test/crypto: fix ZUC digest length in comparison + 7d3df2701d app/testpmd: fix interactive mode on Windows + ef700bf7d0 gpudev: export header file for external drivers + db12268a95 eal/unix: fix thread creation + 29cdc2b88f test/mbuf: fix test with mbuf debug enabled + 64f4cb8bce test: fix segment length in packet generator + 8f35765d15 reorder: fix sequence number mbuf field register + 38369a1974 ring: silence GCC 12 warnings + ebcd68e43a raw/skeleton: fix selftest + 8160c206ca examples/ipsec-secgw: fix offload variable init + 55f236de5c app/flow-perf: fix division or module by zero + 161ef7c943 app/crypto-perf: fix test file memory leak + 72c1da7b2d crypto/openssl: fix freeing in RSA 
EVP + 1536a7304a crypto/qat: fix SM3 auth mode + eba4eee77b crypto/ccp: fix PCI probing + f081724c6b net/mlx5: fix Windows build with MinGW GCC 12 + 5af10c2a20 net/hns3: add verification of RSS types + 3a5105f1de net/hns3: reimplement hash flow function + 7307f96575 net/hns3: separate flow RSS config from RSS conf + 796c8188a5 net/hns3: allow adding queue buffer size hash rule + adf239587c net/hns3: save hash algo to RSS filter list node + fc620e6832 net/hns3: use new RSS rule to configure hardware + a2804bcc0c net/hns3: separate setting and clearing RSS rule + 679028a2ee net/hns3: separate setting RSS types + 3b85ef547e net/hns3: separate setting redirection table + 99c75dbbfe net/hns3: separate setting hash key + 9c24ae1b2d net/hns3: separate setting hash algorithm + dde9ec15ea net/hns3: use hardware config to report redirection table + dbcf64d12e net/hns3: use hardware config to report hash types + 68da0b3986 net/hns3: use hardware config to report hash key + 5042fd8459 net/hns3: fix possible truncation of redirection table + 81fbc0298c net/hns3: fix possible truncation of hash key when config + 73b11178a3 mem: fix heap ID in telemetry + 57294e8df4 app/testpmd: cleanup cleanly from signal + 564dbb6a03 cmdline: handle EOF as quit + 1a22081244 cmdline: make rdline status not private + 09b626852e kni: fix possible starvation when mbufs are exhausted + 993c0d08eb net/sfc: fix resetting mark in tunnel offload switch rules + f6bdbdf6f2 common/sfc_efx/base: add MAE mark reset action + 4224d5f521 regex/mlx5: fix doorbell record + b5512d3186 regex/mlx5: utilize all available queue pairs + cb4baf72a4 table: fix action selector group size log2 setting + 60f6d9449c raw/skeleton: fix empty devargs parsing + 0848681e90 dma/skeleton: fix empty devargs parsing + ce3c0aa7aa net/virtio: fix empty devargs parsing + 33b1cea25d net/hns3: fix empty devargs parsing + 63ba1d9f2c cryptodev: fix empty devargs parsing + 00e3a4efbc compressdev: fix empty devargs parsing + 6567e0cf76 kvargs: add API documentation for process callback + bb296faffc net/nfp: fix MTU configuration order + bf878ca704 eal/windows: fix thread creation + fac8d80e0d common/cnxk: add memory clobber to steor and ldeor + 67479d24a4 net/cnxk: fix LBK BPID usage + 14688b03c0 net/ice: fix Rx timestamp + 8ec56c115a net/i40e: fix maximum frame size configuration + 287a57f832 net/i40e: revert link status check on device start + dc67b490e8 net/ixgbe: fix IPv6 mask in flow director + b0901e6d5b app/testpmd: fix secondary process packet forwarding + 17637f9c0f net/nfp: fix offload of multiple output actions + fc325877f6 net/nfp: fix set DSCP flow action + 336d6c1c41 net/nfp: fix set TTL flow action + 264cbadbee net/nfp: fix set TP flow action + dcc0be2c8a net/nfp: fix set IPv6 flow action + d6fb8fc9c1 net/nfp: fix set IPv4 flow action + a0a1f437b2 net/nfp: fix set MAC flow action + aaa501436c ethdev: remove telemetry Rx mbuf alloc failed field + 03cfac3c99 net/nfp: fix getting RSS configuration + 5475d251f1 net/mana: fix stats counters + 00af2661ac ethdev: fix build with LTO + cd2635a2b5 app/testpmd: fix packet transmission in noisy VNF engine + e8996dc08d app/testpmd: fix packet count in IEEE 1588 engine + a4141026d7 app/testpmd: fix Tx preparation in checksum engine + 62324129c1 net/mana: enable driver by default + 64d858dc03 net/gve: fix offloading capability + 90f12bb74f net/nfp: fix 48-bit DMA support for NFDk + 6ede2ab472 common/cnxk: fix auth key length + 995a6460b2 examples/fips_validation: add extra space in JSON buffer + 903f4ee07d 
examples/fips_validation: fix AES-XTS sequence number + 91c80e936a examples/fips_validation: fix AES-GCM tests + 809de506c9 examples/fips_validation: fix integer parsing + ac87a06f6b examples/fips_validation: fix MCT output for SHA + 435188c3c4 compress/mlx5: fix queue setup for partial transformations + 2a091b2f31 compress/mlx5: fix output Adler-32 checksum offset + 9d7902bf42 compress/mlx5: fix decompress xform validation + 2bf416240d examples/ipsec-secgw: fix auth IV length + 33f3ef5021 net/virtio: deduce IP length for TSO checksum + 950227315b vhost: fix OOB access for invalid vhost ID + a6d0c8f6aa vhost: fix slot index in async split virtqueue Tx + 47cef65600 test/bbdev: remove check for invalid opaque data + dccdf95daa test/bbdev: extend HARQ tolerance + cdfa1794a6 test/bbdev: fix crash for non supported HARQ length + 4bdaf50031 baseband/acc: fix check after deref and dead code + bf521b9f80 baseband/acc: fix iteration counter in TB mode + ff4d7c9a71 baseband/acc: prevent to dequeue more than requested + 6c9f6f15ee baseband/acc: add explicit mbuf append for soft output + 63797158c2 baseband/acc: protect from TB negative scenario + f76551552e eventdev: fix memory size for telemetry + 2f9bb3f72a event/cnxk: fix SSO cleanup + 0a3f30f07c doc: fix reference to event timer header + e806f0529d doc: add gpudev to the Doxygen index + 300d83b989 eal/windows: fix pedantic build + 03299eb5d4 net/mlx5: fix crash on action template failure + 4dc4547f2d common/cnxk: fix second pass flow rule layer type + dc7302e733 examples/qos_sched: fix Tx port config when link down + ecb065fe53 examples/cmdline: fix build with GCC 12 + 1b93563378 eal: use same atomic intrinsics for GCC and clang + a5d83cee8e build: fix toolchain definition + 15ae43b33a test/reorder: fix double free of drained buffers + b515c436e6 reorder: invalidate buffer from ready queue in drain + 106e0f2637 dma/ioat: fix error reporting on restart + 90ad21e988 dma/ioat: fix indexes after restart + 881919396f dma/ioat: fix device stop if no copies done + fb395ef1d0 eal/freebsd: fix lock in alarm callback + a656595e9b sched: fix alignment of structs in subport + 38465cec0a app/testpmd: fix crash on cleanup + 31f4d9ce13 net/nfp: restrict flow flush to the port + 7ae80e1379 net/nfp: fix VNI of VXLAN encap action + 8cdbec0d75 net/bnxt: fix link state change interrupt config + fd1ff4fecb common/cnxk: fix channel mask for SDP interfaces + ad62d3f410 app/compress-perf: fix testing single operation + 17f4281693 app/compress-perf: fix some typos + c028045915 net/iavf: fix VLAN offload with AVX2 + 3c047b3d86 net/ixgbe: enable IPv6 mask in flow rules + 79781cd646 net/iavf: fix building data desc + 644d38523a net/iavf: protect insertion in flow list + e13bcf7645 net/ice: fix validation of flow transfer attribute + 199591d8d1 net/i40e: fix validation of flow transfer attribute + 5aacd2a62c net/iavf: add lock for VF commands + dbe54bfd9b net/ixgbe: fix firmware version consistency + bfa9955652 net/idpf: fix driver infos + 96ccb4c287 net/idpf: fix mbuf leak in split Tx + 489ddd71b3 net/i40e: reduce interrupt interval in multi-driver mode + 6e50af124f net/iavf: fix outer UDP checksum offload + 4c6cddb596 net/mlx5: check compressed CQE opcode in vectorized Rx + 547b239a21 net/mlx5: ignore non-critical syndromes for Rx queue + 47f5a0e5f3 net/mlx5: fix error CQE dumping for vectorized Rx + 9e0308d5ef net/mlx5/hws: fix memory leak on general pool DB init + 394ba3cfc4 net/mlx5: fix read device clock in real time mode + 35eadc22bc net/mlx5: fix warning for Tx 
scheduling option + a04fa37e6e net/mlx5: fix wait descriptor opcode for ConnectX-7 + d5be082621 net/mlx5: fix flow sample with ConnectX-5 + 575cfce6a7 common/mlx5: fix offset of a field + f3282a003f common/mlx5: improve AES-XTS tweak capability check + 4434048bbe net/mlx5: fix GENEVE resource overwrite + dcb16c48e0 net/mlx5: fix available tag registers calculation for HWS + a93bb50b6c net/txgbe: fix Rx buffer size in config register + 3a842fbbb7 net/sfc: enforce fate action in transfer flow rules + be7d6baf28 net/sfc: fix MAC address entry leak in transfer flow parsing + 56789776c6 net/hns3: extract common functions to set Rx/Tx + 13f062ec55 net/hns3: make getting Tx function static + 1df48cce5e net/hns3: separate Tx prepare from getting Tx function + 418d6cbc35 net/hns3: remove debug condition for Tx prepare + 873d6edb18 net/hns3: add debug info for Rx/Tx dummy function + c6b36e8183 net/hns3: fix burst mode query with dummy function + 6c9834cd2a net/nfp: fix max DMA length + f418af17fc app/testpmd: fix link check condition on port start + e4f7453ec1 net/sfc: export pick transfer proxy callback to representors + 6ae7e4345e net/hns3: fix duplicate RSS rule check + 5c129d8898 net/hns3: fix config struct used for conversion + f2de3c967d net/hns3: fix warning on flush or destroy rule + 2e99d819d3 net/hns3: remove useless code when destroy valid RSS rule + d45c8fe415 net/hns3: use RSS filter list to check duplicated rule + 93fa374ce4 net/hns3: fix clearing RSS configuration + 9126ed75be net/hns3: fix RSS key size compatibility + 36d6105f51 net/hns3: refactor set RSS hash algorithm and key interface + 14d988ad25 net/hns3: extract common function to query device + ffc3022e2d net/hns3: fix log about indirection table size + 547a2c7a55 net/txgbe: fix interrupt loss + c22430a6e5 net/ngbe: add spinlock protection on YT PHY + 767d609db7 net/ngbe: fix packet type to parse from offload flags + c96c4e1b9e net/txgbe: fix packet type to parse from offload flags + e7149d390b net/txgbe: fix default signal quality value for KX/KX4 + a1d3811bc0 app/testpmd: fix forwarding stats for Tx dropped + 8eefe1d245 doc: fix description of L2TPV2 flow item + 5876103dd2 net/hns3: declare flow rule keeping capability + 58910572e8 net/virtio-user: fix device starting failure handling + 78d828c247 vhost: fix possible FD leaks on truncation + eab1940f97 vhost: fix possible FD leaks + cf313170bf vhost: decrease log level for unimplemented requests + 0772cc1d89 eal: cleanup alarm and hotplug before memory detach + f7825956c0 eventdev/timer: fix overflow + fd3e2fb617 test/mbuf: fix mbuf reset test + cc69b3523e drivers/bus: fix leak for devices without driver + acacb53810 eal/linux: fix hugetlbfs sub-directories discovery + 0343d4cb72 telemetry: fix repeat display when callback don't init dict + f5803ba5c5 raw/ifpga/base: fix init with multi-process + 08019befc3 mem: fix hugepage info mapping + 23cb90427a net/cnxk: validate RED threshold config + d7d670cbe2 common/cnxk: reduce channel count per LMAC + 4060bba354 common/cnxk: fix IPv6 extension matching + 5444957e57 common/cnxk: fix dual VLAN parsing + f40c4f3e0b net/cnxk: fix deadlock in security session creation + 78c0d2ab85 common/cnxk: fix aura ID handling + 87725aa6d2 net/cnxk: fix packet type for IPv6 packets post decryption + efb6eccf8e event/cnxk: fix timer operations in secondary process + 903d4a18a1 event/cnxk: fix burst timer arm + 77f50c0971 eventdev/eth_rx: fix getting adapter instance + 0a78560065 event/cnxk: wait for CPT flow control on WQE path + 5e9f154f81 
eventdev/crypto: fix function symbol export + 48870212ad doc: fix firmware list in bnxt guide + a4d58197b2 net/bnxt: fix RSS hash in mbuf + 3f1568d969 net/bnxt: fix Rx queue stats after queue stop and start + bc1682ee55 net/bnxt: fix Tx queue stats after queue stop and start + d357ef8a3c crypto/ipsec_mb: fix ZUC-256 maximum tag length + 031b48d670 compressdev: fix end of driver list + ce54658be7 cryptodev: fix sym session mempool creation description + cbd714b1f9 test/crypto: add missing MAC-I to PDCP vectors + ca39bc2337 test/crypto: fix typo in AES test + 11f862d605 crypto/ipsec_mb: remove unnecessary null check + 24dc362291 crypto/openssl: fix warning on copy length + 2d28201221 crypto/ccp: fix IOVA handling + f11d779c24 crypto/ccp: remove some dead code for UIO + 4e76b62756 crypto/ccp: remove some printf + f3d0a011e3 baseband/acc: fix acc100 queue mapping to 64 bits + 8f06dfc381 baseband/acc: fix multiplexing acc100 operations + 3c330a28d7 baseband/acc: fix acc100 iteration counter in TB + f770622597 baseband/acc: fix memory leak on acc100 close + e44360ae85 app/bbdev: add allocation checks + 9f13ab03f6 app/bbdev: fix build with optional flag + 1e82bbe91d app/bbdev: fix build with optional flag + be1187e1a6 vhost: fix net header settings in datapath + 72820dd60f vdpa/ifc: fix reconnection in SW-assisted live migration + a347909ae2 vdpa/ifc: fix argument compatibility check + ab2779057e build: fix dependencies lookup + 573de4f522 app/dumpcap: fix storing port identifier + cb3c0ba47e examples/qos_sched: fix debug mode + 0e846591b5 mem: fix telemetry data truncation + 453a4d30ed cryptodev: fix telemetry data truncation + 16f272c96f mempool: fix telemetry data truncation + bd11b88f8b ethdev: fix telemetry data truncation + 4f9c6db77b telemetry: move include after guard + 8ab731497d app/testpmd: fix interactive mode with no ports + 1df8c20923 net/nfp: fix teardown of flows sharing a mask ID + 375086efb5 net/nfp: store counter reset before zeroing flow query + 58db4361dc net/hns3: fix inaccurate RTC time to read + 36ec039309 net/ena: fix deadlock in RSS RETA update + f029ceb7aa net/nfp: fix firmware name derived from PCI name + a87a84560c net/nfp: fix Tx packet drop for large data length + 5ef77a1221 graph: fix node shrink + b9a14f6737 gpudev: fix deadlocks when registering callback + 1586e3b7bc fbarray: fix metadata dump + 23a5b25c8a bus/fslmc: fix deadlock on MC send command timeout + a0b1faaad2 crypto/qat: fix build + a636c94996 crypto/qat: fix build for generic x86 with GCC 12 + b527b4c168 crypto/qat: fix stream cipher direction + e2c05f4621 examples/l2fwd-event: fix worker cleanup + 478bec21d0 eventdev/eth_tx: fix devices loop + 4c388e80e9 eventdev/crypto: fix failed events + ddaec3cd54 eventdev/crypto: fix overflow in circular buffer + e1ca28205f eventdev/crypto: fix offset used while flushing events + d66216823d eventdev/crypto: fix enqueue count + b81247d47b app/crypto-perf: fix IPsec direction + 683ce01b19 app/crypto-perf: fix SPI zero + bbba565c73 app/crypto-perf: fix session freeing + a932cd545f app/crypto-perf: fix number of segments + 193ff405b3 crypto/cnxk: fix digest for empty input data + a996e9816f devtools: fix escaped space in grep pattern + b6d9ddee2c doc: fix dependency setup in l2fwd-cat example guide + 760cf7543c hash: fix GFNI implementation build with GCC 12 + c8af2a4ad5 kni: fix build on RHEL 9.1 + fc98f9c6d7 eal/windows: mark memory config as complete + c12e76d298 devtools: fix name check with mbox files + 07e68b092c drivers: fix symbol exports when map is omitted 
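(One backport listed above, 950227315b "vhost: fix OOB access for invalid vhost ID", corresponds to the get_device() hunk earlier in this patch: an externally supplied id must be range-checked before it is used as an array index. A minimal C sketch of that check; MAX_DEVS, devices, and lookup are hypothetical names standing in for RTE_MAX_VHOST_DEVICE, vhost_devices, and get_device.)

#include <stdio.h>

#define MAX_DEVS 1024
static void *devices[MAX_DEVS];

static void *
lookup(int vid)
{
    void *dev = NULL;

    /* Reject negative and out-of-range ids before indexing the table. */
    if (vid >= 0 && vid < MAX_DEVS)
        dev = devices[vid];

    if (dev == NULL)
        fprintf(stderr, "(%d) device not found.\n", vid);
    return dev;
}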
+ + +* Thu Nov 30 2023 Open vSwitch CI - 3.2.0-35 +- Merging upstream branch-3.2 [RH git: 6879730533] + Commit list: + 349e02766c dpdk: Use DPDK 22.11.3 release for OVS 3.2. + + +* Wed Nov 29 2023 Open vSwitch CI - 3.2.0-34 +- Merging upstream branch-3.2 [RH git: d72ace5709] + Commit list: + a52d28b7e0 ovs-ofctl: Correctly mark the CT flush commands. + + +* Mon Nov 27 2023 Open vSwitch CI - 3.2.0-33 +- Merging upstream branch-3.2 [RH git: d892f1b66a] + Commit list: + 319a97e412 mcast-snooping: Flush flood and report ports when deleting interfaces. + d7e9117ca3 mcast-snooping: Test per port explicit flooding. + + * Wed Nov 01 2023 Open vSwitch CI - 3.2.0-32 - Merging upstream branch-3.2 [RH git: 109500e1ee] Commit list: